Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
dm snapshot: allow chunk size to be less than page size
dm snapshot: use unsigned integer chunk size
dm snapshot: lock snapshot while supplying status
dm exception store: fix failed set_chunk_size error path
dm snapshot: require non zero chunk size by end of ctr
dm: dec_pending needs locking to save error value
dm: add missing del_gendisk to alloc_dev error path
dm log: userspace fix incorrect luid cast in userspace_ctr
dm snapshot: free exception store on init failure
dm snapshot: sort by chunk size to fix race

+55 -45
+12 -26
drivers/md/dm-exception-store.c
···
 }
 EXPORT_SYMBOL(dm_exception_store_type_unregister);
 
-/*
- * Round a number up to the nearest 'size' boundary. size must
- * be a power of 2.
- */
-static ulong round_up(ulong n, ulong size)
-{
-	size--;
-	return (n + size) & ~size;
-}
-
 static int set_chunk_size(struct dm_exception_store *store,
			   const char *chunk_size_arg, char **error)
 {
···
 	char *value;
 
 	chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
-	if (*chunk_size_arg == '\0' || *value != '\0') {
+	if (*chunk_size_arg == '\0' || *value != '\0' ||
+	    chunk_size_ulong > UINT_MAX) {
 		*error = "Invalid chunk size";
 		return -EINVAL;
 	}
···
 		return 0;
 	}
 
-	/*
-	 * Chunk size must be multiple of page size. Silently
-	 * round up if it's not.
-	 */
-	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
-
-	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+	return dm_exception_store_set_chunk_size(store,
+						 (unsigned) chunk_size_ulong,
 						 error);
 }
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
 				      char **error)
 {
 	/* Check chunk_size is a power of 2 */
-	if (!is_power_of_2(chunk_size_ulong)) {
+	if (!is_power_of_2(chunk_size)) {
 		*error = "Chunk size is not a power of 2";
 		return -EINVAL;
 	}
 
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
+	if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
 
-	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+	if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
 		*error = "Chunk size is too high";
 		return -EINVAL;
 	}
 
-	store->chunk_size = chunk_size_ulong;
-	store->chunk_mask = chunk_size_ulong - 1;
-	store->chunk_shift = ffs(chunk_size_ulong) - 1;
+	store->chunk_size = chunk_size;
+	store->chunk_mask = chunk_size - 1;
+	store->chunk_shift = ffs(chunk_size) - 1;
 
 	return 0;
 }
···
 
 	r = set_chunk_size(tmp_store, argv[2], &ti->error);
 	if (r)
-		goto bad_cow;
+		goto bad_ctr;
 
 	r = type->ctr(tmp_store, 0, NULL);
 	if (r) {
+4 -4
drivers/md/dm-exception-store.h
···
 	struct dm_dev *cow;
 
 	/* Size of data blocks saved - must be a power of 2 */
-	chunk_t chunk_size;
-	chunk_t chunk_mask;
-	chunk_t chunk_shift;
+	unsigned chunk_size;
+	unsigned chunk_mask;
+	unsigned chunk_shift;
 
 	void *context;
 };
···
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
 				      char **error);
 
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+1 -1
drivers/md/dm-log-userspace-base.c
···
 	}
 
 	/* The ptr value is sufficient for local unique id */
-	lc->luid = (uint64_t)lc;
+	lc->luid = (unsigned long)lc;
 
 	lc->ti = ti;
 
+8 -8
drivers/md/dm-snap-persistent.c
···
 {
 	int r;
 	struct disk_header *dh;
-	chunk_t chunk_size;
+	unsigned chunk_size;
 	int chunk_size_supplied = 1;
 	char *chunk_err;
 
 	/*
-	 * Use default chunk size (or hardsect_size, if larger) if none supplied
+	 * Use default chunk size (or logical_block_size, if larger)
+	 * if none supplied
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
···
 		return 0;
 
 	if (chunk_size_supplied)
-		DMWARN("chunk size %llu in device metadata overrides "
-		       "table chunk size of %llu.",
-		       (unsigned long long)chunk_size,
-		       (unsigned long long)ps->store->chunk_size);
+		DMWARN("chunk size %u in device metadata overrides "
+		       "table chunk size of %u.",
+		       chunk_size, ps->store->chunk_size);
 
 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);
···
 	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
 					      &chunk_err);
 	if (r) {
-		DMERR("invalid on-disk chunk size %llu: %s.",
-		      (unsigned long long)chunk_size, chunk_err);
+		DMERR("invalid on-disk chunk size %u: %s.",
+		      chunk_size, chunk_err);
 		return r;
 	}
 
+21 -4
drivers/md/dm-snap.c
···
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
+	struct dm_snapshot *l;
 	struct origin *o, *new_o;
 	struct block_device *bdev = snap->origin->bdev;
 
···
 		__insert_origin(o);
 	}
 
-	list_add_tail(&snap->list, &o->snapshots);
+	/* Sort the list according to chunk size, largest-first smallest-last */
+	list_for_each_entry(l, &o->snapshots, list)
+		if (l->store->chunk_size < snap->store->chunk_size)
+			break;
+	list_add_tail(&snap->list, &l->list);
 
 	up_write(&_origins_lock);
 	return 0;
···
 	bio_list_init(&s->queued_bios);
 	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
+	if (!s->store->chunk_size) {
+		ti->error = "Chunk size not set";
+		goto bad_load_and_register;
+	}
+
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
 	if (register_snapshot(s)) {
···
 
 	src.bdev = bdev;
 	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
-	src.count = min(s->store->chunk_size, dev_size - src.sector);
+	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
 
 	dest.bdev = s->store->cow->bdev;
 	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
···
 	unsigned sz = 0;
 	struct dm_snapshot *snap = ti->private;
 
+	down_write(&snap->lock);
+
 	switch (type) {
 	case STATUSTYPE_INFO:
 		if (!snap->valid)
···
 					  maxlen - sz);
 		break;
 	}
+
+	up_write(&snap->lock);
 
 	return 0;
 }
···
 	struct dm_dev *dev = ti->private;
 	struct dm_snapshot *snap;
 	struct origin *o;
-	chunk_t chunk_size = 0;
+	unsigned chunk_size = 0;
 
 	down_read(&_origins_lock);
 	o = __lookup_origin(dev->bdev);
···
 	r = dm_register_target(&snapshot_target);
 	if (r) {
 		DMERR("snapshot target register failed %d", r);
-		return r;
+		goto bad_register_snapshot_target;
 	}
 
 	r = dm_register_target(&origin_target);
···
 	dm_unregister_target(&origin_target);
 bad1:
 	dm_unregister_target(&snapshot_target);
+
+bad_register_snapshot_target:
+	dm_exception_store_exit();
 	return r;
 }
 
+9 -2
drivers/md/dm.c
···
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
+	spinlock_t endio_lock;
 };
 
 /*
···
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(md)))
-		io->error = error;
+	if (unlikely(error)) {
+		spin_lock_irqsave(&io->endio_lock, flags);
+		if (!(io->error > 0 && __noflush_suspending(md)))
+			io->error = error;
+		spin_unlock_irqrestore(&io->endio_lock, flags);
+	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
 		if (io->error == DM_ENDIO_REQUEUE) {
···
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
+	spin_lock_init(&ci.io->endio_lock);
 	ci.sector = bio->bi_sector;
 	ci.sector_count = bio_sectors(bio);
 	if (unlikely(bio_empty_barrier(bio)))
···
 bad_bdev:
 	destroy_workqueue(md->wq);
 bad_thread:
+	del_gendisk(md->disk);
 	put_disk(md->disk);
 bad_disk:
 	blk_cleanup_queue(md->queue);