Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dm-3.17-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper changes from Mike Snitzer:

- Allow the thin target to be paired with any size external origin; also
allow thin snapshots to be larger than the external origin.

- Add support for quickly loading a repetitive pattern into the
dm-switch target.

- Use per-bio data in the dm-crypt target instead of always using a
mempool for each allocation. Required switching to kmalloc alignment
for the bio slab.

- Fix DM core to properly stack the QUEUE_FLAG_NO_SG_MERGE flag

- Fix the dm-cache and dm-thin targets' export of the minimum_io_size
to match the data block size -- this fixes an issue where mkfs.xfs
would improperly infer raid striping was in place on the underlying
storage.

- Small cleanups in dm-io, dm-mpath and dm-cache

* tag 'dm-3.17-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm table: propagate QUEUE_FLAG_NO_SG_MERGE
dm switch: efficiently support repetitive patterns
dm switch: factor out switch_region_table_read
dm cache: set minimum_io_size to cache's data block size
dm thin: set minimum_io_size to pool's data block size
dm crypt: use per-bio data
block: use kmalloc alignment for bio slab
dm table: make dm_table_supports_discards static
dm cache metadata: use dm-space-map-metadata.h defined size limits
dm cache: fail migrations in the do_worker error path
dm cache: simplify deferred set reference count increments
dm thin: relax external origin size constraints
dm thin: switch to an atomic_t for tracking pending new block preparations
dm mpath: eliminate pg_ready() wrapper
dm io: simplify dec_count and sync_io

+408 -206
+12
Documentation/device-mapper/switch.txt
··· 106 106 The path number in the range 0 ... (<num_paths> - 1). 107 107 Expressed in hexadecimal (WITHOUT any prefix like 0x). 108 108 109 + R<n>,<m> 110 + This parameter allows repetitive patterns to be loaded quickly. <n> and <m> 111 + are hexadecimal numbers. The last <n> mappings are repeated in the next <m> 112 + slots. 113 + 109 114 Status 110 115 ====== 111 116 ··· 129 124 Set mappings for the first 7 entries to point to devices switch0, switch1, 130 125 switch2, switch0, switch1, switch2, switch1: 131 126 dmsetup message switch 0 set_region_mappings 0:0 :1 :2 :0 :1 :2 :1 127 + 128 + Set repetitive mapping. This command: 129 + dmsetup message switch 0 set_region_mappings 1000:1 :2 R2,10 130 + is equivalent to: 131 + dmsetup message switch 0 set_region_mappings 1000:1 :2 :1 :2 :1 :2 :1 :2 \ 132 + :1 :2 :1 :2 :1 :2 :1 :2 :1 :2 133 +
+2 -1
block/bio.c
··· 112 112 bslab = &bio_slabs[entry]; 113 113 114 114 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry); 115 - slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL); 115 + slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN, 116 + SLAB_HWCACHE_ALIGN, NULL); 116 117 if (!slab) 117 118 goto out_unlock; 118 119
+2 -2
drivers/md/dm-cache-metadata.c
··· 330 330 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 331 331 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); 332 332 disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); 333 - disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); 333 + disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE); 334 334 disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); 335 335 disk_super->cache_blocks = cpu_to_le32(0); 336 336 ··· 478 478 bool may_format_device) 479 479 { 480 480 int r; 481 - cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE, 481 + cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, 482 482 CACHE_METADATA_CACHE_SIZE, 483 483 CACHE_MAX_CONCURRENT_LOCKS); 484 484 if (IS_ERR(cmd->bm)) {
+3 -5
drivers/md/dm-cache-metadata.h
··· 9 9 10 10 #include "dm-cache-block-types.h" 11 11 #include "dm-cache-policy-internal.h" 12 + #include "persistent-data/dm-space-map-metadata.h" 12 13 13 14 /*----------------------------------------------------------------*/ 14 15 15 - #define DM_CACHE_METADATA_BLOCK_SIZE 4096 16 + #define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE 16 17 17 18 /* FIXME: remove this restriction */ 18 19 /* 19 20 * The metadata device is currently limited in size. 20 - * 21 - * We have one block of index, which can hold 255 index entries. Each 22 - * index entry contains allocation info about 16k metadata blocks. 23 21 */ 24 - #define DM_CACHE_METADATA_MAX_SECTORS (255 * (1 << 14) * (DM_CACHE_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT))) 22 + #define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS 25 23 26 24 /* 27 25 * A metadata device larger than 16GB triggers a warning.
+80 -48
drivers/md/dm-cache-target.c
··· 718 718 return bio->bi_rw & (REQ_FLUSH | REQ_FUA); 719 719 } 720 720 721 + /* 722 + * You must increment the deferred set whilst the prison cell is held. To 723 + * encourage this, we ask for 'cell' to be passed in. 724 + */ 725 + static void inc_ds(struct cache *cache, struct bio *bio, 726 + struct dm_bio_prison_cell *cell) 727 + { 728 + size_t pb_data_size = get_per_bio_data_size(cache); 729 + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 730 + 731 + BUG_ON(!cell); 732 + BUG_ON(pb->all_io_entry); 733 + 734 + pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 735 + } 736 + 721 737 static void issue(struct cache *cache, struct bio *bio) 722 738 { 723 739 unsigned long flags; ··· 751 735 cache->commit_requested = true; 752 736 bio_list_add(&cache->deferred_flush_bios, bio); 753 737 spin_unlock_irqrestore(&cache->lock, flags); 738 + } 739 + 740 + static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) 741 + { 742 + inc_ds(cache, bio, cell); 743 + issue(cache, bio); 754 744 } 755 745 756 746 static void defer_writethrough_bio(struct cache *cache, struct bio *bio) ··· 1037 1015 1038 1016 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); 1039 1017 remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); 1018 + 1019 + /* 1020 + * No need to inc_ds() here, since the cell will be held for the 1021 + * duration of the io. 1022 + */ 1040 1023 generic_make_request(bio); 1041 1024 } 1042 1025 ··· 1142 1115 return; 1143 1116 1144 1117 INIT_LIST_HEAD(&work); 1145 - if (pb->all_io_entry) 1146 - dm_deferred_entry_dec(pb->all_io_entry, &work); 1118 + dm_deferred_entry_dec(pb->all_io_entry, &work); 1147 1119 1148 1120 if (!list_empty(&work)) 1149 1121 queue_quiesced_migrations(cache, &work); ··· 1278 1252 else 1279 1253 remap_to_cache(cache, bio, 0); 1280 1254 1255 + /* 1256 + * REQ_FLUSH is not directed at any particular block so we don't 1257 + * need to inc_ds(). 
REQ_FUA's are split into a write + REQ_FLUSH 1258 + * by dm-core. 1259 + */ 1281 1260 issue(cache, bio); 1282 1261 } 1283 1262 ··· 1332 1301 &cache->stats.read_miss : &cache->stats.write_miss); 1333 1302 } 1334 1303 1335 - static void issue_cache_bio(struct cache *cache, struct bio *bio, 1336 - struct per_bio_data *pb, 1337 - dm_oblock_t oblock, dm_cblock_t cblock) 1338 - { 1339 - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 1340 - remap_to_cache_dirty(cache, bio, oblock, cblock); 1341 - issue(cache, bio); 1342 - } 1343 - 1344 1304 static void process_bio(struct cache *cache, struct prealloc *structs, 1345 1305 struct bio *bio) 1346 1306 { ··· 1340 1318 dm_oblock_t block = get_bio_block(cache, bio); 1341 1319 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; 1342 1320 struct policy_result lookup_result; 1343 - size_t pb_data_size = get_per_bio_data_size(cache); 1344 - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1345 1321 bool discarded_block = is_discarded_oblock(cache, block); 1346 1322 bool passthrough = passthrough_mode(&cache->features); 1347 1323 bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache)); ··· 1379 1359 1380 1360 } else { 1381 1361 /* FIXME: factor out issue_origin() */ 1382 - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 1383 1362 remap_to_origin_clear_discard(cache, bio, block); 1384 - issue(cache, bio); 1363 + inc_and_issue(cache, bio, new_ocell); 1385 1364 } 1386 1365 } else { 1387 1366 inc_hit_counter(cache, bio); ··· 1388 1369 if (bio_data_dir(bio) == WRITE && 1389 1370 writethrough_mode(&cache->features) && 1390 1371 !is_dirty(cache, lookup_result.cblock)) { 1391 - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 1392 1372 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); 1393 - issue(cache, bio); 1394 - } else 1395 - issue_cache_bio(cache, bio, pb, block, lookup_result.cblock); 1373 + inc_and_issue(cache, bio, 
new_ocell); 1374 + 1375 + } else { 1376 + remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); 1377 + inc_and_issue(cache, bio, new_ocell); 1378 + } 1396 1379 } 1397 1380 1398 1381 break; 1399 1382 1400 1383 case POLICY_MISS: 1401 1384 inc_miss_counter(cache, bio); 1402 - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 1403 1385 remap_to_origin_clear_discard(cache, bio, block); 1404 - issue(cache, bio); 1386 + inc_and_issue(cache, bio, new_ocell); 1405 1387 break; 1406 1388 1407 1389 case POLICY_NEW: ··· 1521 1501 bio_list_init(&cache->deferred_flush_bios); 1522 1502 spin_unlock_irqrestore(&cache->lock, flags); 1523 1503 1504 + /* 1505 + * These bios have already been through inc_ds() 1506 + */ 1524 1507 while ((bio = bio_list_pop(&bios))) 1525 1508 submit_bios ? generic_make_request(bio) : bio_io_error(bio); 1526 1509 } ··· 1541 1518 bio_list_init(&cache->deferred_writethrough_bios); 1542 1519 spin_unlock_irqrestore(&cache->lock, flags); 1543 1520 1521 + /* 1522 + * These bios have already been through inc_ds() 1523 + */ 1544 1524 while ((bio = bio_list_pop(&bios))) 1545 1525 generic_make_request(bio); 1546 1526 } ··· 1720 1694 1721 1695 if (commit_if_needed(cache)) { 1722 1696 process_deferred_flush_bios(cache, false); 1697 + process_migrations(cache, &cache->need_commit_migrations, migration_failure); 1723 1698 1724 1699 /* 1725 1700 * FIXME: rollback metadata or just go into a ··· 2433 2406 return r; 2434 2407 } 2435 2408 2436 - static int cache_map(struct dm_target *ti, struct bio *bio) 2409 + static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell) 2437 2410 { 2438 - struct cache *cache = ti->private; 2439 - 2440 2411 int r; 2441 2412 dm_oblock_t block = get_bio_block(cache, bio); 2442 2413 size_t pb_data_size = get_per_bio_data_size(cache); 2443 2414 bool can_migrate = false; 2444 2415 bool discarded_block; 2445 - struct dm_bio_prison_cell *cell; 2446 2416 struct policy_result lookup_result; 2447 
2417 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); 2448 2418 ··· 2461 2437 /* 2462 2438 * Check to see if that block is currently migrating. 2463 2439 */ 2464 - cell = alloc_prison_cell(cache); 2465 - if (!cell) { 2440 + *cell = alloc_prison_cell(cache); 2441 + if (!*cell) { 2466 2442 defer_bio(cache, bio); 2467 2443 return DM_MAPIO_SUBMITTED; 2468 2444 } 2469 2445 2470 - r = bio_detain(cache, block, bio, cell, 2446 + r = bio_detain(cache, block, bio, *cell, 2471 2447 (cell_free_fn) free_prison_cell, 2472 - cache, &cell); 2448 + cache, cell); 2473 2449 if (r) { 2474 2450 if (r < 0) 2475 2451 defer_bio(cache, bio); ··· 2482 2458 r = policy_map(cache->policy, block, false, can_migrate, discarded_block, 2483 2459 bio, &lookup_result); 2484 2460 if (r == -EWOULDBLOCK) { 2485 - cell_defer(cache, cell, true); 2461 + cell_defer(cache, *cell, true); 2486 2462 return DM_MAPIO_SUBMITTED; 2487 2463 2488 2464 } else if (r) { 2489 2465 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r); 2466 + cell_defer(cache, *cell, false); 2490 2467 bio_io_error(bio); 2491 2468 return DM_MAPIO_SUBMITTED; 2492 2469 } ··· 2501 2476 * We need to invalidate this block, so 2502 2477 * defer for the worker thread. 
2503 2478 */ 2504 - cell_defer(cache, cell, true); 2479 + cell_defer(cache, *cell, true); 2505 2480 r = DM_MAPIO_SUBMITTED; 2506 2481 2507 2482 } else { 2508 - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 2509 2483 inc_miss_counter(cache, bio); 2510 2484 remap_to_origin_clear_discard(cache, bio, block); 2511 - 2512 - cell_defer(cache, cell, false); 2513 2485 } 2514 2486 2515 2487 } else { 2516 2488 inc_hit_counter(cache, bio); 2517 - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 2518 - 2519 2489 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && 2520 2490 !is_dirty(cache, lookup_result.cblock)) 2521 2491 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); 2522 2492 else 2523 2493 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); 2524 - 2525 - cell_defer(cache, cell, false); 2526 2494 } 2527 2495 break; 2528 2496 2529 2497 case POLICY_MISS: 2530 2498 inc_miss_counter(cache, bio); 2531 - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 2532 - 2533 2499 if (pb->req_nr != 0) { 2534 2500 /* 2535 2501 * This is a duplicate writethrough io that is no 2536 2502 * longer needed because the block has been demoted. 
2537 2503 */ 2538 2504 bio_endio(bio, 0); 2539 - cell_defer(cache, cell, false); 2540 - return DM_MAPIO_SUBMITTED; 2541 - } else { 2505 + cell_defer(cache, *cell, false); 2506 + r = DM_MAPIO_SUBMITTED; 2507 + 2508 + } else 2542 2509 remap_to_origin_clear_discard(cache, bio, block); 2543 - cell_defer(cache, cell, false); 2544 - } 2510 + 2545 2511 break; 2546 2512 2547 2513 default: 2548 2514 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, 2549 2515 (unsigned) lookup_result.op); 2516 + cell_defer(cache, *cell, false); 2550 2517 bio_io_error(bio); 2551 2518 r = DM_MAPIO_SUBMITTED; 2519 + } 2520 + 2521 + return r; 2522 + } 2523 + 2524 + static int cache_map(struct dm_target *ti, struct bio *bio) 2525 + { 2526 + int r; 2527 + struct dm_bio_prison_cell *cell; 2528 + struct cache *cache = ti->private; 2529 + 2530 + r = __cache_map(cache, bio, &cell); 2531 + if (r == DM_MAPIO_REMAPPED) { 2532 + inc_ds(cache, bio, cell); 2533 + cell_defer(cache, cell, false); 2552 2534 } 2553 2535 2554 2536 return r; ··· 2840 2808 residency = policy_residency(cache->policy); 2841 2809 2842 2810 DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ", 2843 - (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT), 2811 + (unsigned)DM_CACHE_METADATA_BLOCK_SIZE, 2844 2812 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), 2845 2813 (unsigned long long)nr_blocks_metadata, 2846 2814 cache->sectors_per_block, ··· 3094 3062 */ 3095 3063 if (io_opt_sectors < cache->sectors_per_block || 3096 3064 do_div(io_opt_sectors, cache->sectors_per_block)) { 3097 - blk_limits_io_min(limits, 0); 3065 + blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); 3098 3066 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); 3099 3067 } 3100 3068 set_discard_limits(cache, limits); ··· 3104 3072 3105 3073 static struct target_type cache_target = { 3106 3074 .name = "cache", 3107 - .version = {1, 4, 0}, 3075 + .version = {1, 5, 0}, 3108 3076 .module = 
THIS_MODULE, 3109 3077 .ctr = cache_ctr, 3110 3078 .dtr = cache_dtr,
+27 -14
drivers/md/dm-crypt.c
··· 59 59 int error; 60 60 sector_t sector; 61 61 struct dm_crypt_io *base_io; 62 - }; 62 + } CRYPTO_MINALIGN_ATTR; 63 63 64 64 struct dm_crypt_request { 65 65 struct convert_context *ctx; ··· 161 161 * correctly aligned. 162 162 */ 163 163 unsigned int dmreq_start; 164 + 165 + unsigned int per_bio_data_size; 164 166 165 167 unsigned long flags; 166 168 unsigned int key_size; ··· 897 895 kcryptd_async_done, dmreq_of_req(cc, ctx->req)); 898 896 } 899 897 898 + static void crypt_free_req(struct crypt_config *cc, 899 + struct ablkcipher_request *req, struct bio *base_bio) 900 + { 901 + struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); 902 + 903 + if ((struct ablkcipher_request *)(io + 1) != req) 904 + mempool_free(req, cc->req_pool); 905 + } 906 + 900 907 /* 901 908 * Encrypt / decrypt data from one bio to another one (can be the same one) 902 909 */ ··· 1019 1008 } 1020 1009 } 1021 1010 1022 - static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, 1023 - struct bio *bio, sector_t sector) 1011 + static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, 1012 + struct bio *bio, sector_t sector) 1024 1013 { 1025 - struct dm_crypt_io *io; 1026 - 1027 - io = mempool_alloc(cc->io_pool, GFP_NOIO); 1028 1014 io->cc = cc; 1029 1015 io->base_bio = bio; 1030 1016 io->sector = sector; ··· 1029 1021 io->base_io = NULL; 1030 1022 io->ctx.req = NULL; 1031 1023 atomic_set(&io->io_pending, 0); 1032 - 1033 - return io; 1034 1024 } 1035 1025 1036 1026 static void crypt_inc_pending(struct dm_crypt_io *io) ··· 1052 1046 return; 1053 1047 1054 1048 if (io->ctx.req) 1055 - mempool_free(io->ctx.req, cc->req_pool); 1056 - mempool_free(io, cc->io_pool); 1049 + crypt_free_req(cc, io->ctx.req, base_bio); 1050 + if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size)) 1051 + mempool_free(io, cc->io_pool); 1057 1052 1058 1053 if (likely(!base_io)) 1059 1054 bio_endio(base_bio, error); ··· 1262 1255 * between fragments, so switch to a new 
dm_crypt_io structure. 1263 1256 */ 1264 1257 if (unlikely(!crypt_finished && remaining)) { 1265 - new_io = crypt_io_alloc(io->cc, io->base_bio, 1266 - sector); 1258 + new_io = mempool_alloc(cc->io_pool, GFP_NOIO); 1259 + crypt_io_init(new_io, io->cc, io->base_bio, sector); 1267 1260 crypt_inc_pending(new_io); 1268 1261 crypt_convert_init(cc, &new_io->ctx, NULL, 1269 1262 io->base_bio, sector); ··· 1332 1325 if (error < 0) 1333 1326 io->error = -EIO; 1334 1327 1335 - mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); 1328 + crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); 1336 1329 1337 1330 if (!atomic_dec_and_test(&ctx->cc_pending)) 1338 1331 return; ··· 1735 1728 goto bad; 1736 1729 } 1737 1730 1731 + cc->per_bio_data_size = ti->per_bio_data_size = 1732 + sizeof(struct dm_crypt_io) + cc->dmreq_start + 1733 + sizeof(struct dm_crypt_request) + cc->iv_size; 1734 + 1738 1735 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); 1739 1736 if (!cc->page_pool) { 1740 1737 ti->error = "Cannot allocate page mempool"; ··· 1835 1824 return DM_MAPIO_REMAPPED; 1836 1825 } 1837 1826 1838 - io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); 1827 + io = dm_per_bio_data(bio, cc->per_bio_data_size); 1828 + crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); 1829 + io->ctx.req = (struct ablkcipher_request *)(io + 1); 1839 1830 1840 1831 if (bio_data_dir(io->base_bio) == READ) { 1841 1832 if (kcryptd_io_read(io, GFP_NOWAIT))
+42 -35
drivers/md/dm-io.c
··· 33 33 struct io { 34 34 unsigned long error_bits; 35 35 atomic_t count; 36 - struct completion *wait; 37 36 struct dm_io_client *client; 38 37 io_notify_fn callback; 39 38 void *context; ··· 111 112 * We need an io object to keep track of the number of bios that 112 113 * have been dispatched for a particular io. 113 114 *---------------------------------------------------------------*/ 115 + static void complete_io(struct io *io) 116 + { 117 + unsigned long error_bits = io->error_bits; 118 + io_notify_fn fn = io->callback; 119 + void *context = io->context; 120 + 121 + if (io->vma_invalidate_size) 122 + invalidate_kernel_vmap_range(io->vma_invalidate_address, 123 + io->vma_invalidate_size); 124 + 125 + mempool_free(io, io->client->pool); 126 + fn(error_bits, context); 127 + } 128 + 114 129 static void dec_count(struct io *io, unsigned int region, int error) 115 130 { 116 131 if (error) 117 132 set_bit(region, &io->error_bits); 118 133 119 - if (atomic_dec_and_test(&io->count)) { 120 - if (io->vma_invalidate_size) 121 - invalidate_kernel_vmap_range(io->vma_invalidate_address, 122 - io->vma_invalidate_size); 123 - 124 - if (io->wait) 125 - complete(io->wait); 126 - 127 - else { 128 - unsigned long r = io->error_bits; 129 - io_notify_fn fn = io->callback; 130 - void *context = io->context; 131 - 132 - mempool_free(io, io->client->pool); 133 - fn(r, context); 134 - } 135 - } 134 + if (atomic_dec_and_test(&io->count)) 135 + complete_io(io); 136 136 } 137 137 138 138 static void endio(struct bio *bio, int error) ··· 374 376 dec_count(io, 0, 0); 375 377 } 376 378 379 + struct sync_io { 380 + unsigned long error_bits; 381 + struct completion wait; 382 + }; 383 + 384 + static void sync_io_complete(unsigned long error, void *context) 385 + { 386 + struct sync_io *sio = context; 387 + 388 + sio->error_bits = error; 389 + complete(&sio->wait); 390 + } 391 + 377 392 static int sync_io(struct dm_io_client *client, unsigned int num_regions, 378 393 struct dm_io_region 
*where, int rw, struct dpages *dp, 379 394 unsigned long *error_bits) 380 395 { 381 - /* 382 - * gcc <= 4.3 can't do the alignment for stack variables, so we must 383 - * align it on our own. 384 - * volatile prevents the optimizer from removing or reusing 385 - * "io_" field from the stack frame (allowed in ANSI C). 386 - */ 387 - volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; 388 - struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); 389 - DECLARE_COMPLETION_ONSTACK(wait); 396 + struct io *io; 397 + struct sync_io sio; 390 398 391 399 if (num_regions > 1 && (rw & RW_MASK) != WRITE) { 392 400 WARN_ON(1); 393 401 return -EIO; 394 402 } 395 403 404 + init_completion(&sio.wait); 405 + 406 + io = mempool_alloc(client->pool, GFP_NOIO); 396 407 io->error_bits = 0; 397 408 atomic_set(&io->count, 1); /* see dispatch_io() */ 398 - io->wait = &wait; 399 409 io->client = client; 410 + io->callback = sync_io_complete; 411 + io->context = &sio; 400 412 401 413 io->vma_invalidate_address = dp->vma_invalidate_address; 402 414 io->vma_invalidate_size = dp->vma_invalidate_size; 403 415 404 416 dispatch_io(rw, num_regions, where, dp, io, 1); 405 417 406 - wait_for_completion_io(&wait); 418 + wait_for_completion_io(&sio.wait); 407 419 408 420 if (error_bits) 409 - *error_bits = io->error_bits; 421 + *error_bits = sio.error_bits; 410 422 411 - return io->error_bits ? -EIO : 0; 423 + return sio.error_bits ? -EIO : 0; 412 424 } 413 425 414 426 static int async_io(struct dm_io_client *client, unsigned int num_regions, ··· 436 428 io = mempool_alloc(client->pool, GFP_NOIO); 437 429 io->error_bits = 0; 438 430 atomic_set(&io->count, 1); /* see dispatch_io() */ 439 - io->wait = NULL; 440 431 io->client = client; 441 432 io->callback = fn; 442 433 io->context = context; ··· 488 481 * New collapsed (a)synchronous interface. 489 482 * 490 483 * If the IO is asynchronous (i.e. 
it has notify.fn), you must either unplug 491 - * the queue with blk_unplug() some time later or set REQ_SYNC in 492 - io_req->bi_rw. If you fail to do one of these, the IO will be submitted to 493 - * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c. 484 + * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw. 485 + * If you fail to do one of these, the IO will be submitted to the disk after 486 + * q->unplug_delay, which defaults to 3ms in blk-settings.c. 494 487 */ 495 488 int dm_io(struct dm_io_request *io_req, unsigned num_regions, 496 489 struct dm_io_region *where, unsigned long *sync_error_bits)
+2 -4
drivers/md/dm-mpath.c
··· 373 373 dm_noflush_suspending(m->ti))); 374 374 } 375 375 376 - #define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required) 377 - 378 376 /* 379 377 * Map cloned requests 380 378 */ ··· 400 402 if (!__must_push_back(m)) 401 403 r = -EIO; /* Failed */ 402 404 goto out_unlock; 403 - } 404 - if (!pg_ready(m)) { 405 + } else if (m->queue_io || m->pg_init_required) { 405 406 __pg_init_all_paths(m); 406 407 goto out_unlock; 407 408 } 409 + 408 410 if (set_mapinfo(m, map_context) < 0) 409 411 /* ENOMEM, requeue */ 410 412 goto out_unlock;
+60 -7
drivers/md/dm-switch.c
··· 137 137 *bit *= sctx->region_table_entry_bits; 138 138 } 139 139 140 + static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr) 141 + { 142 + unsigned long region_index; 143 + unsigned bit; 144 + 145 + switch_get_position(sctx, region_nr, &region_index, &bit); 146 + 147 + return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & 148 + ((1 << sctx->region_table_entry_bits) - 1); 149 + } 150 + 140 151 /* 141 152 * Find which path to use at given offset. 142 153 */ 143 154 static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset) 144 155 { 145 - unsigned long region_index; 146 - unsigned bit, path_nr; 156 + unsigned path_nr; 147 157 sector_t p; 148 158 149 159 p = offset; ··· 162 152 else 163 153 sector_div(p, sctx->region_size); 164 154 165 - switch_get_position(sctx, p, &region_index, &bit); 166 - path_nr = (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & 167 - ((1 << sctx->region_table_entry_bits) - 1); 155 + path_nr = switch_region_table_read(sctx, p); 168 156 169 157 /* This can only happen if the processor uses non-atomic stores. 
*/ 170 158 if (unlikely(path_nr >= sctx->nr_paths)) ··· 371 363 } 372 364 373 365 static int process_set_region_mappings(struct switch_ctx *sctx, 374 - unsigned argc, char **argv) 366 + unsigned argc, char **argv) 375 367 { 376 368 unsigned i; 377 369 unsigned long region_index = 0; ··· 379 371 for (i = 1; i < argc; i++) { 380 372 unsigned long path_nr; 381 373 const char *string = argv[i]; 374 + 375 + if ((*string & 0xdf) == 'R') { 376 + unsigned long cycle_length, num_write; 377 + 378 + string++; 379 + if (unlikely(*string == ',')) { 380 + DMWARN("invalid set_region_mappings argument: '%s'", argv[i]); 381 + return -EINVAL; 382 + } 383 + cycle_length = parse_hex(&string); 384 + if (unlikely(*string != ',')) { 385 + DMWARN("invalid set_region_mappings argument: '%s'", argv[i]); 386 + return -EINVAL; 387 + } 388 + string++; 389 + if (unlikely(!*string)) { 390 + DMWARN("invalid set_region_mappings argument: '%s'", argv[i]); 391 + return -EINVAL; 392 + } 393 + num_write = parse_hex(&string); 394 + if (unlikely(*string)) { 395 + DMWARN("invalid set_region_mappings argument: '%s'", argv[i]); 396 + return -EINVAL; 397 + } 398 + 399 + if (unlikely(!cycle_length) || unlikely(cycle_length - 1 > region_index)) { 400 + DMWARN("invalid set_region_mappings cycle length: %lu > %lu", 401 + cycle_length - 1, region_index); 402 + return -EINVAL; 403 + } 404 + if (unlikely(region_index + num_write < region_index) || 405 + unlikely(region_index + num_write >= sctx->nr_regions)) { 406 + DMWARN("invalid set_region_mappings region number: %lu + %lu >= %lu", 407 + region_index, num_write, sctx->nr_regions); 408 + return -EINVAL; 409 + } 410 + 411 + while (num_write--) { 412 + region_index++; 413 + path_nr = switch_region_table_read(sctx, region_index - cycle_length); 414 + switch_region_table_write(sctx, region_index, path_nr); 415 + } 416 + 417 + continue; 418 + } 382 419 383 420 if (*string == ':') 384 421 region_index++; ··· 553 500 554 501 static struct target_type switch_target = { 
555 502 .name = "switch", 556 - .version = {1, 0, 0}, 503 + .version = {1, 1, 0}, 557 504 .module = THIS_MODULE, 558 505 .ctr = switch_ctr, 559 506 .dtr = switch_dtr,
+50 -36
drivers/md/dm-table.c
··· 1386 1386 return q && !blk_queue_add_random(q); 1387 1387 } 1388 1388 1389 + static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, 1390 + sector_t start, sector_t len, void *data) 1391 + { 1392 + struct request_queue *q = bdev_get_queue(dev->bdev); 1393 + 1394 + return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); 1395 + } 1396 + 1389 1397 static bool dm_table_all_devices_attribute(struct dm_table *t, 1390 1398 iterate_devices_callout_fn func) 1391 1399 { ··· 1438 1430 return true; 1439 1431 } 1440 1432 1433 + static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, 1434 + sector_t start, sector_t len, void *data) 1435 + { 1436 + struct request_queue *q = bdev_get_queue(dev->bdev); 1437 + 1438 + return q && blk_queue_discard(q); 1439 + } 1440 + 1441 + static bool dm_table_supports_discards(struct dm_table *t) 1442 + { 1443 + struct dm_target *ti; 1444 + unsigned i = 0; 1445 + 1446 + /* 1447 + * Unless any target used by the table set discards_supported, 1448 + * require at least one underlying device to support discards. 1449 + * t->devices includes internal dm devices such as mirror logs 1450 + * so we need to use iterate_devices here, which targets 1451 + * supporting discard selectively must provide. 
1452 + */ 1453 + while (i < dm_table_get_num_targets(t)) { 1454 + ti = dm_table_get_target(t, i++); 1455 + 1456 + if (!ti->num_discard_bios) 1457 + continue; 1458 + 1459 + if (ti->discards_supported) 1460 + return 1; 1461 + 1462 + if (ti->type->iterate_devices && 1463 + ti->type->iterate_devices(ti, device_discard_capable, NULL)) 1464 + return 1; 1465 + } 1466 + 1467 + return 0; 1468 + } 1469 + 1441 1470 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1442 1471 struct queue_limits *limits) 1443 1472 { ··· 1508 1463 1509 1464 if (!dm_table_supports_write_same(t)) 1510 1465 q->limits.max_write_same_sectors = 0; 1466 + 1467 + if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) 1468 + queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); 1469 + else 1470 + queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); 1511 1471 1512 1472 dm_table_set_integrity(t); 1513 1473 ··· 1686 1636 } 1687 1637 EXPORT_SYMBOL(dm_table_run_md_queue_async); 1688 1638 1689 - static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, 1690 - sector_t start, sector_t len, void *data) 1691 - { 1692 - struct request_queue *q = bdev_get_queue(dev->bdev); 1693 - 1694 - return q && blk_queue_discard(q); 1695 - } 1696 - 1697 - bool dm_table_supports_discards(struct dm_table *t) 1698 - { 1699 - struct dm_target *ti; 1700 - unsigned i = 0; 1701 - 1702 - /* 1703 - * Unless any target used by the table set discards_supported, 1704 - * require at least one underlying device to support discards. 1705 - * t->devices includes internal dm devices such as mirror logs 1706 - * so we need to use iterate_devices here, which targets 1707 - * supporting discard selectively must provide. 
1708 - */ 1709 - while (i < dm_table_get_num_targets(t)) { 1710 - ti = dm_table_get_target(t, i++); 1711 - 1712 - if (!ti->num_discard_bios) 1713 - continue; 1714 - 1715 - if (ti->discards_supported) 1716 - return 1; 1717 - 1718 - if (ti->type->iterate_devices && 1719 - ti->type->iterate_devices(ti, device_discard_capable, NULL)) 1720 - return 1; 1721 - } 1722 - 1723 - return 0; 1724 - }
+128 -53
drivers/md/dm-thin.c
··· 227 227 struct list_head list; 228 228 struct dm_dev *pool_dev; 229 229 struct dm_dev *origin_dev; 230 + sector_t origin_size; 230 231 dm_thin_id dev_id; 231 232 232 233 struct pool *pool; ··· 555 554 struct dm_thin_new_mapping { 556 555 struct list_head list; 557 556 558 - bool quiesced:1; 559 - bool prepared:1; 560 557 bool pass_discard:1; 561 558 bool definitely_not_shared:1; 559 + 560 + /* 561 + * Track quiescing, copying and zeroing preparation actions. When this 562 + * counter hits zero the block is prepared and can be inserted into the 563 + * btree. 564 + */ 565 + atomic_t prepare_actions; 562 566 563 567 int err; 564 568 struct thin_c *tc; ··· 581 575 bio_end_io_t *saved_bi_end_io; 582 576 }; 583 577 584 - static void __maybe_add_mapping(struct dm_thin_new_mapping *m) 578 + static void __complete_mapping_preparation(struct dm_thin_new_mapping *m) 585 579 { 586 580 struct pool *pool = m->tc->pool; 587 581 588 - if (m->quiesced && m->prepared) { 582 + if (atomic_dec_and_test(&m->prepare_actions)) { 589 583 list_add_tail(&m->list, &pool->prepared_mappings); 590 584 wake_worker(pool); 591 585 } 592 586 } 593 587 594 - static void copy_complete(int read_err, unsigned long write_err, void *context) 588 + static void complete_mapping_preparation(struct dm_thin_new_mapping *m) 595 589 { 596 590 unsigned long flags; 597 - struct dm_thin_new_mapping *m = context; 598 591 struct pool *pool = m->tc->pool; 599 592 600 - m->err = read_err || write_err ? -EIO : 0; 601 - 602 593 spin_lock_irqsave(&pool->lock, flags); 603 - m->prepared = true; 604 - __maybe_add_mapping(m); 594 + __complete_mapping_preparation(m); 605 595 spin_unlock_irqrestore(&pool->lock, flags); 596 + } 597 + 598 + static void copy_complete(int read_err, unsigned long write_err, void *context) 599 + { 600 + struct dm_thin_new_mapping *m = context; 601 + 602 + m->err = read_err || write_err ? 
-EIO : 0; 603 + complete_mapping_preparation(m); 606 604 } 607 605 608 606 static void overwrite_endio(struct bio *bio, int err) 609 607 { 610 - unsigned long flags; 611 608 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 612 609 struct dm_thin_new_mapping *m = h->overwrite_mapping; 613 - struct pool *pool = m->tc->pool; 614 610 615 611 m->err = err; 616 - 617 - spin_lock_irqsave(&pool->lock, flags); 618 - m->prepared = true; 619 - __maybe_add_mapping(m); 620 - spin_unlock_irqrestore(&pool->lock, flags); 612 + complete_mapping_preparation(m); 621 613 } 622 614 623 615 /*----------------------------------------------------------------*/ ··· 825 821 return m; 826 822 } 827 823 824 + static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m, 825 + sector_t begin, sector_t end) 826 + { 827 + int r; 828 + struct dm_io_region to; 829 + 830 + to.bdev = tc->pool_dev->bdev; 831 + to.sector = begin; 832 + to.count = end - begin; 833 + 834 + r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); 835 + if (r < 0) { 836 + DMERR_LIMIT("dm_kcopyd_zero() failed"); 837 + copy_complete(1, 1, m); 838 + } 839 + } 840 + 841 + /* 842 + * A partial copy also needs to zero the uncopied region. 843 + */ 828 844 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, 829 845 struct dm_dev *origin, dm_block_t data_origin, 830 846 dm_block_t data_dest, 831 - struct dm_bio_prison_cell *cell, struct bio *bio) 847 + struct dm_bio_prison_cell *cell, struct bio *bio, 848 + sector_t len) 832 849 { 833 850 int r; 834 851 struct pool *pool = tc->pool; ··· 860 835 m->data_block = data_dest; 861 836 m->cell = cell; 862 837 838 + /* 839 + * quiesce action + copy action + an extra reference held for the 840 + * duration of this function (we may need to inc later for a 841 + * partial zero). 
842 + */ 843 + atomic_set(&m->prepare_actions, 3); 844 + 863 845 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) 864 - m->quiesced = true; 846 + complete_mapping_preparation(m); /* already quiesced */ 865 847 866 848 /* 867 849 * IO to pool_dev remaps to the pool target's data_dev. ··· 889 857 890 858 from.bdev = origin->bdev; 891 859 from.sector = data_origin * pool->sectors_per_block; 892 - from.count = pool->sectors_per_block; 860 + from.count = len; 893 861 894 862 to.bdev = tc->pool_dev->bdev; 895 863 to.sector = data_dest * pool->sectors_per_block; 896 - to.count = pool->sectors_per_block; 864 + to.count = len; 897 865 898 866 r = dm_kcopyd_copy(pool->copier, &from, 1, &to, 899 867 0, copy_complete, m); 900 868 if (r < 0) { 901 - mempool_free(m, pool->mapping_pool); 902 869 DMERR_LIMIT("dm_kcopyd_copy() failed"); 903 - cell_error(pool, cell); 870 + copy_complete(1, 1, m); 871 + 872 + /* 873 + * We allow the zero to be issued, to simplify the 874 + * error path. Otherwise we'd need to start 875 + * worrying about decrementing the prepare_actions 876 + * counter. 877 + */ 878 + } 879 + 880 + /* 881 + * Do we need to zero a tail region? 
882 + */ 883 + if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { 884 + atomic_inc(&m->prepare_actions); 885 + ll_zero(tc, m, 886 + data_dest * pool->sectors_per_block + len, 887 + (data_dest + 1) * pool->sectors_per_block); 904 888 } 905 889 } 890 + 891 + complete_mapping_preparation(m); /* drop our ref */ 906 892 } 907 893 908 894 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, ··· 928 878 struct dm_bio_prison_cell *cell, struct bio *bio) 929 879 { 930 880 schedule_copy(tc, virt_block, tc->pool_dev, 931 - data_origin, data_dest, cell, bio); 932 - } 933 - 934 - static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, 935 - dm_block_t data_dest, 936 - struct dm_bio_prison_cell *cell, struct bio *bio) 937 - { 938 - schedule_copy(tc, virt_block, tc->origin_dev, 939 - virt_block, data_dest, cell, bio); 881 + data_origin, data_dest, cell, bio, 882 + tc->pool->sectors_per_block); 940 883 } 941 884 942 885 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, ··· 939 896 struct pool *pool = tc->pool; 940 897 struct dm_thin_new_mapping *m = get_next_mapping(pool); 941 898 942 - m->quiesced = true; 943 - m->prepared = false; 899 + atomic_set(&m->prepare_actions, 1); /* no need to quiesce */ 944 900 m->tc = tc; 945 901 m->virt_block = virt_block; 946 902 m->data_block = data_block; ··· 961 919 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 962 920 inc_all_io_entry(pool, bio); 963 921 remap_and_issue(tc, bio, data_block); 964 - } else { 965 - int r; 966 - struct dm_io_region to; 967 922 968 - to.bdev = tc->pool_dev->bdev; 969 - to.sector = data_block * pool->sectors_per_block; 970 - to.count = pool->sectors_per_block; 923 + } else 924 + ll_zero(tc, m, 925 + data_block * pool->sectors_per_block, 926 + (data_block + 1) * pool->sectors_per_block); 927 + } 971 928 972 - r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m); 973 - if (r < 0) { 974 - mempool_free(m, 
pool->mapping_pool); 975 - DMERR_LIMIT("dm_kcopyd_zero() failed"); 976 - cell_error(pool, cell); 977 - } 978 - } 929 + static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, 930 + dm_block_t data_dest, 931 + struct dm_bio_prison_cell *cell, struct bio *bio) 932 + { 933 + struct pool *pool = tc->pool; 934 + sector_t virt_block_begin = virt_block * pool->sectors_per_block; 935 + sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; 936 + 937 + if (virt_block_end <= tc->origin_size) 938 + schedule_copy(tc, virt_block, tc->origin_dev, 939 + virt_block, data_dest, cell, bio, 940 + pool->sectors_per_block); 941 + 942 + else if (virt_block_begin < tc->origin_size) 943 + schedule_copy(tc, virt_block, tc->origin_dev, 944 + virt_block, data_dest, cell, bio, 945 + tc->origin_size - virt_block_begin); 946 + 947 + else 948 + schedule_zero(tc, virt_block, data_dest, cell, bio); 979 949 } 980 950 981 951 /* ··· 1369 1315 inc_all_io_entry(pool, bio); 1370 1316 cell_defer_no_holder(tc, cell); 1371 1317 1372 - remap_to_origin_and_issue(tc, bio); 1318 + if (bio_end_sector(bio) <= tc->origin_size) 1319 + remap_to_origin_and_issue(tc, bio); 1320 + 1321 + else if (bio->bi_iter.bi_sector < tc->origin_size) { 1322 + zero_fill_bio(bio); 1323 + bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; 1324 + remap_to_origin_and_issue(tc, bio); 1325 + 1326 + } else { 1327 + zero_fill_bio(bio); 1328 + bio_endio(bio, 0); 1329 + } 1373 1330 } else 1374 1331 provision_block(tc, bio, block, cell); 1375 1332 break; ··· 3177 3112 */ 3178 3113 if (io_opt_sectors < pool->sectors_per_block || 3179 3114 do_div(io_opt_sectors, pool->sectors_per_block)) { 3180 - blk_limits_io_min(limits, 0); 3115 + blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); 3181 3116 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); 3182 3117 } 3183 3118 ··· 3206 3141 .name = "thin-pool", 3207 3142 .features = DM_TARGET_SINGLETON | 
DM_TARGET_ALWAYS_WRITEABLE | 3208 3143 DM_TARGET_IMMUTABLE, 3209 - .version = {1, 12, 0}, 3144 + .version = {1, 13, 0}, 3210 3145 .module = THIS_MODULE, 3211 3146 .ctr = pool_ctr, 3212 3147 .dtr = pool_dtr, ··· 3426 3361 spin_lock_irqsave(&pool->lock, flags); 3427 3362 list_for_each_entry_safe(m, tmp, &work, list) { 3428 3363 list_del(&m->list); 3429 - m->quiesced = true; 3430 - __maybe_add_mapping(m); 3364 + __complete_mapping_preparation(m); 3431 3365 } 3432 3366 spin_unlock_irqrestore(&pool->lock, flags); 3433 3367 } ··· 3463 3399 * unfortunately we must always run this. 3464 3400 */ 3465 3401 noflush_work(tc, do_noflush_stop); 3402 + } 3403 + 3404 + static int thin_preresume(struct dm_target *ti) 3405 + { 3406 + struct thin_c *tc = ti->private; 3407 + 3408 + if (tc->origin_dev) 3409 + tc->origin_size = get_dev_size(tc->origin_dev->bdev); 3410 + 3411 + return 0; 3466 3412 } 3467 3413 3468 3414 /* ··· 3557 3483 3558 3484 static struct target_type thin_target = { 3559 3485 .name = "thin", 3560 - .version = {1, 12, 0}, 3486 + .version = {1, 13, 0}, 3561 3487 .module = THIS_MODULE, 3562 3488 .ctr = thin_ctr, 3563 3489 .dtr = thin_dtr, 3564 3490 .map = thin_map, 3565 3491 .end_io = thin_endio, 3492 + .preresume = thin_preresume, 3566 3493 .presuspend = thin_presuspend, 3567 3494 .postsuspend = thin_postsuspend, 3568 3495 .status = thin_status,
-1
drivers/md/dm.h
··· 72 72 unsigned dm_table_get_type(struct dm_table *t); 73 73 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); 74 74 bool dm_table_request_based(struct dm_table *t); 75 - bool dm_table_supports_discards(struct dm_table *t); 76 75 void dm_table_free_md_mempools(struct dm_table *t); 77 76 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); 78 77