Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm cache: remove remainder of distinct discard block size

A discard block size that is not equal to the cache block size causes data
corruption by erroneously avoiding migrations in issue_copy(), because
the discard state is cleared for a whole group of cache blocks when it
should not be.

Completely remove all code that enabled a distinction between the
cache block size and discard block size.

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Heinz Mauelshagen; committed by Mike Snitzer.
64ab346a d132cc6d

+46 -77
-11
drivers/md/dm-cache-block-types.h
··· 19 19 20 20 typedef dm_block_t __bitwise__ dm_oblock_t; 21 21 typedef uint32_t __bitwise__ dm_cblock_t; 22 - typedef dm_block_t __bitwise__ dm_dblock_t; 23 22 24 23 static inline dm_oblock_t to_oblock(dm_block_t b) 25 24 { ··· 38 39 static inline uint32_t from_cblock(dm_cblock_t b) 39 40 { 40 41 return (__force uint32_t) b; 41 - } 42 - 43 - static inline dm_dblock_t to_dblock(dm_block_t b) 44 - { 45 - return (__force dm_dblock_t) b; 46 - } 47 - 48 - static inline dm_block_t from_dblock(dm_dblock_t b) 49 - { 50 - return (__force dm_block_t) b; 51 42 } 52 43 53 44 #endif /* DM_CACHE_BLOCK_TYPES_H */
+17 -17
drivers/md/dm-cache-metadata.c
··· 109 109 dm_block_t discard_root; 110 110 111 111 sector_t discard_block_size; 112 - dm_dblock_t discard_nr_blocks; 112 + dm_oblock_t discard_nr_blocks; 113 113 114 114 sector_t data_block_size; 115 115 dm_cblock_t cache_blocks; ··· 302 302 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 303 303 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 304 304 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); 305 - disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); 305 + disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); 306 306 disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); 307 307 disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); 308 308 disk_super->cache_blocks = cpu_to_le32(0); ··· 496 496 cmd->hint_root = le64_to_cpu(disk_super->hint_root); 497 497 cmd->discard_root = le64_to_cpu(disk_super->discard_root); 498 498 cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size); 499 - cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks)); 499 + cmd->discard_nr_blocks = to_oblock(le64_to_cpu(disk_super->discard_nr_blocks)); 500 500 cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); 501 501 cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); 502 502 strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); ··· 594 594 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 595 595 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 596 596 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); 597 - disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); 597 + disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); 598 598 disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); 599 599 strncpy(disk_super->policy_name, cmd->policy_name, 
sizeof(disk_super->policy_name)); 600 600 disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); ··· 771 771 772 772 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, 773 773 sector_t discard_block_size, 774 - dm_dblock_t new_nr_entries) 774 + dm_oblock_t new_nr_entries) 775 775 { 776 776 int r; 777 777 778 778 down_write(&cmd->root_lock); 779 779 r = dm_bitset_resize(&cmd->discard_info, 780 780 cmd->discard_root, 781 - from_dblock(cmd->discard_nr_blocks), 782 - from_dblock(new_nr_entries), 781 + from_oblock(cmd->discard_nr_blocks), 782 + from_oblock(new_nr_entries), 783 783 false, &cmd->discard_root); 784 784 if (!r) { 785 785 cmd->discard_block_size = discard_block_size; ··· 792 792 return r; 793 793 } 794 794 795 - static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) 795 + static int __set_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) 796 796 { 797 797 return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root, 798 - from_dblock(b), &cmd->discard_root); 798 + from_oblock(b), &cmd->discard_root); 799 799 } 800 800 801 - static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) 801 + static int __clear_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) 802 802 { 803 803 return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root, 804 - from_dblock(b), &cmd->discard_root); 804 + from_oblock(b), &cmd->discard_root); 805 805 } 806 806 807 - static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b, 807 + static int __is_discarded(struct dm_cache_metadata *cmd, dm_oblock_t b, 808 808 bool *is_discarded) 809 809 { 810 810 return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root, 811 - from_dblock(b), &cmd->discard_root, 811 + from_oblock(b), &cmd->discard_root, 812 812 is_discarded); 813 813 } 814 814 815 815 static int __discard(struct dm_cache_metadata *cmd, 816 - dm_dblock_t dblock, bool discard) 816 + dm_oblock_t dblock, bool discard) 817 817 { 818 818 int r; 819 
819 ··· 826 826 } 827 827 828 828 int dm_cache_set_discard(struct dm_cache_metadata *cmd, 829 - dm_dblock_t dblock, bool discard) 829 + dm_oblock_t dblock, bool discard) 830 830 { 831 831 int r; 832 832 ··· 844 844 dm_block_t b; 845 845 bool discard; 846 846 847 - for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) { 848 - dm_dblock_t dblock = to_dblock(b); 847 + for (b = 0; b < from_oblock(cmd->discard_nr_blocks); b++) { 848 + dm_oblock_t dblock = to_oblock(b); 849 849 850 850 if (cmd->clean_when_opened) { 851 851 r = __is_discarded(cmd, dblock, &discard);
+3 -3
drivers/md/dm-cache-metadata.h
··· 72 72 73 73 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, 74 74 sector_t discard_block_size, 75 - dm_dblock_t new_nr_entries); 75 + dm_oblock_t new_nr_entries); 76 76 77 77 typedef int (*load_discard_fn)(void *context, sector_t discard_block_size, 78 - dm_dblock_t dblock, bool discarded); 78 + dm_oblock_t dblock, bool discarded); 79 79 int dm_cache_load_discards(struct dm_cache_metadata *cmd, 80 80 load_discard_fn fn, void *context); 81 81 82 - int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard); 82 + int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard); 83 83 84 84 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock); 85 85 int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
+26 -46
drivers/md/dm-cache-target.c
··· 237 237 /* 238 238 * origin_blocks entries, discarded if set. 239 239 */ 240 - dm_dblock_t discard_nr_blocks; 240 + dm_oblock_t discard_nr_blocks; 241 241 unsigned long *discard_bitset; 242 - uint32_t discard_block_size; 243 242 244 243 /* 245 244 * Rather than reconstructing the table line for the status we just ··· 525 526 return b; 526 527 } 527 528 528 - static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) 529 - { 530 - uint32_t discard_blocks = cache->discard_block_size; 531 - dm_block_t b = from_oblock(oblock); 532 - 533 - if (!block_size_is_power_of_two(cache)) 534 - discard_blocks = discard_blocks / cache->sectors_per_block; 535 - else 536 - discard_blocks >>= cache->sectors_per_block_shift; 537 - 538 - b = block_div(b, discard_blocks); 539 - 540 - return to_dblock(b); 541 - } 542 - 543 - static void set_discard(struct cache *cache, dm_dblock_t b) 529 + static void set_discard(struct cache *cache, dm_oblock_t b) 544 530 { 545 531 unsigned long flags; 546 532 547 533 atomic_inc(&cache->stats.discard_count); 548 534 549 535 spin_lock_irqsave(&cache->lock, flags); 550 - set_bit(from_dblock(b), cache->discard_bitset); 536 + set_bit(from_oblock(b), cache->discard_bitset); 551 537 spin_unlock_irqrestore(&cache->lock, flags); 552 538 } 553 539 554 - static void clear_discard(struct cache *cache, dm_dblock_t b) 540 + static void clear_discard(struct cache *cache, dm_oblock_t b) 555 541 { 556 542 unsigned long flags; 557 543 558 544 spin_lock_irqsave(&cache->lock, flags); 559 - clear_bit(from_dblock(b), cache->discard_bitset); 545 + clear_bit(from_oblock(b), cache->discard_bitset); 560 546 spin_unlock_irqrestore(&cache->lock, flags); 561 547 } 562 548 563 - static bool is_discarded(struct cache *cache, dm_dblock_t b) 549 + static bool is_discarded(struct cache *cache, dm_oblock_t b) 564 550 { 565 551 int r; 566 552 unsigned long flags; 567 553 568 554 spin_lock_irqsave(&cache->lock, flags); 569 - r = test_bit(from_dblock(b), 
cache->discard_bitset); 555 + r = test_bit(from_oblock(b), cache->discard_bitset); 570 556 spin_unlock_irqrestore(&cache->lock, flags); 571 557 572 558 return r; ··· 563 579 unsigned long flags; 564 580 565 581 spin_lock_irqsave(&cache->lock, flags); 566 - r = test_bit(from_dblock(oblock_to_dblock(cache, b)), 567 - cache->discard_bitset); 582 + r = test_bit(from_oblock(b), cache->discard_bitset); 568 583 spin_unlock_irqrestore(&cache->lock, flags); 569 584 570 585 return r; ··· 688 705 check_if_tick_bio_needed(cache, bio); 689 706 remap_to_origin(cache, bio); 690 707 if (bio_data_dir(bio) == WRITE) 691 - clear_discard(cache, oblock_to_dblock(cache, oblock)); 708 + clear_discard(cache, oblock); 692 709 } 693 710 694 711 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, ··· 698 715 remap_to_cache(cache, bio, cblock); 699 716 if (bio_data_dir(bio) == WRITE) { 700 717 set_dirty(cache, oblock, cblock); 701 - clear_discard(cache, oblock_to_dblock(cache, oblock)); 718 + clear_discard(cache, oblock); 702 719 } 703 720 } 704 721 ··· 1271 1288 static void process_discard_bio(struct cache *cache, struct bio *bio) 1272 1289 { 1273 1290 dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, 1274 - cache->discard_block_size); 1291 + cache->sectors_per_block); 1275 1292 dm_block_t end_block = bio_end_sector(bio); 1276 1293 dm_block_t b; 1277 1294 1278 - end_block = block_div(end_block, cache->discard_block_size); 1295 + end_block = block_div(end_block, cache->sectors_per_block); 1279 1296 1280 1297 for (b = start_block; b < end_block; b++) 1281 - set_discard(cache, to_dblock(b)); 1298 + set_discard(cache, to_oblock(b)); 1282 1299 1283 1300 bio_endio(bio, 0); 1284 1301 } ··· 2275 2292 } 2276 2293 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); 2277 2294 2278 - cache->discard_block_size = cache->sectors_per_block; 2279 - cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); 2280 - cache->discard_bitset = 
alloc_bitset(from_dblock(cache->discard_nr_blocks)); 2295 + cache->discard_nr_blocks = cache->origin_blocks; 2296 + cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks)); 2281 2297 if (!cache->discard_bitset) { 2282 2298 *error = "could not allocate discard bitset"; 2283 2299 goto bad; 2284 2300 } 2285 - clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); 2301 + clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks)); 2286 2302 2287 2303 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); 2288 2304 if (IS_ERR(cache->copier)) { ··· 2565 2583 { 2566 2584 unsigned i, r; 2567 2585 2568 - r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, 2569 - cache->discard_nr_blocks); 2586 + r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block, 2587 + cache->origin_blocks); 2570 2588 if (r) { 2571 2589 DMERR("could not resize on-disk discard bitset"); 2572 2590 return r; 2573 2591 } 2574 2592 2575 - for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { 2576 - r = dm_cache_set_discard(cache->cmd, to_dblock(i), 2577 - is_discarded(cache, to_dblock(i))); 2593 + for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) { 2594 + r = dm_cache_set_discard(cache->cmd, to_oblock(i), 2595 + is_discarded(cache, to_oblock(i))); 2578 2596 if (r) 2579 2597 return r; 2580 2598 } ··· 2671 2689 } 2672 2690 2673 2691 static int load_discard(void *context, sector_t discard_block_size, 2674 - dm_dblock_t dblock, bool discard) 2692 + dm_oblock_t oblock, bool discard) 2675 2693 { 2676 2694 struct cache *cache = context; 2677 2695 2678 - /* FIXME: handle mis-matched block size */ 2679 - 2680 2696 if (discard) 2681 - set_discard(cache, dblock); 2697 + set_discard(cache, oblock); 2682 2698 else 2683 - clear_discard(cache, dblock); 2699 + clear_discard(cache, oblock); 2684 2700 2685 2701 return 0; 2686 2702 } ··· 3069 3089 /* 3070 3090 * FIXME: these limits may be incompatible with 
the cache device 3071 3091 */ 3072 - limits->max_discard_sectors = cache->discard_block_size; 3073 - limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; 3092 + limits->max_discard_sectors = cache->sectors_per_block; 3093 + limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT; 3074 3094 } 3075 3095 3076 3096 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)