Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm cache: revert "remove remainder of distinct discard block size"

This reverts commit 64ab346a360a4b15c28fb8531918d4a01f4eabd9 because we
actually do want to allow the discard blocksize to be larger than the
cache blocksize. Further dm-cache discard changes will make this
possible.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Joe Thornber; committed by Mike Snitzer.
1bad9bc4 5f274d88

+77 -46
+11
drivers/md/dm-cache-block-types.h
··· 19 19 20 20 typedef dm_block_t __bitwise__ dm_oblock_t; 21 21 typedef uint32_t __bitwise__ dm_cblock_t; 22 + typedef dm_block_t __bitwise__ dm_dblock_t; 22 23 23 24 static inline dm_oblock_t to_oblock(dm_block_t b) 24 25 { ··· 39 38 static inline uint32_t from_cblock(dm_cblock_t b) 40 39 { 41 40 return (__force uint32_t) b; 41 + } 42 + 43 + static inline dm_dblock_t to_dblock(dm_block_t b) 44 + { 45 + return (__force dm_dblock_t) b; 46 + } 47 + 48 + static inline dm_block_t from_dblock(dm_dblock_t b) 49 + { 50 + return (__force dm_block_t) b; 42 51 } 43 52 44 53 #endif /* DM_CACHE_BLOCK_TYPES_H */
+17 -17
drivers/md/dm-cache-metadata.c
··· 109 109 dm_block_t discard_root; 110 110 111 111 sector_t discard_block_size; 112 - dm_oblock_t discard_nr_blocks; 112 + dm_dblock_t discard_nr_blocks; 113 113 114 114 sector_t data_block_size; 115 115 dm_cblock_t cache_blocks; ··· 329 329 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 330 330 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 331 331 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); 332 - disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); 332 + disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); 333 333 disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE); 334 334 disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); 335 335 disk_super->cache_blocks = cpu_to_le32(0); ··· 528 528 cmd->hint_root = le64_to_cpu(disk_super->hint_root); 529 529 cmd->discard_root = le64_to_cpu(disk_super->discard_root); 530 530 cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size); 531 - cmd->discard_nr_blocks = to_oblock(le64_to_cpu(disk_super->discard_nr_blocks)); 531 + cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks)); 532 532 cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); 533 533 cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); 534 534 strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); ··· 626 626 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 627 627 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 628 628 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); 629 - disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); 629 + disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); 630 630 disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); 631 631 strncpy(disk_super->policy_name, cmd->policy_name, 
sizeof(disk_super->policy_name)); 632 632 disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); ··· 797 797 798 798 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, 799 799 sector_t discard_block_size, 800 - dm_oblock_t new_nr_entries) 800 + dm_dblock_t new_nr_entries) 801 801 { 802 802 int r; 803 803 804 804 down_write(&cmd->root_lock); 805 805 r = dm_bitset_resize(&cmd->discard_info, 806 806 cmd->discard_root, 807 - from_oblock(cmd->discard_nr_blocks), 808 - from_oblock(new_nr_entries), 807 + from_dblock(cmd->discard_nr_blocks), 808 + from_dblock(new_nr_entries), 809 809 false, &cmd->discard_root); 810 810 if (!r) { 811 811 cmd->discard_block_size = discard_block_size; ··· 818 818 return r; 819 819 } 820 820 821 - static int __set_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) 821 + static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) 822 822 { 823 823 return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root, 824 - from_oblock(b), &cmd->discard_root); 824 + from_dblock(b), &cmd->discard_root); 825 825 } 826 826 827 - static int __clear_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) 827 + static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) 828 828 { 829 829 return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root, 830 - from_oblock(b), &cmd->discard_root); 830 + from_dblock(b), &cmd->discard_root); 831 831 } 832 832 833 - static int __is_discarded(struct dm_cache_metadata *cmd, dm_oblock_t b, 833 + static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b, 834 834 bool *is_discarded) 835 835 { 836 836 return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root, 837 - from_oblock(b), &cmd->discard_root, 837 + from_dblock(b), &cmd->discard_root, 838 838 is_discarded); 839 839 } 840 840 841 841 static int __discard(struct dm_cache_metadata *cmd, 842 - dm_oblock_t dblock, bool discard) 842 + dm_dblock_t dblock, bool discard) 843 843 { 844 844 int r; 845 
845 ··· 852 852 } 853 853 854 854 int dm_cache_set_discard(struct dm_cache_metadata *cmd, 855 - dm_oblock_t dblock, bool discard) 855 + dm_dblock_t dblock, bool discard) 856 856 { 857 857 int r; 858 858 ··· 870 870 dm_block_t b; 871 871 bool discard; 872 872 873 - for (b = 0; b < from_oblock(cmd->discard_nr_blocks); b++) { 874 - dm_oblock_t dblock = to_oblock(b); 873 + for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) { 874 + dm_dblock_t dblock = to_dblock(b); 875 875 876 876 if (cmd->clean_when_opened) { 877 877 r = __is_discarded(cmd, dblock, &discard);
+3 -3
drivers/md/dm-cache-metadata.h
··· 70 70 71 71 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, 72 72 sector_t discard_block_size, 73 - dm_oblock_t new_nr_entries); 73 + dm_dblock_t new_nr_entries); 74 74 75 75 typedef int (*load_discard_fn)(void *context, sector_t discard_block_size, 76 - dm_oblock_t dblock, bool discarded); 76 + dm_dblock_t dblock, bool discarded); 77 77 int dm_cache_load_discards(struct dm_cache_metadata *cmd, 78 78 load_discard_fn fn, void *context); 79 79 80 - int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard); 80 + int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard); 81 81 82 82 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock); 83 83 int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
+46 -26
drivers/md/dm-cache-target.c
··· 236 236 /* 237 237 * origin_blocks entries, discarded if set. 238 238 */ 239 - dm_oblock_t discard_nr_blocks; 239 + dm_dblock_t discard_nr_blocks; 240 240 unsigned long *discard_bitset; 241 + uint32_t discard_block_size; 241 242 242 243 /* 243 244 * Rather than reconstructing the table line for the status we just ··· 525 524 return b; 526 525 } 527 526 528 - static void set_discard(struct cache *cache, dm_oblock_t b) 527 + static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) 528 + { 529 + uint32_t discard_blocks = cache->discard_block_size; 530 + dm_block_t b = from_oblock(oblock); 531 + 532 + if (!block_size_is_power_of_two(cache)) 533 + discard_blocks = discard_blocks / cache->sectors_per_block; 534 + else 535 + discard_blocks >>= cache->sectors_per_block_shift; 536 + 537 + b = block_div(b, discard_blocks); 538 + 539 + return to_dblock(b); 540 + } 541 + 542 + static void set_discard(struct cache *cache, dm_dblock_t b) 529 543 { 530 544 unsigned long flags; 531 545 532 546 atomic_inc(&cache->stats.discard_count); 533 547 534 548 spin_lock_irqsave(&cache->lock, flags); 535 - set_bit(from_oblock(b), cache->discard_bitset); 549 + set_bit(from_dblock(b), cache->discard_bitset); 536 550 spin_unlock_irqrestore(&cache->lock, flags); 537 551 } 538 552 539 - static void clear_discard(struct cache *cache, dm_oblock_t b) 553 + static void clear_discard(struct cache *cache, dm_dblock_t b) 540 554 { 541 555 unsigned long flags; 542 556 543 557 spin_lock_irqsave(&cache->lock, flags); 544 - clear_bit(from_oblock(b), cache->discard_bitset); 558 + clear_bit(from_dblock(b), cache->discard_bitset); 545 559 spin_unlock_irqrestore(&cache->lock, flags); 546 560 } 547 561 548 - static bool is_discarded(struct cache *cache, dm_oblock_t b) 562 + static bool is_discarded(struct cache *cache, dm_dblock_t b) 549 563 { 550 564 int r; 551 565 unsigned long flags; 552 566 553 567 spin_lock_irqsave(&cache->lock, flags); 554 - r = test_bit(from_oblock(b), 
cache->discard_bitset); 568 + r = test_bit(from_dblock(b), cache->discard_bitset); 555 569 spin_unlock_irqrestore(&cache->lock, flags); 556 570 557 571 return r; ··· 578 562 unsigned long flags; 579 563 580 564 spin_lock_irqsave(&cache->lock, flags); 581 - r = test_bit(from_oblock(b), cache->discard_bitset); 565 + r = test_bit(from_dblock(oblock_to_dblock(cache, b)), 566 + cache->discard_bitset); 582 567 spin_unlock_irqrestore(&cache->lock, flags); 583 568 584 569 return r; ··· 704 687 check_if_tick_bio_needed(cache, bio); 705 688 remap_to_origin(cache, bio); 706 689 if (bio_data_dir(bio) == WRITE) 707 - clear_discard(cache, oblock); 690 + clear_discard(cache, oblock_to_dblock(cache, oblock)); 708 691 } 709 692 710 693 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, ··· 714 697 remap_to_cache(cache, bio, cblock); 715 698 if (bio_data_dir(bio) == WRITE) { 716 699 set_dirty(cache, oblock, cblock); 717 - clear_discard(cache, oblock); 700 + clear_discard(cache, oblock_to_dblock(cache, oblock)); 718 701 } 719 702 } 720 703 ··· 1318 1301 static void process_discard_bio(struct cache *cache, struct bio *bio) 1319 1302 { 1320 1303 dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, 1321 - cache->sectors_per_block); 1304 + cache->discard_block_size); 1322 1305 dm_block_t end_block = bio_end_sector(bio); 1323 1306 dm_block_t b; 1324 1307 1325 - end_block = block_div(end_block, cache->sectors_per_block); 1308 + end_block = block_div(end_block, cache->discard_block_size); 1326 1309 1327 1310 for (b = start_block; b < end_block; b++) 1328 - set_discard(cache, to_oblock(b)); 1311 + set_discard(cache, to_dblock(b)); 1329 1312 1330 1313 bio_endio(bio, 0); 1331 1314 } ··· 2320 2303 } 2321 2304 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); 2322 2305 2323 - cache->discard_nr_blocks = cache->origin_blocks; 2324 - cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks)); 2306 + cache->discard_block_size = 
cache->sectors_per_block; 2307 + cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); 2308 + cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); 2325 2309 if (!cache->discard_bitset) { 2326 2310 *error = "could not allocate discard bitset"; 2327 2311 goto bad; 2328 2312 } 2329 - clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks)); 2313 + clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); 2330 2314 2331 2315 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); 2332 2316 if (IS_ERR(cache->copier)) { ··· 2617 2599 { 2618 2600 unsigned i, r; 2619 2601 2620 - r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block, 2621 - cache->origin_blocks); 2602 + r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, 2603 + cache->discard_nr_blocks); 2622 2604 if (r) { 2623 2605 DMERR("could not resize on-disk discard bitset"); 2624 2606 return r; 2625 2607 } 2626 2608 2627 - for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) { 2628 - r = dm_cache_set_discard(cache->cmd, to_oblock(i), 2629 - is_discarded(cache, to_oblock(i))); 2609 + for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { 2610 + r = dm_cache_set_discard(cache->cmd, to_dblock(i), 2611 + is_discarded(cache, to_dblock(i))); 2630 2612 if (r) 2631 2613 return r; 2632 2614 } ··· 2699 2681 } 2700 2682 2701 2683 static int load_discard(void *context, sector_t discard_block_size, 2702 - dm_oblock_t oblock, bool discard) 2684 + dm_dblock_t dblock, bool discard) 2703 2685 { 2704 2686 struct cache *cache = context; 2705 2687 2688 + /* FIXME: handle mis-matched block size */ 2689 + 2706 2690 if (discard) 2707 - set_discard(cache, oblock); 2691 + set_discard(cache, dblock); 2708 2692 else 2709 - clear_discard(cache, oblock); 2693 + clear_discard(cache, dblock); 2710 2694 2711 2695 return 0; 2712 2696 } ··· 3099 3079 /* 3100 3080 * FIXME: these limits may be incompatible with the 
cache device 3101 3081 */ 3102 - limits->max_discard_sectors = cache->sectors_per_block; 3103 - limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT; 3082 + limits->max_discard_sectors = cache->discard_block_size; 3083 + limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; 3104 3084 } 3105 3085 3106 3086 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)