Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

btrfs: use btrfs_remove_free_space_cache instead of variant, remove the variant

We are calling __btrfs_remove_free_space_cache everywhere to cleanup the
block group free space, however we can just use
btrfs_remove_free_space_cache and pass in the block group in all of
these places. Then we can remove __btrfs_remove_free_space_cache and
rename __btrfs_remove_free_space_cache_locked to
__btrfs_remove_free_space_cache.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>

Authored by Josef Bacik; committed by David Sterba.
fc80f7ac 8a1ae278

+16 -31
+1 -1
fs/btrfs/block-group.c
@@ -4110,7 +4110,7 @@
 	 * tasks trimming this block group have left 1 entry each one.
 	 * Free them if any.
 	 */
-	__btrfs_remove_free_space_cache(block_group->free_space_ctl);
+	btrfs_remove_free_space_cache(block_group);
 }
 }
 
+3 -17
fs/btrfs/free-space-cache.c
@@ -48,8 +48,7 @@
 			      struct btrfs_free_space *info, u64 offset,
 			      u64 bytes, bool update_stats);
 
-static void __btrfs_remove_free_space_cache_locked(
-				struct btrfs_free_space_ctl *ctl)
+static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
 {
 	struct btrfs_free_space *info;
 	struct rb_node *node;
@@ -897,12 +898,8 @@
 free_cache:
 	io_ctl_drop_pages(&io_ctl);
 
-	/*
-	 * We need to call the _locked variant so we don't try to update the
-	 * discard counters.
-	 */
 	spin_lock(&ctl->tree_lock);
-	__btrfs_remove_free_space_cache_locked(ctl);
+	__btrfs_remove_free_space_cache(ctl);
 	spin_unlock(&ctl->tree_lock);
 	goto out;
 }
@@ -3005,15 +3010,6 @@
 	btrfs_put_block_group(block_group);
 }
 
-void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
-{
-	spin_lock(&ctl->tree_lock);
-	__btrfs_remove_free_space_cache_locked(ctl);
-	if (ctl->block_group)
-		btrfs_discard_update_discardable(ctl->block_group);
-	spin_unlock(&ctl->tree_lock);
-}
-
 void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -3022,7 +3036,7 @@
 
 		cond_resched_lock(&ctl->tree_lock);
 	}
-	__btrfs_remove_free_space_cache_locked(ctl);
+	__btrfs_remove_free_space_cache(ctl);
 	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 
-1
fs/btrfs/free-space-cache.h
@@ -113,7 +113,6 @@
 			   u64 bytenr, u64 size);
 int btrfs_remove_free_space(struct btrfs_block_group *block_group,
 			    u64 bytenr, u64 size);
-void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
 void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group);
 bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group);
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
+1 -1
fs/btrfs/tests/btrfs-tests.c
@@ -243,7 +243,7 @@
 {
 	if (!cache)
 		return;
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 	kfree(cache->free_space_ctl);
 	kfree(cache);
 }
+11 -11
fs/btrfs/tests/free-space-tests.c
@@ -82,7 +82,7 @@
 }
 
 	/* Cleanup */
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 
 	return 0;
 }
@@ -149,7 +149,7 @@
 		return -1;
 	}
 
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 
 	return 0;
 }
@@ -230,7 +230,7 @@
 		return -1;
 	}
 
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 
 	/* Now with the extent entry offset into the bitmap */
 	ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1);
@@ -266,7 +266,7 @@
 	 * [ bitmap ]
 	 * [ del ]
 	 */
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 	ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1);
 	if (ret) {
 		test_err("couldn't add bitmap %d", ret);
@@ -291,7 +291,7 @@
 		return -1;
 	}
 
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 
 	/*
 	 * This blew up before, we have part of the free space in a bitmap and
@@ -317,6 +317,6 @@
 		return ret;
 	}
 
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 	return 0;
 }
@@ -629,7 +629,7 @@
 	if (ret)
 		return ret;
 
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 
 	/*
 	 * Now test a similar scenario, but where our extent entry is located
@@ -819,7 +819,7 @@
 		return ret;
 
 	cache->free_space_ctl->op = orig_free_space_ops;
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 
 	return 0;
 }
@@ -868,7 +868,7 @@
 	}
 
 	/* Now validate bitmaps do the correct thing. */
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 	for (i = 0; i < 2; i++) {
 		offset = i * BITS_PER_BITMAP * sectorsize;
 		bytes = (i + 1) * SZ_1M;
@@ -891,7 +891,7 @@
 	}
 
 	/* Now validate bitmaps with different ->max_extent_size. */
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 	orig_free_space_ops = cache->free_space_ctl->op;
 	cache->free_space_ctl->op = &test_free_space_ops;
@@ -998,7 +998,7 @@
 	}
 
 	cache->free_space_ctl->op = orig_free_space_ops;
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	btrfs_remove_free_space_cache(cache);
 	return 0;
 }