Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

btrfs: cleanup ino cache members of btrfs_root

The naming is confusing: the member names are generic, yet they are used for one
specific cache. Add an 'ino_' prefix (or rename appropriately) to make this clear.

Signed-off-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Chris Mason <clm@fb.com>

Authored by David Sterba and committed by Chris Mason.
57cdc8db c6f83c74

+52 -52
+5 -5
fs/btrfs/ctree.h
··· 1776 1776 1777 1777 /* free ino cache stuff */ 1778 1778 struct btrfs_free_space_ctl *free_ino_ctl; 1779 - enum btrfs_caching_type cached; 1780 - spinlock_t cache_lock; 1781 - wait_queue_head_t cache_wait; 1779 + enum btrfs_caching_type ino_cache_state; 1780 + spinlock_t ino_cache_lock; 1781 + wait_queue_head_t ino_cache_wait; 1782 1782 struct btrfs_free_space_ctl *free_ino_pinned; 1783 - u64 cache_progress; 1784 - struct inode *cache_inode; 1783 + u64 ino_cache_progress; 1784 + struct inode *ino_cache_inode; 1785 1785 1786 1786 struct mutex log_mutex; 1787 1787 wait_queue_head_t log_writer_wait;
+3 -3
fs/btrfs/disk-io.c
··· 1573 1573 root->subv_writers = writers; 1574 1574 1575 1575 btrfs_init_free_ino_ctl(root); 1576 - spin_lock_init(&root->cache_lock); 1577 - init_waitqueue_head(&root->cache_wait); 1576 + spin_lock_init(&root->ino_cache_lock); 1577 + init_waitqueue_head(&root->ino_cache_wait); 1578 1578 1579 1579 ret = get_anon_bdev(&root->anon_dev); 1580 1580 if (ret) ··· 3532 3532 3533 3533 static void free_fs_root(struct btrfs_root *root) 3534 3534 { 3535 - iput(root->cache_inode); 3535 + iput(root->ino_cache_inode); 3536 3536 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); 3537 3537 btrfs_free_block_rsv(root, root->orphan_block_rsv); 3538 3538 root->orphan_block_rsv = NULL;
+7 -7
fs/btrfs/free-space-cache.c
··· 3033 3033 { 3034 3034 struct inode *inode = NULL; 3035 3035 3036 - spin_lock(&root->cache_lock); 3037 - if (root->cache_inode) 3038 - inode = igrab(root->cache_inode); 3039 - spin_unlock(&root->cache_lock); 3036 + spin_lock(&root->ino_cache_lock); 3037 + if (root->ino_cache_inode) 3038 + inode = igrab(root->ino_cache_inode); 3039 + spin_unlock(&root->ino_cache_lock); 3040 3040 if (inode) 3041 3041 return inode; 3042 3042 ··· 3044 3044 if (IS_ERR(inode)) 3045 3045 return inode; 3046 3046 3047 - spin_lock(&root->cache_lock); 3047 + spin_lock(&root->ino_cache_lock); 3048 3048 if (!btrfs_fs_closing(root->fs_info)) 3049 - root->cache_inode = igrab(inode); 3050 - spin_unlock(&root->cache_lock); 3049 + root->ino_cache_inode = igrab(inode); 3050 + spin_unlock(&root->ino_cache_lock); 3051 3051 3052 3052 return inode; 3053 3053 }
+34 -34
fs/btrfs/inode-map.c
··· 87 87 */ 88 88 btrfs_item_key_to_cpu(leaf, &key, 0); 89 89 btrfs_release_path(path); 90 - root->cache_progress = last; 90 + root->ino_cache_progress = last; 91 91 up_read(&fs_info->commit_root_sem); 92 92 schedule_timeout(1); 93 93 goto again; ··· 106 106 if (last != (u64)-1 && last + 1 != key.objectid) { 107 107 __btrfs_add_free_space(ctl, last + 1, 108 108 key.objectid - last - 1); 109 - wake_up(&root->cache_wait); 109 + wake_up(&root->ino_cache_wait); 110 110 } 111 111 112 112 last = key.objectid; ··· 119 119 root->highest_objectid - last - 1); 120 120 } 121 121 122 - spin_lock(&root->cache_lock); 123 - root->cached = BTRFS_CACHE_FINISHED; 124 - spin_unlock(&root->cache_lock); 122 + spin_lock(&root->ino_cache_lock); 123 + root->ino_cache_state = BTRFS_CACHE_FINISHED; 124 + spin_unlock(&root->ino_cache_lock); 125 125 126 - root->cache_progress = (u64)-1; 126 + root->ino_cache_progress = (u64)-1; 127 127 btrfs_unpin_free_ino(root); 128 128 out: 129 - wake_up(&root->cache_wait); 129 + wake_up(&root->ino_cache_wait); 130 130 up_read(&fs_info->commit_root_sem); 131 131 132 132 btrfs_free_path(path); ··· 144 144 if (!btrfs_test_opt(root, INODE_MAP_CACHE)) 145 145 return; 146 146 147 - spin_lock(&root->cache_lock); 148 - if (root->cached != BTRFS_CACHE_NO) { 149 - spin_unlock(&root->cache_lock); 147 + spin_lock(&root->ino_cache_lock); 148 + if (root->ino_cache_state != BTRFS_CACHE_NO) { 149 + spin_unlock(&root->ino_cache_lock); 150 150 return; 151 151 } 152 152 153 - root->cached = BTRFS_CACHE_STARTED; 154 - spin_unlock(&root->cache_lock); 153 + root->ino_cache_state = BTRFS_CACHE_STARTED; 154 + spin_unlock(&root->ino_cache_lock); 155 155 156 156 ret = load_free_ino_cache(root->fs_info, root); 157 157 if (ret == 1) { 158 - spin_lock(&root->cache_lock); 159 - root->cached = BTRFS_CACHE_FINISHED; 160 - spin_unlock(&root->cache_lock); 158 + spin_lock(&root->ino_cache_lock); 159 + root->ino_cache_state = BTRFS_CACHE_FINISHED; 160 + spin_unlock(&root->ino_cache_lock); 
161 161 return; 162 162 } 163 163 ··· 196 196 197 197 start_caching(root); 198 198 199 - wait_event(root->cache_wait, 200 - root->cached == BTRFS_CACHE_FINISHED || 199 + wait_event(root->ino_cache_wait, 200 + root->ino_cache_state == BTRFS_CACHE_FINISHED || 201 201 root->free_ino_ctl->free_space > 0); 202 202 203 - if (root->cached == BTRFS_CACHE_FINISHED && 203 + if (root->ino_cache_state == BTRFS_CACHE_FINISHED && 204 204 root->free_ino_ctl->free_space == 0) 205 205 return -ENOSPC; 206 206 else ··· 214 214 if (!btrfs_test_opt(root, INODE_MAP_CACHE)) 215 215 return; 216 216 again: 217 - if (root->cached == BTRFS_CACHE_FINISHED) { 217 + if (root->ino_cache_state == BTRFS_CACHE_FINISHED) { 218 218 __btrfs_add_free_space(pinned, objectid, 1); 219 219 } else { 220 220 down_write(&root->fs_info->commit_root_sem); 221 - spin_lock(&root->cache_lock); 222 - if (root->cached == BTRFS_CACHE_FINISHED) { 223 - spin_unlock(&root->cache_lock); 221 + spin_lock(&root->ino_cache_lock); 222 + if (root->ino_cache_state == BTRFS_CACHE_FINISHED) { 223 + spin_unlock(&root->ino_cache_lock); 224 224 up_write(&root->fs_info->commit_root_sem); 225 225 goto again; 226 226 } 227 - spin_unlock(&root->cache_lock); 227 + spin_unlock(&root->ino_cache_lock); 228 228 229 229 start_caching(root); 230 230 ··· 235 235 } 236 236 237 237 /* 238 - * When a transaction is committed, we'll move those inode numbers which 239 - * are smaller than root->cache_progress from pinned tree to free_ino tree, 240 - * and others will just be dropped, because the commit root we were 241 - * searching has changed. 238 + * When a transaction is committed, we'll move those inode numbers which are 239 + * smaller than root->ino_cache_progress from pinned tree to free_ino tree, and 240 + * others will just be dropped, because the commit root we were searching has 241 + * changed. 
242 242 * 243 243 * Must be called with root->fs_info->commit_root_sem held 244 244 */ ··· 261 261 info = rb_entry(n, struct btrfs_free_space, offset_index); 262 262 BUG_ON(info->bitmap); /* Logic error */ 263 263 264 - if (info->offset > root->cache_progress) 264 + if (info->offset > root->ino_cache_progress) 265 265 goto free; 266 - else if (info->offset + info->bytes > root->cache_progress) 267 - count = root->cache_progress - info->offset + 1; 266 + else if (info->offset + info->bytes > root->ino_cache_progress) 267 + count = root->ino_cache_progress - info->offset + 1; 268 268 else 269 269 count = info->bytes; 270 270 ··· 462 462 } 463 463 } 464 464 465 - spin_lock(&root->cache_lock); 466 - if (root->cached != BTRFS_CACHE_FINISHED) { 465 + spin_lock(&root->ino_cache_lock); 466 + if (root->ino_cache_state != BTRFS_CACHE_FINISHED) { 467 467 ret = -1; 468 - spin_unlock(&root->cache_lock); 468 + spin_unlock(&root->ino_cache_lock); 469 469 goto out_put; 470 470 } 471 - spin_unlock(&root->cache_lock); 471 + spin_unlock(&root->ino_cache_lock); 472 472 473 473 spin_lock(&ctl->tree_lock); 474 474 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+3 -3
fs/btrfs/ioctl.c
··· 2526 2526 ASSERT(dest->send_in_progress == 0); 2527 2527 2528 2528 /* the last ref */ 2529 - if (dest->cache_inode) { 2530 - iput(dest->cache_inode); 2531 - dest->cache_inode = NULL; 2529 + if (dest->ino_cache_inode) { 2530 + iput(dest->ino_cache_inode); 2531 + dest->ino_cache_inode = NULL; 2532 2532 } 2533 2533 } 2534 2534 out_dput: