Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ext2, ext4: make mb block cache names more explicit

There will be a second mb_cache instance that tracks ea_inodes. Make
existing names more explicit so that it is clear that they refer to
xattr block cache.

Signed-off-by: Tahsin Erdogan <tahsin@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>

Authored by Tahsin Erdogan and committed by Theodore Ts'o.
47387409 c07dfcb4

+75 -70
+1 -1
fs/ext2/ext2.h
··· 113 113 * of the mount options. 114 114 */ 115 115 spinlock_t s_lock; 116 - struct mb_cache *s_mb_cache; 116 + struct mb_cache *s_ea_block_cache; 117 117 }; 118 118 119 119 static inline spinlock_t *
+8 -8
fs/ext2/super.c
··· 147 147 148 148 ext2_quota_off_umount(sb); 149 149 150 - if (sbi->s_mb_cache) { 151 - ext2_xattr_destroy_cache(sbi->s_mb_cache); 152 - sbi->s_mb_cache = NULL; 150 + if (sbi->s_ea_block_cache) { 151 + ext2_xattr_destroy_cache(sbi->s_ea_block_cache); 152 + sbi->s_ea_block_cache = NULL; 153 153 } 154 154 if (!(sb->s_flags & MS_RDONLY)) { 155 155 struct ext2_super_block *es = sbi->s_es; ··· 1131 1131 } 1132 1132 1133 1133 #ifdef CONFIG_EXT2_FS_XATTR 1134 - sbi->s_mb_cache = ext2_xattr_create_cache(); 1135 - if (!sbi->s_mb_cache) { 1136 - ext2_msg(sb, KERN_ERR, "Failed to create an mb_cache"); 1134 + sbi->s_ea_block_cache = ext2_xattr_create_cache(); 1135 + if (!sbi->s_ea_block_cache) { 1136 + ext2_msg(sb, KERN_ERR, "Failed to create ea_block_cache"); 1137 1137 goto failed_mount3; 1138 1138 } 1139 1139 #endif ··· 1182 1182 sb->s_id); 1183 1183 goto failed_mount; 1184 1184 failed_mount3: 1185 - if (sbi->s_mb_cache) 1186 - ext2_xattr_destroy_cache(sbi->s_mb_cache); 1185 + if (sbi->s_ea_block_cache) 1186 + ext2_xattr_destroy_cache(sbi->s_ea_block_cache); 1187 1187 percpu_counter_destroy(&sbi->s_freeblocks_counter); 1188 1188 percpu_counter_destroy(&sbi->s_freeinodes_counter); 1189 1189 percpu_counter_destroy(&sbi->s_dirs_counter);
+19 -17
fs/ext2/xattr.c
··· 121 121 NULL 122 122 }; 123 123 124 + #define EA_BLOCK_CACHE(inode) (EXT2_SB(inode->i_sb)->s_ea_block_cache) 125 + 124 126 static inline const struct xattr_handler * 125 127 ext2_xattr_handler(int name_index) 126 128 { ··· 152 150 size_t name_len, size; 153 151 char *end; 154 152 int error; 155 - struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; 153 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 156 154 157 155 ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", 158 156 name_index, name, buffer, (long)buffer_size); ··· 197 195 goto found; 198 196 entry = next; 199 197 } 200 - if (ext2_xattr_cache_insert(ext2_mb_cache, bh)) 198 + if (ext2_xattr_cache_insert(ea_block_cache, bh)) 201 199 ea_idebug(inode, "cache insert failed"); 202 200 error = -ENODATA; 203 201 goto cleanup; ··· 210 208 le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize) 211 209 goto bad_block; 212 210 213 - if (ext2_xattr_cache_insert(ext2_mb_cache, bh)) 211 + if (ext2_xattr_cache_insert(ea_block_cache, bh)) 214 212 ea_idebug(inode, "cache insert failed"); 215 213 if (buffer) { 216 214 error = -ERANGE; ··· 248 246 char *end; 249 247 size_t rest = buffer_size; 250 248 int error; 251 - struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; 249 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 252 250 253 251 ea_idebug(inode, "buffer=%p, buffer_size=%ld", 254 252 buffer, (long)buffer_size); ··· 283 281 goto bad_block; 284 282 entry = next; 285 283 } 286 - if (ext2_xattr_cache_insert(ext2_mb_cache, bh)) 284 + if (ext2_xattr_cache_insert(ea_block_cache, bh)) 287 285 ea_idebug(inode, "cache insert failed"); 288 286 289 287 /* list the attribute names */ ··· 495 493 * This must happen under buffer lock for 496 494 * ext2_xattr_set2() to reliably detect modified block 497 495 */ 498 - mb_cache_entry_delete(EXT2_SB(sb)->s_mb_cache, hash, 496 + mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash, 499 497 bh->b_blocknr); 500 498 501 499 
/* keep the buffer locked while modifying it. */ ··· 629 627 struct super_block *sb = inode->i_sb; 630 628 struct buffer_head *new_bh = NULL; 631 629 int error; 632 - struct mb_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache; 630 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 633 631 634 632 if (header) { 635 633 new_bh = ext2_xattr_cache_find(inode, header); ··· 657 655 don't need to change the reference count. */ 658 656 new_bh = old_bh; 659 657 get_bh(new_bh); 660 - ext2_xattr_cache_insert(ext2_mb_cache, new_bh); 658 + ext2_xattr_cache_insert(ea_block_cache, new_bh); 661 659 } else { 662 660 /* We need to allocate a new block */ 663 661 ext2_fsblk_t goal = ext2_group_first_block_no(sb, ··· 678 676 memcpy(new_bh->b_data, header, new_bh->b_size); 679 677 set_buffer_uptodate(new_bh); 680 678 unlock_buffer(new_bh); 681 - ext2_xattr_cache_insert(ext2_mb_cache, new_bh); 679 + ext2_xattr_cache_insert(ea_block_cache, new_bh); 682 680 683 681 ext2_xattr_update_super_block(sb); 684 682 } ··· 723 721 * This must happen under buffer lock for 724 722 * ext2_xattr_set2() to reliably detect freed block 725 723 */ 726 - mb_cache_entry_delete(ext2_mb_cache, hash, 724 + mb_cache_entry_delete(ea_block_cache, hash, 727 725 old_bh->b_blocknr); 728 726 /* Free the old block. 
*/ 729 727 ea_bdebug(old_bh, "freeing"); ··· 797 795 * This must happen under buffer lock for ext2_xattr_set2() to 798 796 * reliably detect freed block 799 797 */ 800 - mb_cache_entry_delete(EXT2_SB(inode->i_sb)->s_mb_cache, hash, 798 + mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash, 801 799 bh->b_blocknr); 802 800 ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); 803 801 get_bh(bh); ··· 899 897 { 900 898 __u32 hash = le32_to_cpu(header->h_hash); 901 899 struct mb_cache_entry *ce; 902 - struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; 900 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 903 901 904 902 if (!header->h_hash) 905 903 return NULL; /* never share */ 906 904 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); 907 905 again: 908 - ce = mb_cache_entry_find_first(ext2_mb_cache, hash); 906 + ce = mb_cache_entry_find_first(ea_block_cache, hash); 909 907 while (ce) { 910 908 struct buffer_head *bh; 911 909 ··· 926 924 * entry is still hashed is reliable. 927 925 */ 928 926 if (hlist_bl_unhashed(&ce->e_hash_list)) { 929 - mb_cache_entry_put(ext2_mb_cache, ce); 927 + mb_cache_entry_put(ea_block_cache, ce); 930 928 unlock_buffer(bh); 931 929 brelse(bh); 932 930 goto again; ··· 939 937 } else if (!ext2_xattr_cmp(header, HDR(bh))) { 940 938 ea_bdebug(bh, "b_count=%d", 941 939 atomic_read(&(bh->b_count))); 942 - mb_cache_entry_touch(ext2_mb_cache, ce); 943 - mb_cache_entry_put(ext2_mb_cache, ce); 940 + mb_cache_entry_touch(ea_block_cache, ce); 941 + mb_cache_entry_put(ea_block_cache, ce); 944 942 return bh; 945 943 } 946 944 unlock_buffer(bh); 947 945 brelse(bh); 948 946 } 949 - ce = mb_cache_entry_find_next(ext2_mb_cache, ce); 947 + ce = mb_cache_entry_find_next(ea_block_cache, ce); 950 948 } 951 949 return NULL; 952 950 }
+1 -1
fs/ext4/ext4.h
··· 1516 1516 struct list_head s_es_list; /* List of inodes with reclaimable extents */ 1517 1517 long s_es_nr_inode; 1518 1518 struct ext4_es_stats s_es_stats; 1519 - struct mb_cache *s_mb_cache; 1519 + struct mb_cache *s_ea_block_cache; 1520 1520 spinlock_t s_es_lock ____cacheline_aligned_in_smp; 1521 1521 1522 1522 /* Ratelimit ext4 messages. */
+9 -9
fs/ext4/super.c
··· 927 927 invalidate_bdev(sbi->journal_bdev); 928 928 ext4_blkdev_remove(sbi); 929 929 } 930 - if (sbi->s_mb_cache) { 931 - ext4_xattr_destroy_cache(sbi->s_mb_cache); 932 - sbi->s_mb_cache = NULL; 930 + if (sbi->s_ea_block_cache) { 931 + ext4_xattr_destroy_cache(sbi->s_ea_block_cache); 932 + sbi->s_ea_block_cache = NULL; 933 933 } 934 934 if (sbi->s_mmp_tsk) 935 935 kthread_stop(sbi->s_mmp_tsk); ··· 4061 4061 sbi->s_journal->j_commit_callback = ext4_journal_commit_callback; 4062 4062 4063 4063 no_journal: 4064 - sbi->s_mb_cache = ext4_xattr_create_cache(); 4065 - if (!sbi->s_mb_cache) { 4066 - ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache"); 4064 + sbi->s_ea_block_cache = ext4_xattr_create_cache(); 4065 + if (!sbi->s_ea_block_cache) { 4066 + ext4_msg(sb, KERN_ERR, "Failed to create ea_block_cache"); 4067 4067 goto failed_mount_wq; 4068 4068 } 4069 4069 ··· 4296 4296 if (EXT4_SB(sb)->rsv_conversion_wq) 4297 4297 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); 4298 4298 failed_mount_wq: 4299 - if (sbi->s_mb_cache) { 4300 - ext4_xattr_destroy_cache(sbi->s_mb_cache); 4301 - sbi->s_mb_cache = NULL; 4299 + if (sbi->s_ea_block_cache) { 4300 + ext4_xattr_destroy_cache(sbi->s_ea_block_cache); 4301 + sbi->s_ea_block_cache = NULL; 4302 4302 } 4303 4303 if (sbi->s_journal) { 4304 4304 jbd2_journal_destroy(sbi->s_journal);
+37 -34
fs/ext4/xattr.c
··· 72 72 # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__) 73 73 #endif 74 74 75 - static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *); 76 - static struct buffer_head *ext4_xattr_cache_find(struct inode *, 77 - struct ext4_xattr_header *, 78 - struct mb_cache_entry **); 75 + static void ext4_xattr_block_cache_insert(struct mb_cache *, 76 + struct buffer_head *); 77 + static struct buffer_head * 78 + ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *, 79 + struct mb_cache_entry **); 79 80 static void ext4_xattr_rehash(struct ext4_xattr_header *, 80 81 struct ext4_xattr_entry *); 81 82 ··· 105 104 NULL 106 105 }; 107 106 108 - #define EXT4_GET_MB_CACHE(inode) (((struct ext4_sb_info *) \ 109 - inode->i_sb->s_fs_info)->s_mb_cache) 107 + #define EA_BLOCK_CACHE(inode) (((struct ext4_sb_info *) \ 108 + inode->i_sb->s_fs_info)->s_ea_block_cache) 110 109 111 110 #ifdef CONFIG_LOCKDEP 112 111 void ext4_xattr_inode_set_class(struct inode *ea_inode) ··· 375 374 struct ext4_xattr_entry *entry; 376 375 size_t size; 377 376 int error; 378 - struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 377 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 379 378 380 379 ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", 381 380 name_index, name, buffer, (long)buffer_size); ··· 396 395 error = -EFSCORRUPTED; 397 396 goto cleanup; 398 397 } 399 - ext4_xattr_cache_insert(ext4_mb_cache, bh); 398 + ext4_xattr_block_cache_insert(ea_block_cache, bh); 400 399 entry = BFIRST(bh); 401 400 error = ext4_xattr_find_entry(&entry, name_index, name, 1); 402 401 if (error) ··· 542 541 struct inode *inode = d_inode(dentry); 543 542 struct buffer_head *bh = NULL; 544 543 int error; 545 - struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 546 544 547 545 ea_idebug(inode, "buffer=%p, buffer_size=%ld", 548 546 buffer, (long)buffer_size); ··· 563 563 error = -EFSCORRUPTED; 564 564 goto cleanup; 565 565 } 566 - 
ext4_xattr_cache_insert(ext4_mb_cache, bh); 566 + ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh); 567 567 error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size); 568 568 569 569 cleanup: ··· 660 660 ext4_xattr_release_block(handle_t *handle, struct inode *inode, 661 661 struct buffer_head *bh) 662 662 { 663 - struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 663 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 664 664 u32 hash, ref; 665 665 int error = 0; 666 666 ··· 678 678 * This must happen under buffer lock for 679 679 * ext4_xattr_block_set() to reliably detect freed block 680 680 */ 681 - mb_cache_entry_delete(ext4_mb_cache, hash, bh->b_blocknr); 681 + mb_cache_entry_delete(ea_block_cache, hash, bh->b_blocknr); 682 682 get_bh(bh); 683 683 unlock_buffer(bh); 684 684 ext4_free_blocks(handle, inode, bh, 0, 1, ··· 690 690 if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) { 691 691 struct mb_cache_entry *ce; 692 692 693 - ce = mb_cache_entry_get(ext4_mb_cache, hash, 693 + ce = mb_cache_entry_get(ea_block_cache, hash, 694 694 bh->b_blocknr); 695 695 if (ce) { 696 696 ce->e_reusable = 1; 697 - mb_cache_entry_put(ext4_mb_cache, ce); 697 + mb_cache_entry_put(ea_block_cache, ce); 698 698 } 699 699 } 700 700 ··· 1094 1094 struct ext4_xattr_search *s = &s_copy; 1095 1095 struct mb_cache_entry *ce = NULL; 1096 1096 int error = 0; 1097 - struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 1097 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 1098 1098 1099 1099 #define header(x) ((struct ext4_xattr_header *)(x)) 1100 1100 ··· 1113 1113 * ext4_xattr_block_set() to reliably detect modified 1114 1114 * block 1115 1115 */ 1116 - mb_cache_entry_delete(ext4_mb_cache, hash, 1116 + mb_cache_entry_delete(ea_block_cache, hash, 1117 1117 bs->bh->b_blocknr); 1118 1118 ea_bdebug(bs->bh, "modifying in-place"); 1119 1119 error = ext4_xattr_set_entry(i, s, handle, inode); ··· 1121 1121 if (!IS_LAST_ENTRY(s->first)) 1122 1122 
ext4_xattr_rehash(header(s->base), 1123 1123 s->here); 1124 - ext4_xattr_cache_insert(ext4_mb_cache, 1125 - bs->bh); 1124 + ext4_xattr_block_cache_insert(ea_block_cache, 1125 + bs->bh); 1126 1126 } 1127 1127 ext4_xattr_block_csum_set(inode, bs->bh); 1128 1128 unlock_buffer(bs->bh); ··· 1175 1175 1176 1176 inserted: 1177 1177 if (!IS_LAST_ENTRY(s->first)) { 1178 - new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce); 1178 + new_bh = ext4_xattr_block_cache_find(inode, header(s->base), 1179 + &ce); 1179 1180 if (new_bh) { 1180 1181 /* We found an identical block in the cache. */ 1181 1182 if (new_bh == bs->bh) ··· 1221 1220 EXT4_C2B(EXT4_SB(sb), 1222 1221 1)); 1223 1222 brelse(new_bh); 1224 - mb_cache_entry_put(ext4_mb_cache, ce); 1223 + mb_cache_entry_put(ea_block_cache, ce); 1225 1224 ce = NULL; 1226 1225 new_bh = NULL; 1227 1226 goto inserted; ··· 1240 1239 if (error) 1241 1240 goto cleanup_dquot; 1242 1241 } 1243 - mb_cache_entry_touch(ext4_mb_cache, ce); 1244 - mb_cache_entry_put(ext4_mb_cache, ce); 1242 + mb_cache_entry_touch(ea_block_cache, ce); 1243 + mb_cache_entry_put(ea_block_cache, ce); 1245 1244 ce = NULL; 1246 1245 } else if (bs->bh && s->base == bs->bh->b_data) { 1247 1246 /* We were modifying this block in-place. 
*/ ··· 1291 1290 ext4_xattr_block_csum_set(inode, new_bh); 1292 1291 set_buffer_uptodate(new_bh); 1293 1292 unlock_buffer(new_bh); 1294 - ext4_xattr_cache_insert(ext4_mb_cache, new_bh); 1293 + ext4_xattr_block_cache_insert(ea_block_cache, new_bh); 1295 1294 error = ext4_handle_dirty_metadata(handle, inode, 1296 1295 new_bh); 1297 1296 if (error) ··· 1309 1308 1310 1309 cleanup: 1311 1310 if (ce) 1312 - mb_cache_entry_put(ext4_mb_cache, ce); 1311 + mb_cache_entry_put(ea_block_cache, ce); 1313 1312 brelse(new_bh); 1314 1313 if (!(bs->bh && s->base == bs->bh->b_data)) 1315 1314 kfree(s->base); ··· 2149 2148 } 2150 2149 2151 2150 /* 2152 - * ext4_xattr_cache_insert() 2151 + * ext4_xattr_block_cache_insert() 2153 2152 * 2154 - * Create a new entry in the extended attribute cache, and insert 2153 + * Create a new entry in the extended attribute block cache, and insert 2155 2154 * it unless such an entry is already in the cache. 2156 2155 * 2157 2156 * Returns 0, or a negative error number on failure. 2158 2157 */ 2159 2158 static void 2160 - ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh) 2159 + ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache, 2160 + struct buffer_head *bh) 2161 2161 { 2162 2162 struct ext4_xattr_header *header = BHDR(bh); 2163 2163 __u32 hash = le32_to_cpu(header->h_hash); ··· 2166 2164 EXT4_XATTR_REFCOUNT_MAX; 2167 2165 int error; 2168 2166 2169 - error = mb_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash, 2167 + error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash, 2170 2168 bh->b_blocknr, reusable); 2171 2169 if (error) { 2172 2170 if (error == -EBUSY) ··· 2216 2214 } 2217 2215 2218 2216 /* 2219 - * ext4_xattr_cache_find() 2217 + * ext4_xattr_block_cache_find() 2220 2218 * 2221 2219 * Find an identical extended attribute block. 2222 2220 * ··· 2224 2222 * not found or an error occurred. 
2225 2223 */ 2226 2224 static struct buffer_head * 2227 - ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header, 2228 - struct mb_cache_entry **pce) 2225 + ext4_xattr_block_cache_find(struct inode *inode, 2226 + struct ext4_xattr_header *header, 2227 + struct mb_cache_entry **pce) 2229 2228 { 2230 2229 __u32 hash = le32_to_cpu(header->h_hash); 2231 2230 struct mb_cache_entry *ce; 2232 - struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 2231 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); 2233 2232 2234 2233 if (!header->h_hash) 2235 2234 return NULL; /* never share */ 2236 2235 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); 2237 - ce = mb_cache_entry_find_first(ext4_mb_cache, hash); 2236 + ce = mb_cache_entry_find_first(ea_block_cache, hash); 2238 2237 while (ce) { 2239 2238 struct buffer_head *bh; 2240 2239 ··· 2248 2245 return bh; 2249 2246 } 2250 2247 brelse(bh); 2251 - ce = mb_cache_entry_find_next(ext4_mb_cache, ce); 2248 + ce = mb_cache_entry_find_next(ea_block_cache, ce); 2252 2249 } 2253 2250 return NULL; 2254 2251 }