Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mbcache: make mbcache naming more generic

Make names more generic so that mbcache usage is not limited to
block sharing. In a subsequent patch in the series
("ext4: xattr inode deduplication"), we start using the mbcache code
for sharing xattr inodes. With that patch, old mb_cache_entry.e_block
field could be holding either a block number or an inode number.

Signed-off-by: Tahsin Erdogan <tahsin@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>

Authored by Tahsin Erdogan; committed by Theodore Ts'o.
c07dfcb4 b6d9029d

+40 -42
+9 -9
fs/ext2/xattr.c
··· 493 493 * This must happen under buffer lock for 494 494 * ext2_xattr_set2() to reliably detect modified block 495 495 */ 496 - mb_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache, 497 - hash, bh->b_blocknr); 496 + mb_cache_entry_delete(EXT2_SB(sb)->s_mb_cache, hash, 497 + bh->b_blocknr); 498 498 499 499 /* keep the buffer locked while modifying it. */ 500 500 } else { ··· 721 721 * This must happen under buffer lock for 722 722 * ext2_xattr_set2() to reliably detect freed block 723 723 */ 724 - mb_cache_entry_delete_block(ext2_mb_cache, 725 - hash, old_bh->b_blocknr); 724 + mb_cache_entry_delete(ext2_mb_cache, hash, 725 + old_bh->b_blocknr); 726 726 /* Free the old block. */ 727 727 ea_bdebug(old_bh, "freeing"); 728 728 ext2_free_blocks(inode, old_bh->b_blocknr, 1); ··· 795 795 * This must happen under buffer lock for ext2_xattr_set2() to 796 796 * reliably detect freed block 797 797 */ 798 - mb_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache, 799 - hash, bh->b_blocknr); 798 + mb_cache_entry_delete(EXT2_SB(inode->i_sb)->s_mb_cache, hash, 799 + bh->b_blocknr); 800 800 ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); 801 801 get_bh(bh); 802 802 bforget(bh); ··· 907 907 while (ce) { 908 908 struct buffer_head *bh; 909 909 910 - bh = sb_bread(inode->i_sb, ce->e_block); 910 + bh = sb_bread(inode->i_sb, ce->e_value); 911 911 if (!bh) { 912 912 ext2_error(inode->i_sb, "ext2_xattr_cache_find", 913 913 "inode %ld: block %ld read error", 914 - inode->i_ino, (unsigned long) ce->e_block); 914 + inode->i_ino, (unsigned long) ce->e_value); 915 915 } else { 916 916 lock_buffer(bh); 917 917 /* ··· 931 931 } else if (le32_to_cpu(HDR(bh)->h_refcount) > 932 932 EXT2_XATTR_REFCOUNT_MAX) { 933 933 ea_idebug(inode, "block %ld refcount %d>%d", 934 - (unsigned long) ce->e_block, 934 + (unsigned long) ce->e_value, 935 935 le32_to_cpu(HDR(bh)->h_refcount), 936 936 EXT2_XATTR_REFCOUNT_MAX); 937 937 } else if (!ext2_xattr_cmp(header, HDR(bh))) {
+5 -5
fs/ext4/xattr.c
··· 678 678 * This must happen under buffer lock for 679 679 * ext4_xattr_block_set() to reliably detect freed block 680 680 */ 681 - mb_cache_entry_delete_block(ext4_mb_cache, hash, bh->b_blocknr); 681 + mb_cache_entry_delete(ext4_mb_cache, hash, bh->b_blocknr); 682 682 get_bh(bh); 683 683 unlock_buffer(bh); 684 684 ext4_free_blocks(handle, inode, bh, 0, 1, ··· 1113 1113 * ext4_xattr_block_set() to reliably detect modified 1114 1114 * block 1115 1115 */ 1116 - mb_cache_entry_delete_block(ext4_mb_cache, hash, 1117 - bs->bh->b_blocknr); 1116 + mb_cache_entry_delete(ext4_mb_cache, hash, 1117 + bs->bh->b_blocknr); 1118 1118 ea_bdebug(bs->bh, "modifying in-place"); 1119 1119 error = ext4_xattr_set_entry(i, s, handle, inode); 1120 1120 if (!error) { ··· 2236 2236 while (ce) { 2237 2237 struct buffer_head *bh; 2238 2238 2239 - bh = sb_bread(inode->i_sb, ce->e_block); 2239 + bh = sb_bread(inode->i_sb, ce->e_value); 2240 2240 if (!bh) { 2241 2241 EXT4_ERROR_INODE(inode, "block %lu read error", 2242 - (unsigned long) ce->e_block); 2242 + (unsigned long)ce->e_value); 2243 2243 } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) { 2244 2244 *pce = ce; 2245 2245 return bh;
+21 -22
fs/mbcache.c
··· 10 10 /* 11 11 * Mbcache is a simple key-value store. Keys need not be unique, however 12 12 * key-value pairs are expected to be unique (we use this fact in 13 - * mb_cache_entry_delete_block()). 13 + * mb_cache_entry_delete()). 14 14 * 15 15 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks. 16 16 * They use hash of a block contents as a key and block number as a value. ··· 62 62 * @cache - cache where the entry should be created 63 63 * @mask - gfp mask with which the entry should be allocated 64 64 * @key - key of the entry 65 - * @block - block that contains data 66 - * @reusable - is the block reusable by other inodes? 65 + * @value - value of the entry 66 + * @reusable - is the entry reusable by others? 67 67 * 68 - * Creates entry in @cache with key @key and records that data is stored in 69 - * block @block. The function returns -EBUSY if entry with the same key 70 - * and for the same block already exists in cache. Otherwise 0 is returned. 68 + * Creates entry in @cache with key @key and value @value. The function returns 69 + * -EBUSY if entry with the same key and value already exists in cache. 70 + * Otherwise 0 is returned. 
71 71 */ 72 72 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, 73 - sector_t block, bool reusable) 73 + u64 value, bool reusable) 74 74 { 75 75 struct mb_cache_entry *entry, *dup; 76 76 struct hlist_bl_node *dup_node; ··· 91 91 /* One ref for hash, one ref returned */ 92 92 atomic_set(&entry->e_refcnt, 1); 93 93 entry->e_key = key; 94 - entry->e_block = block; 94 + entry->e_value = value; 95 95 entry->e_reusable = reusable; 96 96 head = mb_cache_entry_head(cache, key); 97 97 hlist_bl_lock(head); 98 98 hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { 99 - if (dup->e_key == key && dup->e_block == block) { 99 + if (dup->e_key == key && dup->e_value == value) { 100 100 hlist_bl_unlock(head); 101 101 kmem_cache_free(mb_entry_cache, entry); 102 102 return -EBUSY; ··· 187 187 EXPORT_SYMBOL(mb_cache_entry_find_next); 188 188 189 189 /* 190 - * mb_cache_entry_get - get a cache entry by block number (and key) 190 + * mb_cache_entry_get - get a cache entry by value (and key) 191 191 * @cache - cache we work with 192 - * @key - key of block number @block 193 - * @block - block number 192 + * @key - key 193 + * @value - value 194 194 */ 195 195 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, 196 - sector_t block) 196 + u64 value) 197 197 { 198 198 struct hlist_bl_node *node; 199 199 struct hlist_bl_head *head; ··· 202 202 head = mb_cache_entry_head(cache, key); 203 203 hlist_bl_lock(head); 204 204 hlist_bl_for_each_entry(entry, node, head, e_hash_list) { 205 - if (entry->e_key == key && entry->e_block == block) { 205 + if (entry->e_key == key && entry->e_value == value) { 206 206 atomic_inc(&entry->e_refcnt); 207 207 goto out; 208 208 } ··· 214 214 } 215 215 EXPORT_SYMBOL(mb_cache_entry_get); 216 216 217 - /* mb_cache_entry_delete_block - remove information about block from cache 217 + /* mb_cache_entry_delete - remove a cache entry 218 218 * @cache - cache we work with 219 - * @key - key of block @block 220 - * 
* @block - block number 219 + * @key - key 220 + * @value - value 221 221 * 222 - * Remove entry from cache @cache with key @key with data stored in @block. 222 + * Remove entry from cache @cache with key @key and value @value. 223 223 */ 224 - void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key, 225 - sector_t block) 224 + void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value) 226 225 { 227 226 struct hlist_bl_node *node; 228 227 struct hlist_bl_head *head; ··· 230 231 head = mb_cache_entry_head(cache, key); 231 232 hlist_bl_lock(head); 232 233 hlist_bl_for_each_entry(entry, node, head, e_hash_list) { 233 - if (entry->e_key == key && entry->e_value == value) { 234 235 /* We keep hash list reference to keep entry alive */ 235 236 hlist_bl_del_init(&entry->e_hash_list); 236 237 hlist_bl_unlock(head); ··· 247 248 } 248 249 hlist_bl_unlock(head); 249 250 } 250 - EXPORT_SYMBOL(mb_cache_entry_delete_block); 251 + EXPORT_SYMBOL(mb_cache_entry_delete); 251 252 252 253 /* mb_cache_entry_touch - cache entry got used 253 254 * @cache - cache the entry belongs to
+5 -6
include/linux/mbcache.h
··· 19 19 u32 e_key; 20 20 u32 e_referenced:1; 21 21 u32 e_reusable:1; 22 - /* Block number of hashed block - stable during lifetime of the entry */ 23 - sector_t e_block; 22 + /* User provided value - stable during lifetime of the entry */ 23 + u64 e_value; 24 24 }; 25 25 26 26 struct mb_cache *mb_cache_create(int bucket_bits); 27 27 void mb_cache_destroy(struct mb_cache *cache); 28 28 29 29 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, 30 - sector_t block, bool reusable); 30 + u64 value, bool reusable); 31 31 void __mb_cache_entry_free(struct mb_cache_entry *entry); 32 32 static inline int mb_cache_entry_put(struct mb_cache *cache, 33 33 struct mb_cache_entry *entry) ··· 38 38 return 1; 39 39 } 40 40 41 - void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key, 42 - sector_t block); 41 + void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value); 43 42 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, 44 - sector_t block); 43 + u64 value); 45 44 struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, 46 45 u32 key); 47 46 struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,