Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ext4: each filesystem creates and uses its own mb_cache

This patch adds new interfaces to create and destroy the cache,
ext4_xattr_create_cache() and ext4_xattr_destroy_cache(), and removes
the cache creation and destroy calls from ext4_init_xattr() and
ext4_exit_xattr() in fs/ext4/xattr.c.

fs/ext4/super.c has been changed so that when a filesystem is mounted
a cache is allocated and attached to its ext4_sb_info structure.

fs/mbcache.c has been changed so that only one slab allocator is
allocated and used by all mbcache structures.

Signed-off-by: T. Makphaibulchoke <tmac@hp.com>

authored by

T Makphaibulchoke and committed by
Theodore Ts'o
9c191f70 1f3e55fe

+62 -39
+1
fs/ext4/ext4.h
··· 1329 1329 struct list_head s_es_lru; 1330 1330 unsigned long s_es_last_sorted; 1331 1331 struct percpu_counter s_extent_cache_cnt; 1332 + struct mb_cache *s_mb_cache; 1332 1333 spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp; 1333 1334 1334 1335 /* Ratelimit ext4 messages. */
+17 -8
fs/ext4/super.c
··· 59 59 static struct ext4_lazy_init *ext4_li_info; 60 60 static struct mutex ext4_li_mtx; 61 61 static struct ext4_features *ext4_feat; 62 + static int ext4_mballoc_ready; 62 63 63 64 static int ext4_load_journal(struct super_block *, struct ext4_super_block *, 64 65 unsigned long journal_devnum); ··· 845 844 sync_blockdev(sbi->journal_bdev); 846 845 invalidate_bdev(sbi->journal_bdev); 847 846 ext4_blkdev_remove(sbi); 847 + } 848 + if (sbi->s_mb_cache) { 849 + ext4_xattr_destroy_cache(sbi->s_mb_cache); 850 + sbi->s_mb_cache = NULL; 848 851 } 849 852 if (sbi->s_mmp_tsk) 850 853 kthread_stop(sbi->s_mmp_tsk); ··· 4015 4010 percpu_counter_set(&sbi->s_dirtyclusters_counter, 0); 4016 4011 4017 4012 no_journal: 4013 + if (ext4_mballoc_ready) { 4014 + sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id); 4015 + if (!sbi->s_mb_cache) { 4016 + ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache"); 4017 + goto failed_mount_wq; 4018 + } 4019 + } 4020 + 4018 4021 /* 4019 4022 * Get the # of file system overhead blocks from the 4020 4023 * superblock if present. ··· 5532 5519 5533 5520 err = ext4_init_mballoc(); 5534 5521 if (err) 5535 - goto out3; 5536 - 5537 - err = ext4_init_xattr(); 5538 - if (err) 5539 5522 goto out2; 5523 + else 5524 + ext4_mballoc_ready = 1; 5540 5525 err = init_inodecache(); 5541 5526 if (err) 5542 5527 goto out1; ··· 5550 5539 unregister_as_ext3(); 5551 5540 destroy_inodecache(); 5552 5541 out1: 5553 - ext4_exit_xattr(); 5554 - out2: 5542 + ext4_mballoc_ready = 0; 5555 5543 ext4_exit_mballoc(); 5556 - out3: 5544 + out2: 5557 5545 ext4_exit_feat_adverts(); 5558 5546 out4: 5559 5547 if (ext4_proc_root) ··· 5575 5565 unregister_as_ext3(); 5576 5566 unregister_filesystem(&ext4_fs_type); 5577 5567 destroy_inodecache(); 5578 - ext4_exit_xattr(); 5579 5568 ext4_exit_mballoc(); 5580 5569 ext4_exit_feat_adverts(); 5581 5570 remove_proc_entry("fs/ext4", NULL);
+28 -23
fs/ext4/xattr.c
··· 81 81 # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__) 82 82 #endif 83 83 84 - static void ext4_xattr_cache_insert(struct buffer_head *); 84 + static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *); 85 85 static struct buffer_head *ext4_xattr_cache_find(struct inode *, 86 86 struct ext4_xattr_header *, 87 87 struct mb_cache_entry **); ··· 89 89 struct ext4_xattr_entry *); 90 90 static int ext4_xattr_list(struct dentry *dentry, char *buffer, 91 91 size_t buffer_size); 92 - 93 - static struct mb_cache *ext4_xattr_cache; 94 92 95 93 static const struct xattr_handler *ext4_xattr_handler_map[] = { 96 94 [EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler, ··· 114 116 #endif 115 117 NULL 116 118 }; 119 + 120 + #define EXT4_GET_MB_CACHE(inode) (((struct ext4_sb_info *) \ 121 + inode->i_sb->s_fs_info)->s_mb_cache) 117 122 118 123 static __le32 ext4_xattr_block_csum(struct inode *inode, 119 124 sector_t block_nr, ··· 266 265 struct ext4_xattr_entry *entry; 267 266 size_t size; 268 267 int error; 268 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 269 269 270 270 ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", 271 271 name_index, name, buffer, (long)buffer_size); ··· 288 286 error = -EIO; 289 287 goto cleanup; 290 288 } 291 - ext4_xattr_cache_insert(bh); 289 + ext4_xattr_cache_insert(ext4_mb_cache, bh); 292 290 entry = BFIRST(bh); 293 291 error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1); 294 292 if (error == -EIO) ··· 411 409 struct inode *inode = dentry->d_inode; 412 410 struct buffer_head *bh = NULL; 413 411 int error; 412 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 414 413 415 414 ea_idebug(inode, "buffer=%p, buffer_size=%ld", 416 415 buffer, (long)buffer_size); ··· 433 430 error = -EIO; 434 431 goto cleanup; 435 432 } 436 - ext4_xattr_cache_insert(bh); 433 + ext4_xattr_cache_insert(ext4_mb_cache, bh); 437 434 error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, 
buffer_size); 438 435 439 436 cleanup: ··· 529 526 { 530 527 struct mb_cache_entry *ce = NULL; 531 528 int error = 0; 529 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 532 530 533 - ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr); 531 + ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr); 534 532 error = ext4_journal_get_write_access(handle, bh); 535 533 if (error) 536 534 goto out; ··· 750 746 struct ext4_xattr_search *s = &bs->s; 751 747 struct mb_cache_entry *ce = NULL; 752 748 int error = 0; 749 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 753 750 754 751 #define header(x) ((struct ext4_xattr_header *)(x)) 755 752 756 753 if (i->value && i->value_len > sb->s_blocksize) 757 754 return -ENOSPC; 758 755 if (s->base) { 759 - ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev, 756 + ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev, 760 757 bs->bh->b_blocknr); 761 758 error = ext4_journal_get_write_access(handle, bs->bh); 762 759 if (error) ··· 775 770 if (!IS_LAST_ENTRY(s->first)) 776 771 ext4_xattr_rehash(header(s->base), 777 772 s->here); 778 - ext4_xattr_cache_insert(bs->bh); 773 + ext4_xattr_cache_insert(ext4_mb_cache, 774 + bs->bh); 779 775 } 780 776 unlock_buffer(bs->bh); 781 777 if (error == -EIO) ··· 912 906 memcpy(new_bh->b_data, s->base, new_bh->b_size); 913 907 set_buffer_uptodate(new_bh); 914 908 unlock_buffer(new_bh); 915 - ext4_xattr_cache_insert(new_bh); 909 + ext4_xattr_cache_insert(ext4_mb_cache, new_bh); 916 910 error = ext4_handle_dirty_xattr_block(handle, 917 911 inode, new_bh); 918 912 if (error) ··· 1501 1495 * Returns 0, or a negative error number on failure. 
1502 1496 */ 1503 1497 static void 1504 - ext4_xattr_cache_insert(struct buffer_head *bh) 1498 + ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh) 1505 1499 { 1506 1500 __u32 hash = le32_to_cpu(BHDR(bh)->h_hash); 1507 1501 struct mb_cache_entry *ce; 1508 1502 int error; 1509 1503 1510 - ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS); 1504 + ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS); 1511 1505 if (!ce) { 1512 1506 ea_bdebug(bh, "out of memory"); 1513 1507 return; ··· 1579 1573 { 1580 1574 __u32 hash = le32_to_cpu(header->h_hash); 1581 1575 struct mb_cache_entry *ce; 1576 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); 1582 1577 1583 1578 if (!header->h_hash) 1584 1579 return NULL; /* never share */ 1585 1580 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); 1586 1581 again: 1587 - ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev, 1582 + ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev, 1588 1583 hash); 1589 1584 while (ce) { 1590 1585 struct buffer_head *bh; ··· 1683 1676 1684 1677 #undef BLOCK_HASH_SHIFT 1685 1678 1686 - int __init 1687 - ext4_init_xattr(void) 1679 + #define HASH_BUCKET_BITS 10 1680 + 1681 + struct mb_cache * 1682 + ext4_xattr_create_cache(char *name) 1688 1683 { 1689 - ext4_xattr_cache = mb_cache_create("ext4_xattr", 6); 1690 - if (!ext4_xattr_cache) 1691 - return -ENOMEM; 1692 - return 0; 1684 + return mb_cache_create(name, HASH_BUCKET_BITS); 1693 1685 } 1694 1686 1695 - void 1696 - ext4_exit_xattr(void) 1687 + void ext4_xattr_destroy_cache(struct mb_cache *cache) 1697 1688 { 1698 - if (ext4_xattr_cache) 1699 - mb_cache_destroy(ext4_xattr_cache); 1700 - ext4_xattr_cache = NULL; 1689 + if (cache) 1690 + mb_cache_destroy(cache); 1701 1691 } 1692 +
+3 -3
fs/ext4/xattr.h
··· 110 110 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, 111 111 struct ext4_inode *raw_inode, handle_t *handle); 112 112 113 - extern int __init ext4_init_xattr(void); 114 - extern void ext4_exit_xattr(void); 115 - 116 113 extern const struct xattr_handler *ext4_xattr_handlers[]; 117 114 118 115 extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, ··· 120 123 extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, 121 124 struct ext4_xattr_info *i, 122 125 struct ext4_xattr_ibody_find *is); 126 + 127 + extern struct mb_cache *ext4_xattr_create_cache(char *name); 128 + extern void ext4_xattr_destroy_cache(struct mb_cache *); 123 129 124 130 #ifdef CONFIG_EXT4_FS_SECURITY 125 131 extern int ext4_init_security(handle_t *handle, struct inode *inode,
+13 -5
fs/mbcache.c
··· 99 99 100 100 static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue); 101 101 static struct blockgroup_lock *mb_cache_bg_lock; 102 + static struct kmem_cache *mb_cache_kmem_cache; 102 103 103 104 MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>"); 104 105 MODULE_DESCRIPTION("Meta block cache (for extended attributes)"); ··· 352 351 goto fail; 353 352 for (n=0; n<bucket_count; n++) 354 353 INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]); 355 - cache->c_entry_cache = kmem_cache_create(name, 356 - sizeof(struct mb_cache_entry), 0, 357 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); 358 - if (!cache->c_entry_cache) 359 - goto fail2; 354 + if (!mb_cache_kmem_cache) { 355 + mb_cache_kmem_cache = kmem_cache_create(name, 356 + sizeof(struct mb_cache_entry), 0, 357 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); 358 + if (!mb_cache_kmem_cache) 359 + goto fail2; 360 + } 361 + cache->c_entry_cache = mb_cache_kmem_cache; 360 362 361 363 /* 362 364 * Set an upper limit on the number of cache entries so that the hash ··· 480 476 atomic_read(&cache->c_entry_count)); 481 477 } 482 478 479 + if (list_empty(&mb_cache_list)) { 480 + kmem_cache_destroy(mb_cache_kmem_cache); 481 + mb_cache_kmem_cache = NULL; 482 + } 483 483 kfree(cache->c_index_hash); 484 484 kfree(cache->c_block_hash); 485 485 kfree(cache);