Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nilfs2: always set back pointer to host inode in mapping->host

In the current nilfs, the page caches for btree nodes and metadata
files do not set a valid back pointer to the host inode in mapping->host.

This change makes every address space in nilfs use mapping->host
to hold a pointer to its host inode.

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>

+14 -36
-6
fs/nilfs2/btnode.c
··· 34 34 #include "page.h" 35 35 #include "btnode.h" 36 36 37 - void nilfs_btnode_cache_init(struct address_space *btnc, 38 - struct backing_dev_info *bdi) 39 - { 40 - nilfs_mapping_init(btnc, bdi); 41 - } 42 - 43 37 void nilfs_btnode_cache_clear(struct address_space *btnc) 44 38 { 45 39 invalidate_mapping_pages(btnc, 0, -1);
-1
fs/nilfs2/btnode.h
··· 37 37 struct buffer_head *newbh; 38 38 }; 39 39 40 - void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); 41 40 void nilfs_btnode_cache_clear(struct address_space *); 42 41 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, 43 42 __u64 blocknr);
+2 -2
fs/nilfs2/mdt.c
··· 450 450 451 451 INIT_LIST_HEAD(&shadow->frozen_buffers); 452 452 address_space_init_once(&shadow->frozen_data); 453 - nilfs_mapping_init(&shadow->frozen_data, bdi); 453 + nilfs_mapping_init(&shadow->frozen_data, inode, bdi); 454 454 address_space_init_once(&shadow->frozen_btnodes); 455 - nilfs_mapping_init(&shadow->frozen_btnodes, bdi); 455 + nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi); 456 456 mi->mi_shadow = shadow; 457 457 return 0; 458 458 }
-6
fs/nilfs2/nilfs.h
··· 80 80 return &ii->vfs_inode; 81 81 } 82 82 83 - static inline struct inode *NILFS_AS_I(struct address_space *mapping) 84 - { 85 - return (mapping->host) ? : 86 - container_of(mapping, struct inode, i_data); 87 - } 88 - 89 83 /* 90 84 * Dynamic state flags of NILFS on-memory inode (i_state) 91 85 */
+5 -8
fs/nilfs2/page.c
··· 182 182 void nilfs_page_bug(struct page *page) 183 183 { 184 184 struct address_space *m; 185 - unsigned long ino = 0; 185 + unsigned long ino; 186 186 187 187 if (unlikely(!page)) { 188 188 printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n"); ··· 190 190 } 191 191 192 192 m = page->mapping; 193 - if (m) { 194 - struct inode *inode = NILFS_AS_I(m); 195 - if (inode != NULL) 196 - ino = inode->i_ino; 197 - } 193 + ino = m ? m->host->i_ino : 0; 194 + 198 195 printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx " 199 196 "mapping=%p ino=%lu\n", 200 197 page, atomic_read(&page->_count), ··· 438 441 return nc; 439 442 } 440 443 441 - void nilfs_mapping_init(struct address_space *mapping, 444 + void nilfs_mapping_init(struct address_space *mapping, struct inode *inode, 442 445 struct backing_dev_info *bdi) 443 446 { 444 - mapping->host = NULL; 447 + mapping->host = inode; 445 448 mapping->flags = 0; 446 449 mapping_set_gfp_mask(mapping, GFP_NOFS); 447 450 mapping->assoc_mapping = NULL;
+1 -1
fs/nilfs2/page.h
··· 57 57 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); 58 58 void nilfs_copy_back_pages(struct address_space *, struct address_space *); 59 59 void nilfs_clear_dirty_pages(struct address_space *); 60 - void nilfs_mapping_init(struct address_space *mapping, 60 + void nilfs_mapping_init(struct address_space *mapping, struct inode *inode, 61 61 struct backing_dev_info *bdi); 62 62 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); 63 63 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
+5 -11
fs/nilfs2/segment.c
··· 655 655 if (unlikely(page->index > last)) 656 656 break; 657 657 658 - if (mapping->host) { 659 - lock_page(page); 660 - if (!page_has_buffers(page)) 661 - create_empty_buffers(page, 662 - 1 << inode->i_blkbits, 0); 663 - unlock_page(page); 664 - } 658 + lock_page(page); 659 + if (!page_has_buffers(page)) 660 + create_empty_buffers(page, 1 << inode->i_blkbits, 0); 661 + unlock_page(page); 665 662 666 663 bh = head = page_buffers(page); 667 664 do { ··· 1500 1503 nblocks = le32_to_cpu(finfo->fi_nblocks); 1501 1504 ndatablk = le32_to_cpu(finfo->fi_ndatablk); 1502 1505 1503 - if (buffer_nilfs_node(bh)) 1504 - inode = NILFS_BTNC_I(bh->b_page->mapping); 1505 - else 1506 - inode = NILFS_AS_I(bh->b_page->mapping); 1506 + inode = bh->b_page->mapping->host; 1507 1507 1508 1508 if (mode == SC_LSEG_DSYNC) 1509 1509 sc_op = &nilfs_sc_dsync_ops;
+1 -1
fs/nilfs2/super.c
··· 166 166 ii->i_state = 0; 167 167 ii->i_cno = 0; 168 168 ii->vfs_inode.i_version = 1; 169 - nilfs_btnode_cache_init(&ii->i_btnode_cache, sb->s_bdi); 169 + nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi); 170 170 return &ii->vfs_inode; 171 171 } 172 172