Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'vfs-6.19-rc1.inode' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs inode updates from Christian Brauner:
"Features:

- Hide inode->i_state behind accessors. Open-coded accesses prevent
asserting they are done correctly. One obvious aspect is locking,
but significantly more can be checked. For example it can be
detected when the code is clearing flags which are already missing,
or is setting flags when it is illegal (e.g., I_FREEING when
->i_count > 0)

- Provide accessors for ->i_state, convert all filesystems using
coccinelle and manual conversions (btrfs, ceph, smb, f2fs, gfs2,
overlayfs, nilfs2, xfs), and make plain ->i_state access fail to
compile

- Rework I_NEW handling to operate without fences, simplifying the
code after the accessor infrastructure is in place

Cleanups:

- Move wait_on_inode() from writeback.h to fs.h

- Spell out fenced ->i_state accesses with explicit smp_wmb/smp_rmb
for clarity

- Cosmetic fixes to LRU handling

- Push list presence check into inode_io_list_del()

- Touch up predicts in __d_lookup_rcu()

- ocfs2: retire ocfs2_drop_inode() and I_WILL_FREE usage

- Assert on ->i_count in iput_final()

- Assert ->i_lock held in __iget()

Fixes:

- Add missing fences to I_NEW handling"

* tag 'vfs-6.19-rc1.inode' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (22 commits)
dcache: touch up predicts in __d_lookup_rcu()
fs: push list presence check into inode_io_list_del()
fs: cosmetic fixes to lru handling
fs: rework I_NEW handling to operate without fences
fs: make plain ->i_state access fail to compile
xfs: use the new ->i_state accessors
nilfs2: use the new ->i_state accessors
overlayfs: use the new ->i_state accessors
gfs2: use the new ->i_state accessors
f2fs: use the new ->i_state accessors
smb: use the new ->i_state accessors
ceph: use the new ->i_state accessors
btrfs: use the new ->i_state accessors
Manual conversion to use ->i_state accessors of all places not covered by coccinelle
Coccinelle-based conversion to use ->i_state accessors
fs: provide accessors for ->i_state
fs: spell out fenced ->i_state accesses with explicit smp_wmb/smp_rmb
fs: move wait_on_inode() from writeback.h to fs.h
fs: add missing fences to I_NEW handling
ocfs2: retire ocfs2_drop_inode() and I_WILL_FREE usage
...

+517 -417
+1 -1
Documentation/filesystems/porting.rst
··· 211 211 e.g.:: 212 212 213 213 inode = iget_locked(sb, ino); 214 - if (inode->i_state & I_NEW) { 214 + if (inode_state_read_once(inode) & I_NEW) { 215 215 err = read_inode_from_disk(inode); 216 216 if (err < 0) { 217 217 iget_failed(inode);
+2 -2
block/bdev.c
··· 67 67 int ret; 68 68 69 69 spin_lock(&inode->i_lock); 70 - while (inode->i_state & I_DIRTY) { 70 + while (inode_state_read(inode) & I_DIRTY) { 71 71 spin_unlock(&inode->i_lock); 72 72 ret = write_inode_now(inode, true); 73 73 if (ret) ··· 1282 1282 struct block_device *bdev; 1283 1283 1284 1284 spin_lock(&inode->i_lock); 1285 - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || 1285 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW) || 1286 1286 mapping->nrpages == 0) { 1287 1287 spin_unlock(&inode->i_lock); 1288 1288 continue;
+1 -1
drivers/dax/super.c
··· 433 433 return NULL; 434 434 435 435 dax_dev = to_dax_dev(inode); 436 - if (inode->i_state & I_NEW) { 436 + if (inode_state_read_once(inode) & I_NEW) { 437 437 set_bit(DAXDEV_ALIVE, &dax_dev->flags); 438 438 inode->i_cdev = &dax_dev->cdev; 439 439 inode->i_mode = S_IFCHR;
+1 -1
fs/9p/vfs_inode.c
··· 422 422 inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode, st); 423 423 if (!inode) 424 424 return ERR_PTR(-ENOMEM); 425 - if (!(inode->i_state & I_NEW)) 425 + if (!(inode_state_read_once(inode) & I_NEW)) 426 426 return inode; 427 427 /* 428 428 * initialize the inode with the stat info
+1 -1
fs/9p/vfs_inode_dotl.c
··· 112 112 inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode_dotl, st); 113 113 if (!inode) 114 114 return ERR_PTR(-ENOMEM); 115 - if (!(inode->i_state & I_NEW)) 115 + if (!(inode_state_read_once(inode) & I_NEW)) 116 116 return inode; 117 117 /* 118 118 * initialize the inode with the stat info
+1 -1
fs/affs/inode.c
··· 29 29 inode = iget_locked(sb, ino); 30 30 if (!inode) 31 31 return ERR_PTR(-ENOMEM); 32 - if (!(inode->i_state & I_NEW)) 32 + if (!(inode_state_read_once(inode) & I_NEW)) 33 33 return inode; 34 34 35 35 pr_debug("affs_iget(%lu)\n", inode->i_ino);
+2 -2
fs/afs/dir.c
··· 779 779 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; 780 780 struct inode *inode = NULL, *ti; 781 781 afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version); 782 - bool supports_ibulk; 782 + bool supports_ibulk, isnew; 783 783 long ret; 784 784 int i; 785 785 ··· 850 850 * callback counters. 851 851 */ 852 852 ti = ilookup5_nowait(dir->i_sb, vp->fid.vnode, 853 - afs_ilookup5_test_by_fid, &vp->fid); 853 + afs_ilookup5_test_by_fid, &vp->fid, &isnew); 854 854 if (!IS_ERR_OR_NULL(ti)) { 855 855 vnode = AFS_FS_I(ti); 856 856 vp->dv_before = vnode->status.data_version;
+3 -3
fs/afs/dynroot.c
··· 64 64 65 65 vnode = AFS_FS_I(inode); 66 66 67 - if (inode->i_state & I_NEW) { 67 + if (inode_state_read_once(inode) & I_NEW) { 68 68 netfs_inode_init(&vnode->netfs, NULL, false); 69 69 simple_inode_init_ts(inode); 70 70 set_nlink(inode, 2); ··· 259 259 260 260 vnode = AFS_FS_I(inode); 261 261 262 - if (inode->i_state & I_NEW) { 262 + if (inode_state_read_once(inode) & I_NEW) { 263 263 netfs_inode_init(&vnode->netfs, NULL, false); 264 264 simple_inode_init_ts(inode); 265 265 set_nlink(inode, 1); ··· 384 384 vnode = AFS_FS_I(inode); 385 385 386 386 /* there shouldn't be an existing inode */ 387 - if (inode->i_state & I_NEW) { 387 + if (inode_state_read_once(inode) & I_NEW) { 388 388 netfs_inode_init(&vnode->netfs, NULL, false); 389 389 simple_inode_init_ts(inode); 390 390 set_nlink(inode, 2);
+4 -4
fs/afs/inode.c
··· 427 427 struct afs_vnode *vnode = vp->vnode; 428 428 int ret; 429 429 430 - if (vnode->netfs.inode.i_state & I_NEW) { 430 + if (inode_state_read_once(&vnode->netfs.inode) & I_NEW) { 431 431 ret = afs_inode_init_from_status(op, vp, vnode); 432 432 afs_op_set_error(op, ret); 433 433 if (ret == 0) ··· 579 579 inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); 580 580 581 581 /* deal with an existing inode */ 582 - if (!(inode->i_state & I_NEW)) { 582 + if (!(inode_state_read_once(inode) & I_NEW)) { 583 583 _leave(" = %p", inode); 584 584 return inode; 585 585 } ··· 639 639 640 640 _debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid); 641 641 642 - BUG_ON(!(inode->i_state & I_NEW)); 642 + BUG_ON(!(inode_state_read_once(inode) & I_NEW)); 643 643 644 644 vnode = AFS_FS_I(inode); 645 645 vnode->cb_v_check = atomic_read(&as->volume->cb_v_break); ··· 748 748 749 749 if ((S_ISDIR(inode->i_mode) || 750 750 S_ISLNK(inode->i_mode)) && 751 - (inode->i_state & I_DIRTY) && 751 + (inode_state_read_once(inode) & I_DIRTY) && 752 752 !sbi->dyn_root) { 753 753 struct writeback_control wbc = { 754 754 .sync_mode = WB_SYNC_ALL,
+1 -1
fs/befs/linuxvfs.c
··· 307 307 inode = iget_locked(sb, ino); 308 308 if (!inode) 309 309 return ERR_PTR(-ENOMEM); 310 - if (!(inode->i_state & I_NEW)) 310 + if (!(inode_state_read_once(inode) & I_NEW)) 311 311 return inode; 312 312 313 313 befs_ino = BEFS_I(inode);
+1 -1
fs/bfs/inode.c
··· 42 42 inode = iget_locked(sb, ino); 43 43 if (!inode) 44 44 return ERR_PTR(-ENOMEM); 45 - if (!(inode->i_state & I_NEW)) 45 + if (!(inode_state_read_once(inode) & I_NEW)) 46 46 return inode; 47 47 48 48 if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
+5 -5
fs/btrfs/inode.c
··· 3886 3886 ASSERT(ret != -ENOMEM); 3887 3887 return ret; 3888 3888 } else if (existing) { 3889 - WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING))); 3889 + WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING))); 3890 3890 } 3891 3891 3892 3892 return 0; ··· 5363 5363 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5364 5364 struct rb_node *node; 5365 5365 5366 - ASSERT(inode->i_state & I_FREEING); 5366 + ASSERT(inode_state_read_once(inode) & I_FREEING); 5367 5367 truncate_inode_pages_final(&inode->i_data); 5368 5368 5369 5369 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); ··· 5801 5801 if (!inode) 5802 5802 return ERR_PTR(-ENOMEM); 5803 5803 5804 - if (!(inode->vfs_inode.i_state & I_NEW)) 5804 + if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW)) 5805 5805 return inode; 5806 5806 5807 5807 ret = btrfs_read_locked_inode(inode, path); ··· 5825 5825 if (!inode) 5826 5826 return ERR_PTR(-ENOMEM); 5827 5827 5828 - if (!(inode->vfs_inode.i_state & I_NEW)) 5828 + if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW)) 5829 5829 return inode; 5830 5830 5831 5831 path = btrfs_alloc_path(); ··· 7486 7486 u64 page_start = folio_pos(folio); 7487 7487 u64 page_end = page_start + folio_size(folio) - 1; 7488 7488 u64 cur; 7489 - int inode_evicting = inode->vfs_inode.i_state & I_FREEING; 7489 + int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING; 7490 7490 7491 7491 /* 7492 7492 * We have folio locked so no new ordered extent can be created on this
+2 -2
fs/buffer.c
··· 611 611 return err; 612 612 613 613 ret = sync_mapping_buffers(inode->i_mapping); 614 - if (!(inode->i_state & I_DIRTY_ALL)) 614 + if (!(inode_state_read_once(inode) & I_DIRTY_ALL)) 615 615 goto out; 616 - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 616 + if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) 617 617 goto out; 618 618 619 619 err = sync_inode_metadata(inode, 1);
+1 -1
fs/ceph/cache.c
··· 26 26 return; 27 27 28 28 /* Only new inodes! */ 29 - if (!(inode->i_state & I_NEW)) 29 + if (!(inode_state_read_once(inode) & I_NEW)) 30 30 return; 31 31 32 32 WARN_ON_ONCE(ci->netfs.cache);
+2 -2
fs/ceph/crypto.c
··· 329 329 out: 330 330 kfree(cryptbuf); 331 331 if (dir != parent) { 332 - if ((dir->i_state & I_NEW)) 332 + if ((inode_state_read_once(dir) & I_NEW)) 333 333 discard_new_inode(dir); 334 334 else 335 335 iput(dir); ··· 438 438 fscrypt_fname_free_buffer(&_tname); 439 439 out_inode: 440 440 if (dir != fname->dir) { 441 - if ((dir->i_state & I_NEW)) 441 + if ((inode_state_read_once(dir) & I_NEW)) 442 442 discard_new_inode(dir); 443 443 else 444 444 iput(dir);
+2 -2
fs/ceph/file.c
··· 740 740 vino.ino, ceph_ino(dir), dentry->d_name.name); 741 741 ceph_dir_clear_ordered(dir); 742 742 ceph_init_inode_acls(inode, as_ctx); 743 - if (inode->i_state & I_NEW) { 743 + if (inode_state_read_once(inode) & I_NEW) { 744 744 /* 745 745 * If it's not I_NEW, then someone created this before 746 746 * we got here. Assume the server is aware of it at ··· 901 901 new_inode = NULL; 902 902 goto out_req; 903 903 } 904 - WARN_ON_ONCE(!(new_inode->i_state & I_NEW)); 904 + WARN_ON_ONCE(!(inode_state_read_once(new_inode) & I_NEW)); 905 905 906 906 spin_lock(&dentry->d_lock); 907 907 di->flags |= CEPH_DENTRY_ASYNC_CREATE;
+14 -14
fs/ceph/inode.c
··· 132 132 goto out_err; 133 133 } 134 134 135 - inode->i_state = 0; 135 + inode_state_assign_raw(inode, 0); 136 136 inode->i_mode = *mode; 137 137 138 138 err = ceph_security_init_secctx(dentry, *mode, as_ctx); ··· 201 201 202 202 doutc(cl, "on %llx=%llx.%llx got %p new %d\n", 203 203 ceph_present_inode(inode), ceph_vinop(inode), inode, 204 - !!(inode->i_state & I_NEW)); 204 + !!(inode_state_read_once(inode) & I_NEW)); 205 205 return inode; 206 206 } 207 207 ··· 228 228 goto err; 229 229 } 230 230 231 - if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) { 231 + if (!(inode_state_read_once(inode) & I_NEW) && !S_ISDIR(inode->i_mode)) { 232 232 pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n", 233 233 inode->i_mode); 234 234 goto err; ··· 261 261 } 262 262 } 263 263 #endif 264 - if (inode->i_state & I_NEW) { 264 + if (inode_state_read_once(inode) & I_NEW) { 265 265 inode->i_op = &ceph_snapdir_iops; 266 266 inode->i_fop = &ceph_snapdir_fops; 267 267 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ ··· 270 270 271 271 return inode; 272 272 err: 273 - if ((inode->i_state & I_NEW)) 273 + if ((inode_state_read_once(inode) & I_NEW)) 274 274 discard_new_inode(inode); 275 275 else 276 276 iput(inode); ··· 744 744 745 745 netfs_wait_for_outstanding_io(inode); 746 746 truncate_inode_pages_final(&inode->i_data); 747 - if (inode->i_state & I_PINNING_NETFS_WB) 747 + if (inode_state_read_once(inode) & I_PINNING_NETFS_WB) 748 748 ceph_fscache_unuse_cookie(inode, true); 749 749 clear_inode(inode); 750 750 ··· 1013 1013 le64_to_cpu(info->version), ci->i_version); 1014 1014 1015 1015 /* Once I_NEW is cleared, we can't change type or dev numbers */ 1016 - if (inode->i_state & I_NEW) { 1016 + if (inode_state_read_once(inode) & I_NEW) { 1017 1017 inode->i_mode = mode; 1018 1018 } else { 1019 1019 if (inode_wrong_type(inode, mode)) { ··· 1090 1090 1091 1091 #ifdef CONFIG_FS_ENCRYPTION 1092 1092 if (iinfo->fscrypt_auth_len && 1093 - ((inode->i_state & I_NEW) || 
(ci->fscrypt_auth_len == 0))) { 1093 + ((inode_state_read_once(inode) & I_NEW) || (ci->fscrypt_auth_len == 0))) { 1094 1094 kfree(ci->fscrypt_auth); 1095 1095 ci->fscrypt_auth_len = iinfo->fscrypt_auth_len; 1096 1096 ci->fscrypt_auth = iinfo->fscrypt_auth; ··· 1692 1692 pr_err_client(cl, "badness %p %llx.%llx\n", in, 1693 1693 ceph_vinop(in)); 1694 1694 req->r_target_inode = NULL; 1695 - if (in->i_state & I_NEW) 1695 + if (inode_state_read_once(in) & I_NEW) 1696 1696 discard_new_inode(in); 1697 1697 else 1698 1698 iput(in); 1699 1699 goto done; 1700 1700 } 1701 - if (in->i_state & I_NEW) 1701 + if (inode_state_read_once(in) & I_NEW) 1702 1702 unlock_new_inode(in); 1703 1703 } 1704 1704 ··· 1898 1898 pr_err_client(cl, "inode badness on %p got %d\n", in, 1899 1899 rc); 1900 1900 err = rc; 1901 - if (in->i_state & I_NEW) { 1901 + if (inode_state_read_once(in) & I_NEW) { 1902 1902 ihold(in); 1903 1903 discard_new_inode(in); 1904 1904 } 1905 - } else if (in->i_state & I_NEW) { 1905 + } else if (inode_state_read_once(in) & I_NEW) { 1906 1906 unlock_new_inode(in); 1907 1907 } 1908 1908 ··· 2114 2114 pr_err_client(cl, "badness on %p %llx.%llx\n", in, 2115 2115 ceph_vinop(in)); 2116 2116 if (d_really_is_negative(dn)) { 2117 - if (in->i_state & I_NEW) { 2117 + if (inode_state_read_once(in) & I_NEW) { 2118 2118 ihold(in); 2119 2119 discard_new_inode(in); 2120 2120 } ··· 2124 2124 err = ret; 2125 2125 goto next_item; 2126 2126 } 2127 - if (in->i_state & I_NEW) 2127 + if (inode_state_read_once(in) & I_NEW) 2128 2128 unlock_new_inode(in); 2129 2129 2130 2130 if (d_really_is_negative(dn)) {
+2 -2
fs/coda/cnode.c
··· 70 70 if (!inode) 71 71 return ERR_PTR(-ENOMEM); 72 72 73 - if (inode->i_state & I_NEW) { 73 + if (inode_state_read_once(inode) & I_NEW) { 74 74 cii = ITOC(inode); 75 75 /* we still need to set i_ino for things like stat(2) */ 76 76 inode->i_ino = hash; ··· 148 148 149 149 /* we should never see newly created inodes because we intentionally 150 150 * fail in the initialization callback */ 151 - BUG_ON(inode->i_state & I_NEW); 151 + BUG_ON(inode_state_read_once(inode) & I_NEW); 152 152 153 153 return inode; 154 154 }
+1 -1
fs/cramfs/inode.c
··· 95 95 inode = iget_locked(sb, cramino(cramfs_inode, offset)); 96 96 if (!inode) 97 97 return ERR_PTR(-ENOMEM); 98 - if (!(inode->i_state & I_NEW)) 98 + if (!(inode_state_read_once(inode) & I_NEW)) 99 99 return inode; 100 100 101 101 switch (cramfs_inode->mode & S_IFMT) {
+1 -1
fs/crypto/keyring.c
··· 945 945 list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) { 946 946 inode = ci->ci_inode; 947 947 spin_lock(&inode->i_lock); 948 - if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { 948 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { 949 949 spin_unlock(&inode->i_lock); 950 950 continue; 951 951 }
+1 -1
fs/crypto/keysetup.c
··· 834 834 * userspace is still using the files, inodes can be dirtied between 835 835 * then and now. We mustn't lose any writes, so skip dirty inodes here. 836 836 */ 837 - if (inode->i_state & I_DIRTY_ALL) 837 + if (inode_state_read(inode) & I_DIRTY_ALL) 838 838 return 0; 839 839 840 840 /*
+16 -13
fs/dcache.c
··· 795 795 de->d_flags |= DCACHE_DONTCACHE; 796 796 spin_unlock(&de->d_lock); 797 797 } 798 - inode->i_state |= I_DONTCACHE; 798 + inode_state_set(inode, I_DONTCACHE); 799 799 spin_unlock(&inode->i_lock); 800 800 } 801 801 EXPORT_SYMBOL(d_mark_dontcache); ··· 1074 1074 spin_lock(&inode->i_lock); 1075 1075 // ->i_dentry and ->i_rcu are colocated, but the latter won't be 1076 1076 // used without having I_FREEING set, which means no aliases left 1077 - if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) { 1077 + if (likely(!(inode_state_read(inode) & I_FREEING) && !hlist_empty(l))) { 1078 1078 if (S_ISDIR(inode->i_mode)) { 1079 1079 de = hlist_entry(l->first, struct dentry, d_u.d_alias); 1080 1080 } else { ··· 1981 1981 security_d_instantiate(entry, inode); 1982 1982 spin_lock(&inode->i_lock); 1983 1983 __d_instantiate(entry, inode); 1984 - WARN_ON(!(inode->i_state & I_NEW)); 1985 - inode->i_state &= ~I_NEW & ~I_CREATING; 1986 - /* 1987 - * Pairs with the barrier in prepare_to_wait_event() to make sure 1988 - * ___wait_var_event() either sees the bit cleared or 1989 - * waitqueue_active() check in wake_up_var() sees the waiter. 1990 - */ 1991 - smp_mb(); 1984 + WARN_ON(!(inode_state_read(inode) & I_NEW)); 1985 + inode_state_clear(inode, I_NEW | I_CREATING); 1992 1986 inode_wake_up_bit(inode, __I_NEW); 1993 1987 spin_unlock(&inode->i_lock); 1994 1988 } ··· 2301 2307 seq = raw_seqcount_begin(&dentry->d_seq); 2302 2308 if (dentry->d_parent != parent) 2303 2309 continue; 2304 - if (d_unhashed(dentry)) 2305 - continue; 2306 2310 if (dentry->d_name.hash_len != hashlen) 2307 2311 continue; 2308 - if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0) 2312 + if (unlikely(dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)) 2313 + continue; 2314 + /* 2315 + * Check for the dentry being unhashed. 
2316 + * 2317 + * As tempting as it is, we *can't* skip it because of a race window 2318 + * between us finding the dentry before it gets unhashed and loading 2319 + * the sequence counter after unhashing is finished. 2320 + * 2321 + * We can at least predict on it. 2322 + */ 2323 + if (unlikely(d_unhashed(dentry))) 2309 2324 continue; 2310 2325 *seqp = seq; 2311 2326 return dentry;
+1 -1
fs/drop_caches.c
··· 28 28 * inodes without pages but we deliberately won't in case 29 29 * we need to reschedule to avoid softlockups. 30 30 */ 31 - if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || 31 + if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) || 32 32 (mapping_empty(inode->i_mapping) && !need_resched())) { 33 33 spin_unlock(&inode->i_lock); 34 34 continue;
+3 -3
fs/ecryptfs/inode.c
··· 95 95 iput(lower_inode); 96 96 return ERR_PTR(-EACCES); 97 97 } 98 - if (!(inode->i_state & I_NEW)) 98 + if (!(inode_state_read_once(inode) & I_NEW)) 99 99 iput(lower_inode); 100 100 101 101 return inode; ··· 106 106 { 107 107 struct inode *inode = __ecryptfs_get_inode(lower_inode, sb); 108 108 109 - if (!IS_ERR(inode) && (inode->i_state & I_NEW)) 109 + if (!IS_ERR(inode) && (inode_state_read_once(inode) & I_NEW)) 110 110 unlock_new_inode(inode); 111 111 112 112 return inode; ··· 364 364 } 365 365 } 366 366 367 - if (inode->i_state & I_NEW) 367 + if (inode_state_read_once(inode) & I_NEW) 368 368 unlock_new_inode(inode); 369 369 return d_splice_alias(inode, dentry); 370 370 }
+1 -1
fs/efs/inode.c
··· 62 62 inode = iget_locked(super, ino); 63 63 if (!inode) 64 64 return ERR_PTR(-ENOMEM); 65 - if (!(inode->i_state & I_NEW)) 65 + if (!(inode_state_read_once(inode) & I_NEW)) 66 66 return inode; 67 67 68 68 in = INODE_INFO(inode);
+1 -1
fs/erofs/inode.c
··· 295 295 if (!inode) 296 296 return ERR_PTR(-ENOMEM); 297 297 298 - if (inode->i_state & I_NEW) { 298 + if (inode_state_read_once(inode) & I_NEW) { 299 299 int err = erofs_fill_inode(inode); 300 300 301 301 if (err) {
+1 -1
fs/ext2/inode.c
··· 1398 1398 inode = iget_locked(sb, ino); 1399 1399 if (!inode) 1400 1400 return ERR_PTR(-ENOMEM); 1401 - if (!(inode->i_state & I_NEW)) 1401 + if (!(inode_state_read_once(inode) & I_NEW)) 1402 1402 return inode; 1403 1403 1404 1404 ei = EXT2_I(inode);
+6 -7
fs/ext4/inode.c
··· 202 202 * the inode. Flush worker is ignoring it because of I_FREEING flag but 203 203 * we still need to remove the inode from the writeback lists. 204 204 */ 205 - if (!list_empty_careful(&inode->i_io_list)) 206 - inode_io_list_del(inode); 205 + inode_io_list_del(inode); 207 206 208 207 /* 209 208 * Protect us against freezing - iput() caller didn't have to have any ··· 424 425 if (!S_ISREG(inode->i_mode) || 425 426 IS_NOQUOTA(inode) || IS_VERITY(inode) || 426 427 is_special_ino(inode->i_sb, inode->i_ino) || 427 - (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) || 428 + (inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) || 428 429 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) || 429 430 ext4_verity_in_progress(inode)) 430 431 return; ··· 3472 3473 /* Any metadata buffers to write? */ 3473 3474 if (!list_empty(&inode->i_mapping->i_private_list)) 3474 3475 return true; 3475 - return inode->i_state & I_DIRTY_DATASYNC; 3476 + return inode_state_read_once(inode) & I_DIRTY_DATASYNC; 3476 3477 } 3477 3478 3478 3479 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap, ··· 4551 4552 * or it's a completely new inode. In those cases we might not 4552 4553 * have i_rwsem locked because it's not necessary. 
4553 4554 */ 4554 - if (!(inode->i_state & (I_NEW|I_FREEING))) 4555 + if (!(inode_state_read_once(inode) & (I_NEW | I_FREEING))) 4555 4556 WARN_ON(!inode_is_locked(inode)); 4556 4557 trace_ext4_truncate_enter(inode); 4557 4558 ··· 5209 5210 inode = iget_locked(sb, ino); 5210 5211 if (!inode) 5211 5212 return ERR_PTR(-ENOMEM); 5212 - if (!(inode->i_state & I_NEW)) { 5213 + if (!(inode_state_read_once(inode) & I_NEW)) { 5213 5214 ret = check_igot_inode(inode, flags, function, line); 5214 5215 if (ret) { 5215 5216 iput(inode); ··· 5548 5549 if (inode_is_dirtytime_only(inode)) { 5549 5550 struct ext4_inode_info *ei = EXT4_I(inode); 5550 5551 5551 - inode->i_state &= ~I_DIRTY_TIME; 5552 + inode_state_clear(inode, I_DIRTY_TIME); 5552 5553 spin_unlock(&inode->i_lock); 5553 5554 5554 5555 spin_lock(&ei->i_raw_lock);
+2 -2
fs/ext4/orphan.c
··· 107 107 if (!sbi->s_journal || is_bad_inode(inode)) 108 108 return 0; 109 109 110 - WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && 110 + WARN_ON_ONCE(!(inode_state_read_once(inode) & (I_NEW | I_FREEING)) && 111 111 !inode_is_locked(inode)); 112 112 if (ext4_inode_orphan_tracked(inode)) 113 113 return 0; ··· 232 232 if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS)) 233 233 return 0; 234 234 235 - WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && 235 + WARN_ON_ONCE(!(inode_state_read_once(inode) & (I_NEW | I_FREEING)) && 236 236 !inode_is_locked(inode)); 237 237 if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE)) 238 238 return ext4_orphan_file_del(handle, inode);
+1 -1
fs/f2fs/data.c
··· 4222 4222 4223 4223 if (map.m_flags & F2FS_MAP_NEW) 4224 4224 iomap->flags |= IOMAP_F_NEW; 4225 - if ((inode->i_state & I_DIRTY_DATASYNC) || 4225 + if ((inode_state_read_once(inode) & I_DIRTY_DATASYNC) || 4226 4226 offset + length > i_size_read(inode)) 4227 4227 iomap->flags |= IOMAP_F_DIRTY; 4228 4228
+1 -1
fs/f2fs/inode.c
··· 569 569 if (!inode) 570 570 return ERR_PTR(-ENOMEM); 571 571 572 - if (!(inode->i_state & I_NEW)) { 572 + if (!(inode_state_read_once(inode) & I_NEW)) { 573 573 if (is_meta_ino(sbi, ino)) { 574 574 f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino); 575 575 set_sbi_flag(sbi, SBI_NEED_FSCK);
+2 -2
fs/f2fs/namei.c
··· 844 844 f2fs_i_links_write(inode, false); 845 845 846 846 spin_lock(&inode->i_lock); 847 - inode->i_state |= I_LINKABLE; 847 + inode_state_set(inode, I_LINKABLE); 848 848 spin_unlock(&inode->i_lock); 849 849 } else { 850 850 if (file) ··· 1057 1057 goto put_out_dir; 1058 1058 1059 1059 spin_lock(&whiteout->i_lock); 1060 - whiteout->i_state &= ~I_LINKABLE; 1060 + inode_state_clear(whiteout, I_LINKABLE); 1061 1061 spin_unlock(&whiteout->i_lock); 1062 1062 1063 1063 iput(whiteout);
+1 -1
fs/f2fs/super.c
··· 1798 1798 * - f2fs_gc -> iput -> evict 1799 1799 * - inode_wait_for_writeback(inode) 1800 1800 */ 1801 - if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) { 1801 + if ((!inode_unhashed(inode) && inode_state_read(inode) & I_SYNC)) { 1802 1802 if (!inode->i_nlink && !is_bad_inode(inode)) { 1803 1803 /* to avoid evict_inode call simultaneously */ 1804 1804 __iget(inode);
+1 -1
fs/freevxfs/vxfs_inode.c
··· 258 258 ip = iget_locked(sbp, ino); 259 259 if (!ip) 260 260 return ERR_PTR(-ENOMEM); 261 - if (!(ip->i_state & I_NEW)) 261 + if (!(inode_state_read_once(ip) & I_NEW)) 262 262 return ip; 263 263 264 264 vip = VXFS_INO(ip);
+70 -62
fs/fs-writeback.c
··· 121 121 { 122 122 assert_spin_locked(&wb->list_lock); 123 123 assert_spin_locked(&inode->i_lock); 124 - WARN_ON_ONCE(inode->i_state & I_FREEING); 124 + WARN_ON_ONCE(inode_state_read(inode) & I_FREEING); 125 125 126 126 list_move(&inode->i_io_list, head); 127 127 ··· 304 304 { 305 305 assert_spin_locked(&wb->list_lock); 306 306 assert_spin_locked(&inode->i_lock); 307 - WARN_ON_ONCE(inode->i_state & I_FREEING); 307 + WARN_ON_ONCE(inode_state_read(inode) & I_FREEING); 308 308 309 - inode->i_state &= ~I_SYNC_QUEUED; 309 + inode_state_clear(inode, I_SYNC_QUEUED); 310 310 if (wb != &wb->bdi->wb) 311 311 list_move(&inode->i_io_list, &wb->b_attached); 312 312 else ··· 408 408 * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction 409 409 * path owns the inode and we shouldn't modify ->i_io_list. 410 410 */ 411 - if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE))) 411 + if (unlikely(inode_state_read(inode) & (I_FREEING | I_WILL_FREE))) 412 412 goto skip_switch; 413 413 414 414 trace_inode_switch_wbs(inode, old_wb, new_wb); ··· 451 451 if (!list_empty(&inode->i_io_list)) { 452 452 inode->i_wb = new_wb; 453 453 454 - if (inode->i_state & I_DIRTY_ALL) { 454 + if (inode_state_read(inode) & I_DIRTY_ALL) { 455 455 /* 456 456 * We need to keep b_dirty list sorted by 457 457 * dirtied_time_when. However properly sorting the ··· 476 476 switched = true; 477 477 skip_switch: 478 478 /* 479 - * Paired with load_acquire in unlocked_inode_to_wb_begin() and 479 + * Paired with an acquire fence in unlocked_inode_to_wb_begin() and 480 480 * ensures that the new wb is visible if they see !I_WB_SWITCH. 
481 481 */ 482 - smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH); 482 + smp_wmb(); 483 + inode_state_clear(inode, I_WB_SWITCH); 483 484 484 485 xa_unlock_irq(&mapping->i_pages); 485 486 spin_unlock(&inode->i_lock); ··· 601 600 /* while holding I_WB_SWITCH, no one else can update the association */ 602 601 spin_lock(&inode->i_lock); 603 602 if (!(inode->i_sb->s_flags & SB_ACTIVE) || 604 - inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) || 603 + inode_state_read(inode) & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) || 605 604 inode_to_wb(inode) == new_wb) { 606 605 spin_unlock(&inode->i_lock); 607 606 return false; 608 607 } 609 - inode->i_state |= I_WB_SWITCH; 608 + inode_state_set(inode, I_WB_SWITCH); 610 609 __iget(inode); 611 610 spin_unlock(&inode->i_lock); 612 611 ··· 636 635 struct bdi_writeback *new_wb = NULL; 637 636 638 637 /* noop if seems to be already in progress */ 639 - if (inode->i_state & I_WB_SWITCH) 638 + if (inode_state_read_once(inode) & I_WB_SWITCH) 640 639 return; 641 640 642 641 /* avoid queueing a new switch if too many are already in flight */ ··· 1237 1236 { 1238 1237 assert_spin_locked(&wb->list_lock); 1239 1238 assert_spin_locked(&inode->i_lock); 1240 - WARN_ON_ONCE(inode->i_state & I_FREEING); 1239 + WARN_ON_ONCE(inode_state_read(inode) & I_FREEING); 1241 1240 1242 - inode->i_state &= ~I_SYNC_QUEUED; 1241 + inode_state_clear(inode, I_SYNC_QUEUED); 1243 1242 list_del_init(&inode->i_io_list); 1244 1243 wb_io_lists_depopulated(wb); 1245 1244 } ··· 1349 1348 { 1350 1349 struct bdi_writeback *wb; 1351 1350 1351 + /* 1352 + * FIXME: ext4 can call here from ext4_evict_inode() after evict() already 1353 + * unlinked the inode. 
1354 + */ 1355 + if (list_empty_careful(&inode->i_io_list)) 1356 + return; 1357 + 1352 1358 wb = inode_to_wb_and_lock_list(inode); 1353 1359 spin_lock(&inode->i_lock); 1354 1360 1355 - inode->i_state &= ~I_SYNC_QUEUED; 1361 + inode_state_clear(inode, I_SYNC_QUEUED); 1356 1362 list_del_init(&inode->i_io_list); 1357 1363 wb_io_lists_depopulated(wb); 1358 1364 ··· 1417 1409 { 1418 1410 assert_spin_locked(&inode->i_lock); 1419 1411 1420 - inode->i_state &= ~I_SYNC_QUEUED; 1412 + inode_state_clear(inode, I_SYNC_QUEUED); 1421 1413 /* 1422 1414 * When the inode is being freed just don't bother with dirty list 1423 1415 * tracking. Flush worker will ignore this inode anyway and it will 1424 1416 * trigger assertions in inode_io_list_move_locked(). 1425 1417 */ 1426 - if (inode->i_state & I_FREEING) { 1418 + if (inode_state_read(inode) & I_FREEING) { 1427 1419 list_del_init(&inode->i_io_list); 1428 1420 wb_io_lists_depopulated(wb); 1429 1421 return; ··· 1457 1449 { 1458 1450 assert_spin_locked(&inode->i_lock); 1459 1451 1460 - inode->i_state &= ~I_SYNC; 1452 + inode_state_clear(inode, I_SYNC); 1461 1453 /* If inode is clean an unused, put it into LRU now... */ 1462 - inode_add_lru(inode); 1454 + inode_lru_list_add(inode); 1463 1455 /* Called with inode->i_lock which ensures memory ordering. 
*/ 1464 1456 inode_wake_up_bit(inode, __I_SYNC); 1465 1457 } ··· 1501 1493 spin_lock(&inode->i_lock); 1502 1494 list_move(&inode->i_io_list, &tmp); 1503 1495 moved++; 1504 - inode->i_state |= I_SYNC_QUEUED; 1496 + inode_state_set(inode, I_SYNC_QUEUED); 1505 1497 spin_unlock(&inode->i_lock); 1506 1498 if (sb_is_blkdev_sb(inode->i_sb)) 1507 1499 continue; ··· 1587 1579 1588 1580 assert_spin_locked(&inode->i_lock); 1589 1581 1590 - if (!(inode->i_state & I_SYNC)) 1582 + if (!(inode_state_read(inode) & I_SYNC)) 1591 1583 return; 1592 1584 1593 1585 wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC); 1594 1586 for (;;) { 1595 1587 prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE); 1596 1588 /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */ 1597 - if (!(inode->i_state & I_SYNC)) 1589 + if (!(inode_state_read(inode) & I_SYNC)) 1598 1590 break; 1599 1591 spin_unlock(&inode->i_lock); 1600 1592 schedule(); ··· 1620 1612 wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC); 1621 1613 prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE); 1622 1614 /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */ 1623 - sleep = !!(inode->i_state & I_SYNC); 1615 + sleep = !!(inode_state_read(inode) & I_SYNC); 1624 1616 spin_unlock(&inode->i_lock); 1625 1617 if (sleep) 1626 1618 schedule(); ··· 1639 1631 struct writeback_control *wbc, 1640 1632 unsigned long dirtied_before) 1641 1633 { 1642 - if (inode->i_state & I_FREEING) 1634 + if (inode_state_read(inode) & I_FREEING) 1643 1635 return; 1644 1636 1645 1637 /* ··· 1647 1639 * shot. If still dirty, it will be redirty_tail()'ed below. Update 1648 1640 * the dirty time to prevent enqueue and sync it again. 
1649 1641 */ 1650 - if ((inode->i_state & I_DIRTY) && 1642 + if ((inode_state_read(inode) & I_DIRTY) && 1651 1643 (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)) 1652 1644 inode->dirtied_when = jiffies; 1653 1645 ··· 1658 1650 * is odd for clean inodes, it can happen for some 1659 1651 * filesystems so handle that gracefully. 1660 1652 */ 1661 - if (inode->i_state & I_DIRTY_ALL) 1653 + if (inode_state_read(inode) & I_DIRTY_ALL) 1662 1654 redirty_tail_locked(inode, wb); 1663 1655 else 1664 1656 inode_cgwb_move_to_attached(inode, wb); ··· 1684 1676 */ 1685 1677 redirty_tail_locked(inode, wb); 1686 1678 } 1687 - } else if (inode->i_state & I_DIRTY) { 1679 + } else if (inode_state_read(inode) & I_DIRTY) { 1688 1680 /* 1689 1681 * Filesystems can dirty the inode during writeback operations, 1690 1682 * such as delayed allocation during submission or metadata 1691 1683 * updates after data IO completion. 1692 1684 */ 1693 1685 redirty_tail_locked(inode, wb); 1694 - } else if (inode->i_state & I_DIRTY_TIME) { 1686 + } else if (inode_state_read(inode) & I_DIRTY_TIME) { 1695 1687 inode->dirtied_when = jiffies; 1696 1688 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); 1697 - inode->i_state &= ~I_SYNC_QUEUED; 1689 + inode_state_clear(inode, I_SYNC_QUEUED); 1698 1690 } else { 1699 1691 /* The inode is clean. Remove from writeback lists. */ 1700 1692 inode_cgwb_move_to_attached(inode, wb); ··· 1720 1712 unsigned dirty; 1721 1713 int ret; 1722 1714 1723 - WARN_ON(!(inode->i_state & I_SYNC)); 1715 + WARN_ON(!(inode_state_read_once(inode) & I_SYNC)); 1724 1716 1725 1717 trace_writeback_single_inode_start(inode, wbc, nr_to_write); 1726 1718 ··· 1744 1736 * mark_inode_dirty_sync() to notify the filesystem about it and to 1745 1737 * change I_DIRTY_TIME into I_DIRTY_SYNC. 
1746 1738 */ 1747 - if ((inode->i_state & I_DIRTY_TIME) && 1739 + if ((inode_state_read_once(inode) & I_DIRTY_TIME) && 1748 1740 (wbc->sync_mode == WB_SYNC_ALL || 1749 1741 time_after(jiffies, inode->dirtied_time_when + 1750 1742 dirtytime_expire_interval * HZ))) { ··· 1759 1751 * after handling timestamp expiration, as that may dirty the inode too. 1760 1752 */ 1761 1753 spin_lock(&inode->i_lock); 1762 - dirty = inode->i_state & I_DIRTY; 1763 - inode->i_state &= ~dirty; 1754 + dirty = inode_state_read(inode) & I_DIRTY; 1755 + inode_state_clear(inode, dirty); 1764 1756 1765 1757 /* 1766 1758 * Paired with smp_mb() in __mark_inode_dirty(). This allows ··· 1776 1768 smp_mb(); 1777 1769 1778 1770 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 1779 - inode->i_state |= I_DIRTY_PAGES; 1780 - else if (unlikely(inode->i_state & I_PINNING_NETFS_WB)) { 1781 - if (!(inode->i_state & I_DIRTY_PAGES)) { 1782 - inode->i_state &= ~I_PINNING_NETFS_WB; 1771 + inode_state_set(inode, I_DIRTY_PAGES); 1772 + else if (unlikely(inode_state_read(inode) & I_PINNING_NETFS_WB)) { 1773 + if (!(inode_state_read(inode) & I_DIRTY_PAGES)) { 1774 + inode_state_clear(inode, I_PINNING_NETFS_WB); 1783 1775 wbc->unpinned_netfs_wb = true; 1784 1776 dirty |= I_PINNING_NETFS_WB; /* Cause write_inode */ 1785 1777 } ··· 1815 1807 1816 1808 spin_lock(&inode->i_lock); 1817 1809 if (!icount_read(inode)) 1818 - WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); 1810 + WARN_ON(!(inode_state_read(inode) & (I_WILL_FREE | I_FREEING))); 1819 1811 else 1820 - WARN_ON(inode->i_state & I_WILL_FREE); 1812 + WARN_ON(inode_state_read(inode) & I_WILL_FREE); 1821 1813 1822 - if (inode->i_state & I_SYNC) { 1814 + if (inode_state_read(inode) & I_SYNC) { 1823 1815 /* 1824 1816 * Writeback is already running on the inode. For WB_SYNC_NONE, 1825 1817 * that's enough and we can just return. 
For WB_SYNC_ALL, we ··· 1830 1822 goto out; 1831 1823 inode_wait_for_writeback(inode); 1832 1824 } 1833 - WARN_ON(inode->i_state & I_SYNC); 1825 + WARN_ON(inode_state_read(inode) & I_SYNC); 1834 1826 /* 1835 1827 * If the inode is already fully clean, then there's nothing to do. 1836 1828 * ··· 1838 1830 * still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If 1839 1831 * there are any such pages, we'll need to wait for them. 1840 1832 */ 1841 - if (!(inode->i_state & I_DIRTY_ALL) && 1833 + if (!(inode_state_read(inode) & I_DIRTY_ALL) && 1842 1834 (wbc->sync_mode != WB_SYNC_ALL || 1843 1835 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) 1844 1836 goto out; 1845 - inode->i_state |= I_SYNC; 1837 + inode_state_set(inode, I_SYNC); 1846 1838 wbc_attach_and_unlock_inode(wbc, inode); 1847 1839 1848 1840 ret = __writeback_single_inode(inode, wbc); ··· 1855 1847 * If the inode is freeing, its i_io_list shoudn't be updated 1856 1848 * as it can be finally deleted at this moment. 1857 1849 */ 1858 - if (!(inode->i_state & I_FREEING)) { 1850 + if (!(inode_state_read(inode) & I_FREEING)) { 1859 1851 /* 1860 1852 * If the inode is now fully clean, then it can be safely 1861 1853 * removed from its writeback list (if any). Otherwise the 1862 1854 * flusher threads are responsible for the writeback lists. 
1863 1855 */ 1864 - if (!(inode->i_state & I_DIRTY_ALL)) 1856 + if (!(inode_state_read(inode) & I_DIRTY_ALL)) 1865 1857 inode_cgwb_move_to_attached(inode, wb); 1866 - else if (!(inode->i_state & I_SYNC_QUEUED)) { 1867 - if ((inode->i_state & I_DIRTY)) 1858 + else if (!(inode_state_read(inode) & I_SYNC_QUEUED)) { 1859 + if ((inode_state_read(inode) & I_DIRTY)) 1868 1860 redirty_tail_locked(inode, wb); 1869 - else if (inode->i_state & I_DIRTY_TIME) { 1861 + else if (inode_state_read(inode) & I_DIRTY_TIME) { 1870 1862 inode->dirtied_when = jiffies; 1871 1863 inode_io_list_move_locked(inode, 1872 1864 wb, ··· 1975 1967 * kind writeout is handled by the freer. 1976 1968 */ 1977 1969 spin_lock(&inode->i_lock); 1978 - if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 1970 + if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) { 1979 1971 redirty_tail_locked(inode, wb); 1980 1972 spin_unlock(&inode->i_lock); 1981 1973 continue; 1982 1974 } 1983 - if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { 1975 + if ((inode_state_read(inode) & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { 1984 1976 /* 1985 1977 * If this inode is locked for writeback and we are not 1986 1978 * doing writeback-for-data-integrity, move it to ··· 2002 1994 * are doing WB_SYNC_NONE writeback. So this catches only the 2003 1995 * WB_SYNC_ALL case. 2004 1996 */ 2005 - if (inode->i_state & I_SYNC) { 1997 + if (inode_state_read(inode) & I_SYNC) { 2006 1998 /* Wait for I_SYNC. This function drops i_lock... 
*/ 2007 1999 inode_sleep_on_writeback(inode); 2008 2000 /* Inode may be gone, start again */ 2009 2001 spin_lock(&wb->list_lock); 2010 2002 continue; 2011 2003 } 2012 - inode->i_state |= I_SYNC; 2004 + inode_state_set(inode, I_SYNC); 2013 2005 wbc_attach_and_unlock_inode(&wbc, inode); 2014 2006 2015 2007 write_chunk = writeback_chunk_size(wb, work); ··· 2047 2039 */ 2048 2040 tmp_wb = inode_to_wb_and_lock_list(inode); 2049 2041 spin_lock(&inode->i_lock); 2050 - if (!(inode->i_state & I_DIRTY_ALL)) 2042 + if (!(inode_state_read(inode) & I_DIRTY_ALL)) 2051 2043 total_wrote++; 2052 2044 requeue_inode(inode, tmp_wb, &wbc, dirtied_before); 2053 2045 inode_sync_complete(inode); ··· 2553 2545 * We tell ->dirty_inode callback that timestamps need to 2554 2546 * be updated by setting I_DIRTY_TIME in flags. 2555 2547 */ 2556 - if (inode->i_state & I_DIRTY_TIME) { 2548 + if (inode_state_read_once(inode) & I_DIRTY_TIME) { 2557 2549 spin_lock(&inode->i_lock); 2558 - if (inode->i_state & I_DIRTY_TIME) { 2559 - inode->i_state &= ~I_DIRTY_TIME; 2550 + if (inode_state_read(inode) & I_DIRTY_TIME) { 2551 + inode_state_clear(inode, I_DIRTY_TIME); 2560 2552 flags |= I_DIRTY_TIME; 2561 2553 } 2562 2554 spin_unlock(&inode->i_lock); ··· 2593 2585 */ 2594 2586 smp_mb(); 2595 2587 2596 - if ((inode->i_state & flags) == flags) 2588 + if ((inode_state_read_once(inode) & flags) == flags) 2597 2589 return; 2598 2590 2599 2591 spin_lock(&inode->i_lock); 2600 - if ((inode->i_state & flags) != flags) { 2601 - const int was_dirty = inode->i_state & I_DIRTY; 2592 + if ((inode_state_read(inode) & flags) != flags) { 2593 + const int was_dirty = inode_state_read(inode) & I_DIRTY; 2602 2594 2603 2595 inode_attach_wb(inode, NULL); 2604 2596 2605 - inode->i_state |= flags; 2597 + inode_state_set(inode, flags); 2606 2598 2607 2599 /* 2608 2600 * Grab inode's wb early because it requires dropping i_lock and we ··· 2621 2613 * the inode it will place it on the appropriate superblock 2622 2614 * list, based 
upon its state. 2623 2615 */ 2624 - if (inode->i_state & I_SYNC_QUEUED) 2616 + if (inode_state_read(inode) & I_SYNC_QUEUED) 2625 2617 goto out_unlock; 2626 2618 2627 2619 /* ··· 2632 2624 if (inode_unhashed(inode)) 2633 2625 goto out_unlock; 2634 2626 } 2635 - if (inode->i_state & I_FREEING) 2627 + if (inode_state_read(inode) & I_FREEING) 2636 2628 goto out_unlock; 2637 2629 2638 2630 /* ··· 2647 2639 if (dirtytime) 2648 2640 inode->dirtied_time_when = jiffies; 2649 2641 2650 - if (inode->i_state & I_DIRTY) 2642 + if (inode_state_read(inode) & I_DIRTY) 2651 2643 dirty_list = &wb->b_dirty; 2652 2644 else 2653 2645 dirty_list = &wb->b_dirty_time; ··· 2744 2736 spin_unlock_irq(&sb->s_inode_wblist_lock); 2745 2737 2746 2738 spin_lock(&inode->i_lock); 2747 - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { 2739 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { 2748 2740 spin_unlock(&inode->i_lock); 2749 2741 2750 2742 spin_lock_irq(&sb->s_inode_wblist_lock);
+2 -2
fs/fuse/inode.c
··· 160 160 struct fuse_inode *fi = get_fuse_inode(inode); 161 161 162 162 /* Will write inode on close/munmap and in all other dirtiers */ 163 - WARN_ON(inode->i_state & I_DIRTY_INODE); 163 + WARN_ON(inode_state_read_once(inode) & I_DIRTY_INODE); 164 164 165 165 if (FUSE_IS_DAX(inode)) 166 166 dax_break_layout_final(inode); ··· 505 505 if (!inode) 506 506 return NULL; 507 507 508 - if ((inode->i_state & I_NEW)) { 508 + if ((inode_state_read_once(inode) & I_NEW)) { 509 509 inode->i_flags |= S_NOATIME; 510 510 if (!fc->writeback_cache || !S_ISREG(attr->mode)) 511 511 inode->i_flags |= S_NOCMTIME;
+1 -1
fs/gfs2/file.c
··· 744 744 { 745 745 struct address_space *mapping = file->f_mapping; 746 746 struct inode *inode = mapping->host; 747 - int sync_state = inode->i_state & I_DIRTY; 747 + int sync_state = inode_state_read_once(inode) & I_DIRTY; 748 748 struct gfs2_inode *ip = GFS2_I(inode); 749 749 int ret = 0, ret1 = 0; 750 750
+1 -1
fs/gfs2/glock.c
··· 957 957 ip = NULL; 958 958 spin_unlock(&gl->gl_lockref.lock); 959 959 if (ip) { 960 - wait_on_inode(&ip->i_inode); 960 + wait_on_new_inode(&ip->i_inode); 961 961 if (is_bad_inode(&ip->i_inode)) { 962 962 iput(&ip->i_inode); 963 963 ip = NULL;
+1 -1
fs/gfs2/glops.c
··· 394 394 u16 height, depth; 395 395 umode_t mode = be32_to_cpu(str->di_mode); 396 396 struct inode *inode = &ip->i_inode; 397 - bool is_new = inode->i_state & I_NEW; 397 + bool is_new = inode_state_read_once(inode) & I_NEW; 398 398 399 399 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) { 400 400 gfs2_consist_inode(ip);
+2 -2
fs/gfs2/inode.c
··· 127 127 128 128 ip = GFS2_I(inode); 129 129 130 - if (inode->i_state & I_NEW) { 130 + if (inode_state_read_once(inode) & I_NEW) { 131 131 struct gfs2_sbd *sdp = GFS2_SB(inode); 132 132 struct gfs2_glock *io_gl; 133 133 int extra_flags = 0; ··· 924 924 gfs2_dir_no_add(&da); 925 925 gfs2_glock_dq_uninit(&d_gh); 926 926 if (!IS_ERR_OR_NULL(inode)) { 927 - if (inode->i_state & I_NEW) 927 + if (inode_state_read_once(inode) & I_NEW) 928 928 iget_failed(inode); 929 929 else 930 930 iput(inode);
+1 -1
fs/gfs2/ops_fstype.c
··· 1751 1751 spin_lock(&sb->s_inode_list_lock); 1752 1752 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 1753 1753 spin_lock(&inode->i_lock); 1754 - if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) && 1754 + if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) && 1755 1755 !need_resched()) { 1756 1756 spin_unlock(&inode->i_lock); 1757 1757 continue;
+1 -1
fs/hfs/btree.c
··· 42 42 tree->inode = iget_locked(sb, id); 43 43 if (!tree->inode) 44 44 goto free_tree; 45 - BUG_ON(!(tree->inode->i_state & I_NEW)); 45 + BUG_ON(!(inode_state_read_once(tree->inode) & I_NEW)); 46 46 { 47 47 struct hfs_mdb *mdb = HFS_SB(sb)->mdb; 48 48 HFS_I(tree->inode)->flags = 0;
+1 -1
fs/hfs/inode.c
··· 412 412 return NULL; 413 413 } 414 414 inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data); 415 - if (inode && (inode->i_state & I_NEW)) 415 + if (inode && (inode_state_read_once(inode) & I_NEW)) 416 416 unlock_new_inode(inode); 417 417 return inode; 418 418 }
+1 -1
fs/hfsplus/super.c
··· 65 65 inode = iget_locked(sb, ino); 66 66 if (!inode) 67 67 return ERR_PTR(-ENOMEM); 68 - if (!(inode->i_state & I_NEW)) 68 + if (!(inode_state_read_once(inode) & I_NEW)) 69 69 return inode; 70 70 71 71 atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+1 -1
fs/hostfs/hostfs_kern.c
··· 581 581 if (!inode) 582 582 return ERR_PTR(-ENOMEM); 583 583 584 - if (inode->i_state & I_NEW) { 584 + if (inode_state_read_once(inode) & I_NEW) { 585 585 unlock_new_inode(inode); 586 586 } else { 587 587 spin_lock(&inode->i_lock);
+1 -1
fs/hpfs/dir.c
··· 247 247 result = ERR_PTR(-ENOMEM); 248 248 goto bail1; 249 249 } 250 - if (result->i_state & I_NEW) { 250 + if (inode_state_read_once(result) & I_NEW) { 251 251 hpfs_init_inode(result); 252 252 if (de->directory) 253 253 hpfs_read_inode(result);
+1 -1
fs/hpfs/inode.c
··· 196 196 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); 197 197 if (parent) { 198 198 hpfs_inode->i_dirty = 0; 199 - if (parent->i_state & I_NEW) { 199 + if (inode_state_read_once(parent) & I_NEW) { 200 200 hpfs_init_inode(parent); 201 201 hpfs_read_inode(parent); 202 202 unlock_new_inode(parent);
+145 -108
fs/inode.c
··· 233 233 inode->i_sb = sb; 234 234 inode->i_blkbits = sb->s_blocksize_bits; 235 235 inode->i_flags = 0; 236 - inode->i_state = 0; 236 + inode_state_assign_raw(inode, 0); 237 237 atomic64_set(&inode->i_sequence, 0); 238 238 atomic_set(&inode->i_count, 1); 239 239 inode->i_op = &empty_iops; ··· 471 471 void inc_nlink(struct inode *inode) 472 472 { 473 473 if (unlikely(inode->i_nlink == 0)) { 474 - WARN_ON(!(inode->i_state & I_LINKABLE)); 474 + WARN_ON(!(inode_state_read_once(inode) & I_LINKABLE)); 475 475 atomic_long_dec(&inode->i_sb->s_remove_count); 476 476 } 477 477 ··· 530 530 } 531 531 EXPORT_SYMBOL(ihold); 532 532 533 - static void __inode_add_lru(struct inode *inode, bool rotate) 534 - { 535 - if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE)) 536 - return; 537 - if (icount_read(inode)) 538 - return; 539 - if (!(inode->i_sb->s_flags & SB_ACTIVE)) 540 - return; 541 - if (!mapping_shrinkable(&inode->i_data)) 542 - return; 543 - 544 - if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru)) 545 - this_cpu_inc(nr_unused); 546 - else if (rotate) 547 - inode->i_state |= I_REFERENCED; 548 - } 549 - 550 533 struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe, 551 534 struct inode *inode, u32 bit) 552 535 { ··· 541 558 } 542 559 EXPORT_SYMBOL(inode_bit_waitqueue); 543 560 561 + void wait_on_new_inode(struct inode *inode) 562 + { 563 + struct wait_bit_queue_entry wqe; 564 + struct wait_queue_head *wq_head; 565 + 566 + spin_lock(&inode->i_lock); 567 + if (!(inode_state_read(inode) & I_NEW)) { 568 + spin_unlock(&inode->i_lock); 569 + return; 570 + } 571 + 572 + wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW); 573 + for (;;) { 574 + prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE); 575 + if (!(inode_state_read(inode) & I_NEW)) 576 + break; 577 + spin_unlock(&inode->i_lock); 578 + schedule(); 579 + spin_lock(&inode->i_lock); 580 + } 581 + finish_wait(wq_head, &wqe.wq_entry); 582 + 
WARN_ON(inode_state_read(inode) & I_NEW); 583 + spin_unlock(&inode->i_lock); 584 + } 585 + EXPORT_SYMBOL(wait_on_new_inode); 586 + 587 + static void __inode_lru_list_add(struct inode *inode, bool rotate) 588 + { 589 + lockdep_assert_held(&inode->i_lock); 590 + 591 + if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE)) 592 + return; 593 + if (icount_read(inode)) 594 + return; 595 + if (!(inode->i_sb->s_flags & SB_ACTIVE)) 596 + return; 597 + if (!mapping_shrinkable(&inode->i_data)) 598 + return; 599 + 600 + if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru)) 601 + this_cpu_inc(nr_unused); 602 + else if (rotate) 603 + inode_state_set(inode, I_REFERENCED); 604 + } 605 + 544 606 /* 545 607 * Add inode to LRU if needed (inode is unused and clean). 546 - * 547 - * Needs inode->i_lock held. 548 608 */ 549 - void inode_add_lru(struct inode *inode) 609 + void inode_lru_list_add(struct inode *inode) 550 610 { 551 - __inode_add_lru(inode, false); 611 + __inode_lru_list_add(inode, false); 552 612 } 553 613 554 614 static void inode_lru_list_del(struct inode *inode) 555 615 { 616 + if (list_empty(&inode->i_lru)) 617 + return; 618 + 556 619 if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru)) 557 620 this_cpu_dec(nr_unused); 558 621 } ··· 606 577 static void inode_pin_lru_isolating(struct inode *inode) 607 578 { 608 579 lockdep_assert_held(&inode->i_lock); 609 - WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE)); 610 - inode->i_state |= I_LRU_ISOLATING; 580 + WARN_ON(inode_state_read(inode) & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE)); 581 + inode_state_set(inode, I_LRU_ISOLATING); 611 582 } 612 583 613 584 static void inode_unpin_lru_isolating(struct inode *inode) 614 585 { 615 586 spin_lock(&inode->i_lock); 616 - WARN_ON(!(inode->i_state & I_LRU_ISOLATING)); 617 - inode->i_state &= ~I_LRU_ISOLATING; 587 + WARN_ON(!(inode_state_read(inode) & I_LRU_ISOLATING)); 588 + inode_state_clear(inode, I_LRU_ISOLATING); 
618 589 /* Called with inode->i_lock which ensures memory ordering. */ 619 590 inode_wake_up_bit(inode, __I_LRU_ISOLATING); 620 591 spin_unlock(&inode->i_lock); ··· 626 597 struct wait_queue_head *wq_head; 627 598 628 599 lockdep_assert_held(&inode->i_lock); 629 - if (!(inode->i_state & I_LRU_ISOLATING)) 600 + if (!(inode_state_read(inode) & I_LRU_ISOLATING)) 630 601 return; 631 602 632 603 wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING); ··· 636 607 * Checking I_LRU_ISOLATING with inode->i_lock guarantees 637 608 * memory ordering. 638 609 */ 639 - if (!(inode->i_state & I_LRU_ISOLATING)) 610 + if (!(inode_state_read(inode) & I_LRU_ISOLATING)) 640 611 break; 641 612 spin_unlock(&inode->i_lock); 642 613 schedule(); 643 614 spin_lock(&inode->i_lock); 644 615 } 645 616 finish_wait(wq_head, &wqe.wq_entry); 646 - WARN_ON(inode->i_state & I_LRU_ISOLATING); 617 + WARN_ON(inode_state_read(inode) & I_LRU_ISOLATING); 647 618 } 648 619 649 620 /** ··· 790 761 */ 791 762 xa_unlock_irq(&inode->i_data.i_pages); 792 763 BUG_ON(!list_empty(&inode->i_data.i_private_list)); 793 - BUG_ON(!(inode->i_state & I_FREEING)); 794 - BUG_ON(inode->i_state & I_CLEAR); 764 + BUG_ON(!(inode_state_read_once(inode) & I_FREEING)); 765 + BUG_ON(inode_state_read_once(inode) & I_CLEAR); 795 766 BUG_ON(!list_empty(&inode->i_wb_list)); 796 767 /* don't need i_lock here, no concurrent mods to i_state */ 797 - inode->i_state = I_FREEING | I_CLEAR; 768 + inode_state_assign_raw(inode, I_FREEING | I_CLEAR); 798 769 } 799 770 EXPORT_SYMBOL(clear_inode); 800 771 ··· 815 786 { 816 787 const struct super_operations *op = inode->i_sb->s_op; 817 788 818 - BUG_ON(!(inode->i_state & I_FREEING)); 789 + BUG_ON(!(inode_state_read_once(inode) & I_FREEING)); 819 790 BUG_ON(!list_empty(&inode->i_lru)); 820 791 821 - if (!list_empty(&inode->i_io_list)) 822 - inode_io_list_del(inode); 823 - 792 + inode_io_list_del(inode); 824 793 inode_sb_list_del(inode); 825 794 826 795 spin_lock(&inode->i_lock); ··· 856 829 
* This also means we don't need any fences for the call below. 857 830 */ 858 831 inode_wake_up_bit(inode, __I_NEW); 859 - BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); 832 + BUG_ON(inode_state_read_once(inode) != (I_FREEING | I_CLEAR)); 860 833 861 834 destroy_inode(inode); 862 835 } ··· 906 879 spin_unlock(&inode->i_lock); 907 880 continue; 908 881 } 909 - if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 882 + if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) { 910 883 spin_unlock(&inode->i_lock); 911 884 continue; 912 885 } 913 886 914 - inode->i_state |= I_FREEING; 887 + inode_state_set(inode, I_FREEING); 915 888 inode_lru_list_del(inode); 916 889 spin_unlock(&inode->i_lock); 917 890 list_add(&inode->i_lru, &dispose); ··· 965 938 * sync, or the last page cache deletion will requeue them. 966 939 */ 967 940 if (icount_read(inode) || 968 - (inode->i_state & ~I_REFERENCED) || 941 + (inode_state_read(inode) & ~I_REFERENCED) || 969 942 !mapping_shrinkable(&inode->i_data)) { 970 943 list_lru_isolate(lru, &inode->i_lru); 971 944 spin_unlock(&inode->i_lock); ··· 974 947 } 975 948 976 949 /* Recently referenced inodes get one more pass */ 977 - if (inode->i_state & I_REFERENCED) { 978 - inode->i_state &= ~I_REFERENCED; 950 + if (inode_state_read(inode) & I_REFERENCED) { 951 + inode_state_clear(inode, I_REFERENCED); 979 952 spin_unlock(&inode->i_lock); 980 953 return LRU_ROTATE; 981 954 } ··· 1002 975 return LRU_RETRY; 1003 976 } 1004 977 1005 - WARN_ON(inode->i_state & I_NEW); 1006 - inode->i_state |= I_FREEING; 978 + WARN_ON(inode_state_read(inode) & I_NEW); 979 + inode_state_set(inode, I_FREEING); 1007 980 list_lru_isolate_move(lru, &inode->i_lru, freeable); 1008 981 spin_unlock(&inode->i_lock); 1009 982 ··· 1035 1008 static struct inode *find_inode(struct super_block *sb, 1036 1009 struct hlist_head *head, 1037 1010 int (*test)(struct inode *, void *), 1038 - void *data, bool is_inode_hash_locked) 1011 + void *data, bool 
is_inode_hash_locked, 1012 + bool *isnew) 1039 1013 { 1040 1014 struct inode *inode = NULL; 1041 1015 ··· 1053 1025 if (!test(inode, data)) 1054 1026 continue; 1055 1027 spin_lock(&inode->i_lock); 1056 - if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 1028 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) { 1057 1029 __wait_on_freeing_inode(inode, is_inode_hash_locked); 1058 1030 goto repeat; 1059 1031 } 1060 - if (unlikely(inode->i_state & I_CREATING)) { 1032 + if (unlikely(inode_state_read(inode) & I_CREATING)) { 1061 1033 spin_unlock(&inode->i_lock); 1062 1034 rcu_read_unlock(); 1063 1035 return ERR_PTR(-ESTALE); 1064 1036 } 1065 1037 __iget(inode); 1038 + *isnew = !!(inode_state_read(inode) & I_NEW); 1066 1039 spin_unlock(&inode->i_lock); 1067 1040 rcu_read_unlock(); 1068 1041 return inode; ··· 1078 1049 */ 1079 1050 static struct inode *find_inode_fast(struct super_block *sb, 1080 1051 struct hlist_head *head, unsigned long ino, 1081 - bool is_inode_hash_locked) 1052 + bool is_inode_hash_locked, bool *isnew) 1082 1053 { 1083 1054 struct inode *inode = NULL; 1084 1055 ··· 1095 1066 if (inode->i_sb != sb) 1096 1067 continue; 1097 1068 spin_lock(&inode->i_lock); 1098 - if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 1069 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) { 1099 1070 __wait_on_freeing_inode(inode, is_inode_hash_locked); 1100 1071 goto repeat; 1101 1072 } 1102 - if (unlikely(inode->i_state & I_CREATING)) { 1073 + if (unlikely(inode_state_read(inode) & I_CREATING)) { 1103 1074 spin_unlock(&inode->i_lock); 1104 1075 rcu_read_unlock(); 1105 1076 return ERR_PTR(-ESTALE); 1106 1077 } 1107 1078 __iget(inode); 1079 + *isnew = !!(inode_state_read(inode) & I_NEW); 1108 1080 spin_unlock(&inode->i_lock); 1109 1081 rcu_read_unlock(); 1110 1082 return inode; ··· 1210 1180 { 1211 1181 lockdep_annotate_inode_mutex_key(inode); 1212 1182 spin_lock(&inode->i_lock); 1213 - WARN_ON(!(inode->i_state & I_NEW)); 1214 - inode->i_state &= ~I_NEW & 
~I_CREATING; 1215 - /* 1216 - * Pairs with the barrier in prepare_to_wait_event() to make sure 1217 - * ___wait_var_event() either sees the bit cleared or 1218 - * waitqueue_active() check in wake_up_var() sees the waiter. 1219 - */ 1220 - smp_mb(); 1183 + WARN_ON(!(inode_state_read(inode) & I_NEW)); 1184 + inode_state_clear(inode, I_NEW | I_CREATING); 1221 1185 inode_wake_up_bit(inode, __I_NEW); 1222 1186 spin_unlock(&inode->i_lock); 1223 1187 } ··· 1221 1197 { 1222 1198 lockdep_annotate_inode_mutex_key(inode); 1223 1199 spin_lock(&inode->i_lock); 1224 - WARN_ON(!(inode->i_state & I_NEW)); 1225 - inode->i_state &= ~I_NEW; 1226 - /* 1227 - * Pairs with the barrier in prepare_to_wait_event() to make sure 1228 - * ___wait_var_event() either sees the bit cleared or 1229 - * waitqueue_active() check in wake_up_var() sees the waiter. 1230 - */ 1231 - smp_mb(); 1200 + WARN_ON(!(inode_state_read(inode) & I_NEW)); 1201 + inode_state_clear(inode, I_NEW); 1232 1202 inode_wake_up_bit(inode, __I_NEW); 1233 1203 spin_unlock(&inode->i_lock); 1234 1204 iput(inode); ··· 1278 1260 * @test: callback used for comparisons between inodes 1279 1261 * @set: callback used to initialize a new struct inode 1280 1262 * @data: opaque data pointer to pass to @test and @set 1263 + * @isnew: pointer to a bool which will indicate whether I_NEW is set 1281 1264 * 1282 1265 * Search for the inode specified by @hashval and @data in the inode cache, 1283 1266 * and if present return it with an increased reference count. 
This is a ··· 1297 1278 { 1298 1279 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval); 1299 1280 struct inode *old; 1281 + bool isnew; 1300 1282 1301 1283 might_sleep(); 1302 1284 1303 1285 again: 1304 1286 spin_lock(&inode_hash_lock); 1305 - old = find_inode(inode->i_sb, head, test, data, true); 1287 + old = find_inode(inode->i_sb, head, test, data, true, &isnew); 1306 1288 if (unlikely(old)) { 1307 1289 /* 1308 1290 * Uhhuh, somebody else created the same inode under us. ··· 1312 1292 spin_unlock(&inode_hash_lock); 1313 1293 if (IS_ERR(old)) 1314 1294 return NULL; 1315 - wait_on_inode(old); 1295 + if (unlikely(isnew)) 1296 + wait_on_new_inode(old); 1316 1297 if (unlikely(inode_unhashed(old))) { 1317 1298 iput(old); 1318 1299 goto again; ··· 1331 1310 * caller is responsible for filling in the contents 1332 1311 */ 1333 1312 spin_lock(&inode->i_lock); 1334 - inode->i_state |= I_NEW; 1313 + inode_state_set(inode, I_NEW); 1335 1314 hlist_add_head_rcu(&inode->i_hash, head); 1336 1315 spin_unlock(&inode->i_lock); 1337 1316 ··· 1404 1383 { 1405 1384 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1406 1385 struct inode *inode, *new; 1386 + bool isnew; 1407 1387 1408 1388 might_sleep(); 1409 1389 1410 1390 again: 1411 - inode = find_inode(sb, head, test, data, false); 1391 + inode = find_inode(sb, head, test, data, false, &isnew); 1412 1392 if (inode) { 1413 1393 if (IS_ERR(inode)) 1414 1394 return NULL; 1415 - wait_on_inode(inode); 1395 + if (unlikely(isnew)) 1396 + wait_on_new_inode(inode); 1416 1397 if (unlikely(inode_unhashed(inode))) { 1417 1398 iput(inode); 1418 1399 goto again; ··· 1449 1426 { 1450 1427 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1451 1428 struct inode *inode; 1429 + bool isnew; 1452 1430 1453 1431 might_sleep(); 1454 1432 1455 1433 again: 1456 - inode = find_inode_fast(sb, head, ino, false); 1434 + inode = find_inode_fast(sb, head, ino, false, &isnew); 1457 1435 if (inode) { 1458 1436 if 
(IS_ERR(inode)) 1459 1437 return NULL; 1460 - wait_on_inode(inode); 1438 + if (unlikely(isnew)) 1439 + wait_on_new_inode(inode); 1461 1440 if (unlikely(inode_unhashed(inode))) { 1462 1441 iput(inode); 1463 1442 goto again; ··· 1473 1448 1474 1449 spin_lock(&inode_hash_lock); 1475 1450 /* We released the lock, so.. */ 1476 - old = find_inode_fast(sb, head, ino, true); 1451 + old = find_inode_fast(sb, head, ino, true, &isnew); 1477 1452 if (!old) { 1478 1453 inode->i_ino = ino; 1479 1454 spin_lock(&inode->i_lock); 1480 - inode->i_state = I_NEW; 1455 + inode_state_assign(inode, I_NEW); 1481 1456 hlist_add_head_rcu(&inode->i_hash, head); 1482 1457 spin_unlock(&inode->i_lock); 1483 1458 spin_unlock(&inode_hash_lock); ··· 1499 1474 if (IS_ERR(old)) 1500 1475 return NULL; 1501 1476 inode = old; 1502 - wait_on_inode(inode); 1477 + if (unlikely(isnew)) 1478 + wait_on_new_inode(inode); 1503 1479 if (unlikely(inode_unhashed(inode))) { 1504 1480 iput(inode); 1505 1481 goto again; ··· 1571 1545 struct inode *igrab(struct inode *inode) 1572 1546 { 1573 1547 spin_lock(&inode->i_lock); 1574 - if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) { 1548 + if (!(inode_state_read(inode) & (I_FREEING | I_WILL_FREE))) { 1575 1549 __iget(inode); 1576 1550 spin_unlock(&inode->i_lock); 1577 1551 } else { ··· 1604 1578 * Note2: @test is called with the inode_hash_lock held, so can't sleep. 1605 1579 */ 1606 1580 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, 1607 - int (*test)(struct inode *, void *), void *data) 1581 + int (*test)(struct inode *, void *), void *data, bool *isnew) 1608 1582 { 1609 1583 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1610 1584 struct inode *inode; 1611 1585 1612 1586 spin_lock(&inode_hash_lock); 1613 - inode = find_inode(sb, head, test, data, true); 1587 + inode = find_inode(sb, head, test, data, true, isnew); 1614 1588 spin_unlock(&inode_hash_lock); 1615 1589 1616 1590 return IS_ERR(inode) ? 
NULL : inode; ··· 1638 1612 int (*test)(struct inode *, void *), void *data) 1639 1613 { 1640 1614 struct inode *inode; 1615 + bool isnew; 1641 1616 1642 1617 might_sleep(); 1643 1618 1644 1619 again: 1645 - inode = ilookup5_nowait(sb, hashval, test, data); 1620 + inode = ilookup5_nowait(sb, hashval, test, data, &isnew); 1646 1621 if (inode) { 1647 - wait_on_inode(inode); 1622 + if (unlikely(isnew)) 1623 + wait_on_new_inode(inode); 1648 1624 if (unlikely(inode_unhashed(inode))) { 1649 1625 iput(inode); 1650 1626 goto again; ··· 1668 1640 { 1669 1641 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1670 1642 struct inode *inode; 1643 + bool isnew; 1671 1644 1672 1645 might_sleep(); 1673 1646 1674 1647 again: 1675 - inode = find_inode_fast(sb, head, ino, false); 1648 + inode = find_inode_fast(sb, head, ino, false, &isnew); 1676 1649 1677 1650 if (inode) { 1678 1651 if (IS_ERR(inode)) 1679 1652 return NULL; 1680 - wait_on_inode(inode); 1653 + if (unlikely(isnew)) 1654 + wait_on_new_inode(inode); 1681 1655 if (unlikely(inode_unhashed(inode))) { 1682 1656 iput(inode); 1683 1657 goto again; ··· 1771 1741 1772 1742 hlist_for_each_entry_rcu(inode, head, i_hash) { 1773 1743 if (inode->i_sb == sb && 1774 - !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) && 1744 + !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE)) && 1775 1745 test(inode, data)) 1776 1746 return inode; 1777 1747 } ··· 1810 1780 hlist_for_each_entry_rcu(inode, head, i_hash) { 1811 1781 if (inode->i_ino == ino && 1812 1782 inode->i_sb == sb && 1813 - !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE))) 1783 + !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE))) 1814 1784 return inode; 1815 1785 } 1816 1786 return NULL; ··· 1822 1792 struct super_block *sb = inode->i_sb; 1823 1793 ino_t ino = inode->i_ino; 1824 1794 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1795 + bool isnew; 1825 1796 1826 1797 might_sleep(); 1827 1798 ··· 1835 1804 if (old->i_sb != sb) 
1836 1805 continue; 1837 1806 spin_lock(&old->i_lock); 1838 - if (old->i_state & (I_FREEING|I_WILL_FREE)) { 1807 + if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) { 1839 1808 spin_unlock(&old->i_lock); 1840 1809 continue; 1841 1810 } ··· 1843 1812 } 1844 1813 if (likely(!old)) { 1845 1814 spin_lock(&inode->i_lock); 1846 - inode->i_state |= I_NEW | I_CREATING; 1815 + inode_state_set(inode, I_NEW | I_CREATING); 1847 1816 hlist_add_head_rcu(&inode->i_hash, head); 1848 1817 spin_unlock(&inode->i_lock); 1849 1818 spin_unlock(&inode_hash_lock); 1850 1819 return 0; 1851 1820 } 1852 - if (unlikely(old->i_state & I_CREATING)) { 1821 + if (unlikely(inode_state_read(old) & I_CREATING)) { 1853 1822 spin_unlock(&old->i_lock); 1854 1823 spin_unlock(&inode_hash_lock); 1855 1824 return -EBUSY; 1856 1825 } 1857 1826 __iget(old); 1827 + isnew = !!(inode_state_read(old) & I_NEW); 1858 1828 spin_unlock(&old->i_lock); 1859 1829 spin_unlock(&inode_hash_lock); 1860 - wait_on_inode(old); 1830 + if (isnew) 1831 + wait_on_new_inode(old); 1861 1832 if (unlikely(!inode_unhashed(old))) { 1862 1833 iput(old); 1863 1834 return -EBUSY; ··· 1876 1843 1877 1844 might_sleep(); 1878 1845 1879 - inode->i_state |= I_CREATING; 1846 + inode_state_set_raw(inode, I_CREATING); 1880 1847 old = inode_insert5(inode, hashval, test, NULL, data); 1881 1848 1882 1849 if (old != inode) { ··· 1908 1875 { 1909 1876 struct super_block *sb = inode->i_sb; 1910 1877 const struct super_operations *op = inode->i_sb->s_op; 1911 - unsigned long state; 1912 1878 int drop; 1913 1879 1914 - WARN_ON(inode->i_state & I_NEW); 1880 + WARN_ON(inode_state_read(inode) & I_NEW); 1881 + VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode); 1915 1882 1916 1883 if (op->drop_inode) 1917 1884 drop = op->drop_inode(inode); ··· 1919 1886 drop = inode_generic_drop(inode); 1920 1887 1921 1888 if (!drop && 1922 - !(inode->i_state & I_DONTCACHE) && 1889 + !(inode_state_read(inode) & I_DONTCACHE) && 1923 1890 (sb->s_flags & 
SB_ACTIVE)) { 1924 - __inode_add_lru(inode, true); 1891 + __inode_lru_list_add(inode, true); 1925 1892 spin_unlock(&inode->i_lock); 1926 1893 return; 1927 1894 } 1928 1895 1929 - state = inode->i_state; 1930 - if (!drop) { 1931 - WRITE_ONCE(inode->i_state, state | I_WILL_FREE); 1896 + /* 1897 + * Re-check ->i_count in case the ->drop_inode() hooks played games. 1898 + * Note we only execute this if the verdict was to drop the inode. 1899 + */ 1900 + VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode); 1901 + 1902 + if (drop) { 1903 + inode_state_set(inode, I_FREEING); 1904 + } else { 1905 + inode_state_set(inode, I_WILL_FREE); 1932 1906 spin_unlock(&inode->i_lock); 1933 1907 1934 1908 write_inode_now(inode, 1); 1935 1909 1936 1910 spin_lock(&inode->i_lock); 1937 - state = inode->i_state; 1938 - WARN_ON(state & I_NEW); 1939 - state &= ~I_WILL_FREE; 1911 + WARN_ON(inode_state_read(inode) & I_NEW); 1912 + inode_state_replace(inode, I_WILL_FREE, I_FREEING); 1940 1913 } 1941 1914 1942 - WRITE_ONCE(inode->i_state, state | I_FREEING); 1943 - if (!list_empty(&inode->i_lru)) 1944 - inode_lru_list_del(inode); 1915 + inode_lru_list_del(inode); 1945 1916 spin_unlock(&inode->i_lock); 1946 1917 1947 1918 evict(inode); ··· 1968 1931 1969 1932 retry: 1970 1933 lockdep_assert_not_held(&inode->i_lock); 1971 - VFS_BUG_ON_INODE(inode->i_state & I_CLEAR, inode); 1934 + VFS_BUG_ON_INODE(inode_state_read_once(inode) & I_CLEAR, inode); 1972 1935 /* 1973 1936 * Note this assert is technically racy as if the count is bogusly 1974 1937 * equal to one, then two CPUs racing to further drop it can both ··· 1979 1942 if (atomic_add_unless(&inode->i_count, -1, 1)) 1980 1943 return; 1981 1944 1982 - if ((inode->i_state & I_DIRTY_TIME) && inode->i_nlink) { 1945 + if ((inode_state_read_once(inode) & I_DIRTY_TIME) && inode->i_nlink) { 1983 1946 trace_writeback_lazytime_iput(inode); 1984 1947 mark_inode_dirty_sync(inode); 1985 1948 goto retry; 1986 1949 } 1987 1950 1988 1951 
spin_lock(&inode->i_lock); 1989 - if (unlikely((inode->i_state & I_DIRTY_TIME) && inode->i_nlink)) { 1952 + if (unlikely((inode_state_read(inode) & I_DIRTY_TIME) && inode->i_nlink)) { 1990 1953 spin_unlock(&inode->i_lock); 1991 1954 goto retry; 1992 1955 } ··· 2999 2962 pr_warn("%s encountered for inode %px\n" 3000 2963 "fs %s mode %ho opflags 0x%hx flags 0x%x state 0x%x count %d\n", 3001 2964 reason, inode, sb->s_type->name, inode->i_mode, inode->i_opflags, 3002 - inode->i_flags, inode->i_state, atomic_read(&inode->i_count)); 2965 + inode->i_flags, inode_state_read_once(inode), atomic_read(&inode->i_count)); 3003 2966 } 3004 2967 3005 2968 EXPORT_SYMBOL(dump_inode);
+1 -1
fs/isofs/inode.c
··· 1520 1520 if (!inode) 1521 1521 return ERR_PTR(-ENOMEM); 1522 1522 1523 - if (inode->i_state & I_NEW) { 1523 + if (inode_state_read_once(inode) & I_NEW) { 1524 1524 ret = isofs_read_inode(inode, relocated); 1525 1525 if (ret < 0) { 1526 1526 iget_failed(inode);
+2 -2
fs/jffs2/fs.c
··· 265 265 inode = iget_locked(sb, ino); 266 266 if (!inode) 267 267 return ERR_PTR(-ENOMEM); 268 - if (!(inode->i_state & I_NEW)) 268 + if (!(inode_state_read_once(inode) & I_NEW)) 269 269 return inode; 270 270 271 271 f = JFFS2_INODE_INFO(inode); ··· 373 373 { 374 374 struct iattr iattr; 375 375 376 - if (!(inode->i_state & I_DIRTY_DATASYNC)) { 376 + if (!(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) { 377 377 jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n", 378 378 __func__, inode->i_ino); 379 379 return;
+2 -2
fs/jfs/file.c
··· 26 26 return rc; 27 27 28 28 inode_lock(inode); 29 - if (!(inode->i_state & I_DIRTY_ALL) || 30 - (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) { 29 + if (!(inode_state_read_once(inode) & I_DIRTY_ALL) || 30 + (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))) { 31 31 /* Make sure committed changes hit the disk */ 32 32 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1); 33 33 inode_unlock(inode);
+1 -1
fs/jfs/inode.c
··· 29 29 inode = iget_locked(sb, ino); 30 30 if (!inode) 31 31 return ERR_PTR(-ENOMEM); 32 - if (!(inode->i_state & I_NEW)) 32 + if (!(inode_state_read_once(inode) & I_NEW)) 33 33 return inode; 34 34 35 35 ret = diRead(inode);
+1 -1
fs/jfs/jfs_txnmgr.c
··· 1287 1287 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done. 1288 1288 * Joern 1289 1289 */ 1290 - if (tblk->u.ip->i_state & I_SYNC) 1290 + if (inode_state_read_once(tblk->u.ip) & I_SYNC) 1291 1291 tblk->xflag &= ~COMMIT_LAZY; 1292 1292 } 1293 1293
+1 -1
fs/kernfs/inode.c
··· 251 251 struct inode *inode; 252 252 253 253 inode = iget_locked(sb, kernfs_ino(kn)); 254 - if (inode && (inode->i_state & I_NEW)) 254 + if (inode && (inode_state_read_once(inode) & I_NEW)) 255 255 kernfs_init_inode(kn, inode); 256 256 257 257 return inode;
+3 -3
fs/libfs.c
··· 1542 1542 1543 1543 inode_lock(inode); 1544 1544 ret = sync_mapping_buffers(inode->i_mapping); 1545 - if (!(inode->i_state & I_DIRTY_ALL)) 1545 + if (!(inode_state_read_once(inode) & I_DIRTY_ALL)) 1546 1546 goto out; 1547 - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 1547 + if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) 1548 1548 goto out; 1549 1549 1550 1550 err = sync_inode_metadata(inode, 1); ··· 1664 1664 * list because mark_inode_dirty() will think 1665 1665 * that it already _is_ on the dirty list. 1666 1666 */ 1667 - inode->i_state = I_DIRTY; 1667 + inode_state_assign_raw(inode, I_DIRTY); 1668 1668 /* 1669 1669 * Historically anonymous inodes don't have a type at all and 1670 1670 * userspace has come to rely on this.
+1 -1
fs/minix/inode.c
··· 589 589 inode = iget_locked(sb, ino); 590 590 if (!inode) 591 591 return ERR_PTR(-ENOMEM); 592 - if (!(inode->i_state & I_NEW)) 592 + if (!(inode_state_read_once(inode) & I_NEW)) 593 593 return inode; 594 594 595 595 if (INODE_VERSION(inode) == MINIX_V1)
+4 -4
fs/namei.c
··· 4106 4106 inode = file_inode(file); 4107 4107 if (!(open_flag & O_EXCL)) { 4108 4108 spin_lock(&inode->i_lock); 4109 - inode->i_state |= I_LINKABLE; 4109 + inode_state_set(inode, I_LINKABLE); 4110 4110 spin_unlock(&inode->i_lock); 4111 4111 } 4112 4112 security_inode_post_create_tmpfile(idmap, inode); ··· 5001 5001 5002 5002 inode_lock(inode); 5003 5003 /* Make sure we don't allow creating hardlink to an unlinked file */ 5004 - if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) 5004 + if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE)) 5005 5005 error = -ENOENT; 5006 5006 else if (max_links && inode->i_nlink >= max_links) 5007 5007 error = -EMLINK; ··· 5011 5011 error = dir->i_op->link(old_dentry, dir, new_dentry); 5012 5012 } 5013 5013 5014 - if (!error && (inode->i_state & I_LINKABLE)) { 5014 + if (!error && (inode_state_read_once(inode) & I_LINKABLE)) { 5015 5015 spin_lock(&inode->i_lock); 5016 - inode->i_state &= ~I_LINKABLE; 5016 + inode_state_clear(inode, I_LINKABLE); 5017 5017 spin_unlock(&inode->i_lock); 5018 5018 } 5019 5019 inode_unlock(inode);
+4 -4
fs/netfs/misc.c
··· 147 147 if (!fscache_cookie_valid(cookie)) 148 148 return true; 149 149 150 - if (!(inode->i_state & I_PINNING_NETFS_WB)) { 150 + if (!(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) { 151 151 spin_lock(&inode->i_lock); 152 - if (!(inode->i_state & I_PINNING_NETFS_WB)) { 153 - inode->i_state |= I_PINNING_NETFS_WB; 152 + if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) { 153 + inode_state_set(inode, I_PINNING_NETFS_WB); 154 154 need_use = true; 155 155 } 156 156 spin_unlock(&inode->i_lock); ··· 192 192 { 193 193 struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode)); 194 194 195 - if (inode->i_state & I_PINNING_NETFS_WB) { 195 + if (inode_state_read_once(inode) & I_PINNING_NETFS_WB) { 196 196 loff_t i_size = i_size_read(inode); 197 197 fscache_unuse_cookie(cookie, aux, &i_size); 198 198 }
+3 -3
fs/netfs/read_single.c
··· 36 36 37 37 mark_inode_dirty(inode); 38 38 39 - if (caching && !(inode->i_state & I_PINNING_NETFS_WB)) { 39 + if (caching && !(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) { 40 40 bool need_use = false; 41 41 42 42 spin_lock(&inode->i_lock); 43 - if (!(inode->i_state & I_PINNING_NETFS_WB)) { 44 - inode->i_state |= I_PINNING_NETFS_WB; 43 + if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) { 44 + inode_state_set(inode, I_PINNING_NETFS_WB); 45 45 need_use = true; 46 46 } 47 47 spin_unlock(&inode->i_lock);
+1 -1
fs/nfs/inode.c
··· 475 475 goto out_no_inode; 476 476 } 477 477 478 - if (inode->i_state & I_NEW) { 478 + if (inode_state_read_once(inode) & I_NEW) { 479 479 struct nfs_inode *nfsi = NFS_I(inode); 480 480 unsigned long now = jiffies; 481 481
+1 -1
fs/nfs/pnfs.c
··· 317 317 WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n"); 318 318 pnfs_detach_layout_hdr(lo); 319 319 /* Notify pnfs_destroy_layout_final() that we're done */ 320 - if (inode->i_state & (I_FREEING | I_CLEAR)) 320 + if (inode_state_read(inode) & (I_FREEING | I_CLEAR)) 321 321 wake_up_var_locked(lo, &inode->i_lock); 322 322 spin_unlock(&inode->i_lock); 323 323 pnfs_free_layout_hdr(lo);
+1 -1
fs/nfsd/vfs.c
··· 1159 1159 dprintk("nfsd: write resume %d\n", task_pid_nr(current)); 1160 1160 } 1161 1161 1162 - if (inode->i_state & I_DIRTY) { 1162 + if (inode_state_read_once(inode) & I_DIRTY) { 1163 1163 dprintk("nfsd: write sync %d\n", task_pid_nr(current)); 1164 1164 err = vfs_fsync(file, 0); 1165 1165 }
+1 -1
fs/nilfs2/cpfile.c
··· 1148 1148 cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO); 1149 1149 if (unlikely(!cpfile)) 1150 1150 return -ENOMEM; 1151 - if (!(cpfile->i_state & I_NEW)) 1151 + if (!(inode_state_read_once(cpfile) & I_NEW)) 1152 1152 goto out; 1153 1153 1154 1154 err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
+1 -1
fs/nilfs2/dat.c
··· 506 506 dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO); 507 507 if (unlikely(!dat)) 508 508 return -ENOMEM; 509 - if (!(dat->i_state & I_NEW)) 509 + if (!(inode_state_read_once(dat) & I_NEW)) 510 510 goto out; 511 511 512 512 err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
+1 -1
fs/nilfs2/ifile.c
··· 188 188 ifile = nilfs_iget_locked(sb, root, NILFS_IFILE_INO); 189 189 if (unlikely(!ifile)) 190 190 return -ENOMEM; 191 - if (!(ifile->i_state & I_NEW)) 191 + if (!(inode_state_read_once(ifile) & I_NEW)) 192 192 goto out; 193 193 194 194 err = nilfs_mdt_init(ifile, NILFS_MDT_GFP,
+5 -5
fs/nilfs2/inode.c
··· 365 365 366 366 failed_after_creation: 367 367 clear_nlink(inode); 368 - if (inode->i_state & I_NEW) 368 + if (inode_state_read_once(inode) & I_NEW) 369 369 unlock_new_inode(inode); 370 370 iput(inode); /* 371 371 * raw_inode will be deleted through ··· 562 562 if (unlikely(!inode)) 563 563 return ERR_PTR(-ENOMEM); 564 564 565 - if (!(inode->i_state & I_NEW)) { 565 + if (!(inode_state_read_once(inode) & I_NEW)) { 566 566 if (!inode->i_nlink) { 567 567 iput(inode); 568 568 return ERR_PTR(-ESTALE); ··· 591 591 inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args); 592 592 if (unlikely(!inode)) 593 593 return ERR_PTR(-ENOMEM); 594 - if (!(inode->i_state & I_NEW)) 594 + if (!(inode_state_read_once(inode) & I_NEW)) 595 595 return inode; 596 596 597 597 err = nilfs_init_gcinode(inode); ··· 631 631 nilfs_iget_set, &args); 632 632 if (unlikely(!btnc_inode)) 633 633 return -ENOMEM; 634 - if (btnc_inode->i_state & I_NEW) { 634 + if (inode_state_read_once(btnc_inode) & I_NEW) { 635 635 nilfs_init_btnc_inode(btnc_inode); 636 636 unlock_new_inode(btnc_inode); 637 637 } ··· 686 686 nilfs_iget_set, &args); 687 687 if (unlikely(!s_inode)) 688 688 return ERR_PTR(-ENOMEM); 689 - if (!(s_inode->i_state & I_NEW)) 689 + if (!(inode_state_read_once(s_inode) & I_NEW)) 690 690 return inode; 691 691 692 692 NILFS_I(s_inode)->i_flags = 0;
+1 -1
fs/nilfs2/sufile.c
··· 1226 1226 sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO); 1227 1227 if (unlikely(!sufile)) 1228 1228 return -ENOMEM; 1229 - if (!(sufile->i_state & I_NEW)) 1229 + if (!(inode_state_read_once(sufile) & I_NEW)) 1230 1230 goto out; 1231 1231 1232 1232 err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
+1 -1
fs/notify/fsnotify.c
··· 52 52 * the inode cannot have any associated watches. 53 53 */ 54 54 spin_lock(&inode->i_lock); 55 - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { 55 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { 56 56 spin_unlock(&inode->i_lock); 57 57 continue; 58 58 }
+1 -1
fs/ntfs3/inode.c
··· 537 537 return ERR_PTR(-ENOMEM); 538 538 539 539 /* If this is a freshly allocated inode, need to read it now. */ 540 - if (inode->i_state & I_NEW) 540 + if (inode_state_read_once(inode) & I_NEW) 541 541 inode = ntfs_read_mft(inode, name, ref); 542 542 else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) { 543 543 /*
+1 -1
fs/ocfs2/dlmglue.c
··· 2487 2487 * which hasn't been populated yet, so clear the refresh flag 2488 2488 * and let the caller handle it. 2489 2489 */ 2490 - if (inode->i_state & I_NEW) { 2490 + if (inode_state_read_once(inode) & I_NEW) { 2491 2491 status = 0; 2492 2492 if (lockres) 2493 2493 ocfs2_complete_lock_res_refresh(lockres, 0);
+4 -23
fs/ocfs2/inode.c
··· 152 152 mlog_errno(PTR_ERR(inode)); 153 153 goto bail; 154 154 } 155 - trace_ocfs2_iget5_locked(inode->i_state); 156 - if (inode->i_state & I_NEW) { 155 + trace_ocfs2_iget5_locked(inode_state_read_once(inode)); 156 + if (inode_state_read_once(inode) & I_NEW) { 157 157 rc = ocfs2_read_locked_inode(inode, &args); 158 158 unlock_new_inode(inode); 159 159 } ··· 1290 1290 1291 1291 void ocfs2_evict_inode(struct inode *inode) 1292 1292 { 1293 + write_inode_now(inode, 1); 1294 + 1293 1295 if (!inode->i_nlink || 1294 1296 (OCFS2_I(inode)->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)) { 1295 1297 ocfs2_delete_inode(inode); ··· 1299 1297 truncate_inode_pages_final(&inode->i_data); 1300 1298 } 1301 1299 ocfs2_clear_inode(inode); 1302 - } 1303 - 1304 - /* Called under inode_lock, with no more references on the 1305 - * struct inode, so it's safe here to check the flags field 1306 - * and to manipulate i_nlink without any other locks. */ 1307 - int ocfs2_drop_inode(struct inode *inode) 1308 - { 1309 - struct ocfs2_inode_info *oi = OCFS2_I(inode); 1310 - 1311 - trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno, 1312 - inode->i_nlink, oi->ip_flags); 1313 - 1314 - assert_spin_locked(&inode->i_lock); 1315 - inode->i_state |= I_WILL_FREE; 1316 - spin_unlock(&inode->i_lock); 1317 - write_inode_now(inode, 1); 1318 - spin_lock(&inode->i_lock); 1319 - WARN_ON(inode->i_state & I_NEW); 1320 - inode->i_state &= ~I_WILL_FREE; 1321 - 1322 - return 1; 1323 1300 } 1324 1301 1325 1302 /*
-1
fs/ocfs2/inode.h
··· 116 116 } 117 117 118 118 void ocfs2_evict_inode(struct inode *inode); 119 - int ocfs2_drop_inode(struct inode *inode); 120 119 121 120 /* Flags for ocfs2_iget() */ 122 121 #define OCFS2_FI_FLAG_SYSFILE 0x1
-2
fs/ocfs2/ocfs2_trace.h
··· 1569 1569 1570 1570 DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_clear_inode); 1571 1571 1572 - DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_drop_inode); 1573 - 1574 1572 TRACE_EVENT(ocfs2_inode_revalidate, 1575 1573 TP_PROTO(void *inode, unsigned long long ino, 1576 1574 unsigned int flags),
+1 -1
fs/ocfs2/super.c
··· 129 129 .statfs = ocfs2_statfs, 130 130 .alloc_inode = ocfs2_alloc_inode, 131 131 .free_inode = ocfs2_free_inode, 132 - .drop_inode = ocfs2_drop_inode, 132 + .drop_inode = inode_just_drop, 133 133 .evict_inode = ocfs2_evict_inode, 134 134 .sync_fs = ocfs2_sync_fs, 135 135 .put_super = ocfs2_put_super,
+1 -1
fs/omfs/inode.c
··· 212 212 inode = iget_locked(sb, ino); 213 213 if (!inode) 214 214 return ERR_PTR(-ENOMEM); 215 - if (!(inode->i_state & I_NEW)) 215 + if (!(inode_state_read_once(inode) & I_NEW)) 216 216 return inode; 217 217 218 218 bh = omfs_bread(inode->i_sb, ino);
+1 -1
fs/openpromfs/inode.c
··· 236 236 mutex_unlock(&op_mutex); 237 237 if (IS_ERR(inode)) 238 238 return ERR_CAST(inode); 239 - if (inode->i_state & I_NEW) { 239 + if (inode_state_read_once(inode) & I_NEW) { 240 240 simple_inode_init_ts(inode); 241 241 ent_oi = OP_I(inode); 242 242 ent_oi->type = ent_type;
+1 -1
fs/orangefs/inode.c
··· 1043 1043 if (!inode) 1044 1044 return ERR_PTR(-ENOMEM); 1045 1045 1046 - if (!(inode->i_state & I_NEW)) 1046 + if (!(inode_state_read_once(inode) & I_NEW)) 1047 1047 return inode; 1048 1048 1049 1049 error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
+3 -3
fs/orangefs/orangefs-utils.c
··· 247 247 spin_lock(&inode->i_lock); 248 248 /* Must have all the attributes in the mask and be within cache time. */ 249 249 if ((!flags && time_before(jiffies, orangefs_inode->getattr_time)) || 250 - orangefs_inode->attr_valid || inode->i_state & I_DIRTY_PAGES) { 250 + orangefs_inode->attr_valid || inode_state_read(inode) & I_DIRTY_PAGES) { 251 251 if (orangefs_inode->attr_valid) { 252 252 spin_unlock(&inode->i_lock); 253 253 write_inode_now(inode, 1); ··· 281 281 spin_lock(&inode->i_lock); 282 282 /* Must have all the attributes in the mask and be within cache time. */ 283 283 if ((!flags && time_before(jiffies, orangefs_inode->getattr_time)) || 284 - orangefs_inode->attr_valid || inode->i_state & I_DIRTY_PAGES) { 284 + orangefs_inode->attr_valid || inode_state_read(inode) & I_DIRTY_PAGES) { 285 285 if (orangefs_inode->attr_valid) { 286 286 spin_unlock(&inode->i_lock); 287 287 write_inode_now(inode, 1); 288 288 goto again2; 289 289 } 290 - if (inode->i_state & I_DIRTY_PAGES) { 290 + if (inode_state_read(inode) & I_DIRTY_PAGES) { 291 291 ret = 0; 292 292 goto out_unlock; 293 293 }
+1 -1
fs/overlayfs/dir.c
··· 686 686 goto out_drop_write; 687 687 688 688 spin_lock(&inode->i_lock); 689 - inode->i_state |= I_CREATING; 689 + inode_state_set(inode, I_CREATING); 690 690 spin_unlock(&inode->i_lock); 691 691 692 692 inode_init_owner(&nop_mnt_idmap, inode, dentry->d_parent->d_inode, mode);
+3 -3
fs/overlayfs/inode.c
··· 1152 1152 if (!trap) 1153 1153 return ERR_PTR(-ENOMEM); 1154 1154 1155 - if (!(trap->i_state & I_NEW)) { 1155 + if (!(inode_state_read_once(trap) & I_NEW)) { 1156 1156 /* Conflicting layer roots? */ 1157 1157 iput(trap); 1158 1158 return ERR_PTR(-ELOOP); ··· 1243 1243 inode = ovl_iget5(sb, oip->newinode, key); 1244 1244 if (!inode) 1245 1245 goto out_err; 1246 - if (!(inode->i_state & I_NEW)) { 1246 + if (!(inode_state_read_once(inode) & I_NEW)) { 1247 1247 /* 1248 1248 * Verify that the underlying files stored in the inode 1249 1249 * match those in the dentry. ··· 1303 1303 if (upperdentry) 1304 1304 ovl_check_protattr(inode, upperdentry); 1305 1305 1306 - if (inode->i_state & I_NEW) 1306 + if (inode_state_read_once(inode) & I_NEW) 1307 1307 unlock_new_inode(inode); 1308 1308 out: 1309 1309 return inode;
+5 -5
fs/overlayfs/util.c
··· 1019 1019 bool locked = false; 1020 1020 1021 1021 spin_lock(&inode->i_lock); 1022 - if (!(inode->i_state & I_OVL_INUSE)) { 1023 - inode->i_state |= I_OVL_INUSE; 1022 + if (!(inode_state_read(inode) & I_OVL_INUSE)) { 1023 + inode_state_set(inode, I_OVL_INUSE); 1024 1024 locked = true; 1025 1025 } 1026 1026 spin_unlock(&inode->i_lock); ··· 1034 1034 struct inode *inode = d_inode(dentry); 1035 1035 1036 1036 spin_lock(&inode->i_lock); 1037 - WARN_ON(!(inode->i_state & I_OVL_INUSE)); 1038 - inode->i_state &= ~I_OVL_INUSE; 1037 + WARN_ON(!(inode_state_read(inode) & I_OVL_INUSE)); 1038 + inode_state_clear(inode, I_OVL_INUSE); 1039 1039 spin_unlock(&inode->i_lock); 1040 1040 } 1041 1041 } ··· 1046 1046 bool inuse; 1047 1047 1048 1048 spin_lock(&inode->i_lock); 1049 - inuse = (inode->i_state & I_OVL_INUSE); 1049 + inuse = (inode_state_read(inode) & I_OVL_INUSE); 1050 1050 spin_unlock(&inode->i_lock); 1051 1051 1052 1052 return inuse;
+1 -1
fs/pipe.c
··· 908 908 * list because "mark_inode_dirty()" will think 909 909 * that it already _is_ on the dirty list. 910 910 */ 911 - inode->i_state = I_DIRTY; 911 + inode_state_assign_raw(inode, I_DIRTY); 912 912 inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; 913 913 inode->i_uid = current_fsuid(); 914 914 inode->i_gid = current_fsgid();
+1 -1
fs/qnx4/inode.c
··· 290 290 inode = iget_locked(sb, ino); 291 291 if (!inode) 292 292 return ERR_PTR(-ENOMEM); 293 - if (!(inode->i_state & I_NEW)) 293 + if (!(inode_state_read_once(inode) & I_NEW)) 294 294 return inode; 295 295 296 296 qnx4_inode = qnx4_raw_inode(inode);
+1 -1
fs/qnx6/inode.c
··· 521 521 inode = iget_locked(sb, ino); 522 522 if (!inode) 523 523 return ERR_PTR(-ENOMEM); 524 - if (!(inode->i_state & I_NEW)) 524 + if (!(inode_state_read_once(inode) & I_NEW)) 525 525 return inode; 526 526 527 527 ei = QNX6_I(inode);
+1 -1
fs/quota/dquot.c
··· 1033 1033 spin_lock(&sb->s_inode_list_lock); 1034 1034 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 1035 1035 spin_lock(&inode->i_lock); 1036 - if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || 1036 + if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) || 1037 1037 !atomic_read(&inode->i_writecount) || 1038 1038 !dqinit_needed(inode, type)) { 1039 1039 spin_unlock(&inode->i_lock);
+1 -1
fs/romfs/super.c
··· 302 302 if (!i) 303 303 return ERR_PTR(-ENOMEM); 304 304 305 - if (!(i->i_state & I_NEW)) 305 + if (!(inode_state_read_once(i) & I_NEW)) 306 306 return i; 307 307 308 308 /* precalculate the data offset */
+1 -1
fs/smb/client/cifsfs.c
··· 500 500 { 501 501 netfs_wait_for_outstanding_io(inode); 502 502 truncate_inode_pages_final(&inode->i_data); 503 - if (inode->i_state & I_PINNING_NETFS_WB) 503 + if (inode_state_read_once(inode) & I_PINNING_NETFS_WB) 504 504 cifs_fscache_unuse_inode_cookie(inode, true); 505 505 cifs_fscache_release_inode_cookie(inode); 506 506 clear_inode(inode);
+7 -7
fs/smb/client/inode.c
··· 101 101 cifs_dbg(FYI, "%s: revalidating inode %llu\n", 102 102 __func__, cifs_i->uniqueid); 103 103 104 - if (inode->i_state & I_NEW) { 104 + if (inode_state_read_once(inode) & I_NEW) { 105 105 cifs_dbg(FYI, "%s: inode %llu is new\n", 106 106 __func__, cifs_i->uniqueid); 107 107 return; ··· 146 146 */ 147 147 if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) { 148 148 /* only provide fake values on a new inode */ 149 - if (inode->i_state & I_NEW) { 149 + if (inode_state_read_once(inode) & I_NEW) { 150 150 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) 151 151 set_nlink(inode, 2); 152 152 else ··· 167 167 struct cifsInodeInfo *cifs_i = CIFS_I(inode); 168 168 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 169 169 170 - if (!(inode->i_state & I_NEW) && 170 + if (!(inode_state_read_once(inode) & I_NEW) && 171 171 unlikely(inode_wrong_type(inode, fattr->cf_mode))) { 172 172 CIFS_I(inode)->time = 0; /* force reval */ 173 173 return -ESTALE; 174 174 } 175 - if (inode->i_state & I_NEW) 175 + if (inode_state_read_once(inode) & I_NEW) 176 176 CIFS_I(inode)->netfs.zero_point = fattr->cf_eof; 177 177 178 178 cifs_revalidate_cache(inode, fattr); ··· 194 194 inode->i_gid = fattr->cf_gid; 195 195 196 196 /* if dynperm is set, don't clobber existing mode */ 197 - if (inode->i_state & I_NEW || 197 + if (inode_state_read(inode) & I_NEW || 198 198 !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) 199 199 inode->i_mode = fattr->cf_mode; 200 200 ··· 236 236 237 237 if (fattr->cf_flags & CIFS_FATTR_JUNCTION) 238 238 inode->i_flags |= S_AUTOMOUNT; 239 - if (inode->i_state & I_NEW) { 239 + if (inode_state_read_once(inode) & I_NEW) { 240 240 cifs_set_netfs_context(inode); 241 241 cifs_set_ops(inode); 242 242 } ··· 1638 1638 cifs_fattr_to_inode(inode, fattr, false); 1639 1639 if (sb->s_flags & SB_NOATIME) 1640 1640 inode->i_flags |= S_NOATIME | S_NOCMTIME; 1641 - if (inode->i_state & I_NEW) { 1641 + if (inode_state_read_once(inode) & I_NEW) { 1642 1642 inode->i_ino = hash; 1643 1643 
cifs_fscache_get_inode_cookie(inode); 1644 1644 unlock_new_inode(inode);
+1 -1
fs/squashfs/inode.c
··· 86 86 87 87 if (!inode) 88 88 return ERR_PTR(-ENOMEM); 89 - if (!(inode->i_state & I_NEW)) 89 + if (!(inode_state_read_once(inode) & I_NEW)) 90 90 return inode; 91 91 92 92 err = squashfs_read_inode(inode, ino);
+1 -1
fs/sync.c
··· 183 183 184 184 if (!file->f_op->fsync) 185 185 return -EINVAL; 186 - if (!datasync && (inode->i_state & I_DIRTY_TIME)) 186 + if (!datasync && (inode_state_read_once(inode) & I_DIRTY_TIME)) 187 187 mark_inode_dirty_sync(inode); 188 188 return file->f_op->fsync(file, start, end, datasync); 189 189 }
+1 -1
fs/ubifs/file.c
··· 1323 1323 inode_lock(inode); 1324 1324 1325 1325 /* Synchronize the inode unless this is a 'datasync()' call. */ 1326 - if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) { 1326 + if (!datasync || (inode_state_read_once(inode) & I_DIRTY_DATASYNC)) { 1327 1327 err = inode->i_sb->s_op->write_inode(inode, NULL); 1328 1328 if (err) 1329 1329 goto out;
+1 -1
fs/ubifs/super.c
··· 114 114 inode = iget_locked(sb, inum); 115 115 if (!inode) 116 116 return ERR_PTR(-ENOMEM); 117 - if (!(inode->i_state & I_NEW)) 117 + if (!(inode_state_read_once(inode) & I_NEW)) 118 118 return inode; 119 119 ui = ubifs_inode(inode); 120 120
+1 -1
fs/udf/inode.c
··· 1962 1962 if (!inode) 1963 1963 return ERR_PTR(-ENOMEM); 1964 1964 1965 - if (!(inode->i_state & I_NEW)) { 1965 + if (!(inode_state_read_once(inode) & I_NEW)) { 1966 1966 if (UDF_I(inode)->i_hidden != hidden_inode) { 1967 1967 iput(inode); 1968 1968 return ERR_PTR(-EFSCORRUPTED);
+1 -1
fs/ufs/inode.c
··· 655 655 inode = iget_locked(sb, ino); 656 656 if (!inode) 657 657 return ERR_PTR(-ENOMEM); 658 - if (!(inode->i_state & I_NEW)) 658 + if (!(inode_state_read_once(inode) & I_NEW)) 659 659 return inode; 660 660 661 661 ufsi = UFS_I(inode);
+1 -1
fs/xfs/scrub/common.c
··· 1249 1249 * hits do not clear DONTCACHE, so we must do it here. 1250 1250 */ 1251 1251 spin_lock(&VFS_I(ip)->i_lock); 1252 - VFS_I(ip)->i_state &= ~I_DONTCACHE; 1252 + inode_state_clear(VFS_I(ip), I_DONTCACHE); 1253 1253 spin_unlock(&VFS_I(ip)->i_lock); 1254 1254 } 1255 1255
+1 -1
fs/xfs/scrub/inode_repair.c
··· 1933 1933 * Unlinked inodes that cannot be added to the directory tree will not 1934 1934 * have a parent pointer. 1935 1935 */ 1936 - if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) 1936 + if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE)) 1937 1937 return 0; 1938 1938 1939 1939 /* Children of the superblock do not have parent pointers. */
+1 -1
fs/xfs/scrub/parent.c
··· 915 915 * Temporary files that cannot be linked into the directory tree do not 916 916 * have attr forks because they cannot ever have parents. 917 917 */ 918 - if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) 918 + if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE)) 919 919 return false; 920 920 921 921 /*
+1 -1
fs/xfs/xfs_bmap_util.c
··· 514 514 * Caller must either hold the exclusive io lock; or be inactivating 515 515 * the inode, which guarantees there are no other users of the inode. 516 516 */ 517 - if (!(VFS_I(ip)->i_state & I_FREEING)) 517 + if (!(inode_state_read_once(VFS_I(ip)) & I_FREEING)) 518 518 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL); 519 519 520 520 /* prealloc/delalloc exists only on regular files */
+2 -2
fs/xfs/xfs_health.c
··· 285 285 * is not the case here. 286 286 */ 287 287 spin_lock(&VFS_I(ip)->i_lock); 288 - VFS_I(ip)->i_state &= ~I_DONTCACHE; 288 + inode_state_clear(VFS_I(ip), I_DONTCACHE); 289 289 spin_unlock(&VFS_I(ip)->i_lock); 290 290 } 291 291 ··· 309 309 * is not the case here. 310 310 */ 311 311 spin_lock(&VFS_I(ip)->i_lock); 312 - VFS_I(ip)->i_state &= ~I_DONTCACHE; 312 + inode_state_clear(VFS_I(ip), I_DONTCACHE); 313 313 spin_unlock(&VFS_I(ip)->i_lock); 314 314 } 315 315
+3 -3
fs/xfs/xfs_icache.c
··· 334 334 dev_t dev = inode->i_rdev; 335 335 kuid_t uid = inode->i_uid; 336 336 kgid_t gid = inode->i_gid; 337 - unsigned long state = inode->i_state; 337 + unsigned long state = inode_state_read_once(inode); 338 338 339 339 error = inode_init_always(mp->m_super, inode); 340 340 ··· 345 345 inode->i_rdev = dev; 346 346 inode->i_uid = uid; 347 347 inode->i_gid = gid; 348 - inode->i_state = state; 348 + inode_state_assign_raw(inode, state); 349 349 mapping_set_folio_min_order(inode->i_mapping, 350 350 M_IGEO(mp)->min_folio_order); 351 351 return error; ··· 411 411 ip->i_flags |= XFS_INEW; 412 412 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), 413 413 XFS_ICI_RECLAIM_TAG); 414 - inode->i_state = I_NEW; 414 + inode_state_assign_raw(inode, I_NEW); 415 415 spin_unlock(&ip->i_flags_lock); 416 416 spin_unlock(&pag->pag_ici_lock); 417 417
+3 -3
fs/xfs/xfs_inode.c
··· 1580 1580 next_ip->i_prev_unlinked = prev_agino; 1581 1581 trace_xfs_iunlink_reload_next(next_ip); 1582 1582 rele: 1583 - ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE)); 1583 + ASSERT(!(inode_state_read_once(VFS_I(next_ip)) & I_DONTCACHE)); 1584 1584 if (xfs_is_quotacheck_running(mp) && next_ip) 1585 1585 xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED); 1586 1586 xfs_irele(next_ip); ··· 2111 2111 */ 2112 2112 xfs_setup_iops(tmpfile); 2113 2113 xfs_finish_inode_setup(tmpfile); 2114 - VFS_I(tmpfile)->i_state |= I_LINKABLE; 2114 + inode_state_set_raw(VFS_I(tmpfile), I_LINKABLE); 2115 2115 2116 2116 *wip = tmpfile; 2117 2117 return 0; ··· 2330 2330 * flag from the inode so it doesn't accidentally get misused in 2331 2331 * future. 2332 2332 */ 2333 - VFS_I(du_wip.ip)->i_state &= ~I_LINKABLE; 2333 + inode_state_clear_raw(VFS_I(du_wip.ip), I_LINKABLE); 2334 2334 } 2335 2335 2336 2336 out_commit:
+2 -2
fs/xfs/xfs_inode_item.c
··· 113 113 * to log the timestamps, or will clear already cleared fields in the 114 114 * worst case. 115 115 */ 116 - if (inode->i_state & I_DIRTY_TIME) { 116 + if (inode_state_read_once(inode) & I_DIRTY_TIME) { 117 117 spin_lock(&inode->i_lock); 118 - inode->i_state &= ~I_DIRTY_TIME; 118 + inode_state_clear(inode, I_DIRTY_TIME); 119 119 spin_unlock(&inode->i_lock); 120 120 } 121 121
+1 -1
fs/xfs/xfs_iops.c
··· 1420 1420 bool is_meta = xfs_is_internal_inode(ip); 1421 1421 1422 1422 inode->i_ino = ip->i_ino; 1423 - inode->i_state |= I_NEW; 1423 + inode_state_set_raw(inode, I_NEW); 1424 1424 1425 1425 inode_sb_list_add(inode); 1426 1426 /* make the inode look hashed for the writeback code */
+1 -1
fs/xfs/xfs_reflink.h
··· 17 17 { 18 18 struct inode *inode = VFS_I(ip); 19 19 20 - if ((inode->i_state & I_DIRTY_PAGES) || 20 + if ((inode_state_read_once(inode) & I_DIRTY_PAGES) || 21 21 mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) || 22 22 mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK) || 23 23 atomic_read(&inode->i_dio_count))
+2 -2
fs/zonefs/super.c
··· 644 644 inode = iget_locked(sb, ino); 645 645 if (!inode) 646 646 return ERR_PTR(-ENOMEM); 647 - if (!(inode->i_state & I_NEW)) { 647 + if (!(inode_state_read_once(inode) & I_NEW)) { 648 648 WARN_ON_ONCE(inode->i_private != z); 649 649 return inode; 650 650 } ··· 683 683 inode = iget_locked(sb, ino); 684 684 if (!inode) 685 685 return ERR_PTR(-ENOMEM); 686 - if (!(inode->i_state & I_NEW)) 686 + if (!(inode_state_read_once(inode) & I_NEW)) 687 687 return inode; 688 688 689 689 inode->i_ino = ino;
+3 -2
include/linux/backing-dev.h
··· 277 277 rcu_read_lock(); 278 278 279 279 /* 280 - * Paired with store_release in inode_switch_wbs_work_fn() and 280 + * Paired with a release fence in inode_do_switch_wbs() and 281 281 * ensures that we see the new wb if we see cleared I_WB_SWITCH. 282 282 */ 283 - cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; 283 + cookie->locked = inode_state_read_once(inode) & I_WB_SWITCH; 284 + smp_rmb(); 284 285 285 286 if (unlikely(cookie->locked)) 286 287 xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
+90 -9
include/linux/fs.h
··· 735 735 /* reserved wait address bit 3 */ 736 736 }; 737 737 738 - enum inode_state_flags_t { 738 + enum inode_state_flags_enum { 739 739 I_NEW = (1U << __I_NEW), 740 740 I_SYNC = (1U << __I_SYNC), 741 741 I_LRU_ISOLATING = (1U << __I_LRU_ISOLATING), ··· 760 760 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) 761 761 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) 762 762 #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) 763 + 764 + /* 765 + * Use inode_state_read() & friends to access. 766 + */ 767 + struct inode_state_flags { 768 + enum inode_state_flags_enum __state; 769 + }; 763 770 764 771 /* 765 772 * Keep mostly read-only and often accessed (especially for ··· 826 819 #endif 827 820 828 821 /* Misc */ 829 - enum inode_state_flags_t i_state; 822 + struct inode_state_flags i_state; 830 823 /* 32-bit hole */ 831 824 struct rw_semaphore i_rwsem; 832 825 ··· 885 878 void *i_private; /* fs or device private pointer */ 886 879 } __randomize_layout; 887 880 881 + /* 882 + * i_state handling 883 + * 884 + * We hide all of it behind helpers so that we can validate consumers. 
885 + */ 886 + static inline enum inode_state_flags_enum inode_state_read_once(struct inode *inode) 887 + { 888 + return READ_ONCE(inode->i_state.__state); 889 + } 890 + 891 + static inline enum inode_state_flags_enum inode_state_read(struct inode *inode) 892 + { 893 + lockdep_assert_held(&inode->i_lock); 894 + return inode->i_state.__state; 895 + } 896 + 897 + static inline void inode_state_set_raw(struct inode *inode, 898 + enum inode_state_flags_enum flags) 899 + { 900 + WRITE_ONCE(inode->i_state.__state, inode->i_state.__state | flags); 901 + } 902 + 903 + static inline void inode_state_set(struct inode *inode, 904 + enum inode_state_flags_enum flags) 905 + { 906 + lockdep_assert_held(&inode->i_lock); 907 + inode_state_set_raw(inode, flags); 908 + } 909 + 910 + static inline void inode_state_clear_raw(struct inode *inode, 911 + enum inode_state_flags_enum flags) 912 + { 913 + WRITE_ONCE(inode->i_state.__state, inode->i_state.__state & ~flags); 914 + } 915 + 916 + static inline void inode_state_clear(struct inode *inode, 917 + enum inode_state_flags_enum flags) 918 + { 919 + lockdep_assert_held(&inode->i_lock); 920 + inode_state_clear_raw(inode, flags); 921 + } 922 + 923 + static inline void inode_state_assign_raw(struct inode *inode, 924 + enum inode_state_flags_enum flags) 925 + { 926 + WRITE_ONCE(inode->i_state.__state, flags); 927 + } 928 + 929 + static inline void inode_state_assign(struct inode *inode, 930 + enum inode_state_flags_enum flags) 931 + { 932 + lockdep_assert_held(&inode->i_lock); 933 + inode_state_assign_raw(inode, flags); 934 + } 935 + 936 + static inline void inode_state_replace_raw(struct inode *inode, 937 + enum inode_state_flags_enum clearflags, 938 + enum inode_state_flags_enum setflags) 939 + { 940 + enum inode_state_flags_enum flags; 941 + flags = inode->i_state.__state; 942 + flags &= ~clearflags; 943 + flags |= setflags; 944 + inode_state_assign_raw(inode, flags); 945 + } 946 + 947 + static inline void inode_state_replace(struct 
inode *inode, 948 + enum inode_state_flags_enum clearflags, 949 + enum inode_state_flags_enum setflags) 950 + { 951 + lockdep_assert_held(&inode->i_lock); 952 + inode_state_replace_raw(inode, clearflags, setflags); 953 + } 954 + 888 955 static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen) 889 956 { 890 957 VFS_WARN_ON_INODE(strlen(link) != linklen, inode); ··· 1005 924 { 1006 925 hlist_add_fake(&inode->i_hash); 1007 926 } 927 + 928 + void wait_on_new_inode(struct inode *inode); 1008 929 1009 930 /* 1010 931 * inode->i_rwsem nesting subclasses for the lock validator: ··· 2694 2611 */ 2695 2612 static inline bool inode_is_dirtytime_only(struct inode *inode) 2696 2613 { 2697 - return (inode->i_state & (I_DIRTY_TIME | I_NEW | 2698 - I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME; 2614 + return (inode_state_read_once(inode) & 2615 + (I_DIRTY_TIME | I_NEW | I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME; 2699 2616 } 2700 2617 2701 2618 extern void inc_nlink(struct inode *inode); ··· 3387 3304 3388 3305 extern struct inode *ilookup5_nowait(struct super_block *sb, 3389 3306 unsigned long hashval, int (*test)(struct inode *, void *), 3390 - void *data); 3307 + void *data, bool *isnew); 3391 3308 extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, 3392 3309 int (*test)(struct inode *, void *), void *data); 3393 3310 extern struct inode *ilookup(struct super_block *sb, unsigned long ino); ··· 3439 3356 return (u32)ino == 0; 3440 3357 } 3441 3358 3442 - /* 3443 - * inode->i_lock must be held 3444 - */ 3445 3359 static inline void __iget(struct inode *inode) 3446 3360 { 3361 + lockdep_assert_held(&inode->i_lock); 3447 3362 atomic_inc(&inode->i_count); 3448 3363 } 3449 3364 ··· 3480 3399 } 3481 3400 3482 3401 extern void inode_sb_list_add(struct inode *inode); 3483 - extern void inode_add_lru(struct inode *inode); 3402 + extern void inode_lru_list_add(struct inode *inode); 3484 3403 3485 3404 int sb_set_blocksize(struct 
super_block *sb, int size); 3486 3405 int __must_check sb_min_blocksize(struct super_block *sb, int size);
+1 -8
include/linux/writeback.h
··· 189 189 void inode_wait_for_writeback(struct inode *inode); 190 190 void inode_io_list_del(struct inode *inode); 191 191 192 - /* writeback.h requires fs.h; it, too, is not included from here. */ 193 - static inline void wait_on_inode(struct inode *inode) 194 - { 195 - wait_var_event(inode_state_wait_address(inode, __I_NEW), 196 - !(READ_ONCE(inode->i_state) & I_NEW)); 197 - } 198 - 199 192 #ifdef CONFIG_CGROUP_WRITEBACK 200 193 201 194 #include <linux/cgroup.h> ··· 227 234 static inline void inode_detach_wb(struct inode *inode) 228 235 { 229 236 if (inode->i_wb) { 230 - WARN_ON_ONCE(!(inode->i_state & I_CLEAR)); 237 + WARN_ON_ONCE(!(inode_state_read_once(inode) & I_CLEAR)); 231 238 wb_put(inode->i_wb); 232 239 inode->i_wb = NULL; 233 240 }
+4 -4
include/trace/events/writeback.h
··· 120 120 /* may be called for files on pseudo FSes w/ unregistered bdi */ 121 121 strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); 122 122 __entry->ino = inode->i_ino; 123 - __entry->state = inode->i_state; 123 + __entry->state = inode_state_read_once(inode); 124 124 __entry->flags = flags; 125 125 ), 126 126 ··· 748 748 strscpy_pad(__entry->name, 749 749 bdi_dev_name(inode_to_bdi(inode)), 32); 750 750 __entry->ino = inode->i_ino; 751 - __entry->state = inode->i_state; 751 + __entry->state = inode_state_read_once(inode); 752 752 __entry->dirtied_when = inode->dirtied_when; 753 753 __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode)); 754 754 ), ··· 787 787 strscpy_pad(__entry->name, 788 788 bdi_dev_name(inode_to_bdi(inode)), 32); 789 789 __entry->ino = inode->i_ino; 790 - __entry->state = inode->i_state; 790 + __entry->state = inode_state_read_once(inode); 791 791 __entry->dirtied_when = inode->dirtied_when; 792 792 __entry->writeback_index = inode->i_mapping->writeback_index; 793 793 __entry->nr_to_write = nr_to_write; ··· 839 839 TP_fast_assign( 840 840 __entry->dev = inode->i_sb->s_dev; 841 841 __entry->ino = inode->i_ino; 842 - __entry->state = inode->i_state; 842 + __entry->state = inode_state_read_once(inode); 843 843 __entry->mode = inode->i_mode; 844 844 __entry->dirtied_when = inode->dirtied_when; 845 845 ),
+1 -1
mm/backing-dev.c
··· 72 72 list_for_each_entry(inode, &wb->b_more_io, i_io_list) 73 73 stats->nr_more_io++; 74 74 list_for_each_entry(inode, &wb->b_dirty_time, i_io_list) 75 - if (inode->i_state & I_DIRTY_TIME) 75 + if (inode_state_read_once(inode) & I_DIRTY_TIME) 76 76 stats->nr_dirty_time++; 77 77 spin_unlock(&wb->list_lock); 78 78
+2 -2
mm/filemap.c
··· 256 256 __filemap_remove_folio(folio, NULL); 257 257 xa_unlock_irq(&mapping->i_pages); 258 258 if (mapping_shrinkable(mapping)) 259 - inode_add_lru(mapping->host); 259 + inode_lru_list_add(mapping->host); 260 260 spin_unlock(&mapping->host->i_lock); 261 261 262 262 filemap_free_folio(mapping, folio); ··· 335 335 page_cache_delete_batch(mapping, fbatch); 336 336 xa_unlock_irq(&mapping->i_pages); 337 337 if (mapping_shrinkable(mapping)) 338 - inode_add_lru(mapping->host); 338 + inode_lru_list_add(mapping->host); 339 339 spin_unlock(&mapping->host->i_lock); 340 340 341 341 for (i = 0; i < folio_batch_count(fbatch); i++)
+3 -3
mm/truncate.c
··· 46 46 47 47 xas_unlock_irq(&xas); 48 48 if (mapping_shrinkable(mapping)) 49 - inode_add_lru(mapping->host); 49 + inode_lru_list_add(mapping->host); 50 50 spin_unlock(&mapping->host->i_lock); 51 51 } 52 52 ··· 111 111 112 112 xas_unlock_irq(&xas); 113 113 if (mapping_shrinkable(mapping)) 114 - inode_add_lru(mapping->host); 114 + inode_lru_list_add(mapping->host); 115 115 spin_unlock(&mapping->host->i_lock); 116 116 out: 117 117 folio_batch_remove_exceptionals(fbatch); ··· 647 647 __filemap_remove_folio(folio, NULL); 648 648 xa_unlock_irq(&mapping->i_pages); 649 649 if (mapping_shrinkable(mapping)) 650 - inode_add_lru(mapping->host); 650 + inode_lru_list_add(mapping->host); 651 651 spin_unlock(&mapping->host->i_lock); 652 652 653 653 filemap_free_folio(mapping, folio);
+1 -1
mm/vmscan.c
··· 811 811 __filemap_remove_folio(folio, shadow); 812 812 xa_unlock_irq(&mapping->i_pages); 813 813 if (mapping_shrinkable(mapping)) 814 - inode_add_lru(mapping->host); 814 + inode_lru_list_add(mapping->host); 815 815 spin_unlock(&mapping->host->i_lock); 816 816 817 817 if (free_folio)
+1 -1
mm/workingset.c
··· 755 755 xa_unlock_irq(&mapping->i_pages); 756 756 if (mapping->host != NULL) { 757 757 if (mapping_shrinkable(mapping)) 758 - inode_add_lru(mapping->host); 758 + inode_lru_list_add(mapping->host); 759 759 spin_unlock(&mapping->host->i_lock); 760 760 } 761 761 ret = LRU_REMOVED_RETRY;
+1 -1
security/landlock/fs.c
··· 1296 1296 * second call to iput() for the same Landlock object. Also 1297 1297 * checks I_NEW because such inode cannot be tied to an object. 1298 1298 */ 1299 - if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { 1299 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { 1300 1300 spin_unlock(&inode->i_lock); 1301 1301 continue; 1302 1302 }