Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Coccinelle-based conversion to use ->i_state accessors

All call sites were patched by Coccinelle under the default assumption that
->i_lock is held; afterwards, entries were fixed up by hand to use the
unlocked variants where needed.

The script:
@@
expression inode, flags;
@@

- inode->i_state & flags
+ inode_state_read(inode) & flags

@@
expression inode, flags;
@@

- inode->i_state &= ~flags
+ inode_state_clear(inode, flags)

@@
expression inode, flag1, flag2;
@@

- inode->i_state &= ~flag1 & ~flag2
+ inode_state_clear(inode, flag1 | flag2)

@@
expression inode, flags;
@@

- inode->i_state |= flags
+ inode_state_set(inode, flags)

@@
expression inode, flags;
@@

- inode->i_state = flags
+ inode_state_assign(inode, flags)

@@
expression inode, flags;
@@

- flags = inode->i_state
+ flags = inode_state_read(inode)

@@
expression inode, flags;
@@

- READ_ONCE(inode->i_state) & flags
+ inode_state_read(inode) & flags

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>

authored by

Mateusz Guzik and committed by
Christian Brauner
b4dbfd86 d8753f78

+199 -199
+2 -2
block/bdev.c
··· 67 67 int ret; 68 68 69 69 spin_lock(&inode->i_lock); 70 - while (inode->i_state & I_DIRTY) { 70 + while (inode_state_read(inode) & I_DIRTY) { 71 71 spin_unlock(&inode->i_lock); 72 72 ret = write_inode_now(inode, true); 73 73 if (ret) ··· 1265 1265 struct block_device *bdev; 1266 1266 1267 1267 spin_lock(&inode->i_lock); 1268 - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || 1268 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW) || 1269 1269 mapping->nrpages == 0) { 1270 1270 spin_unlock(&inode->i_lock); 1271 1271 continue;
+1 -1
drivers/dax/super.c
··· 433 433 return NULL; 434 434 435 435 dax_dev = to_dax_dev(inode); 436 - if (inode->i_state & I_NEW) { 436 + if (inode_state_read_once(inode) & I_NEW) { 437 437 set_bit(DAXDEV_ALIVE, &dax_dev->flags); 438 438 inode->i_cdev = &dax_dev->cdev; 439 439 inode->i_mode = S_IFCHR;
+1 -1
fs/9p/vfs_inode.c
··· 422 422 inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode, st); 423 423 if (!inode) 424 424 return ERR_PTR(-ENOMEM); 425 - if (!(inode->i_state & I_NEW)) 425 + if (!(inode_state_read_once(inode) & I_NEW)) 426 426 return inode; 427 427 /* 428 428 * initialize the inode with the stat info
+1 -1
fs/9p/vfs_inode_dotl.c
··· 112 112 inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode_dotl, st); 113 113 if (!inode) 114 114 return ERR_PTR(-ENOMEM); 115 - if (!(inode->i_state & I_NEW)) 115 + if (!(inode_state_read_once(inode) & I_NEW)) 116 116 return inode; 117 117 /* 118 118 * initialize the inode with the stat info
+1 -1
fs/affs/inode.c
··· 29 29 inode = iget_locked(sb, ino); 30 30 if (!inode) 31 31 return ERR_PTR(-ENOMEM); 32 - if (!(inode->i_state & I_NEW)) 32 + if (!(inode_state_read_once(inode) & I_NEW)) 33 33 return inode; 34 34 35 35 pr_debug("affs_iget(%lu)\n", inode->i_ino);
+3 -3
fs/afs/dynroot.c
··· 64 64 65 65 vnode = AFS_FS_I(inode); 66 66 67 - if (inode->i_state & I_NEW) { 67 + if (inode_state_read_once(inode) & I_NEW) { 68 68 netfs_inode_init(&vnode->netfs, NULL, false); 69 69 simple_inode_init_ts(inode); 70 70 set_nlink(inode, 2); ··· 258 258 259 259 vnode = AFS_FS_I(inode); 260 260 261 - if (inode->i_state & I_NEW) { 261 + if (inode_state_read_once(inode) & I_NEW) { 262 262 netfs_inode_init(&vnode->netfs, NULL, false); 263 263 simple_inode_init_ts(inode); 264 264 set_nlink(inode, 1); ··· 383 383 vnode = AFS_FS_I(inode); 384 384 385 385 /* there shouldn't be an existing inode */ 386 - if (inode->i_state & I_NEW) { 386 + if (inode_state_read_once(inode) & I_NEW) { 387 387 netfs_inode_init(&vnode->netfs, NULL, false); 388 388 simple_inode_init_ts(inode); 389 389 set_nlink(inode, 2);
+3 -3
fs/afs/inode.c
··· 579 579 inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); 580 580 581 581 /* deal with an existing inode */ 582 - if (!(inode->i_state & I_NEW)) { 582 + if (!(inode_state_read_once(inode) & I_NEW)) { 583 583 _leave(" = %p", inode); 584 584 return inode; 585 585 } ··· 639 639 640 640 _debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid); 641 641 642 - BUG_ON(!(inode->i_state & I_NEW)); 642 + BUG_ON(!(inode_state_read_once(inode) & I_NEW)); 643 643 644 644 vnode = AFS_FS_I(inode); 645 645 vnode->cb_v_check = atomic_read(&as->volume->cb_v_break); ··· 748 748 749 749 if ((S_ISDIR(inode->i_mode) || 750 750 S_ISLNK(inode->i_mode)) && 751 - (inode->i_state & I_DIRTY) && 751 + (inode_state_read_once(inode) & I_DIRTY) && 752 752 !sbi->dyn_root) { 753 753 struct writeback_control wbc = { 754 754 .sync_mode = WB_SYNC_ALL,
+1 -1
fs/befs/linuxvfs.c
··· 307 307 inode = iget_locked(sb, ino); 308 308 if (!inode) 309 309 return ERR_PTR(-ENOMEM); 310 - if (!(inode->i_state & I_NEW)) 310 + if (!(inode_state_read_once(inode) & I_NEW)) 311 311 return inode; 312 312 313 313 befs_ino = BEFS_I(inode);
+1 -1
fs/bfs/inode.c
··· 42 42 inode = iget_locked(sb, ino); 43 43 if (!inode) 44 44 return ERR_PTR(-ENOMEM); 45 - if (!(inode->i_state & I_NEW)) 45 + if (!(inode_state_read_once(inode) & I_NEW)) 46 46 return inode; 47 47 48 48 if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
+2 -2
fs/buffer.c
··· 611 611 return err; 612 612 613 613 ret = sync_mapping_buffers(inode->i_mapping); 614 - if (!(inode->i_state & I_DIRTY_ALL)) 614 + if (!(inode_state_read_once(inode) & I_DIRTY_ALL)) 615 615 goto out; 616 - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 616 + if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) 617 617 goto out; 618 618 619 619 err = sync_inode_metadata(inode, 1);
+2 -2
fs/coda/cnode.c
··· 70 70 if (!inode) 71 71 return ERR_PTR(-ENOMEM); 72 72 73 - if (inode->i_state & I_NEW) { 73 + if (inode_state_read_once(inode) & I_NEW) { 74 74 cii = ITOC(inode); 75 75 /* we still need to set i_ino for things like stat(2) */ 76 76 inode->i_ino = hash; ··· 148 148 149 149 /* we should never see newly created inodes because we intentionally 150 150 * fail in the initialization callback */ 151 - BUG_ON(inode->i_state & I_NEW); 151 + BUG_ON(inode_state_read_once(inode) & I_NEW); 152 152 153 153 return inode; 154 154 }
+1 -1
fs/cramfs/inode.c
··· 95 95 inode = iget_locked(sb, cramino(cramfs_inode, offset)); 96 96 if (!inode) 97 97 return ERR_PTR(-ENOMEM); 98 - if (!(inode->i_state & I_NEW)) 98 + if (!(inode_state_read_once(inode) & I_NEW)) 99 99 return inode; 100 100 101 101 switch (cramfs_inode->mode & S_IFMT) {
+1 -1
fs/crypto/keyring.c
··· 945 945 list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) { 946 946 inode = ci->ci_inode; 947 947 spin_lock(&inode->i_lock); 948 - if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { 948 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { 949 949 spin_unlock(&inode->i_lock); 950 950 continue; 951 951 }
+1 -1
fs/crypto/keysetup.c
··· 834 834 * userspace is still using the files, inodes can be dirtied between 835 835 * then and now. We mustn't lose any writes, so skip dirty inodes here. 836 836 */ 837 - if (inode->i_state & I_DIRTY_ALL) 837 + if (inode_state_read(inode) & I_DIRTY_ALL) 838 838 return 0; 839 839 840 840 /*
+4 -4
fs/dcache.c
··· 794 794 de->d_flags |= DCACHE_DONTCACHE; 795 795 spin_unlock(&de->d_lock); 796 796 } 797 - inode->i_state |= I_DONTCACHE; 797 + inode_state_set(inode, I_DONTCACHE); 798 798 spin_unlock(&inode->i_lock); 799 799 } 800 800 EXPORT_SYMBOL(d_mark_dontcache); ··· 1073 1073 spin_lock(&inode->i_lock); 1074 1074 // ->i_dentry and ->i_rcu are colocated, but the latter won't be 1075 1075 // used without having I_FREEING set, which means no aliases left 1076 - if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) { 1076 + if (likely(!(inode_state_read(inode) & I_FREEING) && !hlist_empty(l))) { 1077 1077 if (S_ISDIR(inode->i_mode)) { 1078 1078 de = hlist_entry(l->first, struct dentry, d_u.d_alias); 1079 1079 } else { ··· 1980 1980 security_d_instantiate(entry, inode); 1981 1981 spin_lock(&inode->i_lock); 1982 1982 __d_instantiate(entry, inode); 1983 - WARN_ON(!(inode->i_state & I_NEW)); 1983 + WARN_ON(!(inode_state_read(inode) & I_NEW)); 1984 1984 /* 1985 1985 * Pairs with smp_rmb in wait_on_inode(). 1986 1986 */ 1987 1987 smp_wmb(); 1988 - inode->i_state &= ~I_NEW & ~I_CREATING; 1988 + inode_state_clear(inode, I_NEW | I_CREATING); 1989 1989 /* 1990 1990 * Pairs with the barrier in prepare_to_wait_event() to make sure 1991 1991 * ___wait_var_event() either sees the bit cleared or
+1 -1
fs/drop_caches.c
··· 28 28 * inodes without pages but we deliberately won't in case 29 29 * we need to reschedule to avoid softlockups. 30 30 */ 31 - if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || 31 + if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) || 32 32 (mapping_empty(inode->i_mapping) && !need_resched())) { 33 33 spin_unlock(&inode->i_lock); 34 34 continue;
+3 -3
fs/ecryptfs/inode.c
··· 95 95 iput(lower_inode); 96 96 return ERR_PTR(-EACCES); 97 97 } 98 - if (!(inode->i_state & I_NEW)) 98 + if (!(inode_state_read_once(inode) & I_NEW)) 99 99 iput(lower_inode); 100 100 101 101 return inode; ··· 106 106 { 107 107 struct inode *inode = __ecryptfs_get_inode(lower_inode, sb); 108 108 109 - if (!IS_ERR(inode) && (inode->i_state & I_NEW)) 109 + if (!IS_ERR(inode) && (inode_state_read_once(inode) & I_NEW)) 110 110 unlock_new_inode(inode); 111 111 112 112 return inode; ··· 364 364 } 365 365 } 366 366 367 - if (inode->i_state & I_NEW) 367 + if (inode_state_read_once(inode) & I_NEW) 368 368 unlock_new_inode(inode); 369 369 return d_splice_alias(inode, dentry); 370 370 }
+1 -1
fs/efs/inode.c
··· 62 62 inode = iget_locked(super, ino); 63 63 if (!inode) 64 64 return ERR_PTR(-ENOMEM); 65 - if (!(inode->i_state & I_NEW)) 65 + if (!(inode_state_read_once(inode) & I_NEW)) 66 66 return inode; 67 67 68 68 in = INODE_INFO(inode);
+1 -1
fs/erofs/inode.c
··· 295 295 if (!inode) 296 296 return ERR_PTR(-ENOMEM); 297 297 298 - if (inode->i_state & I_NEW) { 298 + if (inode_state_read_once(inode) & I_NEW) { 299 299 int err = erofs_fill_inode(inode); 300 300 301 301 if (err) {
+1 -1
fs/ext2/inode.c
··· 1398 1398 inode = iget_locked(sb, ino); 1399 1399 if (!inode) 1400 1400 return ERR_PTR(-ENOMEM); 1401 - if (!(inode->i_state & I_NEW)) 1401 + if (!(inode_state_read_once(inode) & I_NEW)) 1402 1402 return inode; 1403 1403 1404 1404 ei = EXT2_I(inode);
+1 -1
fs/freevxfs/vxfs_inode.c
··· 258 258 ip = iget_locked(sbp, ino); 259 259 if (!ip) 260 260 return ERR_PTR(-ENOMEM); 261 - if (!(ip->i_state & I_NEW)) 261 + if (!(inode_state_read_once(ip) & I_NEW)) 262 262 return ip; 263 263 264 264 vip = VXFS_INO(ip);
+60 -60
fs/fs-writeback.c
··· 121 121 { 122 122 assert_spin_locked(&wb->list_lock); 123 123 assert_spin_locked(&inode->i_lock); 124 - WARN_ON_ONCE(inode->i_state & I_FREEING); 124 + WARN_ON_ONCE(inode_state_read(inode) & I_FREEING); 125 125 126 126 list_move(&inode->i_io_list, head); 127 127 ··· 304 304 { 305 305 assert_spin_locked(&wb->list_lock); 306 306 assert_spin_locked(&inode->i_lock); 307 - WARN_ON_ONCE(inode->i_state & I_FREEING); 307 + WARN_ON_ONCE(inode_state_read(inode) & I_FREEING); 308 308 309 - inode->i_state &= ~I_SYNC_QUEUED; 309 + inode_state_clear(inode, I_SYNC_QUEUED); 310 310 if (wb != &wb->bdi->wb) 311 311 list_move(&inode->i_io_list, &wb->b_attached); 312 312 else ··· 408 408 * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction 409 409 * path owns the inode and we shouldn't modify ->i_io_list. 410 410 */ 411 - if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE))) 411 + if (unlikely(inode_state_read(inode) & (I_FREEING | I_WILL_FREE))) 412 412 goto skip_switch; 413 413 414 414 trace_inode_switch_wbs(inode, old_wb, new_wb); ··· 451 451 if (!list_empty(&inode->i_io_list)) { 452 452 inode->i_wb = new_wb; 453 453 454 - if (inode->i_state & I_DIRTY_ALL) { 454 + if (inode_state_read(inode) & I_DIRTY_ALL) { 455 455 /* 456 456 * We need to keep b_dirty list sorted by 457 457 * dirtied_time_when. However properly sorting the ··· 480 480 * ensures that the new wb is visible if they see !I_WB_SWITCH. 
481 481 */ 482 482 smp_wmb(); 483 - inode->i_state &= ~I_WB_SWITCH; 483 + inode_state_clear(inode, I_WB_SWITCH); 484 484 485 485 xa_unlock_irq(&mapping->i_pages); 486 486 spin_unlock(&inode->i_lock); ··· 601 601 /* while holding I_WB_SWITCH, no one else can update the association */ 602 602 spin_lock(&inode->i_lock); 603 603 if (!(inode->i_sb->s_flags & SB_ACTIVE) || 604 - inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) || 604 + inode_state_read(inode) & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) || 605 605 inode_to_wb(inode) == new_wb) { 606 606 spin_unlock(&inode->i_lock); 607 607 return false; 608 608 } 609 - inode->i_state |= I_WB_SWITCH; 609 + inode_state_set(inode, I_WB_SWITCH); 610 610 __iget(inode); 611 611 spin_unlock(&inode->i_lock); 612 612 ··· 636 636 struct bdi_writeback *new_wb = NULL; 637 637 638 638 /* noop if seems to be already in progress */ 639 - if (inode->i_state & I_WB_SWITCH) 639 + if (inode_state_read_once(inode) & I_WB_SWITCH) 640 640 return; 641 641 642 642 /* avoid queueing a new switch if too many are already in flight */ ··· 1237 1237 { 1238 1238 assert_spin_locked(&wb->list_lock); 1239 1239 assert_spin_locked(&inode->i_lock); 1240 - WARN_ON_ONCE(inode->i_state & I_FREEING); 1240 + WARN_ON_ONCE(inode_state_read(inode) & I_FREEING); 1241 1241 1242 - inode->i_state &= ~I_SYNC_QUEUED; 1242 + inode_state_clear(inode, I_SYNC_QUEUED); 1243 1243 list_del_init(&inode->i_io_list); 1244 1244 wb_io_lists_depopulated(wb); 1245 1245 } ··· 1352 1352 wb = inode_to_wb_and_lock_list(inode); 1353 1353 spin_lock(&inode->i_lock); 1354 1354 1355 - inode->i_state &= ~I_SYNC_QUEUED; 1355 + inode_state_clear(inode, I_SYNC_QUEUED); 1356 1356 list_del_init(&inode->i_io_list); 1357 1357 wb_io_lists_depopulated(wb); 1358 1358 ··· 1410 1410 { 1411 1411 assert_spin_locked(&inode->i_lock); 1412 1412 1413 - inode->i_state &= ~I_SYNC_QUEUED; 1413 + inode_state_clear(inode, I_SYNC_QUEUED); 1414 1414 /* 1415 1415 * When the inode is being freed just don't bother 
with dirty list 1416 1416 * tracking. Flush worker will ignore this inode anyway and it will 1417 1417 * trigger assertions in inode_io_list_move_locked(). 1418 1418 */ 1419 - if (inode->i_state & I_FREEING) { 1419 + if (inode_state_read(inode) & I_FREEING) { 1420 1420 list_del_init(&inode->i_io_list); 1421 1421 wb_io_lists_depopulated(wb); 1422 1422 return; ··· 1450 1450 { 1451 1451 assert_spin_locked(&inode->i_lock); 1452 1452 1453 - inode->i_state &= ~I_SYNC; 1453 + inode_state_clear(inode, I_SYNC); 1454 1454 /* If inode is clean an unused, put it into LRU now... */ 1455 1455 inode_add_lru(inode); 1456 1456 /* Called with inode->i_lock which ensures memory ordering. */ ··· 1494 1494 spin_lock(&inode->i_lock); 1495 1495 list_move(&inode->i_io_list, &tmp); 1496 1496 moved++; 1497 - inode->i_state |= I_SYNC_QUEUED; 1497 + inode_state_set(inode, I_SYNC_QUEUED); 1498 1498 spin_unlock(&inode->i_lock); 1499 1499 if (sb_is_blkdev_sb(inode->i_sb)) 1500 1500 continue; ··· 1580 1580 1581 1581 assert_spin_locked(&inode->i_lock); 1582 1582 1583 - if (!(inode->i_state & I_SYNC)) 1583 + if (!(inode_state_read(inode) & I_SYNC)) 1584 1584 return; 1585 1585 1586 1586 wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC); 1587 1587 for (;;) { 1588 1588 prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE); 1589 1589 /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */ 1590 - if (!(inode->i_state & I_SYNC)) 1590 + if (!(inode_state_read(inode) & I_SYNC)) 1591 1591 break; 1592 1592 spin_unlock(&inode->i_lock); 1593 1593 schedule(); ··· 1613 1613 wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC); 1614 1614 prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE); 1615 1615 /* Checking I_SYNC with inode->i_lock guarantees memory ordering. 
*/ 1616 - sleep = !!(inode->i_state & I_SYNC); 1616 + sleep = !!(inode_state_read(inode) & I_SYNC); 1617 1617 spin_unlock(&inode->i_lock); 1618 1618 if (sleep) 1619 1619 schedule(); ··· 1632 1632 struct writeback_control *wbc, 1633 1633 unsigned long dirtied_before) 1634 1634 { 1635 - if (inode->i_state & I_FREEING) 1635 + if (inode_state_read(inode) & I_FREEING) 1636 1636 return; 1637 1637 1638 1638 /* ··· 1640 1640 * shot. If still dirty, it will be redirty_tail()'ed below. Update 1641 1641 * the dirty time to prevent enqueue and sync it again. 1642 1642 */ 1643 - if ((inode->i_state & I_DIRTY) && 1643 + if ((inode_state_read(inode) & I_DIRTY) && 1644 1644 (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)) 1645 1645 inode->dirtied_when = jiffies; 1646 1646 ··· 1651 1651 * is odd for clean inodes, it can happen for some 1652 1652 * filesystems so handle that gracefully. 1653 1653 */ 1654 - if (inode->i_state & I_DIRTY_ALL) 1654 + if (inode_state_read(inode) & I_DIRTY_ALL) 1655 1655 redirty_tail_locked(inode, wb); 1656 1656 else 1657 1657 inode_cgwb_move_to_attached(inode, wb); ··· 1677 1677 */ 1678 1678 redirty_tail_locked(inode, wb); 1679 1679 } 1680 - } else if (inode->i_state & I_DIRTY) { 1680 + } else if (inode_state_read(inode) & I_DIRTY) { 1681 1681 /* 1682 1682 * Filesystems can dirty the inode during writeback operations, 1683 1683 * such as delayed allocation during submission or metadata 1684 1684 * updates after data IO completion. 1685 1685 */ 1686 1686 redirty_tail_locked(inode, wb); 1687 - } else if (inode->i_state & I_DIRTY_TIME) { 1687 + } else if (inode_state_read(inode) & I_DIRTY_TIME) { 1688 1688 inode->dirtied_when = jiffies; 1689 1689 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); 1690 - inode->i_state &= ~I_SYNC_QUEUED; 1690 + inode_state_clear(inode, I_SYNC_QUEUED); 1691 1691 } else { 1692 1692 /* The inode is clean. Remove from writeback lists. 
*/ 1693 1693 inode_cgwb_move_to_attached(inode, wb); ··· 1713 1713 unsigned dirty; 1714 1714 int ret; 1715 1715 1716 - WARN_ON(!(inode->i_state & I_SYNC)); 1716 + WARN_ON(!(inode_state_read_once(inode) & I_SYNC)); 1717 1717 1718 1718 trace_writeback_single_inode_start(inode, wbc, nr_to_write); 1719 1719 ··· 1737 1737 * mark_inode_dirty_sync() to notify the filesystem about it and to 1738 1738 * change I_DIRTY_TIME into I_DIRTY_SYNC. 1739 1739 */ 1740 - if ((inode->i_state & I_DIRTY_TIME) && 1740 + if ((inode_state_read_once(inode) & I_DIRTY_TIME) && 1741 1741 (wbc->sync_mode == WB_SYNC_ALL || 1742 1742 time_after(jiffies, inode->dirtied_time_when + 1743 1743 dirtytime_expire_interval * HZ))) { ··· 1752 1752 * after handling timestamp expiration, as that may dirty the inode too. 1753 1753 */ 1754 1754 spin_lock(&inode->i_lock); 1755 - dirty = inode->i_state & I_DIRTY; 1756 - inode->i_state &= ~dirty; 1755 + dirty = inode_state_read(inode) & I_DIRTY; 1756 + inode_state_clear(inode, dirty); 1757 1757 1758 1758 /* 1759 1759 * Paired with smp_mb() in __mark_inode_dirty(). 
This allows ··· 1769 1769 smp_mb(); 1770 1770 1771 1771 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 1772 - inode->i_state |= I_DIRTY_PAGES; 1773 - else if (unlikely(inode->i_state & I_PINNING_NETFS_WB)) { 1774 - if (!(inode->i_state & I_DIRTY_PAGES)) { 1775 - inode->i_state &= ~I_PINNING_NETFS_WB; 1772 + inode_state_set(inode, I_DIRTY_PAGES); 1773 + else if (unlikely(inode_state_read(inode) & I_PINNING_NETFS_WB)) { 1774 + if (!(inode_state_read(inode) & I_DIRTY_PAGES)) { 1775 + inode_state_clear(inode, I_PINNING_NETFS_WB); 1776 1776 wbc->unpinned_netfs_wb = true; 1777 1777 dirty |= I_PINNING_NETFS_WB; /* Cause write_inode */ 1778 1778 } ··· 1808 1808 1809 1809 spin_lock(&inode->i_lock); 1810 1810 if (!icount_read(inode)) 1811 - WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); 1811 + WARN_ON(!(inode_state_read(inode) & (I_WILL_FREE | I_FREEING))); 1812 1812 else 1813 - WARN_ON(inode->i_state & I_WILL_FREE); 1813 + WARN_ON(inode_state_read(inode) & I_WILL_FREE); 1814 1814 1815 - if (inode->i_state & I_SYNC) { 1815 + if (inode_state_read(inode) & I_SYNC) { 1816 1816 /* 1817 1817 * Writeback is already running on the inode. For WB_SYNC_NONE, 1818 1818 * that's enough and we can just return. For WB_SYNC_ALL, we ··· 1823 1823 goto out; 1824 1824 inode_wait_for_writeback(inode); 1825 1825 } 1826 - WARN_ON(inode->i_state & I_SYNC); 1826 + WARN_ON(inode_state_read(inode) & I_SYNC); 1827 1827 /* 1828 1828 * If the inode is already fully clean, then there's nothing to do. 1829 1829 * ··· 1831 1831 * still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If 1832 1832 * there are any such pages, we'll need to wait for them. 
1833 1833 */ 1834 - if (!(inode->i_state & I_DIRTY_ALL) && 1834 + if (!(inode_state_read(inode) & I_DIRTY_ALL) && 1835 1835 (wbc->sync_mode != WB_SYNC_ALL || 1836 1836 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) 1837 1837 goto out; 1838 - inode->i_state |= I_SYNC; 1838 + inode_state_set(inode, I_SYNC); 1839 1839 wbc_attach_and_unlock_inode(wbc, inode); 1840 1840 1841 1841 ret = __writeback_single_inode(inode, wbc); ··· 1848 1848 * If the inode is freeing, its i_io_list shoudn't be updated 1849 1849 * as it can be finally deleted at this moment. 1850 1850 */ 1851 - if (!(inode->i_state & I_FREEING)) { 1851 + if (!(inode_state_read(inode) & I_FREEING)) { 1852 1852 /* 1853 1853 * If the inode is now fully clean, then it can be safely 1854 1854 * removed from its writeback list (if any). Otherwise the 1855 1855 * flusher threads are responsible for the writeback lists. 1856 1856 */ 1857 - if (!(inode->i_state & I_DIRTY_ALL)) 1857 + if (!(inode_state_read(inode) & I_DIRTY_ALL)) 1858 1858 inode_cgwb_move_to_attached(inode, wb); 1859 - else if (!(inode->i_state & I_SYNC_QUEUED)) { 1860 - if ((inode->i_state & I_DIRTY)) 1859 + else if (!(inode_state_read(inode) & I_SYNC_QUEUED)) { 1860 + if ((inode_state_read(inode) & I_DIRTY)) 1861 1861 redirty_tail_locked(inode, wb); 1862 - else if (inode->i_state & I_DIRTY_TIME) { 1862 + else if (inode_state_read(inode) & I_DIRTY_TIME) { 1863 1863 inode->dirtied_when = jiffies; 1864 1864 inode_io_list_move_locked(inode, 1865 1865 wb, ··· 1968 1968 * kind writeout is handled by the freer. 
1969 1969 */ 1970 1970 spin_lock(&inode->i_lock); 1971 - if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 1971 + if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) { 1972 1972 redirty_tail_locked(inode, wb); 1973 1973 spin_unlock(&inode->i_lock); 1974 1974 continue; 1975 1975 } 1976 - if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { 1976 + if ((inode_state_read(inode) & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { 1977 1977 /* 1978 1978 * If this inode is locked for writeback and we are not 1979 1979 * doing writeback-for-data-integrity, move it to ··· 1995 1995 * are doing WB_SYNC_NONE writeback. So this catches only the 1996 1996 * WB_SYNC_ALL case. 1997 1997 */ 1998 - if (inode->i_state & I_SYNC) { 1998 + if (inode_state_read(inode) & I_SYNC) { 1999 1999 /* Wait for I_SYNC. This function drops i_lock... */ 2000 2000 inode_sleep_on_writeback(inode); 2001 2001 /* Inode may be gone, start again */ 2002 2002 spin_lock(&wb->list_lock); 2003 2003 continue; 2004 2004 } 2005 - inode->i_state |= I_SYNC; 2005 + inode_state_set(inode, I_SYNC); 2006 2006 wbc_attach_and_unlock_inode(&wbc, inode); 2007 2007 2008 2008 write_chunk = writeback_chunk_size(wb, work); ··· 2040 2040 */ 2041 2041 tmp_wb = inode_to_wb_and_lock_list(inode); 2042 2042 spin_lock(&inode->i_lock); 2043 - if (!(inode->i_state & I_DIRTY_ALL)) 2043 + if (!(inode_state_read(inode) & I_DIRTY_ALL)) 2044 2044 total_wrote++; 2045 2045 requeue_inode(inode, tmp_wb, &wbc, dirtied_before); 2046 2046 inode_sync_complete(inode); ··· 2546 2546 * We tell ->dirty_inode callback that timestamps need to 2547 2547 * be updated by setting I_DIRTY_TIME in flags. 
2548 2548 */ 2549 - if (inode->i_state & I_DIRTY_TIME) { 2549 + if (inode_state_read_once(inode) & I_DIRTY_TIME) { 2550 2550 spin_lock(&inode->i_lock); 2551 - if (inode->i_state & I_DIRTY_TIME) { 2552 - inode->i_state &= ~I_DIRTY_TIME; 2551 + if (inode_state_read(inode) & I_DIRTY_TIME) { 2552 + inode_state_clear(inode, I_DIRTY_TIME); 2553 2553 flags |= I_DIRTY_TIME; 2554 2554 } 2555 2555 spin_unlock(&inode->i_lock); ··· 2586 2586 */ 2587 2587 smp_mb(); 2588 2588 2589 - if ((inode->i_state & flags) == flags) 2589 + if ((inode_state_read_once(inode) & flags) == flags) 2590 2590 return; 2591 2591 2592 2592 spin_lock(&inode->i_lock); 2593 - if ((inode->i_state & flags) != flags) { 2594 - const int was_dirty = inode->i_state & I_DIRTY; 2593 + if ((inode_state_read(inode) & flags) != flags) { 2594 + const int was_dirty = inode_state_read(inode) & I_DIRTY; 2595 2595 2596 2596 inode_attach_wb(inode, NULL); 2597 2597 2598 - inode->i_state |= flags; 2598 + inode_state_set(inode, flags); 2599 2599 2600 2600 /* 2601 2601 * Grab inode's wb early because it requires dropping i_lock and we ··· 2614 2614 * the inode it will place it on the appropriate superblock 2615 2615 * list, based upon its state. 
2616 2616 */ 2617 - if (inode->i_state & I_SYNC_QUEUED) 2617 + if (inode_state_read(inode) & I_SYNC_QUEUED) 2618 2618 goto out_unlock; 2619 2619 2620 2620 /* ··· 2625 2625 if (inode_unhashed(inode)) 2626 2626 goto out_unlock; 2627 2627 } 2628 - if (inode->i_state & I_FREEING) 2628 + if (inode_state_read(inode) & I_FREEING) 2629 2629 goto out_unlock; 2630 2630 2631 2631 /* ··· 2640 2640 if (dirtytime) 2641 2641 inode->dirtied_time_when = jiffies; 2642 2642 2643 - if (inode->i_state & I_DIRTY) 2643 + if (inode_state_read(inode) & I_DIRTY) 2644 2644 dirty_list = &wb->b_dirty; 2645 2645 else 2646 2646 dirty_list = &wb->b_dirty_time; ··· 2737 2737 spin_unlock_irq(&sb->s_inode_wblist_lock); 2738 2738 2739 2739 spin_lock(&inode->i_lock); 2740 - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { 2740 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { 2741 2741 spin_unlock(&inode->i_lock); 2742 2742 2743 2743 spin_lock_irq(&sb->s_inode_wblist_lock);
+2 -2
fs/fuse/inode.c
··· 160 160 struct fuse_inode *fi = get_fuse_inode(inode); 161 161 162 162 /* Will write inode on close/munmap and in all other dirtiers */ 163 - WARN_ON(inode->i_state & I_DIRTY_INODE); 163 + WARN_ON(inode_state_read_once(inode) & I_DIRTY_INODE); 164 164 165 165 if (FUSE_IS_DAX(inode)) 166 166 dax_break_layout_final(inode); ··· 505 505 if (!inode) 506 506 return NULL; 507 507 508 - if ((inode->i_state & I_NEW)) { 508 + if ((inode_state_read_once(inode) & I_NEW)) { 509 509 inode->i_flags |= S_NOATIME; 510 510 if (!fc->writeback_cache || !S_ISREG(attr->mode)) 511 511 inode->i_flags |= S_NOCMTIME;
+1 -1
fs/hfs/btree.c
··· 42 42 tree->inode = iget_locked(sb, id); 43 43 if (!tree->inode) 44 44 goto free_tree; 45 - BUG_ON(!(tree->inode->i_state & I_NEW)); 45 + BUG_ON(!(inode_state_read_once(tree->inode) & I_NEW)); 46 46 { 47 47 struct hfs_mdb *mdb = HFS_SB(sb)->mdb; 48 48 HFS_I(tree->inode)->flags = 0;
+1 -1
fs/hfs/inode.c
··· 412 412 return NULL; 413 413 } 414 414 inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data); 415 - if (inode && (inode->i_state & I_NEW)) 415 + if (inode && (inode_state_read_once(inode) & I_NEW)) 416 416 unlock_new_inode(inode); 417 417 return inode; 418 418 }
+1 -1
fs/hfsplus/super.c
··· 65 65 inode = iget_locked(sb, ino); 66 66 if (!inode) 67 67 return ERR_PTR(-ENOMEM); 68 - if (!(inode->i_state & I_NEW)) 68 + if (!(inode_state_read_once(inode) & I_NEW)) 69 69 return inode; 70 70 71 71 atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+1 -1
fs/hostfs/hostfs_kern.c
··· 581 581 if (!inode) 582 582 return ERR_PTR(-ENOMEM); 583 583 584 - if (inode->i_state & I_NEW) { 584 + if (inode_state_read_once(inode) & I_NEW) { 585 585 unlock_new_inode(inode); 586 586 } else { 587 587 spin_lock(&inode->i_lock);
+1 -1
fs/hpfs/dir.c
··· 247 247 result = ERR_PTR(-ENOMEM); 248 248 goto bail1; 249 249 } 250 - if (result->i_state & I_NEW) { 250 + if (inode_state_read_once(result) & I_NEW) { 251 251 hpfs_init_inode(result); 252 252 if (de->directory) 253 253 hpfs_read_inode(result);
+1 -1
fs/hpfs/inode.c
··· 196 196 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); 197 197 if (parent) { 198 198 hpfs_inode->i_dirty = 0; 199 - if (parent->i_state & I_NEW) { 199 + if (inode_state_read_once(parent) & I_NEW) { 200 200 hpfs_init_inode(parent); 201 201 hpfs_read_inode(parent); 202 202 unlock_new_inode(parent);
+46 -46
fs/inode.c
··· 233 233 inode->i_sb = sb; 234 234 inode->i_blkbits = sb->s_blocksize_bits; 235 235 inode->i_flags = 0; 236 - inode->i_state = 0; 236 + inode_state_assign_raw(inode, 0); 237 237 atomic64_set(&inode->i_sequence, 0); 238 238 atomic_set(&inode->i_count, 1); 239 239 inode->i_op = &empty_iops; ··· 471 471 void inc_nlink(struct inode *inode) 472 472 { 473 473 if (unlikely(inode->i_nlink == 0)) { 474 - WARN_ON(!(inode->i_state & I_LINKABLE)); 474 + WARN_ON(!(inode_state_read_once(inode) & I_LINKABLE)); 475 475 atomic_long_dec(&inode->i_sb->s_remove_count); 476 476 } 477 477 ··· 532 532 533 533 static void __inode_add_lru(struct inode *inode, bool rotate) 534 534 { 535 - if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE)) 535 + if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE)) 536 536 return; 537 537 if (icount_read(inode)) 538 538 return; ··· 544 544 if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru)) 545 545 this_cpu_inc(nr_unused); 546 546 else if (rotate) 547 - inode->i_state |= I_REFERENCED; 547 + inode_state_set(inode, I_REFERENCED); 548 548 } 549 549 550 550 struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe, ··· 577 577 static void inode_pin_lru_isolating(struct inode *inode) 578 578 { 579 579 lockdep_assert_held(&inode->i_lock); 580 - WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE)); 581 - inode->i_state |= I_LRU_ISOLATING; 580 + WARN_ON(inode_state_read(inode) & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE)); 581 + inode_state_set(inode, I_LRU_ISOLATING); 582 582 } 583 583 584 584 static void inode_unpin_lru_isolating(struct inode *inode) 585 585 { 586 586 spin_lock(&inode->i_lock); 587 - WARN_ON(!(inode->i_state & I_LRU_ISOLATING)); 588 - inode->i_state &= ~I_LRU_ISOLATING; 587 + WARN_ON(!(inode_state_read(inode) & I_LRU_ISOLATING)); 588 + inode_state_clear(inode, I_LRU_ISOLATING); 589 589 /* Called with inode->i_lock which ensures memory ordering. 
*/ 590 590 inode_wake_up_bit(inode, __I_LRU_ISOLATING); 591 591 spin_unlock(&inode->i_lock); ··· 597 597 struct wait_queue_head *wq_head; 598 598 599 599 lockdep_assert_held(&inode->i_lock); 600 - if (!(inode->i_state & I_LRU_ISOLATING)) 600 + if (!(inode_state_read(inode) & I_LRU_ISOLATING)) 601 601 return; 602 602 603 603 wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING); ··· 607 607 * Checking I_LRU_ISOLATING with inode->i_lock guarantees 608 608 * memory ordering. 609 609 */ 610 - if (!(inode->i_state & I_LRU_ISOLATING)) 610 + if (!(inode_state_read(inode) & I_LRU_ISOLATING)) 611 611 break; 612 612 spin_unlock(&inode->i_lock); 613 613 schedule(); 614 614 spin_lock(&inode->i_lock); 615 615 } 616 616 finish_wait(wq_head, &wqe.wq_entry); 617 - WARN_ON(inode->i_state & I_LRU_ISOLATING); 617 + WARN_ON(inode_state_read(inode) & I_LRU_ISOLATING); 618 618 } 619 619 620 620 /** ··· 761 761 */ 762 762 xa_unlock_irq(&inode->i_data.i_pages); 763 763 BUG_ON(!list_empty(&inode->i_data.i_private_list)); 764 - BUG_ON(!(inode->i_state & I_FREEING)); 765 - BUG_ON(inode->i_state & I_CLEAR); 764 + BUG_ON(!(inode_state_read_once(inode) & I_FREEING)); 765 + BUG_ON(inode_state_read_once(inode) & I_CLEAR); 766 766 BUG_ON(!list_empty(&inode->i_wb_list)); 767 767 /* don't need i_lock here, no concurrent mods to i_state */ 768 - inode->i_state = I_FREEING | I_CLEAR; 768 + inode_state_assign_raw(inode, I_FREEING | I_CLEAR); 769 769 } 770 770 EXPORT_SYMBOL(clear_inode); 771 771 ··· 786 786 { 787 787 const struct super_operations *op = inode->i_sb->s_op; 788 788 789 - BUG_ON(!(inode->i_state & I_FREEING)); 789 + BUG_ON(!(inode_state_read_once(inode) & I_FREEING)); 790 790 BUG_ON(!list_empty(&inode->i_lru)); 791 791 792 792 if (!list_empty(&inode->i_io_list)) ··· 879 879 spin_unlock(&inode->i_lock); 880 880 continue; 881 881 } 882 - if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 882 + if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) { 883 883 
spin_unlock(&inode->i_lock); 884 884 continue; 885 885 } 886 886 887 - inode->i_state |= I_FREEING; 887 + inode_state_set(inode, I_FREEING); 888 888 inode_lru_list_del(inode); 889 889 spin_unlock(&inode->i_lock); 890 890 list_add(&inode->i_lru, &dispose); ··· 938 938 * sync, or the last page cache deletion will requeue them. 939 939 */ 940 940 if (icount_read(inode) || 941 - (inode->i_state & ~I_REFERENCED) || 941 + (inode_state_read(inode) & ~I_REFERENCED) || 942 942 !mapping_shrinkable(&inode->i_data)) { 943 943 list_lru_isolate(lru, &inode->i_lru); 944 944 spin_unlock(&inode->i_lock); ··· 947 947 } 948 948 949 949 /* Recently referenced inodes get one more pass */ 950 - if (inode->i_state & I_REFERENCED) { 951 - inode->i_state &= ~I_REFERENCED; 950 + if (inode_state_read(inode) & I_REFERENCED) { 951 + inode_state_clear(inode, I_REFERENCED); 952 952 spin_unlock(&inode->i_lock); 953 953 return LRU_ROTATE; 954 954 } ··· 975 975 return LRU_RETRY; 976 976 } 977 977 978 - WARN_ON(inode->i_state & I_NEW); 979 - inode->i_state |= I_FREEING; 978 + WARN_ON(inode_state_read(inode) & I_NEW); 979 + inode_state_set(inode, I_FREEING); 980 980 list_lru_isolate_move(lru, &inode->i_lru, freeable); 981 981 spin_unlock(&inode->i_lock); 982 982 ··· 1025 1025 if (!test(inode, data)) 1026 1026 continue; 1027 1027 spin_lock(&inode->i_lock); 1028 - if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 1028 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) { 1029 1029 __wait_on_freeing_inode(inode, is_inode_hash_locked); 1030 1030 goto repeat; 1031 1031 } 1032 - if (unlikely(inode->i_state & I_CREATING)) { 1032 + if (unlikely(inode_state_read(inode) & I_CREATING)) { 1033 1033 spin_unlock(&inode->i_lock); 1034 1034 rcu_read_unlock(); 1035 1035 return ERR_PTR(-ESTALE); ··· 1066 1066 if (inode->i_sb != sb) 1067 1067 continue; 1068 1068 spin_lock(&inode->i_lock); 1069 - if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 1069 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) { 1070 
1070 __wait_on_freeing_inode(inode, is_inode_hash_locked); 1071 1071 goto repeat; 1072 1072 } 1073 - if (unlikely(inode->i_state & I_CREATING)) { 1073 + if (unlikely(inode_state_read(inode) & I_CREATING)) { 1074 1074 spin_unlock(&inode->i_lock); 1075 1075 rcu_read_unlock(); 1076 1076 return ERR_PTR(-ESTALE); ··· 1180 1180 { 1181 1181 lockdep_annotate_inode_mutex_key(inode); 1182 1182 spin_lock(&inode->i_lock); 1183 - WARN_ON(!(inode->i_state & I_NEW)); 1183 + WARN_ON(!(inode_state_read(inode) & I_NEW)); 1184 1184 /* 1185 1185 * Pairs with smp_rmb in wait_on_inode(). 1186 1186 */ 1187 1187 smp_wmb(); 1188 - inode->i_state &= ~I_NEW & ~I_CREATING; 1188 + inode_state_clear(inode, I_NEW | I_CREATING); 1189 1189 /* 1190 1190 * Pairs with the barrier in prepare_to_wait_event() to make sure 1191 1191 * ___wait_var_event() either sees the bit cleared or ··· 1201 1201 { 1202 1202 lockdep_annotate_inode_mutex_key(inode); 1203 1203 spin_lock(&inode->i_lock); 1204 - WARN_ON(!(inode->i_state & I_NEW)); 1204 + WARN_ON(!(inode_state_read(inode) & I_NEW)); 1205 1205 /* 1206 1206 * Pairs with smp_rmb in wait_on_inode(). 
1207 1207 */ 1208 1208 smp_wmb(); 1209 - inode->i_state &= ~I_NEW; 1209 + inode_state_clear(inode, I_NEW); 1210 1210 /* 1211 1211 * Pairs with the barrier in prepare_to_wait_event() to make sure 1212 1212 * ___wait_var_event() either sees the bit cleared or ··· 1318 1318 * caller is responsible for filling in the contents 1319 1319 */ 1320 1320 spin_lock(&inode->i_lock); 1321 - inode->i_state |= I_NEW; 1321 + inode_state_set(inode, I_NEW); 1322 1322 hlist_add_head_rcu(&inode->i_hash, head); 1323 1323 spin_unlock(&inode->i_lock); 1324 1324 ··· 1460 1460 if (!old) { 1461 1461 inode->i_ino = ino; 1462 1462 spin_lock(&inode->i_lock); 1463 - inode->i_state = I_NEW; 1463 + inode_state_assign(inode, I_NEW); 1464 1464 hlist_add_head_rcu(&inode->i_hash, head); 1465 1465 spin_unlock(&inode->i_lock); 1466 1466 spin_unlock(&inode_hash_lock); ··· 1553 1553 struct inode *igrab(struct inode *inode) 1554 1554 { 1555 1555 spin_lock(&inode->i_lock); 1556 - if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) { 1556 + if (!(inode_state_read(inode) & (I_FREEING | I_WILL_FREE))) { 1557 1557 __iget(inode); 1558 1558 spin_unlock(&inode->i_lock); 1559 1559 } else { ··· 1749 1749 1750 1750 hlist_for_each_entry_rcu(inode, head, i_hash) { 1751 1751 if (inode->i_sb == sb && 1752 - !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) && 1752 + !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE)) && 1753 1753 test(inode, data)) 1754 1754 return inode; 1755 1755 } ··· 1788 1788 hlist_for_each_entry_rcu(inode, head, i_hash) { 1789 1789 if (inode->i_ino == ino && 1790 1790 inode->i_sb == sb && 1791 - !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE))) 1791 + !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE))) 1792 1792 return inode; 1793 1793 } 1794 1794 return NULL; ··· 1812 1812 if (old->i_sb != sb) 1813 1813 continue; 1814 1814 spin_lock(&old->i_lock); 1815 - if (old->i_state & (I_FREEING|I_WILL_FREE)) { 1815 + if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) { 1816 
1816 spin_unlock(&old->i_lock); 1817 1817 continue; 1818 1818 } ··· 1820 1820 } 1821 1821 if (likely(!old)) { 1822 1822 spin_lock(&inode->i_lock); 1823 - inode->i_state |= I_NEW | I_CREATING; 1823 + inode_state_set(inode, I_NEW | I_CREATING); 1824 1824 hlist_add_head_rcu(&inode->i_hash, head); 1825 1825 spin_unlock(&inode->i_lock); 1826 1826 spin_unlock(&inode_hash_lock); 1827 1827 return 0; 1828 1828 } 1829 - if (unlikely(old->i_state & I_CREATING)) { 1829 + if (unlikely(inode_state_read(old) & I_CREATING)) { 1830 1830 spin_unlock(&old->i_lock); 1831 1831 spin_unlock(&inode_hash_lock); 1832 1832 return -EBUSY; ··· 1851 1851 1852 1852 might_sleep(); 1853 1853 1854 - inode->i_state |= I_CREATING; 1854 + inode_state_set_raw(inode, I_CREATING); 1855 1855 old = inode_insert5(inode, hashval, test, NULL, data); 1856 1856 1857 1857 if (old != inode) { ··· 1886 1886 unsigned long state; 1887 1887 int drop; 1888 1888 1889 - WARN_ON(inode->i_state & I_NEW); 1889 + WARN_ON(inode_state_read(inode) & I_NEW); 1890 1890 VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode); 1891 1891 1892 1892 if (op->drop_inode) ··· 1895 1895 drop = inode_generic_drop(inode); 1896 1896 1897 1897 if (!drop && 1898 - !(inode->i_state & I_DONTCACHE) && 1898 + !(inode_state_read(inode) & I_DONTCACHE) && 1899 1899 (sb->s_flags & SB_ACTIVE)) { 1900 1900 __inode_add_lru(inode, true); 1901 1901 spin_unlock(&inode->i_lock); ··· 1908 1908 */ 1909 1909 VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode); 1910 1910 1911 - state = inode->i_state; 1911 + state = inode_state_read(inode); 1912 1912 if (!drop) { 1913 1913 WRITE_ONCE(inode->i_state, state | I_WILL_FREE); 1914 1914 spin_unlock(&inode->i_lock); ··· 1916 1916 write_inode_now(inode, 1); 1917 1917 1918 1918 spin_lock(&inode->i_lock); 1919 - state = inode->i_state; 1919 + state = inode_state_read(inode); 1920 1920 WARN_ON(state & I_NEW); 1921 1921 state &= ~I_WILL_FREE; 1922 1922 } ··· 1946 1946 1947 1947 retry: 1948 1948 
lockdep_assert_not_held(&inode->i_lock); 1949 - VFS_BUG_ON_INODE(inode->i_state & I_CLEAR, inode); 1949 + VFS_BUG_ON_INODE(inode_state_read_once(inode) & I_CLEAR, inode); 1950 1950 /* 1951 1951 * Note this assert is technically racy as if the count is bogusly 1952 1952 * equal to one, then two CPUs racing to further drop it can both ··· 1957 1957 if (atomic_add_unless(&inode->i_count, -1, 1)) 1958 1958 return; 1959 1959 1960 - if ((inode->i_state & I_DIRTY_TIME) && inode->i_nlink) { 1960 + if ((inode_state_read_once(inode) & I_DIRTY_TIME) && inode->i_nlink) { 1961 1961 trace_writeback_lazytime_iput(inode); 1962 1962 mark_inode_dirty_sync(inode); 1963 1963 goto retry; 1964 1964 } 1965 1965 1966 1966 spin_lock(&inode->i_lock); 1967 - if (unlikely((inode->i_state & I_DIRTY_TIME) && inode->i_nlink)) { 1967 + if (unlikely((inode_state_read(inode) & I_DIRTY_TIME) && inode->i_nlink)) { 1968 1968 spin_unlock(&inode->i_lock); 1969 1969 goto retry; 1970 1970 }
+1 -1
fs/isofs/inode.c
··· 1515 1515 if (!inode) 1516 1516 return ERR_PTR(-ENOMEM); 1517 1517 1518 - if (inode->i_state & I_NEW) { 1518 + if (inode_state_read_once(inode) & I_NEW) { 1519 1519 ret = isofs_read_inode(inode, relocated); 1520 1520 if (ret < 0) { 1521 1521 iget_failed(inode);
+2 -2
fs/jffs2/fs.c
··· 265 265 inode = iget_locked(sb, ino); 266 266 if (!inode) 267 267 return ERR_PTR(-ENOMEM); 268 - if (!(inode->i_state & I_NEW)) 268 + if (!(inode_state_read_once(inode) & I_NEW)) 269 269 return inode; 270 270 271 271 f = JFFS2_INODE_INFO(inode); ··· 373 373 { 374 374 struct iattr iattr; 375 375 376 - if (!(inode->i_state & I_DIRTY_DATASYNC)) { 376 + if (!(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) { 377 377 jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n", 378 378 __func__, inode->i_ino); 379 379 return;
+2 -2
fs/jfs/file.c
··· 26 26 return rc; 27 27 28 28 inode_lock(inode); 29 - if (!(inode->i_state & I_DIRTY_ALL) || 30 - (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) { 29 + if (!(inode_state_read_once(inode) & I_DIRTY_ALL) || 30 + (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))) { 31 31 /* Make sure committed changes hit the disk */ 32 32 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1); 33 33 inode_unlock(inode);
+1 -1
fs/jfs/inode.c
··· 29 29 inode = iget_locked(sb, ino); 30 30 if (!inode) 31 31 return ERR_PTR(-ENOMEM); 32 - if (!(inode->i_state & I_NEW)) 32 + if (!(inode_state_read_once(inode) & I_NEW)) 33 33 return inode; 34 34 35 35 ret = diRead(inode);
+1 -1
fs/jfs/jfs_txnmgr.c
··· 1287 1287 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done. 1288 1288 * Joern 1289 1289 */ 1290 - if (tblk->u.ip->i_state & I_SYNC) 1290 + if (inode_state_read_once(tblk->u.ip) & I_SYNC) 1291 1291 tblk->xflag &= ~COMMIT_LAZY; 1292 1292 } 1293 1293
+1 -1
fs/kernfs/inode.c
··· 251 251 struct inode *inode; 252 252 253 253 inode = iget_locked(sb, kernfs_ino(kn)); 254 - if (inode && (inode->i_state & I_NEW)) 254 + if (inode && (inode_state_read_once(inode) & I_NEW)) 255 255 kernfs_init_inode(kn, inode); 256 256 257 257 return inode;
+3 -3
fs/libfs.c
··· 1542 1542 1543 1543 inode_lock(inode); 1544 1544 ret = sync_mapping_buffers(inode->i_mapping); 1545 - if (!(inode->i_state & I_DIRTY_ALL)) 1545 + if (!(inode_state_read_once(inode) & I_DIRTY_ALL)) 1546 1546 goto out; 1547 - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 1547 + if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) 1548 1548 goto out; 1549 1549 1550 1550 err = sync_inode_metadata(inode, 1); ··· 1664 1664 * list because mark_inode_dirty() will think 1665 1665 * that it already _is_ on the dirty list. 1666 1666 */ 1667 - inode->i_state = I_DIRTY; 1667 + inode_state_assign_raw(inode, I_DIRTY); 1668 1668 /* 1669 1669 * Historically anonymous inodes don't have a type at all and 1670 1670 * userspace has come to rely on this.
+1 -1
fs/minix/inode.c
··· 589 589 inode = iget_locked(sb, ino); 590 590 if (!inode) 591 591 return ERR_PTR(-ENOMEM); 592 - if (!(inode->i_state & I_NEW)) 592 + if (!(inode_state_read_once(inode) & I_NEW)) 593 593 return inode; 594 594 595 595 if (INODE_VERSION(inode) == MINIX_V1)
+4 -4
fs/namei.c
··· 4036 4036 inode = file_inode(file); 4037 4037 if (!(open_flag & O_EXCL)) { 4038 4038 spin_lock(&inode->i_lock); 4039 - inode->i_state |= I_LINKABLE; 4039 + inode_state_set(inode, I_LINKABLE); 4040 4040 spin_unlock(&inode->i_lock); 4041 4041 } 4042 4042 security_inode_post_create_tmpfile(idmap, inode); ··· 4931 4931 4932 4932 inode_lock(inode); 4933 4933 /* Make sure we don't allow creating hardlink to an unlinked file */ 4934 - if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) 4934 + if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE)) 4935 4935 error = -ENOENT; 4936 4936 else if (max_links && inode->i_nlink >= max_links) 4937 4937 error = -EMLINK; ··· 4941 4941 error = dir->i_op->link(old_dentry, dir, new_dentry); 4942 4942 } 4943 4943 4944 - if (!error && (inode->i_state & I_LINKABLE)) { 4944 + if (!error && (inode_state_read_once(inode) & I_LINKABLE)) { 4945 4945 spin_lock(&inode->i_lock); 4946 - inode->i_state &= ~I_LINKABLE; 4946 + inode_state_clear(inode, I_LINKABLE); 4947 4947 spin_unlock(&inode->i_lock); 4948 4948 } 4949 4949 inode_unlock(inode);
+4 -4
fs/netfs/misc.c
··· 147 147 if (!fscache_cookie_valid(cookie)) 148 148 return true; 149 149 150 - if (!(inode->i_state & I_PINNING_NETFS_WB)) { 150 + if (!(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) { 151 151 spin_lock(&inode->i_lock); 152 - if (!(inode->i_state & I_PINNING_NETFS_WB)) { 153 - inode->i_state |= I_PINNING_NETFS_WB; 152 + if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) { 153 + inode_state_set(inode, I_PINNING_NETFS_WB); 154 154 need_use = true; 155 155 } 156 156 spin_unlock(&inode->i_lock); ··· 192 192 { 193 193 struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode)); 194 194 195 - if (inode->i_state & I_PINNING_NETFS_WB) { 195 + if (inode_state_read_once(inode) & I_PINNING_NETFS_WB) { 196 196 loff_t i_size = i_size_read(inode); 197 197 fscache_unuse_cookie(cookie, aux, &i_size); 198 198 }
+3 -3
fs/netfs/read_single.c
··· 36 36 37 37 mark_inode_dirty(inode); 38 38 39 - if (caching && !(inode->i_state & I_PINNING_NETFS_WB)) { 39 + if (caching && !(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) { 40 40 bool need_use = false; 41 41 42 42 spin_lock(&inode->i_lock); 43 - if (!(inode->i_state & I_PINNING_NETFS_WB)) { 44 - inode->i_state |= I_PINNING_NETFS_WB; 43 + if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) { 44 + inode_state_set(inode, I_PINNING_NETFS_WB); 45 45 need_use = true; 46 46 } 47 47 spin_unlock(&inode->i_lock);
+1 -1
fs/nfs/inode.c
··· 475 475 goto out_no_inode; 476 476 } 477 477 478 - if (inode->i_state & I_NEW) { 478 + if (inode_state_read_once(inode) & I_NEW) { 479 479 struct nfs_inode *nfsi = NFS_I(inode); 480 480 unsigned long now = jiffies; 481 481
+1 -1
fs/nfs/pnfs.c
··· 317 317 WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n"); 318 318 pnfs_detach_layout_hdr(lo); 319 319 /* Notify pnfs_destroy_layout_final() that we're done */ 320 - if (inode->i_state & (I_FREEING | I_CLEAR)) 320 + if (inode_state_read(inode) & (I_FREEING | I_CLEAR)) 321 321 wake_up_var_locked(lo, &inode->i_lock); 322 322 spin_unlock(&inode->i_lock); 323 323 pnfs_free_layout_hdr(lo);
+1 -1
fs/nfsd/vfs.c
··· 1159 1159 dprintk("nfsd: write resume %d\n", task_pid_nr(current)); 1160 1160 } 1161 1161 1162 - if (inode->i_state & I_DIRTY) { 1162 + if (inode_state_read_once(inode) & I_DIRTY) { 1163 1163 dprintk("nfsd: write sync %d\n", task_pid_nr(current)); 1164 1164 err = vfs_fsync(file, 0); 1165 1165 }
+1 -1
fs/notify/fsnotify.c
··· 52 52 * the inode cannot have any associated watches. 53 53 */ 54 54 spin_lock(&inode->i_lock); 55 - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { 55 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { 56 56 spin_unlock(&inode->i_lock); 57 57 continue; 58 58 }
+1 -1
fs/ntfs3/inode.c
··· 537 537 return ERR_PTR(-ENOMEM); 538 538 539 539 /* If this is a freshly allocated inode, need to read it now. */ 540 - if (inode->i_state & I_NEW) 540 + if (inode_state_read_once(inode) & I_NEW) 541 541 inode = ntfs_read_mft(inode, name, ref); 542 542 else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) { 543 543 /*
+1 -1
fs/ocfs2/dlmglue.c
··· 2487 2487 * which hasn't been populated yet, so clear the refresh flag 2488 2488 * and let the caller handle it. 2489 2489 */ 2490 - if (inode->i_state & I_NEW) { 2490 + if (inode_state_read_once(inode) & I_NEW) { 2491 2491 status = 0; 2492 2492 if (lockres) 2493 2493 ocfs2_complete_lock_res_refresh(lockres, 0);
+2 -2
fs/ocfs2/inode.c
··· 152 152 mlog_errno(PTR_ERR(inode)); 153 153 goto bail; 154 154 } 155 - trace_ocfs2_iget5_locked(inode->i_state); 156 - if (inode->i_state & I_NEW) { 155 + trace_ocfs2_iget5_locked(inode_state_read_once(inode)); 156 + if (inode_state_read_once(inode) & I_NEW) { 157 157 rc = ocfs2_read_locked_inode(inode, &args); 158 158 unlock_new_inode(inode); 159 159 }
+1 -1
fs/omfs/inode.c
··· 212 212 inode = iget_locked(sb, ino); 213 213 if (!inode) 214 214 return ERR_PTR(-ENOMEM); 215 - if (!(inode->i_state & I_NEW)) 215 + if (!(inode_state_read_once(inode) & I_NEW)) 216 216 return inode; 217 217 218 218 bh = omfs_bread(inode->i_sb, ino);
+1 -1
fs/openpromfs/inode.c
··· 236 236 mutex_unlock(&op_mutex); 237 237 if (IS_ERR(inode)) 238 238 return ERR_CAST(inode); 239 - if (inode->i_state & I_NEW) { 239 + if (inode_state_read_once(inode) & I_NEW) { 240 240 simple_inode_init_ts(inode); 241 241 ent_oi = OP_I(inode); 242 242 ent_oi->type = ent_type;
+1 -1
fs/orangefs/inode.c
··· 1041 1041 if (!inode) 1042 1042 return ERR_PTR(-ENOMEM); 1043 1043 1044 - if (!(inode->i_state & I_NEW)) 1044 + if (!(inode_state_read_once(inode) & I_NEW)) 1045 1045 return inode; 1046 1046 1047 1047 error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
+3 -3
fs/orangefs/orangefs-utils.c
··· 247 247 spin_lock(&inode->i_lock); 248 248 /* Must have all the attributes in the mask and be within cache time. */ 249 249 if ((!flags && time_before(jiffies, orangefs_inode->getattr_time)) || 250 - orangefs_inode->attr_valid || inode->i_state & I_DIRTY_PAGES) { 250 + orangefs_inode->attr_valid || inode_state_read(inode) & I_DIRTY_PAGES) { 251 251 if (orangefs_inode->attr_valid) { 252 252 spin_unlock(&inode->i_lock); 253 253 write_inode_now(inode, 1); ··· 281 281 spin_lock(&inode->i_lock); 282 282 /* Must have all the attributes in the mask and be within cache time. */ 283 283 if ((!flags && time_before(jiffies, orangefs_inode->getattr_time)) || 284 - orangefs_inode->attr_valid || inode->i_state & I_DIRTY_PAGES) { 284 + orangefs_inode->attr_valid || inode_state_read(inode) & I_DIRTY_PAGES) { 285 285 if (orangefs_inode->attr_valid) { 286 286 spin_unlock(&inode->i_lock); 287 287 write_inode_now(inode, 1); 288 288 goto again2; 289 289 } 290 - if (inode->i_state & I_DIRTY_PAGES) { 290 + if (inode_state_read(inode) & I_DIRTY_PAGES) { 291 291 ret = 0; 292 292 goto out_unlock; 293 293 }
+1 -1
fs/pipe.c
··· 908 908 * list because "mark_inode_dirty()" will think 909 909 * that it already _is_ on the dirty list. 910 910 */ 911 - inode->i_state = I_DIRTY; 911 + inode_state_assign_raw(inode, I_DIRTY); 912 912 inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; 913 913 inode->i_uid = current_fsuid(); 914 914 inode->i_gid = current_fsgid();
+1 -1
fs/qnx4/inode.c
··· 290 290 inode = iget_locked(sb, ino); 291 291 if (!inode) 292 292 return ERR_PTR(-ENOMEM); 293 - if (!(inode->i_state & I_NEW)) 293 + if (!(inode_state_read_once(inode) & I_NEW)) 294 294 return inode; 295 295 296 296 qnx4_inode = qnx4_raw_inode(inode);
+1 -1
fs/qnx6/inode.c
··· 521 521 inode = iget_locked(sb, ino); 522 522 if (!inode) 523 523 return ERR_PTR(-ENOMEM); 524 - if (!(inode->i_state & I_NEW)) 524 + if (!(inode_state_read_once(inode) & I_NEW)) 525 525 return inode; 526 526 527 527 ei = QNX6_I(inode);
+1 -1
fs/quota/dquot.c
··· 1033 1033 spin_lock(&sb->s_inode_list_lock); 1034 1034 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 1035 1035 spin_lock(&inode->i_lock); 1036 - if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || 1036 + if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) || 1037 1037 !atomic_read(&inode->i_writecount) || 1038 1038 !dqinit_needed(inode, type)) { 1039 1039 spin_unlock(&inode->i_lock);
+1 -1
fs/romfs/super.c
··· 302 302 if (!i) 303 303 return ERR_PTR(-ENOMEM); 304 304 305 - if (!(i->i_state & I_NEW)) 305 + if (!(inode_state_read_once(i) & I_NEW)) 306 306 return i; 307 307 308 308 /* precalculate the data offset */
+1 -1
fs/squashfs/inode.c
··· 86 86 87 87 if (!inode) 88 88 return ERR_PTR(-ENOMEM); 89 - if (!(inode->i_state & I_NEW)) 89 + if (!(inode_state_read_once(inode) & I_NEW)) 90 90 return inode; 91 91 92 92 err = squashfs_read_inode(inode, ino);
+1 -1
fs/sync.c
··· 182 182 183 183 if (!file->f_op->fsync) 184 184 return -EINVAL; 185 - if (!datasync && (inode->i_state & I_DIRTY_TIME)) 185 + if (!datasync && (inode_state_read_once(inode) & I_DIRTY_TIME)) 186 186 mark_inode_dirty_sync(inode); 187 187 return file->f_op->fsync(file, start, end, datasync); 188 188 }
+1 -1
fs/ubifs/file.c
··· 1323 1323 inode_lock(inode); 1324 1324 1325 1325 /* Synchronize the inode unless this is a 'datasync()' call. */ 1326 - if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) { 1326 + if (!datasync || (inode_state_read_once(inode) & I_DIRTY_DATASYNC)) { 1327 1327 err = inode->i_sb->s_op->write_inode(inode, NULL); 1328 1328 if (err) 1329 1329 goto out;
+1 -1
fs/ubifs/super.c
··· 114 114 inode = iget_locked(sb, inum); 115 115 if (!inode) 116 116 return ERR_PTR(-ENOMEM); 117 - if (!(inode->i_state & I_NEW)) 117 + if (!(inode_state_read_once(inode) & I_NEW)) 118 118 return inode; 119 119 ui = ubifs_inode(inode); 120 120
+1 -1
fs/udf/inode.c
··· 1962 1962 if (!inode) 1963 1963 return ERR_PTR(-ENOMEM); 1964 1964 1965 - if (!(inode->i_state & I_NEW)) { 1965 + if (!(inode_state_read_once(inode) & I_NEW)) { 1966 1966 if (UDF_I(inode)->i_hidden != hidden_inode) { 1967 1967 iput(inode); 1968 1968 return ERR_PTR(-EFSCORRUPTED);
+1 -1
fs/ufs/inode.c
··· 655 655 inode = iget_locked(sb, ino); 656 656 if (!inode) 657 657 return ERR_PTR(-ENOMEM); 658 - if (!(inode->i_state & I_NEW)) 658 + if (!(inode_state_read_once(inode) & I_NEW)) 659 659 return inode; 660 660 661 661 ufsi = UFS_I(inode);
+2 -2
fs/zonefs/super.c
··· 644 644 inode = iget_locked(sb, ino); 645 645 if (!inode) 646 646 return ERR_PTR(-ENOMEM); 647 - if (!(inode->i_state & I_NEW)) { 647 + if (!(inode_state_read_once(inode) & I_NEW)) { 648 648 WARN_ON_ONCE(inode->i_private != z); 649 649 return inode; 650 650 } ··· 683 683 inode = iget_locked(sb, ino); 684 684 if (!inode) 685 685 return ERR_PTR(-ENOMEM); 686 - if (!(inode->i_state & I_NEW)) 686 + if (!(inode_state_read_once(inode) & I_NEW)) 687 687 return inode; 688 688 689 689 inode->i_ino = ino;
+1 -1
mm/backing-dev.c
··· 72 72 list_for_each_entry(inode, &wb->b_more_io, i_io_list) 73 73 stats->nr_more_io++; 74 74 list_for_each_entry(inode, &wb->b_dirty_time, i_io_list) 75 - if (inode->i_state & I_DIRTY_TIME) 75 + if (inode_state_read_once(inode) & I_DIRTY_TIME) 76 76 stats->nr_dirty_time++; 77 77 spin_unlock(&wb->list_lock); 78 78
+1 -1
security/landlock/fs.c
··· 1296 1296 * second call to iput() for the same Landlock object. Also 1297 1297 * checks I_NEW because such inode cannot be tied to an object. 1298 1298 */ 1299 - if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { 1299 + if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) { 1300 1300 spin_unlock(&inode->i_lock); 1301 1301 continue; 1302 1302 }