Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

kill I_LOCK

After I_SYNC was split from I_LOCK the leftover is always used together with
I_NEW and thus superfluous.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

authored by

Christoph Hellwig and committed by
Al Viro
eaff8079 7a0ad10c

+39 -44
+1 -1
fs/gfs2/inode.c
··· 125 125 * directory entry when gfs2_inode_lookup() is invoked. Part of the code 126 126 * segment inside gfs2_inode_lookup code needs to get moved around. 127 127 * 128 - * Clean up I_LOCK and I_NEW as well. 128 + * Clears I_NEW as well. 129 129 **/ 130 130 131 131 void gfs2_set_iop(struct inode *inode)
+13 -13
fs/inode.c
··· 113 113 * Prevent speculative execution through spin_unlock(&inode_lock); 114 114 */ 115 115 smp_mb(); 116 - wake_up_bit(&inode->i_state, __I_LOCK); 116 + wake_up_bit(&inode->i_state, __I_NEW); 117 117 } 118 118 119 119 /** ··· 690 690 } 691 691 #endif 692 692 /* 693 - * This is special! We do not need the spinlock when clearing I_LOCK, 693 + * This is special! We do not need the spinlock when clearing I_NEW, 694 694 * because we're guaranteed that nobody else tries to do anything about 695 695 * the state of the inode when it is locked, as we just created it (so 696 - * there can be no old holders that haven't tested I_LOCK). 696 + * there can be no old holders that haven't tested I_NEW). 697 697 * However we must emit the memory barrier so that other CPUs reliably 698 - * see the clearing of I_LOCK after the other inode initialisation has 698 + * see the clearing of I_NEW after the other inode initialisation has 699 699 * completed. 700 700 */ 701 701 smp_mb(); 702 - WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW)); 703 - inode->i_state &= ~(I_LOCK|I_NEW); 702 + WARN_ON(!(inode->i_state & I_NEW)); 703 + inode->i_state &= ~I_NEW; 704 704 wake_up_inode(inode); 705 705 } 706 706 EXPORT_SYMBOL(unlock_new_inode); ··· 731 731 goto set_failed; 732 732 733 733 __inode_add_to_lists(sb, head, inode); 734 - inode->i_state = I_LOCK|I_NEW; 734 + inode->i_state = I_NEW; 735 735 spin_unlock(&inode_lock); 736 736 737 737 /* Return the locked inode with I_NEW set, the ··· 778 778 if (!old) { 779 779 inode->i_ino = ino; 780 780 __inode_add_to_lists(sb, head, inode); 781 - inode->i_state = I_LOCK|I_NEW; 781 + inode->i_state = I_NEW; 782 782 spin_unlock(&inode_lock); 783 783 784 784 /* Return the locked inode with I_NEW set, the ··· 1083 1083 ino_t ino = inode->i_ino; 1084 1084 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1085 1085 1086 - inode->i_state |= I_LOCK|I_NEW; 1086 + inode->i_state |= I_NEW; 1087 1087 while (1) { 1088 1088 struct hlist_node 
*node; 1089 1089 struct inode *old = NULL; ··· 1120 1120 struct super_block *sb = inode->i_sb; 1121 1121 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1122 1122 1123 - inode->i_state |= I_LOCK|I_NEW; 1123 + inode->i_state |= I_NEW; 1124 1124 1125 1125 while (1) { 1126 1126 struct hlist_node *node; ··· 1510 1510 * until the deletion _might_ have completed. Callers are responsible 1511 1511 * to recheck inode state. 1512 1512 * 1513 - * It doesn't matter if I_LOCK is not set initially, a call to 1513 + * It doesn't matter if I_NEW is not set initially, a call to 1514 1514 * wake_up_inode() after removing from the hash list will DTRT. 1515 1515 * 1516 1516 * This is called with inode_lock held. ··· 1518 1518 static void __wait_on_freeing_inode(struct inode *inode) 1519 1519 { 1520 1520 wait_queue_head_t *wq; 1521 - DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK); 1522 - wq = bit_waitqueue(&inode->i_state, __I_LOCK); 1521 + DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); 1522 + wq = bit_waitqueue(&inode->i_state, __I_NEW); 1523 1523 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 1524 1524 spin_unlock(&inode_lock); 1525 1525 schedule();
+1 -1
fs/jfs/jfs_txnmgr.c
··· 1292 1292 */ 1293 1293 /* 1294 1294 * I believe this code is no longer needed. Splitting I_LOCK 1295 - * into two bits, I_LOCK and I_SYNC should prevent this 1295 + * into two bits, I_NEW and I_SYNC should prevent this 1296 1296 * deadlock as well. But since I don't have a JFS testload 1297 1297 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done. 1298 1298 * Joern
+3 -3
fs/ntfs/inode.c
··· 530 530 * the ntfs inode. 531 531 * 532 532 * Q: What locks are held when the function is called? 533 - * A: i_state has I_LOCK set, hence the inode is locked, also 533 + * A: i_state has I_NEW set, hence the inode is locked, also 534 534 * i_count is set to 1, so it is not going to go away 535 535 * i_flags is set to 0 and we have no business touching it. Only an ioctl() 536 536 * is allowed to write to them. We should of course be honouring them but ··· 1207 1207 * necessary fields in @vi as well as initializing the ntfs inode. 1208 1208 * 1209 1209 * Q: What locks are held when the function is called? 1210 - * A: i_state has I_LOCK set, hence the inode is locked, also 1210 + * A: i_state has I_NEW set, hence the inode is locked, also 1211 1211 * i_count is set to 1, so it is not going to go away 1212 1212 * 1213 1213 * Return 0 on success and -errno on error. In the error case, the inode will ··· 1474 1474 * normal directory inodes. 1475 1475 * 1476 1476 * Q: What locks are held when the function is called? 1477 - * A: i_state has I_LOCK set, hence the inode is locked, also 1477 + * A: i_state has I_NEW set, hence the inode is locked, also 1478 1478 * i_count is set to 1, so it is not going to go away 1479 1479 * 1480 1480 * Return 0 on success and -errno on error. In the error case, the inode will
+1 -1
fs/ubifs/file.c
··· 45 45 * 46 46 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the 47 47 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read -> 48 - * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not 48 + * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not 49 49 * set as well. However, UBIFS disables readahead. 50 50 */ 51 51
+1 -1
fs/xfs/linux-2.6/xfs_iops.c
··· 794 794 struct inode *inode = &ip->i_vnode; 795 795 796 796 inode->i_ino = ip->i_ino; 797 - inode->i_state = I_NEW|I_LOCK; 797 + inode->i_state = I_NEW; 798 798 inode_add_to_lists(ip->i_mount->m_super, inode); 799 799 800 800 inode->i_mode = ip->i_d.di_mode;
+2 -2
fs/xfs/xfs_iget.c
··· 91 91 ip->i_new_size = 0; 92 92 93 93 /* prevent anyone from using this yet */ 94 - VFS_I(ip)->i_state = I_NEW|I_LOCK; 94 + VFS_I(ip)->i_state = I_NEW; 95 95 96 96 return ip; 97 97 } ··· 217 217 trace_xfs_iget_reclaim(ip); 218 218 goto out_error; 219 219 } 220 - inode->i_state = I_LOCK|I_NEW; 220 + inode->i_state = I_NEW; 221 221 } else { 222 222 /* If the VFS inode is being torn down, pause and try again. */ 223 223 if (!igrab(inode)) {
+16 -20
include/linux/fs.h
··· 1587 1587 * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at 1588 1588 * various stages of removing an inode. 1589 1589 * 1590 - * Two bits are used for locking and completion notification, I_LOCK and I_SYNC. 1590 + * Two bits are used for locking and completion notification, I_NEW and I_SYNC. 1591 1591 * 1592 1592 * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on 1593 1593 * fdatasync(). i_atime is the usual cause. ··· 1596 1596 * don't have to write inode on fdatasync() when only 1597 1597 * mtime has changed in it. 1598 1598 * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. 1599 - * I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both 1600 - * are cleared by unlock_new_inode(), called from iget(). 1599 + * I_NEW Serves as both a mutex and completion notification. 1600 + * New inodes set I_NEW. If two processes both create 1601 + * the same inode, one of them will release its inode and 1602 + * wait for I_NEW to be released before returning. 1603 + * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can 1604 + * also cause waiting on I_NEW, without I_NEW actually 1605 + * being set. find_inode() uses this to prevent returning 1606 + * nearly-dead inodes. 1601 1607 * I_WILL_FREE Must be set when calling write_inode_now() if i_count 1602 1608 * is zero. I_FREEING must be set when I_WILL_FREE is 1603 1609 * cleared. ··· 1617 1611 * prohibited for many purposes. iget() must wait for 1618 1612 * the inode to be completely released, then create it 1619 1613 * anew. Other functions will just ignore such inodes, 1620 - * if appropriate. I_LOCK is used for waiting. 1614 + * if appropriate. I_NEW is used for waiting. 1621 1615 * 1622 - * I_LOCK Serves as both a mutex and completion notification. 1623 - * New inodes set I_LOCK. If two processes both create 1624 - * the same inode, one of them will release its inode and 1625 - * wait for I_LOCK to be released before returning. 
1626 - * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can 1627 - * also cause waiting on I_LOCK, without I_LOCK actually 1628 - * being set. find_inode() uses this to prevent returning 1629 - * nearly-dead inodes. 1630 - * I_SYNC Similar to I_LOCK, but limited in scope to writeback 1631 - * of inode dirty data. Having a separate lock for this 1632 - * purpose reduces latency and prevents some filesystem- 1633 - * specific deadlocks. 1616 + * I_SYNC Synchonized write of dirty inode data. The bits is 1617 + * set during data writeback, and cleared with a wakeup 1618 + * on the bit address once it is done. 1634 1619 * 1635 1620 * Q: What is the difference between I_WILL_FREE and I_FREEING? 1636 1621 * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on ··· 1630 1633 #define I_DIRTY_SYNC 1 1631 1634 #define I_DIRTY_DATASYNC 2 1632 1635 #define I_DIRTY_PAGES 4 1633 - #define I_NEW 8 1636 + #define __I_NEW 3 1637 + #define I_NEW (1 << __I_NEW) 1634 1638 #define I_WILL_FREE 16 1635 1639 #define I_FREEING 32 1636 1640 #define I_CLEAR 64 1637 - #define __I_LOCK 7 1638 - #define I_LOCK (1 << __I_LOCK) 1639 - #define __I_SYNC 8 1641 + #define __I_SYNC 7 1640 1642 #define I_SYNC (1 << __I_SYNC) 1641 1643 1642 1644 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+1 -2
include/linux/writeback.h
··· 79 79 static inline void wait_on_inode(struct inode *inode) 80 80 { 81 81 might_sleep(); 82 - wait_on_bit(&inode->i_state, __I_LOCK, inode_wait, 83 - TASK_UNINTERRUPTIBLE); 82 + wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); 84 83 } 85 84 static inline void inode_sync_wait(struct inode *inode) 86 85 {