Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

inode: port __I_NEW to var event

Port the __I_NEW mechanism to use the new var event mechanism.

Link: https://lore.kernel.org/r/20240823-work-i_state-v3-4-5cd5fd207a57@kernel.org
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>

+38 -14
+6 -4
fs/bcachefs/fs.c
··· 1644 1644 break; 1645 1645 } 1646 1646 } else if (clean_pass && this_pass_clean) { 1647 - wait_queue_head_t *wq = bit_waitqueue(&inode->v.i_state, __I_NEW); 1648 - DEFINE_WAIT_BIT(wait, &inode->v.i_state, __I_NEW); 1647 + struct wait_bit_queue_entry wqe; 1648 + struct wait_queue_head *wq_head; 1649 1649 1650 - prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 1650 + wq_head = inode_bit_waitqueue(&wqe, &inode->v, __I_NEW); 1651 + prepare_to_wait_event(wq_head, &wqe.wq_entry, 1652 + TASK_UNINTERRUPTIBLE); 1651 1653 mutex_unlock(&c->vfs_inodes_lock); 1652 1654 1653 1655 schedule(); 1654 - finish_wait(wq, &wait.wq_entry); 1656 + finish_wait(wq_head, &wqe.wq_entry); 1655 1657 goto again; 1656 1658 } 1657 1659 }
+6 -1
fs/dcache.c
··· 1908 1908 __d_instantiate(entry, inode); 1909 1909 WARN_ON(!(inode->i_state & I_NEW)); 1910 1910 inode->i_state &= ~I_NEW & ~I_CREATING; 1911 + /* 1912 + * Pairs with the barrier in prepare_to_wait_event() to make sure 1913 + * ___wait_var_event() either sees the bit cleared or 1914 + * waitqueue_active() check in wake_up_var() sees the waiter. 1915 + */ 1911 1916 smp_mb(); 1912 - wake_up_bit(&inode->i_state, __I_NEW); 1917 + inode_wake_up_bit(inode, __I_NEW); 1913 1918 spin_unlock(&inode->i_lock); 1914 1919 } 1915 1920 EXPORT_SYMBOL(d_instantiate_new);
+24 -8
fs/inode.c
··· 734 734 * used as an indicator whether blocking on it is safe. 735 735 */ 736 736 spin_lock(&inode->i_lock); 737 - wake_up_bit(&inode->i_state, __I_NEW); 737 + /* 738 + * Pairs with the barrier in prepare_to_wait_event() to make sure 739 + * ___wait_var_event() either sees the bit cleared or 740 + * waitqueue_active() check in wake_up_var() sees the waiter. 741 + */ 742 + smp_mb(); 743 + inode_wake_up_bit(inode, __I_NEW); 738 744 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); 739 745 spin_unlock(&inode->i_lock); 740 746 ··· 1152 1146 spin_lock(&inode->i_lock); 1153 1147 WARN_ON(!(inode->i_state & I_NEW)); 1154 1148 inode->i_state &= ~I_NEW & ~I_CREATING; 1149 + /* 1150 + * Pairs with the barrier in prepare_to_wait_event() to make sure 1151 + * ___wait_var_event() either sees the bit cleared or 1152 + * waitqueue_active() check in wake_up_var() sees the waiter. 1153 + */ 1155 1154 smp_mb(); 1156 - wake_up_bit(&inode->i_state, __I_NEW); 1155 + inode_wake_up_bit(inode, __I_NEW); 1157 1156 spin_unlock(&inode->i_lock); 1158 1157 } 1159 1158 EXPORT_SYMBOL(unlock_new_inode); ··· 1169 1158 spin_lock(&inode->i_lock); 1170 1159 WARN_ON(!(inode->i_state & I_NEW)); 1171 1160 inode->i_state &= ~I_NEW; 1161 + /* 1162 + * Pairs with the barrier in prepare_to_wait_event() to make sure 1163 + * ___wait_var_event() either sees the bit cleared or 1164 + * waitqueue_active() check in wake_up_var() sees the waiter. 1165 + */ 1172 1166 smp_mb(); 1173 - wake_up_bit(&inode->i_state, __I_NEW); 1167 + inode_wake_up_bit(inode, __I_NEW); 1174 1168 spin_unlock(&inode->i_lock); 1175 1169 iput(inode); 1176 1170 } ··· 2364 2348 */ 2365 2349 static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked) 2366 2350 { 2367 - wait_queue_head_t *wq; 2368 - DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); 2351 + struct wait_bit_queue_entry wqe; 2352 + struct wait_queue_head *wq_head; 2369 2353 2370 2354 /* 2371 2355 * Handle racing against evict(), see that routine for more details. ··· 2376 2360 return; 2377 2361 } 2378 2362 2379 - wq = bit_waitqueue(&inode->i_state, __I_NEW); 2380 - prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 2363 + wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW); 2364 + prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE); 2381 2365 spin_unlock(&inode->i_lock); 2382 2366 rcu_read_unlock(); 2383 2367 if (is_inode_hash_locked) 2384 2368 spin_unlock(&inode_hash_lock); 2385 2369 schedule(); 2386 - finish_wait(wq, &wait.wq_entry); 2370 + finish_wait(wq_head, &wqe.wq_entry); 2387 2371 if (is_inode_hash_locked) 2388 2372 spin_lock(&inode_hash_lock); 2389 2373 rcu_read_lock();
+2 -1
include/linux/writeback.h
··· 200 200 /* writeback.h requires fs.h; it, too, is not included from here. */ 201 201 static inline void wait_on_inode(struct inode *inode) 202 202 { 203 - wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE); 203 + wait_var_event(inode_state_wait_address(inode, __I_NEW), 204 + !(READ_ONCE(inode->i_state) & I_NEW)); 204 205 } 205 206 206 207 #ifdef CONFIG_CGROUP_WRITEBACK