Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

inode: port __I_SYNC to var event

Port the __I_SYNC mechanism to use the new var event mechanism.

Link: https://lore.kernel.org/r/20240823-work-i_state-v3-3-5cd5fd207a57@kernel.org
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>

+29 -16
fs/fs-writeback.c
··· 1386 1386 1387 1387 static void inode_sync_complete(struct inode *inode) 1388 1388 { 1389 + assert_spin_locked(&inode->i_lock); 1390 + 1389 1391 inode->i_state &= ~I_SYNC; 1390 1392 /* If inode is clean an unused, put it into LRU now... */ 1391 1393 inode_add_lru(inode); 1392 - /* Waiters must see I_SYNC cleared before being woken up */ 1393 - smp_mb(); 1394 - wake_up_bit(&inode->i_state, __I_SYNC); 1394 + /* Called with inode->i_lock which ensures memory ordering. */ 1395 + inode_wake_up_bit(inode, __I_SYNC); 1395 1396 } 1396 1397 1397 1398 static bool inode_dirtied_after(struct inode *inode, unsigned long t) ··· 1513 1512 */ 1514 1513 void inode_wait_for_writeback(struct inode *inode) 1515 1514 { 1516 - DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); 1517 - wait_queue_head_t *wqh; 1515 + struct wait_bit_queue_entry wqe; 1516 + struct wait_queue_head *wq_head; 1518 1517 1519 - lockdep_assert_held(&inode->i_lock); 1520 - wqh = bit_waitqueue(&inode->i_state, __I_SYNC); 1521 - while (inode->i_state & I_SYNC) { 1518 + assert_spin_locked(&inode->i_lock); 1519 + 1520 + if (!(inode->i_state & I_SYNC)) 1521 + return; 1522 + 1523 + wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC); 1524 + for (;;) { 1525 + prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE); 1526 + /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */ 1527 + if (!(inode->i_state & I_SYNC)) 1528 + break; 1522 1529 spin_unlock(&inode->i_lock); 1523 - __wait_on_bit(wqh, &wq, bit_wait, 1524 - TASK_UNINTERRUPTIBLE); 1530 + schedule(); 1525 1531 spin_lock(&inode->i_lock); 1526 1532 } 1533 + finish_wait(wq_head, &wqe.wq_entry); 1527 1534 } 1528 1535 1529 1536 /* ··· 1542 1533 static void inode_sleep_on_writeback(struct inode *inode) 1543 1534 __releases(inode->i_lock) 1544 1535 { 1545 - DEFINE_WAIT(wait); 1546 - wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC); 1547 - int sleep; 1536 + struct wait_bit_queue_entry wqe; 1537 + struct wait_queue_head *wq_head; 1538 + bool sleep; 1548 1539 1549 - prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); 1550 - sleep = inode->i_state & I_SYNC; 1540 + assert_spin_locked(&inode->i_lock); 1541 + 1542 + wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC); 1543 + prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE); 1544 + /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */ 1545 + sleep = !!(inode->i_state & I_SYNC); 1551 1546 spin_unlock(&inode->i_lock); 1552 1547 if (sleep) 1553 1548 schedule(); 1554 - finish_wait(wqh, &wait); 1549 + finish_wait(wq_head, &wqe.wq_entry); 1555 1550 } 1556 1551 1557 1552 /*