fs: pull inode->i_lock up out of writeback_single_inode

First thing we do in writeback_single_inode() is take the i_lock and
the last thing we do is drop it. A caller already holds the i_lock,
so pull the i_lock out of writeback_single_inode() to reduce the
round trips on this lock during inode writeback.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Authored by Dave Chinner; committed by Al Viro (commits 0f1b1fd8, 67a23c49)

+11 -7
fs/fs-writeback.c
··· 332 } 333 334 /* 335 - * Write out an inode's dirty pages. Called under inode_wb_list_lock. Either 336 - * the caller has an active reference on the inode or the inode has I_WILL_FREE 337 - * set. 338 * 339 * If `wait' is set, wait on the writeout. 340 * ··· 349 unsigned dirty; 350 int ret; 351 352 - spin_lock(&inode->i_lock); 353 if (!atomic_read(&inode->i_count)) 354 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); 355 else ··· 367 * completed a full scan of b_io. 368 */ 369 if (wbc->sync_mode != WB_SYNC_ALL) { 370 - spin_unlock(&inode->i_lock); 371 requeue_io(inode); 372 return 0; 373 } ··· 457 } 458 } 459 inode_sync_complete(inode); 460 - spin_unlock(&inode->i_lock); 461 return ret; 462 } 463 ··· 544 } 545 546 __iget(inode); 547 - spin_unlock(&inode->i_lock); 548 549 pages_skipped = wbc->pages_skipped; 550 writeback_single_inode(inode, wbc); ··· 554 */ 555 redirty_tail(inode); 556 } 557 spin_unlock(&inode_wb_list_lock); 558 iput(inode); 559 cond_resched(); ··· 1309 1310 might_sleep(); 1311 spin_lock(&inode_wb_list_lock); 1312 ret = writeback_single_inode(inode, &wbc); 1313 spin_unlock(&inode_wb_list_lock); 1314 if (sync) 1315 inode_sync_wait(inode); ··· 1335 int ret; 1336 1337 spin_lock(&inode_wb_list_lock); 1338 ret = writeback_single_inode(inode, wbc); 1339 spin_unlock(&inode_wb_list_lock); 1340 return ret; 1341 }
··· 332 } 333 334 /* 335 + * Write out an inode's dirty pages. Called under inode_wb_list_lock and 336 + * inode->i_lock. Either the caller has an active reference on the inode or 337 + * the inode has I_WILL_FREE set. 338 * 339 * If `wait' is set, wait on the writeout. 340 * ··· 349 unsigned dirty; 350 int ret; 351 352 + assert_spin_locked(&inode_wb_list_lock); 353 + assert_spin_locked(&inode->i_lock); 354 + 355 if (!atomic_read(&inode->i_count)) 356 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); 357 else ··· 365 * completed a full scan of b_io. 366 */ 367 if (wbc->sync_mode != WB_SYNC_ALL) { 368 requeue_io(inode); 369 return 0; 370 } ··· 456 } 457 } 458 inode_sync_complete(inode); 459 return ret; 460 } 461 ··· 544 } 545 546 __iget(inode); 547 548 pages_skipped = wbc->pages_skipped; 549 writeback_single_inode(inode, wbc); ··· 555 */ 556 redirty_tail(inode); 557 } 558 + spin_unlock(&inode->i_lock); 559 spin_unlock(&inode_wb_list_lock); 560 iput(inode); 561 cond_resched(); ··· 1309 1310 might_sleep(); 1311 spin_lock(&inode_wb_list_lock); 1312 + spin_lock(&inode->i_lock); 1313 ret = writeback_single_inode(inode, &wbc); 1314 + spin_unlock(&inode->i_lock); 1315 spin_unlock(&inode_wb_list_lock); 1316 if (sync) 1317 inode_sync_wait(inode); ··· 1333 int ret; 1334 1335 spin_lock(&inode_wb_list_lock); 1336 + spin_lock(&inode->i_lock); 1337 ret = writeback_single_inode(inode, wbc); 1338 + spin_unlock(&inode->i_lock); 1339 spin_unlock(&inode_wb_list_lock); 1340 return ret; 1341 }