Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
xfs: fix race condition in AIL push trigger
xfs: make AIL target updates and compares 32bit safe.
xfs: always push the AIL to the target
xfs: exit AIL push work correctly when AIL is empty
xfs: ensure reclaim cursor is reset correctly at end of AG

+27 -21
+1
fs/xfs/linux-2.6/xfs_sync.c
··· 926 926 XFS_LOOKUP_BATCH, 927 927 XFS_ICI_RECLAIM_TAG); 928 928 if (!nr_found) { 929 + done = 1; 929 930 rcu_read_unlock(); 930 931 break; 931 932 }
+26 -21
fs/xfs/xfs_trans_ail.c
··· 346 346 */ 347 347 STATIC void 348 348 xfs_ail_worker( 349 - struct work_struct *work) 349 + struct work_struct *work) 350 350 { 351 - struct xfs_ail *ailp = container_of(to_delayed_work(work), 351 + struct xfs_ail *ailp = container_of(to_delayed_work(work), 352 352 struct xfs_ail, xa_work); 353 - long tout; 354 - xfs_lsn_t target = ailp->xa_target; 355 - xfs_lsn_t lsn; 356 - xfs_log_item_t *lip; 357 - int flush_log, count, stuck; 358 - xfs_mount_t *mp = ailp->xa_mount; 353 + xfs_mount_t *mp = ailp->xa_mount; 359 354 struct xfs_ail_cursor *cur = &ailp->xa_cursors; 360 - int push_xfsbufd = 0; 355 + xfs_log_item_t *lip; 356 + xfs_lsn_t lsn; 357 + xfs_lsn_t target; 358 + long tout = 10; 359 + int flush_log = 0; 360 + int stuck = 0; 361 + int count = 0; 362 + int push_xfsbufd = 0; 361 363 362 364 spin_lock(&ailp->xa_lock); 365 + target = ailp->xa_target; 363 366 xfs_trans_ail_cursor_init(ailp, cur); 364 367 lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); 365 368 if (!lip || XFS_FORCED_SHUTDOWN(mp)) { ··· 371 368 */ 372 369 xfs_trans_ail_cursor_done(ailp, cur); 373 370 spin_unlock(&ailp->xa_lock); 374 - ailp->xa_last_pushed_lsn = 0; 375 - return; 371 + goto out_done; 376 372 } 377 373 378 374 XFS_STATS_INC(xs_push_ail); ··· 388 386 * lots of contention on the AIL lists. 389 387 */ 390 388 lsn = lip->li_lsn; 391 - flush_log = stuck = count = 0; 392 - while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { 389 + while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { 393 390 int lock_result; 394 391 /* 395 392 * If we can lock the item without sleeping, unlock the AIL ··· 481 480 } 482 481 483 482 /* assume we have more work to do in a short while */ 484 - tout = 10; 483 + out_done: 485 484 if (!count) { 486 485 /* We're past our target or empty, so idle */ 487 486 ailp->xa_last_pushed_lsn = 0; 488 487 489 488 /* 490 - * Check for an updated push target before clearing the 491 - * XFS_AIL_PUSHING_BIT. If the target changed, we've got more 492 - * work to do. 
Wait a bit longer before starting that work. 489 + * We clear the XFS_AIL_PUSHING_BIT first before checking 490 + * whether the target has changed. If the target has changed, 491 + * this pushes the requeue race directly onto the result of the 492 + * atomic test/set bit, so we are guaranteed that either 493 + * the pusher that changed the target or ourselves will requeue 494 + * the work (but not both). 493 495 */ 496 + clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); 494 497 smp_rmb(); 495 - if (ailp->xa_target == target) { 496 - clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); 498 + if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || 499 + test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 497 500 return; 498 - } 501 + 499 502 tout = 50; 500 503 } else if (XFS_LSN_CMP(lsn, target) >= 0) { 501 504 /* ··· 558 553 * the XFS_AIL_PUSHING_BIT. 559 554 */ 560 555 smp_wmb(); 561 - ailp->xa_target = threshold_lsn; 556 + xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); 562 557 if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 563 558 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); 564 559 }