[XFS] Make xfs_ail_check check less by default

Checking the entire AIL on every insert and remove is prohibitively
expensive - the checking leaves xfslogd CPU bound, and the sustained
sequential create rate on a single disk drops from about 1800/s to 60/s
as a result.

By default on debug builds, only check the next and previous entries in
the list to ensure they are linked and ordered correctly. If you really
want the full-list check, define XFS_TRANS_DEBUG to get the old behaviour.
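
For illustration only, a neighbour-only sanity check on a circular doubly
linked list looks roughly like the standalone sketch below. The names here
(struct ail_item, ail_check_neighbours) are simplified stand-ins, not the
real xfs_log_item_t / xfs_ail_entry_t structures; the actual change to
xfs_ail_check() is in the diff that follows.

/*
 * Simplified illustration only: stand-in types, not the real
 * xfs_log_item_t / xfs_ail_entry_t structures.
 */
#include <assert.h>

struct ail_item {
	struct ail_item	*forw;	/* next item in the circular list */
	struct ail_item	*back;	/* previous item in the circular list */
	long long	lsn;	/* log sequence number; list kept ascending */
};

/*
 * O(1) check: only verify that the item's immediate neighbours link
 * back to it and that LSN ordering holds across those two links.
 */
static void
ail_check_neighbours(struct ail_item *head, struct ail_item *item)
{
	struct ail_item	*prev = item->back;
	struct ail_item	*next = item->forw;

	if (prev != head) {
		assert(prev->forw == item);
		assert(prev->lsn <= item->lsn);
	}
	if (next != head) {
		assert(next->back == item);
		assert(next->lsn >= item->lsn);
	}
}

int
main(void)
{
	struct ail_item head, a, b;

	/* head <-> a <-> b, LSNs ascending */
	head.forw = &a;  head.back = &b;  head.lsn = 0;
	a.forw = &b;     a.back = &head;  a.lsn = 10;
	b.forw = &head;  b.back = &a;     b.lsn = 20;

	ail_check_neighbours(&head, &a);
	ail_check_neighbours(&head, &b);
	return 0;
}

The point of the change is that this per-item check is O(1) on every insert
and remove, whereas walking the whole AIL is O(n) and was what kept xfslogd
CPU bound.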

SGI-PV: 972759
SGI-Modid: xfs-linux-melb:xfs-kern:30372a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>


 fs/xfs/xfs_trans_ail.c | 37 ++++++++++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 9 deletions(-)
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
···
 STATIC xfs_log_item_t	*xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *);

 #ifdef DEBUG
-STATIC void		xfs_ail_check(xfs_ail_entry_t *);
+STATIC void		xfs_ail_check(xfs_ail_entry_t *, xfs_log_item_t *);
 #else
-#define	xfs_ail_check(a)
+#define	xfs_ail_check(a,l)
 #endif /* DEBUG */

···
 	next_lip->li_ail.ail_forw = lip;
 	lip->li_ail.ail_forw->li_ail.ail_back = lip;

-	xfs_ail_check(base);
+	xfs_ail_check(base, lip);
 	return;
 }

···
 	xfs_log_item_t	*lip)
 /* ARGSUSED */
 {
+	xfs_ail_check(base, lip);
 	lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back;
 	lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw;
 	lip->li_ail.ail_forw = NULL;
 	lip->li_ail.ail_back = NULL;

-	xfs_ail_check(base);
 	return lip;
 }

···
  */
 STATIC void
 xfs_ail_check(
-	xfs_ail_entry_t *base)
+	xfs_ail_entry_t *base,
+	xfs_log_item_t	*lip)
 {
-	xfs_log_item_t	*lip;
 	xfs_log_item_t	*prev_lip;

-	lip = base->ail_forw;
-	if (lip == (xfs_log_item_t*)base) {
+	prev_lip = base->ail_forw;
+	if (prev_lip == (xfs_log_item_t*)base) {
 		/*
 		 * Make sure the pointers are correct when the list
 		 * is empty.
···
 	}

 	/*
+	 * Check the next and previous entries are valid.
+	 */
+	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+	prev_lip = lip->li_ail.ail_back;
+	if (prev_lip != (xfs_log_item_t*)base) {
+		ASSERT(prev_lip->li_ail.ail_forw == lip);
+		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+	}
+	prev_lip = lip->li_ail.ail_forw;
+	if (prev_lip != (xfs_log_item_t*)base) {
+		ASSERT(prev_lip->li_ail.ail_back == lip);
+		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
+	}
+
+
+#ifdef XFS_TRANS_DEBUG
+	/*
 	 * Walk the list checking forward and backward pointers,
 	 * lsn ordering, and that every entry has the XFS_LI_IN_AIL
-	 * flag set.
+	 * flag set. This is really expensive, so only do it when
+	 * specifically debugging the transaction subsystem.
 	 */
 	prev_lip = (xfs_log_item_t*)base;
 	while (lip != (xfs_log_item_t*)base) {
···
 	}
 	ASSERT(lip == (xfs_log_item_t*)base);
 	ASSERT(base->ail_back == prev_lip);
+#endif /* XFS_TRANS_DEBUG */
 }
 #endif /* DEBUG */