Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: revert to using a kthread for AIL pushing
  xfs: force the log if we encounter pinned buffers in .iop_pushbuf
  xfs: do not update xa_last_pushed_lsn for locked items
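
The common thread in these commits: AIL pushing moves from a delayed
workqueue item back to a dedicated per-mount kernel thread, and the push
loop learns to force the log instead of spinning when it keeps meeting
pinned buffers. For readers less familiar with the kthread lifecycle the
first commit reverts to, here is a minimal, self-contained sketch (a
hypothetical module with made-up names such as "pusher"; the real thread
is the xfsaild added in fs/xfs/xfs_trans_ail.c below):

	#include <linux/module.h>
	#include <linux/kthread.h>
	#include <linux/delay.h>
	#include <linux/err.h>

	static struct task_struct *pusher;

	/* Thread body: loop until someone calls kthread_stop() on us. */
	static int pusher_fn(void *data)
	{
		while (!kthread_should_stop()) {
			/* ... do one unit of work per iteration ... */
			msleep_interruptible(50); /* back off, like xfsaild's tout */
		}
		return 0;
	}

	static int __init pusher_init(void)
	{
		pusher = kthread_run(pusher_fn, NULL, "pusher");
		return IS_ERR(pusher) ? PTR_ERR(pusher) : 0;
	}

	static void __exit pusher_exit(void)
	{
		kthread_stop(pusher);	/* wakes the thread, waits for it to exit */
	}

	module_init(pusher_init);
	module_exit(pusher_exit);
	MODULE_LICENSE("GPL");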

+2 -1
fs/xfs/xfs_buf_item.c
···
  * the xfsbufd to get this buffer written. We have to unlock the buffer
  * to allow the xfsbufd to write it, too.
  */
-STATIC void
+STATIC bool
 xfs_buf_item_pushbuf(
 	struct xfs_log_item	*lip)
 {
···
 
 	xfs_buf_delwri_promote(bp);
 	xfs_buf_relse(bp);
+	return true;
 }
 
 STATIC void
+7 -3
fs/xfs/xfs_dquot_item.c
···
  * search the buffer cache can be a time consuming thing, and AIL lock is a
  * spinlock.
  */
-STATIC void
+STATIC bool
 xfs_qm_dquot_logitem_pushbuf(
 	struct xfs_log_item	*lip)
 {
 	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
 	struct xfs_dquot	*dqp = qlip->qli_dquot;
 	struct xfs_buf		*bp;
+	bool			ret = true;
 
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
···
 	if (completion_done(&dqp->q_flush) ||
 	    !(lip->li_flags & XFS_LI_IN_AIL)) {
 		xfs_dqunlock(dqp);
-		return;
+		return true;
 	}
 
 	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
 		       dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
 	xfs_dqunlock(dqp);
 	if (!bp)
-		return;
+		return true;
 	if (XFS_BUF_ISDELAYWRITE(bp))
 		xfs_buf_delwri_promote(bp);
+	if (xfs_buf_ispinned(bp))
+		ret = false;
 	xfs_buf_relse(bp);
+	return ret;
 }
 
 /*
+7 -3
fs/xfs/xfs_inode_item.c
···
  * marked delayed write. If that's the case, we'll promote it and that will
  * allow the caller to write the buffer by triggering the xfsbufd to run.
  */
-STATIC void
+STATIC bool
 xfs_inode_item_pushbuf(
 	struct xfs_log_item	*lip)
 {
 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 	struct xfs_inode	*ip = iip->ili_inode;
 	struct xfs_buf		*bp;
+	bool			ret = true;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
···
 	if (completion_done(&ip->i_flush) ||
 	    !(lip->li_flags & XFS_LI_IN_AIL)) {
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-		return;
+		return true;
 	}
 
 	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
···
 
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 	if (!bp)
-		return;
+		return true;
 	if (XFS_BUF_ISDELAYWRITE(bp))
 		xfs_buf_delwri_promote(bp);
+	if (xfs_buf_ispinned(bp))
+		ret = false;
 	xfs_buf_relse(bp);
+	return ret;
 }
 
 /*
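
All three .iop_pushbuf implementations above now share one convention:
return true if the item's backing buffer could be promoted for writeout
(or was already gone), false if the buffer was found pinned in memory.
The consumer is the AIL push loop; the xfs_trans_ail.c hunk further down
reacts to a false return like this (excerpt from that hunk, comments
added here):

	case XFS_ITEM_PUSHBUF:
		XFS_STATS_INC(xs_push_ail_pushbuf);

		if (!IOP_PUSHBUF(lip)) {
			/* Pinned buffer: count the item as stuck and
			 * force the log later so the buffer can unpin. */
			stuck++;
			flush_log = 1;
		} else {
			ailp->xa_last_pushed_lsn = lsn;
		}
		push_xfsbufd = 1;
		break;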
+2
fs/xfs/xfs_linux.h
···
 #include <linux/ctype.h>
 #include <linux/writeback.h>
 #include <linux/capability.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <linux/list_sort.h>
 
 #include <asm/page.h>
+1 -12
fs/xfs/xfs_super.c
···
 	 */
 	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
 	if (!xfs_syncd_wq)
-		goto out;
-
-	xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
-	if (!xfs_ail_wq)
-		goto out_destroy_syncd;
-
+		return -ENOMEM;
 	return 0;
-
-out_destroy_syncd:
-	destroy_workqueue(xfs_syncd_wq);
-out:
-	return -ENOMEM;
 }
 
 STATIC void
 xfs_destroy_workqueues(void)
 {
-	destroy_workqueue(xfs_ail_wq);
 	destroy_workqueue(xfs_syncd_wq);
 }
+1 -1
fs/xfs/xfs_trans.h
···
 	void (*iop_unlock)(xfs_log_item_t *);
 	xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
 	void (*iop_push)(xfs_log_item_t *);
-	void (*iop_pushbuf)(xfs_log_item_t *);
+	bool (*iop_pushbuf)(xfs_log_item_t *);
 	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 } xfs_item_ops_t;
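
For context, xfs_item_ops_t is the method table each log item type fills
in. A hypothetical item type would wire up the changed method like this
(illustrative only; the real tables live in xfs_buf_item.c,
xfs_dquot_item.c and xfs_inode_item.c):

	/* Forward declaration matching the new signature. */
	STATIC bool xfs_foo_item_pushbuf(struct xfs_log_item *lip);

	static xfs_item_ops_t xfs_foo_item_ops = {
		/* ... iop_size, iop_format, iop_push, etc. elided ... */
		.iop_pushbuf	= xfs_foo_item_pushbuf,
	};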
+48 -35
fs/xfs/xfs_trans_ail.c
···
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-struct workqueue_struct *xfs_ail_wq;	/* AIL workqueue */
-
 #ifdef DEBUG
 /*
  * Check that the list is sorted as it should be.
···
 	xfs_trans_ail_cursor_clear(ailp, lip);
 }
 
-/*
- * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
- * to run at a later time if there is more work to do to complete the push.
- */
-STATIC void
-xfs_ail_worker(
-	struct work_struct	*work)
+static long
+xfsaild_push(
+	struct xfs_ail		*ailp)
 {
-	struct xfs_ail		*ailp = container_of(to_delayed_work(work),
-					struct xfs_ail, xa_work);
 	xfs_mount_t		*mp = ailp->xa_mount;
 	struct xfs_ail_cursor	cur;
 	xfs_log_item_t		*lip;
···
 
 		case XFS_ITEM_PUSHBUF:
 			XFS_STATS_INC(xs_push_ail_pushbuf);
-			IOP_PUSHBUF(lip);
-			ailp->xa_last_pushed_lsn = lsn;
+
+			if (!IOP_PUSHBUF(lip)) {
+				stuck++;
+				flush_log = 1;
+			} else {
+				ailp->xa_last_pushed_lsn = lsn;
+			}
 			push_xfsbufd = 1;
 			break;
···
 
 		case XFS_ITEM_LOCKED:
 			XFS_STATS_INC(xs_push_ail_locked);
-			ailp->xa_last_pushed_lsn = lsn;
 			stuck++;
 			break;
···
 		/* We're past our target or empty, so idle */
 		ailp->xa_last_pushed_lsn = 0;
 
-		/*
-		 * We clear the XFS_AIL_PUSHING_BIT first before checking
-		 * whether the target has changed. If the target has changed,
-		 * this pushes the requeue race directly onto the result of the
-		 * atomic test/set bit, so we are guaranteed that either the
-		 * the pusher that changed the target or ourselves will requeue
-		 * the work (but not both).
-		 */
-		clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
-		smp_rmb();
-		if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
-		    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
-			return;
-
 		tout = 50;
 	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
 		/*
···
 		tout = 20;
 	}
 
-	/* There is more to do, requeue us. */
-	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
-					msecs_to_jiffies(tout));
+	return tout;
+}
+
+static int
+xfsaild(
+	void		*data)
+{
+	struct xfs_ail	*ailp = data;
+	long		tout = 0;	/* milliseconds */
+
+	while (!kthread_should_stop()) {
+		if (tout && tout <= 20)
+			__set_current_state(TASK_KILLABLE);
+		else
+			__set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(tout ?
+				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+
+		try_to_freeze();
+
+		tout = xfsaild_push(ailp);
+	}
+
+	return 0;
 }
 
 /*
···
 	 */
 	smp_wmb();
 	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
-	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
-		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
+	smp_wmb();
+
+	wake_up_process(ailp->xa_task);
 }
 
 /*
···
 	INIT_LIST_HEAD(&ailp->xa_ail);
 	INIT_LIST_HEAD(&ailp->xa_cursors);
 	spin_lock_init(&ailp->xa_lock);
-	INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
+
+	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+			ailp->xa_mount->m_fsname);
+	if (IS_ERR(ailp->xa_task))
+		goto out_free_ailp;
+
 	mp->m_ail = ailp;
 	return 0;
+
+out_free_ailp:
+	kmem_free(ailp);
+	return ENOMEM;
 }
 
 void
···
 {
 	struct xfs_ail	*ailp = mp->m_ail;
 
-	cancel_delayed_work_sync(&ailp->xa_work);
+	kthread_stop(ailp->xa_task);
 	kmem_free(ailp);
 }
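
One subtlety in the xfsaild loop above is the choice of sleep state. An
annotated restatement of those few lines (the logic is from the hunk;
the comments are my gloss, not part of the patch):

	if (tout && tout <= 20)
		/* Short back-off, i.e. the "stuck items" case: nap is
		 * uninterruptible except for fatal signals. */
		__set_current_state(TASK_KILLABLE);
	else
		/* Idle or on the longer 50ms back-off: plain
		 * interruptible sleep. */
		__set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * tout is 0 only before the first push: sleep until woken.
	 * Either way, wake_up_process() from xfs_ail_push() ends the
	 * sleep early when a new push target is published.
	 */
	schedule_timeout(tout ?
			 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);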
+1 -7
fs/xfs/xfs_trans_priv.h
···
  */
 struct xfs_ail {
 	struct xfs_mount	*xa_mount;
+	struct task_struct	*xa_task;
 	struct list_head	xa_ail;
 	xfs_lsn_t		xa_target;
 	struct list_head	xa_cursors;
 	spinlock_t		xa_lock;
-	struct delayed_work	xa_work;
 	xfs_lsn_t		xa_last_pushed_lsn;
-	unsigned long		xa_flags;
 };
-
-#define XFS_AIL_PUSHING_BIT	0
 
 /*
  * From xfs_trans_ail.c
  */
-
-extern struct workqueue_struct *xfs_ail_wq;	/* AIL workqueue */
-
 void			xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
 					struct xfs_ail_cursor *cur,
 					struct xfs_log_item **log_items, int nr_items,