Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
writeback: simplify the write back thread queue
writeback: split writeback_inodes_wb
writeback: remove writeback_inodes_wbc
fs-writeback: fix kernel-doc warnings
splice: check f_mode for seekable file
splice: direct_splice_actor() should not use pos in sd

8 files changed, 123 insertions(+), 255 deletions(-)
fs/afs/write.c  -1
···
 {
 	struct address_space *mapping = vnode->vfs_inode.i_mapping;
 	struct writeback_control wbc = {
-		.bdi = mapping->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = LONG_MAX,
 		.range_cyclic = 1,
fs/btrfs/extent_io.c  -2
···
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 	struct writeback_control wbc_writepages = {
-		.bdi = wbc->bdi,
 		.sync_mode = wbc->sync_mode,
 		.older_than_this = NULL,
 		.nr_to_write = 64,
···
 		.sync_io = mode == WB_SYNC_ALL,
 	};
 	struct writeback_control wbc_writepages = {
-		.bdi = inode->i_mapping->backing_dev_info,
 		.sync_mode = mode,
 		.older_than_this = NULL,
 		.nr_to_write = nr_pages * 2,
fs/fs-writeback.c  +112 -225
···
 /*
  * Passed into wb_writeback(), essentially a subset of writeback_control
  */
-struct wb_writeback_args {
+struct wb_writeback_work {
 	long nr_pages;
 	struct super_block *sb;
 	enum writeback_sync_modes sync_mode;
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
-};
 
-/*
- * Work items for the bdi_writeback threads
- */
-struct bdi_work {
 	struct list_head list;		/* pending work list */
-	struct rcu_head rcu_head;	/* for RCU free/clear of work */
-
-	unsigned long seen;		/* threads that have seen this work */
-	atomic_t pending;		/* number of threads still to do work */
-
-	struct wb_writeback_args args;	/* writeback arguments */
-
-	unsigned long state;		/* flag bits, see WS_* */
+	struct completion *done;	/* set if the caller waits */
 };
-
-enum {
-	WS_INPROGRESS = 0,
-	WS_ONSTACK,
-};
-
-static inline void bdi_work_init(struct bdi_work *work,
-				 struct wb_writeback_args *args)
-{
-	INIT_RCU_HEAD(&work->rcu_head);
-	work->args = *args;
-	__set_bit(WS_INPROGRESS, &work->state);
-}
 
 /**
  * writeback_in_progress - determine whether there is writeback in progress
···
 	return !list_empty(&bdi->work_list);
 }
 
-static void bdi_work_free(struct rcu_head *head)
+static void bdi_queue_work(struct backing_dev_info *bdi,
+			   struct wb_writeback_work *work)
 {
-	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
-
-	clear_bit(WS_INPROGRESS, &work->state);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&work->state, WS_INPROGRESS);
-
-	if (!test_bit(WS_ONSTACK, &work->state))
-		kfree(work);
-}
-
-static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
-{
-	/*
-	 * The caller has retrieved the work arguments from this work,
-	 * drop our reference. If this is the last ref, delete and free it
-	 */
-	if (atomic_dec_and_test(&work->pending)) {
-		struct backing_dev_info *bdi = wb->bdi;
-
-		spin_lock(&bdi->wb_lock);
-		list_del_rcu(&work->list);
-		spin_unlock(&bdi->wb_lock);
-
-		call_rcu(&work->rcu_head, bdi_work_free);
-	}
-}
-
-static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
-{
-	work->seen = bdi->wb_mask;
-	BUG_ON(!work->seen);
-	atomic_set(&work->pending, bdi->wb_cnt);
-	BUG_ON(!bdi->wb_cnt);
-
-	/*
-	 * list_add_tail_rcu() contains the necessary barriers to
-	 * make sure the above stores are seen before the item is
-	 * noticed on the list
-	 */
 	spin_lock(&bdi->wb_lock);
-	list_add_tail_rcu(&work->list, &bdi->work_list);
+	list_add_tail(&work->list, &bdi->work_list);
 	spin_unlock(&bdi->wb_lock);
 
 	/*
···
 	}
 }
 
-/*
- * Used for on-stack allocated work items. The caller needs to wait until
- * the wb threads have acked the work before it's safe to continue.
- */
-static void bdi_wait_on_work_done(struct bdi_work *work)
+static void
+__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+		      bool range_cyclic, bool for_background)
 {
-	wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
-		    TASK_UNINTERRUPTIBLE);
-}
-
-static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
-				 struct wb_writeback_args *args)
-{
-	struct bdi_work *work;
+	struct wb_writeback_work *work;
 
 	/*
 	 * This is WB_SYNC_NONE writeback, so if allocation fails just
 	 * wakeup the thread for old dirty data writeback
 	 */
-	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (work) {
-		bdi_work_init(work, args);
-		bdi_queue_work(bdi, work);
-	} else {
-		struct bdi_writeback *wb = &bdi->wb;
-
-		if (wb->task)
-			wake_up_process(wb->task);
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		if (bdi->wb.task)
+			wake_up_process(bdi->wb.task);
+		return;
 	}
-}
 
-/**
- * bdi_queue_work_onstack - start and wait for writeback
- * @sb: write inodes from this super_block
- *
- * Description:
- *   This function initiates writeback and waits for the operation to
- *   complete. Callers must hold the sb s_umount semaphore for
- *   reading, to avoid having the super disappear before we are done.
- */
-static void bdi_queue_work_onstack(struct wb_writeback_args *args)
-{
-	struct bdi_work work;
+	work->sync_mode = WB_SYNC_NONE;
+	work->nr_pages = nr_pages;
+	work->range_cyclic = range_cyclic;
+	work->for_background = for_background;
 
-	bdi_work_init(&work, args);
-	__set_bit(WS_ONSTACK, &work.state);
-
-	bdi_queue_work(args->sb->s_bdi, &work);
-	bdi_wait_on_work_done(&work);
+	bdi_queue_work(bdi, work);
 }
 
 /**
···
  */
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
 {
-	struct wb_writeback_args args = {
-		.sync_mode = WB_SYNC_NONE,
-		.nr_pages = nr_pages,
-		.range_cyclic = 1,
-	};
-
-	bdi_alloc_queue_work(bdi, &args);
+	__bdi_start_writeback(bdi, nr_pages, true, false);
 }
 
 /**
···
  */
 void bdi_start_background_writeback(struct backing_dev_info *bdi)
 {
-	struct wb_writeback_args args = {
-		.sync_mode = WB_SYNC_NONE,
-		.nr_pages = LONG_MAX,
-		.for_background = 1,
-		.range_cyclic = 1,
-	};
-	bdi_alloc_queue_work(bdi, &args);
+	__bdi_start_writeback(bdi, LONG_MAX, true, true);
 }
 
 /*
···
 
 /*
  * Write a portion of b_io inodes which belong to @sb.
- * If @wbc->sb != NULL, then find and write all such
+ *
+ * If @only_this_sb is true, then find and write all such
  * inodes. Otherwise write only ones which go sequentially
  * in reverse order.
+ *
  * Return 1, if the caller writeback routine should be
  * interrupted. Otherwise return 0.
  */
-static int writeback_sb_inodes(struct super_block *sb,
-			       struct bdi_writeback *wb,
-			       struct writeback_control *wbc)
+static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
+		struct writeback_control *wbc, bool only_this_sb)
 {
 	while (!list_empty(&wb->b_io)) {
 		long pages_skipped;
 		struct inode *inode = list_entry(wb->b_io.prev,
 						 struct inode, i_list);
-		if (wbc->sb && sb != inode->i_sb) {
-			/* super block given and doesn't
-			   match, skip this inode */
-			redirty_tail(inode);
-			continue;
-		}
-		if (sb != inode->i_sb)
-			/* finish with this superblock */
+
+		if (inode->i_sb != sb) {
+			if (only_this_sb) {
+				/*
+				 * We only want to write back data for this
+				 * superblock, move all inodes not belonging
+				 * to it back onto the dirty list.
+				 */
+				redirty_tail(inode);
+				continue;
+			}
+
+			/*
+			 * The inode belongs to a different superblock.
+			 * Bounce back to the caller to unpin this and
+			 * pin the next superblock.
+			 */
 			return 0;
+		}
+
 		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
 			requeue_io(inode);
 			continue;
···
 	return 1;
 }
 
-static void writeback_inodes_wb(struct bdi_writeback *wb,
-				struct writeback_control *wbc)
+void writeback_inodes_wb(struct bdi_writeback *wb,
+			 struct writeback_control *wbc)
 {
 	int ret = 0;
 
···
 						 struct inode, i_list);
 		struct super_block *sb = inode->i_sb;
 
-		if (wbc->sb) {
-			/*
-			 * We are requested to write out inodes for a specific
-			 * superblock. This means we already have s_umount
-			 * taken by the caller which also waits for us to
-			 * complete the writeout.
-			 */
-			if (sb != wbc->sb) {
-				redirty_tail(inode);
-				continue;
-			}
-
-			WARN_ON(!rwsem_is_locked(&sb->s_umount));
-
-			ret = writeback_sb_inodes(sb, wb, wbc);
-		} else {
-			if (!pin_sb_for_writeback(sb)) {
-				requeue_io(inode);
-				continue;
-			}
-			ret = writeback_sb_inodes(sb, wb, wbc);
-			drop_super(sb);
+		if (!pin_sb_for_writeback(sb)) {
+			requeue_io(inode);
+			continue;
 		}
+		ret = writeback_sb_inodes(sb, wb, wbc, false);
+		drop_super(sb);
 
 		if (ret)
 			break;
···
 	/* Leave any unwritten inodes on b_io */
 }
 
-void writeback_inodes_wbc(struct writeback_control *wbc)
+static void __writeback_inodes_sb(struct super_block *sb,
+		struct bdi_writeback *wb, struct writeback_control *wbc)
 {
-	struct backing_dev_info *bdi = wbc->bdi;
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-	writeback_inodes_wb(&bdi->wb, wbc);
+	wbc->wb_start = jiffies; /* livelock avoidance */
+	spin_lock(&inode_lock);
+	if (!wbc->for_kupdate || list_empty(&wb->b_io))
+		queue_io(wb, wbc->older_than_this);
+	writeback_sb_inodes(sb, wb, wbc, true);
+	spin_unlock(&inode_lock);
 }
 
 /*
···
  * all dirty pages if they are all attached to "old" mappings.
  */
 static long wb_writeback(struct bdi_writeback *wb,
-			 struct wb_writeback_args *args)
+			 struct wb_writeback_work *work)
 {
 	struct writeback_control wbc = {
-		.bdi = wb->bdi,
-		.sb = args->sb,
-		.sync_mode = args->sync_mode,
+		.sync_mode = work->sync_mode,
 		.older_than_this = NULL,
-		.for_kupdate = args->for_kupdate,
-		.for_background = args->for_background,
-		.range_cyclic = args->range_cyclic,
+		.for_kupdate = work->for_kupdate,
+		.for_background = work->for_background,
+		.range_cyclic = work->range_cyclic,
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
···
 		/*
 		 * Stop writeback when nr_pages has been consumed
 		 */
-		if (args->nr_pages <= 0)
+		if (work->nr_pages <= 0)
 			break;
 
 		/*
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
-		if (args->for_background && !over_bground_thresh())
+		if (work->for_background && !over_bground_thresh())
 			break;
 
 		wbc.more_io = 0;
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		wbc.pages_skipped = 0;
-		writeback_inodes_wb(wb, &wbc);
-		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+		if (work->sb)
+			__writeback_inodes_sb(work->sb, wb, &wbc);
+		else
+			writeback_inodes_wb(wb, &wbc);
+		work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 
 		/*
···
 }
 
 /*
- * Return the next bdi_work struct that hasn't been processed by this
- * wb thread yet. ->seen is initially set for each thread that exists
- * for this device, when a thread first notices a piece of work it
- * clears its bit. Depending on writeback type, the thread will notify
- * completion on either receiving the work (WB_SYNC_NONE) or after
- * it is done (WB_SYNC_ALL).
+ * Return the next wb_writeback_work struct that hasn't been processed yet.
  */
-static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
-					   struct bdi_writeback *wb)
+static struct wb_writeback_work *
+get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb)
 {
-	struct bdi_work *work, *ret = NULL;
+	struct wb_writeback_work *work = NULL;
 
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(work, &bdi->work_list, list) {
-		if (!test_bit(wb->nr, &work->seen))
-			continue;
-		clear_bit(wb->nr, &work->seen);
-
-		ret = work;
-		break;
+	spin_lock(&bdi->wb_lock);
+	if (!list_empty(&bdi->work_list)) {
+		work = list_entry(bdi->work_list.next,
+				  struct wb_writeback_work, list);
+		list_del_init(&work->list);
 	}
-
-	rcu_read_unlock();
-	return ret;
+	spin_unlock(&bdi->wb_lock);
+	return work;
 }
 
 static long wb_check_old_data_flush(struct bdi_writeback *wb)
···
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
 	if (nr_pages) {
-		struct wb_writeback_args args = {
+		struct wb_writeback_work work = {
 			.nr_pages = nr_pages,
 			.sync_mode = WB_SYNC_NONE,
 			.for_kupdate = 1,
 			.range_cyclic = 1,
 		};
 
-		return wb_writeback(wb, &args);
+		return wb_writeback(wb, &work);
 	}
 
 	return 0;
···
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 {
 	struct backing_dev_info *bdi = wb->bdi;
-	struct bdi_work *work;
+	struct wb_writeback_work *work;
 	long wrote = 0;
 
 	while ((work = get_next_work_item(bdi, wb)) != NULL) {
-		struct wb_writeback_args args = work->args;
-
 		/*
 		 * Override sync mode, in case we must wait for completion
+		 * because this thread is exiting now.
 		 */
 		if (force_wait)
-			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
+			work->sync_mode = WB_SYNC_ALL;
+
+		wrote += wb_writeback(wb, work);
 
 		/*
-		 * If this isn't a data integrity operation, just notify
-		 * that we have seen this work and we are now starting it.
+		 * Notify the caller of completion if this is a synchronous
+		 * work item, otherwise just free it.
 		 */
-		if (!test_bit(WS_ONSTACK, &work->state))
-			wb_clear_pending(wb, work);
-
-		wrote += wb_writeback(wb, &args);
-
-		/*
-		 * This is a data integrity writeback, so only do the
-		 * notification when we have completed the work.
-		 */
-		if (test_bit(WS_ONSTACK, &work->state))
-			wb_clear_pending(wb, work);
+		if (work->done)
+			complete(work->done);
+		else
+			kfree(work);
 	}
 
 	/*
···
 void wakeup_flusher_threads(long nr_pages)
 {
 	struct backing_dev_info *bdi;
-	struct wb_writeback_args args = {
-		.sync_mode = WB_SYNC_NONE,
-	};
 
-	if (nr_pages) {
-		args.nr_pages = nr_pages;
-	} else {
-		args.nr_pages = global_page_state(NR_FILE_DIRTY) +
+	if (!nr_pages) {
+		nr_pages = global_page_state(NR_FILE_DIRTY) +
 				global_page_state(NR_UNSTABLE_NFS);
 	}
 
···
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		bdi_alloc_queue_work(bdi, &args);
+		__bdi_start_writeback(bdi, nr_pages, false, false);
 	}
 	rcu_read_unlock();
 }
···
 {
 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-	struct wb_writeback_args args = {
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct wb_writeback_work work = {
 		.sb = sb,
 		.sync_mode = WB_SYNC_NONE,
+		.done = &done,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-	args.nr_pages = nr_dirty + nr_unstable +
+	work.nr_pages = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-	bdi_queue_work_onstack(&args);
+	bdi_queue_work(sb->s_bdi, &work);
+	wait_for_completion(&done);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
···
  */
 void sync_inodes_sb(struct super_block *sb)
 {
-	struct wb_writeback_args args = {
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct wb_writeback_work work = {
 		.sb = sb,
 		.sync_mode = WB_SYNC_ALL,
 		.nr_pages = LONG_MAX,
 		.range_cyclic = 0,
+		.done = &done,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-	bdi_queue_work_onstack(&args);
+	bdi_queue_work(sb->s_bdi, &work);
+	wait_for_completion(&done);
+
 	wait_sb_inodes(sb);
 }
 EXPORT_SYMBOL(sync_inodes_sb);
fs/splice.c  +4 -5
···
 {
 	struct file *file = sd->u.file;
 
-	return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
+	return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
+			      sd->flags);
 }
 
 /**
···
 		if (off_in)
 			return -ESPIPE;
 		if (off_out) {
-			if (!out->f_op || !out->f_op->llseek ||
-			    out->f_op->llseek == no_llseek)
+			if (!(out->f_mode & FMODE_PWRITE))
 				return -EINVAL;
 			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
 				return -EFAULT;
···
 		if (off_out)
 			return -ESPIPE;
 		if (off_in) {
-			if (!in->f_op || !in->f_op->llseek ||
-			    in->f_op->llseek == no_llseek)
+			if (!(in->f_mode & FMODE_PREAD))
 				return -EINVAL;
 			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
 				return -EFAULT;
include/linux/backing-dev.h  -2
···
 	struct bdi_writeback wb;  /* default writeback info for this bdi */
 	spinlock_t wb_lock;	  /* protects update side of wb_list */
 	struct list_head wb_list; /* the flusher threads hanging off this bdi */
-	unsigned long wb_mask;	  /* bitmask of registered tasks */
-	unsigned int wb_cnt;	  /* number of registered tasks */
 
 	struct list_head work_list;
 
include/linux/writeback.h  +2 -5
···
  * in a manner such that unspecified fields are set to zero.
  */
 struct writeback_control {
-	struct backing_dev_info *bdi;	/* If !NULL, only write back this
-					   queue */
-	struct super_block *sb;		/* if !NULL, only write inodes from
-					   this super_block */
 	enum writeback_sync_modes sync_mode;
 	unsigned long *older_than_this;	/* If !NULL, only write back inodes
 					   older than this */
···
 void writeback_inodes_sb(struct super_block *);
 int writeback_inodes_sb_if_idle(struct super_block *);
 void sync_inodes_sb(struct super_block *);
-void writeback_inodes_wbc(struct writeback_control *wbc);
+void writeback_inodes_wb(struct bdi_writeback *wb,
+			 struct writeback_control *wbc);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
 void wakeup_flusher_threads(long nr_pages);
 
mm/backing-dev.c  +4 -13
···
 		   "b_more_io: %8lu\n"
 		   "bdi_list: %8u\n"
 		   "state: %8lx\n"
-		   "wb_mask: %8lx\n"
-		   "wb_list: %8u\n"
-		   "wb_cnt: %8u\n",
+		   "wb_list: %8u\n",
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
 		   K(bdi_thresh), K(dirty_thresh),
 		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
-		   !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
-		   !list_empty(&bdi->wb_list), bdi->wb_cnt);
+		   !list_empty(&bdi->bdi_list), bdi->state,
+		   !list_empty(&bdi->wb_list));
 #undef K
 
 	return 0;
···
 static void bdi_flush_io(struct backing_dev_info *bdi)
 {
 	struct writeback_control wbc = {
-		.bdi = bdi,
 		.sync_mode = WB_SYNC_NONE,
 		.older_than_this = NULL,
 		.range_cyclic = 1,
 		.nr_to_write = 1024,
 	};
 
-	writeback_inodes_wbc(&wbc);
+	writeback_inodes_wb(&bdi->wb, &wbc);
 }
 
 /*
···
 	INIT_LIST_HEAD(&bdi->work_list);
 
 	bdi_wb_init(&bdi->wb, bdi);
-
-	/*
-	 * Just one thread support for now, hard code mask and count
-	 */
-	bdi->wb_mask = 1;
-	bdi->wb_cnt = 1;
 
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
 		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
mm/page-writeback.c  +1 -2
···
 
 	for (;;) {
 		struct writeback_control wbc = {
-			.bdi = bdi,
 			.sync_mode = WB_SYNC_NONE,
 			.older_than_this = NULL,
 			.nr_to_write = write_chunk,
···
 		 * up.
 		 */
 		if (bdi_nr_reclaimable > bdi_thresh) {
-			writeback_inodes_wbc(&wbc);
+			writeback_inodes_wb(&bdi->wb, &wbc);
 			pages_written += write_chunk - wbc.nr_to_write;
 			get_dirty_limits(&background_thresh, &dirty_thresh,
 					 &bdi_thresh, bdi);