Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

md: fix up raid1/raid10 unplugging.

We just need to make sure that an unplug event wakes up the md
thread, which is exactly what mddev_check_plugged does.

Also remove some plug-related code that is no longer needed.

Signed-off-by: NeilBrown <neilb@suse.de>

NeilBrown c3b328ac 7c13edc8

+20 -28
+10 -14
drivers/md/raid1.c
··· 565 565 spin_unlock_irq(&conf->device_lock); 566 566 } 567 567 568 - static void md_kick_device(mddev_t *mddev) 569 - { 570 - blk_flush_plug(current); 571 - md_wakeup_thread(mddev->thread); 572 - } 573 - 574 568 /* Barriers.... 575 569 * Sometimes we need to suspend IO while we do something else, 576 570 * either some resync/recovery, or reconfigure the array. ··· 594 600 595 601 /* Wait until no block IO is waiting */ 596 602 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, 597 - conf->resync_lock, md_kick_device(conf->mddev)); 603 + conf->resync_lock, ); 598 604 599 605 /* block any new IO from starting */ 600 606 conf->barrier++; ··· 602 608 /* Now wait for all pending IO to complete */ 603 609 wait_event_lock_irq(conf->wait_barrier, 604 610 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 605 - conf->resync_lock, md_kick_device(conf->mddev)); 611 + conf->resync_lock, ); 606 612 607 613 spin_unlock_irq(&conf->resync_lock); 608 614 } ··· 624 630 conf->nr_waiting++; 625 631 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 626 632 conf->resync_lock, 627 - md_kick_device(conf->mddev)); 633 + ); 628 634 conf->nr_waiting--; 629 635 } 630 636 conf->nr_pending++; ··· 660 666 wait_event_lock_irq(conf->wait_barrier, 661 667 conf->nr_pending == conf->nr_queued+1, 662 668 conf->resync_lock, 663 - ({ flush_pending_writes(conf); 664 - md_kick_device(conf->mddev); })); 669 + flush_pending_writes(conf)); 665 670 spin_unlock_irq(&conf->resync_lock); 666 671 } 667 672 static void unfreeze_array(conf_t *conf) ··· 722 729 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 723 730 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); 724 731 mdk_rdev_t *blocked_rdev; 732 + int plugged; 725 733 726 734 /* 727 735 * Register the new request and wait if the reconstruction ··· 814 820 * inc refcount on their rdev. Record them by setting 815 821 * bios[x] to bio 816 822 */ 823 + plugged = mddev_check_plugged(mddev); 824 + 817 825 disks = conf->raid_disks; 818 826 retry_write: 819 827 blocked_rdev = NULL; ··· 921 925 /* In case raid1d snuck in to freeze_array */ 922 926 wake_up(&conf->wait_barrier); 923 927 924 - if (do_sync || !bitmap) 928 + if (do_sync || !bitmap || !plugged) 925 929 md_wakeup_thread(mddev->thread); 926 930 927 931 return 0; ··· 1520 1524 for (;;) { 1521 1525 char b[BDEVNAME_SIZE]; 1522 1526 1523 - flush_pending_writes(conf); 1527 + if (atomic_read(&mddev->plug_cnt) == 0) 1528 + flush_pending_writes(conf); 1524 1529 1525 1530 spin_lock_irqsave(&conf->device_lock, flags); 1526 1531 if (list_empty(head)) { ··· 2039 2042 2040 2043 md_unregister_thread(mddev->thread); 2041 2044 mddev->thread = NULL; 2042 - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2043 2045 if (conf->r1bio_pool) 2044 2046 mempool_destroy(conf->r1bio_pool); 2045 2047 kfree(conf->mirrors);
+10 -14
drivers/md/raid10.c
··· 634 634 spin_unlock_irq(&conf->device_lock); 635 635 } 636 636 637 - static void md_kick_device(mddev_t *mddev) 638 - { 639 - blk_flush_plug(current); 640 - md_wakeup_thread(mddev->thread); 641 - } 642 - 643 637 /* Barriers.... 644 638 * Sometimes we need to suspend IO while we do something else, 645 639 * either some resync/recovery, or reconfigure the array. ··· 663 669 664 670 /* Wait until no block IO is waiting (unless 'force') */ 665 671 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, 666 - conf->resync_lock, md_kick_device(conf->mddev)); 672 + conf->resync_lock, ); 667 673 668 674 /* block any new IO from starting */ 669 675 conf->barrier++; 670 676 671 - /* No wait for all pending IO to complete */ 677 + /* Now wait for all pending IO to complete */ 672 678 wait_event_lock_irq(conf->wait_barrier, 673 679 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 674 - conf->resync_lock, md_kick_device(conf->mddev)); 680 + conf->resync_lock, ); 675 681 676 682 spin_unlock_irq(&conf->resync_lock); 677 683 } ··· 692 698 conf->nr_waiting++; 693 699 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 694 700 conf->resync_lock, 695 - md_kick_device(conf->mddev)); 701 + ); 696 702 conf->nr_waiting--; 697 703 } 698 704 conf->nr_pending++; ··· 728 734 wait_event_lock_irq(conf->wait_barrier, 729 735 conf->nr_pending == conf->nr_queued+1, 730 736 conf->resync_lock, 731 - ({ flush_pending_writes(conf); 732 - md_kick_device(conf->mddev); })); 737 + flush_pending_writes(conf)); 738 + 733 739 spin_unlock_irq(&conf->resync_lock); 734 740 } 735 741 ··· 756 762 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); 757 763 unsigned long flags; 758 764 mdk_rdev_t *blocked_rdev; 765 + int plugged; 759 766 760 767 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 761 768 md_flush_request(mddev, bio); ··· 865 870 * inc refcount on their rdev. Record them by setting 866 871 * bios[x] to bio 867 872 */ 873 + plugged = mddev_check_plugged(mddev); 874 + 868 875 raid10_find_phys(conf, r10_bio); 869 876 retry_write: 870 877 blocked_rdev = NULL; ··· 943 946 /* In case raid10d snuck in to freeze_array */ 944 947 wake_up(&conf->wait_barrier); 945 948 946 - if (do_sync || !mddev->bitmap) 949 + if (do_sync || !mddev->bitmap || !plugged) 947 950 md_wakeup_thread(mddev->thread); 948 - 949 951 return 0; 950 952 } 951 953