
Merge tag 'md-6.11-20240612' of git://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-6.11/block

Pull MD updates from Song:

"The major changes in this PR are:

- sync_action fix and refactoring, by Yu Kuai;
- Various small fixes by Christoph Hellwig, Li Nan, and Ofir Gal."

* tag 'md-6.11-20240612' of git://git.kernel.org/pub/scm/linux/kernel/git/song/md:
md/raid5: avoid BUG_ON() while continue reshape after reassembling
md: pass in max_sectors for pers->sync_request()
md: factor out helpers for different sync_action in md_do_sync()
md: replace last_sync_action with new enum type
md: use new helpers in md_do_sync()
md: don't fail action_store() if sync_thread is not registered
md: remove parameter check_seq for stop_sync_thread()
md: replace sysfs api sync_action with new helpers
md: factor out helper to start reshape from action_store()
md: add new helpers for sync_action
md: add a new enum type sync_action
md: rearrange recovery_flags
md/md-bitmap: fix writing non bitmap pages
md/raid1: don't free conf on raid1_run failure
md/raid0: don't free conf on raid0_run failure
md: make md_flush_request() more readable
md: fix deadlock between mddev_suspend and flush bio
md: change the return value type of md_write_start to void
md: do not delete safemode_timer in mddev_suspend
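
[Editor's note: for context, the sysfs interface this series reworks can be driven from userspace. The following is a minimal sketch, not part of the series; it assumes an array named md0 and uses the standard md attributes (sync_action, last_sync_action, mismatch_cnt) with only token error handling.]

	#include <stdio.h>

	static int write_attr(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);		/* kernel parses this via cmd_match() */
		return fclose(f);
	}

	static int read_attr(const char *path, char *buf, size_t len)
	{
		FILE *f = fopen(path, "r");

		if (!f)
			return -1;
		if (!fgets(buf, len, f))
			buf[0] = '\0';
		return fclose(f);
	}

	int main(void)
	{
		char buf[64];

		/* request a check; after this series "check" maps to ACTION_CHECK */
		if (write_attr("/sys/block/md0/md/sync_action", "check"))
			perror("sync_action");

		/* current action, as reported by action_show() */
		if (!read_attr("/sys/block/md0/md/sync_action", buf, sizeof(buf)))
			printf("sync_action: %s", buf);

		/* last action, now backed by enum sync_action instead of a string */
		if (!read_attr("/sys/block/md0/md/last_sync_action", buf, sizeof(buf)))
			printf("last_sync_action: %s", buf);

		/* mismatch count maintained during check/repair */
		if (!read_attr("/sys/block/md0/md/mismatch_cnt", buf, sizeof(buf)))
			printf("mismatch_cnt: %s", buf);

		return 0;
	}
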

+440 -290
+1 -1
drivers/md/dm-raid.c
···
 	recovery = rs->md.recovery;
 	state = decipher_sync_action(mddev, recovery);
 	progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
-	resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
+	resync_mismatches = mddev->last_sync_action == ACTION_CHECK ?
 		atomic64_read(&mddev->resync_mismatches) : 0;
 
 	/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
+3 -3
drivers/md/md-bitmap.c
···
 	struct block_device *bdev;
 	struct mddev *mddev = bitmap->mddev;
 	struct bitmap_storage *store = &bitmap->storage;
+	unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
+		PAGE_SHIFT;
 	loff_t sboff, offset = mddev->bitmap_info.offset;
 	sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
 	unsigned int size = PAGE_SIZE;
···
 		if (size == 0)
 			/* bitmap runs in to data */
 			return -EINVAL;
-	} else {
-		/* DATA METADATA BITMAP - no problems */
 	}
 
-	md_super_write(mddev, rdev, sboff + ps, (int) size, page);
+	md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page);
 	return 0;
 }
 
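
[Editor's note: the fix above caps each bitmap write at the end of the bitmap area, so an I/O size rounded up past the last bitmap page can no longer overwrite adjacent data. A standalone sketch of the clamping arithmetic follows; it runs in userspace, assumes 4 KiB pages, and the values in main() are picked purely for illustration.]

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	static unsigned long clamped_write_size(unsigned long file_pages,
						unsigned long pg_index,
						unsigned long size)
	{
		/* bytes of bitmap left from pg_index to the end of the bitmap */
		unsigned long bitmap_limit = (file_pages - pg_index) << PAGE_SHIFT;

		/* mirrors min(size, bitmap_limit) in the patch */
		return size < bitmap_limit ? size : bitmap_limit;
	}

	int main(void)
	{
		/* last page of a 3-page bitmap: a 2-page request is clamped */
		printf("%lu\n", clamped_write_size(3, 2, 2 * PAGE_SIZE)); /* 4096 */
		/* first page: a single-page write passes through unchanged */
		printf("%lu\n", clamped_write_size(3, 0, PAGE_SIZE));     /* 4096 */
		return 0;
	}
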
+304 -212
drivers/md/md.c
···
 #include "md-bitmap.h"
 #include "md-cluster.h"
 
+static const char *action_name[NR_SYNC_ACTIONS] = {
+	[ACTION_RESYNC]		= "resync",
+	[ACTION_RECOVER]	= "recover",
+	[ACTION_CHECK]		= "check",
+	[ACTION_REPAIR]		= "repair",
+	[ACTION_RESHAPE]	= "reshape",
+	[ACTION_FROZEN]		= "frozen",
+	[ACTION_IDLE]		= "idle",
+};
+
 /* pers_list is a list of registered personalities protected by pers_lock. */
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
···
 	 */
 	WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
 
-	del_timer_sync(&mddev->safemode_timer);
 	/* restrict memory reclaim I/O during raid array is suspend */
 	mddev->noio_flag = memalloc_noio_save();
 
···
 
 	rdev_dec_pending(rdev, mddev);
 
-	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		/* The pair is percpu_ref_get() from md_flush_request() */
-		percpu_ref_put(&mddev->active_io);
-
+	if (atomic_dec_and_test(&mddev->flush_pending))
 		/* The pre-request flush has finished */
 		queue_work(md_wq, &mddev->flush_work);
-	}
 }
 
 static void md_submit_flush_data(struct work_struct *ws);
···
 		rcu_read_lock();
 	}
 	rcu_read_unlock();
-	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		/* The pair is percpu_ref_get() from md_flush_request() */
-		percpu_ref_put(&mddev->active_io);
-
+	if (atomic_dec_and_test(&mddev->flush_pending))
 		queue_work(md_wq, &mddev->flush_work);
-	}
 }
 
 static void md_submit_flush_data(struct work_struct *ws)
···
 		bio_endio(bio);
 	} else {
 		bio->bi_opf &= ~REQ_PREFLUSH;
-		md_handle_request(mddev, bio);
+
+		/*
+		 * make_request() will never return error here, it only
+		 * returns error in raid5_make_request() by dm-raid.
+		 * Since dm always splits data and flush operation into
+		 * two separate io, io size of flush submitted by dm
+		 * always is 0, make_request() will not be called here.
+		 */
+		if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio)))
+			bio_io_error(bio);
 	}
+
+	/* The pair is percpu_ref_get() from md_flush_request() */
+	percpu_ref_put(&mddev->active_io);
 }
 
 /*
···
 		WARN_ON(percpu_ref_is_zero(&mddev->active_io));
 		percpu_ref_get(&mddev->active_io);
 		mddev->flush_bio = bio;
-		bio = NULL;
-	}
-	spin_unlock_irq(&mddev->lock);
-
-	if (!bio) {
+		spin_unlock_irq(&mddev->lock);
 		INIT_WORK(&mddev->flush_work, submit_flushes);
 		queue_work(md_wq, &mddev->flush_work);
-	} else {
-		/* flush was performed for some other bio while we waited. */
-		if (bio->bi_iter.bi_size == 0)
-			/* an empty barrier - all done */
-			bio_endio(bio);
-		else {
-			bio->bi_opf &= ~REQ_PREFLUSH;
-			return false;
-		}
+		return true;
 	}
-	return true;
+
+	/* flush was performed for some other bio while we waited. */
+	spin_unlock_irq(&mddev->lock);
+	if (bio->bi_iter.bi_size == 0) {
+		/* pure flush without data - all done */
+		bio_endio(bio);
+		return true;
+	}
+
+	bio->bi_opf &= ~REQ_PREFLUSH;
+	return false;
 }
 EXPORT_SYMBOL(md_flush_request);
···
 
 	mutex_init(&mddev->open_mutex);
 	mutex_init(&mddev->reconfig_mutex);
-	mutex_init(&mddev->sync_mutex);
 	mutex_init(&mddev->suspend_mutex);
 	mutex_init(&mddev->bitmap_info.mutex);
 	INIT_LIST_HEAD(&mddev->disks);
···
 	init_waitqueue_head(&mddev->recovery_wait);
 	mddev->reshape_position = MaxSector;
 	mddev->reshape_backwards = 0;
-	mddev->last_sync_action = "none";
+	mddev->last_sync_action = ACTION_IDLE;
 	mddev->resync_min = 0;
 	mddev->resync_max = MaxSector;
 	mddev->level = LEVEL_NONE;
···
 static struct md_sysfs_entry md_metadata =
 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
 
+enum sync_action md_sync_action(struct mddev *mddev)
+{
+	unsigned long recovery = mddev->recovery;
+
+	/*
+	 * frozen has the highest priority, means running sync_thread will be
+	 * stopped immediately, and no new sync_thread can start.
+	 */
+	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
+		return ACTION_FROZEN;
+
+	/*
+	 * read-only array can't register sync_thread, and it can only
+	 * add/remove spares.
+	 */
+	if (!md_is_rdwr(mddev))
+		return ACTION_IDLE;
+
+	/*
+	 * idle means no sync_thread is running, and no new sync_thread is
+	 * requested.
+	 */
+	if (!test_bit(MD_RECOVERY_RUNNING, &recovery) &&
+	    !test_bit(MD_RECOVERY_NEEDED, &recovery))
+		return ACTION_IDLE;
+
+	if (test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+	    mddev->reshape_position != MaxSector)
+		return ACTION_RESHAPE;
+
+	if (test_bit(MD_RECOVERY_RECOVER, &recovery))
+		return ACTION_RECOVER;
+
+	if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+		/*
+		 * MD_RECOVERY_CHECK must be paired with
+		 * MD_RECOVERY_REQUESTED.
+		 */
+		if (test_bit(MD_RECOVERY_CHECK, &recovery))
+			return ACTION_CHECK;
+		if (test_bit(MD_RECOVERY_REQUESTED, &recovery))
+			return ACTION_REPAIR;
+		return ACTION_RESYNC;
+	}
+
+	/*
+	 * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no
+	 * sync_action is specified.
+	 */
+	return ACTION_IDLE;
+}
+
+enum sync_action md_sync_action_by_name(const char *page)
+{
+	enum sync_action action;
+
+	for (action = 0; action < NR_SYNC_ACTIONS; ++action) {
+		if (cmd_match(page, action_name[action]))
+			return action;
+	}
+
+	return NR_SYNC_ACTIONS;
+}
+
+const char *md_sync_action_name(enum sync_action action)
+{
+	return action_name[action];
+}
+
 static ssize_t
 action_show(struct mddev *mddev, char *page)
 {
-	char *type = "idle";
-	unsigned long recovery = mddev->recovery;
-	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
-		type = "frozen";
-	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
-	    (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
-		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
-			type = "reshape";
-		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
-			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
-				type = "resync";
-			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
-				type = "check";
-			else
-				type = "repair";
-		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
-			type = "recover";
-		else if (mddev->reshape_position != MaxSector)
-			type = "reshape";
-	}
-	return sprintf(page, "%s\n", type);
+	enum sync_action action = md_sync_action(mddev);
+
+	return sprintf(page, "%s\n", md_sync_action_name(action));
 }
 
 /**
···
  * @locked: if set, reconfig_mutex will still be held after this function
  *	    return; if not set, reconfig_mutex will be released after this
  *	    function return.
- * @check_seq: if set, only wait for curent running sync_thread to stop, noted
- *	       that new sync_thread can still start.
  */
-static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
+static void stop_sync_thread(struct mddev *mddev, bool locked)
 {
-	int sync_seq;
-
-	if (check_seq)
-		sync_seq = atomic_read(&mddev->sync_seq);
+	int sync_seq = atomic_read(&mddev->sync_seq);
 
 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
 		if (!locked)
···
 
 	wait_event(resync_wait,
 		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
-		   (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+		   (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) &&
+		    sync_seq != atomic_read(&mddev->sync_seq)));
 
 	if (locked)
 		mddev_lock_nointr(mddev);
···
 	lockdep_assert_held(&mddev->reconfig_mutex);
 
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, true);
+	stop_sync_thread(mddev, true);
 }
 EXPORT_SYMBOL_GPL(md_idle_sync_thread);
···
 	lockdep_assert_held(&mddev->reconfig_mutex);
 
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);
 }
 EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
···
 }
 EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);
 
-static void idle_sync_thread(struct mddev *mddev)
+static int mddev_start_reshape(struct mddev *mddev)
 {
-	mutex_lock(&mddev->sync_mutex);
-	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	int ret;
 
-	if (mddev_lock(mddev)) {
-		mutex_unlock(&mddev->sync_mutex);
-		return;
+	if (mddev->pers->start_reshape == NULL)
+		return -EINVAL;
+
+	if (mddev->reshape_position == MaxSector ||
+	    mddev->pers->check_reshape == NULL ||
+	    mddev->pers->check_reshape(mddev)) {
+		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+		ret = mddev->pers->start_reshape(mddev);
+		if (ret)
+			return ret;
+	} else {
+		/*
+		 * If reshape is still in progress, and md_check_recovery() can
+		 * continue to reshape, don't restart reshape because data can
+		 * be corrupted for raid456.
+		 */
+		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}
 
-	stop_sync_thread(mddev, false, true);
-	mutex_unlock(&mddev->sync_mutex);
-}
-
-static void frozen_sync_thread(struct mddev *mddev)
-{
-	mutex_lock(&mddev->sync_mutex);
-	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
-	if (mddev_lock(mddev)) {
-		mutex_unlock(&mddev->sync_mutex);
-		return;
-	}
-
-	stop_sync_thread(mddev, false, false);
-	mutex_unlock(&mddev->sync_mutex);
+	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
+	return 0;
 }
 
 static ssize_t
 action_store(struct mddev *mddev, const char *page, size_t len)
 {
+	int ret;
+	enum sync_action action;
+
 	if (!mddev->pers || !mddev->pers->sync_request)
 		return -EINVAL;
 
+retry:
+	if (work_busy(&mddev->sync_work))
+		flush_work(&mddev->sync_work);
 
-	if (cmd_match(page, "idle"))
-		idle_sync_thread(mddev);
-	else if (cmd_match(page, "frozen"))
-		frozen_sync_thread(mddev);
-	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-		return -EBUSY;
-	else if (cmd_match(page, "resync"))
-		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	else if (cmd_match(page, "recover")) {
-		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
-	} else if (cmd_match(page, "reshape")) {
-		int err;
-		if (mddev->pers->start_reshape == NULL)
-			return -EINVAL;
-		err = mddev_lock(mddev);
-		if (!err) {
-			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-				err = -EBUSY;
-			} else if (mddev->reshape_position == MaxSector ||
-				   mddev->pers->check_reshape == NULL ||
-				   mddev->pers->check_reshape(mddev)) {
-				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-				err = mddev->pers->start_reshape(mddev);
-			} else {
-				/*
-				 * If reshape is still in progress, and
-				 * md_check_recovery() can continue to reshape,
-				 * don't restart reshape because data can be
-				 * corrupted for raid456.
-				 */
-				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-			}
-			mddev_unlock(mddev);
-		}
-		if (err)
-			return err;
-		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
-	} else {
-		if (cmd_match(page, "check"))
-			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
-		else if (!cmd_match(page, "repair"))
-			return -EINVAL;
-		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
-		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+	ret = mddev_lock(mddev);
+	if (ret)
+		return ret;
+
+	if (work_busy(&mddev->sync_work)) {
+		mddev_unlock(mddev);
+		goto retry;
 	}
+
+	action = md_sync_action_by_name(page);
+
+	/* TODO: mdadm rely on "idle" to start sync_thread. */
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+		switch (action) {
+		case ACTION_FROZEN:
+			md_frozen_sync_thread(mddev);
+			ret = len;
+			goto out;
+		case ACTION_IDLE:
+			md_idle_sync_thread(mddev);
+			break;
+		case ACTION_RESHAPE:
+		case ACTION_RECOVER:
+		case ACTION_CHECK:
+		case ACTION_REPAIR:
+		case ACTION_RESYNC:
+			ret = -EBUSY;
+			goto out;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
+	} else {
+		switch (action) {
+		case ACTION_FROZEN:
+			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+			ret = len;
+			goto out;
+		case ACTION_RESHAPE:
+			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+			ret = mddev_start_reshape(mddev);
+			if (ret)
+				goto out;
+			break;
+		case ACTION_RECOVER:
+			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+			break;
+		case ACTION_CHECK:
+			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+			fallthrough;
+		case ACTION_REPAIR:
+			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+			fallthrough;
+		case ACTION_RESYNC:
+		case ACTION_IDLE:
+			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+			break;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
 	if (mddev->ro == MD_AUTO_READ) {
 		/* A write to sync_action is enough to justify
 		 * canceling read-auto mode
 		 */
-		flush_work(&mddev->sync_work);
 		mddev->ro = MD_RDWR;
 		md_wakeup_thread(mddev->sync_thread);
 	}
+
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
-	return len;
+	ret = len;
+
+out:
+	mddev_unlock(mddev);
+	return ret;
 }
 
 static struct md_sysfs_entry md_scan_mode =
···
 static ssize_t
 last_sync_action_show(struct mddev *mddev, char *page)
 {
-	return sprintf(page, "%s\n", mddev->last_sync_action);
+	return sprintf(page, "%s\n",
+		       md_sync_action_name(mddev->last_sync_action));
 }
 
 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
···
 {
 	mddev_lock_nointr(mddev);
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);
 	__md_stop_writes(mddev);
 	mddev_unlock(mddev);
 }
···
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}
 
-	stop_sync_thread(mddev, false, false);
+	stop_sync_thread(mddev, false);
 	wait_event(mddev->sb_wait,
 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 	mddev_lock_nointr(mddev);
···
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}
 
-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);
 
 	if (mddev->sysfs_active ||
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
···
  * A return value of 'false' means that the write wasn't recorded
  * and cannot proceed as the array is being suspend.
  */
-bool md_write_start(struct mddev *mddev, struct bio *bi)
+void md_write_start(struct mddev *mddev, struct bio *bi)
 {
 	int did_change = 0;
 
 	if (bio_data_dir(bi) != WRITE)
-		return true;
+		return;
 
 	BUG_ON(mddev->ro == MD_RDONLY);
 	if (mddev->ro == MD_AUTO_READ) {
···
 	if (did_change)
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	if (!mddev->has_superblocks)
-		return true;
+		return;
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
-		   is_md_suspended(mddev));
-	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
-		percpu_ref_put(&mddev->writes_pending);
-		return false;
-	}
-	return true;
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 }
 EXPORT_SYMBOL(md_write_start);
···
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
+static sector_t md_sync_max_sectors(struct mddev *mddev,
+				    enum sync_action action)
+{
+	switch (action) {
+	case ACTION_RESYNC:
+	case ACTION_CHECK:
+	case ACTION_REPAIR:
+		atomic64_set(&mddev->resync_mismatches, 0);
+		fallthrough;
+	case ACTION_RESHAPE:
+		return mddev->resync_max_sectors;
+	case ACTION_RECOVER:
+		return mddev->dev_sectors;
+	default:
+		return 0;
+	}
+}
+
+static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
+{
+	sector_t start = 0;
+	struct md_rdev *rdev;
+
+	switch (action) {
+	case ACTION_CHECK:
+	case ACTION_REPAIR:
+		return mddev->resync_min;
+	case ACTION_RESYNC:
+		if (!mddev->bitmap)
+			return mddev->recovery_cp;
+		return 0;
+	case ACTION_RESHAPE:
+		/*
+		 * If the original node aborts reshaping then we continue the
+		 * reshaping, so set again to avoid restart reshape from the
+		 * first beginning
+		 */
+		if (mddev_is_clustered(mddev) &&
+		    mddev->reshape_position != MaxSector)
+			return mddev->reshape_position;
+		return 0;
+	case ACTION_RECOVER:
+		start = MaxSector;
+		rcu_read_lock();
+		rdev_for_each_rcu(rdev, mddev)
+			if (rdev->raid_disk >= 0 &&
+			    !test_bit(Journal, &rdev->flags) &&
+			    !test_bit(Faulty, &rdev->flags) &&
+			    !test_bit(In_sync, &rdev->flags) &&
+			    rdev->recovery_offset < start)
+				start = rdev->recovery_offset;
+		rcu_read_unlock();
+
+		/* If there is a bitmap, we need to make sure all
+		 * writes that started before we added a spare
+		 * complete before we start doing a recovery.
+		 * Otherwise the write might complete and (via
+		 * bitmap_endwrite) set a bit in the bitmap after the
+		 * recovery has checked that bit and skipped that
+		 * region.
+		 */
+		if (mddev->bitmap) {
+			mddev->pers->quiesce(mddev, 1);
+			mddev->pers->quiesce(mddev, 0);
+		}
+		return start;
+	default:
+		return MaxSector;
+	}
+}
+
 #define SYNC_MARKS	10
 #define	SYNC_MARK_STEP	(3*HZ)
 #define UPDATE_FREQUENCY (5*60*HZ)
···
 	sector_t last_check;
 	int skipped = 0;
 	struct md_rdev *rdev;
-	char *desc, *action = NULL;
+	enum sync_action action;
+	const char *desc;
 	struct blk_plug plug;
 	int ret;
···
 		goto skip;
 	}
 
-	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
-			desc = "data-check";
-			action = "check";
-		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
-			desc = "requested-resync";
-			action = "repair";
-		} else
-			desc = "resync";
-	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-		desc = "reshape";
-	else
-		desc = "recovery";
-
-	mddev->last_sync_action = action ?: desc;
+	action = md_sync_action(mddev);
+	desc = md_sync_action_name(action);
+	mddev->last_sync_action = action;
 
 	/*
 	 * Before starting a resync we must have set curr_resync to
···
 		spin_unlock(&all_mddevs_lock);
 	} while (mddev->curr_resync < MD_RESYNC_DELAYED);
 
-	j = 0;
-	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-		/* resync follows the size requested by the personality,
-		 * which defaults to physical size, but can be virtual size
-		 */
-		max_sectors = mddev->resync_max_sectors;
-		atomic64_set(&mddev->resync_mismatches, 0);
-		/* we don't use the checkpoint if there's a bitmap */
-		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
-			j = mddev->resync_min;
-		else if (!mddev->bitmap)
-			j = mddev->recovery_cp;
-
-	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
-		max_sectors = mddev->resync_max_sectors;
-		/*
-		 * If the original node aborts reshaping then we continue the
-		 * reshaping, so set j again to avoid restart reshape from the
-		 * first beginning
-		 */
-		if (mddev_is_clustered(mddev) &&
-		    mddev->reshape_position != MaxSector)
-			j = mddev->reshape_position;
-	} else {
-		/* recovery follows the physical size of devices */
-		max_sectors = mddev->dev_sectors;
-		j = MaxSector;
-		rcu_read_lock();
-		rdev_for_each_rcu(rdev, mddev)
-			if (rdev->raid_disk >= 0 &&
-			    !test_bit(Journal, &rdev->flags) &&
-			    !test_bit(Faulty, &rdev->flags) &&
-			    !test_bit(In_sync, &rdev->flags) &&
-			    rdev->recovery_offset < j)
-				j = rdev->recovery_offset;
-		rcu_read_unlock();
-
-		/* If there is a bitmap, we need to make sure all
-		 * writes that started before we added a spare
-		 * complete before we start doing a recovery.
-		 * Otherwise the write might complete and (via
-		 * bitmap_endwrite) set a bit in the bitmap after the
-		 * recovery has checked that bit and skipped that
-		 * region.
-		 */
-		if (mddev->bitmap) {
-			mddev->pers->quiesce(mddev, 1);
-			mddev->pers->quiesce(mddev, 0);
-		}
-	}
+	max_sectors = md_sync_max_sectors(mddev, action);
+	j = md_sync_position(mddev, action);
 
 	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
 	pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
···
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			break;
 
-		sectors = mddev->pers->sync_request(mddev, j, &skipped);
+		sectors = mddev->pers->sync_request(mddev, j, max_sectors,
+						    &skipped);
 		if (sectors == 0) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			break;
···
 		mddev->curr_resync_completed = mddev->curr_resync;
 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
 	}
-	mddev->pers->sync_request(mddev, max_sectors, &skipped);
+	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
 
 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
 	    mddev->curr_resync > MD_RESYNC_ACTIVE) {
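
[Editor's note: the core of the md.c refactoring above is an enum/name table pattern: a designated-initializer string table plus one lookup helper, replacing scattered cmd_match() chains and string-valued state. A self-contained userspace sketch of that pattern follows; strcmp() stands in for the kernel's cmd_match(), which additionally tolerates the trailing newline that sysfs writes carry.]

	#include <stdio.h>
	#include <string.h>

	enum sync_action {
		ACTION_RESYNC, ACTION_RECOVER, ACTION_CHECK, ACTION_REPAIR,
		ACTION_RESHAPE, ACTION_FROZEN, ACTION_IDLE, NR_SYNC_ACTIONS,
	};

	/* one canonical name per action, indexed by the enum itself */
	static const char *action_name[NR_SYNC_ACTIONS] = {
		[ACTION_RESYNC]  = "resync",
		[ACTION_RECOVER] = "recover",
		[ACTION_CHECK]   = "check",
		[ACTION_REPAIR]  = "repair",
		[ACTION_RESHAPE] = "reshape",
		[ACTION_FROZEN]  = "frozen",
		[ACTION_IDLE]    = "idle",
	};

	static enum sync_action sync_action_by_name(const char *page)
	{
		enum sync_action action;

		for (action = 0; action < NR_SYNC_ACTIONS; ++action)
			if (!strcmp(page, action_name[action]))
				return action;

		return NR_SYNC_ACTIONS;	/* no match: caller treats as -EINVAL */
	}

	int main(void)
	{
		enum sync_action a = sync_action_by_name("check");

		if (a < NR_SYNC_ACTIONS)
			printf("parsed: %s (%d)\n", action_name[a], a);
		return 0;
	}
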
+103 -23
drivers/md/md.h
···
  */
 #define MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
 
+/* Status of sync thread. */
+enum sync_action {
+	/*
+	 * Represented by MD_RECOVERY_SYNC, started when:
+	 * 1) after assemble, sync data from first rdev to other copies, this
+	 * must be done first before other sync actions and will only execute
+	 * once;
+	 * 2) resize the array (notice that this is not reshape), sync data for
+	 * the new range;
+	 */
+	ACTION_RESYNC,
+	/*
+	 * Represented by MD_RECOVERY_RECOVER, started when:
+	 * 1) for new replacement, sync data based on the replace rdev or
+	 * available copies from other rdev;
+	 * 2) for new member disk while the array is degraded, sync data from
+	 * other rdev;
+	 * 3) reassemble after power failure or re-add a hot removed rdev, sync
+	 * data from first rdev to other copies based on bitmap;
+	 */
+	ACTION_RECOVER,
+	/*
+	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
+	 * MD_RECOVERY_CHECK, started when user echoes "check" to the sysfs
+	 * api sync_action, used to check if data copies from different rdev
+	 * are the same. The number of mismatch sectors will be exported to
+	 * user by the sysfs api mismatch_cnt;
+	 */
+	ACTION_CHECK,
+	/*
+	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED, started
+	 * when user echoes "repair" to the sysfs api sync_action, usually
+	 * paired with ACTION_CHECK, used to force syncing data once user
+	 * found that there are inconsistent data;
+	 */
+	ACTION_REPAIR,
+	/*
+	 * Represented by MD_RECOVERY_RESHAPE, started when new member disk is
+	 * added to the conf, notice that this is different from spares or
+	 * replacement;
+	 */
+	ACTION_RESHAPE,
+	/*
+	 * Represented by MD_RECOVERY_FROZEN, can be set by the sysfs api
+	 * sync_action or internal usage like setting the array read-only,
+	 * will forbid above actions.
+	 */
+	ACTION_FROZEN,
+	/*
+	 * All above actions don't match.
+	 */
+	ACTION_IDLE,
+	NR_SYNC_ACTIONS,
+};
+
 /*
  * The struct embedded in rdev is used to serialize IO.
  */
···
 	struct md_thread __rcu		*thread;	/* management thread */
 	struct md_thread __rcu		*sync_thread;	/* doing resync or reconstruct */
 
-	/* 'last_sync_action' is initialized to "none".  It is set when a
-	 * sync operation (i.e "data-check", "requested-resync", "resync",
-	 * "recovery", or "reshape") is started.  It holds this value even
+	/*
+	 * Set when a sync operation is started. It holds this value even
 	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
-	 * or finished).  It is overwritten when a new sync operation is begun.
+	 * or finished). It is overwritten when a new sync operation is begun.
 	 */
-	char				*last_sync_action;
+	enum sync_action		last_sync_action;
 	sector_t			curr_resync;	/* last block scheduled */
 	/* As resync requests can complete out of order, we cannot easily track
 	 * how much resync has been completed.  So we occasionally pause until
···
 	 */
 	struct list_head		deleting;
 
-	/* Used to synchronize idle and frozen for action_store() */
-	struct mutex			sync_mutex;
 	/* The sequence number for sync thread */
 	atomic_t sync_seq;
···
 };
 
 enum recovery_flags {
+	/* flags for sync thread running status */
+
 	/*
-	 * If neither SYNC or RESHAPE are set, then it is a recovery.
+	 * set when a sync action is set and a new sync thread needs to be
+	 * registered, or just to add/remove spares from conf.
 	 */
-	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
-	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
-	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
-	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
-	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
-	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
-	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
-	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
-	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
-	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
-	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
-	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
-	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
+	MD_RECOVERY_NEEDED,
+	/* sync thread is running, or about to be started */
+	MD_RECOVERY_RUNNING,
+	/* sync thread needs to be aborted for some reason */
+	MD_RECOVERY_INTR,
+	/* sync thread is done and is waiting to be unregistered */
+	MD_RECOVERY_DONE,
+	/* running sync thread must abort immediately, and not restart */
+	MD_RECOVERY_FROZEN,
+	/* waiting for pers->start() to finish */
+	MD_RECOVERY_WAIT,
+	/* interrupted because of io-error */
+	MD_RECOVERY_ERROR,
+
+	/* flags that determine the sync action, see details in enum sync_action */
+
+	/* if just this flag is set, action is resync. */
+	MD_RECOVERY_SYNC,
+	/*
+	 * paired with MD_RECOVERY_SYNC, if MD_RECOVERY_CHECK is not set,
+	 * action is repair, meaning user requested resync.
+	 */
+	MD_RECOVERY_REQUESTED,
+	/*
+	 * paired with MD_RECOVERY_SYNC and MD_RECOVERY_REQUESTED, action is
+	 * check.
+	 */
+	MD_RECOVERY_CHECK,
+	/* recovery, or need to try it */
+	MD_RECOVERY_RECOVER,
+	/* reshape */
+	MD_RECOVERY_RESHAPE,
+	/* remote node is running resync thread */
+	MD_RESYNCING_REMOTE,
 };
 
 enum md_ro_state {
···
 	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*spare_active) (struct mddev *mddev);
-	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
+	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
+				 sector_t max_sector, int *skipped);
 	int (*resize) (struct mddev *mddev, sector_t sectors);
 	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
 	int (*check_reshape) (struct mddev *mddev);
···
 extern void md_wakeup_thread(struct md_thread __rcu *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
-extern bool md_write_start(struct mddev *mddev, struct bio *bi);
+extern enum sync_action md_sync_action(struct mddev *mddev);
+extern enum sync_action md_sync_action_by_name(const char *page);
+extern const char *md_sync_action_name(enum sync_action action);
+extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
+5 -16
drivers/md/raid0.c
···
 	return array_sectors;
 }
 
-static void free_conf(struct mddev *mddev, struct r0conf *conf)
-{
-	kfree(conf->strip_zone);
-	kfree(conf->devlist);
-	kfree(conf);
-}
-
 static void raid0_free(struct mddev *mddev, void *priv)
 {
 	struct r0conf *conf = priv;
 
-	free_conf(mddev, conf);
+	kfree(conf->strip_zone);
+	kfree(conf->devlist);
+	kfree(conf);
 }
 
 static int raid0_set_limits(struct mddev *mddev)
···
 	if (!mddev_is_dm(mddev)) {
 		ret = raid0_set_limits(mddev);
 		if (ret)
-			goto out_free_conf;
+			return ret;
 	}
 
 	/* calculate array device size */
···
 
 	dump_zones(mddev);
 
-	ret = md_integrity_register(mddev);
-	if (ret)
-		goto out_free_conf;
-	return 0;
-out_free_conf:
-	free_conf(mddev, conf);
-	return ret;
+	return md_integrity_register(mddev);
 }
 
 /*
+6 -16
drivers/md/raid1.c
···
 	if (bio_data_dir(bio) == READ)
 		raid1_read_request(mddev, bio, sectors, NULL);
 	else {
-		if (!md_write_start(mddev,bio))
-			return false;
+		md_write_start(mddev,bio);
 		raid1_write_request(mddev, bio, sectors);
 	}
 	return true;
···
  */
 
 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
-				   int *skipped)
+				   sector_t max_sector, int *skipped)
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
 	struct bio *bio;
-	sector_t max_sector, nr_sectors;
+	sector_t nr_sectors;
 	int disk = -1;
 	int i;
 	int wonly = -1;
···
 	if (init_resync(conf))
 		return 0;
 
-	max_sector = mddev->dev_sectors;
 	if (sector_nr >= max_sector) {
 		/* If we aborted, we need to abort the
 		 * sync on the 'current' bitmap chunk (there will
···
 	return queue_limits_set(mddev->gendisk->queue, &lim);
 }
 
-static void raid1_free(struct mddev *mddev, void *priv);
 static int raid1_run(struct mddev *mddev)
 {
 	struct r1conf *conf;
···
 	if (!mddev_is_dm(mddev)) {
 		ret = raid1_set_limits(mddev);
 		if (ret)
-			goto abort;
+			return ret;
 	}
 
 	mddev->degraded = 0;
···
 	 */
 	if (conf->raid_disks - mddev->degraded < 1) {
 		md_unregister_thread(mddev, &conf->thread);
-		ret = -EINVAL;
-		goto abort;
+		return -EINVAL;
 	}
 
 	if (conf->raid_disks - mddev->degraded == 1)
···
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
 	ret = md_integrity_register(mddev);
-	if (ret) {
+	if (ret)
 		md_unregister_thread(mddev, &mddev->thread);
-		goto abort;
-	}
-	return 0;
-
-abort:
-	raid1_free(mddev, conf);
 	return ret;
 }
 
+3 -8
drivers/md/raid10.c
···
 	    && md_flush_request(mddev, bio))
 		return true;
 
-	if (!md_write_start(mddev, bio))
-		return false;
+	md_write_start(mddev, bio);
 
 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
 		if (!raid10_handle_discard(mddev, bio))
···
  */
 
 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
-				    int *skipped)
+				    sector_t max_sector, int *skipped)
 {
 	struct r10conf *conf = mddev->private;
 	struct r10bio *r10_bio;
 	struct bio *biolist = NULL, *bio;
-	sector_t max_sector, nr_sectors;
+	sector_t nr_sectors;
 	int i;
 	int max_sync;
 	sector_t sync_blocks;
···
 		return 0;
 
  skipped:
-	max_sector = mddev->dev_sectors;
-	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
-	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-		max_sector = mddev->resync_max_sectors;
 	if (sector_nr >= max_sector) {
 		conf->cluster_sync_low = 0;
 		conf->cluster_sync_high = 0;
+15 -11
drivers/md/raid5.c
···
 	ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
 	}
 
-	if (!md_write_start(mddev, bi))
-		return false;
+	md_write_start(mddev, bi);
 	/*
 	 * If array is degraded, better not do chunk aligned read because
 	 * later we might have to read it again in order to reconstruct
···
 	safepos = conf->reshape_safe;
 	sector_div(safepos, data_disks);
 	if (mddev->reshape_backwards) {
-		BUG_ON(writepos < reshape_sectors);
+		if (WARN_ON(writepos < reshape_sectors))
+			return MaxSector;
+
 		writepos -= reshape_sectors;
 		readpos += reshape_sectors;
 		safepos += reshape_sectors;
···
 	 * to set 'stripe_addr' which is where we will write to.
 	 */
 	if (mddev->reshape_backwards) {
-		BUG_ON(conf->reshape_progress == 0);
+		if (WARN_ON(conf->reshape_progress == 0))
+			return MaxSector;
+
 		stripe_addr = writepos;
-		BUG_ON((mddev->dev_sectors &
-			~((sector_t)reshape_sectors - 1))
-		       - reshape_sectors - stripe_addr
-		       != sector_nr);
+		if (WARN_ON((mddev->dev_sectors &
+			     ~((sector_t)reshape_sectors - 1)) -
+			    reshape_sectors - stripe_addr != sector_nr))
+			return MaxSector;
 	} else {
-		BUG_ON(writepos != sector_nr + reshape_sectors);
+		if (WARN_ON(writepos != sector_nr + reshape_sectors))
+			return MaxSector;
+
 		stripe_addr = sector_nr;
 	}
 
···
 }
 
 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
-					  int *skipped)
+					  sector_t max_sector, int *skipped)
 {
 	struct r5conf *conf = mddev->private;
 	struct stripe_head *sh;
-	sector_t max_sector = mddev->dev_sectors;
 	sector_t sync_blocks;
 	int still_degraded = 0;
 	int i;
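
[Editor's note: the raid5 hunks above replace BUG_ON() asserts in the reshape bookkeeping with WARN_ON() checks that return MaxSector, so an inconsistent reshape position aborts the sync thread instead of crashing the machine. A schematic userspace illustration of that warn-and-bail pattern follows; the function name and values are made up for the example.]

	#include <stdio.h>

	#define MAX_SECTOR (~0ULL)	/* stands in for the kernel's MaxSector */

	static unsigned long long reshape_step(unsigned long long writepos,
					       unsigned long long reshape_sectors)
	{
		/* was: BUG_ON(writepos < reshape_sectors) */
		if (writepos < reshape_sectors) {
			fprintf(stderr, "WARN: bogus reshape position\n");
			return MAX_SECTOR;	/* caller treats this as "abort" */
		}

		return writepos - reshape_sectors;
	}

	int main(void)
	{
		printf("%llu\n", reshape_step(1024, 64));	/* 960 */
		printf("%llu\n", reshape_step(32, 64));		/* warns, MAX_SECTOR */
		return 0;
	}
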