Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md

Pull MD update from Shaohua Li:
"This update mostly includes bug fixes:

- md-cluster now supports raid10, from Guoqing

- raid5 PPL fixes from Artur

- badblock regression fix from Bo

- suspend-hang related fixes from Neil

- raid5 reshape fixes from Neil

- raid1 freeze deadlock fix from Nate

- memleak fixes from Zdenek

- bitmap-related fixes from me and Tao

- other fixes and cleanups"
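A recurring theme in the diffs below is the simplified quiesce contract ("md: remove special meaning of ->quiesce(.., 2)"). As rough orientation before the per-file hunks, a personality's hook now reduces to a two-state suspend/resume callback; this sketch mirrors the new raid1 implementation, with freeze_array()/unfreeze_array() standing in for whatever drain/release mechanism a given personality uses:

    /* sketch: ->quiesce() is now effectively boolean */
    static void pers_quiesce(struct mddev *mddev, int quiesce)
    {
        struct r1conf *conf = mddev->private;

        if (quiesce)        /* 1: stop new actions, wait for active I/O */
            freeze_array(conf, 0);
        else                /* 0: return to normal behaviour */
            unfreeze_array(conf);
    }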

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md: (33 commits)
md: free unused memory after bitmap resize
md: release allocated bitset sync_set
md/bitmap: clear BITMAP_WRITE_ERROR bit before writing it to sb
md: be cautious about using ->curr_resync_completed for ->recovery_offset
badblocks: fix wrong return value in badblocks_set if badblocks are disabled
md: don't check MD_SB_CHANGE_CLEAN in md_allow_write
md-cluster: update document for raid10
md: remove redundant variable q
raid1: remove obsolete code in raid1_write_request
md-cluster: Use a small window for raid10 resync
md-cluster: Suspend writes in RAID10 if within range
md-cluster/raid10: set "do_balance = 0" if area is resyncing
md: use lockdep_assert_held
raid1: prevent freeze_array/wait_all_barriers deadlock
md: use TASK_IDLE instead of blocking signals
md: remove special meaning of ->quiesce(.., 2)
md: allow metadata update while suspending.
md: use mddev_suspend/resume instead of ->quiesce()
md: move suspend_hi/lo handling into core md code
md: don't call bitmap_create() while array is quiesced.
...

+414 -228
+2 -1
Documentation/md/md-cluster.txt
···
-The cluster MD is a shared-device RAID for a cluster.
+The cluster MD is a shared-device RAID for a cluster, it supports
+two levels: raid1 and raid10 (limited support).
 
 
 1. On-disk format
+6 -1
MAINTAINERS
···
 T:  quilt http://people.redhat.com/agk/patches/linux/editing/
 S:  Maintained
 F:  Documentation/device-mapper/
+F:  drivers/md/Makefile
+F:  drivers/md/Kconfig
 F:  drivers/md/dm*
 F:  drivers/md/persistent-data/
 F:  include/linux/device-mapper.h
···
 L:  linux-raid@vger.kernel.org
 T:  git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
 S:  Supported
-F:  drivers/md/
+F:  drivers/md/Makefile
+F:  drivers/md/Kconfig
+F:  drivers/md/md*
+F:  drivers/md/raid*
 F:  include/linux/raid/
 F:  include/uapi/linux/raid/
 
+1 -1
block/badblocks.c
···
 
    if (bb->shift < 0)
        /* badblocks are disabled */
-       return 0;
+       return 1;
 
    if (bb->shift) {
        /* round the start down, and the end up */
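badblocks_set() returns 0 when the bad range was recorded and 1 when it could not be; with bb->shift < 0 the table is disabled and nothing is recorded, so the old "return 0" let callers believe the block had been remembered. Roughly how md consumes the result (a sketch modeled on rdev_set_badblocks() in md.c, not the verbatim call site):

    /* nonzero from badblocks_set() means "could not record" */
    if (badblocks_set(&rdev->badblocks, s, sectors, 0))
        /* can't track the bad block, so the device can't be trusted */
        md_error(rdev->mddev, rdev);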
+3 -2
drivers/md/Kconfig
···
 
 
 config MD_CLUSTER
-   tristate "Cluster Support for MD (EXPERIMENTAL)"
+   tristate "Cluster Support for MD"
    depends on BLK_DEV_MD
    depends on DLM
    default n
···
      nodes in the cluster can access the MD devices simultaneously.
 
      This brings the redundancy (and uptime) of RAID levels across the
-     nodes of the cluster.
+     nodes of the cluster. Currently, it can work with raid1 and raid10
+     (limited support).
 
      If unsure, say N.
 
+4 -1
drivers/md/Makefile
···
 dm-cache-smq-y  += dm-cache-policy-smq.o
 dm-era-y        += dm-era-target.o
 dm-verity-y     += dm-verity-target.o
-md-mod-y        += md.o bitmap.o
+md-mod-y        += md.o md-bitmap.o
 raid456-y       += raid5.o raid5-cache.o raid5-ppl.o
 dm-zoned-y      += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
+linear-y        += md-linear.o
+multipath-y     += md-multipath.o
+faulty-y        += md-faulty.o
 
 # Note: link order is important.  All raid personalities
 # and must come before md.o, as they each initialise
+23 -4
drivers/md/bitmap.c → drivers/md/md-bitmap.c
···
 #include <linux/seq_file.h>
 #include <trace/events/block.h>
 #include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
 
 static inline char *bmname(struct bitmap *bitmap)
 {
···
    /* rocking back to read-only */
    bitmap->events_cleared = bitmap->mddev->events;
    sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
-   sb->state = cpu_to_le32(bitmap->flags);
+   /*
+    * clear BITMAP_WRITE_ERROR bit to protect against the case that
+    * a bitmap write error occurred but the later writes succeeded.
+    */
+   sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
    /* Just in case these have been changed via sysfs: */
    sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
    sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
···
        err = read_sb_page(bitmap->mddev,
                   offset,
                   sb_page,
-                  0, PAGE_SIZE);
+                  0, sizeof(bitmap_super_t));
    }
    if (err)
        return err;
···
 
    BUG_ON(file && mddev->bitmap_info.offset);
 
+   if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+       pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
+             mdname(mddev));
+       return ERR_PTR(-EBUSY);
+   }
+
    bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
    if (!bitmap)
        return ERR_PTR(-ENOMEM);
···
    if (store.sb_page && bitmap->storage.sb_page)
        memcpy(page_address(store.sb_page),
               page_address(bitmap->storage.sb_page),
-              PAGE_SIZE);
+              sizeof(bitmap_super_t));
    bitmap_file_unmap(&bitmap->storage);
    bitmap->storage = store;
···
        for (k = 0; k < page; k++) {
            kfree(new_bp[k].map);
        }
+       kfree(new_bp);
 
        /* restore some fields from old_counts */
        bitmap->counts.bp = old_counts.bp;
···
            old_blocks = new_blocks;
        }
        block += old_blocks;
+   }
+
+   if (bitmap->counts.bp != old_counts.bp) {
+       unsigned long k;
+       for (k = 0; k < old_counts.pages; k++)
+           if (!old_counts.bp[k].hijacked)
+               kfree(old_counts.bp[k].map);
+       kfree(old_counts.bp);
    }
 
    if (!init) {
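The two resize hunks above are Zdenek's leak fixes and share one pattern: on the failure path the freshly allocated new_bp array was being leaked after its per-page maps were freed, and on the success path the old counts table was dropped without being freed. Once the resized table is live, every old per-page map that was not "hijacked" (a page-pointer slot reused to hold two counters inline) must be freed, then the page array itself. A condensed sketch of that success-path cleanup:

    if (bitmap->counts.bp != old_counts.bp) {
        unsigned long k;

        for (k = 0; k < old_counts.pages; k++)
            if (!old_counts.bp[k].hijacked)   /* inline counters, no map */
                kfree(old_counts.bp[k].map);
        kfree(old_counts.bp);                 /* the page array itself */
    }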
drivers/md/bitmap.h → drivers/md/md-bitmap.h
+9 -3
drivers/md/dm-raid.c
···
 #include "raid1.h"
 #include "raid5.h"
 #include "raid10.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
 
 #include <linux/device-mapper.h>
 
···
 {
    struct raid_set *rs = ti->private;
 
-   if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
+   if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+       mddev_lock_nointr(&rs->md);
        mddev_suspend(&rs->md);
+       mddev_unlock(&rs->md);
+   }
 
    rs->md.ro = 1;
 }
···
    if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
-   if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
+   if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+       mddev_lock_nointr(mddev);
        mddev_resume(mddev);
+       mddev_unlock(mddev);
+   }
 }
 
 static struct target_type raid_target = {
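mddev_suspend() and mddev_resume() now lockdep-assert reconfig_mutex (see the md.c hunks below), so an external caller such as dm-raid has to bracket them with the nointr lock helpers. Collapsed into one critical section for illustration (the real code above takes the lock separately at suspend and at resume time):

    mddev_lock_nointr(&rs->md);   /* cannot fail; ignores signals */
    mddev_suspend(&rs->md);
    /* ... array quiesced: safe to reconfigure ... */
    mddev_resume(&rs->md);
    mddev_unlock(&rs->md);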
drivers/md/faulty.c → drivers/md/md-faulty.c
+1 -1
drivers/md/linear.c → drivers/md/md-linear.c
···
 #include <linux/slab.h>
 #include <trace/events/block.h>
 #include "md.h"
-#include "linear.h"
+#include "md-linear.h"
 
 /*
  * find which device holds a particular offset
drivers/md/linear.h → drivers/md/md-linear.h
+6 -6
drivers/md/md-cluster.c
···
 #include <linux/sched.h>
 #include <linux/raid/md_p.h>
 #include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
 #include "md-cluster.h"
 
 #define LVB_SIZE    64
···
 static void remove_suspend_info(struct mddev *mddev, int slot)
 {
    struct md_cluster_info *cinfo = mddev->cluster_info;
+   mddev->pers->quiesce(mddev, 1);
    spin_lock_irq(&cinfo->suspend_lock);
    __remove_suspend_info(cinfo, slot);
    spin_unlock_irq(&cinfo->suspend_lock);
-   mddev->pers->quiesce(mddev, 2);
+   mddev->pers->quiesce(mddev, 0);
 }
 
···
    s->lo = lo;
    s->hi = hi;
    mddev->pers->quiesce(mddev, 1);
-   mddev->pers->quiesce(mddev, 0);
    spin_lock_irq(&cinfo->suspend_lock);
    /* Remove existing entry (if exists) before adding */
    __remove_suspend_info(cinfo, slot);
    list_add(&s->list, &cinfo->suspend_list);
    spin_unlock_irq(&cinfo->suspend_lock);
-   mddev->pers->quiesce(mddev, 2);
+   mddev->pers->quiesce(mddev, 0);
 }
 
 static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
···
 /*
  * return 0 if all the bitmaps have the same sync_size
  */
-int cluster_check_sync_size(struct mddev *mddev)
+static int cluster_check_sync_size(struct mddev *mddev)
 {
    int i, rv;
    bitmap_super_t *sb;
···
 
 static int __init cluster_init(void)
 {
-   pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
+   pr_warn("md-cluster: support raid1 and raid10 (limited support)\n");
    pr_info("Registering Cluster MD functions\n");
    register_md_cluster_operations(&cluster_ops, THIS_MODULE);
    return 0;
+94 -53
drivers/md/md.c
···
 
 #include <trace/events/block.h>
 #include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
 #include "md-cluster.h"
 
 #ifndef MODULE
···
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
+static bool is_suspended(struct mddev *mddev, struct bio *bio)
+{
+   if (mddev->suspended)
+       return true;
+   if (bio_data_dir(bio) != WRITE)
+       return false;
+   if (mddev->suspend_lo >= mddev->suspend_hi)
+       return false;
+   if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
+       return false;
+   if (bio_end_sector(bio) < mddev->suspend_lo)
+       return false;
+   return true;
+}
+
 void md_handle_request(struct mddev *mddev, struct bio *bio)
 {
 check_suspended:
    rcu_read_lock();
-   if (mddev->suspended) {
+   if (is_suspended(mddev, bio)) {
        DEFINE_WAIT(__wait);
        for (;;) {
            prepare_to_wait(&mddev->sb_wait, &__wait,
                    TASK_UNINTERRUPTIBLE);
-           if (!mddev->suspended)
+           if (!is_suspended(mddev, bio))
                break;
            rcu_read_unlock();
            schedule();
···
 void mddev_suspend(struct mddev *mddev)
 {
    WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
+   lockdep_assert_held(&mddev->reconfig_mutex);
    if (mddev->suspended++)
        return;
    synchronize_rcu();
    wake_up(&mddev->sb_wait);
+   set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+   smp_mb__after_atomic();
    wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
    mddev->pers->quiesce(mddev, 1);
+   clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
+   wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
 
    del_timer_sync(&mddev->safemode_timer);
 }
···
 
 void mddev_resume(struct mddev *mddev)
 {
+   lockdep_assert_held(&mddev->reconfig_mutex);
    if (--mddev->suspended)
        return;
    wake_up(&mddev->sb_wait);
···
     */
    spin_lock(&pers_lock);
    md_wakeup_thread(mddev->thread);
+   wake_up(&mddev->sb_wait);
    spin_unlock(&pers_lock);
 }
 EXPORT_SYMBOL_GPL(mddev_unlock);
···
 
 static bool set_in_sync(struct mddev *mddev)
 {
-   WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
+   lockdep_assert_held(&mddev->lock);
    if (!mddev->in_sync) {
        mddev->sync_checkers++;
        spin_unlock(&mddev->lock);
···
        }
    }
 
-   /* First make sure individual recovery_offsets are correct */
+   /*
+    * First make sure individual recovery_offsets are correct
+    * curr_resync_completed can only be used during recovery.
+    * During reshape/resync it might use array-addresses rather
+    * that device addresses.
+    */
    rdev_for_each(rdev, mddev) {
        if (rdev->raid_disk >= 0 &&
            mddev->delta_disks >= 0 &&
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+           test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
+           !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            !test_bit(Journal, &rdev->flags) &&
            !test_bit(In_sync, &rdev->flags) &&
            mddev->curr_resync_completed > rdev->recovery_offset)
···
 static ssize_t
 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
 {
-   unsigned long long old, new;
+   unsigned long long new;
    int err;
 
    err = kstrtoull(buf, 10, &new);
···
    if (mddev->pers == NULL ||
        mddev->pers->quiesce == NULL)
        goto unlock;
-   old = mddev->suspend_lo;
+   mddev_suspend(mddev);
    mddev->suspend_lo = new;
-   if (new >= old)
-       /* Shrinking suspended region */
-       mddev->pers->quiesce(mddev, 2);
-   else {
-       /* Expanding suspended region - need to wait */
-       mddev->pers->quiesce(mddev, 1);
-       mddev->pers->quiesce(mddev, 0);
-   }
+   mddev_resume(mddev);
+
    err = 0;
 unlock:
    mddev_unlock(mddev);
···
 static ssize_t
 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
 {
-   unsigned long long old, new;
+   unsigned long long new;
    int err;
 
    err = kstrtoull(buf, 10, &new);
···
    if (err)
        return err;
    err = -EINVAL;
-   if (mddev->pers == NULL ||
-       mddev->pers->quiesce == NULL)
+   if (mddev->pers == NULL)
        goto unlock;
-   old = mddev->suspend_hi;
+
+   mddev_suspend(mddev);
    mddev->suspend_hi = new;
-   if (new <= old)
-       /* Shrinking suspended region */
-       mddev->pers->quiesce(mddev, 2);
-   else {
-       /* Expanding suspended region - need to wait */
-       mddev->pers->quiesce(mddev, 1);
-       mddev->pers->quiesce(mddev, 0);
-   }
+   mddev_resume(mddev);
+
    err = 0;
 unlock:
    mddev_unlock(mddev);
···
     * This is called from dm-raid
     */
    __md_stop(mddev);
-   if (mddev->bio_set)
+   if (mddev->bio_set) {
        bioset_free(mddev->bio_set);
+       mddev->bio_set = NULL;
+   }
+   if (mddev->sync_set) {
+       bioset_free(mddev->sync_set);
+       mddev->sync_set = NULL;
+   }
 }
 
 EXPORT_SYMBOL_GPL(md_stop);
···
            break;
        }
    }
-   if (has_journal) {
+   if (has_journal || mddev->bitmap) {
        export_rdev(rdev);
        return -EBUSY;
    }
···
        return -ENOENT; /* cannot remove what isn't there */
    err = 0;
    if (mddev->pers) {
-       mddev->pers->quiesce(mddev, 1);
        if (fd >= 0) {
            struct bitmap *bitmap;
 
            bitmap = bitmap_create(mddev, -1);
+           mddev_suspend(mddev);
            if (!IS_ERR(bitmap)) {
                mddev->bitmap = bitmap;
                err = bitmap_load(mddev);
            } else
                err = PTR_ERR(bitmap);
-       }
-       if (fd < 0 || err) {
+           if (err) {
+               bitmap_destroy(mddev);
+               fd = -1;
+           }
+           mddev_resume(mddev);
+       } else if (fd < 0) {
+           mddev_suspend(mddev);
            bitmap_destroy(mddev);
-           fd = -1; /* make sure to put the file */
+           mddev_resume(mddev);
        }
-       mddev->pers->quiesce(mddev, 0);
    }
    if (fd < 0) {
        struct file *f = mddev->bitmap_info.file;
···
 
 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
 {
-   WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
+   lockdep_assert_held(&mddev->reconfig_mutex);
 
    if (mddev->external_size)
        return;
···
                mddev->bitmap_info.default_offset;
            mddev->bitmap_info.space =
                mddev->bitmap_info.default_space;
-           mddev->pers->quiesce(mddev, 1);
            bitmap = bitmap_create(mddev, -1);
+           mddev_suspend(mddev);
            if (!IS_ERR(bitmap)) {
                mddev->bitmap = bitmap;
                rv = bitmap_load(mddev);
···
            } else
                rv = PTR_ERR(bitmap);
            if (rv)
                bitmap_destroy(mddev);
-           mddev->pers->quiesce(mddev, 0);
+           mddev_resume(mddev);
        } else {
            /* remove the bitmap */
            if (!mddev->bitmap) {
···
                mddev->bitmap_info.nodes = 0;
                md_cluster_ops->leave(mddev);
            }
-           mddev->pers->quiesce(mddev, 1);
+           mddev_suspend(mddev);
            bitmap_destroy(mddev);
-           mddev->pers->quiesce(mddev, 0);
+           mddev_resume(mddev);
            mddev->bitmap_info.offset = 0;
        }
    }
···
 {
    if (thread) {
        pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
-       if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags))
-           wake_up(&thread->wqueue);
+       set_bit(THREAD_WAKEUP, &thread->flags);
+       wake_up(&thread->wqueue);
    }
 }
 EXPORT_SYMBOL(md_wakeup_thread);
···
    if (did_change)
        sysfs_notify_dirent_safe(mddev->sysfs_state);
    wait_event(mddev->sb_wait,
-          !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
+          !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
+          mddev->suspended);
    if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
        percpu_ref_put(&mddev->writes_pending);
        return false;
···
        sysfs_notify_dirent_safe(mddev->sysfs_state);
        /* wait for the dirty state to be recorded in the metadata */
        wait_event(mddev->sb_wait,
-              !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
               !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
    } else
        spin_unlock(&mddev->lock);
···
    } else {
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
            mddev->curr_resync = MaxSector;
-       rcu_read_lock();
-       rdev_for_each_rcu(rdev, mddev)
-           if (rdev->raid_disk >= 0 &&
-               mddev->delta_disks >= 0 &&
-               !test_bit(Journal, &rdev->flags) &&
-               !test_bit(Faulty, &rdev->flags) &&
-               !test_bit(In_sync, &rdev->flags) &&
-               rdev->recovery_offset < mddev->curr_resync)
-               rdev->recovery_offset = mddev->curr_resync;
-       rcu_read_unlock();
+       if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+           test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
+           rcu_read_lock();
+           rdev_for_each_rcu(rdev, mddev)
+               if (rdev->raid_disk >= 0 &&
+                   mddev->delta_disks >= 0 &&
+                   !test_bit(Journal, &rdev->flags) &&
+                   !test_bit(Faulty, &rdev->flags) &&
+                   !test_bit(In_sync, &rdev->flags) &&
+                   rdev->recovery_offset < mddev->curr_resync)
+                   rdev->recovery_offset = mddev->curr_resync;
+           rcu_read_unlock();
+       }
    }
 }
 skip:
···
    unlock:
        wake_up(&mddev->sb_wait);
        mddev_unlock(mddev);
+   } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+       /* Write superblock - thread that called mddev_suspend()
+        * holds reconfig_mutex for us.
+        */
+       set_bit(MD_UPDATING_SB, &mddev->flags);
+       smp_mb__after_atomic();
+       if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+           md_update_sb(mddev, 0);
+       clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+       wake_up(&mddev->sb_wait);
    }
 }
 EXPORT_SYMBOL(md_check_recovery);
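The new is_suspended() helper is what moves suspend_lo/suspend_hi handling out of the personalities: only writes overlapping the half-open window [suspend_lo, suspend_hi) are held back; everything else proceeds. A standalone restatement of the predicate with a worked example (userspace sketch, sectors in 512-byte units):

    #include <stdbool.h>
    #include <stdint.h>

    /* mirrors is_suspended() for a WRITE bio covering [start, end) */
    static bool write_is_suspended(uint64_t lo, uint64_t hi,
                                   uint64_t start, uint64_t end)
    {
        if (lo >= hi)
            return false;   /* empty suspend window */
        if (start >= hi)
            return false;   /* bio entirely above the window */
        if (end < lo)
            return false;   /* bio entirely below the window */
        return true;
    }

    /* e.g. lo = 1000, hi = 2000:
     *   write [900, 1100)  -> suspended (overlaps the window)
     *   write [2000, 2100) -> not suspended (starts at hi)
     * reads are never held back by the window, only by mddev->suspended.
     */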
+10 -10
drivers/md/md.h
···
     */
    MD_HAS_PPL,     /* The raid array has PPL feature set */
    MD_HAS_MULTIPLE_PPLS,   /* The raid array has multiple PPLs feature set */
+   MD_ALLOW_SB_UPDATE, /* md_check_recovery is allowed to update
+                * the metadata without taking reconfig_mutex.
+                */
+   MD_UPDATING_SB,     /* md_check_recovery is updating the metadata
+                * without explicitly holding reconfig_mutex.
+                */
 };
 
 enum mddev_sb_flags {
···
    mutex_lock(&mddev->reconfig_mutex);
 }
 
-static inline int mddev_is_locked(struct mddev *mddev)
-{
-   return mutex_is_locked(&mddev->reconfig_mutex);
-}
-
 static inline int mddev_trylock(struct mddev *mddev)
 {
    return mutex_trylock(&mddev->reconfig_mutex);
···
    int (*check_reshape) (struct mddev *mddev);
    int (*start_reshape) (struct mddev *mddev);
    void (*finish_reshape) (struct mddev *mddev);
-   /* quiesce moves between quiescence states
-    * 0 - fully active
-    * 1 - no new requests allowed
-    * others - reserved
+   /* quiesce suspends or resumes internal processing.
+    * 1 - stop new actions and wait for action io to complete
+    * 0 - return to normal behaviour
     */
-   void (*quiesce) (struct mddev *mddev, int state);
+   void (*quiesce) (struct mddev *mddev, int quiesce);
    /* takeover is used to transition an array from one
     * personality to another.  The new personality must be able
     * to handle the data in the current layout.
+1 -3
drivers/md/multipath.c → drivers/md/md-multipath.c
···
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include "md.h"
-#include "multipath.h"
+#include "md-multipath.h"
 
 #define MAX_WORK_PER_DISK 128
 
···
 static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
    struct mpconf *conf = mddev->private;
-   struct request_queue *q;
    int err = -EEXIST;
    int path;
    struct multipath_info *p;
···
 
    for (path = first; path <= last; path++)
        if ((p=conf->multipaths+path)->rdev == NULL) {
-           q = rdev->bdev->bd_disk->queue;
            disk_stack_limits(mddev->gendisk, rdev->bdev,
                      rdev->data_offset << 9);
 
drivers/md/multipath.h → drivers/md/md-multipath.h
+1 -1
drivers/md/raid0.c
···
    return ERR_PTR(-EINVAL);
 }
 
-static void raid0_quiesce(struct mddev *mddev, int state)
+static void raid0_quiesce(struct mddev *mddev, int quiesce)
 {
 }
 
+27 -61
drivers/md/raid1.c
···
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
-#include <linux/sched/signal.h>
 
 #include <trace/events/block.h>
 
 #include "md.h"
 #include "raid1.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
 
 #define UNSUPPORTED_MDDEV_FLAGS        \
    ((1L << MD_HAS_JOURNAL) |   \
···
    _wait_barrier(conf, idx);
 }
 
-static void wait_all_barriers(struct r1conf *conf)
-{
-   int idx;
-
-   for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
-       _wait_barrier(conf, idx);
-}
-
 static void _allow_barrier(struct r1conf *conf, int idx)
 {
    atomic_dec(&conf->nr_pending[idx]);
···
    int idx = sector_to_idx(sector_nr);
 
    _allow_barrier(conf, idx);
-}
-
-static void allow_all_barriers(struct r1conf *conf)
-{
-   int idx;
-
-   for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
-       _allow_barrier(conf, idx);
 }
 
 /* conf->resync_lock should be held */
···
    int first_clone;
    int max_sectors;
 
+   if (mddev_is_clustered(mddev) &&
+        md_cluster_ops->area_resyncing(mddev, WRITE,
+            bio->bi_iter.bi_sector, bio_end_sector(bio))) {
+
+       DEFINE_WAIT(w);
+       for (;;) {
+           prepare_to_wait(&conf->wait_barrier,
+                   &w, TASK_IDLE);
+           if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+                           bio->bi_iter.bi_sector,
+                           bio_end_sector(bio)))
+               break;
+           schedule();
+       }
+       finish_wait(&conf->wait_barrier, &w);
+   }
+
    /*
     * Register the new request and wait if the reconstruction
     * thread has put up a bar for new requests.
     * Continue immediately if no resync is active currently.
     */
-
-
-   if ((bio_end_sector(bio) > mddev->suspend_lo &&
-       bio->bi_iter.bi_sector < mddev->suspend_hi) ||
-       (mddev_is_clustered(mddev) &&
-        md_cluster_ops->area_resyncing(mddev, WRITE,
-            bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
-
-       /*
-        * As the suspend_* range is controlled by userspace, we want
-        * an interruptible wait.
-        */
-       DEFINE_WAIT(w);
-       for (;;) {
-           sigset_t full, old;
-           prepare_to_wait(&conf->wait_barrier,
-                   &w, TASK_INTERRUPTIBLE);
-           if (bio_end_sector(bio) <= mddev->suspend_lo ||
-               bio->bi_iter.bi_sector >= mddev->suspend_hi ||
-               (mddev_is_clustered(mddev) &&
-                !md_cluster_ops->area_resyncing(mddev, WRITE,
-                    bio->bi_iter.bi_sector,
-                    bio_end_sector(bio))))
-               break;
-           sigfillset(&full);
-           sigprocmask(SIG_BLOCK, &full, &old);
-           schedule();
-           sigprocmask(SIG_SETMASK, &old, NULL);
-       }
-       finish_wait(&conf->wait_barrier, &w);
-   }
    wait_barrier(conf, bio->bi_iter.bi_sector);
 
    r1_bio = alloc_r1bio(mddev, bio);
···
 
 static void close_sync(struct r1conf *conf)
 {
-   wait_all_barriers(conf);
-   allow_all_barriers(conf);
+   int idx;
+
+   for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
+       _wait_barrier(conf, idx);
+       _allow_barrier(conf, idx);
+   }
 
    mempool_destroy(conf->r1buf_pool);
    conf->r1buf_pool = NULL;
···
    return 0;
 }
 
-static void raid1_quiesce(struct mddev *mddev, int state)
+static void raid1_quiesce(struct mddev *mddev, int quiesce)
 {
    struct r1conf *conf = mddev->private;
 
-   switch(state) {
-   case 2: /* wake for suspend */
-       wake_up(&conf->wait_barrier);
-       break;
-   case 1:
+   if (quiesce)
        freeze_array(conf, 0);
-       break;
-   case 0:
+   else
        unfreeze_array(conf);
-       break;
-   }
 }
 
 static void *raid1_takeover(struct mddev *mddev)
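The wait loops above drop the sigfillset()/sigprocmask() dance by sleeping in TASK_IDLE, which is TASK_UNINTERRUPTIBLE | TASK_NOLOAD: the sleeper ignores signals, yet does not count toward the load average the way a plain uninterruptible sleep would. The generic shape of such a wait:

    DEFINE_WAIT(w);

    for (;;) {
        prepare_to_wait(&wq, &w, TASK_IDLE);
        if (condition)      /* re-check after queueing ourselves */
            break;
        schedule();
    }
    finish_wait(&wq, &w);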
+148 -21
drivers/md/raid10.c
···
 #include "md.h"
 #include "raid10.h"
 #include "raid0.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
 
 /*
  * RAID10 provides a combination of RAID0 and RAID1 functionality.
···
    kfree(r10_bio);
 }
 
+#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 /* amount of memory to reserve for resync requests */
 #define RESYNC_WINDOW (1024*1024)
 /* maximum number of concurrent requests, memory permitting */
 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
+#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 
 /*
  * When performing a resync, we need to read and compare, so
···
 {
    int uptodate = !bio->bi_status;
    struct r10bio *r10_bio = bio->bi_private;
-   int slot, dev;
+   int slot;
    struct md_rdev *rdev;
    struct r10conf *conf = r10_bio->mddev->private;
 
    slot = r10_bio->read_slot;
-   dev = r10_bio->devs[slot].devnum;
    rdev = r10_bio->devs[slot].rdev;
    /*
     * this branch is our 'one mirror IO has finished' event handler:
···
 
    raid10_find_phys(conf, r10_bio);
    rcu_read_lock();
-   sectors = r10_bio->sectors;
    best_slot = -1;
    best_rdev = NULL;
    best_dist = MaxSector;
···
     * the resync window. We take the first readable disk when
     * above the resync window.
     */
-   if (conf->mddev->recovery_cp < MaxSector
-       && (this_sector + sectors >= conf->next_resync))
+   if ((conf->mddev->recovery_cp < MaxSector
+        && (this_sector + sectors >= conf->next_resync)) ||
+       (mddev_is_clustered(conf->mddev) &&
+        md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
+                       this_sector + sectors)))
        do_balance = 0;
 
    for (slot = 0; slot < conf->copies ; slot++) {
···
    struct md_rdev *blocked_rdev;
    sector_t sectors;
    int max_sectors;
+
+   if ((mddev_is_clustered(mddev) &&
+        md_cluster_ops->area_resyncing(mddev, WRITE,
+                       bio->bi_iter.bi_sector,
+                       bio_end_sector(bio)))) {
+       DEFINE_WAIT(w);
+       for (;;) {
+           prepare_to_wait(&conf->wait_barrier,
+                   &w, TASK_IDLE);
+           if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+                bio->bi_iter.bi_sector, bio_end_sector(bio)))
+               break;
+           schedule();
+       }
+       finish_wait(&conf->wait_barrier, &w);
+   }
 
    /*
     * Register the new request and wait if the reconstruction
···
    struct bio *bio;
    struct r10conf *conf = mddev->private;
    struct md_rdev *rdev = r10_bio->devs[slot].rdev;
-   sector_t bio_last_sector;
 
    /* we got a read error. Maybe the drive is bad.  Maybe just
     * the block and we can fix it.
···
     * frozen.
     */
    bio = r10_bio->devs[slot].bio;
-   bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
    bio_put(bio);
    r10_bio->devs[slot].bio = NULL;
 
···
 }
 
 /*
+ * Set cluster_sync_high since we need other nodes to add the
+ * range [cluster_sync_low, cluster_sync_high] to suspend list.
+ */
+static void raid10_set_cluster_sync_high(struct r10conf *conf)
+{
+   sector_t window_size;
+   int extra_chunk, chunks;
+
+   /*
+    * First, here we define "stripe" as a unit which across
+    * all member devices one time, so we get chunks by use
+    * raid_disks / near_copies. Otherwise, if near_copies is
+    * close to raid_disks, then resync window could increases
+    * linearly with the increase of raid_disks, which means
+    * we will suspend a really large IO window while it is not
+    * necessary. If raid_disks is not divisible by near_copies,
+    * an extra chunk is needed to ensure the whole "stripe" is
+    * covered.
+    */
+
+   chunks = conf->geo.raid_disks / conf->geo.near_copies;
+   if (conf->geo.raid_disks % conf->geo.near_copies == 0)
+       extra_chunk = 0;
+   else
+       extra_chunk = 1;
+   window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
+
+   /*
+    * At least use a 32M window to align with raid1's resync window
+    */
+   window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
+           CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
+
+   conf->cluster_sync_high = conf->cluster_sync_low + window_size;
+}
+
+/*
  * perform a "sync" on one "block"
  *
  * We need to make sure that no normal I/O request - particularly write
···
        test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        max_sector = mddev->resync_max_sectors;
    if (sector_nr >= max_sector) {
+       conf->cluster_sync_low = 0;
+       conf->cluster_sync_high = 0;
+
        /* If we aborted, we need to abort the
         * sync on the 'current' bitmap chucks (there can
         * be several when recovering multiple devices).
···
        /* resync. Schedule a read for every block at this virt offset */
        int count = 0;
 
-       bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
+       /*
+        * Since curr_resync_completed could probably not update in
+        * time, and we will set cluster_sync_low based on it.
+        * Let's check against "sector_nr + 2 * RESYNC_SECTORS" for
+        * safety reason, which ensures curr_resync_completed is
+        * updated in bitmap_cond_end_sync.
+        */
+       bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+                    mddev_is_clustered(mddev) &&
+                    (sector_nr + 2 * RESYNC_SECTORS >
+                     conf->cluster_sync_high));
 
        if (!bitmap_start_sync(mddev->bitmap, sector_nr,
                       &sync_blocks, mddev->degraded) &&
···
        sector_nr += len>>9;
    } while (++page_idx < RESYNC_PAGES);
    r10_bio->sectors = nr_sectors;
+
+   if (mddev_is_clustered(mddev) &&
+       test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+       /* It is resync not recovery */
+       if (conf->cluster_sync_high < sector_nr + nr_sectors) {
+           conf->cluster_sync_low = mddev->curr_resync_completed;
+           raid10_set_cluster_sync_high(conf);
+           /* Send resync message */
+           md_cluster_ops->resync_info_update(mddev,
+                       conf->cluster_sync_low,
+                       conf->cluster_sync_high);
+       }
+   } else if (mddev_is_clustered(mddev)) {
+       /* This is recovery not resync */
+       sector_t sect_va1, sect_va2;
+       bool broadcast_msg = false;
+
+       for (i = 0; i < conf->geo.raid_disks; i++) {
+           /*
+            * sector_nr is a device address for recovery, so we
+            * need translate it to array address before compare
+            * with cluster_sync_high.
+            */
+           sect_va1 = raid10_find_virt(conf, sector_nr, i);
+
+           if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
+               broadcast_msg = true;
+               /*
+                * curr_resync_completed is similar as
+                * sector_nr, so make the translation too.
+                */
+               sect_va2 = raid10_find_virt(conf,
+                   mddev->curr_resync_completed, i);
+
+               if (conf->cluster_sync_low == 0 ||
+                   conf->cluster_sync_low > sect_va2)
+                   conf->cluster_sync_low = sect_va2;
+           }
+       }
+       if (broadcast_msg) {
+           raid10_set_cluster_sync_high(conf);
+           md_cluster_ops->resync_info_update(mddev,
+                   conf->cluster_sync_low,
+                   conf->cluster_sync_high);
+       }
+   }
 
    while (biolist) {
        bio = biolist;
···
    if (!conf)
        goto out;
 
+   if (mddev_is_clustered(conf->mddev)) {
+       int fc, fo;
+
+       fc = (mddev->layout >> 8) & 255;
+       fo = mddev->layout & (1<<16);
+       if (fc > 1 || fo > 0) {
+           pr_err("only near layout is supported by clustered"
+                  " raid10\n");
+           goto out;
+       }
+   }
+
    mddev->thread = conf->thread;
    conf->thread = NULL;
 
···
    kfree(conf);
 }
 
-static void raid10_quiesce(struct mddev *mddev, int state)
+static void raid10_quiesce(struct mddev *mddev, int quiesce)
 {
    struct r10conf *conf = mddev->private;
 
-   switch(state) {
-   case 1:
+   if (quiesce)
        raise_barrier(conf, 0);
-       break;
-   case 0:
+   else
        lower_barrier(conf);
-       break;
-   }
 }
 
 static int raid10_resize(struct mddev *mddev, sector_t sectors)
···
    /* Use sync reads to get the blocks from somewhere else */
    int sectors = r10_bio->sectors;
    struct r10conf *conf = mddev->private;
-   struct {
-       struct r10bio r10_bio;
-       struct r10dev devs[conf->copies];
-   } on_stack;
-   struct r10bio *r10b = &on_stack.r10_bio;
+   struct r10bio *r10b;
    int slot = 0;
    int idx = 0;
    struct page **pages;
+
+   r10b = kmalloc(sizeof(*r10b) +
+          sizeof(struct r10dev) * conf->copies, GFP_NOIO);
+   if (!r10b) {
+       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+       return -ENOMEM;
+   }
 
    /* reshape IOs share pages from .devs[0].bio */
    pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
···
            /* couldn't read this block, must give up */
            set_bit(MD_RECOVERY_INTR,
                &mddev->recovery);
+           kfree(r10b);
            return -EIO;
        }
        sectors -= s;
        idx++;
    }
+   kfree(r10b);
    return 0;
 }
 
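To make raid10_set_cluster_sync_high() concrete with the defines from this hunk: assume raid_disks = 5, near_copies = 2 and chunk_sectors = 1024 (512 KiB chunks). Then chunks = 5 / 2 = 2, and since 5 % 2 != 0 an extra chunk is added, so window_size = (2 + 1) * 1024 = 3072 sectors (1.5 MiB). That is below the floor CLUSTER_RESYNC_WINDOW_SECTORS = (16 * 1024 * 1024) >> 9 = 32768 sectors, so the floor wins and cluster_sync_high = cluster_sync_low + 32768.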
+6
drivers/md/raid10.h
···
     * the new thread here until we fully activate the array.
     */
    struct md_thread    *thread;
+
+   /*
+    * Keep track of cluster resync window to send to other nodes.
+    */
+   sector_t        cluster_sync_low;
+   sector_t        cluster_sync_high;
 };
 
 /*
+26 -18
drivers/md/raid5-cache.c
···
 #include <linux/types.h>
 #include "md.h"
 #include "raid5.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
 #include "raid5-log.h"
 
 /*
···
 {
    struct r5l_io_unit *io, *next;
 
-   assert_spin_locked(&log->io_list_lock);
+   lockdep_assert_held(&log->io_list_lock);
 
    list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
        /* don't change list order */
···
 {
    struct r5l_io_unit *io, *next;
 
-   assert_spin_locked(&log->io_list_lock);
+   lockdep_assert_held(&log->io_list_lock);
 
    list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
        /* don't change list order */
···
    struct r5l_log *log = container_of(work, struct r5l_log,
                       disable_writeback_work);
    struct mddev *mddev = log->rdev->mddev;
+   struct r5conf *conf = mddev->private;
+   int locked = 0;
 
    if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
        return;
···
 
    /* wait superblock change before suspend */
    wait_event(mddev->sb_wait,
-          !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
-
-   mddev_suspend(mddev);
-   log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
-   mddev_resume(mddev);
+          conf->log == NULL ||
+          (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
+           (locked = mddev_trylock(mddev))));
+   if (locked) {
+       mddev_suspend(mddev);
+       log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+       mddev_resume(mddev);
+       mddev_unlock(mddev);
+   }
 }
 
 static void r5l_submit_current_io(struct r5l_log *log)
···
 {
    struct stripe_head *sh;
 
-   assert_spin_locked(&log->io_list_lock);
+   lockdep_assert_held(&log->io_list_lock);
 
    if (!list_empty(&log->no_mem_stripes)) {
        sh = list_first_entry(&log->no_mem_stripes,
···
    struct r5l_io_unit *io, *next;
    bool found = false;
 
-   assert_spin_locked(&log->io_list_lock);
+   lockdep_assert_held(&log->io_list_lock);
 
    list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
        /* don't change list order */
···
     * raid5_release_stripe() while holding conf->device_lock
     */
    BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
-   assert_spin_locked(&conf->device_lock);
+   lockdep_assert_held(&conf->device_lock);
 
    list_del_init(&sh->lru);
    atomic_inc(&sh->count);
···
    int count;
    struct stripe_head *sh, *next;
 
-   assert_spin_locked(&conf->device_lock);
+   lockdep_assert_held(&conf->device_lock);
    if (!conf->log)
        return;
···
    md_wakeup_thread(log->reclaim_thread);
 }
 
-void r5l_quiesce(struct r5l_log *log, int state)
+void r5l_quiesce(struct r5l_log *log, int quiesce)
 {
    struct mddev *mddev;
-   if (!log || state == 2)
+   if (!log)
        return;
-   if (state == 0)
-       kthread_unpark(log->reclaim_thread->tsk);
-   else if (state == 1) {
+
+   if (quiesce) {
        /* make sure r5l_write_super_and_discard_space exits */
        mddev = log->rdev->mddev;
        wake_up(&mddev->sb_wait);
        kthread_park(log->reclaim_thread->tsk);
        r5l_wake_reclaim(log, MaxSector);
        r5l_do_reclaim(log);
-   }
+   } else
+       kthread_unpark(log->reclaim_thread->tsk);
 }
 
 bool r5l_log_disk_error(struct r5conf *conf)
···
    conf->log = NULL;
    synchronize_rcu();
 
+   /* Ensure disable_writeback_work wakes up and exits */
+   wake_up(&conf->mddev->sb_wait);
    flush_work(&log->disable_writeback_work);
    md_unregister_thread(&log->reclaim_thread);
    mempool_destroy(log->meta_pool);
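The disable_writeback_work rework above avoids a lock-order deadlock: mddev_suspend() now needs reconfig_mutex, but the thread holding that mutex may itself be waiting for this work item to finish. Folding a trylock into the wait_event() condition resolves it; the worker either gets the mutex (and can suspend safely) or keeps sleeping, and r5l_exit_log() wakes sb_wait after clearing conf->log so the worker can bail out instead of blocking teardown. The pattern, reduced to its core:

    int locked = 0;

    wait_event(mddev->sb_wait,
               conf->log == NULL ||        /* log being torn down: bail */
               (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
                (locked = mddev_trylock(mddev))));
    if (locked) {
        mddev_suspend(mddev);
        log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
        mddev_resume(mddev);
        mddev_unlock(mddev);
    }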
+1 -1
drivers/md/raid5-log.h
···
 extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
 extern void r5l_stripe_write_finished(struct stripe_head *sh);
 extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
-extern void r5l_quiesce(struct r5l_log *log, int state);
+extern void r5l_quiesce(struct r5l_log *log, int quiesce);
 extern bool r5l_log_disk_error(struct r5conf *conf);
 extern bool r5c_is_writeback(struct r5l_log *log);
 extern int
+3 -3
drivers/md/raid5-ppl.c
···
         (unsigned long long)sector);
 
    rdev = conf->disks[dd_idx].rdev;
-   if (!rdev) {
+   if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
+             sector >= rdev->recovery_offset)) {
        pr_debug("%s:%*s data member disk %d missing\n",
             __func__, indent, "", dd_idx);
        update_parity = false;
···
 
    if (ret) {
        goto err;
-   } else if (!mddev->pers &&
-          mddev->recovery_cp == 0 && !mddev->degraded &&
+   } else if (!mddev->pers && mddev->recovery_cp == 0 &&
           ppl_conf->recovered_entries > 0 &&
           ppl_conf->mismatch_count == 0) {
        /*
+42 -37
drivers/md/raid5.c
··· 55 55 #include <linux/ratelimit.h> 56 56 #include <linux/nodemask.h> 57 57 #include <linux/flex_array.h> 58 - #include <linux/sched/signal.h> 59 58 60 59 #include <trace/events/block.h> 61 60 #include <linux/list_sort.h> ··· 62 63 #include "md.h" 63 64 #include "raid5.h" 64 65 #include "raid0.h" 65 - #include "bitmap.h" 66 + #include "md-bitmap.h" 66 67 #include "raid5-log.h" 67 68 68 69 #define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED) ··· 1817 1818 struct r5dev *dev = &sh->dev[i]; 1818 1819 1819 1820 if (dev->written || i == pd_idx || i == qd_idx) { 1820 - if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) 1821 + if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) { 1821 1822 set_bit(R5_UPTODATE, &dev->flags); 1823 + if (test_bit(STRIPE_EXPAND_READY, &sh->state)) 1824 + set_bit(R5_Expanded, &dev->flags); 1825 + } 1822 1826 if (fua) 1823 1827 set_bit(R5_WantFUA, &dev->flags); 1824 1828 if (sync) ··· 5684 5682 goto retry; 5685 5683 } 5686 5684 5687 - if (rw == WRITE && 5688 - logical_sector >= mddev->suspend_lo && 5689 - logical_sector < mddev->suspend_hi) { 5690 - raid5_release_stripe(sh); 5691 - /* As the suspend_* range is controlled by 5692 - * userspace, we want an interruptible 5693 - * wait. 5694 - */ 5695 - prepare_to_wait(&conf->wait_for_overlap, 5696 - &w, TASK_INTERRUPTIBLE); 5697 - if (logical_sector >= mddev->suspend_lo && 5698 - logical_sector < mddev->suspend_hi) { 5699 - sigset_t full, old; 5700 - sigfillset(&full); 5701 - sigprocmask(SIG_BLOCK, &full, &old); 5702 - schedule(); 5703 - sigprocmask(SIG_SETMASK, &old, NULL); 5704 - do_prepare = true; 5705 - } 5706 - goto retry; 5707 - } 5708 - 5709 5685 if (test_bit(STRIPE_EXPANDING, &sh->state) || 5710 5686 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { 5711 5687 /* Stripe is busy expanding or ··· 5738 5758 */ 5739 5759 struct r5conf *conf = mddev->private; 5740 5760 struct stripe_head *sh; 5761 + struct md_rdev *rdev; 5741 5762 sector_t first_sector, last_sector; 5742 5763 int raid_disks = conf->previous_raid_disks; 5743 5764 int data_disks = raid_disks - conf->max_degraded; ··· 5861 5880 return 0; 5862 5881 mddev->reshape_position = conf->reshape_progress; 5863 5882 mddev->curr_resync_completed = sector_nr; 5883 + if (!mddev->reshape_backwards) 5884 + /* Can update recovery_offset */ 5885 + rdev_for_each(rdev, mddev) 5886 + if (rdev->raid_disk >= 0 && 5887 + !test_bit(Journal, &rdev->flags) && 5888 + !test_bit(In_sync, &rdev->flags) && 5889 + rdev->recovery_offset < sector_nr) 5890 + rdev->recovery_offset = sector_nr; 5891 + 5864 5892 conf->reshape_checkpoint = jiffies; 5865 5893 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 5866 5894 md_wakeup_thread(mddev->thread); ··· 5968 5978 goto ret; 5969 5979 mddev->reshape_position = conf->reshape_progress; 5970 5980 mddev->curr_resync_completed = sector_nr; 5981 + if (!mddev->reshape_backwards) 5982 + /* Can update recovery_offset */ 5983 + rdev_for_each(rdev, mddev) 5984 + if (rdev->raid_disk >= 0 && 5985 + !test_bit(Journal, &rdev->flags) && 5986 + !test_bit(In_sync, &rdev->flags) && 5987 + rdev->recovery_offset < sector_nr) 5988 + rdev->recovery_offset = sector_nr; 5971 5989 conf->reshape_checkpoint = jiffies; 5972 5990 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 5973 5991 md_wakeup_thread(mddev->thread); ··· 7154 7156 min_offset_diff = diff; 7155 7157 } 7156 7158 7159 + if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && 7160 + (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { 7161 + pr_notice("md/raid:%s: array cannot have both 
journal and bitmap\n", 7162 + mdname(mddev)); 7163 + return -EINVAL; 7164 + } 7165 + 7157 7166 if (mddev->reshape_position != MaxSector) { 7158 7167 /* Check that we can continue the reshape. 7159 7168 * Difficulties arise if the stripe we would write to ··· 7963 7958 { 7964 7959 7965 7960 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 7961 + struct md_rdev *rdev; 7966 7962 7967 7963 spin_lock_irq(&conf->device_lock); 7968 7964 conf->previous_raid_disks = conf->raid_disks; ··· 7971 7965 smp_wmb(); 7972 7966 conf->reshape_progress = MaxSector; 7973 7967 conf->mddev->reshape_position = MaxSector; 7968 + rdev_for_each(rdev, conf->mddev) 7969 + if (rdev->raid_disk >= 0 && 7970 + !test_bit(Journal, &rdev->flags) && 7971 + !test_bit(In_sync, &rdev->flags)) 7972 + rdev->recovery_offset = MaxSector; 7974 7973 spin_unlock_irq(&conf->device_lock); 7975 7974 wake_up(&conf->wait_for_overlap); 7976 7975 ··· 8031 8020 } 8032 8021 } 8033 8022 8034 - static void raid5_quiesce(struct mddev *mddev, int state) 8023 + static void raid5_quiesce(struct mddev *mddev, int quiesce) 8035 8024 { 8036 8025 struct r5conf *conf = mddev->private; 8037 8026 8038 - switch(state) { 8039 - case 2: /* resume for a suspend */ 8040 - wake_up(&conf->wait_for_overlap); 8041 - break; 8042 - 8043 - case 1: /* stop all writes */ 8027 + if (quiesce) { 8028 + /* stop all writes */ 8044 8029 lock_all_device_hash_locks_irq(conf); 8045 8030 /* '2' tells resync/reshape to pause so that all 8046 8031 * active stripes can drain ··· 8052 8045 unlock_all_device_hash_locks_irq(conf); 8053 8046 /* allow reshape to continue */ 8054 8047 wake_up(&conf->wait_for_overlap); 8055 - break; 8056 - 8057 - case 0: /* re-enable writes */ 8048 + } else { 8049 + /* re-enable writes */ 8058 8050 lock_all_device_hash_locks_irq(conf); 8059 8051 conf->quiesce = 0; 8060 8052 wake_up(&conf->wait_for_quiescent); 8061 8053 wake_up(&conf->wait_for_overlap); 8062 8054 unlock_all_device_hash_locks_irq(conf); 8063 - break; 8064 8055 } 8065 - r5l_quiesce(conf->log, state); 8056 + r5l_quiesce(conf->log, quiesce); 8066 8057 } 8067 8058 8068 8059 static void *raid45_takeover_raid0(struct mddev *mddev, int level)