Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md: cleanup mddev_create/destroy_serial_pool()

Now that all callers except the array-stop path already suspend
the array, there is no need to suspend inside these helpers anymore;
hence remove the second parameter.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20231010151958.145896-15-yukuai1@huaweicloud.com

authored by

Yu Kuai and committed by
Song Liu
b4128c00 58226942

+17 -31
+4 -4
drivers/md/md-bitmap.c
··· 1861 1861 1862 1862 md_bitmap_wait_behind_writes(mddev); 1863 1863 if (!mddev->serialize_policy) 1864 - mddev_destroy_serial_pool(mddev, NULL, true); 1864 + mddev_destroy_serial_pool(mddev, NULL); 1865 1865 1866 1866 mutex_lock(&mddev->bitmap_info.mutex); 1867 1867 spin_lock(&mddev->lock); ··· 1977 1977 goto out; 1978 1978 1979 1979 rdev_for_each(rdev, mddev) 1980 - mddev_create_serial_pool(mddev, rdev, true); 1980 + mddev_create_serial_pool(mddev, rdev); 1981 1981 1982 1982 if (mddev_is_clustered(mddev)) 1983 1983 md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes); ··· 2562 2562 if (!backlog && mddev->serial_info_pool) { 2563 2563 /* serial_info_pool is not needed if backlog is zero */ 2564 2564 if (!mddev->serialize_policy) 2565 - mddev_destroy_serial_pool(mddev, NULL, true); 2565 + mddev_destroy_serial_pool(mddev, NULL); 2566 2566 } else if (backlog && !mddev->serial_info_pool) { 2567 2567 /* serial_info_pool is needed since backlog is not zero */ 2568 2568 rdev_for_each(rdev, mddev) 2569 - mddev_create_serial_pool(mddev, rdev, true); 2569 + mddev_create_serial_pool(mddev, rdev); 2570 2570 } 2571 2571 if (old_mwb != backlog) 2572 2572 md_bitmap_update_sb(mddev->bitmap);
+10 -23
drivers/md/md.c
··· 206 206 * 1. rdev is the first device which return true from rdev_enable_serial. 207 207 * 2. rdev is NULL, means we want to enable serialization for all rdevs. 208 208 */ 209 - void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 210 - bool is_suspend) 209 + void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev) 211 210 { 212 211 int ret = 0; 213 212 ··· 214 215 !test_bit(CollisionCheck, &rdev->flags)) 215 216 return; 216 217 217 - if (!is_suspend) 218 - mddev_suspend(mddev); 219 - 220 218 if (!rdev) 221 219 ret = rdevs_init_serial(mddev); 222 220 else 223 221 ret = rdev_init_serial(rdev); 224 222 if (ret) 225 - goto abort; 223 + return; 226 224 227 225 if (mddev->serial_info_pool == NULL) { 228 226 /* ··· 234 238 pr_err("can't alloc memory pool for serialization\n"); 235 239 } 236 240 } 237 - 238 - abort: 239 - if (!is_suspend) 240 - mddev_resume(mddev); 241 241 } 242 242 243 243 /* ··· 242 250 * 2. when bitmap is destroyed while policy is not enabled. 243 251 * 3. for disable policy, the pool is destroyed only when no rdev needs it. 
244 252 */ 245 - void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 246 - bool is_suspend) 253 + void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev) 247 254 { 248 255 if (rdev && !test_bit(CollisionCheck, &rdev->flags)) 249 256 return; ··· 251 260 struct md_rdev *temp; 252 261 int num = 0; /* used to track if other rdevs need the pool */ 253 262 254 - if (!is_suspend) 255 - mddev_suspend(mddev); 256 263 rdev_for_each(temp, mddev) { 257 264 if (!rdev) { 258 265 if (!mddev->serialize_policy || ··· 272 283 mempool_destroy(mddev->serial_info_pool); 273 284 mddev->serial_info_pool = NULL; 274 285 } 275 - if (!is_suspend) 276 - mddev_resume(mddev); 277 286 } 278 287 } 279 288 ··· 2544 2557 pr_debug("md: bind<%s>\n", b); 2545 2558 2546 2559 if (mddev->raid_disks) 2547 - mddev_create_serial_pool(mddev, rdev, true); 2560 + mddev_create_serial_pool(mddev, rdev); 2548 2561 2549 2562 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2550 2563 goto fail; ··· 2597 2610 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2598 2611 list_del_rcu(&rdev->same_set); 2599 2612 pr_debug("md: unbind<%pg>\n", rdev->bdev); 2600 - mddev_destroy_serial_pool(rdev->mddev, rdev, false); 2613 + mddev_destroy_serial_pool(rdev->mddev, rdev); 2601 2614 rdev->mddev = NULL; 2602 2615 sysfs_remove_link(&rdev->kobj, "block"); 2603 2616 sysfs_put(rdev->sysfs_state); ··· 3064 3077 } 3065 3078 } else if (cmd_match(buf, "writemostly")) { 3066 3079 set_bit(WriteMostly, &rdev->flags); 3067 - mddev_create_serial_pool(rdev->mddev, rdev, true); 3080 + mddev_create_serial_pool(rdev->mddev, rdev); 3068 3081 need_update_sb = true; 3069 3082 err = 0; 3070 3083 } else if (cmd_match(buf, "-writemostly")) { 3071 - mddev_destroy_serial_pool(rdev->mddev, rdev, true); 3084 + mddev_destroy_serial_pool(rdev->mddev, rdev); 3072 3085 clear_bit(WriteMostly, &rdev->flags); 3073 3086 need_update_sb = true; 3074 3087 err = 0; ··· 5578 5591 } 5579 5592 5580 5593 if 
(value) 5581 - mddev_create_serial_pool(mddev, NULL, true); 5594 + mddev_create_serial_pool(mddev, NULL); 5582 5595 else 5583 - mddev_destroy_serial_pool(mddev, NULL, true); 5596 + mddev_destroy_serial_pool(mddev, NULL); 5584 5597 mddev->serialize_policy = value; 5585 5598 unlock: 5586 5599 mddev_unlock_and_resume(mddev); ··· 6346 6359 } 6347 6360 /* disable policy to guarantee rdevs free resources for serialization */ 6348 6361 mddev->serialize_policy = 0; 6349 - mddev_destroy_serial_pool(mddev, NULL, true); 6362 + mddev_destroy_serial_pool(mddev, NULL); 6350 6363 } 6351 6364 6352 6365 void md_stop_writes(struct mddev *mddev)
+3 -4
drivers/md/md.h
··· 817 817 818 818 extern void md_reload_sb(struct mddev *mddev, int raid_disk); 819 819 extern void md_update_sb(struct mddev *mddev, int force); 820 - extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 821 - bool is_suspend); 822 - extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 823 - bool is_suspend); 820 + extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev); 821 + extern void mddev_destroy_serial_pool(struct mddev *mddev, 822 + struct md_rdev *rdev); 824 823 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); 825 824 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev); 826 825