Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'block-6.17-20250822' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
"A set of fixes for block that should go into this tree. A bit larger
than what I usually have at this point in time, a lot of that is the
continued fixing of the lockdep annotation for queue freezing that we
recently added, which has highlighted a number of little issues here
and there. This contains:

- MD pull request via Yu:

- Add a legacy_async_del_gendisk mode to prevent a user tools
regression. New user tools releases will not use this mode;
old releases running on a new kernel will now warn about the
deprecated behavior, and we plan to remove this legacy mode
after about a year

- The in-kernel rename caused a user tools build failure; revert
the rename in mdp_superblock_s

- Fix a regression where an interrupted resync could be shown as
a recovery in mdstat or sysfs

- Improve file size detection for loop, particularly for networked
file systems, by using getattr to get the size rather than the
cached inode size.

- Hotplug CPU lock vs queue freeze fix

- Lockdep fix while updating the number of hardware queues

- Fix stacking for PI devices

- Silence bio_check_eod() for the known case of device removal where
the size is truncated to 0 sectors"

* tag 'block-6.17-20250822' of git://git.kernel.dk/linux:
block: avoid cpu_hotplug_lock dependency on freeze_lock
block: decrement block_rq_qos static key in rq_qos_del()
block: skip q->rq_qos check in rq_qos_done_bio()
blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues
block: tone down bio_check_eod
loop: use vfs_getattr_nosec for accurate file size
loop: Consolidate size calculation logic into lo_calculate_size()
block: remove newlines from the warnings in blk_validate_integrity_limits
block: handle pi_tuple_size in queue_limits_stack_integrity
selftests: ublk: Use ARRAY_SIZE() macro to improve code
md: fix sync_action incorrect display during resync
md: add helper rdev_needs_recovery()
md: keep recovery_cp in mdp_superblock_s
md: add legacy_async_del_gendisk mode

+169 -83
+1 -1
block/blk-core.c
··· 557 557 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); 558 558 unsigned int nr_sectors = bio_sectors(bio); 559 559 560 - if (nr_sectors && 560 + if (nr_sectors && maxsector && 561 561 (nr_sectors > maxsector || 562 562 bio->bi_iter.bi_sector > maxsector - nr_sectors)) { 563 563 pr_info_ratelimited("%s: attempt to access beyond end of device\n"
+1
block/blk-mq-debugfs.c
··· 95 95 QUEUE_FLAG_NAME(SQ_SCHED), 96 96 QUEUE_FLAG_NAME(DISABLE_WBT_DEF), 97 97 QUEUE_FLAG_NAME(NO_ELV_SWITCH), 98 + QUEUE_FLAG_NAME(QOS_ENABLED), 98 99 }; 99 100 #undef QUEUE_FLAG_NAME 100 101
+9 -4
block/blk-mq.c
··· 5033 5033 unsigned int memflags; 5034 5034 int i; 5035 5035 struct xarray elv_tbl, et_tbl; 5036 + bool queues_frozen = false; 5036 5037 5037 5038 lockdep_assert_held(&set->tag_list_lock); 5038 5039 ··· 5057 5056 blk_mq_sysfs_unregister_hctxs(q); 5058 5057 } 5059 5058 5060 - list_for_each_entry(q, &set->tag_list, tag_set_list) 5061 - blk_mq_freeze_queue_nomemsave(q); 5062 - 5063 5059 /* 5064 5060 * Switch IO scheduler to 'none', cleaning up the data associated 5065 5061 * with the previous scheduler. We will switch back once we are done ··· 5066 5068 if (blk_mq_elv_switch_none(q, &elv_tbl)) 5067 5069 goto switch_back; 5068 5070 5071 + list_for_each_entry(q, &set->tag_list, tag_set_list) 5072 + blk_mq_freeze_queue_nomemsave(q); 5073 + queues_frozen = true; 5069 5074 if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) 5070 5075 goto switch_back; 5071 5076 ··· 5092 5091 } 5093 5092 switch_back: 5094 5093 /* The blk_mq_elv_switch_back unfreezes queue for us. */ 5095 - list_for_each_entry(q, &set->tag_list, tag_set_list) 5094 + list_for_each_entry(q, &set->tag_list, tag_set_list) { 5095 + /* switch_back expects queue to be frozen */ 5096 + if (!queues_frozen) 5097 + blk_mq_freeze_queue_nomemsave(q); 5096 5098 blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl); 5099 + } 5097 5100 5098 5101 list_for_each_entry(q, &set->tag_list, tag_set_list) { 5099 5102 blk_mq_sysfs_register_hctxs(q);
+4 -4
block/blk-rq-qos.c
··· 2 2 3 3 #include "blk-rq-qos.h" 4 4 5 - __read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos); 6 - 7 5 /* 8 6 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded, 9 7 * false if 'v' + 1 would be bigger than 'below'. ··· 317 319 struct rq_qos *rqos = q->rq_qos; 318 320 q->rq_qos = rqos->next; 319 321 rqos->ops->exit(rqos); 320 - static_branch_dec(&block_rq_qos); 321 322 } 323 + blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q); 322 324 mutex_unlock(&q->rq_qos_mutex); 323 325 } 324 326 ··· 344 346 goto ebusy; 345 347 rqos->next = q->rq_qos; 346 348 q->rq_qos = rqos; 347 - static_branch_inc(&block_rq_qos); 349 + blk_queue_flag_set(QUEUE_FLAG_QOS_ENABLED, q); 348 350 349 351 blk_mq_unfreeze_queue(q, memflags); 350 352 ··· 375 377 break; 376 378 } 377 379 } 380 + if (!q->rq_qos) 381 + blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q); 378 382 blk_mq_unfreeze_queue(q, memflags); 379 383 380 384 mutex_lock(&q->debugfs_mutex);
+31 -17
block/blk-rq-qos.h
··· 12 12 #include "blk-mq-debugfs.h" 13 13 14 14 struct blk_mq_debugfs_attr; 15 - extern struct static_key_false block_rq_qos; 16 15 17 16 enum rq_qos_id { 18 17 RQ_QOS_WBT, ··· 112 113 113 114 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio) 114 115 { 115 - if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) 116 + if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) && 117 + q->rq_qos) 116 118 __rq_qos_cleanup(q->rq_qos, bio); 117 119 } 118 120 119 121 static inline void rq_qos_done(struct request_queue *q, struct request *rq) 120 122 { 121 - if (static_branch_unlikely(&block_rq_qos) && q->rq_qos && 122 - !blk_rq_is_passthrough(rq)) 123 + if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) && 124 + q->rq_qos && !blk_rq_is_passthrough(rq)) 123 125 __rq_qos_done(q->rq_qos, rq); 124 126 } 125 127 126 128 static inline void rq_qos_issue(struct request_queue *q, struct request *rq) 127 129 { 128 - if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) 130 + if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) && 131 + q->rq_qos) 129 132 __rq_qos_issue(q->rq_qos, rq); 130 133 } 131 134 132 135 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq) 133 136 { 134 - if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) 137 + if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) && 138 + q->rq_qos) 135 139 __rq_qos_requeue(q->rq_qos, rq); 136 140 } 137 141 138 142 static inline void rq_qos_done_bio(struct bio *bio) 139 143 { 140 - if (static_branch_unlikely(&block_rq_qos) && 141 - bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) || 142 - bio_flagged(bio, BIO_QOS_MERGED))) { 143 - struct request_queue *q = bdev_get_queue(bio->bi_bdev); 144 - if (q->rq_qos) 145 - __rq_qos_done_bio(q->rq_qos, bio); 146 - } 144 + struct request_queue *q; 145 + 146 + if (!bio->bi_bdev || (!bio_flagged(bio, BIO_QOS_THROTTLED) && 147 + !bio_flagged(bio, BIO_QOS_MERGED))) 148 + return; 149 + 
150 + q = bdev_get_queue(bio->bi_bdev); 151 + 152 + /* 153 + * If a bio has BIO_QOS_xxx set, it implicitly implies that 154 + * q->rq_qos is present. So, we skip re-checking q->rq_qos 155 + * here as an extra optimization and directly call 156 + * __rq_qos_done_bio(). 157 + */ 158 + __rq_qos_done_bio(q->rq_qos, bio); 147 159 } 148 160 149 161 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio) 150 162 { 151 - if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) { 163 + if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) && 164 + q->rq_qos) { 152 165 bio_set_flag(bio, BIO_QOS_THROTTLED); 153 166 __rq_qos_throttle(q->rq_qos, bio); 154 167 } ··· 169 158 static inline void rq_qos_track(struct request_queue *q, struct request *rq, 170 159 struct bio *bio) 171 160 { 172 - if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) 161 + if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) && 162 + q->rq_qos) 173 163 __rq_qos_track(q->rq_qos, rq, bio); 174 164 } 175 165 176 166 static inline void rq_qos_merge(struct request_queue *q, struct request *rq, 177 167 struct bio *bio) 178 168 { 179 - if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) { 169 + if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) && 170 + q->rq_qos) { 180 171 bio_set_flag(bio, BIO_QOS_MERGED); 181 172 __rq_qos_merge(q->rq_qos, rq, bio); 182 173 } ··· 186 173 187 174 static inline void rq_qos_queue_depth_changed(struct request_queue *q) 188 175 { 189 - if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) 176 + if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) && 177 + q->rq_qos) 190 178 __rq_qos_queue_depth_changed(q->rq_qos); 191 179 } 192 180
+6 -6
block/blk-settings.c
··· 157 157 switch (bi->csum_type) { 158 158 case BLK_INTEGRITY_CSUM_NONE: 159 159 if (bi->pi_tuple_size) { 160 - pr_warn("pi_tuple_size must be 0 when checksum type \ 161 - is none\n"); 160 + pr_warn("pi_tuple_size must be 0 when checksum type is none\n"); 162 161 return -EINVAL; 163 162 } 164 163 break; 165 164 case BLK_INTEGRITY_CSUM_CRC: 166 165 case BLK_INTEGRITY_CSUM_IP: 167 166 if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) { 168 - pr_warn("pi_tuple_size mismatch for T10 PI: expected \ 169 - %zu, got %u\n", 167 + pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n", 170 168 sizeof(struct t10_pi_tuple), 171 169 bi->pi_tuple_size); 172 170 return -EINVAL; ··· 172 174 break; 173 175 case BLK_INTEGRITY_CSUM_CRC64: 174 176 if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) { 175 - pr_warn("pi_tuple_size mismatch for CRC64 PI: \ 176 - expected %zu, got %u\n", 177 + pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n", 177 178 sizeof(struct crc64_pi_tuple), 178 179 bi->pi_tuple_size); 179 180 return -EINVAL; ··· 969 972 goto incompatible; 970 973 if (ti->csum_type != bi->csum_type) 971 974 goto incompatible; 975 + if (ti->pi_tuple_size != bi->pi_tuple_size) 976 + goto incompatible; 972 977 if ((ti->flags & BLK_INTEGRITY_REF_TAG) != 973 978 (bi->flags & BLK_INTEGRITY_REF_TAG)) 974 979 goto incompatible; ··· 979 980 ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) | 980 981 (bi->flags & BLK_INTEGRITY_REF_TAG); 981 982 ti->csum_type = bi->csum_type; 983 + ti->pi_tuple_size = bi->pi_tuple_size; 982 984 ti->metadata_size = bi->metadata_size; 983 985 ti->pi_offset = bi->pi_offset; 984 986 ti->interval_exp = bi->interval_exp;
+21 -18
drivers/block/loop.c
··· 137 137 static int max_part; 138 138 static int part_shift; 139 139 140 - static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) 140 + static loff_t lo_calculate_size(struct loop_device *lo, struct file *file) 141 141 { 142 + struct kstat stat; 142 143 loff_t loopsize; 144 + int ret; 143 145 144 - /* Compute loopsize in bytes */ 145 - loopsize = i_size_read(file->f_mapping->host); 146 - if (offset > 0) 147 - loopsize -= offset; 146 + /* 147 + * Get the accurate file size. This provides better results than 148 + * cached inode data, particularly for network filesystems where 149 + * metadata may be stale. 150 + */ 151 + ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0); 152 + if (ret) 153 + return 0; 154 + 155 + loopsize = stat.size; 156 + if (lo->lo_offset > 0) 157 + loopsize -= lo->lo_offset; 148 158 /* offset is beyond i_size, weird but possible */ 149 159 if (loopsize < 0) 150 160 return 0; 151 - 152 - if (sizelimit > 0 && sizelimit < loopsize) 153 - loopsize = sizelimit; 161 + if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) 162 + loopsize = lo->lo_sizelimit; 154 163 /* 155 164 * Unfortunately, if we want to do I/O on the device, 156 165 * the number of 512-byte sectors has to fit into a sector_t. 
157 166 */ 158 167 return loopsize >> 9; 159 - } 160 - 161 - static loff_t get_loop_size(struct loop_device *lo, struct file *file) 162 - { 163 - return get_size(lo->lo_offset, lo->lo_sizelimit, file); 164 168 } 165 169 166 170 /* ··· 573 569 error = -EINVAL; 574 570 575 571 /* size of the new backing store needs to be the same */ 576 - if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 572 + if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file)) 577 573 goto out_err; 578 574 579 575 /* ··· 1067 1063 loop_update_dio(lo); 1068 1064 loop_sysfs_init(lo); 1069 1065 1070 - size = get_loop_size(lo, file); 1066 + size = lo_calculate_size(lo, file); 1071 1067 loop_set_size(lo, size); 1072 1068 1073 1069 /* Order wrt reading lo_state in loop_validate_file(). */ ··· 1259 1255 if (partscan) 1260 1256 clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); 1261 1257 if (!err && size_changed) { 1262 - loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, 1263 - lo->lo_backing_file); 1258 + loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file); 1264 1259 loop_set_size(lo, new_size); 1265 1260 } 1266 1261 out_unlock: ··· 1402 1399 if (unlikely(lo->lo_state != Lo_bound)) 1403 1400 return -ENXIO; 1404 1401 1405 - size = get_loop_size(lo, lo->lo_backing_file); 1402 + size = lo_calculate_size(lo, lo->lo_backing_file); 1406 1403 loop_set_size(lo, size); 1407 1404 1408 1405 return 0;
+92 -30
drivers/md/md.c
··· 339 339 * so all the races disappear. 340 340 */ 341 341 static bool create_on_open = true; 342 + static bool legacy_async_del_gendisk = true; 342 343 343 344 /* 344 345 * We have a system wide 'event count' that is incremented ··· 878 877 export_rdev(rdev, mddev); 879 878 } 880 879 881 - /* Call del_gendisk after release reconfig_mutex to avoid 882 - * deadlock (e.g. call del_gendisk under the lock and an 883 - * access to sysfs files waits the lock) 884 - * And MD_DELETED is only used for md raid which is set in 885 - * do_md_stop. dm raid only uses md_stop to stop. So dm raid 886 - * doesn't need to check MD_DELETED when getting reconfig lock 887 - */ 888 - if (test_bit(MD_DELETED, &mddev->flags)) 889 - del_gendisk(mddev->gendisk); 880 + if (!legacy_async_del_gendisk) { 881 + /* 882 + * Call del_gendisk after release reconfig_mutex to avoid 883 + * deadlock (e.g. call del_gendisk under the lock and an 884 + * access to sysfs files waits the lock) 885 + * And MD_DELETED is only used for md raid which is set in 886 + * do_md_stop. dm raid only uses md_stop to stop. 
So dm raid 887 + * doesn't need to check MD_DELETED when getting reconfig lock 888 + */ 889 + if (test_bit(MD_DELETED, &mddev->flags)) 890 + del_gendisk(mddev->gendisk); 891 + } 890 892 } 891 893 EXPORT_SYMBOL_GPL(mddev_unlock); 892 894 ··· 1423 1419 else { 1424 1420 if (sb->events_hi == sb->cp_events_hi && 1425 1421 sb->events_lo == sb->cp_events_lo) { 1426 - mddev->resync_offset = sb->resync_offset; 1422 + mddev->resync_offset = sb->recovery_cp; 1427 1423 } else 1428 1424 mddev->resync_offset = 0; 1429 1425 } ··· 1551 1547 mddev->minor_version = sb->minor_version; 1552 1548 if (mddev->in_sync) 1553 1549 { 1554 - sb->resync_offset = mddev->resync_offset; 1550 + sb->recovery_cp = mddev->resync_offset; 1555 1551 sb->cp_events_hi = (mddev->events>>32); 1556 1552 sb->cp_events_lo = (u32)mddev->events; 1557 1553 if (mddev->resync_offset == MaxSector) 1558 1554 sb->state = (1<< MD_SB_CLEAN); 1559 1555 } else 1560 - sb->resync_offset = 0; 1556 + sb->recovery_cp = 0; 1561 1557 1562 1558 sb->layout = mddev->layout; 1563 1559 sb->chunk_size = mddev->chunk_sectors << 9; ··· 4839 4835 static struct md_sysfs_entry md_metadata = 4840 4836 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4841 4837 4838 + static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors) 4839 + { 4840 + return rdev->raid_disk >= 0 && 4841 + !test_bit(Journal, &rdev->flags) && 4842 + !test_bit(Faulty, &rdev->flags) && 4843 + !test_bit(In_sync, &rdev->flags) && 4844 + rdev->recovery_offset < sectors; 4845 + } 4846 + 4847 + static enum sync_action md_get_active_sync_action(struct mddev *mddev) 4848 + { 4849 + struct md_rdev *rdev; 4850 + bool is_recover = false; 4851 + 4852 + if (mddev->resync_offset < MaxSector) 4853 + return ACTION_RESYNC; 4854 + 4855 + if (mddev->reshape_position != MaxSector) 4856 + return ACTION_RESHAPE; 4857 + 4858 + rcu_read_lock(); 4859 + rdev_for_each_rcu(rdev, mddev) { 4860 + if (rdev_needs_recovery(rdev, MaxSector)) { 4861 + 
is_recover = true; 4862 + break; 4863 + } 4864 + } 4865 + rcu_read_unlock(); 4866 + 4867 + return is_recover ? ACTION_RECOVER : ACTION_IDLE; 4868 + } 4869 + 4842 4870 enum sync_action md_sync_action(struct mddev *mddev) 4843 4871 { 4844 4872 unsigned long recovery = mddev->recovery; 4873 + enum sync_action active_action; 4845 4874 4846 4875 /* 4847 4876 * frozen has the highest priority, means running sync_thread will be ··· 4898 4861 !test_bit(MD_RECOVERY_NEEDED, &recovery)) 4899 4862 return ACTION_IDLE; 4900 4863 4901 - if (test_bit(MD_RECOVERY_RESHAPE, &recovery) || 4902 - mddev->reshape_position != MaxSector) 4864 + /* 4865 + * Check if any sync operation (resync/recover/reshape) is 4866 + * currently active. This ensures that only one sync operation 4867 + * can run at a time. Returns the type of active operation, or 4868 + * ACTION_IDLE if none are active. 4869 + */ 4870 + active_action = md_get_active_sync_action(mddev); 4871 + if (active_action != ACTION_IDLE) 4872 + return active_action; 4873 + 4874 + if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4903 4875 return ACTION_RESHAPE; 4904 4876 4905 4877 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) ··· 5864 5818 { 5865 5819 struct mddev *mddev = container_of(ko, struct mddev, kobj); 5866 5820 5821 + if (legacy_async_del_gendisk) { 5822 + if (mddev->sysfs_state) 5823 + sysfs_put(mddev->sysfs_state); 5824 + if (mddev->sysfs_level) 5825 + sysfs_put(mddev->sysfs_level); 5826 + del_gendisk(mddev->gendisk); 5827 + } 5867 5828 put_disk(mddev->gendisk); 5868 5829 } 5869 5830 ··· 6073 6020 static int md_alloc_and_put(dev_t dev, char *name) 6074 6021 { 6075 6022 struct mddev *mddev = md_alloc(dev, name); 6023 + 6024 + if (legacy_async_del_gendisk) 6025 + pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n"); 6076 6026 6077 6027 if (IS_ERR(mddev)) 6078 6028 return PTR_ERR(mddev); ··· 6487 6431 mddev->persistent = 0; 6488 6432 mddev->level = LEVEL_NONE; 6489 6433 mddev->clevel[0] = 
0; 6490 - /* if UNTIL_STOP is set, it's cleared here */ 6491 - mddev->hold_active = 0; 6492 - /* Don't clear MD_CLOSING, or mddev can be opened again. */ 6493 - mddev->flags &= BIT_ULL_MASK(MD_CLOSING); 6434 + 6435 + /* 6436 + * For legacy_async_del_gendisk mode, it can stop the array in the 6437 + * middle of assembling it, then it still can access the array. So 6438 + * it needs to clear MD_CLOSING. If not legacy_async_del_gendisk, 6439 + * it can't open the array again after stopping it. So it doesn't 6440 + * clear MD_CLOSING. 6441 + */ 6442 + if (legacy_async_del_gendisk && mddev->hold_active) { 6443 + clear_bit(MD_CLOSING, &mddev->flags); 6444 + } else { 6445 + /* if UNTIL_STOP is set, it's cleared here */ 6446 + mddev->hold_active = 0; 6447 + /* Don't clear MD_CLOSING, or mddev can be opened again. */ 6448 + mddev->flags &= BIT_ULL_MASK(MD_CLOSING); 6449 + } 6494 6450 mddev->sb_flags = 0; 6495 6451 mddev->ro = MD_RDWR; 6496 6452 mddev->metadata_type[0] = 0; ··· 6726 6658 6727 6659 export_array(mddev); 6728 6660 md_clean(mddev); 6729 - set_bit(MD_DELETED, &mddev->flags); 6661 + if (!legacy_async_del_gendisk) 6662 + set_bit(MD_DELETED, &mddev->flags); 6730 6663 } 6731 6664 md_new_event(); 6732 6665 sysfs_notify_dirent_safe(mddev->sysfs_state); ··· 9037 8968 start = MaxSector; 9038 8969 rcu_read_lock(); 9039 8970 rdev_for_each_rcu(rdev, mddev) 9040 - if (rdev->raid_disk >= 0 && 9041 - !test_bit(Journal, &rdev->flags) && 9042 - !test_bit(Faulty, &rdev->flags) && 9043 - !test_bit(In_sync, &rdev->flags) && 9044 - rdev->recovery_offset < start) 8971 + if (rdev_needs_recovery(rdev, start)) 9045 8972 start = rdev->recovery_offset; 9046 8973 rcu_read_unlock(); 9047 8974 ··· 9396 9331 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 9397 9332 rcu_read_lock(); 9398 9333 rdev_for_each_rcu(rdev, mddev) 9399 - if (rdev->raid_disk >= 0 && 9400 - mddev->delta_disks >= 0 && 9401 - !test_bit(Journal, &rdev->flags) && 9402 - !test_bit(Faulty, &rdev->flags) && 9403 - 
!test_bit(In_sync, &rdev->flags) && 9404 - rdev->recovery_offset < mddev->curr_resync) 9334 + if (mddev->delta_disks >= 0 && 9335 + rdev_needs_recovery(rdev, mddev->curr_resync)) 9405 9336 rdev->recovery_offset = mddev->curr_resync; 9406 9337 rcu_read_unlock(); 9407 9338 } ··· 10453 10392 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 10454 10393 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 10455 10394 module_param(create_on_open, bool, S_IRUSR|S_IWUSR); 10395 + module_param(legacy_async_del_gendisk, bool, 0600); 10456 10396 10457 10397 MODULE_LICENSE("GPL"); 10458 10398 MODULE_DESCRIPTION("MD RAID framework");
+1
include/linux/blkdev.h
··· 656 656 QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */ 657 657 QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */ 658 658 QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */ 659 + QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */ 659 660 QUEUE_FLAG_MAX 660 661 }; 661 662
+1 -1
include/uapi/linux/raid/md_p.h
··· 173 173 #else 174 174 #error unspecified endianness 175 175 #endif 176 - __u32 resync_offset; /* 11 resync checkpoint sector count */ 176 + __u32 recovery_cp; /* 11 resync checkpoint sector count */ 177 177 /* There are only valid for minor_version > 90 */ 178 178 __u64 reshape_position; /* 12,13 next address in array-space for reshape */ 179 179 __u32 new_level; /* 14 new level we are reshaping to */
+2 -2
tools/testing/selftests/ublk/kublk.c
··· 1400 1400 1401 1401 if (!((1ULL << i) & features)) 1402 1402 continue; 1403 - if (i < sizeof(feat_map) / sizeof(feat_map[0])) 1403 + if (i < ARRAY_SIZE(feat_map)) 1404 1404 feat = feat_map[i]; 1405 1405 else 1406 1406 feat = "unknown"; ··· 1477 1477 printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n"); 1478 1478 printf("\tdefault: nthreads=nr_queues"); 1479 1479 1480 - for (i = 0; i < sizeof(tgt_ops_list) / sizeof(tgt_ops_list[0]); i++) { 1480 + for (i = 0; i < ARRAY_SIZE(tgt_ops_list); i++) { 1481 1481 const struct ublk_tgt_ops *ops = tgt_ops_list[i]; 1482 1482 1483 1483 if (ops->usage)