Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: add a bdev_limits helper

Add a helper to get the queue_limits from the bdev without having to
poke into the request_queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20241029141937.249920-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe.
2f5a65ef e4e535bf

+19 -18
+1 -2
block/blk-merge.c
··· 411 411 */ 412 412 struct bio *bio_split_to_limits(struct bio *bio) 413 413 { 414 - const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits; 415 414 unsigned int nr_segs; 416 415 417 - return __bio_split_to_limits(bio, lim, &nr_segs); 416 + return __bio_split_to_limits(bio, bdev_limits(bio->bi_bdev), &nr_segs); 418 417 } 419 418 EXPORT_SYMBOL(bio_split_to_limits); 420 419
+1 -1
block/blk-settings.c
··· 661 661 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev, 662 662 sector_t offset, const char *pfx) 663 663 { 664 - if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits, 664 + if (blk_stack_limits(t, bdev_limits(bdev), 665 665 get_start_sect(bdev) + offset)) 666 666 pr_notice("%s: Warning: Device %pg is misaligned\n", 667 667 pfx, bdev);
+2 -2
drivers/md/dm-cache-target.c
··· 3360 3360 static void disable_passdown_if_not_supported(struct cache *cache) 3361 3361 { 3362 3362 struct block_device *origin_bdev = cache->origin_dev->bdev; 3363 - struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits; 3363 + struct queue_limits *origin_limits = bdev_limits(origin_bdev); 3364 3364 const char *reason = NULL; 3365 3365 3366 3366 if (!cache->features.discard_passdown) ··· 3382 3382 static void set_discard_limits(struct cache *cache, struct queue_limits *limits) 3383 3383 { 3384 3384 struct block_device *origin_bdev = cache->origin_dev->bdev; 3385 - struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits; 3385 + struct queue_limits *origin_limits = bdev_limits(origin_bdev); 3386 3386 3387 3387 if (!cache->features.discard_passdown) { 3388 3388 /* No passdown is done so setting own virtual limits */
+2 -2
drivers/md/dm-clone-target.c
··· 2020 2020 static void disable_passdown_if_not_supported(struct clone *clone) 2021 2021 { 2022 2022 struct block_device *dest_dev = clone->dest_dev->bdev; 2023 - struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits; 2023 + struct queue_limits *dest_limits = bdev_limits(dest_dev); 2024 2024 const char *reason = NULL; 2025 2025 2026 2026 if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) ··· 2041 2041 static void set_discard_limits(struct clone *clone, struct queue_limits *limits) 2042 2042 { 2043 2043 struct block_device *dest_bdev = clone->dest_dev->bdev; 2044 - struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits; 2044 + struct queue_limits *dest_limits = bdev_limits(dest_bdev); 2045 2045 2046 2046 if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) { 2047 2047 /* No passdown is done so we set our own virtual limits */
+1 -1
drivers/md/dm-thin.c
··· 2842 2842 { 2843 2843 struct pool *pool = pt->pool; 2844 2844 struct block_device *data_bdev = pt->data_dev->bdev; 2845 - struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits; 2845 + struct queue_limits *data_limits = bdev_limits(data_bdev); 2846 2846 const char *reason = NULL; 2847 2847 2848 2848 if (!pt->adjusted_pf.discard_passdown)
+2 -5
fs/btrfs/zoned.c
··· 707 707 * zoned mode. In this case, we don't have a valid max zone 708 708 * append size. 709 709 */ 710 - if (bdev_is_zoned(device->bdev)) { 711 - blk_stack_limits(lim, 712 - &bdev_get_queue(device->bdev)->limits, 713 - 0); 714 - } 710 + if (bdev_is_zoned(device->bdev)) 711 + blk_stack_limits(lim, bdev_limits(device->bdev), 0); 715 712 } 716 713 717 714 /*
+10 -5
include/linux/blkdev.h
··· 1159 1159 */ 1160 1160 #define BLK_DEF_MAX_SECTORS_CAP 2560u 1161 1161 1162 + static inline struct queue_limits *bdev_limits(struct block_device *bdev) 1163 + { 1164 + return &bdev_get_queue(bdev)->limits; 1165 + } 1166 + 1162 1167 static inline unsigned long queue_segment_boundary(const struct request_queue *q) 1163 1168 { 1164 1169 return q->limits.seg_boundary_mask; ··· 1298 1293 1299 1294 static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev) 1300 1295 { 1301 - return bdev_get_queue(bdev)->limits.max_discard_sectors; 1296 + return bdev_limits(bdev)->max_discard_sectors; 1302 1297 } 1303 1298 1304 1299 static inline unsigned int bdev_discard_granularity(struct block_device *bdev) 1305 1300 { 1306 - return bdev_get_queue(bdev)->limits.discard_granularity; 1301 + return bdev_limits(bdev)->discard_granularity; 1307 1302 } 1308 1303 1309 1304 static inline unsigned int 1310 1305 bdev_max_secure_erase_sectors(struct block_device *bdev) 1311 1306 { 1312 - return bdev_get_queue(bdev)->limits.max_secure_erase_sectors; 1307 + return bdev_limits(bdev)->max_secure_erase_sectors; 1313 1308 } 1314 1309 1315 1310 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) 1316 1311 { 1317 - return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors; 1312 + return bdev_limits(bdev)->max_write_zeroes_sectors; 1318 1313 } 1319 1314 1320 1315 static inline bool bdev_nonrot(struct block_device *bdev) ··· 1350 1345 1351 1346 static inline bool bdev_fua(struct block_device *bdev) 1352 1347 { 1353 - return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA; 1348 + return bdev_limits(bdev)->features & BLK_FEAT_FUA; 1354 1349 } 1355 1350 1356 1351 static inline bool bdev_nowait(struct block_device *bdev)