Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rnbd-srv: simplify rnbd_srv_fill_msg_open_rsp

Remove all the wrappers and just get the information directly from
the block device, or where no such helpers exist the request_queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Acked-by: Jack Wang <jinpu.wang@ionos.com>
Link: https://lore.kernel.org/r/20220909131509.3263924-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe
9ad15320 b2bed51a

+13 -52
-30
drivers/block/rnbd/rnbd-srv-dev.h
··· 31 31 32 32 void rnbd_endio(void *priv, int error); 33 33 34 - static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev) 35 - { 36 - return queue_max_segments(bdev_get_queue(dev->bdev)); 37 - } 38 - 39 - static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev) 40 - { 41 - return queue_max_hw_sectors(bdev_get_queue(dev->bdev)); 42 - } 43 - 44 - static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev) 45 - { 46 - return bdev_max_secure_erase_sectors(dev->bdev); 47 - } 48 - 49 - static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev) 50 - { 51 - return bdev_max_discard_sectors(dev->bdev); 52 - } 53 - 54 - static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev) 55 - { 56 - return bdev_get_queue(dev->bdev)->limits.discard_granularity; 57 - } 58 - 59 - static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev) 60 - { 61 - return bdev_discard_alignment(dev->bdev); 62 - } 63 - 64 34 #endif /* RNBD_SRV_DEV_H */
+13 -22
drivers/block/rnbd/rnbd-srv.c
··· 544 544 static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp, 545 545 struct rnbd_srv_sess_dev *sess_dev) 546 546 { 547 - struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev; 547 + struct block_device *bdev = sess_dev->rnbd_dev->bdev; 548 548 549 549 rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP); 550 - rsp->device_id = 551 - cpu_to_le32(sess_dev->device_id); 552 - rsp->nsectors = 553 - cpu_to_le64(get_capacity(rnbd_dev->bdev->bd_disk)); 554 - rsp->logical_block_size = 555 - cpu_to_le16(bdev_logical_block_size(rnbd_dev->bdev)); 556 - rsp->physical_block_size = 557 - cpu_to_le16(bdev_physical_block_size(rnbd_dev->bdev)); 558 - rsp->max_segments = 559 - cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev)); 550 + rsp->device_id = cpu_to_le32(sess_dev->device_id); 551 + rsp->nsectors = cpu_to_le64(bdev_nr_sectors(bdev)); 552 + rsp->logical_block_size = cpu_to_le16(bdev_logical_block_size(bdev)); 553 + rsp->physical_block_size = cpu_to_le16(bdev_physical_block_size(bdev)); 554 + rsp->max_segments = cpu_to_le16(bdev_max_segments(bdev)); 560 555 rsp->max_hw_sectors = 561 - cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev)); 556 + cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev))); 562 557 rsp->max_write_same_sectors = 0; 563 - rsp->max_discard_sectors = 564 - cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev)); 565 - rsp->discard_granularity = 566 - cpu_to_le32(rnbd_dev_get_discard_granularity(rnbd_dev)); 567 - rsp->discard_alignment = 568 - cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev)); 569 - rsp->secure_discard = 570 - cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev)); 558 + rsp->max_discard_sectors = cpu_to_le32(bdev_max_discard_sectors(bdev)); 559 + rsp->discard_granularity = cpu_to_le32(bdev_discard_granularity(bdev)); 560 + rsp->discard_alignment = cpu_to_le32(bdev_discard_alignment(bdev)); 561 + rsp->secure_discard = cpu_to_le16(bdev_max_secure_erase_sectors(bdev)); 571 562 rsp->cache_policy = 0; 572 - if (bdev_write_cache(rnbd_dev->bdev)) 563 + if (bdev_write_cache(bdev)) 573 564 rsp->cache_policy |= RNBD_WRITEBACK; 574 - if (bdev_fua(rnbd_dev->bdev)) 565 + if (bdev_fua(bdev)) 575 566 rsp->cache_policy |= RNBD_FUA; 576 567 } 577 568