Repository: Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (topics: kernel, os, linux)

block: remove the q argument from blk_rq_map_kern

Remove the q argument from blk_rq_map_kern and the internal helpers
called by it as the queue can trivially be derived from the request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20250507120451.4000627-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
Commit: af78428e (parent: 8dd16f5e)

+18 -24
+9 -13
block/blk-map.c
···
 
 /**
  * bio_map_kern - map kernel address into bio
- * @q: the struct request_queue for the bio
  * @data: pointer to buffer to map
  * @len: length in bytes
  * @gfp_mask: allocation flags for bio allocation
···
  * Map the kernel address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-static struct bio *bio_map_kern(struct request_queue *q, void *data,
-		unsigned int len, gfp_t gfp_mask)
+static struct bio *bio_map_kern(void *data, unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long kaddr = (unsigned long)data;
 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
···
 
 /**
  * bio_copy_kern - copy kernel address into bio
- * @q: the struct request_queue for the bio
  * @data: pointer to buffer to copy
  * @len: length in bytes
  * @gfp_mask: allocation flags for bio and page allocation
···
  * copy the kernel address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-static struct bio *bio_copy_kern(struct request_queue *q, void *data,
-		unsigned int len, gfp_t gfp_mask, int reading)
+static struct bio *bio_copy_kern(void *data, unsigned int len, gfp_t gfp_mask,
+		int reading)
 {
 	unsigned long kaddr = (unsigned long)data;
 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
···
 
 /**
  * blk_rq_map_kern - map kernel data to a request, for passthrough requests
- * @q: request queue where request should be inserted
  * @rq: request to fill
  * @kbuf: the kernel buffer
  * @len: length of user data
···
  * buffer is used. Can be called multiple times to append multiple
  * buffers.
  */
-int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
-		unsigned int len, gfp_t gfp_mask)
+int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
+		gfp_t gfp_mask)
 {
 	int reading = rq_data_dir(rq) == READ;
 	unsigned long addr = (unsigned long) kbuf;
 	struct bio *bio;
 	int ret;
 
-	if (len > (queue_max_hw_sectors(q) << 9))
+	if (len > (queue_max_hw_sectors(rq->q) << SECTOR_SHIFT))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
-		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+	if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
+		bio = bio_copy_kern(kbuf, len, gfp_mask, reading);
 	else
-		bio = bio_map_kern(q, kbuf, len, gfp_mask);
+		bio = bio_map_kern(kbuf, len, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
+1 -1
drivers/block/pktcdvd.c
···
 	scmd = blk_mq_rq_to_pdu(rq);
 
 	if (cgc->buflen) {
-		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+		ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen,
 				      GFP_NOIO);
 		if (ret)
 			goto out;
+1 -2
drivers/block/ublk_drv.c
···
 	if (ret)
 		goto free_req;
 
-	ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
-			GFP_KERNEL);
+	ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL);
 	if (ret)
 		goto erase_desc;
 
+2 -2
drivers/block/virtio_blk.c
···
 	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
 	vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
 
-	err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
+	err = blk_rq_map_kern(req, report_buf, report_len, GFP_KERNEL);
 	if (err)
 		goto out;
 
···
 	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
 	vbr->out_hdr.sector = 0;
 
-	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+	err = blk_rq_map_kern(req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
 	if (err)
 		goto out;
 
+1 -1
drivers/nvme/host/core.c
···
 	req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
 	if (buffer && bufflen) {
-		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
+		ret = blk_rq_map_kern(req, buffer, bufflen, GFP_KERNEL);
 		if (ret)
 			goto out;
 	}
+1 -1
drivers/scsi/scsi_ioctl.c
···
 	}
 
 	if (bytes) {
-		err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
+		err = blk_rq_map_kern(rq, buffer, bytes, GFP_NOIO);
 		if (err)
 			goto error;
 	}
+1 -2
drivers/scsi/scsi_lib.c
···
 		return PTR_ERR(req);
 
 	if (bufflen) {
-		ret = blk_rq_map_kern(sdev->request_queue, req,
-				buffer, bufflen, GFP_NOIO);
+		ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO);
 		if (ret)
 			goto out;
 	}
+2 -2
include/linux/blk-mq.h
···
 int blk_rq_map_user_iov(struct request_queue *, struct request *,
 		struct rq_map_data *, const struct iov_iter *, gfp_t);
 int blk_rq_unmap_user(struct bio *);
-int blk_rq_map_kern(struct request_queue *, struct request *, void *,
-		unsigned int, gfp_t);
+int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
+		gfp_t gfp);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
 void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);