Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove unused parameter 'q' in __blk_rq_map_sg()

The request_queue parameter is no longer used by blk_rq_map_sg() and
__blk_rq_map_sg(). Remove it.

Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250313035322.243239-1-anuj20.g@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Anuj Gupta and committed by
Jens Axboe
75618ac6 017ff379

+22 -26
+2 -2
block/blk-merge.c
··· 551 551 * Map a request to scatterlist, return number of sg entries setup. Caller 552 552 * must make sure sg can hold rq->nr_phys_segments entries. 553 553 */ 554 - int __blk_rq_map_sg(struct request_queue *q, struct request *rq, 555 - struct scatterlist *sglist, struct scatterlist **last_sg) 554 + int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist, 555 + struct scatterlist **last_sg) 556 556 { 557 557 struct req_iterator iter = { 558 558 .bio = rq->bio,
+1 -1
block/bsg-lib.c
··· 219 219 if (!buf->sg_list) 220 220 return -ENOMEM; 221 221 sg_init_table(buf->sg_list, req->nr_phys_segments); 222 - buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); 222 + buf->sg_cnt = blk_rq_map_sg(req, buf->sg_list); 223 223 buf->payload_len = blk_rq_bytes(req); 224 224 return 0; 225 225 }
+1 -1
drivers/block/mtip32xx/mtip32xx.c
··· 2056 2056 unsigned int nents; 2057 2057 2058 2058 /* Map the scatter list for DMA access */ 2059 - nents = blk_rq_map_sg(hctx->queue, rq, command->sg); 2059 + nents = blk_rq_map_sg(rq, command->sg); 2060 2060 nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); 2061 2061 2062 2062 prefetch(&port->flags);
+1 -1
drivers/block/rnbd/rnbd-clt.c
··· 1010 1010 * See queue limits. 1011 1011 */ 1012 1012 if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES)) 1013 - sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl); 1013 + sg_cnt = blk_rq_map_sg(rq, iu->sgt.sgl); 1014 1014 1015 1015 if (sg_cnt == 0) 1016 1016 sg_mark_end(&iu->sgt.sgl[0]);
+1 -1
drivers/block/sunvdc.c
··· 485 485 } 486 486 487 487 sg_init_table(sg, port->ring_cookies); 488 - nsg = blk_rq_map_sg(req->q, req, sg); 488 + nsg = blk_rq_map_sg(req, sg); 489 489 490 490 len = 0; 491 491 for (i = 0; i < nsg; i++)
+1 -1
drivers/block/virtio_blk.c
··· 226 226 if (unlikely(err)) 227 227 return -ENOMEM; 228 228 229 - return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl); 229 + return blk_rq_map_sg(req, vbr->sg_table.sgl); 230 230 } 231 231 232 232 static void virtblk_cleanup_cmd(struct request *req)
+1 -1
drivers/block/xen-blkfront.c
··· 751 751 id = blkif_ring_get_request(rinfo, req, &final_ring_req); 752 752 ring_req = &rinfo->shadow[id].req; 753 753 754 - num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg); 754 + num_sg = blk_rq_map_sg(req, rinfo->shadow[id].sg); 755 755 num_grant = 0; 756 756 /* Calculate the number of grant used */ 757 757 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
+1 -1
drivers/memstick/core/ms_block.c
··· 1904 1904 1905 1905 /* process the request */ 1906 1906 dbg_verbose("IO: processing new request"); 1907 - blk_rq_map_sg(msb->queue, req, sg); 1907 + blk_rq_map_sg(req, sg); 1908 1908 1909 1909 lba = blk_rq_pos(req); 1910 1910
+1 -3
drivers/memstick/core/mspro_block.c
··· 627 627 while (true) { 628 628 msb->current_page = 0; 629 629 msb->current_seg = 0; 630 - msb->seg_count = blk_rq_map_sg(msb->block_req->q, 631 - msb->block_req, 632 - msb->req_sg); 630 + msb->seg_count = blk_rq_map_sg(msb->block_req, msb->req_sg); 633 631 634 632 if (!msb->seg_count) { 635 633 unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
+1 -1
drivers/mmc/core/queue.c
··· 523 523 { 524 524 struct request *req = mmc_queue_req_to_req(mqrq); 525 525 526 - return blk_rq_map_sg(mq->queue, req, mqrq->sg); 526 + return blk_rq_map_sg(req, mqrq->sg); 527 527 }
+1 -1
drivers/mtd/ubi/block.c
··· 199 199 * and ubi_read_sg() will check that limit. 200 200 */ 201 201 ubi_sgl_init(&pdu->usgl); 202 - blk_rq_map_sg(req->q, req, pdu->usgl.sg); 202 + blk_rq_map_sg(req, pdu->usgl.sg); 203 203 204 204 while (bytes_left) { 205 205 /*
+1 -1
drivers/nvme/host/apple.c
··· 525 525 if (!iod->sg) 526 526 return BLK_STS_RESOURCE; 527 527 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); 528 - iod->nents = blk_rq_map_sg(req->q, req, iod->sg); 528 + iod->nents = blk_rq_map_sg(req, iod->sg); 529 529 if (!iod->nents) 530 530 goto out_free_sg; 531 531
+1 -1
drivers/nvme/host/fc.c
··· 2620 2620 if (ret) 2621 2621 return -ENOMEM; 2622 2622 2623 - op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 2623 + op->nents = blk_rq_map_sg(rq, freq->sg_table.sgl); 2624 2624 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); 2625 2625 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 2626 2626 op->nents, rq_dma_dir(rq));
+1 -1
drivers/nvme/host/pci.c
··· 812 812 if (!iod->sgt.sgl) 813 813 return BLK_STS_RESOURCE; 814 814 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); 815 - iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); 815 + iod->sgt.orig_nents = blk_rq_map_sg(req, iod->sgt.sgl); 816 816 if (!iod->sgt.orig_nents) 817 817 goto out_free_sg; 818 818
+1 -2
drivers/nvme/host/rdma.c
··· 1476 1476 if (ret) 1477 1477 return -ENOMEM; 1478 1478 1479 - req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, 1480 - req->data_sgl.sg_table.sgl); 1479 + req->data_sgl.nents = blk_rq_map_sg(rq, req->data_sgl.sg_table.sgl); 1481 1480 1482 1481 *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, 1483 1482 req->data_sgl.nents, rq_dma_dir(rq));
+1 -1
drivers/nvme/target/loop.c
··· 162 162 } 163 163 164 164 iod->req.sg = iod->sg_table.sgl; 165 - iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); 165 + iod->req.sg_cnt = blk_rq_map_sg(req, iod->sg_table.sgl); 166 166 iod->req.transfer_len = blk_rq_payload_bytes(req); 167 167 } 168 168
+1 -1
drivers/scsi/scsi_lib.c
··· 1149 1149 * Next, walk the list, and fill in the addresses and sizes of 1150 1150 * each segment. 1151 1151 */ 1152 - count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg); 1152 + count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg); 1153 1153 1154 1154 if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) { 1155 1155 unsigned int pad_len =
+4 -5
include/linux/blk-mq.h
··· 1155 1155 return max_t(unsigned short, rq->nr_phys_segments, 1); 1156 1156 } 1157 1157 1158 - int __blk_rq_map_sg(struct request_queue *q, struct request *rq, 1159 - struct scatterlist *sglist, struct scatterlist **last_sg); 1160 - static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, 1161 - struct scatterlist *sglist) 1158 + int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist, 1159 + struct scatterlist **last_sg); 1160 + static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist) 1162 1161 { 1163 1162 struct scatterlist *last_sg = NULL; 1164 1163 1165 - return __blk_rq_map_sg(q, rq, sglist, &last_sg); 1164 + return __blk_rq_map_sg(rq, sglist, &last_sg); 1166 1165 } 1167 1166 void blk_dump_rq_flags(struct request *, char *); 1168 1167