Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove the gendisk argument to blk_execute_rq

Remove the gendisk argument to blk_execute_rq and blk_execute_rq_nowait
given that it is unused now. Also convert the boolean at_head parameter
to actually use the bool type while touching the prototype.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20211126121802.2090656-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Hellwig and committed by
Jens Axboe
b84ba30b f3fa33ac

+35 -42
+3 -7
block/blk-mq.c
··· 1153 1153 1154 1154 /** 1155 1155 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution 1156 - * @bd_disk: matching gendisk 1157 1156 * @rq: request to insert 1158 1157 * @at_head: insert request at head or tail of queue 1159 1158 * @done: I/O completion handler ··· 1164 1165 * Note: 1165 1166 * This function will invoke @done directly if the queue is dead. 1166 1167 */ 1167 - void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq, 1168 - int at_head, rq_end_io_fn *done) 1168 + void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done) 1169 1169 { 1170 1170 WARN_ON(irqs_disabled()); 1171 1171 WARN_ON(!blk_rq_is_passthrough(rq)); ··· 1202 1204 1203 1205 /** 1204 1206 * blk_execute_rq - insert a request into queue for execution 1205 - * @bd_disk: matching gendisk 1206 1207 * @rq: request to insert 1207 1208 * @at_head: insert request at head or tail of queue 1208 1209 * ··· 1210 1213 * for execution and wait for completion. 1211 1214 * Return: The blk_status_t result provided to blk_mq_end_request(). 1212 1215 */ 1213 - blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, 1214 - int at_head) 1216 + blk_status_t blk_execute_rq(struct request *rq, bool at_head) 1215 1217 { 1216 1218 DECLARE_COMPLETION_ONSTACK(wait); 1217 1219 unsigned long hang_check; 1218 1220 1219 1221 rq->end_io_data = &wait; 1220 - blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq); 1222 + blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq); 1221 1223 1222 1224 /* Prevent hang_check timer from firing at us during very long I/O */ 1223 1225 hang_check = sysctl_hung_task_timeout_secs;
+1 -1
block/bsg-lib.c
··· 92 92 goto out_unmap_bidi_rq; 93 93 94 94 bio = rq->bio; 95 - blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL)); 95 + blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL)); 96 96 97 97 /* 98 98 * The assignments below don't make much sense, but are kept for
+1 -1
drivers/block/mtip32xx/mtip32xx.c
··· 1015 1015 rq->timeout = timeout; 1016 1016 1017 1017 /* insert request and run queue */ 1018 - blk_execute_rq(NULL, rq, true); 1018 + blk_execute_rq(rq, true); 1019 1019 1020 1020 if (int_cmd->status) { 1021 1021 dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
+1 -1
drivers/block/paride/pd.c
··· 781 781 req = blk_mq_rq_to_pdu(rq); 782 782 783 783 req->func = func; 784 - blk_execute_rq(disk->gd, rq, 0); 784 + blk_execute_rq(rq, false); 785 785 blk_mq_free_request(rq); 786 786 return 0; 787 787 }
+1 -1
drivers/block/pktcdvd.c
··· 722 722 if (cgc->quiet) 723 723 rq->rq_flags |= RQF_QUIET; 724 724 725 - blk_execute_rq(pd->bdev->bd_disk, rq, 0); 725 + blk_execute_rq(rq, false); 726 726 if (scsi_req(rq)->result) 727 727 ret = -EIO; 728 728 out:
+2 -2
drivers/block/sx8.c
··· 540 540 spin_unlock_irq(&host->lock); 541 541 542 542 DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag); 543 - blk_execute_rq_nowait(NULL, rq, true, NULL); 543 + blk_execute_rq_nowait(rq, true, NULL); 544 544 545 545 return 0; 546 546 ··· 579 579 crq->msg_bucket = (u32) rc; 580 580 581 581 DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag); 582 - blk_execute_rq_nowait(NULL, rq, true, NULL); 582 + blk_execute_rq_nowait(rq, true, NULL); 583 583 584 584 return 0; 585 585 }
+1 -1
drivers/block/virtio_blk.c
··· 384 384 if (err) 385 385 goto out; 386 386 387 - blk_execute_rq(vblk->disk, req, false); 387 + blk_execute_rq(req, false); 388 388 err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req))); 389 389 out: 390 390 blk_mq_free_request(req);
+5 -5
drivers/mmc/core/block.c
··· 264 264 goto out_put; 265 265 } 266 266 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; 267 - blk_execute_rq(NULL, req, 0); 267 + blk_execute_rq(req, false); 268 268 ret = req_to_mmc_queue_req(req)->drv_op_result; 269 269 blk_mq_free_request(req); 270 270 ··· 657 657 rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; 658 658 req_to_mmc_queue_req(req)->drv_op_data = idatas; 659 659 req_to_mmc_queue_req(req)->ioc_count = 1; 660 - blk_execute_rq(NULL, req, 0); 660 + blk_execute_rq(req, false); 661 661 ioc_err = req_to_mmc_queue_req(req)->drv_op_result; 662 662 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); 663 663 blk_mq_free_request(req); ··· 726 726 rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; 727 727 req_to_mmc_queue_req(req)->drv_op_data = idata; 728 728 req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; 729 - blk_execute_rq(NULL, req, 0); 729 + blk_execute_rq(req, false); 730 730 ioc_err = req_to_mmc_queue_req(req)->drv_op_result; 731 731 732 732 /* copy to user if data and response */ ··· 2743 2743 if (IS_ERR(req)) 2744 2744 return PTR_ERR(req); 2745 2745 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; 2746 - blk_execute_rq(NULL, req, 0); 2746 + blk_execute_rq(req, false); 2747 2747 ret = req_to_mmc_queue_req(req)->drv_op_result; 2748 2748 if (ret >= 0) { 2749 2749 *val = ret; ··· 2782 2782 } 2783 2783 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; 2784 2784 req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; 2785 - blk_execute_rq(NULL, req, 0); 2785 + blk_execute_rq(req, false); 2786 2786 err = req_to_mmc_queue_req(req)->drv_op_result; 2787 2787 blk_mq_free_request(req); 2788 2788 if (err) {
+2 -2
drivers/nvme/host/core.c
··· 1056 1056 { 1057 1057 blk_status_t status; 1058 1058 1059 - status = blk_execute_rq(disk, rq, at_head); 1059 + status = blk_execute_rq(rq, at_head); 1060 1060 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) 1061 1061 return -EINTR; 1062 1062 if (nvme_req(rq)->status) ··· 1283 1283 1284 1284 rq->timeout = ctrl->kato * HZ; 1285 1285 rq->end_io_data = ctrl; 1286 - blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); 1286 + blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io); 1287 1287 } 1288 1288 1289 1289 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+3 -4
drivers/nvme/host/pci.c
··· 1371 1371 } 1372 1372 1373 1373 abort_req->end_io_data = NULL; 1374 - blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio); 1374 + blk_execute_rq_nowait(abort_req, false, abort_endio); 1375 1375 1376 1376 /* 1377 1377 * The aborted req will be completed on receiving the abort req. ··· 2416 2416 req->end_io_data = nvmeq; 2417 2417 2418 2418 init_completion(&nvmeq->delete_done); 2419 - blk_execute_rq_nowait(NULL, req, false, 2420 - opcode == nvme_admin_delete_cq ? 2421 - nvme_del_cq_end : nvme_del_queue_end); 2419 + blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ? 2420 + nvme_del_cq_end : nvme_del_queue_end); 2422 2421 return 0; 2423 2422 } 2424 2423
+1 -2
drivers/nvme/target/passthru.c
··· 284 284 schedule_work(&req->p.work); 285 285 } else { 286 286 rq->end_io_data = req; 287 - blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0, 288 - nvmet_passthru_req_done); 287 + blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done); 289 288 } 290 289 291 290 if (ns)
+1 -1
drivers/scsi/scsi_bsg.c
··· 60 60 goto out_free_cmd; 61 61 62 62 bio = rq->bio; 63 - blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL)); 63 + blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL)); 64 64 65 65 /* 66 66 * fill in all the output members
+1 -1
drivers/scsi/scsi_error.c
··· 2040 2040 req->timeout = 10 * HZ; 2041 2041 rq->retries = 5; 2042 2042 2043 - blk_execute_rq_nowait(NULL, req, 1, eh_lock_door_done); 2043 + blk_execute_rq_nowait(req, true, eh_lock_door_done); 2044 2044 } 2045 2045 2046 2046 /**
+2 -2
drivers/scsi/scsi_ioctl.c
··· 483 483 484 484 start_time = jiffies; 485 485 486 - blk_execute_rq(disk, rq, at_head); 486 + blk_execute_rq(rq, at_head); 487 487 488 488 hdr->duration = jiffies_to_msecs(jiffies - start_time); 489 489 ··· 620 620 goto error; 621 621 } 622 622 623 - blk_execute_rq(disk, rq, 0); 623 + blk_execute_rq(rq, false); 624 624 625 625 err = req->result & 0xff; /* only 8 bit SCSI status */ 626 626 if (err) {
+1 -1
drivers/scsi/scsi_lib.c
··· 241 241 /* 242 242 * head injection *required* here otherwise quiesce won't work 243 243 */ 244 - blk_execute_rq(NULL, req, 1); 244 + blk_execute_rq(req, true); 245 245 246 246 /* 247 247 * Some devices (USB mass-storage in particular) may transfer
+1 -1
drivers/scsi/sg.c
··· 833 833 834 834 srp->rq->timeout = timeout; 835 835 kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */ 836 - blk_execute_rq_nowait(NULL, srp->rq, at_head, sg_rq_end_io); 836 + blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io); 837 837 return 0; 838 838 } 839 839
+1 -1
drivers/scsi/sr.c
··· 994 994 rq->timeout = 60 * HZ; 995 995 bio = rq->bio; 996 996 997 - blk_execute_rq(disk, rq, 0); 997 + blk_execute_rq(rq, false); 998 998 if (scsi_req(rq)->result) { 999 999 struct scsi_sense_hdr sshdr; 1000 1000
+1 -1
drivers/scsi/st.c
··· 581 581 rq->retries = retries; 582 582 req->end_io_data = SRpnt; 583 583 584 - blk_execute_rq_nowait(NULL, req, 1, st_scsi_execute_end); 584 + blk_execute_rq_nowait(req, true, st_scsi_execute_end); 585 585 return 0; 586 586 } 587 587
+2 -2
drivers/scsi/ufs/ufshpb.c
··· 677 677 ufshpb_set_unmap_cmd(rq->cmd, rgn); 678 678 rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH; 679 679 680 - blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn); 680 + blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn); 681 681 682 682 hpb->stats.umap_req_cnt++; 683 683 } ··· 719 719 map_req->rb.srgn_idx, mem_size); 720 720 rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH; 721 721 722 - blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn); 722 + blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn); 723 723 724 724 hpb->stats.map_req_cnt++; 725 725 return 0;
+1 -1
drivers/target/target_core_pscsi.c
··· 1005 1005 req->timeout = PS_TIMEOUT_OTHER; 1006 1006 scsi_req(req)->retries = PS_RETRY; 1007 1007 1008 - blk_execute_rq_nowait(NULL, req, (cmd->sam_task_attr == TCM_HEAD_TAG), 1008 + blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG, 1009 1009 pscsi_req_done); 1010 1010 1011 1011 return 0;
+3 -4
include/linux/blk-mq.h
··· 924 924 int blk_rq_map_kern(struct request_queue *, struct request *, void *, 925 925 unsigned int, gfp_t); 926 926 int blk_rq_append_bio(struct request *rq, struct bio *bio); 927 - void blk_execute_rq_nowait(struct gendisk *, struct request *, int, 928 - rq_end_io_fn *); 929 - blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, 930 - int at_head); 927 + void blk_execute_rq_nowait(struct request *rq, bool at_head, 928 + rq_end_io_fn *end_io); 929 + blk_status_t blk_execute_rq(struct request *rq, bool at_head); 931 930 932 931 struct req_iterator { 933 932 struct bvec_iter iter;