Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove blk_{get,put}_request

These are now pointless wrappers around blk_mq_{alloc,free}_request,
so remove them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20211025070517.1548584-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe.
0bf6d96c 4845012e

+45 -69
-21
block/blk-core.c
··· 597 597 } 598 598 EXPORT_SYMBOL(blk_get_queue); 599 599 600 - /** 601 - * blk_get_request - allocate a request 602 - * @q: request queue to allocate a request for 603 - * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC. 604 - * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT. 605 - */ 606 - struct request *blk_get_request(struct request_queue *q, unsigned int op, 607 - blk_mq_req_flags_t flags) 608 - { 609 - WARN_ON_ONCE(op & REQ_NOWAIT); 610 - WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM)); 611 - return blk_mq_alloc_request(q, op, flags); 612 - } 613 - EXPORT_SYMBOL(blk_get_request); 614 - 615 - void blk_put_request(struct request *req) 616 - { 617 - blk_mq_free_request(req); 618 - } 619 - EXPORT_SYMBOL(blk_put_request); 620 - 621 600 static void handle_bad_sector(struct bio *bio, sector_t maxsector) 622 601 { 623 602 char b[BDEVNAME_SIZE];
+2 -2
drivers/block/paride/pd.c
··· 775 775 struct request *rq; 776 776 struct pd_req *req; 777 777 778 - rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0); 778 + rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0); 779 779 if (IS_ERR(rq)) 780 780 return PTR_ERR(rq); 781 781 req = blk_mq_rq_to_pdu(rq); 782 782 783 783 req->func = func; 784 784 blk_execute_rq(disk->gd, rq, 0); 785 - blk_put_request(rq); 785 + blk_mq_free_request(rq); 786 786 return 0; 787 787 } 788 788
+1 -1
drivers/block/pktcdvd.c
··· 726 726 if (scsi_req(rq)->result) 727 727 ret = -EIO; 728 728 out: 729 - blk_put_request(rq); 729 + blk_mq_free_request(rq); 730 730 return ret; 731 731 } 732 732
+2 -2
drivers/block/virtio_blk.c
··· 312 312 struct request *req; 313 313 int err; 314 314 315 - req = blk_get_request(q, REQ_OP_DRV_IN, 0); 315 + req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0); 316 316 if (IS_ERR(req)) 317 317 return PTR_ERR(req); 318 318 ··· 323 323 blk_execute_rq(vblk->disk, req, false); 324 324 err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req))); 325 325 out: 326 - blk_put_request(req); 326 + blk_mq_free_request(req); 327 327 return err; 328 328 } 329 329
+2 -2
drivers/md/dm-mpath.c
··· 530 530 531 531 bdev = pgpath->path.dev->bdev; 532 532 q = bdev_get_queue(bdev); 533 - clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, 533 + clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE, 534 534 BLK_MQ_REQ_NOWAIT); 535 535 if (IS_ERR(clone)) { 536 536 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ ··· 579 579 clone->io_start_time_ns); 580 580 } 581 581 582 - blk_put_request(clone); 582 + blk_mq_free_request(clone); 583 583 } 584 584 585 585 /*
+10 -10
drivers/mmc/core/block.c
··· 258 258 mq = &md->queue; 259 259 260 260 /* Dispatch locking to the block layer */ 261 - req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0); 261 + req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0); 262 262 if (IS_ERR(req)) { 263 263 count = PTR_ERR(req); 264 264 goto out_put; ··· 266 266 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; 267 267 blk_execute_rq(NULL, req, 0); 268 268 ret = req_to_mmc_queue_req(req)->drv_op_result; 269 - blk_put_request(req); 269 + blk_mq_free_request(req); 270 270 271 271 if (!ret) { 272 272 pr_info("%s: Locking boot partition ro until next power on\n", ··· 646 646 * Dispatch the ioctl() into the block request queue. 647 647 */ 648 648 mq = &md->queue; 649 - req = blk_get_request(mq->queue, 649 + req = blk_mq_alloc_request(mq->queue, 650 650 idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); 651 651 if (IS_ERR(req)) { 652 652 err = PTR_ERR(req); ··· 660 660 blk_execute_rq(NULL, req, 0); 661 661 ioc_err = req_to_mmc_queue_req(req)->drv_op_result; 662 662 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); 663 - blk_put_request(req); 663 + blk_mq_free_request(req); 664 664 665 665 cmd_done: 666 666 kfree(idata->buf); ··· 716 716 * Dispatch the ioctl()s into the block request queue. 717 717 */ 718 718 mq = &md->queue; 719 - req = blk_get_request(mq->queue, 719 + req = blk_mq_alloc_request(mq->queue, 720 720 idata[0]->ic.write_flag ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); 721 721 if (IS_ERR(req)) { 722 722 err = PTR_ERR(req); ··· 733 733 for (i = 0; i < num_of_cmds && !err; i++) 734 734 err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]); 735 735 736 - blk_put_request(req); 736 + blk_mq_free_request(req); 737 737 738 738 cmd_err: 739 739 for (i = 0; i < num_of_cmds; i++) { ··· 2730 2730 int ret; 2731 2731 2732 2732 /* Ask the block layer about the card status */ 2733 - req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0); 2733 + req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0); 2734 2734 if (IS_ERR(req)) 2735 2735 return PTR_ERR(req); 2736 2736 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; ··· 2740 2740 *val = ret; 2741 2741 ret = 0; 2742 2742 } 2743 - blk_put_request(req); 2743 + blk_mq_free_request(req); 2744 2744 2745 2745 return ret; 2746 2746 } ··· 2766 2766 return -ENOMEM; 2767 2767 2768 2768 /* Ask the block layer for the EXT CSD */ 2769 - req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0); 2769 + req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0); 2770 2770 if (IS_ERR(req)) { 2771 2771 err = PTR_ERR(req); 2772 2772 goto out_free; ··· 2775 2775 req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; 2776 2776 blk_execute_rq(NULL, req, 0); 2777 2777 err = req_to_mmc_queue_req(req)->drv_op_result; 2778 - blk_put_request(req); 2778 + blk_mq_free_request(req); 2779 2779 if (err) { 2780 2780 pr_err("FAILED %d\n", err); 2781 2781 goto out_free;
+1 -1
drivers/scsi/scsi_bsg.c
··· 95 95 out_free_cmd: 96 96 scsi_req_free_cmd(scsi_req(rq)); 97 97 out_put_request: 98 - blk_put_request(rq); 98 + blk_mq_free_request(rq); 99 99 return ret; 100 100 } 101 101
+1 -1
drivers/scsi/scsi_error.c
··· 1979 1979 1980 1980 static void eh_lock_door_done(struct request *req, blk_status_t status) 1981 1981 { 1982 - blk_put_request(req); 1982 + blk_mq_free_request(req); 1983 1983 } 1984 1984 1985 1985 /**
+2 -2
drivers/scsi/scsi_ioctl.c
··· 490 490 out_free_cdb: 491 491 scsi_req_free_cmd(req); 492 492 out_put_request: 493 - blk_put_request(rq); 493 + blk_mq_free_request(rq); 494 494 return ret; 495 495 } 496 496 ··· 634 634 } 635 635 636 636 error: 637 - blk_put_request(rq); 637 + blk_mq_free_request(rq); 638 638 639 639 error_free_buffer: 640 640 kfree(buffer);
+2 -2
drivers/scsi/scsi_lib.c
··· 260 260 scsi_normalize_sense(rq->sense, rq->sense_len, sshdr); 261 261 ret = rq->result; 262 262 out: 263 - blk_put_request(req); 263 + blk_mq_free_request(req); 264 264 265 265 return ret; 266 266 } ··· 1100 1100 { 1101 1101 struct request *rq; 1102 1102 1103 - rq = blk_get_request(q, op, flags); 1103 + rq = blk_mq_alloc_request(q, op, flags); 1104 1104 if (!IS_ERR(rq)) 1105 1105 scsi_initialize_rq(rq); 1106 1106 return rq;
+3 -3
drivers/scsi/sg.c
··· 815 815 if (atomic_read(&sdp->detaching)) { 816 816 if (srp->bio) { 817 817 scsi_req_free_cmd(scsi_req(srp->rq)); 818 - blk_put_request(srp->rq); 818 + blk_mq_free_request(srp->rq); 819 819 srp->rq = NULL; 820 820 } 821 821 ··· 1390 1390 */ 1391 1391 srp->rq = NULL; 1392 1392 scsi_req_free_cmd(scsi_req(rq)); 1393 - blk_put_request(rq); 1393 + blk_mq_free_request(rq); 1394 1394 1395 1395 write_lock_irqsave(&sfp->rq_list_lock, iflags); 1396 1396 if (unlikely(srp->orphan)) { ··· 1830 1830 1831 1831 if (srp->rq) { 1832 1832 scsi_req_free_cmd(scsi_req(srp->rq)); 1833 - blk_put_request(srp->rq); 1833 + blk_mq_free_request(srp->rq); 1834 1834 } 1835 1835 1836 1836 if (srp->res_used)
+1 -1
drivers/scsi/sr.c
··· 1003 1003 if (blk_rq_unmap_user(bio)) 1004 1004 ret = -EFAULT; 1005 1005 out_put_request: 1006 - blk_put_request(rq); 1006 + blk_mq_free_request(rq); 1007 1007 return ret; 1008 1008 } 1009 1009
+2 -2
drivers/scsi/st.c
··· 530 530 complete(SRpnt->waiting); 531 531 532 532 blk_rq_unmap_user(tmp); 533 - blk_put_request(req); 533 + blk_mq_free_request(req); 534 534 } 535 535 536 536 static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd, ··· 557 557 err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, 558 558 GFP_KERNEL); 559 559 if (err) { 560 - blk_put_request(req); 560 + blk_mq_free_request(req); 561 561 return err; 562 562 } 563 563 }
+10 -10
drivers/scsi/ufs/ufshcd.c
··· 2925 2925 * Even though we use wait_event() which sleeps indefinitely, 2926 2926 * the maximum wait time is bounded by SCSI request timeout. 2927 2927 */ 2928 - req = blk_get_request(q, REQ_OP_DRV_OUT, 0); 2928 + req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); 2929 2929 if (IS_ERR(req)) { 2930 2930 err = PTR_ERR(req); 2931 2931 goto out_unlock; ··· 2952 2952 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 2953 2953 2954 2954 out: 2955 - blk_put_request(req); 2955 + blk_mq_free_request(req); 2956 2956 out_unlock: 2957 2957 up_read(&hba->clk_scaling_lock); 2958 2958 return err; ··· 6517 6517 int task_tag, err; 6518 6518 6519 6519 /* 6520 - * blk_get_request() is used here only to get a free tag. 6520 + * blk_mq_alloc_request() is used here only to get a free tag. 6521 6521 */ 6522 - req = blk_get_request(q, REQ_OP_DRV_OUT, 0); 6522 + req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); 6523 6523 if (IS_ERR(req)) 6524 6524 return PTR_ERR(req); 6525 6525 ··· 6575 6575 spin_unlock_irqrestore(hba->host->host_lock, flags); 6576 6576 6577 6577 ufshcd_release(hba); 6578 - blk_put_request(req); 6578 + blk_mq_free_request(req); 6579 6579 6580 6580 return err; 6581 6581 } ··· 6660 6660 6661 6661 down_read(&hba->clk_scaling_lock); 6662 6662 6663 - req = blk_get_request(q, REQ_OP_DRV_OUT, 0); 6663 + req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); 6664 6664 if (IS_ERR(req)) { 6665 6665 err = PTR_ERR(req); 6666 6666 goto out_unlock; ··· 6741 6741 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 6742 6742 6743 6743 out: 6744 - blk_put_request(req); 6744 + blk_mq_free_request(req); 6745 6745 out_unlock: 6746 6746 up_read(&hba->clk_scaling_lock); 6747 6747 return err; ··· 7912 7912 if (error != BLK_STS_OK) 7913 7913 pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error); 7914 7914 kfree(rq->end_io_data); 7915 - blk_put_request(rq); 7915 + blk_mq_free_request(rq); 7916 7916 } 7917 7917 7918 7918 static int ··· 7932 7932 if (!buffer) 7933 7933 return -ENOMEM; 7934 7934 7935 - req = 
blk_get_request(sdev->request_queue, REQ_OP_DRV_IN, 7935 + req = blk_mq_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 7936 7936 /*flags=*/BLK_MQ_REQ_PM); 7937 7937 if (IS_ERR(req)) { 7938 7938 ret = PTR_ERR(req); ··· 7957 7957 return 0; 7958 7958 7959 7959 out_put: 7960 - blk_put_request(req); 7960 + blk_mq_free_request(req); 7961 7961 out_free: 7962 7962 kfree(buffer); 7963 7963 return ret;
+4 -4
drivers/scsi/ufs/ufshpb.c
··· 564 564 int _read_id; 565 565 int ret = 0; 566 566 567 - req = blk_get_request(cmd->device->request_queue, 567 + req = blk_mq_alloc_request(cmd->device->request_queue, 568 568 REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT); 569 569 if (IS_ERR(req)) 570 570 return -EAGAIN; ··· 592 592 ufshpb_put_pre_req(hpb, pre_req); 593 593 unlock_out: 594 594 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); 595 - blk_put_request(req); 595 + blk_mq_free_request(req); 596 596 return ret; 597 597 } 598 598 ··· 721 721 return NULL; 722 722 723 723 retry: 724 - req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir, 724 + req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir, 725 725 BLK_MQ_REQ_NOWAIT); 726 726 727 727 if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { ··· 745 745 746 746 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq) 747 747 { 748 - blk_put_request(rq->req); 748 + blk_mq_free_request(rq->req); 749 749 kmem_cache_free(hpb->map_req_cache, rq); 750 750 } 751 751
+2 -2
drivers/target/target_core_pscsi.c
··· 1011 1011 return 0; 1012 1012 1013 1013 fail_put_request: 1014 - blk_put_request(req); 1014 + blk_mq_free_request(req); 1015 1015 fail: 1016 1016 kfree(pt); 1017 1017 return ret; ··· 1066 1066 break; 1067 1067 } 1068 1068 1069 - blk_put_request(req); 1069 + blk_mq_free_request(req); 1070 1070 kfree(pt); 1071 1071 } 1072 1072
-3
include/linux/blk-mq.h
··· 892 892 } 893 893 894 894 void blk_rq_init(struct request_queue *q, struct request *rq); 895 - void blk_put_request(struct request *rq); 896 - struct request *blk_get_request(struct request_queue *q, unsigned int op, 897 - blk_mq_req_flags_t flags); 898 895 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 899 896 struct bio_set *bs, gfp_t gfp_mask, 900 897 int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);