
blk-mq: switch ->queue_rq return value to blk_status_t

Use the same values for request completion errors as the return value
from ->queue_rq. BLK_STS_RESOURCE is special cased to cause a requeue,
and all the others are completed as-is.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

Authored by Christoph Hellwig, committed by Jens Axboe
Commit fc17b653 (parent 2a842aca)
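
With this change, a driver's ->queue_rq handler returns a blk_status_t directly: BLK_STS_OK means the request was queued, BLK_STS_RESOURCE asks blk-mq to requeue it and retry later, and any other status causes the request to be completed as an error. A minimal sketch of the new contract follows; "mydrv" and mydrv_submit() are hypothetical stand-ins for a real driver, while the blk_status_t values and blk-mq helpers are the real API.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct mydrv;                                           /* hypothetical device state */
int mydrv_submit(struct mydrv *drv, struct request *rq); /* hypothetical submit helper */

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct mydrv *drv = hctx->queue->queuedata;
        int err;

        blk_mq_start_request(bd->rq);

        err = mydrv_submit(drv, bd->rq);
        if (err == -ENOMEM || err == -EAGAIN)
                return BLK_STS_RESOURCE;        /* blk-mq requeues and retries later */
        if (err)
                return BLK_STS_IOERR;           /* blk-mq ends the request in error */
        return BLK_STS_OK;                      /* queued; completed asynchronously */
}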

+133 -144
+19 -22
block/blk-mq.c
···
 {
         struct blk_mq_hw_ctx *hctx;
         struct request *rq;
-        int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+        int errors, queued;
 
         if (list_empty(list))
                 return false;
···
         errors = queued = 0;
         do {
                 struct blk_mq_queue_data bd;
+                blk_status_t ret;
 
                 rq = list_first_entry(list, struct request, queuelist);
                 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
···
                 }
 
                 ret = q->mq_ops->queue_rq(hctx, &bd);
-                switch (ret) {
-                case BLK_MQ_RQ_QUEUE_OK:
-                        queued++;
-                        break;
-                case BLK_MQ_RQ_QUEUE_BUSY:
+                if (ret == BLK_STS_RESOURCE) {
                         blk_mq_put_driver_tag_hctx(hctx, rq);
                         list_add(&rq->queuelist, list);
                         __blk_mq_requeue_request(rq);
                         break;
-                default:
-                        pr_err("blk-mq: bad return on queue: %d\n", ret);
-                case BLK_MQ_RQ_QUEUE_ERROR:
-                        errors++;
-                        blk_mq_end_request(rq, BLK_STS_IOERR);
-                        break;
                 }
 
-                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-                        break;
+                if (unlikely(ret != BLK_STS_OK)) {
+                        errors++;
+                        blk_mq_end_request(rq, BLK_STS_IOERR);
+                        continue;
+                }
+
+                queued++;
         } while (!list_empty(list));
 
         hctx->dispatched[queued_to_index(queued)]++;
···
          * - blk_mq_run_hw_queue() checks whether or not a queue has
          *   been stopped before rerunning a queue.
          * - Some but not all block drivers stop a queue before
-         *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+         *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
          *   and dm-rq.
          */
         if (!blk_mq_sched_needs_restart(hctx) &&
···
         };
         struct blk_mq_hw_ctx *hctx;
         blk_qc_t new_cookie;
-        int ret;
+        blk_status_t ret;
 
         if (q->elevator)
                 goto insert;
···
          * would have done
          */
         ret = q->mq_ops->queue_rq(hctx, &bd);
-        if (ret == BLK_MQ_RQ_QUEUE_OK) {
+        switch (ret) {
+        case BLK_STS_OK:
                 *cookie = new_cookie;
                 return;
-        }
-
-        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+        case BLK_STS_RESOURCE:
+                __blk_mq_requeue_request(rq);
+                goto insert;
+        default:
                 *cookie = BLK_QC_T_NONE;
-                blk_mq_end_request(rq, BLK_STS_IOERR);
+                blk_mq_end_request(rq, ret);
                 return;
         }
 
-        __blk_mq_requeue_request(rq);
 insert:
         blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
+3 -3
drivers/block/loop.c
···
 EXPORT_SYMBOL(loop_register_transfer);
 EXPORT_SYMBOL(loop_unregister_transfer);
 
-static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
···
         blk_mq_start_request(bd->rq);
 
         if (lo->lo_state != Lo_bound)
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
 
         switch (req_op(cmd->rq)) {
         case REQ_OP_FLUSH:
···
 
         kthread_queue_work(&lo->worker, &cmd->work);
 
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
+8 -9
drivers/block/mtip32xx/mtip32xx.c
···
         return false;
 }
 
-static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
-                struct request *rq)
+static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+                struct request *rq)
 {
         struct driver_data *dd = hctx->queue->queuedata;
         struct mtip_int_cmd *icmd = rq->special;
···
         struct mtip_cmd_sg *command_sg;
 
         if (mtip_commands_active(dd->port))
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
 
         /* Populate the SG list */
         cmd->command_header->opts =
···
 
         blk_mq_start_request(rq);
         mtip_issue_non_ncq_command(dd->port, rq->tag);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return 0;
 }
 
-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct request *rq = bd->rq;
···
                 return mtip_issue_reserved_cmd(hctx, rq);
 
         if (unlikely(mtip_check_unal_depth(hctx, rq)))
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
 
         blk_mq_start_request(rq);
 
         ret = mtip_submit_request(hctx, rq);
         if (likely(!ret))
-                return BLK_MQ_RQ_QUEUE_OK;
-
-        return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_OK;
+        return BLK_STS_IOERR;
 }
 
 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
+4 -8
drivers/block/nbd.c
···
                         nsock->pending = req;
                         nsock->sent = sent;
                 }
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         }
         dev_err_ratelimited(disk_to_dev(nbd->disk),
                 "Send control failed (result %d)\n", result);
···
                  */
                 nsock->pending = req;
                 nsock->sent = sent;
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         }
         dev_err(disk_to_dev(nbd->disk),
                 "Send data failed (result %d)\n",
···
         return ret;
 }
 
-static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
···
          * appropriate.
          */
         ret = nbd_handle_cmd(cmd, hctx->queue_num);
-        if (ret < 0)
-                ret = BLK_MQ_RQ_QUEUE_ERROR;
-        if (!ret)
-                ret = BLK_MQ_RQ_QUEUE_OK;
         complete(&cmd->send_complete);
 
-        return ret;
+        return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+2 -2
drivers/block/null_blk.c
···
         }
 }
 
-static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
···
         blk_mq_start_request(bd->rq);
 
         null_handle_cmd(cmd);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+2 -2
drivers/block/rbd.c
···
         blk_mq_end_request(rq, errno_to_blk_status(result));
 }
 
-static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct request *rq = bd->rq;
         struct work_struct *work = blk_mq_rq_to_pdu(rq);
 
         queue_work(rbd_wq, work);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
 static void rbd_free_disk(struct rbd_device *rbd_dev)
+5 -5
drivers/block/virtio_blk.c
···
         spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 }
 
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct virtio_blk *vblk = hctx->queue->queuedata;
···
                 break;
         default:
                 WARN_ON_ONCE(1);
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }
 
         vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
···
                 /* Out of mem doesn't actually happen, since we fall back
                  * to direct descriptors */
                 if (err == -ENOMEM || err == -ENOSPC)
-                        return BLK_MQ_RQ_QUEUE_BUSY;
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                        return BLK_STS_RESOURCE;
+                return BLK_STS_IOERR;
         }
 
         if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
···
 
         if (notify)
                 virtqueue_notify(vblk->vqs[qid].vq);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
+4 -4
drivers/block/xen-blkfront.c
···
                                         !info->feature_fua));
 }
 
-static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *qd)
 {
         unsigned long flags;
···
 
         flush_requests(rinfo);
         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 
 out_err:
         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-        return BLK_MQ_RQ_QUEUE_ERROR;
+        return BLK_STS_IOERR;
 
 out_busy:
         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
         blk_mq_stop_hw_queue(hctx);
-        return BLK_MQ_RQ_QUEUE_BUSY;
+        return BLK_STS_RESOURCE;
 }
 
 static void blkif_complete_rq(struct request *rq)
+4 -4
drivers/md/dm-rq.c
···
         return __dm_rq_init_rq(set->driver_data, rq);
 }
 
-static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct request *rq = bd->rq;
···
         }
 
         if (ti->type->busy && ti->type->busy(ti))
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
 
         dm_start_request(md, rq);
 
···
                 rq_end_stats(md, rq);
                 rq_completed(md, rq_data_dir(rq), false);
                 blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         }
 
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
 static const struct blk_mq_ops dm_mq_ops = {
+3 -3
drivers/mtd/ubi/block.c
···
         blk_mq_end_request(req, errno_to_blk_status(ret));
 }
 
-static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct request *req = bd->rq;
···
         case REQ_OP_READ:
                 ubi_sgl_init(&pdu->usgl);
                 queue_work(dev->wq, &pdu->work);
-                return BLK_MQ_RQ_QUEUE_OK;
+                return BLK_STS_OK;
         default:
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }
 
 }
+7 -7
drivers/nvme/host/core.c
···
         cmnd->common.nsid = cpu_to_le32(ns->ns_id);
 }
 
-static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                 struct nvme_command *cmnd)
 {
         unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
···
 
         range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
         if (!range)
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
 
         __rq_for_each_bio(bio, req) {
                 u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
···
 
         if (WARN_ON_ONCE(n != segments)) {
                 kfree(range);
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }
 
         memset(cmnd, 0, sizeof(*cmnd));
···
         req->special_vec.bv_len = sizeof(*range) * segments;
         req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
···
         cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 }
 
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                 struct nvme_command *cmd)
 {
-        int ret = BLK_MQ_RQ_QUEUE_OK;
+        blk_status_t ret = BLK_STS_OK;
 
         if (!(req->rq_flags & RQF_DONTPREP)) {
                 nvme_req(req)->retries = 0;
···
                 break;
         default:
                 WARN_ON_ONCE(1);
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }
 
         cmd->common.command_id = req->tag;
+12 -11
drivers/nvme/host/fc.c
···
  * level FC exchange resource that is also outstanding. This must be
  * considered in all cleanup operations.
  */
-static int
+static blk_status_t
 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                 struct nvme_fc_fcp_op *op, u32 data_len,
                 enum nvmefc_fcp_datadir io_dir)
···
          * the target device is present
          */
         if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
 
         if (!nvme_fc_ctrl_get(ctrl))
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
 
         /* format the FC-NVME CMD IU and fcp_req */
         cmdiu->connection_id = cpu_to_be64(queue->connection_id);
···
                 if (ret < 0) {
                         nvme_cleanup_cmd(op->rq);
                         nvme_fc_ctrl_put(ctrl);
-                        return (ret == -ENOMEM || ret == -EAGAIN) ?
-                                BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+                        if (ret == -ENOMEM || ret == -EAGAIN)
+                                return BLK_STS_RESOURCE;
+                        return BLK_STS_IOERR;
                 }
         }
 
···
                 nvme_fc_ctrl_put(ctrl);
 
                 if (ret != -EBUSY)
-                        return BLK_MQ_RQ_QUEUE_ERROR;
+                        return BLK_STS_IOERR;
 
                 if (op->rq) {
                         blk_mq_stop_hw_queues(op->rq->q);
                         blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
                 }
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         }
 
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
-static int
+static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
···
         struct nvme_command *sqe = &cmdiu->sqe;
         enum nvmefc_fcp_datadir io_dir;
         u32 data_len;
-        int ret;
+        blk_status_t ret;
 
         ret = nvme_setup_cmd(ns, rq, sqe);
         if (ret)
···
         struct nvme_fc_fcp_op *aen_op;
         unsigned long flags;
         bool terminating = false;
-        int ret;
+        blk_status_t ret;
 
         if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
                 return;
+1 -1
drivers/nvme/host/nvme.h
···
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
                 struct nvme_command *cmd, unsigned int flags, int qid);
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                 struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                 void *buf, unsigned bufflen);
+20 -22
drivers/nvme/host/pci.c
···
         return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
         int nseg = blk_rq_nr_phys_segments(rq);
···
         if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                 iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
                 if (!iod->sg)
-                        return BLK_MQ_RQ_QUEUE_BUSY;
+                        return BLK_STS_RESOURCE;
         } else {
                 iod->sg = iod->inline_sg;
         }
···
         iod->nents = 0;
         iod->length = size;
 
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
···
         return true;
 }
 
-static int nvme_map_data(struct nvme_dev *dev, struct request *req,
+static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                 struct nvme_command *cmnd)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct request_queue *q = req->q;
         enum dma_data_direction dma_dir = rq_data_dir(req) ?
                         DMA_TO_DEVICE : DMA_FROM_DEVICE;
-        int ret = BLK_MQ_RQ_QUEUE_ERROR;
+        blk_status_t ret = BLK_STS_IOERR;
 
         sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
         iod->nents = blk_rq_map_sg(q, req, iod->sg);
         if (!iod->nents)
                 goto out;
 
-        ret = BLK_MQ_RQ_QUEUE_BUSY;
+        ret = BLK_STS_RESOURCE;
         if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
                                 DMA_ATTR_NO_WARN))
                 goto out;
···
         if (!nvme_setup_prps(dev, req))
                 goto out_unmap;
 
-        ret = BLK_MQ_RQ_QUEUE_ERROR;
+        ret = BLK_STS_IOERR;
         if (blk_integrity_rq(req)) {
                 if (blk_rq_count_integrity_sg(q, req->bio) != 1)
                         goto out_unmap;
···
         cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
         if (blk_integrity_rq(req))
                 cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 
 out_unmap:
         dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
···
 /*
  * NOTE: ns is NULL when called on the admin queue.
  */
-static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct nvme_ns *ns = hctx->queue->queuedata;
···
         struct nvme_dev *dev = nvmeq->dev;
         struct request *req = bd->rq;
         struct nvme_command cmnd;
-        int ret = BLK_MQ_RQ_QUEUE_OK;
+        blk_status_t ret = BLK_STS_OK;
 
         /*
          * If formated with metadata, require the block layer provide a buffer
···
          */
         if (ns && ns->ms && !blk_integrity_rq(req)) {
                 if (!(ns->pi_type && ns->ms == 8) &&
-                    !blk_rq_is_passthrough(req)) {
-                        blk_mq_end_request(req, BLK_STS_NOTSUPP);
-                        return BLK_MQ_RQ_QUEUE_OK;
-                }
+                    !blk_rq_is_passthrough(req))
+                        return BLK_STS_NOTSUPP;
         }
 
         ret = nvme_setup_cmd(ns, req, &cmnd);
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret)
                 return ret;
 
         ret = nvme_init_iod(req, dev);
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret)
                 goto out_free_cmd;
 
-        if (blk_rq_nr_phys_segments(req))
+        if (blk_rq_nr_phys_segments(req)) {
                 ret = nvme_map_data(dev, req, &cmnd);
-
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
-                goto out_cleanup_iod;
+                if (ret)
+                        goto out_cleanup_iod;
+        }
 
         blk_mq_start_request(req);
 
         spin_lock_irq(&nvmeq->q_lock);
         if (unlikely(nvmeq->cq_vector < 0)) {
-                ret = BLK_MQ_RQ_QUEUE_ERROR;
+                ret = BLK_STS_IOERR;
                 spin_unlock_irq(&nvmeq->q_lock);
                 goto out_cleanup_iod;
         }
         __nvme_submit_cmd(nvmeq, &cmnd);
         nvme_process_cq(nvmeq);
         spin_unlock_irq(&nvmeq->q_lock);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 out_cleanup_iod:
         nvme_free_iod(dev, req);
 out_free_cmd:
+14 -12
drivers/nvme/host/rdma.c
···
         return true;
 }
 
-static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct nvme_ns *ns = hctx->queue->queuedata;
···
         struct nvme_command *c = sqe->data;
         bool flush = false;
         struct ib_device *dev;
-        int ret;
+        blk_status_t ret;
+        int err;
 
         WARN_ON_ONCE(rq->tag < 0);
 
         if (!nvme_rdma_queue_is_ready(queue, rq))
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
 
         dev = queue->device->dev;
         ib_dma_sync_single_for_cpu(dev, sqe->dma,
                         sizeof(struct nvme_command), DMA_TO_DEVICE);
 
         ret = nvme_setup_cmd(ns, rq, c);
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret)
                 return ret;
 
         blk_mq_start_request(rq);
 
-        ret = nvme_rdma_map_data(queue, rq, c);
-        if (ret < 0) {
+        err = nvme_rdma_map_data(queue, rq, c);
+        if (err < 0) {
                 dev_err(queue->ctrl->ctrl.device,
-                        "Failed to map data (%d)\n", ret);
+                        "Failed to map data (%d)\n", err);
                 nvme_cleanup_cmd(rq);
                 goto err;
         }
···
 
         if (req_op(rq) == REQ_OP_FLUSH)
                 flush = true;
-        ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+        err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
                         req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
-        if (ret) {
+        if (err) {
                 nvme_rdma_unmap_data(queue, rq);
                 goto err;
         }
 
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 err:
-        return (ret == -ENOMEM || ret == -EAGAIN) ?
-                BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+        if (err == -ENOMEM || err == -EAGAIN)
+                return BLK_STS_RESOURCE;
+        return BLK_STS_IOERR;
 }
 
 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+8 -9
drivers/nvme/target/loop.c
···
         return BLK_EH_HANDLED;
 }
 
-static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct nvme_ns *ns = hctx->queue->queuedata;
         struct nvme_loop_queue *queue = hctx->driver_data;
         struct request *req = bd->rq;
         struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
-        int ret;
+        blk_status_t ret;
 
         ret = nvme_setup_cmd(ns, req, &iod->cmd);
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret)
                 return ret;
 
         iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
···
                 nvme_cleanup_cmd(req);
                 blk_mq_start_request(req);
                 nvme_loop_queue_response(&iod->req);
-                return BLK_MQ_RQ_QUEUE_OK;
+                return BLK_STS_OK;
         }
 
         if (blk_rq_bytes(req)) {
                 iod->sg_table.sgl = iod->first_sgl;
-                ret = sg_alloc_table_chained(&iod->sg_table,
+                if (sg_alloc_table_chained(&iod->sg_table,
                                 blk_rq_nr_phys_segments(req),
-                                iod->sg_table.sgl);
-                if (ret)
-                        return BLK_MQ_RQ_QUEUE_BUSY;
+                                iod->sg_table.sgl))
+                        return BLK_STS_RESOURCE;
 
                 iod->req.sg = iod->sg_table.sgl;
                 iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
···
         blk_mq_start_request(req);
 
         schedule_work(&iod->work);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+15 -15
drivers/scsi/scsi_lib.c
···
         blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
-static inline int prep_to_mq(int ret)
+static inline blk_status_t prep_to_mq(int ret)
 {
         switch (ret) {
         case BLKPREP_OK:
-                return BLK_MQ_RQ_QUEUE_OK;
+                return BLK_STS_OK;
         case BLKPREP_DEFER:
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         default:
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }
 }
···
         blk_mq_complete_request(cmd->request);
 }
 
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct request *req = bd->rq;
···
         struct scsi_device *sdev = q->queuedata;
         struct Scsi_Host *shost = sdev->host;
         struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-        int ret;
+        blk_status_t ret;
         int reason;
 
         ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret != BLK_STS_OK)
                 goto out;
 
-        ret = BLK_MQ_RQ_QUEUE_BUSY;
+        ret = BLK_STS_RESOURCE;
         if (!get_device(&sdev->sdev_gendev))
                 goto out;
···
 
         if (!(req->rq_flags & RQF_DONTPREP)) {
                 ret = prep_to_mq(scsi_mq_prep_fn(req));
-                if (ret != BLK_MQ_RQ_QUEUE_OK)
+                if (ret != BLK_STS_OK)
                         goto out_dec_host_busy;
                 req->rq_flags |= RQF_DONTPREP;
         } else {
···
         reason = scsi_dispatch_cmd(cmd);
         if (reason) {
                 scsi_set_blocked(cmd, reason);
-                ret = BLK_MQ_RQ_QUEUE_BUSY;
+                ret = BLK_STS_RESOURCE;
                 goto out_dec_host_busy;
         }
 
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 
 out_dec_host_busy:
         atomic_dec(&shost->host_busy);
···
         put_device(&sdev->sdev_gendev);
 out:
         switch (ret) {
-        case BLK_MQ_RQ_QUEUE_BUSY:
+        case BLK_STS_OK:
+                break;
+        case BLK_STS_RESOURCE:
                 if (atomic_read(&sdev->device_busy) == 0 &&
                     !scsi_device_blocked(sdev))
                         blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
                 break;
-        case BLK_MQ_RQ_QUEUE_ERROR:
+        default:
                 /*
                  * Make sure to release all allocated ressources when
                  * we hit an error, as we will never see this command
···
                  */
                 if (req->rq_flags & RQF_DONTPREP)
                         scsi_mq_uninit_cmd(cmd);
-                break;
-        default:
                 break;
         }
         return ret;
+2 -5
include/linux/blk-mq.h
···
         bool last;
 };
 
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
+                const struct blk_mq_queue_data *);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
···
 };
 
 enum {
-        BLK_MQ_RQ_QUEUE_OK      = 0,    /* queued fine */
-        BLK_MQ_RQ_QUEUE_BUSY    = 1,    /* requeue IO for later */
-        BLK_MQ_RQ_QUEUE_ERROR   = 2,    /* end IO with error */
-
         BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
         BLK_MQ_F_TAG_SHARED     = 1 << 1,
         BLK_MQ_F_SG_MERGE       = 1 << 2,