Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nbd: Remove __force casts

Make it again possible for sparse to verify that blk_status_t and Unix
error codes are used in the proper context by making nbd_send_cmd()
return a blk_status_t instead of an integer.

No functionality has been changed.

Signed-off-by: Christoph Hellwig <hch@lst.de>
[ bvanassche: added description and made two small formatting changes ]
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20240604221531.327131-1-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
957df9af e038ee61

+22 -29
+22 -29
drivers/block/nbd.c
@@ -589,10 +589,11 @@
 }
 
 /*
- * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
- * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
+ * Returns BLK_STS_IOERR if sending failed.
  */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
+				 int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 	struct nbd_config *config = nbd->config;
@@ -615,13 +614,13 @@
 
 	type = req_to_nbd_cmd_type(req);
 	if (type == U32_MAX)
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (rq_data_dir(req) == WRITE &&
 	    (config->flags & NBD_FLAG_READ_ONLY)) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 				    "Write on read-only\n");
-		return -EIO;
+		return BLK_STS_IOERR;
 	}
 
 	if (req->cmd_flags & REQ_FUA)
@@ -675,11 +674,11 @@
 			nsock->sent = sent;
 		}
 		set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-		return (__force int)BLK_STS_RESOURCE;
+		return BLK_STS_RESOURCE;
 	}
 	dev_err_ratelimited(disk_to_dev(nbd->disk),
 			    "Send control failed (result %d)\n", result);
-	return -EAGAIN;
+	goto requeue;
 }
 send_pages:
 	if (type != NBD_CMD_WRITE)
@@ -716,12 +715,12 @@
 				nsock->pending = req;
 				nsock->sent = sent;
 				set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-				return (__force int)BLK_STS_RESOURCE;
+				return BLK_STS_RESOURCE;
 			}
 			dev_err(disk_to_dev(nbd->disk),
 				"Send data failed (result %d)\n",
 				result);
-			return -EAGAIN;
+			goto requeue;
 		}
 		/*
 		 * The completion might already have come in,
@@ -738,7 +737,16 @@
 	trace_nbd_payload_sent(req, handle);
 	nsock->pending = NULL;
 	nsock->sent = 0;
-	return 0;
+	__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+	return BLK_STS_OK;
+
+requeue:
+	/* retry on a different socket */
+	dev_err_ratelimited(disk_to_dev(nbd->disk),
+			    "Request send failed, requeueing\n");
+	nbd_mark_nsock_dead(nbd, nsock, 1);
+	nbd_requeue_cmd(cmd);
+	return BLK_STS_OK;
 }
 
 static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
@@ -1028,7 +1018,7 @@
 	struct nbd_device *nbd = cmd->nbd;
 	struct nbd_config *config;
 	struct nbd_sock *nsock;
-	int ret;
+	blk_status_t ret;
 
 	lockdep_assert_held(&cmd->lock);
 
@@ -1082,25 +1072,8 @@
 		ret = BLK_STS_OK;
 		goto out;
 	}
-	/*
-	 * Some failures are related to the link going down, so anything that
-	 * returns EAGAIN can be retried on a different socket.
-	 */
 	ret = nbd_send_cmd(nbd, cmd, index);
-	/*
-	 * Access to this flag is protected by cmd->lock, thus it's safe to set
-	 * the flag after nbd_send_cmd() succeed to send request to server.
-	 */
-	if (!ret)
-		__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
-	else if (ret == -EAGAIN) {
-		dev_err_ratelimited(disk_to_dev(nbd->disk),
-				    "Request send failed, requeueing\n");
-		nbd_mark_nsock_dead(nbd, nsock, 1);
-		nbd_requeue_cmd(cmd);
-		ret = BLK_STS_OK;
-	}
 out:
 	mutex_unlock(&nsock->tx_lock);
 	nbd_config_put(nbd);
-	return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+	return ret;
 }
 
 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,