Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nvme-5.11-2021-01-07' of git://git.infradead.org/nvme into block-5.11

Pull NVMe updates from Christoph:

"nvme updates for 5.11:

- fix a race in the nvme-tcp send code (Sagi Grimberg)
- fix a list corruption in an nvme-rdma error path (Israel Rukshin)
- avoid a possible double fetch in nvme-pci (Lalithambika Krishnakumar)
- add the subsystem NQN quirk for a Samsung drive (Gopal Tiwari)
- fix two compiler warnings in nvme-fcloop (James Smart)
- don't call sleeping functions from irq context in nvme-fc (James Smart)
- remove an unused argument (Max Gurtovoy)
- remove unused exports (Minwoo Im)"

* tag 'nvme-5.11-2021-01-07' of git://git.infradead.org/nvme:
nvme: remove the unused status argument from nvme_trace_bio_complete
nvmet-rdma: Fix list_del corruption on queue establishment failure
nvme: unexport functions with no external caller
nvme: avoid possible double fetch in handling CQE
nvme-tcp: Fix possible race of io_work and direct send
nvme-pci: mark Samsung PM1725a as IGNORE_DEV_SUBNQN
nvme-fcloop: Fix sscanf type and list_first_entry_or_null warnings
nvme-fc: avoid calling _nvme_fc_abort_outstanding_ios from interrupt context

+50 -21
+3 -5
drivers/nvme/host/core.c
··· 179 179 } 180 180 EXPORT_SYMBOL_GPL(nvme_reset_ctrl); 181 181 182 - int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) 182 + static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) 183 183 { 184 184 int ret; 185 185 ··· 192 192 193 193 return ret; 194 194 } 195 - EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); 196 195 197 196 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) 198 197 { ··· 330 331 req->__sector = nvme_lba_to_sect(req->q->queuedata, 331 332 le64_to_cpu(nvme_req(req)->result.u64)); 332 333 333 - nvme_trace_bio_complete(req, status); 334 + nvme_trace_bio_complete(req); 334 335 blk_mq_end_request(req, status); 335 336 } 336 337 ··· 577 578 } 578 579 EXPORT_SYMBOL_GPL(nvme_alloc_request); 579 580 580 - struct request *nvme_alloc_request_qid(struct request_queue *q, 581 + static struct request *nvme_alloc_request_qid(struct request_queue *q, 581 582 struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) 582 583 { 583 584 struct request *req; ··· 588 589 nvme_init_request(req, cmd); 589 590 return req; 590 591 } 591 - EXPORT_SYMBOL_GPL(nvme_alloc_request_qid); 592 592 593 593 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) 594 594 {
+14 -1
drivers/nvme/host/fc.c
··· 166 166 struct blk_mq_tag_set admin_tag_set; 167 167 struct blk_mq_tag_set tag_set; 168 168 169 + struct work_struct ioerr_work; 169 170 struct delayed_work connect_work; 170 171 171 172 struct kref ref; ··· 1890 1889 } 1891 1890 1892 1891 static void 1892 + nvme_fc_ctrl_ioerr_work(struct work_struct *work) 1893 + { 1894 + struct nvme_fc_ctrl *ctrl = 1895 + container_of(work, struct nvme_fc_ctrl, ioerr_work); 1896 + 1897 + nvme_fc_error_recovery(ctrl, "transport detected io error"); 1898 + } 1899 + 1900 + static void 1893 1901 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) 1894 1902 { 1895 1903 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); ··· 2056 2046 2057 2047 check_error: 2058 2048 if (terminate_assoc) 2059 - nvme_fc_error_recovery(ctrl, "transport detected io error"); 2049 + queue_work(nvme_reset_wq, &ctrl->ioerr_work); 2060 2050 } 2061 2051 2062 2052 static int ··· 3243 3233 { 3244 3234 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 3245 3235 3236 + cancel_work_sync(&ctrl->ioerr_work); 3246 3237 cancel_delayed_work_sync(&ctrl->connect_work); 3247 3238 /* 3248 3239 * kill the association on the link side. this will block ··· 3460 3449 3461 3450 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); 3462 3451 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 3452 + INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); 3463 3453 spin_lock_init(&ctrl->lock); 3464 3454 3465 3455 /* io queue count */ ··· 3552 3540 3553 3541 fail_ctrl: 3554 3542 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); 3543 + cancel_work_sync(&ctrl->ioerr_work); 3555 3544 cancel_work_sync(&ctrl->ctrl.reset_work); 3556 3545 cancel_delayed_work_sync(&ctrl->connect_work); 3557 3546
+2 -7
drivers/nvme/host/nvme.h
··· 610 610 #define NVME_QID_ANY -1 611 611 struct request *nvme_alloc_request(struct request_queue *q, 612 612 struct nvme_command *cmd, blk_mq_req_flags_t flags); 613 - struct request *nvme_alloc_request_qid(struct request_queue *q, 614 - struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid); 615 613 void nvme_cleanup_cmd(struct request *req); 616 614 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, 617 615 struct nvme_command *cmd); ··· 628 630 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); 629 631 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); 630 632 int nvme_reset_ctrl(struct nvme_ctrl *ctrl); 631 - int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); 632 633 int nvme_try_sched_reset(struct nvme_ctrl *ctrl); 633 634 int nvme_delete_ctrl(struct nvme_ctrl *ctrl); 634 635 ··· 672 675 kblockd_schedule_work(&head->requeue_work); 673 676 } 674 677 675 - static inline void nvme_trace_bio_complete(struct request *req, 676 - blk_status_t status) 678 + static inline void nvme_trace_bio_complete(struct request *req) 677 679 { 678 680 struct nvme_ns *ns = req->q->queuedata; 679 681 ··· 727 731 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) 728 732 { 729 733 } 730 - static inline void nvme_trace_bio_complete(struct request *req, 731 - blk_status_t status) 734 + static inline void nvme_trace_bio_complete(struct request *req) 732 735 { 733 736 } 734 737 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+6 -4
drivers/nvme/host/pci.c
··· 967 967 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) 968 968 { 969 969 struct nvme_completion *cqe = &nvmeq->cqes[idx]; 970 + __u16 command_id = READ_ONCE(cqe->command_id); 970 971 struct request *req; 971 972 972 973 /* ··· 976 975 * aborts. We don't even bother to allocate a struct request 977 976 * for them but rather special case them here. 978 977 */ 979 - if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) { 978 + if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { 980 979 nvme_complete_async_event(&nvmeq->dev->ctrl, 981 980 cqe->status, &cqe->result); 982 981 return; 983 982 } 984 983 985 - req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); 984 + req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id); 986 985 if (unlikely(!req)) { 987 986 dev_warn(nvmeq->dev->ctrl.device, 988 987 "invalid id %d completed on queue %d\n", 989 - cqe->command_id, le16_to_cpu(cqe->sq_id)); 988 + command_id, le16_to_cpu(cqe->sq_id)); 990 989 return; 991 990 } 992 991 ··· 3197 3196 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 3198 3197 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3199 3198 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ 3200 - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3199 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3200 + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3201 3201 { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ 3202 3202 .driver_data = NVME_QUIRK_LIGHTNVM, }, 3203 3203 { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
+11 -1
drivers/nvme/host/tcp.c
··· 262 262 } 263 263 } 264 264 265 + static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) 266 + { 267 + int ret; 268 + 269 + /* drain the send queue as much as we can... */ 270 + do { 271 + ret = nvme_tcp_try_send(queue); 272 + } while (ret > 0); 273 + } 274 + 265 275 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, 266 276 bool sync, bool last) 267 277 { ··· 289 279 if (queue->io_cpu == smp_processor_id() && 290 280 sync && empty && mutex_trylock(&queue->send_mutex)) { 291 281 queue->more_requests = !last; 292 - nvme_tcp_try_send(queue); 282 + nvme_tcp_send_all(queue); 293 283 queue->more_requests = false; 294 284 mutex_unlock(&queue->send_mutex); 295 285 } else if (last) {
+4 -3
drivers/nvme/target/fcloop.c
··· 1501 1501 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr, 1502 1502 const char *buf, size_t count) 1503 1503 { 1504 - int opcode, starting, amount; 1504 + unsigned int opcode; 1505 + int starting, amount; 1505 1506 1506 1507 if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3) 1507 1508 return -EBADRQC; ··· 1589 1588 1590 1589 static void __exit fcloop_exit(void) 1591 1590 { 1592 - struct fcloop_lport *lport; 1593 - struct fcloop_nport *nport; 1591 + struct fcloop_lport *lport = NULL; 1592 + struct fcloop_nport *nport = NULL; 1594 1593 struct fcloop_tport *tport; 1595 1594 struct fcloop_rport *rport; 1596 1595 unsigned long flags;
+10
drivers/nvme/target/rdma.c
··· 1641 1641 spin_lock_irqsave(&queue->state_lock, flags); 1642 1642 switch (queue->state) { 1643 1643 case NVMET_RDMA_Q_CONNECTING: 1644 + while (!list_empty(&queue->rsp_wait_list)) { 1645 + struct nvmet_rdma_rsp *rsp; 1646 + 1647 + rsp = list_first_entry(&queue->rsp_wait_list, 1648 + struct nvmet_rdma_rsp, 1649 + wait_list); 1650 + list_del(&rsp->wait_list); 1651 + nvmet_rdma_put_rsp(rsp); 1652 + } 1653 + fallthrough; 1644 1654 case NVMET_RDMA_Q_LIVE: 1645 1655 queue->state = NVMET_RDMA_Q_DISCONNECTING; 1646 1656 disconnect = true;