Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'nvme-5.19-2022-05-18' of git://git.infradead.org/nvme into for-5.19/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.19

- tighten the PCI presence check (Stefan Roese)
- fix a potential NULL pointer dereference in an error path
(Kyle Miller Smith)
- fix interpretation of the DMRSL field (Tom Yan)
- relax the data transfer alignment (Keith Busch)
- verbose error logging improvements (Max Gurtovoy, Chaitanya Kulkarni)
- misc cleanups (Chaitanya Kulkarni, me)"

* tag 'nvme-5.19-2022-05-18' of git://git.infradead.org/nvme:
nvme: split the enum used for various register constants
nvme-fabrics: add a request timeout helper
nvme-pci: harden drive presence detect in nvme_dev_disable()
nvme-pci: fix a NULL pointer dereference in nvme_alloc_admin_tags
nvme: mark internal passthru request RQF_QUIET
nvme: remove unneeded include from constants file
nvme: add missing status values to verbose logging
nvme: set dma alignment to dword
nvme: fix interpretation of DMRSL

8 files changed, 37 insertions(+), 15 deletions(-)
drivers/nvme/host/constants.c | +3 -1
···
  * Copyright (c) 2022, Oracle and/or its affiliates
  */

-#include <linux/blkdev.h>
 #include "nvme.h"

 #ifdef CONFIG_NVME_VERBOSE_ERRORS
···
         [NVME_SC_COMPARE_FAILED] = "Compare Failure",
         [NVME_SC_ACCESS_DENIED] = "Access Denied",
         [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
+        [NVME_SC_INTERNAL_PATH_ERROR] = "Internal Pathing Error",
         [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
         [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
         [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
+        [NVME_SC_CTRL_PATH_ERROR] = "Controller Pathing Error",
         [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
+        [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
 };

 const unsigned char *nvme_get_error_status_str(u16 status)
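Two of the three new strings pair with status codes that are newly defined in the include/linux/nvme.h hunk at the end of this diff (NVME_SC_INTERNAL_PATH_ERROR and NVME_SC_CTRL_PATH_ERROR); NVME_SC_HOST_ABORTED_CMD already existed but had no verbose-logging entry. The table itself is a sparse array of designated initializers with a bounds-checked accessor, the same shape as nvme_get_error_status_str() in this file. A standalone userspace sketch of that pattern, with invented names and index values rather than the real NVMe status codes:

/* Illustrative only; the ex_* names and values are made up for the example. */
#include <stdio.h>

enum { EX_PATH_INTERNAL = 0, EX_PATH_CTRL = 1, EX_PATH_HOST = 2 };

static const char * const ex_status_str[] = {
        [EX_PATH_INTERNAL] = "Internal Pathing Error",
        [EX_PATH_CTRL]     = "Controller Pathing Error",
        [EX_PATH_HOST]     = "Host Pathing Error",
};

/* Bounds-checked lookup with a fallback for codes that have no string. */
static const char *ex_get_status_str(unsigned int sc)
{
        if (sc < sizeof(ex_status_str) / sizeof(ex_status_str[0]) &&
            ex_status_str[sc])
                return ex_status_str[sc];
        return "Unknown";
}

int main(void)
{
        printf("%s\n", ex_get_status_str(EX_PATH_CTRL)); /* known code */
        printf("%s\n", ex_get_status_str(42));           /* falls back */
        return 0;
}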
drivers/nvme/host/core.c | +6 -3
···
         rq->timeout = ctrl->kato * HZ;
         rq->end_io_data = ctrl;
+        rq->rq_flags |= RQF_QUIET;
         blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
 }
···
         if (queue->limits.max_discard_sectors)
                 return;

+        if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+                ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
         blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
         blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
···
                 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
         }
         blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
-        blk_queue_dma_alignment(q, 7);
+        blk_queue_dma_alignment(q, 3);
         blk_queue_write_cache(q, vwc, vwc);
···
         if (id->dmrl)
                 ctrl->max_discard_segments = id->dmrl;
-        if (id->dmrsl)
-                ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl);
+        ctrl->dmrsl = le32_to_cpu(id->dmrsl);
         if (id->wzsl)
                 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
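This hunk carries two of the behavioural changes from the summary above. DMRSL in the Identify Controller data is expressed in logical blocks, so the raw value is now kept in ctrl->dmrsl and converted per namespace with nvme_lba_to_sect(), with a guard so the converted result still fits the 32-bit max_discard_sectors; the old code fed the logical-block count straight into a field that counts 512-byte sectors. Separately, blk_queue_dma_alignment() takes a mask, so dropping it from 7 to 3 relaxes the required data buffer alignment from 8 bytes to 4 bytes (dword). A minimal, illustrative userspace sketch of the arithmetic; it assumes the shift-based conversion used by nvme_lba_to_sect() in drivers/nvme/host/nvme.h, and the LBA format and DMRSL value below are made-up example numbers:

/* Illustrative sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9  /* 512-byte block layer sectors */

static uint64_t lba_to_sect(unsigned int lba_shift, uint64_t lba)
{
        /* Same shift nvme_lba_to_sect() applies: lba_shift - SECTOR_SHIFT. */
        return lba << (lba_shift - SECTOR_SHIFT);
}

int main(void)
{
        unsigned int lba_shift = 12;    /* example: 4096-byte logical blocks */
        uint32_t dmrsl = 0x10000;       /* example value, in logical blocks */

        /* Old interpretation: DMRSL treated directly as 512-byte sectors. */
        printf("misread as sectors: %llu bytes\n",
               (unsigned long long)dmrsl << SECTOR_SHIFT);
        /* Fixed interpretation: convert logical blocks to sectors first. */
        printf("converted properly: %llu bytes\n",
               (unsigned long long)lba_to_sect(lba_shift, dmrsl) << SECTOR_SHIFT);

        /*
         * blk_queue_dma_alignment() takes a mask; buffers must be aligned to
         * (mask + 1) bytes, so 7 meant 8-byte alignment and 3 means dword.
         */
        printf("old mask 7 -> %d-byte, new mask 3 -> %d-byte alignment\n",
               7 + 1, 3 + 1);
        return 0;
}

With a 4096-byte LBA format the old reading understates the discard limit by a factor of eight, which is exactly the lba_shift - SECTOR_SHIFT difference the conversion restores.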
drivers/nvme/host/fabrics.h | +8
···
         return ctrl->subsys->subnqn;
 }

+static inline void nvmf_complete_timed_out_request(struct request *rq)
+{
+        if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+                nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+                blk_mq_complete_request(rq);
+        }
+}
+
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
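The new inline helper lifts the "started but not yet completed" completion pattern, until now duplicated in the RDMA and TCP timeout handlers, into fabrics.h; the rdma.c and tcp.c hunks below convert both transports to call it.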
drivers/nvme/host/nvme.h | +1
···
 #endif
         u16 crdt[3];
         u16 oncs;
+        u32 dmrsl;
         u16 oacs;
         u16 sqsize;
         u32 max_namespaces;
drivers/nvme/host/pci.c | +4 -1
···
         nvme_init_request(abort_req, &cmd);

         abort_req->end_io_data = NULL;
+        abort_req->rq_flags |= RQF_QUIET;
         blk_execute_rq_nowait(abort_req, false, abort_endio);

         /*
···
         dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
         if (IS_ERR(dev->ctrl.admin_q)) {
                 blk_mq_free_tag_set(&dev->admin_tagset);
+                dev->ctrl.admin_q = NULL;
                 return -ENOMEM;
         }
         if (!blk_get_queue(dev->ctrl.admin_q)) {
···
         req->end_io_data = nvmeq;

         init_completion(&nvmeq->delete_done);
+        req->rq_flags |= RQF_QUIET;
         blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
                         nvme_del_cq_end : nvme_del_queue_end);
         return 0;
···
         struct pci_dev *pdev = to_pci_dev(dev->dev);

         mutex_lock(&dev->shutdown_lock);
-        if (pci_is_enabled(pdev)) {
+        if (pci_device_is_present(pdev) && pci_is_enabled(pdev)) {
                 u32 csts = readl(dev->bar + NVME_REG_CSTS);

                 if (dev->ctrl.state == NVME_CTRL_LIVE ||
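Three small hardening changes in pci.c: the driver-internal abort and delete-queue requests get RQF_QUIET (like the keep-alive request in core.c above), so failures of these internal commands are not logged as I/O errors; the admin queue pointer is reset to NULL when blk_mq_init_queue() fails, so later cleanup that only checks the pointer for non-NULL does not treat the leftover ERR_PTR as a live queue; and nvme_dev_disable() now also requires pci_device_is_present(), which probes the vendor ID over config space, rather than relying on pci_is_enabled() alone, which only reflects the software enable count, so the CSTS read is skipped for a device that has dropped off the bus.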
drivers/nvme/host/rdma.c | +1 -4
···
         struct nvme_rdma_queue *queue = req->queue;

         nvme_rdma_stop_queue(queue);
-        if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
-                nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
-                blk_mq_complete_request(rq);
-        }
+        nvmf_complete_timed_out_request(rq);
 }

 static enum blk_eh_timer_return
drivers/nvme/host/tcp.c | +1 -4
···
         struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

         nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
-        if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
-                nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
-                blk_mq_complete_request(rq);
-        }
+        nvmf_complete_timed_out_request(rq);
 }

 static enum blk_eh_timer_return
include/linux/nvme.h | +13 -2
···
         NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
         NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
         NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
-        NVME_CAP_CSS_NVM = 1 << 0,
-        NVME_CAP_CSS_CSI = 1 << 6,
+};
+
+enum {
         NVME_CSTS_RDY = 1 << 0,
         NVME_CSTS_CFS = 1 << 1,
         NVME_CSTS_NSSRO = 1 << 4,
···
         NVME_CSTS_SHST_OCCUR = 1 << 2,
         NVME_CSTS_SHST_CMPLT = 2 << 2,
         NVME_CSTS_SHST_MASK = 3 << 2,
+};
+
+enum {
         NVME_CMBMSC_CRE = 1 << 0,
         NVME_CMBMSC_CMSE = 1 << 1,
+};
+
+enum {
+        NVME_CAP_CSS_NVM = 1 << 0,
+        NVME_CAP_CSS_CSI = 1 << 6,
 };

 struct nvme_id_power_state {
···
         /*
          * Path-related Errors:
          */
+        NVME_SC_INTERNAL_PATH_ERROR = 0x300,
         NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
         NVME_SC_ANA_INACCESSIBLE = 0x302,
         NVME_SC_ANA_TRANSITION = 0x303,
+        NVME_SC_CTRL_PATH_ERROR = 0x360,
         NVME_SC_HOST_PATH_ERROR = 0x370,
         NVME_SC_HOST_ABORTED_CMD = 0x371,
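The register constants that previously shared one catch-all enum are split into per-register groups (CC, then CSTS, CMBMSC and CAP each in their own enum); no value changes, it only becomes clearer which register a given bit belongs to. The two added path-related status codes, NVME_SC_INTERNAL_PATH_ERROR (0x300) and NVME_SC_CTRL_PATH_ERROR (0x360), are the ones given verbose-logging strings in constants.c above.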