Merge branch 'nvme-4.16-rc' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Keith:

"After syncing with Christoph and Sagi, we feel this is a good time to
send our latest fixes across most of the nvme components for 4.16"

* 'nvme-4.16-rc' of git://git.infradead.org/nvme:
nvme-rdma: fix sysfs invoked reset_ctrl error flow
nvmet: Change return code of discard command if not supported
nvme-pci: Fix timeouts in connecting state
nvme-pci: Remap CMB SQ entries on every controller reset
nvme: fix the deadlock in nvme_update_formats
nvme: Don't use a stack buffer for keep-alive command
nvme_fc: cleanup io completion
nvme_fc: correct abort race condition on resets
nvme: Fix discard buffer overrun
nvme: delete NVME_CTRL_LIVE --> NVME_CTRL_CONNECTING transition
nvme-rdma: use NVME_CTRL_CONNECTING state to mark init process
nvme: rename NVME_CTRL_RECONNECTING state to NVME_CTRL_CONNECTING

Changed files: +106 -170

drivers/nvme/host/core.c | +27 -18

···
         int ret;
 
         ret = nvme_reset_ctrl(ctrl);
-        if (!ret)
+        if (!ret) {
                 flush_work(&ctrl->reset_work);
+                if (ctrl->state != NVME_CTRL_LIVE)
+                        ret = -ENETRESET;
+        }
+
         return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
···
         switch (new_state) {
         case NVME_CTRL_ADMIN_ONLY:
                 switch (old_state) {
-                case NVME_CTRL_RECONNECTING:
+                case NVME_CTRL_CONNECTING:
                         changed = true;
                         /* FALLTHRU */
                 default:
···
                 switch (old_state) {
                 case NVME_CTRL_NEW:
                 case NVME_CTRL_RESETTING:
-                case NVME_CTRL_RECONNECTING:
+                case NVME_CTRL_CONNECTING:
                         changed = true;
                         /* FALLTHRU */
                 default:
···
                         break;
                 }
                 break;
-        case NVME_CTRL_RECONNECTING:
+        case NVME_CTRL_CONNECTING:
                 switch (old_state) {
-                case NVME_CTRL_LIVE:
+                case NVME_CTRL_NEW:
                 case NVME_CTRL_RESETTING:
                         changed = true;
                         /* FALLTHRU */
···
                 case NVME_CTRL_LIVE:
                 case NVME_CTRL_ADMIN_ONLY:
                 case NVME_CTRL_RESETTING:
-                case NVME_CTRL_RECONNECTING:
+                case NVME_CTRL_CONNECTING:
                         changed = true;
                         /* FALLTHRU */
                 default:
···
                 u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
                 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
 
-                range[n].cattr = cpu_to_le32(0);
-                range[n].nlb = cpu_to_le32(nlb);
-                range[n].slba = cpu_to_le64(slba);
+                if (n < segments) {
+                        range[n].cattr = cpu_to_le32(0);
+                        range[n].nlb = cpu_to_le32(nlb);
+                        range[n].slba = cpu_to_le64(slba);
+                }
                 n++;
         }
···
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
 {
-        struct nvme_command c;
         struct request *rq;
 
-        memset(&c, 0, sizeof(c));
-        c.common.opcode = nvme_admin_keep_alive;
-
-        rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+        rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
                         NVME_QID_ANY);
         if (IS_ERR(rq))
                 return PTR_ERR(rq);
···
                 return;
 
         INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+        memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+        ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
         schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
···
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-        struct nvme_ns *ns;
+        struct nvme_ns *ns, *next;
+        LIST_HEAD(rm_list);
 
         mutex_lock(&ctrl->namespaces_mutex);
         list_for_each_entry(ns, &ctrl->namespaces, list) {
-                if (ns->disk && nvme_revalidate_disk(ns->disk))
-                        nvme_ns_remove(ns);
+                if (ns->disk && nvme_revalidate_disk(ns->disk)) {
+                        list_move_tail(&ns->list, &rm_list);
+                }
         }
         mutex_unlock(&ctrl->namespaces_mutex);
+
+        list_for_each_entry_safe(ns, next, &rm_list, list)
+                nvme_ns_remove(ns);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
···
                 [NVME_CTRL_LIVE]        = "live",
                 [NVME_CTRL_ADMIN_ONLY]  = "only-admin",
                 [NVME_CTRL_RESETTING]   = "resetting",
-                [NVME_CTRL_RECONNECTING]= "reconnecting",
+                [NVME_CTRL_CONNECTING]  = "connecting",
                 [NVME_CTRL_DELETING]    = "deleting",
                 [NVME_CTRL_DEAD]        = "dead",
         };
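The state rename is user visible: the controller's sysfs state attribute prints the strings from the table above, so a controller that used to report "reconnecting" now reports "connecting". Below is a minimal userspace sketch (not part of this series; the device path is an example) that reads the attribute and treats the transient states accordingly:

/* Minimal sketch: read the controller state sysfs attribute and treat
 * "connecting" (formerly "reconnecting") and "resetting" as transient.
 * The device path is an example; adjust it for the controller at hand.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char state[32];
        FILE *f = fopen("/sys/class/nvme/nvme0/state", "r");

        if (!f) {
                perror("open nvme state attribute");
                return 1;
        }
        if (!fgets(state, sizeof(state), f)) {
                fprintf(stderr, "could not read nvme state attribute\n");
                fclose(f);
                return 1;
        }
        fclose(f);
        state[strcspn(state, "\n")] = '\0';

        if (!strcmp(state, "connecting") || !strcmp(state, "resetting"))
                printf("controller is %s, retry later\n", state);
        else if (!strcmp(state, "live") || !strcmp(state, "only-admin"))
                printf("controller is usable (%s)\n", state);
        else
                printf("controller state: %s\n", state);
        return 0;
}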
drivers/nvme/host/fabrics.h | +5 -4

···
             cmd->common.opcode != nvme_fabrics_command ||
             cmd->fabrics.fctype != nvme_fabrics_type_connect) {
                 /*
-                 * Reconnecting state means transport disruption, which can take
-                 * a long time and even might fail permanently, fail fast to
-                 * give upper layers a chance to failover.
+                 * Connecting state means transport disruption or initial
+                 * establishment, which can take a long time and even might
+                 * fail permanently, fail fast to give upper layers a chance
+                 * to failover.
                  * Deleting state means that the ctrl will never accept commands
                  * again, fail it permanently.
                  */
-                if (ctrl->state == NVME_CTRL_RECONNECTING ||
+                if (ctrl->state == NVME_CTRL_CONNECTING ||
                     ctrl->state == NVME_CTRL_DELETING) {
                         nvme_req(rq)->status = NVME_SC_ABORT_REQ;
                         return BLK_STS_IOERR;
drivers/nvme/host/fc.c | +36 -121

···
 
 enum nvme_fcop_flags {
         FCOP_FLAGS_TERMIO       = (1 << 0),
-        FCOP_FLAGS_RELEASED     = (1 << 1),
-        FCOP_FLAGS_COMPLETE     = (1 << 2),
-        FCOP_FLAGS_AEN          = (1 << 3),
+        FCOP_FLAGS_AEN          = (1 << 1),
 };
 
 struct nvmefc_ls_req_op {
···
 {
         switch (ctrl->ctrl.state) {
         case NVME_CTRL_NEW:
-        case NVME_CTRL_RECONNECTING:
+        case NVME_CTRL_CONNECTING:
                 /*
                  * As all reconnects were suppressed, schedule a
                  * connect.
···
                 }
                 break;
 
-        case NVME_CTRL_RECONNECTING:
+        case NVME_CTRL_CONNECTING:
                 /*
                  * The association has already been terminated and the
                  * controller is attempting reconnects.  No need to do anything
···
 
 /* *********************** NVME Ctrl Routines **************************** */
 
-static void __nvme_fc_final_op_cleanup(struct request *rq);
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
···
 static int
 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
 {
-        int state;
+        unsigned long flags;
+        int opstate;
 
-        state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
-        if (state != FCPOP_STATE_ACTIVE) {
-                atomic_set(&op->state, state);
+        spin_lock_irqsave(&ctrl->lock, flags);
+        opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+        if (opstate != FCPOP_STATE_ACTIVE)
+                atomic_set(&op->state, opstate);
+        else if (ctrl->flags & FCCTRL_TERMIO)
+                ctrl->iocnt++;
+        spin_unlock_irqrestore(&ctrl->lock, flags);
+
+        if (opstate != FCPOP_STATE_ACTIVE)
                 return -ECANCELED;
-        }
 
         ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
                                         &ctrl->rport->remoteport,
···
 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 {
         struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
-        unsigned long flags;
-        int i, ret;
-
-        for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
-                if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
-                        continue;
-
-                spin_lock_irqsave(&ctrl->lock, flags);
-                if (ctrl->flags & FCCTRL_TERMIO) {
-                        ctrl->iocnt++;
-                        aen_op->flags |= FCOP_FLAGS_TERMIO;
-                }
-                spin_unlock_irqrestore(&ctrl->lock, flags);
-
-                ret = __nvme_fc_abort_op(ctrl, aen_op);
-                if (ret) {
-                        /*
-                         * if __nvme_fc_abort_op failed the io wasn't
-                         * active. Thus this call path is running in
-                         * parallel to the io complete. Treat as non-error.
-                         */
-
-                        /* back out the flags/counters */
-                        spin_lock_irqsave(&ctrl->lock, flags);
-                        if (ctrl->flags & FCCTRL_TERMIO)
-                                ctrl->iocnt--;
-                        aen_op->flags &= ~FCOP_FLAGS_TERMIO;
-                        spin_unlock_irqrestore(&ctrl->lock, flags);
-                        return;
-                }
-        }
+        int i;
+
+        for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+                __nvme_fc_abort_op(ctrl, aen_op);
 }
 
-static inline int
+static inline void
 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
-                struct nvme_fc_fcp_op *op)
+                struct nvme_fc_fcp_op *op, int opstate)
 {
         unsigned long flags;
-        bool complete_rq = false;
 
-        spin_lock_irqsave(&ctrl->lock, flags);
-        if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+        if (opstate == FCPOP_STATE_ABORTED) {
+                spin_lock_irqsave(&ctrl->lock, flags);
                 if (ctrl->flags & FCCTRL_TERMIO) {
                         if (!--ctrl->iocnt)
                                 wake_up(&ctrl->ioabort_wait);
                 }
+                spin_unlock_irqrestore(&ctrl->lock, flags);
         }
-        if (op->flags & FCOP_FLAGS_RELEASED)
-                complete_rq = true;
-        else
-                op->flags |= FCOP_FLAGS_COMPLETE;
-        spin_unlock_irqrestore(&ctrl->lock, flags);
-
-        return complete_rq;
 }
 
 static void
···
         __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
         union nvme_result result;
         bool terminate_assoc = true;
+        int opstate;
 
         /*
          * WARNING:
···
          * association to be terminated.
          */
 
+        opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
         fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
                                 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
 
-        if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
-            op->flags & FCOP_FLAGS_TERMIO)
+        if (opstate == FCPOP_STATE_ABORTED)
                 status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
         else if (freq->status)
                 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
···
 done:
         if (op->flags & FCOP_FLAGS_AEN) {
                 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
-                __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+                __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
                 atomic_set(&op->state, FCPOP_STATE_IDLE);
                 op->flags = FCOP_FLAGS_AEN;     /* clear other flags */
                 nvme_fc_ctrl_put(ctrl);
···
         if (status &&
             (blk_queue_dying(rq->q) ||
              ctrl->ctrl.state == NVME_CTRL_NEW ||
-             ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+             ctrl->ctrl.state == NVME_CTRL_CONNECTING))
                 status |= cpu_to_le16(NVME_SC_DNR << 1);
 
-        if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
-                __nvme_fc_final_op_cleanup(rq);
-        else
-                nvme_end_request(rq, status, result);
+        __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+        nvme_end_request(rq, status, result);
 
 check_error:
         if (terminate_assoc)
···
 }
 
 static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
 {
         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
         struct nvme_fc_ctrl *ctrl = op->ctrl;
 
         atomic_set(&op->state, FCPOP_STATE_IDLE);
-        op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
-                        FCOP_FLAGS_COMPLETE);
 
         nvme_fc_unmap_data(ctrl, rq, op);
         nvme_complete_rq(rq);
         nvme_fc_ctrl_put(ctrl);
-
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-        struct nvme_fc_ctrl *ctrl = op->ctrl;
-        unsigned long flags;
-        bool completed = false;
-
-        /*
-         * the core layer, on controller resets after calling
-         * nvme_shutdown_ctrl(), calls complete_rq without our
-         * calling blk_mq_complete_request(), thus there may still
-         * be live i/o outstanding with the LLDD. Means transport has
-         * to track complete calls vs fcpio_done calls to know what
-         * path to take on completes and dones.
-         */
-        spin_lock_irqsave(&ctrl->lock, flags);
-        if (op->flags & FCOP_FLAGS_COMPLETE)
-                completed = true;
-        else
-                op->flags |= FCOP_FLAGS_RELEASED;
-        spin_unlock_irqrestore(&ctrl->lock, flags);
-
-        if (completed)
-                __nvme_fc_final_op_cleanup(rq);
 }
 
 /*
···
         struct nvme_ctrl *nctrl = data;
         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-        unsigned long flags;
-        int status;
 
         if (!blk_mq_request_started(req))
                 return;
 
-        spin_lock_irqsave(&ctrl->lock, flags);
-        if (ctrl->flags & FCCTRL_TERMIO) {
-                ctrl->iocnt++;
-                op->flags |= FCOP_FLAGS_TERMIO;
-        }
-        spin_unlock_irqrestore(&ctrl->lock, flags);
-
-        status = __nvme_fc_abort_op(ctrl, op);
-        if (status) {
-                /*
-                 * if __nvme_fc_abort_op failed the io wasn't
-                 * active. Thus this call path is running in
-                 * parallel to the io complete. Treat as non-error.
-                 */
-
-                /* back out the flags/counters */
-                spin_lock_irqsave(&ctrl->lock, flags);
-                if (ctrl->flags & FCCTRL_TERMIO)
-                        ctrl->iocnt--;
-                op->flags &= ~FCOP_FLAGS_TERMIO;
-                spin_unlock_irqrestore(&ctrl->lock, flags);
-                return;
-        }
+        __nvme_fc_abort_op(ctrl, op);
 }
 
···
         unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
         bool recon = true;
 
-        if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
                 return;
 
         if (portptr->port_state == FC_OBJSTATE_ONLINE)
···
         /* will block will waiting for io to terminate */
         nvme_fc_delete_association(ctrl);
 
-        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                 dev_err(ctrl->ctrl.device,
                         "NVME-FC{%d}: error_recovery: Couldn't change state "
-                        "to RECONNECTING\n", ctrl->cnum);
+                        "to CONNECTING\n", ctrl->cnum);
                 return;
         }
···
          * transport errors (frame drop, LS failure) inherently must kill
          * the association. The transport is coded so that any command used
          * to create the association (prior to a LIVE state transition
-         * while NEW or RECONNECTING) will fail if it completes in error or
+         * while NEW or CONNECTING) will fail if it completes in error or
          * times out.
          *
          * As such: as the connect request was mostly likely due to a
drivers/nvme/host/nvme.h | +2 -1

···
         NVME_CTRL_LIVE,
         NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
         NVME_CTRL_RESETTING,
-        NVME_CTRL_RECONNECTING,
+        NVME_CTRL_CONNECTING,
         NVME_CTRL_DELETING,
         NVME_CTRL_DEAD,
 };
···
         struct work_struct scan_work;
         struct work_struct async_event_work;
         struct delayed_work ka_work;
+        struct nvme_command ka_cmd;
         struct work_struct fw_act_work;
 
         /* Power saving configuration */
drivers/nvme/host/pci.c | +23 -16

···
         /* If there is a reset/reinit ongoing, we shouldn't reset again. */
         switch (dev->ctrl.state) {
         case NVME_CTRL_RESETTING:
-        case NVME_CTRL_RECONNECTING:
+        case NVME_CTRL_CONNECTING:
                 return false;
         default:
                 break;
···
          * cancellation error. All outstanding requests are completed on
          * shutdown, so we return BLK_EH_HANDLED.
          */
-        if (dev->ctrl.state == NVME_CTRL_RESETTING) {
+        switch (dev->ctrl.state) {
+        case NVME_CTRL_CONNECTING:
+        case NVME_CTRL_RESETTING:
                 dev_warn(dev->ctrl.device,
                          "I/O %d QID %d timeout, disable controller\n",
                          req->tag, nvmeq->qid);
                 nvme_dev_disable(dev, false);
                 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                 return BLK_EH_HANDLED;
+        default:
+                break;
         }
 
         /*
···
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                                 int qid, int depth)
 {
-        if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-                unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-                                                      dev->ctrl.page_size);
-                nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-                nvmeq->sq_cmds_io = dev->cmb + offset;
-        } else {
-                nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-                                        &nvmeq->sq_dma_addr, GFP_KERNEL);
-                if (!nvmeq->sq_cmds)
-                        return -ENOMEM;
-        }
+        /* CMB SQEs will be mapped before creation */
+        if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+                return 0;
 
+        nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+                                            &nvmeq->sq_dma_addr, GFP_KERNEL);
+        if (!nvmeq->sq_cmds)
+                return -ENOMEM;
         return 0;
 }
···
 {
         struct nvme_dev *dev = nvmeq->dev;
         int result;
+
+        if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+                unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+                                                      dev->ctrl.page_size);
+                nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+                nvmeq->sq_cmds_io = dev->cmb + offset;
+        }
 
         nvmeq->cq_vector = qid - 1;
         result = adapter_alloc_cq(dev, qid, nvmeq);
···
                 nvme_dev_disable(dev, false);
 
         /*
-         * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+         * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
          * initializing procedure here.
          */
-        if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+        if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
                 dev_warn(dev->ctrl.device,
-                        "failed to mark controller RECONNECTING\n");
+                        "failed to mark controller CONNECTING\n");
                 goto out;
         }
drivers/nvme/host/rdma.c | +8 -8

···
 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 {
         /* If we are resetting/deleting then do nothing */
-        if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
                 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
                         ctrl->ctrl.state == NVME_CTRL_LIVE);
                 return;
···
         blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
         nvme_start_queues(&ctrl->ctrl);
 
-        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                 /* state change failure should never happen */
                 WARN_ON_ONCE(1);
                 return;
···
         nvme_stop_ctrl(&ctrl->ctrl);
         nvme_rdma_shutdown_ctrl(ctrl, false);
 
-        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                 /* state change failure should never happen */
                 WARN_ON_ONCE(1);
                 return;
···
         return;
 
 out_fail:
-        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-        nvme_remove_namespaces(&ctrl->ctrl);
-        nvme_rdma_shutdown_ctrl(ctrl, true);
-        nvme_uninit_ctrl(&ctrl->ctrl);
-        nvme_put_ctrl(&ctrl->ctrl);
+        ++ctrl->ctrl.nr_reconnects;
+        nvme_rdma_reconnect_or_remove(ctrl);
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
···
                         GFP_KERNEL);
         if (!ctrl->queues)
                 goto out_uninit_ctrl;
+
+        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+        WARN_ON_ONCE(!changed);
 
         ret = nvme_rdma_configure_admin_queue(ctrl, true);
         if (ret)
drivers/nvme/target/io-cmd.c | +5 -2

···
 static u16 nvmet_discard_range(struct nvmet_ns *ns,
                 struct nvme_dsm_range *range, struct bio **bio)
 {
-        if (__blkdev_issue_discard(ns->bdev,
+        int ret;
+
+        ret = __blkdev_issue_discard(ns->bdev,
                         le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
                         le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-                        GFP_KERNEL, 0, bio))
+                        GFP_KERNEL, 0, bio);
+        if (ret && ret != -EOPNOTSUPP)
                 return NVME_SC_INTERNAL | NVME_SC_DNR;
         return 0;
 }
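For context on the nvmet discard change: Dataset Management is advisory, so a deallocate that the backing block device cannot honor (__blkdev_issue_discard returns -EOPNOTSUPP) is now reported to the host as success instead of an internal error. Hosts learn whether the command is supported at all from the ONCS field of the Identify Controller data. The sketch below (not part of this series; the device path and required privileges are assumptions) reads ONCS through the NVMe admin passthrough ioctl:

/* Sketch: issue Identify Controller (opcode 0x06, CNS 1) through the NVMe
 * admin passthrough ioctl and report whether Dataset Management (bit 2 of
 * ONCS, bytes 520-521 of the Identify data) is advertised.
 * The device path is an example; the ioctl needs sufficient privileges.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
        static uint8_t id[4096];
        struct nvme_admin_cmd cmd = {
                .opcode   = 0x06,                       /* Identify */
                .addr     = (uint64_t)(uintptr_t)id,
                .data_len = sizeof(id),
                .cdw10    = 1,                          /* CNS 1: controller */
        };
        int fd = open("/dev/nvme0", O_RDONLY);
        int err;

        if (fd < 0) {
                perror("open /dev/nvme0");
                return 1;
        }
        err = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
        close(fd);
        if (err < 0) {
                perror("NVME_IOCTL_ADMIN_CMD");
                return 1;
        }
        if (err > 0) {
                fprintf(stderr, "identify failed, NVMe status 0x%x\n", err);
                return 1;
        }

        /* ONCS is little-endian on the wire; assemble it byte-wise */
        uint16_t oncs = id[520] | ((uint16_t)id[521] << 8);

        printf("Dataset Management (discard) %sadvertised by the controller\n",
               (oncs & (1 << 2)) ? "" : "not ");
        return 0;
}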