Merge branch 'nvme-4.16-rc' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Keith:

"After syncing with Christoph and Sagi, we feel this is a good time to
send our latest fixes across most of the nvme components for 4.16"

* 'nvme-4.16-rc' of git://git.infradead.org/nvme:
  nvme-rdma: fix sysfs invoked reset_ctrl error flow
  nvmet: Change return code of discard command if not supported
  nvme-pci: Fix timeouts in connecting state
  nvme-pci: Remap CMB SQ entries on every controller reset
  nvme: fix the deadlock in nvme_update_formats
  nvme: Don't use a stack buffer for keep-alive command
  nvme_fc: cleanup io completion
  nvme_fc: correct abort race condition on resets
  nvme: Fix discard buffer overrun
  nvme: delete NVME_CTRL_LIVE --> NVME_CTRL_CONNECTING transition
  nvme-rdma: use NVME_CTRL_CONNECTING state to mark init process
  nvme: rename NVME_CTRL_RECONNECTING state to NVME_CTRL_CONNECTING

Changed files (+106 -170):

drivers/nvme/host/core.c (+27 -18)
···
 	int ret;
 
 	ret = nvme_reset_ctrl(ctrl);
-	if (!ret)
+	if (!ret) {
 		flush_work(&ctrl->reset_work);
+		if (ctrl->state != NVME_CTRL_LIVE)
+			ret = -ENETRESET;
+	}
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
···
 	switch (new_state) {
 	case NVME_CTRL_ADMIN_ONLY:
 		switch (old_state) {
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
···
 		switch (old_state) {
 		case NVME_CTRL_NEW:
 		case NVME_CTRL_RESETTING:
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
···
 			break;
 		}
 		break;
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		switch (old_state) {
-		case NVME_CTRL_LIVE:
+		case NVME_CTRL_NEW:
 		case NVME_CTRL_RESETTING:
 			changed = true;
 			/* FALLTHRU */
···
 		case NVME_CTRL_LIVE:
 		case NVME_CTRL_ADMIN_ONLY:
 		case NVME_CTRL_RESETTING:
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
···
 		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
 
-		range[n].cattr = cpu_to_le32(0);
-		range[n].nlb = cpu_to_le32(nlb);
-		range[n].slba = cpu_to_le64(slba);
+		if (n < segments) {
+			range[n].cattr = cpu_to_le32(0);
+			range[n].nlb = cpu_to_le32(nlb);
+			range[n].slba = cpu_to_le64(slba);
+		}
 		n++;
 	}
···
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
 {
-	struct nvme_command c;
 	struct request *rq;
 
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_keep_alive;
-
-	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
 			NVME_QID_ANY);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
···
 		return;
 
 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
···
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-	struct nvme_ns *ns;
+	struct nvme_ns *ns, *next;
+	LIST_HEAD(rm_list);
 
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk))
-			nvme_ns_remove(ns);
+		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
+			list_move_tail(&ns->list, &rm_list);
+		}
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
+
+	list_for_each_entry_safe(ns, next, &rm_list, list)
+		nvme_ns_remove(ns);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
···
 	[NVME_CTRL_LIVE]	= "live",
 	[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
 	[NVME_CTRL_RESETTING]	= "resetting",
-	[NVME_CTRL_RECONNECTING]= "reconnecting",
+	[NVME_CTRL_CONNECTING]	= "connecting",
 	[NVME_CTRL_DELETING]	= "deleting",
 	[NVME_CTRL_DEAD]	= "dead",
 };

drivers/nvme/host/fabrics.h (+5 -4)
···
 	    cmd->common.opcode != nvme_fabrics_command ||
 	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
 		/*
-		 * Reconnecting state means transport disruption, which can take
-		 * a long time and even might fail permanently, fail fast to
-		 * give upper layers a chance to failover.
+		 * Connecting state means transport disruption or initial
+		 * establishment, which can take a long time and even might
+		 * fail permanently, fail fast to give upper layers a chance
+		 * to failover.
 		 * Deleting state means that the ctrl will never accept commands
 		 * again, fail it permanently.
 		 */
-		if (ctrl->state == NVME_CTRL_RECONNECTING ||
+		if (ctrl->state == NVME_CTRL_CONNECTING ||
 		    ctrl->state == NVME_CTRL_DELETING) {
 			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 			return BLK_STS_IOERR;

drivers/nvme/host/fc.c (+36 -121)
···
 
 enum nvme_fcop_flags {
 	FCOP_FLAGS_TERMIO	= (1 << 0),
-	FCOP_FLAGS_RELEASED	= (1 << 1),
-	FCOP_FLAGS_COMPLETE	= (1 << 2),
-	FCOP_FLAGS_AEN		= (1 << 3),
+	FCOP_FLAGS_AEN		= (1 << 1),
 };
 
 struct nvmefc_ls_req_op {
···
 {
 	switch (ctrl->ctrl.state) {
 	case NVME_CTRL_NEW:
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		/*
 		 * As all reconnects were suppressed, schedule a
 		 * connect.
···
 		}
 		break;
 
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		/*
 		 * The association has already been terminated and the
 		 * controller is attempting reconnects. No need to do anything
···
 
 /* *********************** NVME Ctrl Routines **************************** */
 
-static void __nvme_fc_final_op_cleanup(struct request *rq);
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
···
 static int
 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
 {
-	int state;
+	unsigned long flags;
+	int opstate;
 
-	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
-	if (state != FCPOP_STATE_ACTIVE) {
-		atomic_set(&op->state, state);
+	spin_lock_irqsave(&ctrl->lock, flags);
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+	if (opstate != FCPOP_STATE_ACTIVE)
+		atomic_set(&op->state, opstate);
+	else if (ctrl->flags & FCCTRL_TERMIO)
+		ctrl->iocnt++;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	if (opstate != FCPOP_STATE_ACTIVE)
 		return -ECANCELED;
-	}
 
 	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
 					&ctrl->rport->remoteport,
···
 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 {
 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
-	unsigned long flags;
-	int i, ret;
+	int i;
 
-	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
-		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
-			continue;
-
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO) {
-			ctrl->iocnt++;
-			aen_op->flags |= FCOP_FLAGS_TERMIO;
-		}
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-
-		ret = __nvme_fc_abort_op(ctrl, aen_op);
-		if (ret) {
-			/*
-			 * if __nvme_fc_abort_op failed the io wasn't
-			 * active. Thus this call path is running in
-			 * parallel to the io complete. Treat as non-error.
-			 */
-
-			/* back out the flags/counters */
-			spin_lock_irqsave(&ctrl->lock, flags);
-			if (ctrl->flags & FCCTRL_TERMIO)
-				ctrl->iocnt--;
-			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
-			spin_unlock_irqrestore(&ctrl->lock, flags);
-			return;
-		}
-	}
+	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+		__nvme_fc_abort_op(ctrl, aen_op);
 }
 
-static inline int
+static inline void
 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
-		struct nvme_fc_fcp_op *op)
+		struct nvme_fc_fcp_op *op, int opstate)
 {
 	unsigned long flags;
-	bool complete_rq = false;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+	if (opstate == FCPOP_STATE_ABORTED) {
+		spin_lock_irqsave(&ctrl->lock, flags);
 		if (ctrl->flags & FCCTRL_TERMIO) {
 			if (!--ctrl->iocnt)
 				wake_up(&ctrl->ioabort_wait);
 		}
+		spin_unlock_irqrestore(&ctrl->lock, flags);
 	}
-	if (op->flags & FCOP_FLAGS_RELEASED)
-		complete_rq = true;
-	else
-		op->flags |= FCOP_FLAGS_COMPLETE;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	return complete_rq;
 }
 
 static void
···
 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
 	union nvme_result result;
 	bool terminate_assoc = true;
+	int opstate;
 
 	/*
 	 * WARNING:
···
 	 * association to be terminated.
 	 */
 
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
 	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
 
-	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
-	    op->flags & FCOP_FLAGS_TERMIO)
+	if (opstate == FCPOP_STATE_ABORTED)
 		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
 	else if (freq->status)
 		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
···
 done:
 	if (op->flags & FCOP_FLAGS_AEN) {
 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
-		__nvme_fc_fcpop_chk_teardowns(ctrl, op);
+		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 		atomic_set(&op->state, FCPOP_STATE_IDLE);
 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
 		nvme_fc_ctrl_put(ctrl);
···
 	if (status &&
 	    (blk_queue_dying(rq->q) ||
 	     ctrl->ctrl.state == NVME_CTRL_NEW ||
-	     ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+	     ctrl->ctrl.state == NVME_CTRL_CONNECTING))
 		status |= cpu_to_le16(NVME_SC_DNR << 1);
 
-	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
-		__nvme_fc_final_op_cleanup(rq);
-	else
-		nvme_end_request(rq, status, result);
+	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+	nvme_end_request(rq, status, result);
 
 check_error:
 	if (terminate_assoc)
···
 }
 
 static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
 
 	atomic_set(&op->state, FCPOP_STATE_IDLE);
-	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
-			FCOP_FLAGS_COMPLETE);
 
 	nvme_fc_unmap_data(ctrl, rq, op);
 	nvme_complete_rq(rq);
 	nvme_fc_ctrl_put(ctrl);
-
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_ctrl *ctrl = op->ctrl;
-	unsigned long flags;
-	bool completed = false;
-
-	/*
-	 * the core layer, on controller resets after calling
-	 * nvme_shutdown_ctrl(), calls complete_rq without our
-	 * calling blk_mq_complete_request(), thus there may still
-	 * be live i/o outstanding with the LLDD. Means transport has
-	 * to track complete calls vs fcpio_done calls to know what
-	 * path to take on completes and dones.
-	 */
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (op->flags & FCOP_FLAGS_COMPLETE)
-		completed = true;
-	else
-		op->flags |= FCOP_FLAGS_RELEASED;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	if (completed)
-		__nvme_fc_final_op_cleanup(rq);
 }
 
 /*
···
 	struct nvme_ctrl *nctrl = data;
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
-	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (ctrl->flags & FCCTRL_TERMIO) {
-		ctrl->iocnt++;
-		op->flags |= FCOP_FLAGS_TERMIO;
-	}
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	status = __nvme_fc_abort_op(ctrl, op);
-	if (status) {
-		/*
-		 * if __nvme_fc_abort_op failed the io wasn't
-		 * active. Thus this call path is running in
-		 * parallel to the io complete. Treat as non-error.
-		 */
-
-		/* back out the flags/counters */
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO)
-			ctrl->iocnt--;
-		op->flags &= ~FCOP_FLAGS_TERMIO;
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-		return;
-	}
+	__nvme_fc_abort_op(ctrl, op);
 }
···
 	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
 	bool recon = true;
 
-	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
 		return;
 
 	if (portptr->port_state == FC_OBJSTATE_ONLINE)
···
 	/* will block will waiting for io to terminate */
 	nvme_fc_delete_association(ctrl);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		dev_err(ctrl->ctrl.device,
 			"NVME-FC{%d}: error_recovery: Couldn't change state "
-			"to RECONNECTING\n", ctrl->cnum);
+			"to CONNECTING\n", ctrl->cnum);
 		return;
 	}
···
  * transport errors (frame drop, LS failure) inherently must kill
  * the association. The transport is coded so that any command used
  * to create the association (prior to a LIVE state transition
- * while NEW or RECONNECTING) will fail if it completes in error or
+ * while NEW or CONNECTING) will fail if it completes in error or
  * times out.
  *
  * As such: as the connect request was mostly likely due to a

drivers/nvme/host/nvme.h (+2 -1)
···
 	NVME_CTRL_LIVE,
 	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
 	NVME_CTRL_RESETTING,
-	NVME_CTRL_RECONNECTING,
+	NVME_CTRL_CONNECTING,
 	NVME_CTRL_DELETING,
 	NVME_CTRL_DEAD,
 };
···
 	struct work_struct scan_work;
 	struct work_struct async_event_work;
 	struct delayed_work ka_work;
+	struct nvme_command ka_cmd;
 	struct work_struct fw_act_work;
 
 	/* Power saving configuration */

drivers/nvme/host/pci.c (+23 -16)
···
 	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
 	switch (dev->ctrl.state) {
 	case NVME_CTRL_RESETTING:
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		return false;
 	default:
 		break;
···
 	 * cancellation error. All outstanding requests are completed on
 	 * shutdown, so we return BLK_EH_HANDLED.
 	 */
-	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
+	switch (dev->ctrl.state) {
+	case NVME_CTRL_CONNECTING:
+	case NVME_CTRL_RESETTING:
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, disable controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
 		return BLK_EH_HANDLED;
+	default:
+		break;
 	}
 
 	/*
···
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				int qid, int depth)
 {
-	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-						      dev->ctrl.page_size);
-		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-		nvmeq->sq_cmds_io = dev->cmb + offset;
-	} else {
-		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-					&nvmeq->sq_dma_addr, GFP_KERNEL);
-		if (!nvmeq->sq_cmds)
-			return -ENOMEM;
-	}
+	/* CMB SQEs will be mapped before creation */
+	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+		return 0;
 
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+					&nvmeq->sq_dma_addr, GFP_KERNEL);
+	if (!nvmeq->sq_cmds)
+		return -ENOMEM;
 	return 0;
 }
···
 {
 	struct nvme_dev *dev = nvmeq->dev;
 	int result;
+
+	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+						      dev->ctrl.page_size);
+		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+		nvmeq->sq_cmds_io = dev->cmb + offset;
+	}
 
 	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
···
 	nvme_dev_disable(dev, false);
 
 	/*
-	 * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
 	 * initializing procedure here.
 	 */
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
 		dev_warn(dev->ctrl.device,
-			"failed to mark controller RECONNECTING\n");
+			"failed to mark controller CONNECTING\n");
 		goto out;
 	}

drivers/nvme/host/rdma.c (+8 -8)
···
 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 {
 	/* If we are resetting/deleting then do nothing */
-	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
 		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
 			ctrl->ctrl.state == NVME_CTRL_LIVE);
 		return;
···
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_start_queues(&ctrl->ctrl);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		/* state change failure should never happen */
 		WARN_ON_ONCE(1);
 		return;
···
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_rdma_shutdown_ctrl(ctrl, false);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		/* state change failure should never happen */
 		WARN_ON_ONCE(1);
 		return;
···
 	return;
 
 out_fail:
-	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-	nvme_remove_namespaces(&ctrl->ctrl);
-	nvme_rdma_shutdown_ctrl(ctrl, true);
-	nvme_uninit_ctrl(&ctrl->ctrl);
-	nvme_put_ctrl(&ctrl->ctrl);
+	++ctrl->ctrl.nr_reconnects;
+	nvme_rdma_reconnect_or_remove(ctrl);
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
···
 			GFP_KERNEL);
 	if (!ctrl->queues)
 		goto out_uninit_ctrl;
+
+	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+	WARN_ON_ONCE(!changed);
 
 	ret = nvme_rdma_configure_admin_queue(ctrl, true);
 	if (ret)

drivers/nvme/target/io-cmd.c (+5 -2)
···
 static u16 nvmet_discard_range(struct nvmet_ns *ns,
 		struct nvme_dsm_range *range, struct bio **bio)
 {
-	if (__blkdev_issue_discard(ns->bdev,
+	int ret;
+
+	ret = __blkdev_issue_discard(ns->bdev,
 			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
 			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-			GFP_KERNEL, 0, bio))
+			GFP_KERNEL, 0, bio);
+	if (ret && ret != -EOPNOTSUPP)
 		return NVME_SC_INTERNAL | NVME_SC_DNR;
 	return 0;
 }