Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nvme: split nvme_uninit_ctrl into stop and uninit

Usually, before we tear down the controller we want to:
1. complete/cancel any inflight controller works
2. remove the controller's namespaces (only on removal though;
resets shouldn't remove any namespaces).

but we do not want to destroy the controller device as
we might use it for logging during the teardown stage.

This patch adds nvme_start_ctrl(), which queues inflight
controller works (AEN, namespace scan, queue start, and keep-alive
if kato is set), and nvme_stop_ctrl(), which cancels those works.
Namespace removal is left to the callers to handle.

Move nvme_uninit_ctrl after we are done with the
controller device.

Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>

+43 -60
+19 -2
drivers/nvme/host/core.c
··· 2591 2591 spin_unlock(&dev_list_lock); 2592 2592 } 2593 2593 2594 - void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 2594 + void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 2595 2595 { 2596 + nvme_stop_keep_alive(ctrl); 2596 2597 flush_work(&ctrl->async_event_work); 2597 2598 flush_work(&ctrl->scan_work); 2598 - nvme_remove_namespaces(ctrl); 2599 + } 2600 + EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 2599 2601 2602 + void nvme_start_ctrl(struct nvme_ctrl *ctrl) 2603 + { 2604 + if (ctrl->kato) 2605 + nvme_start_keep_alive(ctrl); 2606 + 2607 + if (ctrl->queue_count > 1) { 2608 + nvme_queue_scan(ctrl); 2609 + nvme_queue_async_events(ctrl); 2610 + nvme_start_queues(ctrl); 2611 + } 2612 + } 2613 + EXPORT_SYMBOL_GPL(nvme_start_ctrl); 2614 + 2615 + void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 2616 + { 2600 2617 device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); 2601 2618 2602 2619 spin_lock(&dev_list_lock);
+4 -12
drivers/nvme/host/fc.c
··· 2232 2232 out_delete_hw_queues: 2233 2233 nvme_fc_delete_hw_io_queues(ctrl); 2234 2234 out_cleanup_blk_queue: 2235 - nvme_stop_keep_alive(&ctrl->ctrl); 2236 2235 blk_cleanup_queue(ctrl->ctrl.connect_q); 2237 2236 out_free_tag_set: 2238 2237 blk_mq_free_tag_set(&ctrl->tag_set); ··· 2365 2366 goto out_disconnect_admin_queue; 2366 2367 } 2367 2368 2368 - nvme_start_keep_alive(&ctrl->ctrl); 2369 - 2370 2369 /* FC-NVME supports normal SGL Data Block Descriptors */ 2371 2370 2372 2371 if (opts->queue_size > ctrl->ctrl.maxcmd) { ··· 2398 2401 2399 2402 ctrl->ctrl.nr_reconnects = 0; 2400 2403 2401 - if (ctrl->ctrl.queue_count > 1) { 2402 - nvme_start_queues(&ctrl->ctrl); 2403 - nvme_queue_scan(&ctrl->ctrl); 2404 - nvme_queue_async_events(&ctrl->ctrl); 2405 - } 2404 + nvme_start_ctrl(&ctrl->ctrl); 2406 2405 2407 2406 return 0; /* Success */ 2408 2407 2409 2408 out_term_aen_ops: 2410 2409 nvme_fc_term_aen_ops(ctrl); 2411 - nvme_stop_keep_alive(&ctrl->ctrl); 2412 2410 out_disconnect_admin_queue: 2413 2411 /* send a Disconnect(association) LS to fc-nvme target */ 2414 2412 nvme_fc_xmt_disconnect_assoc(ctrl); ··· 2425 2433 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) 2426 2434 { 2427 2435 unsigned long flags; 2428 - 2429 - nvme_stop_keep_alive(&ctrl->ctrl); 2430 2436 2431 2437 spin_lock_irqsave(&ctrl->lock, flags); 2432 2438 ctrl->flags |= FCCTRL_TERMIO; ··· 2507 2517 2508 2518 cancel_work_sync(&ctrl->ctrl.reset_work); 2509 2519 cancel_delayed_work_sync(&ctrl->connect_work); 2510 - 2520 + nvme_stop_ctrl(&ctrl->ctrl); 2521 + nvme_remove_namespaces(&ctrl->ctrl); 2511 2522 /* 2512 2523 * kill the association on the link side. this will block 2513 2524 * waiting for io to terminate ··· 2603 2612 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); 2604 2613 int ret; 2605 2614 2615 + nvme_stop_ctrl(&ctrl->ctrl); 2606 2616 /* will block will waiting for io to terminate */ 2607 2617 nvme_fc_delete_association(ctrl); 2608 2618
+2
drivers/nvme/host/nvme.h
··· 280 280 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 281 281 const struct nvme_ctrl_ops *ops, unsigned long quirks); 282 282 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl); 283 + void nvme_start_ctrl(struct nvme_ctrl *ctrl); 284 + void nvme_stop_ctrl(struct nvme_ctrl *ctrl); 283 285 void nvme_put_ctrl(struct nvme_ctrl *ctrl); 284 286 int nvme_init_identify(struct nvme_ctrl *ctrl); 285 287
+4 -12
drivers/nvme/host/pci.c
··· 2135 2135 goto out; 2136 2136 2137 2137 /* 2138 - * A controller that can not execute IO typically requires user 2139 - * intervention to correct. For such degraded controllers, the driver 2140 - * should not submit commands the user did not request, so skip 2141 - * registering for asynchronous event notification on this condition. 2142 - */ 2143 - if (dev->online_queues > 1) 2144 - nvme_queue_async_events(&dev->ctrl); 2145 - 2146 - /* 2147 2138 * Keep the controller around but remove all namespaces if we don't have 2148 2139 * any working I/O queue. 2149 2140 */ ··· 2154 2163 goto out; 2155 2164 } 2156 2165 2157 - if (dev->online_queues > 1) 2158 - nvme_queue_scan(&dev->ctrl); 2166 + nvme_start_ctrl(&dev->ctrl); 2159 2167 return; 2160 2168 2161 2169 out: ··· 2331 2341 } 2332 2342 2333 2343 flush_work(&dev->ctrl.reset_work); 2334 - nvme_uninit_ctrl(&dev->ctrl); 2344 + nvme_stop_ctrl(&dev->ctrl); 2345 + nvme_remove_namespaces(&dev->ctrl); 2335 2346 nvme_dev_disable(dev, true); 2336 2347 nvme_free_host_mem(dev); 2337 2348 nvme_dev_remove_admin(dev); 2338 2349 nvme_free_queues(dev, 0); 2350 + nvme_uninit_ctrl(&dev->ctrl); 2339 2351 nvme_release_prp_pools(dev); 2340 2352 nvme_dev_unmap(dev); 2341 2353 nvme_put_ctrl(&dev->ctrl);
+8 -21
drivers/nvme/host/rdma.c
··· 732 732 if (ret) 733 733 goto requeue; 734 734 735 - nvme_start_keep_alive(&ctrl->ctrl); 736 - 737 735 if (ctrl->ctrl.queue_count > 1) { 738 736 ret = nvme_rdma_init_io_queues(ctrl); 739 737 if (ret) ··· 749 751 WARN_ON_ONCE(!changed); 750 752 ctrl->ctrl.nr_reconnects = 0; 751 753 752 - if (ctrl->ctrl.queue_count > 1) { 753 - nvme_queue_scan(&ctrl->ctrl); 754 - nvme_queue_async_events(&ctrl->ctrl); 755 - } 754 + nvme_start_ctrl(&ctrl->ctrl); 756 755 757 756 dev_info(ctrl->ctrl.device, "Successfully reconnected\n"); 758 757 ··· 767 772 struct nvme_rdma_ctrl, err_work); 768 773 int i; 769 774 770 - nvme_stop_keep_alive(&ctrl->ctrl); 775 + nvme_stop_ctrl(&ctrl->ctrl); 771 776 772 777 for (i = 0; i < ctrl->ctrl.queue_count; i++) 773 778 clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); ··· 1598 1603 if (error) 1599 1604 goto out_cleanup_queue; 1600 1605 1601 - nvme_start_keep_alive(&ctrl->ctrl); 1602 - 1603 1606 return 0; 1604 1607 1605 1608 out_cleanup_queue: ··· 1615 1622 1616 1623 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) 1617 1624 { 1618 - nvme_stop_keep_alive(&ctrl->ctrl); 1619 1625 cancel_work_sync(&ctrl->err_work); 1620 1626 cancel_delayed_work_sync(&ctrl->reconnect_work); 1621 1627 ··· 1637 1645 1638 1646 static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) 1639 1647 { 1640 - nvme_uninit_ctrl(&ctrl->ctrl); 1648 + nvme_stop_ctrl(&ctrl->ctrl); 1649 + nvme_remove_namespaces(&ctrl->ctrl); 1641 1650 if (shutdown) 1642 1651 nvme_rdma_shutdown_ctrl(ctrl); 1643 1652 1653 + nvme_uninit_ctrl(&ctrl->ctrl); 1644 1654 if (ctrl->ctrl.tagset) { 1645 1655 blk_cleanup_queue(ctrl->ctrl.connect_q); 1646 1656 blk_mq_free_tag_set(&ctrl->tag_set); ··· 1704 1710 int ret; 1705 1711 bool changed; 1706 1712 1713 + nvme_stop_ctrl(&ctrl->ctrl); 1707 1714 nvme_rdma_shutdown_ctrl(ctrl); 1708 1715 1709 1716 ret = nvme_rdma_configure_admin_queue(ctrl); ··· 1734 1739 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 1735 
1740 WARN_ON_ONCE(!changed); 1736 1741 1737 - if (ctrl->ctrl.queue_count > 1) { 1738 - nvme_start_queues(&ctrl->ctrl); 1739 - nvme_queue_scan(&ctrl->ctrl); 1740 - nvme_queue_async_events(&ctrl->ctrl); 1741 - } 1742 + nvme_start_ctrl(&ctrl->ctrl); 1742 1743 1743 1744 return; 1744 1745 ··· 1922 1931 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); 1923 1932 mutex_unlock(&nvme_rdma_ctrl_mutex); 1924 1933 1925 - if (ctrl->ctrl.queue_count > 1) { 1926 - nvme_queue_scan(&ctrl->ctrl); 1927 - nvme_queue_async_events(&ctrl->ctrl); 1928 - } 1934 + nvme_start_ctrl(&ctrl->ctrl); 1929 1935 1930 1936 return &ctrl->ctrl; 1931 1937 1932 1938 out_remove_admin_queue: 1933 - nvme_stop_keep_alive(&ctrl->ctrl); 1934 1939 nvme_rdma_destroy_admin_queue(ctrl); 1935 1940 out_kfree_queues: 1936 1941 kfree(ctrl->queues);
+6 -13
drivers/nvme/target/loop.c
··· 407 407 if (error) 408 408 goto out_cleanup_queue; 409 409 410 - nvme_start_keep_alive(&ctrl->ctrl); 411 - 412 410 return 0; 413 411 414 412 out_cleanup_queue: ··· 420 422 421 423 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) 422 424 { 423 - nvme_stop_keep_alive(&ctrl->ctrl); 424 - 425 425 if (ctrl->ctrl.queue_count > 1) { 426 426 nvme_stop_queues(&ctrl->ctrl); 427 427 blk_mq_tagset_busy_iter(&ctrl->tag_set, ··· 442 446 struct nvme_loop_ctrl *ctrl = container_of(work, 443 447 struct nvme_loop_ctrl, delete_work); 444 448 445 - nvme_uninit_ctrl(&ctrl->ctrl); 449 + nvme_stop_ctrl(&ctrl->ctrl); 450 + nvme_remove_namespaces(&ctrl->ctrl); 446 451 nvme_loop_shutdown_ctrl(ctrl); 452 + nvme_uninit_ctrl(&ctrl->ctrl); 447 453 nvme_put_ctrl(&ctrl->ctrl); 448 454 } 449 455 ··· 493 495 bool changed; 494 496 int ret; 495 497 498 + nvme_stop_ctrl(&ctrl->ctrl); 496 499 nvme_loop_shutdown_ctrl(ctrl); 497 500 498 501 ret = nvme_loop_configure_admin_queue(ctrl); ··· 514 515 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 515 516 WARN_ON_ONCE(!changed); 516 517 517 - nvme_queue_scan(&ctrl->ctrl); 518 - nvme_queue_async_events(&ctrl->ctrl); 519 - 520 - nvme_start_queues(&ctrl->ctrl); 518 + nvme_start_ctrl(&ctrl->ctrl); 521 519 522 520 return; 523 521 ··· 649 653 list_add_tail(&ctrl->list, &nvme_loop_ctrl_list); 650 654 mutex_unlock(&nvme_loop_ctrl_mutex); 651 655 652 - if (opts->nr_io_queues) { 653 - nvme_queue_scan(&ctrl->ctrl); 654 - nvme_queue_async_events(&ctrl->ctrl); 655 - } 656 + nvme_start_ctrl(&ctrl->ctrl); 656 657 657 658 return &ctrl->ctrl; 658 659