Merge tag 'nvme-for-4.18' of git://git.infradead.org/nvme

Pull NVMe fixes from Christoph Hellwig:

- fix a regression in 4.18 that causes a memory leak on probe failure
(Keith Busch)

- fix a deadlock in the passthrough ioctl code (Scott Bauer)

- don't enable AENs if not supported (Weiping Zhang)

- fix an old regression in metadata handling in the passthrough ioctl
code (Roland Dreier)

* tag 'nvme-for-4.18' of git://git.infradead.org/nvme:
nvme: fix handling of metadata_len for NVME_IOCTL_IO_CMD
nvme: don't enable AEN if not supported
nvme: ensure forward progress during Admin passthru
nvme-pci: fix memory leak on probe failure

Changed files (+41 -34):

drivers/nvme/host/core.c (+34 -29)

@@ -100,6 +100,22 @@
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+					   unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+	/*
+	 * Revalidating a dead namespace sets capacity to 0. This will end
+	 * buffered writers dirtying pages that can't be synced.
+	 */
+	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+		return;
+	revalidate_disk(ns->disk);
+	blk_set_queue_dying(ns->queue);
+	/* Forcibly unquiesce queues to avoid blocking dispatch */
+	blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
@@ -1060,14 +1044,17 @@
 
 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-	u32 result;
+	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
 	int status;
 
-	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-			ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+	if (!supported_aens)
+		return;
+
+	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+			NULL, 0, &result);
 	if (status)
 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-			 ctrl->oaes & NVME_AEN_SUPPORTED);
+			 supported_aens);
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1170,19 +1151,15 @@
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-	struct nvme_ns *ns, *next;
-	LIST_HEAD(rm_list);
+	struct nvme_ns *ns;
 
-	down_write(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-			list_move_tail(&ns->list, &rm_list);
-		}
-	}
-	up_write(&ctrl->namespaces_rwsem);
+	down_read(&ctrl->namespaces_rwsem);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		if (ns->disk && nvme_revalidate_disk(ns->disk))
+			nvme_set_queue_dying(ns);
+	up_read(&ctrl->namespaces_rwsem);
 
-	list_for_each_entry_safe(ns, next, &rm_list, list)
-		nvme_ns_remove(ns);
+	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1233,7 +1218,7 @@
 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
 			0, &cmd.result, timeout);
 	nvme_passthru_end(ctrl, effects);
 
@@ -3153,7 +3138,7 @@
 
 	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-		if (ns->head->ns_id > nsid)
+		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
 			list_move_tail(&ns->list, &rm_list);
 	}
 	up_write(&ctrl->namespaces_rwsem);
@@ -3557,19 +3542,9 @@
 	if (ctrl->admin_q)
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		/*
-		 * Revalidating a dead namespace sets capacity to 0. This will
-		 * end buffered writers dirtying pages that can't be synced.
-		 */
-		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-			continue;
-		revalidate_disk(ns->disk);
-		blk_set_queue_dying(ns->queue);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		nvme_set_queue_dying(ns);
 
-		/* Forcibly unquiesce queues to avoid blocking dispatch */
-		blk_mq_unquiesce_queue(ns->queue);
-	}
 	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
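
For context on the metadata_len fix above: user space passes the metadata buffer and its length as separate fields of struct nvme_passthru_cmd, and before this change the kernel forwarded cmd.metadata (the pointer value) where the length belonged. Below is a minimal sketch of an NVME_IOCTL_IO_CMD read that exercises the field; the device path, namespace ID, and 512+8 byte LBA format are assumptions, and the ioctl needs CAP_SYS_ADMIN.

/* Hypothetical userspace sketch, not part of the kernel change. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	uint8_t data[512], meta[8];	/* assumes a 512+8 formatted namespace */
	struct nvme_passthru_cmd cmd;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* assumed device */

	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x02;			/* NVMe Read */
	cmd.nsid = 1;				/* assumed to match the device */
	cmd.addr = (uintptr_t)data;
	cmd.data_len = sizeof(data);
	cmd.metadata = (uintptr_t)meta;
	cmd.metadata_len = sizeof(meta);	/* the field the fix starts honouring */
	cmd.cdw10 = 0;				/* starting LBA, low 32 bits */
	cmd.cdw12 = 0;				/* NLB: zero-based, so one block */

	if (ioctl(fd, NVME_IOCTL_IO_CMD, &cmd))
		perror("NVME_IOCTL_IO_CMD");

	close(fd);
	return 0;
}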

drivers/nvme/host/pci.c (+7 -5)

@@ -2556,11 +2556,6 @@
 
 	quirks |= check_vendor_combination_bug(pdev);
 
-	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-			quirks);
-	if (result)
-		goto release_pools;
-
 	/*
 	 * Double check that our mempool alloc size will cover the biggest
 	 * command we support.
@@ -2573,6 +2578,11 @@
 		goto release_pools;
 	}
 
+	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+			quirks);
+	if (result)
+		goto release_mempool;
+
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
 	nvme_get_ctrl(&dev->ctrl);
@@ -2585,6 +2585,8 @@
 
 	return 0;
 
+ release_mempool:
+	mempool_destroy(dev->iod_mempool);
  release_pools:
 	nvme_release_prp_pools(dev);
  unmap:
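
The probe fix is the usual reverse-order unwind: nvme_init_ctrl() now runs after the iod_mempool allocation, so its failure path must destroy that mempool before falling through to the older labels. A generic sketch of the idiom, with hypothetical names and plain malloc/free standing in for the driver's allocators:

#include <stdlib.h>

static int probe_sketch(void)
{
	void *prp_pools, *iod_mempool, *ctrl;

	prp_pools = malloc(64);		/* step 1 */
	if (!prp_pools)
		goto out;

	iod_mempool = malloc(64);	/* step 2 */
	if (!iod_mempool)
		goto release_pools;

	ctrl = malloc(64);		/* step 3: ordered after step 2, so its
					 * error path must also undo step 2 */
	if (!ctrl)
		goto release_mempool;

	/* In the real driver these live for the device lifetime; freed here
	 * only to keep the sketch leak-free. */
	free(ctrl);
	free(iod_mempool);
	free(prp_pools);
	return 0;

 release_mempool:
	free(iod_mempool);
 release_pools:
	free(prp_pools);
 out:
	return -1;
}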