NVMe: Cancel outstanding I/Os on queue deletion

If the device is hot-unplugged while there are active commands, we should
cancel the outstanding I/Os so that upper layers see them complete with an
error rather than simply disappear.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>

+32 -23
drivers/block/nvme.c
···
 	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
+/**
+ * nvme_cancel_ios - Cancel outstanding I/Os
+ * @nvmeq: The queue to cancel I/Os on
+ * @timeout: True to only cancel I/Os which have timed out
+ */
+static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
+{
+	int depth = nvmeq->q_depth - 1;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	unsigned long now = jiffies;
+	int cmdid;
+
+	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+		void *ctx;
+		nvme_completion_fn fn;
+		static struct nvme_completion cqe = {
+			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+		};
+
+		if (timeout && !time_after(now, info[cmdid].timeout))
+			continue;
+		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+		fn(nvmeq->dev, ctx, &cqe);
+	}
+}
+
 static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
 {
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
···
 {
 	struct nvme_queue *nvmeq = dev->queues[qid];
 	int vector = dev->entry[nvmeq->cq_vector].vector;
+
+	spin_lock_irq(&nvmeq->q_lock);
+	nvme_cancel_ios(nvmeq, false);
+	spin_unlock_irq(&nvmeq->q_lock);
 
 	irq_set_affinity_hint(vector, NULL);
 	free_irq(vector, nvmeq);
···
 	.compat_ioctl	= nvme_ioctl,
 };
 
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
-	int depth = nvmeq->q_depth - 1;
-	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
-	unsigned long now = jiffies;
-	int cmdid;
-
-	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
-		void *ctx;
-		nvme_completion_fn fn;
-		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
-		if (!time_after(now, info[cmdid].timeout))
-			continue;
-		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
-		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-		fn(nvmeq->dev, ctx, &cqe);
-	}
-}
-
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
 	while (bio_list_peek(&nvmeq->sq_cong)) {
···
 		spin_lock_irq(&nvmeq->q_lock);
 		if (nvme_process_cq(nvmeq))
 			printk("process_cq did something\n");
-		nvme_timeout_ios(nvmeq);
+		nvme_cancel_ios(nvmeq, true);
 		nvme_resubmit_bios(nvmeq);
 		spin_unlock_irq(&nvmeq->q_lock);
 	}
···
 	spin_lock(&dev_list_lock);
 	list_del(&dev->node);
 	spin_unlock(&dev_list_lock);
-
-	/* TODO: wait all I/O finished or cancel them */
 
 	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
 		list_del(&ns->list);
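
For reference, the cancellation pattern is easiest to see outside the driver:
walk the bitmap of outstanding command IDs, skip entries that have not yet
expired when only timed-out commands are wanted, and complete everything else
with an abort status so the submitter gets an error instead of a hung I/O.
The userspace model below is a sketch with hypothetical names (demo_queue,
cancel_ios, STATUS_ABORTED); it is not the driver's code.

/* Userspace model of the cancellation pattern: outstanding commands are
 * tracked as set bits in a bitmap of command IDs; cancelling walks the set
 * bits and invokes each command's stored completion callback with an
 * "aborted" status instead of letting the I/O silently vanish.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define QUEUE_DEPTH	16
#define STATUS_ABORTED	0x07	/* stand-in for NVME_SC_ABORT_REQ */

typedef void (*completion_fn)(int cmdid, int status);

struct demo_queue {
	unsigned long inflight;			/* bit N set => cmdid N outstanding */
	time_t deadline[QUEUE_DEPTH];		/* per-command expiry time */
	completion_fn done[QUEUE_DEPTH];	/* per-command completion callback */
};

/* Cancel everything, or only commands whose deadline has passed. */
static void cancel_ios(struct demo_queue *q, bool timeout_only)
{
	time_t now = time(NULL);

	for (int cmdid = 0; cmdid < QUEUE_DEPTH; cmdid++) {
		if (!(q->inflight & (1UL << cmdid)))
			continue;
		if (timeout_only && now <= q->deadline[cmdid])
			continue;
		fprintf(stderr, "Cancelling I/O %d\n", cmdid);
		q->inflight &= ~(1UL << cmdid);
		q->done[cmdid](cmdid, STATUS_ABORTED);
	}
}

static void complete_cmd(int cmdid, int status)
{
	printf("cmdid %d completed with status 0x%02x\n", cmdid, status);
}

int main(void)
{
	struct demo_queue q = { 0 };

	/* Two outstanding commands: one already expired, one with time left. */
	q.inflight = (1UL << 3) | (1UL << 5);
	q.deadline[3] = time(NULL) - 1;
	q.deadline[5] = time(NULL) + 60;
	q.done[3] = q.done[5] = complete_cmd;

	cancel_ios(&q, true);	/* periodic pass: only cmdid 3 is cancelled */
	cancel_ios(&q, false);	/* queue deletion: cmdid 5 goes too */
	return 0;
}

Passing true corresponds to the kthread's periodic timeout pass, while
passing false corresponds to nvme_free_queue() cancelling every remaining
command before the queue and its interrupt are torn down.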