Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVM Express device driver
4 * Copyright (c) 2011-2014, Intel Corporation.
5 */
6
7#include <linux/acpi.h>
8#include <linux/aer.h>
9#include <linux/async.h>
10#include <linux/blkdev.h>
11#include <linux/blk-mq.h>
12#include <linux/blk-mq-pci.h>
13#include <linux/blk-integrity.h>
14#include <linux/dmi.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/mutex.h>
21#include <linux/once.h>
22#include <linux/pci.h>
23#include <linux/suspend.h>
24#include <linux/t10-pi.h>
25#include <linux/types.h>
26#include <linux/io-64-nonatomic-lo-hi.h>
27#include <linux/io-64-nonatomic-hi-lo.h>
28#include <linux/sed-opal.h>
29#include <linux/pci-p2pdma.h>
30
31#include "trace.h"
32#include "nvme.h"
33
34#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
35#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
36
37#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
38
39/*
40 * These can be higher, but we need to ensure that no command requires
41 * an sg allocation that needs more than a page of data.
42 */
43#define NVME_MAX_KB_SZ 4096
44#define NVME_MAX_SEGS 127
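/*
 * Illustrative arithmetic, assuming a 4 KiB PAGE_SIZE and the 16-byte
 * struct nvme_sgl_desc:
 *
 *   SGES_PER_PAGE  = 4096 / 16 = 256 SGL descriptors per page
 *   NVME_MAX_KB_SZ = 4096 KiB  -> transfers are capped at 4 MiB
 *   NVME_MAX_SEGS  = 127       -> at most 127 scatter/gather segments,
 *                                 which keeps the per-request descriptor
 *                                 lists within a single page.
 */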
45
46static int use_threaded_interrupts;
47module_param(use_threaded_interrupts, int, 0);
48
49static bool use_cmb_sqes = true;
50module_param(use_cmb_sqes, bool, 0444);
51MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
52
53static unsigned int max_host_mem_size_mb = 128;
54module_param(max_host_mem_size_mb, uint, 0444);
55MODULE_PARM_DESC(max_host_mem_size_mb,
56 "Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
57
58static unsigned int sgl_threshold = SZ_32K;
59module_param(sgl_threshold, uint, 0644);
60MODULE_PARM_DESC(sgl_threshold,
61 "Use SGLs when average request segment size is larger than or equal to "
62 "this size. Use 0 to disable SGLs.");
63
64#define NVME_PCI_MIN_QUEUE_SIZE 2
65#define NVME_PCI_MAX_QUEUE_SIZE 4095
66static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
67static const struct kernel_param_ops io_queue_depth_ops = {
68 .set = io_queue_depth_set,
69 .get = param_get_uint,
70};
71
72static unsigned int io_queue_depth = 1024;
73module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
74MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2 and < 4096");
75
76static int io_queue_count_set(const char *val, const struct kernel_param *kp)
77{
78 unsigned int n;
79 int ret;
80
81 ret = kstrtouint(val, 10, &n);
82 if (ret != 0 || n > num_possible_cpus())
83 return -EINVAL;
84 return param_set_uint(val, kp);
85}
86
87static const struct kernel_param_ops io_queue_count_ops = {
88 .set = io_queue_count_set,
89 .get = param_get_uint,
90};
91
92static unsigned int write_queues;
93module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
94MODULE_PARM_DESC(write_queues,
95 "Number of queues to use for writes. If not set, reads and writes "
96 "will share a queue set.");
97
98static unsigned int poll_queues;
99module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
100MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
101
102static bool noacpi;
103module_param(noacpi, bool, 0444);
104MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
105
106struct nvme_dev;
107struct nvme_queue;
108
109static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
110static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
111
112/*
113 * Represents an NVM Express device. Each nvme_dev is a PCI function.
114 */
115struct nvme_dev {
116 struct nvme_queue *queues;
117 struct blk_mq_tag_set tagset;
118 struct blk_mq_tag_set admin_tagset;
119 u32 __iomem *dbs;
120 struct device *dev;
121 struct dma_pool *prp_page_pool;
122 struct dma_pool *prp_small_pool;
123 unsigned online_queues;
124 unsigned max_qid;
125 unsigned io_queues[HCTX_MAX_TYPES];
126 unsigned int num_vecs;
127 u32 q_depth;
128 int io_sqes;
129 u32 db_stride;
130 void __iomem *bar;
131 unsigned long bar_mapped_size;
132 struct work_struct remove_work;
133 struct mutex shutdown_lock;
134 bool subsystem;
135 u64 cmb_size;
136 bool cmb_use_sqes;
137 u32 cmbsz;
138 u32 cmbloc;
139 struct nvme_ctrl ctrl;
140 u32 last_ps;
141 bool hmb;
142
143 mempool_t *iod_mempool;
144
145 /* shadow doorbell buffer support: */
146 u32 *dbbuf_dbs;
147 dma_addr_t dbbuf_dbs_dma_addr;
148 u32 *dbbuf_eis;
149 dma_addr_t dbbuf_eis_dma_addr;
150
151 /* host memory buffer support: */
152 u64 host_mem_size;
153 u32 nr_host_mem_descs;
154 dma_addr_t host_mem_descs_dma;
155 struct nvme_host_mem_buf_desc *host_mem_descs;
156 void **host_mem_desc_bufs;
157 unsigned int nr_allocated_queues;
158 unsigned int nr_write_queues;
159 unsigned int nr_poll_queues;
160
161 bool attrs_added;
162};
163
164static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
165{
166 return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
167 NVME_PCI_MAX_QUEUE_SIZE);
168}
169
170static inline unsigned int sq_idx(unsigned int qid, u32 stride)
171{
172 return qid * 2 * stride;
173}
174
175static inline unsigned int cq_idx(unsigned int qid, u32 stride)
176{
177 return (qid * 2 + 1) * stride;
178}
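/*
 * Worked example of the doorbell-buffer indexing above: each queue owns
 * two 32-bit slots, an SQ-tail slot followed by a CQ-head slot, spaced by
 * the controller's doorbell stride.  With db_stride == 1 and qid == 3:
 *
 *   sq_idx(3, 1) = 3 * 2 * 1       = 6   (SQ tail shadow slot)
 *   cq_idx(3, 1) = (3 * 2 + 1) * 1 = 7   (CQ head shadow slot)
 *
 * This mirrors the layout of the hardware doorbell registers in the BAR.
 */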
179
180static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
181{
182 return container_of(ctrl, struct nvme_dev, ctrl);
183}
184
185/*
186 * An NVM Express queue. Each device has at least two (one for admin
187 * commands and one for I/O commands).
188 */
189struct nvme_queue {
190 struct nvme_dev *dev;
191 spinlock_t sq_lock;
192 void *sq_cmds;
193 /* only used for poll queues: */
194 spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
195 struct nvme_completion *cqes;
196 dma_addr_t sq_dma_addr;
197 dma_addr_t cq_dma_addr;
198 u32 __iomem *q_db;
199 u32 q_depth;
200 u16 cq_vector;
201 u16 sq_tail;
202 u16 last_sq_tail;
203 u16 cq_head;
204 u16 qid;
205 u8 cq_phase;
206 u8 sqes;
207 unsigned long flags;
208#define NVMEQ_ENABLED 0
209#define NVMEQ_SQ_CMB 1
210#define NVMEQ_DELETE_ERROR 2
211#define NVMEQ_POLLED 3
212 u32 *dbbuf_sq_db;
213 u32 *dbbuf_cq_db;
214 u32 *dbbuf_sq_ei;
215 u32 *dbbuf_cq_ei;
216 struct completion delete_done;
217};
218
219/*
220 * The nvme_iod describes the data in an I/O.
221 *
222 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
223 * to the actual struct scatterlist.
224 */
225struct nvme_iod {
226 struct nvme_request req;
227 struct nvme_command cmd;
228 struct nvme_queue *nvmeq;
229 bool use_sgl;
230 int aborted;
231 int npages; /* In the PRP list. 0 means small pool in use */
232 int nents; /* Used in scatterlist */
233 dma_addr_t first_dma;
234 unsigned int dma_len; /* length of single DMA segment mapping */
235 dma_addr_t meta_dma;
236 struct scatterlist *sg;
237};
238
239static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
240{
241 return dev->nr_allocated_queues * 8 * dev->db_stride;
242}
243
244static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
245{
246 unsigned int mem_size = nvme_dbbuf_size(dev);
247
248 if (dev->dbbuf_dbs) {
249 /*
250 * Clear the dbbuf memory so the driver doesn't observe stale
251 * values from the previous instantiation.
252 */
253 memset(dev->dbbuf_dbs, 0, mem_size);
254 memset(dev->dbbuf_eis, 0, mem_size);
255 return 0;
256 }
257
258 dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
259 &dev->dbbuf_dbs_dma_addr,
260 GFP_KERNEL);
261 if (!dev->dbbuf_dbs)
262 return -ENOMEM;
263 dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
264 &dev->dbbuf_eis_dma_addr,
265 GFP_KERNEL);
266 if (!dev->dbbuf_eis) {
267 dma_free_coherent(dev->dev, mem_size,
268 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
269 dev->dbbuf_dbs = NULL;
270 return -ENOMEM;
271 }
272
273 return 0;
274}
275
276static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
277{
278 unsigned int mem_size = nvme_dbbuf_size(dev);
279
280 if (dev->dbbuf_dbs) {
281 dma_free_coherent(dev->dev, mem_size,
282 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
283 dev->dbbuf_dbs = NULL;
284 }
285 if (dev->dbbuf_eis) {
286 dma_free_coherent(dev->dev, mem_size,
287 dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
288 dev->dbbuf_eis = NULL;
289 }
290}
291
292static void nvme_dbbuf_init(struct nvme_dev *dev,
293 struct nvme_queue *nvmeq, int qid)
294{
295 if (!dev->dbbuf_dbs || !qid)
296 return;
297
298 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
299 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
300 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
301 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
302}
303
304static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
305{
306 if (!nvmeq->qid)
307 return;
308
309 nvmeq->dbbuf_sq_db = NULL;
310 nvmeq->dbbuf_cq_db = NULL;
311 nvmeq->dbbuf_sq_ei = NULL;
312 nvmeq->dbbuf_cq_ei = NULL;
313}
314
315static void nvme_dbbuf_set(struct nvme_dev *dev)
316{
317 struct nvme_command c = { };
318 unsigned int i;
319
320 if (!dev->dbbuf_dbs)
321 return;
322
323 c.dbbuf.opcode = nvme_admin_dbbuf;
324 c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
325 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
326
327 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
328 dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
329 /* Free memory and continue on */
330 nvme_dbbuf_dma_free(dev);
331
332 for (i = 1; i <= dev->online_queues; i++)
333 nvme_dbbuf_free(&dev->queues[i]);
334 }
335}
336
337static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
338{
339 return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
340}
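/*
 * Illustrative note: this is the usual event-index test done in 16-bit
 * wrap-around arithmetic.  It answers "does event_idx lie in [old, new_idx)
 * modulo 2^16?", i.e. did this update move the doorbell past the value the
 * controller asked to be notified at.  Example with old = 10, new_idx = 12:
 *
 *   event_idx = 11: (u16)(12 - 11 - 1) = 0     < (u16)(12 - 10) = 2 -> MMIO
 *   event_idx = 15: (u16)(12 - 15 - 1) = 65532 >= 2                 -> skip
 */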
341
342/* Update dbbuf and return true if an MMIO is required */
343static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
344 volatile u32 *dbbuf_ei)
345{
346 if (dbbuf_db) {
347 u16 old_value;
348
349 /*
350 * Ensure that the queue is written before updating
351 * the doorbell in memory
352 */
353 wmb();
354
355 old_value = *dbbuf_db;
356 *dbbuf_db = value;
357
358 /*
359 * Ensure that the doorbell is updated before reading the event
360 * index from memory. The controller needs to provide similar
361 * ordering to ensure the event index is updated before reading
362 * the doorbell.
363 */
364 mb();
365
366 if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
367 return false;
368 }
369
370 return true;
371}
372
373/*
374 * Will slightly overestimate the number of pages needed. This is OK
375 * as it only leads to a small amount of wasted memory for the lifetime of
376 * the I/O.
377 */
378static int nvme_pci_npages_prp(void)
379{
380 unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ * 1024 + NVME_CTRL_PAGE_SIZE,
381 NVME_CTRL_PAGE_SIZE);
382 return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
383}
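/*
 * Worked example, assuming a 4 KiB PAGE_SIZE and the driver's 4 KiB
 * NVME_CTRL_PAGE_SIZE:
 *
 *   nprps = DIV_ROUND_UP(4 MiB + 4 KiB, 4 KiB) = 1025 PRP entries
 *   pages = DIV_ROUND_UP(8 * 1025, 4096 - 8)   = 3 PRP-list pages
 *
 * The "- 8" accounts for the final 8-byte slot of every full PRP-list page,
 * which chains to the next list page instead of pointing at data (see
 * nvme_pci_setup_prps()).
 */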
384
385/*
386 * Calculates the number of pages needed for the SGL segments. For example a 4k
387 * page can accommodate 256 SGL descriptors.
388 */
389static int nvme_pci_npages_sgl(void)
390{
391 return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
392 PAGE_SIZE);
393}
394
395static size_t nvme_pci_iod_alloc_size(void)
396{
397 size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
398
399 return sizeof(__le64 *) * npages +
400 sizeof(struct scatterlist) * NVME_MAX_SEGS;
401}
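/*
 * Illustrative sizing, assuming 4 KiB pages: nvme_pci_npages_sgl() is
 * DIV_ROUND_UP(127 * 16, 4096) = 1, so npages = max(3, 1) = 3.  Each
 * mempool element therefore reserves three chunk pointers plus room for
 * NVME_MAX_SEGS scatterlist entries; the chunk pointers are stored right
 * behind the scatterlist array (see nvme_pci_iod_list()).
 */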
402
403static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
404 unsigned int hctx_idx)
405{
406 struct nvme_dev *dev = data;
407 struct nvme_queue *nvmeq = &dev->queues[0];
408
409 WARN_ON(hctx_idx != 0);
410 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
411
412 hctx->driver_data = nvmeq;
413 return 0;
414}
415
416static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
417 unsigned int hctx_idx)
418{
419 struct nvme_dev *dev = data;
420 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
421
422 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
423 hctx->driver_data = nvmeq;
424 return 0;
425}
426
427static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
428 unsigned int hctx_idx, unsigned int numa_node)
429{
430 struct nvme_dev *dev = set->driver_data;
431 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
432 int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
433 struct nvme_queue *nvmeq = &dev->queues[queue_idx];
434
435 BUG_ON(!nvmeq);
436 iod->nvmeq = nvmeq;
437
438 nvme_req(req)->ctrl = &dev->ctrl;
439 nvme_req(req)->cmd = &iod->cmd;
440 return 0;
441}
442
443static int queue_irq_offset(struct nvme_dev *dev)
444{
445 /* if we have more than 1 vec, admin queue offsets us by 1 */
446 if (dev->num_vecs > 1)
447 return 1;
448
449 return 0;
450}
451
452static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
453{
454 struct nvme_dev *dev = set->driver_data;
455 int i, qoff, offset;
456
457 offset = queue_irq_offset(dev);
458 for (i = 0, qoff = 0; i < set->nr_maps; i++) {
459 struct blk_mq_queue_map *map = &set->map[i];
460
461 map->nr_queues = dev->io_queues[i];
462 if (!map->nr_queues) {
463 BUG_ON(i == HCTX_TYPE_DEFAULT);
464 continue;
465 }
466
467 /*
468 * The poll queue(s) don't have an IRQ (and hence no IRQ
469 * affinity), so use the regular blk-mq CPU mapping.
470 */
471 map->queue_offset = qoff;
472 if (i != HCTX_TYPE_POLL && offset)
473 blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
474 else
475 blk_mq_map_queues(map);
476 qoff += map->nr_queues;
477 offset += map->nr_queues;
478 }
479
480 return 0;
481}
482
483/*
484 * Write sq tail if we are asked to, or if the next command would wrap.
485 */
486static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
487{
488 if (!write_sq) {
489 u16 next_tail = nvmeq->sq_tail + 1;
490
491 if (next_tail == nvmeq->q_depth)
492 next_tail = 0;
493 if (next_tail != nvmeq->last_sq_tail)
494 return;
495 }
496
497 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
498 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
499 writel(nvmeq->sq_tail, nvmeq->q_db);
500 nvmeq->last_sq_tail = nvmeq->sq_tail;
501}
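/*
 * Illustrative note on the batching above: nvme_queue_rq() submits with
 * write_sq == false for all but the last request of a blk-mq batch, so the
 * expensive MMIO doorbell write normally happens once per batch (from the
 * last request or from nvme_commit_rqs()).  The early-return check still
 * flushes a deferred doorbell before sq_tail could advance all the way
 * around onto last_sq_tail.
 */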
502
503/**
504 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
505 * @nvmeq: The queue to use
506 * @cmd: The command to send
507 * @write_sq: whether to write to the SQ doorbell
508 */
509static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
510 bool write_sq)
511{
512 spin_lock(&nvmeq->sq_lock);
513 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
514 cmd, sizeof(*cmd));
515 if (++nvmeq->sq_tail == nvmeq->q_depth)
516 nvmeq->sq_tail = 0;
517 nvme_write_sq_db(nvmeq, write_sq);
518 spin_unlock(&nvmeq->sq_lock);
519}
520
521static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
522{
523 struct nvme_queue *nvmeq = hctx->driver_data;
524
525 spin_lock(&nvmeq->sq_lock);
526 if (nvmeq->sq_tail != nvmeq->last_sq_tail)
527 nvme_write_sq_db(nvmeq, true);
528 spin_unlock(&nvmeq->sq_lock);
529}
530
531static void **nvme_pci_iod_list(struct request *req)
532{
533 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
534 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
535}
536
537static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
538{
539 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
540 int nseg = blk_rq_nr_phys_segments(req);
541 unsigned int avg_seg_size;
542
543 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
544
545 if (!nvme_ctrl_sgl_supported(&dev->ctrl))
546 return false;
547 if (!iod->nvmeq->qid)
548 return false;
549 if (!sgl_threshold || avg_seg_size < sgl_threshold)
550 return false;
551 return true;
552}
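/*
 * Example of the heuristic above: a 256 KiB request split into 8 physical
 * segments averages 32 KiB per segment, which meets the default
 * sgl_threshold of SZ_32K, so the command is built with an SGL.  The same
 * 256 KiB spread over 128 segments averages 2 KiB and falls back to PRPs.
 * Admin commands (qid 0) always use PRPs regardless of the threshold.
 */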
553
554static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
555{
556 const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
557 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
558 dma_addr_t dma_addr = iod->first_dma;
559 int i;
560
561 for (i = 0; i < iod->npages; i++) {
562 __le64 *prp_list = nvme_pci_iod_list(req)[i];
563 dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
564
565 dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
566 dma_addr = next_dma_addr;
567 }
568}
569
570static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
571{
572 const int last_sg = SGES_PER_PAGE - 1;
573 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
574 dma_addr_t dma_addr = iod->first_dma;
575 int i;
576
577 for (i = 0; i < iod->npages; i++) {
578 struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
579 dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
580
581 dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
582 dma_addr = next_dma_addr;
583 }
584}
585
586static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
587{
588 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
589
590 if (is_pci_p2pdma_page(sg_page(iod->sg)))
591 pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
592 rq_dma_dir(req));
593 else
594 dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
595}
596
597static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
598{
599 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
600
601 if (iod->dma_len) {
602 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
603 rq_dma_dir(req));
604 return;
605 }
606
607 WARN_ON_ONCE(!iod->nents);
608
609 nvme_unmap_sg(dev, req);
610 if (iod->npages == 0)
611 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
612 iod->first_dma);
613 else if (iod->use_sgl)
614 nvme_free_sgls(dev, req);
615 else
616 nvme_free_prps(dev, req);
617 mempool_free(iod->sg, dev->iod_mempool);
618}
619
620static void nvme_print_sgl(struct scatterlist *sgl, int nents)
621{
622 int i;
623 struct scatterlist *sg;
624
625 for_each_sg(sgl, sg, nents, i) {
626 dma_addr_t phys = sg_phys(sg);
627 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
628 "dma_address:%pad dma_length:%d\n",
629 i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
630 sg_dma_len(sg));
631 }
632}
633
634static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
635 struct request *req, struct nvme_rw_command *cmnd)
636{
637 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
638 struct dma_pool *pool;
639 int length = blk_rq_payload_bytes(req);
640 struct scatterlist *sg = iod->sg;
641 int dma_len = sg_dma_len(sg);
642 u64 dma_addr = sg_dma_address(sg);
643 int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
644 __le64 *prp_list;
645 void **list = nvme_pci_iod_list(req);
646 dma_addr_t prp_dma;
647 int nprps, i;
648
649 length -= (NVME_CTRL_PAGE_SIZE - offset);
650 if (length <= 0) {
651 iod->first_dma = 0;
652 goto done;
653 }
654
655 dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
656 if (dma_len) {
657 dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
658 } else {
659 sg = sg_next(sg);
660 dma_addr = sg_dma_address(sg);
661 dma_len = sg_dma_len(sg);
662 }
663
664 if (length <= NVME_CTRL_PAGE_SIZE) {
665 iod->first_dma = dma_addr;
666 goto done;
667 }
668
669 nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
670 if (nprps <= (256 / 8)) {
671 pool = dev->prp_small_pool;
672 iod->npages = 0;
673 } else {
674 pool = dev->prp_page_pool;
675 iod->npages = 1;
676 }
677
678 prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
679 if (!prp_list) {
680 iod->first_dma = dma_addr;
681 iod->npages = -1;
682 return BLK_STS_RESOURCE;
683 }
684 list[0] = prp_list;
685 iod->first_dma = prp_dma;
686 i = 0;
687 for (;;) {
688 if (i == NVME_CTRL_PAGE_SIZE >> 3) {
689 __le64 *old_prp_list = prp_list;
690 prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
691 if (!prp_list)
692 goto free_prps;
693 list[iod->npages++] = prp_list;
694 prp_list[0] = old_prp_list[i - 1];
695 old_prp_list[i - 1] = cpu_to_le64(prp_dma);
696 i = 1;
697 }
698 prp_list[i++] = cpu_to_le64(dma_addr);
699 dma_len -= NVME_CTRL_PAGE_SIZE;
700 dma_addr += NVME_CTRL_PAGE_SIZE;
701 length -= NVME_CTRL_PAGE_SIZE;
702 if (length <= 0)
703 break;
704 if (dma_len > 0)
705 continue;
706 if (unlikely(dma_len < 0))
707 goto bad_sgl;
708 sg = sg_next(sg);
709 dma_addr = sg_dma_address(sg);
710 dma_len = sg_dma_len(sg);
711 }
712done:
713 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
714 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
715 return BLK_STS_OK;
716free_prps:
717 nvme_free_prps(dev, req);
718 return BLK_STS_RESOURCE;
719bad_sgl:
720 WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
721 "Invalid SGL for payload:%d nents:%d\n",
722 blk_rq_payload_bytes(req), iod->nents);
723 return BLK_STS_IOERR;
724}
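/*
 * Worked example of the PRP construction above, assuming a 4 KiB
 * NVME_CTRL_PAGE_SIZE: a page-aligned 16 KiB request produces
 *
 *   prp1       = DMA address of the first 4 KiB of data
 *   prp2       = DMA address of a PRP list from prp_small_pool
 *   list[0..2] = DMA addresses of the remaining three 4 KiB pages
 *
 * Only when a list page fills (512 slots of 8 bytes) does the
 * "i == NVME_CTRL_PAGE_SIZE >> 3" branch rewrite its last slot to chain to
 * a freshly allocated list page.
 */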
725
726static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
727 struct scatterlist *sg)
728{
729 sge->addr = cpu_to_le64(sg_dma_address(sg));
730 sge->length = cpu_to_le32(sg_dma_len(sg));
731 sge->type = NVME_SGL_FMT_DATA_DESC << 4;
732}
733
734static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
735 dma_addr_t dma_addr, int entries)
736{
737 sge->addr = cpu_to_le64(dma_addr);
738 if (entries < SGES_PER_PAGE) {
739 sge->length = cpu_to_le32(entries * sizeof(*sge));
740 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
741 } else {
742 sge->length = cpu_to_le32(PAGE_SIZE);
743 sge->type = NVME_SGL_FMT_SEG_DESC << 4;
744 }
745}
746
747static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
748 struct request *req, struct nvme_rw_command *cmd, int entries)
749{
750 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
751 struct dma_pool *pool;
752 struct nvme_sgl_desc *sg_list;
753 struct scatterlist *sg = iod->sg;
754 dma_addr_t sgl_dma;
755 int i = 0;
756
757 /* setting the transfer type as SGL */
758 cmd->flags = NVME_CMD_SGL_METABUF;
759
760 if (entries == 1) {
761 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
762 return BLK_STS_OK;
763 }
764
765 if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
766 pool = dev->prp_small_pool;
767 iod->npages = 0;
768 } else {
769 pool = dev->prp_page_pool;
770 iod->npages = 1;
771 }
772
773 sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
774 if (!sg_list) {
775 iod->npages = -1;
776 return BLK_STS_RESOURCE;
777 }
778
779 nvme_pci_iod_list(req)[0] = sg_list;
780 iod->first_dma = sgl_dma;
781
782 nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
783
784 do {
785 if (i == SGES_PER_PAGE) {
786 struct nvme_sgl_desc *old_sg_desc = sg_list;
787 struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
788
789 sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
790 if (!sg_list)
791 goto free_sgls;
792
793 i = 0;
794 nvme_pci_iod_list(req)[iod->npages++] = sg_list;
795 sg_list[i++] = *link;
796 nvme_pci_sgl_set_seg(link, sgl_dma, entries);
797 }
798
799 nvme_pci_sgl_set_data(&sg_list[i++], sg);
800 sg = sg_next(sg);
801 } while (--entries > 0);
802
803 return BLK_STS_OK;
804free_sgls:
805 nvme_free_sgls(dev, req);
806 return BLK_STS_RESOURCE;
807}
808
809static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
810 struct request *req, struct nvme_rw_command *cmnd,
811 struct bio_vec *bv)
812{
813 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
814 unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
815 unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
816
817 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
818 if (dma_mapping_error(dev->dev, iod->first_dma))
819 return BLK_STS_RESOURCE;
820 iod->dma_len = bv->bv_len;
821
822 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
823 if (bv->bv_len > first_prp_len)
824 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
825 return BLK_STS_OK;
826}
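/*
 * Illustrative example, assuming PAGE_SIZE == NVME_CTRL_PAGE_SIZE == 4 KiB:
 * a single 6 KiB bvec starting 1 KiB into a page gets prp1 pointing at the
 * data and prp2 pointing first_prp_len = 3 KiB further in, where the second
 * controller page begins.  Requests only reach this helper when offset plus
 * length fits in two controller pages (see nvme_map_data()), so no PRP list
 * is ever needed here.
 */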
827
828static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
829 struct request *req, struct nvme_rw_command *cmnd,
830 struct bio_vec *bv)
831{
832 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
833
834 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
835 if (dma_mapping_error(dev->dev, iod->first_dma))
836 return BLK_STS_RESOURCE;
837 iod->dma_len = bv->bv_len;
838
839 cmnd->flags = NVME_CMD_SGL_METABUF;
840 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
841 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
842 cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
843 return BLK_STS_OK;
844}
845
846static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
847 struct nvme_command *cmnd)
848{
849 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
850 blk_status_t ret = BLK_STS_RESOURCE;
851 int nr_mapped;
852
853 if (blk_rq_nr_phys_segments(req) == 1) {
854 struct bio_vec bv = req_bvec(req);
855
856 if (!is_pci_p2pdma_page(bv.bv_page)) {
857 if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
858 return nvme_setup_prp_simple(dev, req,
859 &cmnd->rw, &bv);
860
861 if (iod->nvmeq->qid && sgl_threshold &&
862 nvme_ctrl_sgl_supported(&dev->ctrl))
863 return nvme_setup_sgl_simple(dev, req,
864 &cmnd->rw, &bv);
865 }
866 }
867
868 iod->dma_len = 0;
869 iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
870 if (!iod->sg)
871 return BLK_STS_RESOURCE;
872 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
873 iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
874 if (!iod->nents)
875 goto out_free_sg;
876
877 if (is_pci_p2pdma_page(sg_page(iod->sg)))
878 nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
879 iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
880 else
881 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
882 rq_dma_dir(req), DMA_ATTR_NO_WARN);
883 if (!nr_mapped)
884 goto out_free_sg;
885
886 iod->use_sgl = nvme_pci_use_sgls(dev, req);
887 if (iod->use_sgl)
888 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
889 else
890 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
891 if (ret != BLK_STS_OK)
892 goto out_unmap_sg;
893 return BLK_STS_OK;
894
895out_unmap_sg:
896 nvme_unmap_sg(dev, req);
897out_free_sg:
898 mempool_free(iod->sg, dev->iod_mempool);
899 return ret;
900}
901
902static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
903 struct nvme_command *cmnd)
904{
905 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
906
907 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
908 rq_dma_dir(req), 0);
909 if (dma_mapping_error(dev->dev, iod->meta_dma))
910 return BLK_STS_IOERR;
911 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
912 return BLK_STS_OK;
913}
914
915/*
916 * NOTE: ns is NULL when called on the admin queue.
917 */
918static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
919 const struct blk_mq_queue_data *bd)
920{
921 struct nvme_ns *ns = hctx->queue->queuedata;
922 struct nvme_queue *nvmeq = hctx->driver_data;
923 struct nvme_dev *dev = nvmeq->dev;
924 struct request *req = bd->rq;
925 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
926 struct nvme_command *cmnd = &iod->cmd;
927 blk_status_t ret;
928
929 iod->aborted = 0;
930 iod->npages = -1;
931 iod->nents = 0;
932
933 /*
934 * We should not need to do this, but we're still using this to
935 * ensure we can drain requests on a dying queue.
936 */
937 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
938 return BLK_STS_IOERR;
939
940 if (!nvme_check_ready(&dev->ctrl, req, true))
941 return nvme_fail_nonready_command(&dev->ctrl, req);
942
943 ret = nvme_setup_cmd(ns, req);
944 if (ret)
945 return ret;
946
947 if (blk_rq_nr_phys_segments(req)) {
948 ret = nvme_map_data(dev, req, cmnd);
949 if (ret)
950 goto out_free_cmd;
951 }
952
953 if (blk_integrity_rq(req)) {
954 ret = nvme_map_metadata(dev, req, cmnd);
955 if (ret)
956 goto out_unmap_data;
957 }
958
959 blk_mq_start_request(req);
960 nvme_submit_cmd(nvmeq, cmnd, bd->last);
961 return BLK_STS_OK;
962out_unmap_data:
963 nvme_unmap_data(dev, req);
964out_free_cmd:
965 nvme_cleanup_cmd(req);
966 return ret;
967}
968
969static __always_inline void nvme_pci_unmap_rq(struct request *req)
970{
971 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
972 struct nvme_dev *dev = iod->nvmeq->dev;
973
974 if (blk_integrity_rq(req))
975 dma_unmap_page(dev->dev, iod->meta_dma,
976 rq_integrity_vec(req)->bv_len, rq_data_dir(req));
977 if (blk_rq_nr_phys_segments(req))
978 nvme_unmap_data(dev, req);
979}
980
981static void nvme_pci_complete_rq(struct request *req)
982{
983 nvme_pci_unmap_rq(req);
984 nvme_complete_rq(req);
985}
986
987static void nvme_pci_complete_batch(struct io_comp_batch *iob)
988{
989 nvme_complete_batch(iob, nvme_pci_unmap_rq);
990}
991
992/* We read the CQE phase first to check if the rest of the entry is valid */
993static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
994{
995 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
996
997 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
998}
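/*
 * Illustrative note: the controller writes each completion entry with a
 * phase bit that flips on every pass through the CQ ring.  cq_phase starts
 * at 1, so a newly posted entry's status bit 0 matches cq_phase and is seen
 * as pending; once cq_head wraps, nvme_update_cq_head() toggles cq_phase
 * and stale entries from the previous pass no longer match.
 */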
999
1000static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
1001{
1002 u16 head = nvmeq->cq_head;
1003
1004 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
1005 nvmeq->dbbuf_cq_ei))
1006 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
1007}
1008
1009static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
1010{
1011 if (!nvmeq->qid)
1012 return nvmeq->dev->admin_tagset.tags[0];
1013 return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
1014}
1015
1016static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
1017 struct io_comp_batch *iob, u16 idx)
1018{
1019 struct nvme_completion *cqe = &nvmeq->cqes[idx];
1020 __u16 command_id = READ_ONCE(cqe->command_id);
1021 struct request *req;
1022
1023 /*
1024 * AEN requests are special as they don't time out and can
1025 * survive any kind of queue freeze and often don't respond to
1026 * aborts. We don't even bother to allocate a struct request
1027 * for them but rather special case them here.
1028 */
1029 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
1030 nvme_complete_async_event(&nvmeq->dev->ctrl,
1031 cqe->status, &cqe->result);
1032 return;
1033 }
1034
1035 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
1036 if (unlikely(!req)) {
1037 dev_warn(nvmeq->dev->ctrl.device,
1038 "invalid id %d completed on queue %d\n",
1039 command_id, le16_to_cpu(cqe->sq_id));
1040 return;
1041 }
1042
1043 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1044 if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
1045 !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
1046 nvme_pci_complete_batch))
1047 nvme_pci_complete_rq(req);
1048}
1049
1050static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
1051{
1052 u32 tmp = nvmeq->cq_head + 1;
1053
1054 if (tmp == nvmeq->q_depth) {
1055 nvmeq->cq_head = 0;
1056 nvmeq->cq_phase ^= 1;
1057 } else {
1058 nvmeq->cq_head = tmp;
1059 }
1060}
1061
1062static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
1063 struct io_comp_batch *iob)
1064{
1065 int found = 0;
1066
1067 while (nvme_cqe_pending(nvmeq)) {
1068 found++;
1069 /*
1070 * load-load control dependency between phase and the rest of
1071 * the cqe requires a full read memory barrier
1072 */
1073 dma_rmb();
1074 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
1075 nvme_update_cq_head(nvmeq);
1076 }
1077
1078 if (found)
1079 nvme_ring_cq_doorbell(nvmeq);
1080 return found;
1081}
1082
1083static irqreturn_t nvme_irq(int irq, void *data)
1084{
1085 struct nvme_queue *nvmeq = data;
1086 DEFINE_IO_COMP_BATCH(iob);
1087
1088 if (nvme_poll_cq(nvmeq, &iob)) {
1089 if (!rq_list_empty(iob.req_list))
1090 nvme_pci_complete_batch(&iob);
1091 return IRQ_HANDLED;
1092 }
1093 return IRQ_NONE;
1094}
1095
1096static irqreturn_t nvme_irq_check(int irq, void *data)
1097{
1098 struct nvme_queue *nvmeq = data;
1099
1100 if (nvme_cqe_pending(nvmeq))
1101 return IRQ_WAKE_THREAD;
1102 return IRQ_NONE;
1103}
1104
1105/*
1106 * Poll for completions for any interrupt driven queue
1107 * Can be called from any context.
1108 */
1109static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
1110{
1111 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1112
1113 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
1114
1115 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1116 nvme_poll_cq(nvmeq, NULL);
1117 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1118}
1119
1120static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1121{
1122 struct nvme_queue *nvmeq = hctx->driver_data;
1123 bool found;
1124
1125 if (!nvme_cqe_pending(nvmeq))
1126 return 0;
1127
1128 spin_lock(&nvmeq->cq_poll_lock);
1129 found = nvme_poll_cq(nvmeq, iob);
1130 spin_unlock(&nvmeq->cq_poll_lock);
1131
1132 return found;
1133}
1134
1135static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
1136{
1137 struct nvme_dev *dev = to_nvme_dev(ctrl);
1138 struct nvme_queue *nvmeq = &dev->queues[0];
1139 struct nvme_command c = { };
1140
1141 c.common.opcode = nvme_admin_async_event;
1142 c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1143 nvme_submit_cmd(nvmeq, &c, true);
1144}
1145
1146static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1147{
1148 struct nvme_command c = { };
1149
1150 c.delete_queue.opcode = opcode;
1151 c.delete_queue.qid = cpu_to_le16(id);
1152
1153 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1154}
1155
1156static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
1157 struct nvme_queue *nvmeq, s16 vector)
1158{
1159 struct nvme_command c = { };
1160 int flags = NVME_QUEUE_PHYS_CONTIG;
1161
1162 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
1163 flags |= NVME_CQ_IRQ_ENABLED;
1164
1165 /*
1166 * Note: we (ab)use the fact that the prp fields survive if no data
1167 * is attached to the request.
1168 */
1169 c.create_cq.opcode = nvme_admin_create_cq;
1170 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1171 c.create_cq.cqid = cpu_to_le16(qid);
1172 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1173 c.create_cq.cq_flags = cpu_to_le16(flags);
1174 c.create_cq.irq_vector = cpu_to_le16(vector);
1175
1176 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1177}
1178
1179static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
1180 struct nvme_queue *nvmeq)
1181{
1182 struct nvme_ctrl *ctrl = &dev->ctrl;
1183 struct nvme_command c = { };
1184 int flags = NVME_QUEUE_PHYS_CONTIG;
1185
1186 /*
1187 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
1188 * set. Since URGENT priority is zero, it makes all queues
1189 * URGENT.
1190 */
1191 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
1192 flags |= NVME_SQ_PRIO_MEDIUM;
1193
1194 /*
1195 * Note: we (ab)use the fact that the prp fields survive if no data
1196 * is attached to the request.
1197 */
1198 c.create_sq.opcode = nvme_admin_create_sq;
1199 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
1200 c.create_sq.sqid = cpu_to_le16(qid);
1201 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1202 c.create_sq.sq_flags = cpu_to_le16(flags);
1203 c.create_sq.cqid = cpu_to_le16(qid);
1204
1205 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1206}
1207
1208static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
1209{
1210 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
1211}
1212
1213static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
1214{
1215 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
1216}
1217
1218static void abort_endio(struct request *req, blk_status_t error)
1219{
1220 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1221 struct nvme_queue *nvmeq = iod->nvmeq;
1222
1223 dev_warn(nvmeq->dev->ctrl.device,
1224 "Abort status: 0x%x", nvme_req(req)->status);
1225 atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1226 blk_mq_free_request(req);
1227}
1228
1229static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1230{
1231 /* If true, indicates loss of adapter communication, possibly by an
1232 * NVMe Subsystem reset.
1233 */
1234 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1235
1236 /* If there is a reset/reinit ongoing, we shouldn't reset again. */
1237 switch (dev->ctrl.state) {
1238 case NVME_CTRL_RESETTING:
1239 case NVME_CTRL_CONNECTING:
1240 return false;
1241 default:
1242 break;
1243 }
1244
1245 /* We shouldn't reset unless the controller is in a fatal error state
1246 * _or_ we have lost communication with it.
1247 */
1248 if (!(csts & NVME_CSTS_CFS) && !nssro)
1249 return false;
1250
1251 return true;
1252}
1253
1254static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1255{
1256 /* Read a config register to help see what died. */
1257 u16 pci_status;
1258 int result;
1259
1260 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1261 &pci_status);
1262 if (result == PCIBIOS_SUCCESSFUL)
1263 dev_warn(dev->ctrl.device,
1264 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1265 csts, pci_status);
1266 else
1267 dev_warn(dev->ctrl.device,
1268 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1269 csts, result);
1270}
1271
1272static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1273{
1274 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1275 struct nvme_queue *nvmeq = iod->nvmeq;
1276 struct nvme_dev *dev = nvmeq->dev;
1277 struct request *abort_req;
1278 struct nvme_command cmd = { };
1279 u32 csts = readl(dev->bar + NVME_REG_CSTS);
1280
1281 /* If the PCI error recovery process is in progress, we cannot reset or
1282 * the recovery mechanism will surely fail.
1283 */
1284 mb();
1285 if (pci_channel_offline(to_pci_dev(dev->dev)))
1286 return BLK_EH_RESET_TIMER;
1287
1288 /*
1289 * Reset immediately if the controller has failed
1290 */
1291 if (nvme_should_reset(dev, csts)) {
1292 nvme_warn_reset(dev, csts);
1293 nvme_dev_disable(dev, false);
1294 nvme_reset_ctrl(&dev->ctrl);
1295 return BLK_EH_DONE;
1296 }
1297
1298 /*
1299 * Did we miss an interrupt?
1300 */
1301 if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
1302 nvme_poll(req->mq_hctx, NULL);
1303 else
1304 nvme_poll_irqdisable(nvmeq);
1305
1306 if (blk_mq_request_completed(req)) {
1307 dev_warn(dev->ctrl.device,
1308 "I/O %d QID %d timeout, completion polled\n",
1309 req->tag, nvmeq->qid);
1310 return BLK_EH_DONE;
1311 }
1312
1313 /*
1314 * Shutdown immediately if controller times out while starting. The
1315 * reset work will see the pci device disabled when it gets the forced
1316 * cancellation error. All outstanding requests are completed on
1317 * shutdown, so we return BLK_EH_DONE.
1318 */
1319 switch (dev->ctrl.state) {
1320 case NVME_CTRL_CONNECTING:
1321 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1322 fallthrough;
1323 case NVME_CTRL_DELETING:
1324 dev_warn_ratelimited(dev->ctrl.device,
1325 "I/O %d QID %d timeout, disable controller\n",
1326 req->tag, nvmeq->qid);
1327 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1328 nvme_dev_disable(dev, true);
1329 return BLK_EH_DONE;
1330 case NVME_CTRL_RESETTING:
1331 return BLK_EH_RESET_TIMER;
1332 default:
1333 break;
1334 }
1335
1336 /*
1337 * Shutdown the controller immediately and schedule a reset if the
1338 * command was already aborted once before and still hasn't been
1339 * returned to the driver, or if this is the admin queue.
1340 */
1341 if (!nvmeq->qid || iod->aborted) {
1342 dev_warn(dev->ctrl.device,
1343 "I/O %d QID %d timeout, reset controller\n",
1344 req->tag, nvmeq->qid);
1345 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1346 nvme_dev_disable(dev, false);
1347 nvme_reset_ctrl(&dev->ctrl);
1348
1349 return BLK_EH_DONE;
1350 }
1351
1352 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1353 atomic_inc(&dev->ctrl.abort_limit);
1354 return BLK_EH_RESET_TIMER;
1355 }
1356 iod->aborted = 1;
1357
1358 cmd.abort.opcode = nvme_admin_abort_cmd;
1359 cmd.abort.cid = nvme_cid(req);
1360 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1361
1362 dev_warn(nvmeq->dev->ctrl.device,
1363 "I/O %d QID %d timeout, aborting\n",
1364 req->tag, nvmeq->qid);
1365
1366 abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
1367 BLK_MQ_REQ_NOWAIT);
1368 if (IS_ERR(abort_req)) {
1369 atomic_inc(&dev->ctrl.abort_limit);
1370 return BLK_EH_RESET_TIMER;
1371 }
1372
1373 abort_req->end_io_data = NULL;
1374 blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio);
1375
1376 /*
1377 * The aborted req will be completed on receiving the abort req.
1378 * We enable the timer again. If hit twice, it'll cause a device reset,
1379 * as the device is then in a faulty state.
1380 */
1381 return BLK_EH_RESET_TIMER;
1382}
1383
1384static void nvme_free_queue(struct nvme_queue *nvmeq)
1385{
1386 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
1387 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1388 if (!nvmeq->sq_cmds)
1389 return;
1390
1391 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
1392 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
1393 nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1394 } else {
1395 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
1396 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1397 }
1398}
1399
1400static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1401{
1402 int i;
1403
1404 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
1405 dev->ctrl.queue_count--;
1406 nvme_free_queue(&dev->queues[i]);
1407 }
1408}
1409
1410/**
1411 * nvme_suspend_queue - put queue into suspended state
1412 * @nvmeq: queue to suspend
1413 */
1414static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1415{
1416 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
1417 return 1;
1418
1419 /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
1420 mb();
1421
1422 nvmeq->dev->online_queues--;
1423 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1424 nvme_stop_admin_queue(&nvmeq->dev->ctrl);
1425 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
1426 pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
1427 return 0;
1428}
1429
1430static void nvme_suspend_io_queues(struct nvme_dev *dev)
1431{
1432 int i;
1433
1434 for (i = dev->ctrl.queue_count - 1; i > 0; i--)
1435 nvme_suspend_queue(&dev->queues[i]);
1436}
1437
1438static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
1439{
1440 struct nvme_queue *nvmeq = &dev->queues[0];
1441
1442 if (shutdown)
1443 nvme_shutdown_ctrl(&dev->ctrl);
1444 else
1445 nvme_disable_ctrl(&dev->ctrl);
1446
1447 nvme_poll_irqdisable(nvmeq);
1448}
1449
1450/*
1451 * Called only on a device that has been disabled and after all other threads
1452 * that can check this device's completion queues have synced, except
1453 * nvme_poll(). This is the last chance for the driver to see a natural
1454 * completion before nvme_cancel_request() terminates all incomplete requests.
1455 */
1456static void nvme_reap_pending_cqes(struct nvme_dev *dev)
1457{
1458 int i;
1459
1460 for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
1461 spin_lock(&dev->queues[i].cq_poll_lock);
1462 nvme_poll_cq(&dev->queues[i], NULL);
1463 spin_unlock(&dev->queues[i].cq_poll_lock);
1464 }
1465}
1466
1467static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
1468 int entry_size)
1469{
1470 int q_depth = dev->q_depth;
1471 unsigned q_size_aligned = roundup(q_depth * entry_size,
1472 NVME_CTRL_PAGE_SIZE);
1473
1474 if (q_size_aligned * nr_io_queues > dev->cmb_size) {
1475 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
1476
1477 mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
1478 q_depth = div_u64(mem_per_q, entry_size);
1479
1480 /*
1481 * Ensure the reduced q_depth is above some threshold where it
1482 * would be better to map queues in system memory with the
1483 * original depth
1484 */
1485 if (q_depth < 64)
1486 return -ENOMEM;
1487 }
1488
1489 return q_depth;
1490}
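/*
 * Worked example: with a 256 KiB CMB, 8 I/O queues, 64-byte SQ entries and
 * the default q_depth of 1024, each queue would need 64 KiB (512 KiB
 * total), which does not fit.  mem_per_q becomes 32 KiB and the depth is
 * reduced to 512.  Had the result fallen below 64, the CMB would be skipped
 * in favour of host memory at the original depth.
 */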
1491
1492static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1493 int qid)
1494{
1495 struct pci_dev *pdev = to_pci_dev(dev->dev);
1496
1497 if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
1498 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
1499 if (nvmeq->sq_cmds) {
1500 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
1501 nvmeq->sq_cmds);
1502 if (nvmeq->sq_dma_addr) {
1503 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
1504 return 0;
1505 }
1506
1507 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1508 }
1509 }
1510
1511 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
1512 &nvmeq->sq_dma_addr, GFP_KERNEL);
1513 if (!nvmeq->sq_cmds)
1514 return -ENOMEM;
1515 return 0;
1516}
1517
1518static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
1519{
1520 struct nvme_queue *nvmeq = &dev->queues[qid];
1521
1522 if (dev->ctrl.queue_count > qid)
1523 return 0;
1524
1525 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
1526 nvmeq->q_depth = depth;
1527 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
1528 &nvmeq->cq_dma_addr, GFP_KERNEL);
1529 if (!nvmeq->cqes)
1530 goto free_nvmeq;
1531
1532 if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
1533 goto free_cqdma;
1534
1535 nvmeq->dev = dev;
1536 spin_lock_init(&nvmeq->sq_lock);
1537 spin_lock_init(&nvmeq->cq_poll_lock);
1538 nvmeq->cq_head = 0;
1539 nvmeq->cq_phase = 1;
1540 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1541 nvmeq->qid = qid;
1542 dev->ctrl.queue_count++;
1543
1544 return 0;
1545
1546 free_cqdma:
1547 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
1548 nvmeq->cq_dma_addr);
1549 free_nvmeq:
1550 return -ENOMEM;
1551}
1552
1553static int queue_request_irq(struct nvme_queue *nvmeq)
1554{
1555 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1556 int nr = nvmeq->dev->ctrl.instance;
1557
1558 if (use_threaded_interrupts) {
1559 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
1560 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1561 } else {
1562 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
1563 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1564 }
1565}
1566
1567static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1568{
1569 struct nvme_dev *dev = nvmeq->dev;
1570
1571 nvmeq->sq_tail = 0;
1572 nvmeq->last_sq_tail = 0;
1573 nvmeq->cq_head = 0;
1574 nvmeq->cq_phase = 1;
1575 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1576 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
1577 nvme_dbbuf_init(dev, nvmeq, qid);
1578 dev->online_queues++;
1579 wmb(); /* ensure the first interrupt sees the initialization */
1580}
1581
1582/*
1583 * Try getting shutdown_lock while setting up IO queues.
1584 */
1585static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
1586{
1587 /*
1588 * Give up if the lock is being held by nvme_dev_disable.
1589 */
1590 if (!mutex_trylock(&dev->shutdown_lock))
1591 return -ENODEV;
1592
1593 /*
1594 * Controller is in wrong state, fail early.
1595 */
1596 if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
1597 mutex_unlock(&dev->shutdown_lock);
1598 return -ENODEV;
1599 }
1600
1601 return 0;
1602}
1603
1604static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
1605{
1606 struct nvme_dev *dev = nvmeq->dev;
1607 int result;
1608 u16 vector = 0;
1609
1610 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
1611
1612 /*
1613 * A queue's vector matches the queue identifier unless the controller
1614 * has only one vector available.
1615 */
1616 if (!polled)
1617 vector = dev->num_vecs == 1 ? 0 : qid;
1618 else
1619 set_bit(NVMEQ_POLLED, &nvmeq->flags);
1620
1621 result = adapter_alloc_cq(dev, qid, nvmeq, vector);
1622 if (result)
1623 return result;
1624
1625 result = adapter_alloc_sq(dev, qid, nvmeq);
1626 if (result < 0)
1627 return result;
1628 if (result)
1629 goto release_cq;
1630
1631 nvmeq->cq_vector = vector;
1632
1633 result = nvme_setup_io_queues_trylock(dev);
1634 if (result)
1635 return result;
1636 nvme_init_queue(nvmeq, qid);
1637 if (!polled) {
1638 result = queue_request_irq(nvmeq);
1639 if (result < 0)
1640 goto release_sq;
1641 }
1642
1643 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
1644 mutex_unlock(&dev->shutdown_lock);
1645 return result;
1646
1647release_sq:
1648 dev->online_queues--;
1649 mutex_unlock(&dev->shutdown_lock);
1650 adapter_delete_sq(dev, qid);
1651release_cq:
1652 adapter_delete_cq(dev, qid);
1653 return result;
1654}
1655
1656static const struct blk_mq_ops nvme_mq_admin_ops = {
1657 .queue_rq = nvme_queue_rq,
1658 .complete = nvme_pci_complete_rq,
1659 .init_hctx = nvme_admin_init_hctx,
1660 .init_request = nvme_init_request,
1661 .timeout = nvme_timeout,
1662};
1663
1664static const struct blk_mq_ops nvme_mq_ops = {
1665 .queue_rq = nvme_queue_rq,
1666 .complete = nvme_pci_complete_rq,
1667 .commit_rqs = nvme_commit_rqs,
1668 .init_hctx = nvme_init_hctx,
1669 .init_request = nvme_init_request,
1670 .map_queues = nvme_pci_map_queues,
1671 .timeout = nvme_timeout,
1672 .poll = nvme_poll,
1673};
1674
1675static void nvme_dev_remove_admin(struct nvme_dev *dev)
1676{
1677 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1678 /*
1679 * If the controller was reset during removal, it's possible
1680 * user requests may be waiting on a stopped queue. Start the
1681 * queue to flush these to completion.
1682 */
1683 nvme_start_admin_queue(&dev->ctrl);
1684 blk_cleanup_queue(dev->ctrl.admin_q);
1685 blk_mq_free_tag_set(&dev->admin_tagset);
1686 }
1687}
1688
1689static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1690{
1691 if (!dev->ctrl.admin_q) {
1692 dev->admin_tagset.ops = &nvme_mq_admin_ops;
1693 dev->admin_tagset.nr_hw_queues = 1;
1694
1695 dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1696 dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
1697 dev->admin_tagset.numa_node = dev->ctrl.numa_node;
1698 dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
1699 dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
1700 dev->admin_tagset.driver_data = dev;
1701
1702 if (blk_mq_alloc_tag_set(&dev->admin_tagset))
1703 return -ENOMEM;
1704 dev->ctrl.admin_tagset = &dev->admin_tagset;
1705
1706 dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
1707 if (IS_ERR(dev->ctrl.admin_q)) {
1708 blk_mq_free_tag_set(&dev->admin_tagset);
1709 return -ENOMEM;
1710 }
1711 if (!blk_get_queue(dev->ctrl.admin_q)) {
1712 nvme_dev_remove_admin(dev);
1713 dev->ctrl.admin_q = NULL;
1714 return -ENODEV;
1715 }
1716 } else
1717 nvme_start_admin_queue(&dev->ctrl);
1718
1719 return 0;
1720}
1721
1722static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1723{
1724 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
1725}
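/*
 * Illustrative example: doorbell registers start at BAR offset 0x1000
 * (NVME_REG_DBS) and each queue pair consumes 8 * db_stride bytes.  For
 * 16 I/O queues plus the admin queue with db_stride == 1 this requests
 * 0x1000 + 17 * 8 = 4232 bytes of BAR mapping.
 */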
1726
1727static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
1728{
1729 struct pci_dev *pdev = to_pci_dev(dev->dev);
1730
1731 if (size <= dev->bar_mapped_size)
1732 return 0;
1733 if (size > pci_resource_len(pdev, 0))
1734 return -ENOMEM;
1735 if (dev->bar)
1736 iounmap(dev->bar);
1737 dev->bar = ioremap(pci_resource_start(pdev, 0), size);
1738 if (!dev->bar) {
1739 dev->bar_mapped_size = 0;
1740 return -ENOMEM;
1741 }
1742 dev->bar_mapped_size = size;
1743 dev->dbs = dev->bar + NVME_REG_DBS;
1744
1745 return 0;
1746}
1747
1748static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
1749{
1750 int result;
1751 u32 aqa;
1752 struct nvme_queue *nvmeq;
1753
1754 result = nvme_remap_bar(dev, db_bar_size(dev, 0));
1755 if (result < 0)
1756 return result;
1757
1758 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
1759 NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
1760
1761 if (dev->subsystem &&
1762 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
1763 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
1764
1765 result = nvme_disable_ctrl(&dev->ctrl);
1766 if (result < 0)
1767 return result;
1768
1769 result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
1770 if (result)
1771 return result;
1772
1773 dev->ctrl.numa_node = dev_to_node(dev->dev);
1774
1775 nvmeq = &dev->queues[0];
1776 aqa = nvmeq->q_depth - 1;
1777 aqa |= aqa << 16;
1778
1779 writel(aqa, dev->bar + NVME_REG_AQA);
1780 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
1781 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
1782
1783 result = nvme_enable_ctrl(&dev->ctrl);
1784 if (result)
1785 return result;
1786
1787 nvmeq->cq_vector = 0;
1788 nvme_init_queue(nvmeq, 0);
1789 result = queue_request_irq(nvmeq);
1790 if (result) {
1791 dev->online_queues--;
1792 return result;
1793 }
1794
1795 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
1796 return result;
1797}
1798
1799static int nvme_create_io_queues(struct nvme_dev *dev)
1800{
1801 unsigned i, max, rw_queues;
1802 int ret = 0;
1803
1804 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
1805 if (nvme_alloc_queue(dev, i, dev->q_depth)) {
1806 ret = -ENOMEM;
1807 break;
1808 }
1809 }
1810
1811 max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1812 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
1813 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
1814 dev->io_queues[HCTX_TYPE_READ];
1815 } else {
1816 rw_queues = max;
1817 }
1818
1819 for (i = dev->online_queues; i <= max; i++) {
1820 bool polled = i > rw_queues;
1821
1822 ret = nvme_create_queue(&dev->queues[i], i, polled);
1823 if (ret)
1824 break;
1825 }
1826
1827 /*
1828 * Ignore failing Create SQ/CQ commands; we can continue with fewer
1829 * than the desired number of queues, and even a controller without
1830 * I/O queues can still be used to issue admin commands. This might
1831 * be useful to upgrade a buggy firmware for example.
1832 */
1833 return ret >= 0 ? 0 : ret;
1834}
1835
1836static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
1837{
1838 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
1839
1840 return 1ULL << (12 + 4 * szu);
1841}
1842
1843static u32 nvme_cmb_size(struct nvme_dev *dev)
1844{
1845 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
1846}
1847
1848static void nvme_map_cmb(struct nvme_dev *dev)
1849{
1850 u64 size, offset;
1851 resource_size_t bar_size;
1852 struct pci_dev *pdev = to_pci_dev(dev->dev);
1853 int bar;
1854
1855 if (dev->cmb_size)
1856 return;
1857
1858 if (NVME_CAP_CMBS(dev->ctrl.cap))
1859 writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
1860
1861 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1862 if (!dev->cmbsz)
1863 return;
1864 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
1865
1866 size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
1867 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
1868 bar = NVME_CMB_BIR(dev->cmbloc);
1869 bar_size = pci_resource_len(pdev, bar);
1870
1871 if (offset > bar_size)
1872 return;
1873
1874 /*
1875 * Tell the controller about the host side address mapping the CMB,
1876 * and enable CMB decoding for the NVMe 1.4+ scheme:
1877 */
1878 if (NVME_CAP_CMBS(dev->ctrl.cap)) {
1879 hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
1880 (pci_bus_address(pdev, bar) + offset),
1881 dev->bar + NVME_REG_CMBMSC);
1882 }
1883
1884 /*
1885 * Controllers may support a CMB size larger than their BAR,
1886 * for example, due to being behind a bridge. Reduce the CMB to
1887 * the reported size of the BAR
1888 */
1889 if (size > bar_size - offset)
1890 size = bar_size - offset;
1891
1892 if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
1893 dev_warn(dev->ctrl.device,
1894 "failed to register the CMB\n");
1895 return;
1896 }
1897
1898 dev->cmb_size = size;
1899 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
1900
1901 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
1902 (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
1903 pci_p2pmem_publish(pdev, true);
1904}
1905
1906static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
1907{
1908 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
1909 u64 dma_addr = dev->host_mem_descs_dma;
1910 struct nvme_command c = { };
1911 int ret;
1912
1913 c.features.opcode = nvme_admin_set_features;
1914 c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
1915 c.features.dword11 = cpu_to_le32(bits);
1916 c.features.dword12 = cpu_to_le32(host_mem_size);
1917 c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
1918 c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
1919 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
1920
1921 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1922 if (ret) {
1923 dev_warn(dev->ctrl.device,
1924 "failed to set host mem (err %d, flags %#x).\n",
1925 ret, bits);
1926 } else
1927 dev->hmb = bits & NVME_HOST_MEM_ENABLE;
1928
1929 return ret;
1930}
1931
1932static void nvme_free_host_mem(struct nvme_dev *dev)
1933{
1934 int i;
1935
1936 for (i = 0; i < dev->nr_host_mem_descs; i++) {
1937 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
1938 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
1939
1940 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
1941 le64_to_cpu(desc->addr),
1942 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1943 }
1944
1945 kfree(dev->host_mem_desc_bufs);
1946 dev->host_mem_desc_bufs = NULL;
1947 dma_free_coherent(dev->dev,
1948 dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
1949 dev->host_mem_descs, dev->host_mem_descs_dma);
1950 dev->host_mem_descs = NULL;
1951 dev->nr_host_mem_descs = 0;
1952}
1953
1954static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
1955 u32 chunk_size)
1956{
1957 struct nvme_host_mem_buf_desc *descs;
1958 u32 max_entries, len;
1959 dma_addr_t descs_dma;
1960 int i = 0;
1961 void **bufs;
1962 u64 size, tmp;
1963
1964 tmp = (preferred + chunk_size - 1);
1965 do_div(tmp, chunk_size);
1966 max_entries = tmp;
1967
1968 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
1969 max_entries = dev->ctrl.hmmaxd;
1970
1971 descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
1972 &descs_dma, GFP_KERNEL);
1973 if (!descs)
1974 goto out;
1975
1976 bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
1977 if (!bufs)
1978 goto out_free_descs;
1979
1980 for (size = 0; size < preferred && i < max_entries; size += len) {
1981 dma_addr_t dma_addr;
1982
1983 len = min_t(u64, chunk_size, preferred - size);
1984 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
1985 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1986 if (!bufs[i])
1987 break;
1988
1989 descs[i].addr = cpu_to_le64(dma_addr);
1990 descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
1991 i++;
1992 }
1993
1994 if (!size)
1995 goto out_free_bufs;
1996
1997 dev->nr_host_mem_descs = i;
1998 dev->host_mem_size = size;
1999 dev->host_mem_descs = descs;
2000 dev->host_mem_descs_dma = descs_dma;
2001 dev->host_mem_desc_bufs = bufs;
2002 return 0;
2003
2004out_free_bufs:
2005 while (--i >= 0) {
2006 size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
2007
2008 dma_free_attrs(dev->dev, size, bufs[i],
2009 le64_to_cpu(descs[i].addr),
2010 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
2011 }
2012
2013 kfree(bufs);
2014out_free_descs:
2015 dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
2016 descs_dma);
2017out:
2018 dev->host_mem_descs = NULL;
2019 return -ENOMEM;
2020}
2021
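/*
 * Retry the chunked allocation with progressively smaller (halved) chunk
 * sizes until either the minimum required size is satisfied or the chunk
 * size drops below the controller's HMMINDS limit (floored at two pages).
 */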
2022static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
2023{
2024 u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
2025 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
2026 u64 chunk_size;
2027
2028 /* start big and work our way down */
2029 for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
2030 if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
2031 if (!min || dev->host_mem_size >= min)
2032 return 0;
2033 nvme_free_host_mem(dev);
2034 }
2035 }
2036
2037 return -ENOMEM;
2038}
2039
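/*
 * Allocate (or reuse) and enable the Host Memory Buffer according to the
 * controller's HMPRE/HMMIN hints, clamped by the max_host_mem_size_mb
 * module parameter.  Failure to set up the HMB is not fatal; the
 * controller must keep working without it.
 */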
2040static int nvme_setup_host_mem(struct nvme_dev *dev)
2041{
2042 u64 max = (u64)max_host_mem_size_mb * SZ_1M;
2043 u64 preferred = (u64)dev->ctrl.hmpre * 4096;
2044 u64 min = (u64)dev->ctrl.hmmin * 4096;
2045 u32 enable_bits = NVME_HOST_MEM_ENABLE;
2046 int ret;
2047
2048 preferred = min(preferred, max);
2049 if (min > max) {
2050 dev_warn(dev->ctrl.device,
2051 "min host memory (%lld MiB) above limit (%d MiB).\n",
2052 min >> ilog2(SZ_1M), max_host_mem_size_mb);
2053 nvme_free_host_mem(dev);
2054 return 0;
2055 }
2056
2057 /*
2058	 * If we already have a buffer allocated, check if we can reuse it.
2059 */
2060 if (dev->host_mem_descs) {
2061 if (dev->host_mem_size >= min)
2062 enable_bits |= NVME_HOST_MEM_RETURN;
2063 else
2064 nvme_free_host_mem(dev);
2065 }
2066
2067 if (!dev->host_mem_descs) {
2068 if (nvme_alloc_host_mem(dev, min, preferred)) {
2069 dev_warn(dev->ctrl.device,
2070 "failed to allocate host memory buffer.\n");
2071 return 0; /* controller must work without HMB */
2072 }
2073
2074 dev_info(dev->ctrl.device,
2075 "allocated %lld MiB host memory buffer.\n",
2076 dev->host_mem_size >> ilog2(SZ_1M));
2077 }
2078
2079 ret = nvme_set_host_mem(dev, enable_bits);
2080 if (ret)
2081 nvme_free_host_mem(dev);
2082 return ret;
2083}
2084
2085static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
2086 char *buf)
2087{
2088 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2089
2090 return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n",
2091 ndev->cmbloc, ndev->cmbsz);
2092}
2093static DEVICE_ATTR_RO(cmb);
2094
2095static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr,
2096 char *buf)
2097{
2098 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2099
2100 return sysfs_emit(buf, "%u\n", ndev->cmbloc);
2101}
2102static DEVICE_ATTR_RO(cmbloc);
2103
2104static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr,
2105 char *buf)
2106{
2107 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2108
2109 return sysfs_emit(buf, "%u\n", ndev->cmbsz);
2110}
2111static DEVICE_ATTR_RO(cmbsz);
2112
2113static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
2114 char *buf)
2115{
2116 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2117
2118 return sysfs_emit(buf, "%d\n", ndev->hmb);
2119}
2120
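/*
 * Writing a boolean to the 'hmb' sysfs attribute enables (allocating if
 * necessary) or disables and frees the Host Memory Buffer at runtime.
 */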
2121static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
2122 const char *buf, size_t count)
2123{
2124 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2125 bool new;
2126 int ret;
2127
2128 if (strtobool(buf, &new) < 0)
2129 return -EINVAL;
2130
2131 if (new == ndev->hmb)
2132 return count;
2133
2134 if (new) {
2135 ret = nvme_setup_host_mem(ndev);
2136 } else {
2137 ret = nvme_set_host_mem(ndev, 0);
2138 if (!ret)
2139 nvme_free_host_mem(ndev);
2140 }
2141
2142 if (ret < 0)
2143 return ret;
2144
2145 return count;
2146}
2147static DEVICE_ATTR_RW(hmb);
2148
2149static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
2150 struct attribute *a, int n)
2151{
2152 struct nvme_ctrl *ctrl =
2153 dev_get_drvdata(container_of(kobj, struct device, kobj));
2154 struct nvme_dev *dev = to_nvme_dev(ctrl);
2155
2156 if (a == &dev_attr_cmb.attr ||
2157 a == &dev_attr_cmbloc.attr ||
2158 a == &dev_attr_cmbsz.attr) {
2159 if (!dev->cmbsz)
2160 return 0;
2161 }
2162 if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
2163 return 0;
2164
2165 return a->mode;
2166}
2167
2168static struct attribute *nvme_pci_attrs[] = {
2169 &dev_attr_cmb.attr,
2170 &dev_attr_cmbloc.attr,
2171 &dev_attr_cmbsz.attr,
2172 &dev_attr_hmb.attr,
2173 NULL,
2174};
2175
2176static const struct attribute_group nvme_pci_attr_group = {
2177 .attrs = nvme_pci_attrs,
2178 .is_visible = nvme_pci_attrs_are_visible,
2179};
2180
2181/*
2182 * nirqs is the number of interrupts available for write and read
2183 * queues. The core already reserved an interrupt for the admin queue.
2184 */
2185static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
2186{
2187 struct nvme_dev *dev = affd->priv;
2188 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
2189
2190 /*
2191 * If there is no interrupt available for queues, ensure that
2192 * the default queue is set to 1. The affinity set size is
2193 * also set to one, but the irq core ignores it for this case.
2194 *
2195	 * If only one interrupt is available or 'write_queues' == 0, combine
2196 * write and read queues.
2197 *
2198 * If 'write_queues' > 0, ensure it leaves room for at least one read
2199 * queue.
2200 */
2201 if (!nrirqs) {
2202 nrirqs = 1;
2203 nr_read_queues = 0;
2204 } else if (nrirqs == 1 || !nr_write_queues) {
2205 nr_read_queues = 0;
2206 } else if (nr_write_queues >= nrirqs) {
2207 nr_read_queues = 1;
2208 } else {
2209 nr_read_queues = nrirqs - nr_write_queues;
2210 }
2211
2212 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2213 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2214 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
2215 affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
2216 affd->nr_sets = nr_read_queues ? 2 : 1;
2217}
2218
2219static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2220{
2221 struct pci_dev *pdev = to_pci_dev(dev->dev);
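	/*
	 * Reserve the first vector for the admin queue (pre_vectors = 1) and
	 * let nvme_calc_irq_sets() split the remainder between the default
	 * and read queue maps.
	 */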
2222 struct irq_affinity affd = {
2223 .pre_vectors = 1,
2224 .calc_sets = nvme_calc_irq_sets,
2225 .priv = dev,
2226 };
2227 unsigned int irq_queues, poll_queues;
2228
2229 /*
2230 * Poll queues don't need interrupts, but we need at least one I/O queue
2231 * left over for non-polled I/O.
2232 */
2233 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
2234 dev->io_queues[HCTX_TYPE_POLL] = poll_queues;
2235
2236 /*
2237 * Initialize for the single interrupt case, will be updated in
2238 * nvme_calc_irq_sets().
2239 */
2240 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2241 dev->io_queues[HCTX_TYPE_READ] = 0;
2242
2243 /*
2244 * We need interrupts for the admin queue and each non-polled I/O queue,
2245 * but some Apple controllers require all queues to use the first
2246 * vector.
2247 */
2248 irq_queues = 1;
2249 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
2250 irq_queues += (nr_io_queues - poll_queues);
2251 return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
2252 PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
2253}
2254
2255static void nvme_disable_io_queues(struct nvme_dev *dev)
2256{
2257 if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
2258 __nvme_disable_io_queues(dev, nvme_admin_delete_cq);
2259}
2260
2261static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
2262{
2263 /*
2264 * If tags are shared with admin queue (Apple bug), then
2265 * make sure we only use one IO queue.
2266 */
2267 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2268 return 1;
2269 return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
2270}
2271
2272static int nvme_setup_io_queues(struct nvme_dev *dev)
2273{
2274 struct nvme_queue *adminq = &dev->queues[0];
2275 struct pci_dev *pdev = to_pci_dev(dev->dev);
2276 unsigned int nr_io_queues;
2277 unsigned long size;
2278 int result;
2279
2280 /*
2281 * Sample the module parameters once at reset time so that we have
2282 * stable values to work with.
2283 */
2284 dev->nr_write_queues = write_queues;
2285 dev->nr_poll_queues = poll_queues;
2286
2287 nr_io_queues = dev->nr_allocated_queues - 1;
2288 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2289 if (result < 0)
2290 return result;
2291
2292 if (nr_io_queues == 0)
2293 return 0;
2294
2295 /*
2296 * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
2297	 * from set to unset. If there is a window before it is truly freed,
2298	 * pci_free_irq_vectors() jumping into this window will crash.
2299	 * Also take the lock to avoid racing with pci_free_irq_vectors() in
2300	 * the nvme_dev_disable() path.
2301 */
2302 result = nvme_setup_io_queues_trylock(dev);
2303 if (result)
2304 return result;
2305 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2306 pci_free_irq(pdev, 0, adminq);
2307
2308 if (dev->cmb_use_sqes) {
2309 result = nvme_cmb_qdepth(dev, nr_io_queues,
2310 sizeof(struct nvme_command));
2311 if (result > 0)
2312 dev->q_depth = result;
2313 else
2314 dev->cmb_use_sqes = false;
2315 }
2316
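	/*
	 * Remap the BAR so the doorbell area covers all requested queues,
	 * dropping the queue count whenever the remap fails.
	 */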
2317 do {
2318 size = db_bar_size(dev, nr_io_queues);
2319 result = nvme_remap_bar(dev, size);
2320 if (!result)
2321 break;
2322 if (!--nr_io_queues) {
2323 result = -ENOMEM;
2324 goto out_unlock;
2325 }
2326 } while (1);
2327 adminq->q_db = dev->dbs;
2328
2329 retry:
2330 /* Deregister the admin queue's interrupt */
2331 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2332 pci_free_irq(pdev, 0, adminq);
2333
2334 /*
2335	 * If MSI-X was enabled early because INTx is not supported, disable
2336	 * it again before setting up the full range we need.
2337 */
2338 pci_free_irq_vectors(pdev);
2339
2340 result = nvme_setup_irqs(dev, nr_io_queues);
2341 if (result <= 0) {
2342 result = -EIO;
2343 goto out_unlock;
2344 }
2345
2346 dev->num_vecs = result;
2347 result = max(result - 1, 1);
2348 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
2349
2350 /*
2351 * Should investigate if there's a performance win from allocating
2352 * more queues than interrupt vectors; it might allow the submission
2353 * path to scale better, even if the receive path is limited by the
2354 * number of interrupts.
2355 */
2356 result = queue_request_irq(adminq);
2357 if (result)
2358 goto out_unlock;
2359 set_bit(NVMEQ_ENABLED, &adminq->flags);
2360 mutex_unlock(&dev->shutdown_lock);
2361
2362 result = nvme_create_io_queues(dev);
2363 if (result || dev->online_queues < 2)
2364 return result;
2365
2366 if (dev->online_queues - 1 < dev->max_qid) {
2367 nr_io_queues = dev->online_queues - 1;
2368 nvme_disable_io_queues(dev);
2369 result = nvme_setup_io_queues_trylock(dev);
2370 if (result)
2371 return result;
2372 nvme_suspend_io_queues(dev);
2373 goto retry;
2374 }
2375 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2376 dev->io_queues[HCTX_TYPE_DEFAULT],
2377 dev->io_queues[HCTX_TYPE_READ],
2378 dev->io_queues[HCTX_TYPE_POLL]);
2379 return 0;
2380out_unlock:
2381 mutex_unlock(&dev->shutdown_lock);
2382 return result;
2383}
2384
2385static void nvme_del_queue_end(struct request *req, blk_status_t error)
2386{
2387 struct nvme_queue *nvmeq = req->end_io_data;
2388
2389 blk_mq_free_request(req);
2390 complete(&nvmeq->delete_done);
2391}
2392
2393static void nvme_del_cq_end(struct request *req, blk_status_t error)
2394{
2395 struct nvme_queue *nvmeq = req->end_io_data;
2396
2397 if (error)
2398 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
2399
2400 nvme_del_queue_end(req, error);
2401}
2402
2403static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
2404{
2405 struct request_queue *q = nvmeq->dev->ctrl.admin_q;
2406 struct request *req;
2407 struct nvme_command cmd = { };
2408
2409 cmd.delete_queue.opcode = opcode;
2410 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2411
2412 req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
2413 if (IS_ERR(req))
2414 return PTR_ERR(req);
2415
2416 req->end_io_data = nvmeq;
2417
2418 init_completion(&nvmeq->delete_done);
2419 blk_execute_rq_nowait(NULL, req, false,
2420 opcode == nvme_admin_delete_cq ?
2421 nvme_del_cq_end : nvme_del_queue_end);
2422 return 0;
2423}
2424
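/*
 * Asynchronously issue a delete command for every online I/O queue of the
 * given type, then wait for all completions.  Returns false if any deletion
 * fails to complete within the admin timeout.
 */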
2425static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
2426{
2427 int nr_queues = dev->online_queues - 1, sent = 0;
2428 unsigned long timeout;
2429
2430 retry:
2431 timeout = NVME_ADMIN_TIMEOUT;
2432 while (nr_queues > 0) {
2433 if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
2434 break;
2435 nr_queues--;
2436 sent++;
2437 }
2438 while (sent) {
2439 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
2440
2441 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
2442 timeout);
2443 if (timeout == 0)
2444 return false;
2445
2446 sent--;
2447 if (nr_queues)
2448 goto retry;
2449 }
2450 return true;
2451}
2452
2453static void nvme_dev_add(struct nvme_dev *dev)
2454{
2455 int ret;
2456
2457 if (!dev->ctrl.tagset) {
2458 dev->tagset.ops = &nvme_mq_ops;
2459 dev->tagset.nr_hw_queues = dev->online_queues - 1;
2460 dev->tagset.nr_maps = 2; /* default + read */
2461 if (dev->io_queues[HCTX_TYPE_POLL])
2462 dev->tagset.nr_maps++;
2463 dev->tagset.timeout = NVME_IO_TIMEOUT;
2464 dev->tagset.numa_node = dev->ctrl.numa_node;
2465 dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
2466 BLK_MQ_MAX_DEPTH) - 1;
2467 dev->tagset.cmd_size = sizeof(struct nvme_iod);
2468 dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
2469 dev->tagset.driver_data = dev;
2470
2471 /*
2472		 * Some Apple controllers require tags to be unique
2473 * across admin and IO queue, so reserve the first 32
2474 * tags of the IO queue.
2475 */
2476 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2477 dev->tagset.reserved_tags = NVME_AQ_DEPTH;
2478
2479 ret = blk_mq_alloc_tag_set(&dev->tagset);
2480 if (ret) {
2481 dev_warn(dev->ctrl.device,
2482 "IO queues tagset allocation failed %d\n", ret);
2483 return;
2484 }
2485 dev->ctrl.tagset = &dev->tagset;
2486 } else {
2487 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
2488
2489 /* Free previously allocated queues that are no longer usable */
2490 nvme_free_queues(dev, dev->online_queues);
2491 }
2492
2493 nvme_dbbuf_set(dev);
2494}
2495
2496static int nvme_pci_enable(struct nvme_dev *dev)
2497{
2498 int result = -ENOMEM;
2499 struct pci_dev *pdev = to_pci_dev(dev->dev);
2500 int dma_address_bits = 64;
2501
2502 if (pci_enable_device_mem(pdev))
2503 return result;
2504
2505 pci_set_master(pdev);
2506
2507 if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
2508 dma_address_bits = 48;
2509 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
2510 goto disable;
2511
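	/* A CSTS read of all ones means the device is absent or unresponsive */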
2512 if (readl(dev->bar + NVME_REG_CSTS) == -1) {
2513 result = -ENODEV;
2514 goto disable;
2515 }
2516
2517 /*
2518 * Some devices and/or platforms don't advertise or work with INTx
2519 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
2520 * adjust this later.
2521 */
2522 result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
2523 if (result < 0)
2524 return result;
2525
2526 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
2527
2528 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
2529 io_queue_depth);
2530 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
2531 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
2532 dev->dbs = dev->bar + 4096;
2533
2534 /*
2535 * Some Apple controllers require a non-standard SQE size.
2536 * Interestingly they also seem to ignore the CC:IOSQES register
2537 * so we don't bother updating it here.
2538 */
2539 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
2540 dev->io_sqes = 7;
2541 else
2542 dev->io_sqes = NVME_NVM_IOSQES;
2543
2544 /*
2545 * Temporary fix for the Apple controller found in the MacBook8,1 and
2546 * some MacBook7,1 to avoid controller resets and data loss.
2547 */
2548 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
2549 dev->q_depth = 2;
2550 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
2551 "set queue depth=%u to work around controller resets\n",
2552 dev->q_depth);
2553 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
2554 (pdev->device == 0xa821 || pdev->device == 0xa822) &&
2555 NVME_CAP_MQES(dev->ctrl.cap) == 0) {
2556 dev->q_depth = 64;
2557 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
2558 "set queue depth=%u\n", dev->q_depth);
2559 }
2560
2561 /*
2562 * Controllers with the shared tags quirk need the IO queue to be
2563 * big enough so that we get 32 tags for the admin queue
2564 */
2565 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
2566 (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
2567 dev->q_depth = NVME_AQ_DEPTH + 2;
2568 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
2569 dev->q_depth);
2570 }
2571
2572
2573 nvme_map_cmb(dev);
2574
2575 pci_enable_pcie_error_reporting(pdev);
2576 pci_save_state(pdev);
2577 return 0;
2578
2579 disable:
2580 pci_disable_device(pdev);
2581 return result;
2582}
2583
2584static void nvme_dev_unmap(struct nvme_dev *dev)
2585{
2586 if (dev->bar)
2587 iounmap(dev->bar);
2588 pci_release_mem_regions(to_pci_dev(dev->dev));
2589}
2590
2591static void nvme_pci_disable(struct nvme_dev *dev)
2592{
2593 struct pci_dev *pdev = to_pci_dev(dev->dev);
2594
2595 pci_free_irq_vectors(pdev);
2596
2597 if (pci_is_enabled(pdev)) {
2598 pci_disable_pcie_error_reporting(pdev);
2599 pci_disable_device(pdev);
2600 }
2601}
2602
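/*
 * Quiesce and tear down the controller.  If it is still responsive, the I/O
 * queues are deleted and the admin queue is disabled (performing an NVMe
 * shutdown when 'shutdown' is set); outstanding requests are then cancelled
 * so blk-mq can make forward progress.
 */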
2603static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2604{
2605 bool dead = true, freeze = false;
2606 struct pci_dev *pdev = to_pci_dev(dev->dev);
2607
2608 mutex_lock(&dev->shutdown_lock);
2609 if (pci_is_enabled(pdev)) {
2610 u32 csts = readl(dev->bar + NVME_REG_CSTS);
2611
2612 if (dev->ctrl.state == NVME_CTRL_LIVE ||
2613 dev->ctrl.state == NVME_CTRL_RESETTING) {
2614 freeze = true;
2615 nvme_start_freeze(&dev->ctrl);
2616 }
2617 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
2618 pdev->error_state != pci_channel_io_normal);
2619 }
2620
2621 /*
2622 * Give the controller a chance to complete all entered requests if
2623 * doing a safe shutdown.
2624 */
2625 if (!dead && shutdown && freeze)
2626 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
2627
2628 nvme_stop_queues(&dev->ctrl);
2629
2630 if (!dead && dev->ctrl.queue_count > 0) {
2631 nvme_disable_io_queues(dev);
2632 nvme_disable_admin_queue(dev, shutdown);
2633 }
2634 nvme_suspend_io_queues(dev);
2635 nvme_suspend_queue(&dev->queues[0]);
2636 nvme_pci_disable(dev);
2637 nvme_reap_pending_cqes(dev);
2638
2639 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
2640 blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
2641 blk_mq_tagset_wait_completed_request(&dev->tagset);
2642 blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
2643
2644 /*
2645	 * The driver will not be starting up queues again if shutting down, so
2646	 * flush all entered requests to their failed completion to avoid
2647	 * deadlocking the blk-mq hot-cpu notifier.
2648 */
2649 if (shutdown) {
2650 nvme_start_queues(&dev->ctrl);
2651 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
2652 nvme_start_admin_queue(&dev->ctrl);
2653 }
2654 mutex_unlock(&dev->shutdown_lock);
2655}
2656
2657static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
2658{
2659 if (!nvme_wait_reset(&dev->ctrl))
2660 return -EBUSY;
2661 nvme_dev_disable(dev, shutdown);
2662 return 0;
2663}
2664
2665static int nvme_setup_prp_pools(struct nvme_dev *dev)
2666{
2667 dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
2668 NVME_CTRL_PAGE_SIZE,
2669 NVME_CTRL_PAGE_SIZE, 0);
2670 if (!dev->prp_page_pool)
2671 return -ENOMEM;
2672
2673 /* Optimisation for I/Os between 4k and 128k */
2674 dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
2675 256, 256, 0);
2676 if (!dev->prp_small_pool) {
2677 dma_pool_destroy(dev->prp_page_pool);
2678 return -ENOMEM;
2679 }
2680 return 0;
2681}
2682
2683static void nvme_release_prp_pools(struct nvme_dev *dev)
2684{
2685 dma_pool_destroy(dev->prp_page_pool);
2686 dma_pool_destroy(dev->prp_small_pool);
2687}
2688
2689static void nvme_free_tagset(struct nvme_dev *dev)
2690{
2691 if (dev->tagset.tags)
2692 blk_mq_free_tag_set(&dev->tagset);
2693 dev->ctrl.tagset = NULL;
2694}
2695
2696static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
2697{
2698 struct nvme_dev *dev = to_nvme_dev(ctrl);
2699
2700 nvme_dbbuf_dma_free(dev);
2701 nvme_free_tagset(dev);
2702 if (dev->ctrl.admin_q)
2703 blk_put_queue(dev->ctrl.admin_q);
2704 free_opal_dev(dev->ctrl.opal_dev);
2705 mempool_destroy(dev->iod_mempool);
2706 put_device(dev->dev);
2707 kfree(dev->queues);
2708 kfree(dev);
2709}
2710
2711static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
2712{
2713 /*
2714 * Set state to deleting now to avoid blocking nvme_wait_reset(), which
2715 * may be holding this pci_dev's device lock.
2716 */
2717 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2718 nvme_get_ctrl(&dev->ctrl);
2719 nvme_dev_disable(dev, false);
2720 nvme_kill_queues(&dev->ctrl);
2721 if (!queue_work(nvme_wq, &dev->remove_work))
2722 nvme_put_ctrl(&dev->ctrl);
2723}
2724
2725static void nvme_reset_work(struct work_struct *work)
2726{
2727 struct nvme_dev *dev =
2728 container_of(work, struct nvme_dev, ctrl.reset_work);
2729 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
2730 int result;
2731
2732 if (dev->ctrl.state != NVME_CTRL_RESETTING) {
2733 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
2734 dev->ctrl.state);
2735 result = -ENODEV;
2736 goto out;
2737 }
2738
2739 /*
2740	 * If we're called to reset a live controller, first shut it down before
2741 * moving on.
2742 */
2743 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2744 nvme_dev_disable(dev, false);
2745 nvme_sync_queues(&dev->ctrl);
2746
2747 mutex_lock(&dev->shutdown_lock);
2748 result = nvme_pci_enable(dev);
2749 if (result)
2750 goto out_unlock;
2751
2752 result = nvme_pci_configure_admin_queue(dev);
2753 if (result)
2754 goto out_unlock;
2755
2756 result = nvme_alloc_admin_tags(dev);
2757 if (result)
2758 goto out_unlock;
2759
2760 /*
2761 * Limit the max command size to prevent iod->sg allocations going
2762 * over a single page.
2763 */
2764 dev->ctrl.max_hw_sectors = min_t(u32,
2765 NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9);
2766 dev->ctrl.max_segments = NVME_MAX_SEGS;
2767
2768 /*
2769 * Don't limit the IOMMU merged segment size.
2770 */
2771 dma_set_max_seg_size(dev->dev, 0xffffffff);
2772 dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
2773
2774 mutex_unlock(&dev->shutdown_lock);
2775
2776 /*
2777	 * Use the CONNECTING state, introduced for the nvme-fc/rdma transports,
2778	 * to mark the initialization procedure here.
2779 */
2780 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2781 dev_warn(dev->ctrl.device,
2782 "failed to mark controller CONNECTING\n");
2783 result = -EBUSY;
2784 goto out;
2785 }
2786
2787 /*
2788 * We do not support an SGL for metadata (yet), so we are limited to a
2789 * single integrity segment for the separate metadata pointer.
2790 */
2791 dev->ctrl.max_integrity_segments = 1;
2792
2793 result = nvme_init_ctrl_finish(&dev->ctrl);
2794 if (result)
2795 goto out;
2796
2797 if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
2798 if (!dev->ctrl.opal_dev)
2799 dev->ctrl.opal_dev =
2800 init_opal_dev(&dev->ctrl, &nvme_sec_submit);
2801 else if (was_suspend)
2802 opal_unlock_from_suspend(dev->ctrl.opal_dev);
2803 } else {
2804 free_opal_dev(dev->ctrl.opal_dev);
2805 dev->ctrl.opal_dev = NULL;
2806 }
2807
2808 if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
2809 result = nvme_dbbuf_dma_alloc(dev);
2810 if (result)
2811 dev_warn(dev->dev,
2812 "unable to allocate dma for dbbuf\n");
2813 }
2814
2815 if (dev->ctrl.hmpre) {
2816 result = nvme_setup_host_mem(dev);
2817 if (result < 0)
2818 goto out;
2819 }
2820
2821 result = nvme_setup_io_queues(dev);
2822 if (result)
2823 goto out;
2824
2825 /*
2826 * Keep the controller around but remove all namespaces if we don't have
2827 * any working I/O queue.
2828 */
2829 if (dev->online_queues < 2) {
2830 dev_warn(dev->ctrl.device, "IO queues not created\n");
2831 nvme_kill_queues(&dev->ctrl);
2832 nvme_remove_namespaces(&dev->ctrl);
2833 nvme_free_tagset(dev);
2834 } else {
2835 nvme_start_queues(&dev->ctrl);
2836 nvme_wait_freeze(&dev->ctrl);
2837 nvme_dev_add(dev);
2838 nvme_unfreeze(&dev->ctrl);
2839 }
2840
2841 /*
2842	 * If only the admin queue is live, keep it for further investigation
2843	 * or recovery.
2844 */
2845 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
2846 dev_warn(dev->ctrl.device,
2847 "failed to mark controller live state\n");
2848 result = -ENODEV;
2849 goto out;
2850 }
2851
2852 if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj,
2853 &nvme_pci_attr_group))
2854 dev->attrs_added = true;
2855
2856 nvme_start_ctrl(&dev->ctrl);
2857 return;
2858
2859 out_unlock:
2860 mutex_unlock(&dev->shutdown_lock);
2861 out:
2862 if (result)
2863 dev_warn(dev->ctrl.device,
2864 "Removing after probe failure status: %d\n", result);
2865 nvme_remove_dead_ctrl(dev);
2866}
2867
2868static void nvme_remove_dead_ctrl_work(struct work_struct *work)
2869{
2870 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
2871 struct pci_dev *pdev = to_pci_dev(dev->dev);
2872
2873 if (pci_get_drvdata(pdev))
2874 device_release_driver(&pdev->dev);
2875 nvme_put_ctrl(&dev->ctrl);
2876}
2877
2878static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
2879{
2880 *val = readl(to_nvme_dev(ctrl)->bar + off);
2881 return 0;
2882}
2883
2884static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
2885{
2886 writel(val, to_nvme_dev(ctrl)->bar + off);
2887 return 0;
2888}
2889
2890static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
2891{
2892 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
2893 return 0;
2894}
2895
2896static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2897{
2898 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
2899
2900 return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
2901}
2902
2903static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2904 .name = "pcie",
2905 .module = THIS_MODULE,
2906 .flags = NVME_F_METADATA_SUPPORTED |
2907 NVME_F_PCI_P2PDMA,
2908 .reg_read32 = nvme_pci_reg_read32,
2909 .reg_write32 = nvme_pci_reg_write32,
2910 .reg_read64 = nvme_pci_reg_read64,
2911 .free_ctrl = nvme_pci_free_ctrl,
2912 .submit_async_event = nvme_pci_submit_async_event,
2913 .get_address = nvme_pci_get_address,
2914};
2915
2916static int nvme_dev_map(struct nvme_dev *dev)
2917{
2918 struct pci_dev *pdev = to_pci_dev(dev->dev);
2919
2920 if (pci_request_mem_regions(pdev, "nvme"))
2921 return -ENODEV;
2922
2923 if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
2924 goto release;
2925
2926 return 0;
2927 release:
2928 pci_release_mem_regions(pdev);
2929 return -ENODEV;
2930}
2931
2932static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
2933{
2934 if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
2935 /*
2936 * Several Samsung devices seem to drop off the PCIe bus
2937		 * randomly when APST is on and the deepest sleep state is used.
2938 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
2939 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
2940 * 950 PRO 256GB", but it seems to be restricted to two Dell
2941 * laptops.
2942 */
2943 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
2944 (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
2945 dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
2946 return NVME_QUIRK_NO_DEEPEST_PS;
2947 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
2948 /*
2949 * Samsung SSD 960 EVO drops off the PCIe bus after system
2950 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
2951		 * within a few minutes after bootup on a Coffee Lake board -
2952		 * ASUS PRIME Z370-A.
2953 */
2954 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
2955 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
2956 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
2957 return NVME_QUIRK_NO_APST;
2958 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
2959 pdev->device == 0xa808 || pdev->device == 0xa809)) ||
2960 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
2961 /*
2962		 * Force host managed nvme power settings for lowest idle power
2963		 * with quick resume latency on Samsung and Toshiba SSDs, based
2964		 * on suspend behavior observed on the Coffee Lake based
2965		 * LENOVO C640.
2966 */
2967 if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
2968 dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
2969 return NVME_QUIRK_SIMPLE_SUSPEND;
2970 }
2971
2972 return 0;
2973}
2974
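/*
 * Deferred tail of probe: wait for the initial reset and namespace scan to
 * finish before dropping the controller reference.
 */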
2975static void nvme_async_probe(void *data, async_cookie_t cookie)
2976{
2977 struct nvme_dev *dev = data;
2978
2979 flush_work(&dev->ctrl.reset_work);
2980 flush_work(&dev->ctrl.scan_work);
2981 nvme_put_ctrl(&dev->ctrl);
2982}
2983
2984static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2985{
2986 int node, result = -ENOMEM;
2987 struct nvme_dev *dev;
2988 unsigned long quirks = id->driver_data;
2989 size_t alloc_size;
2990
2991 node = dev_to_node(&pdev->dev);
2992 if (node == NUMA_NO_NODE)
2993 set_dev_node(&pdev->dev, first_memory_node);
2994
2995 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
2996 if (!dev)
2997 return -ENOMEM;
2998
2999 dev->nr_write_queues = write_queues;
3000 dev->nr_poll_queues = poll_queues;
3001 dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
3002 dev->queues = kcalloc_node(dev->nr_allocated_queues,
3003 sizeof(struct nvme_queue), GFP_KERNEL, node);
3004 if (!dev->queues)
3005 goto free;
3006
3007 dev->dev = get_device(&pdev->dev);
3008 pci_set_drvdata(pdev, dev);
3009
3010 result = nvme_dev_map(dev);
3011 if (result)
3012 goto put_pci;
3013
3014 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
3015 INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
3016 mutex_init(&dev->shutdown_lock);
3017
3018 result = nvme_setup_prp_pools(dev);
3019 if (result)
3020 goto unmap;
3021
3022 quirks |= check_vendor_combination_bug(pdev);
3023
3024 if (!noacpi && acpi_storage_d3(&pdev->dev)) {
3025 /*
3026		 * Some systems use a BIOS workaround to ask for D3 on
3027 * platforms that support kernel managed suspend.
3028 */
3029 dev_info(&pdev->dev,
3030 "platform quirk: setting simple suspend\n");
3031 quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
3032 }
3033
3034 /*
3035 * Double check that our mempool alloc size will cover the biggest
3036 * command we support.
3037 */
3038 alloc_size = nvme_pci_iod_alloc_size();
3039 WARN_ON_ONCE(alloc_size > PAGE_SIZE);
3040
3041 dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
3042 mempool_kfree,
3043 (void *) alloc_size,
3044 GFP_KERNEL, node);
3045 if (!dev->iod_mempool) {
3046 result = -ENOMEM;
3047 goto release_pools;
3048 }
3049
3050 result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
3051 quirks);
3052 if (result)
3053 goto release_mempool;
3054
3055 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
3056
3057 nvme_reset_ctrl(&dev->ctrl);
3058 async_schedule(nvme_async_probe, dev);
3059
3060 return 0;
3061
3062 release_mempool:
3063 mempool_destroy(dev->iod_mempool);
3064 release_pools:
3065 nvme_release_prp_pools(dev);
3066 unmap:
3067 nvme_dev_unmap(dev);
3068 put_pci:
3069 put_device(dev->dev);
3070 free:
3071 kfree(dev->queues);
3072 kfree(dev);
3073 return result;
3074}
3075
3076static void nvme_reset_prepare(struct pci_dev *pdev)
3077{
3078 struct nvme_dev *dev = pci_get_drvdata(pdev);
3079
3080 /*
3081 * We don't need to check the return value from waiting for the reset
3082 * state as pci_dev device lock is held, making it impossible to race
3083 * with ->remove().
3084 */
3085 nvme_disable_prepare_reset(dev, false);
3086 nvme_sync_queues(&dev->ctrl);
3087}
3088
3089static void nvme_reset_done(struct pci_dev *pdev)
3090{
3091 struct nvme_dev *dev = pci_get_drvdata(pdev);
3092
3093 if (!nvme_try_sched_reset(&dev->ctrl))
3094 flush_work(&dev->ctrl.reset_work);
3095}
3096
3097static void nvme_shutdown(struct pci_dev *pdev)
3098{
3099 struct nvme_dev *dev = pci_get_drvdata(pdev);
3100
3101 nvme_disable_prepare_reset(dev, true);
3102}
3103
3104static void nvme_remove_attrs(struct nvme_dev *dev)
3105{
3106 if (dev->attrs_added)
3107 sysfs_remove_group(&dev->ctrl.device->kobj,
3108 &nvme_pci_attr_group);
3109}
3110
3111/*
3112 * The driver's remove may be called on a device in a partially initialized
3113 * state. This function must not have any dependencies on the device state in
3114 * order to proceed.
3115 */
3116static void nvme_remove(struct pci_dev *pdev)
3117{
3118 struct nvme_dev *dev = pci_get_drvdata(pdev);
3119
3120 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
3121 pci_set_drvdata(pdev, NULL);
3122
3123 if (!pci_device_is_present(pdev)) {
3124 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
3125 nvme_dev_disable(dev, true);
3126 }
3127
3128 flush_work(&dev->ctrl.reset_work);
3129 nvme_stop_ctrl(&dev->ctrl);
3130 nvme_remove_namespaces(&dev->ctrl);
3131 nvme_dev_disable(dev, true);
3132 nvme_remove_attrs(dev);
3133 nvme_free_host_mem(dev);
3134 nvme_dev_remove_admin(dev);
3135 nvme_free_queues(dev, 0);
3136 nvme_release_prp_pools(dev);
3137 nvme_dev_unmap(dev);
3138 nvme_uninit_ctrl(&dev->ctrl);
3139}
3140
3141#ifdef CONFIG_PM_SLEEP
3142static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
3143{
3144 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
3145}
3146
3147static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
3148{
3149 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
3150}
3151
3152static int nvme_resume(struct device *dev)
3153{
3154 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
3155 struct nvme_ctrl *ctrl = &ndev->ctrl;
3156
3157 if (ndev->last_ps == U32_MAX ||
3158 nvme_set_power_state(ctrl, ndev->last_ps) != 0)
3159 goto reset;
3160 if (ctrl->hmpre && nvme_setup_host_mem(ndev))
3161 goto reset;
3162
3163 return 0;
3164reset:
3165 return nvme_try_sched_reset(ctrl);
3166}
3167
3168static int nvme_suspend(struct device *dev)
3169{
3170 struct pci_dev *pdev = to_pci_dev(dev);
3171 struct nvme_dev *ndev = pci_get_drvdata(pdev);
3172 struct nvme_ctrl *ctrl = &ndev->ctrl;
3173 int ret = -EBUSY;
3174
3175 ndev->last_ps = U32_MAX;
3176
3177 /*
3178 * The platform does not remove power for a kernel managed suspend so
3179 * use host managed nvme power settings for lowest idle power if
3180 * possible. This should have quicker resume latency than a full device
3181 * shutdown. But if the firmware is involved after the suspend or the
3182 * device does not support any non-default power states, shut down the
3183 * device fully.
3184 *
3185 * If ASPM is not enabled for the device, shut down the device and allow
3186 * the PCI bus layer to put it into D3 in order to take the PCIe link
3187 * down, so as to allow the platform to achieve its minimum low-power
3188 * state (which may not be possible if the link is up).
3189 */
3190 if (pm_suspend_via_firmware() || !ctrl->npss ||
3191 !pcie_aspm_enabled(pdev) ||
3192 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
3193 return nvme_disable_prepare_reset(ndev, true);
3194
3195 nvme_start_freeze(ctrl);
3196 nvme_wait_freeze(ctrl);
3197 nvme_sync_queues(ctrl);
3198
3199 if (ctrl->state != NVME_CTRL_LIVE)
3200 goto unfreeze;
3201
3202 /*
3203 * Host memory access may not be successful in a system suspend state,
3204 * but the specification allows the controller to access memory in a
3205 * non-operational power state.
3206 */
3207 if (ndev->hmb) {
3208 ret = nvme_set_host_mem(ndev, 0);
3209 if (ret < 0)
3210 goto unfreeze;
3211 }
3212
3213 ret = nvme_get_power_state(ctrl, &ndev->last_ps);
3214 if (ret < 0)
3215 goto unfreeze;
3216
3217 /*
3218 * A saved state prevents pci pm from generically controlling the
3219 * device's power. If we're using protocol specific settings, we don't
3220 * want pci interfering.
3221 */
3222 pci_save_state(pdev);
3223
3224 ret = nvme_set_power_state(ctrl, ctrl->npss);
3225 if (ret < 0)
3226 goto unfreeze;
3227
3228 if (ret) {
3229 /* discard the saved state */
3230 pci_load_saved_state(pdev, NULL);
3231
3232 /*
3233 * Clearing npss forces a controller reset on resume. The
3234 * correct value will be rediscovered then.
3235 */
3236 ret = nvme_disable_prepare_reset(ndev, true);
3237 ctrl->npss = 0;
3238 }
3239unfreeze:
3240 nvme_unfreeze(ctrl);
3241 return ret;
3242}
3243
3244static int nvme_simple_suspend(struct device *dev)
3245{
3246 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
3247
3248 return nvme_disable_prepare_reset(ndev, true);
3249}
3250
3251static int nvme_simple_resume(struct device *dev)
3252{
3253 struct pci_dev *pdev = to_pci_dev(dev);
3254 struct nvme_dev *ndev = pci_get_drvdata(pdev);
3255
3256 return nvme_try_sched_reset(&ndev->ctrl);
3257}
3258
3259static const struct dev_pm_ops nvme_dev_pm_ops = {
3260 .suspend = nvme_suspend,
3261 .resume = nvme_resume,
3262 .freeze = nvme_simple_suspend,
3263 .thaw = nvme_simple_resume,
3264 .poweroff = nvme_simple_suspend,
3265 .restore = nvme_simple_resume,
3266};
3267#endif /* CONFIG_PM_SLEEP */
3268
3269static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
3270 pci_channel_state_t state)
3271{
3272 struct nvme_dev *dev = pci_get_drvdata(pdev);
3273
3274 /*
3275 * A frozen channel requires a reset. When detected, this method will
3276 * shutdown the controller to quiesce. The controller will be restarted
3277 * after the slot reset through driver's slot_reset callback.
3278 */
3279 switch (state) {
3280 case pci_channel_io_normal:
3281 return PCI_ERS_RESULT_CAN_RECOVER;
3282 case pci_channel_io_frozen:
3283 dev_warn(dev->ctrl.device,
3284 "frozen state error detected, reset controller\n");
3285 nvme_dev_disable(dev, false);
3286 return PCI_ERS_RESULT_NEED_RESET;
3287 case pci_channel_io_perm_failure:
3288 dev_warn(dev->ctrl.device,
3289 "failure state error detected, request disconnect\n");
3290 return PCI_ERS_RESULT_DISCONNECT;
3291 }
3292 return PCI_ERS_RESULT_NEED_RESET;
3293}
3294
3295static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
3296{
3297 struct nvme_dev *dev = pci_get_drvdata(pdev);
3298
3299 dev_info(dev->ctrl.device, "restart after slot reset\n");
3300 pci_restore_state(pdev);
3301 nvme_reset_ctrl(&dev->ctrl);
3302 return PCI_ERS_RESULT_RECOVERED;
3303}
3304
3305static void nvme_error_resume(struct pci_dev *pdev)
3306{
3307 struct nvme_dev *dev = pci_get_drvdata(pdev);
3308
3309 flush_work(&dev->ctrl.reset_work);
3310}
3311
3312static const struct pci_error_handlers nvme_err_handler = {
3313 .error_detected = nvme_error_detected,
3314 .slot_reset = nvme_slot_reset,
3315 .resume = nvme_error_resume,
3316 .reset_prepare = nvme_reset_prepare,
3317 .reset_done = nvme_reset_done,
3318};
3319
3320static const struct pci_device_id nvme_id_table[] = {
3321 { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
3322 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3323 NVME_QUIRK_DEALLOCATE_ZEROES, },
3324 { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
3325 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3326 NVME_QUIRK_DEALLOCATE_ZEROES, },
3327 { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
3328 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3329 NVME_QUIRK_DEALLOCATE_ZEROES, },
3330 { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
3331 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3332 NVME_QUIRK_DEALLOCATE_ZEROES, },
3333 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
3334 .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3335 NVME_QUIRK_MEDIUM_PRIO_SQ |
3336 NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
3337 NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3338 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
3339 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3340 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
3341 .driver_data = NVME_QUIRK_IDENTIFY_CNS |
3342 NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3343 { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
3344 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
3345 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
3346 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
3347 NVME_QUIRK_NO_NS_DESC_LIST, },
3348 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
3349 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3350 { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
3351 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3352 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
3353 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3354 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
3355 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3356 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
3357 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
3358 NVME_QUIRK_DISABLE_WRITE_ZEROES|
3359 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3360 { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
3361 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3362 { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
3363 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
3364 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3365 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
3366 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3367 { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
3368 .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3369 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3370 { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
3371 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3372 { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
3373 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3374 { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
3375 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3376 { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
3377 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3378 { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
3379 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3380 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
3381 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
3382 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
3383 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
3384 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
3385 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
3386 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
3387 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
3388 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
3389 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
3390 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
3391 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
3392 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
3393 .driver_data = NVME_QUIRK_SINGLE_VECTOR },
3394 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
3395 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
3396 .driver_data = NVME_QUIRK_SINGLE_VECTOR |
3397 NVME_QUIRK_128_BYTES_SQES |
3398 NVME_QUIRK_SHARED_TAGS |
3399 NVME_QUIRK_SKIP_CID_GEN },
3400
3401 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
3402 { 0, }
3403};
3404MODULE_DEVICE_TABLE(pci, nvme_id_table);
3405
3406static struct pci_driver nvme_driver = {
3407 .name = "nvme",
3408 .id_table = nvme_id_table,
3409 .probe = nvme_probe,
3410 .remove = nvme_remove,
3411 .shutdown = nvme_shutdown,
3412#ifdef CONFIG_PM_SLEEP
3413 .driver = {
3414 .pm = &nvme_dev_pm_ops,
3415 },
3416#endif
3417 .sriov_configure = pci_sriov_configure_simple,
3418 .err_handler = &nvme_err_handler,
3419};
3420
3421static int __init nvme_init(void)
3422{
3423 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
3424 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
3425 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
3426 BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
3427
3428 return pci_register_driver(&nvme_driver);
3429}
3430
3431static void __exit nvme_exit(void)
3432{
3433 pci_unregister_driver(&nvme_driver);
3434 flush_workqueue(nvme_wq);
3435}
3436
3437MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
3438MODULE_LICENSE("GPL");
3439MODULE_VERSION("1.0");
3440module_init(nvme_init);
3441module_exit(nvme_exit);