diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index dd10cf78f2d3..8f006638452b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,8 +28,8 @@
 #include "trace.h"
 #include "nvme.h"
 
-#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
+#define SQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_command))
+#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
 #define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
@@ -1344,16 +1344,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
-	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
+	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
 	if (!nvmeq->sq_cmds)
 		return;
 
 	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
 		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
-				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 	} else {
-		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
+		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
 				  nvmeq->sq_cmds, nvmeq->sq_dma_addr);
 	}
 }
@@ -1433,12 +1433,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 }
 
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
-				int qid, int depth)
+				int qid)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
 		if (nvmeq->sq_cmds) {
 			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
 							nvmeq->sq_cmds);
@@ -1447,11 +1447,11 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				return 0;
 			}
 
-			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 		}
 	}
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
 				&nvmeq->sq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->sq_cmds)
 		return -ENOMEM;
@@ -1465,12 +1465,13 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	if (dev->ctrl.queue_count > qid)
 		return 0;
 
-	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+	nvmeq->q_depth = depth;
+	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
-	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
+	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
 		goto free_cqdma;
 
 	nvmeq->dev = dev;
@@ -1479,15 +1480,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	dev->ctrl.queue_count++;
 
 	return 0;
 
  free_cqdma:
-	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
-			  nvmeq->cq_dma_addr);
+	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
+			  nvmeq->cq_dma_addr);
  free_nvmeq:
 	return -ENOMEM;
 }
@@ -1515,7 +1515,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
 	nvme_dbbuf_init(dev, nvmeq, qid);
 	dev->online_queues++;
 	wmb(); /* ensure the first interrupt sees the initialization */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cc09b81fc7f4..716ebe87a2b8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1986,6 +1986,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
 	ctrl->ctrl_config = NVME_CC_CSS_NVM;
 	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
+	/* Use default IOSQES. We'll update it later if needed */
 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 	ctrl->ctrl_config |= NVME_CC_ENABLE;
 
@@ -2698,6 +2699,30 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		ctrl->hmmin = le32_to_cpu(id->hmmin);
 		ctrl->hmminds = le32_to_cpu(id->hmminds);
 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
+
+		/* Grab required IO queue size */
+		ctrl->iosqes = id->sqes & 0xf;
+		if (ctrl->iosqes < NVME_NVM_IOSQES) {
+			dev_err(ctrl->device,
+				"unsupported required IO queue size %d\n", ctrl->iosqes);
+			ret = -EINVAL;
+			goto out_free;
+		}
+		/*
+		 * If our IO queue size isn't the default, update the setting
+		 * in CC:IOSQES.
+		 */
+		if (ctrl->iosqes != NVME_NVM_IOSQES) {
+			ctrl->ctrl_config &= ~(0xfu << NVME_CC_IOSQES_SHIFT);
+			ctrl->ctrl_config |= ctrl->iosqes << NVME_CC_IOSQES_SHIFT;
+			ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC,
+						     ctrl->ctrl_config);
+			if (ret) {
+				dev_err(ctrl->device,
+					"error updating CC register\n");
+				goto out_free;
+			}
+		}
 	}
 
 	ret = nvme_mpath_init(ctrl, id);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 716a876119c8..34ef35fcd8a5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -244,6 +244,7 @@ struct nvme_ctrl {
 	u32 hmmin;
 	u32 hmminds;
 	u16 hmmaxd;
+	u8 iosqes;
 
 	/* Fabrics only */
 	u16 sqsize;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8f006638452b..54b35ea4af88 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,7 +28,7 @@
 #include "trace.h"
 #include "nvme.h"
 
-#define SQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_command))
+#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
 #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
 #define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
@@ -162,7 +162,7 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
 struct nvme_queue {
 	struct nvme_dev *dev;
 	spinlock_t sq_lock;
-	struct nvme_command *sq_cmds;
+	void *sq_cmds;
 	 /* only used for poll queues: */
 	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
 	volatile struct nvme_completion *cqes;
@@ -178,6 +178,7 @@ struct nvme_queue {
 	u16 last_cq_head;
 	u16 qid;
 	u8 cq_phase;
+	u8 sqes;
 	unsigned long flags;
 #define NVMEQ_ENABLED		0
 #define NVMEQ_SQ_CMB		1
@@ -488,7 +489,8 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
 			    bool write_sq)
 {
 	spin_lock(&nvmeq->sq_lock);
-	memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
+	       cmd, sizeof(*cmd));
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
 	nvme_write_sq_db(nvmeq, write_sq);
@@ -1465,6 +1467,7 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	if (dev->ctrl.queue_count > qid)
 		return 0;
 
+	nvmeq->sqes = qid ? dev->ctrl.iosqes : NVME_NVM_ADMSQES;
 	nvmeq->q_depth = depth;
 	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 01aa6a6c241d..7af18965fb57 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -141,6 +141,7 @@ enum {
  * (In bytes and specified as a power of two (2^n)).
  */
 #define NVME_NVM_IOSQES		6
+#define NVME_NVM_ADMSQES	6
 #define NVME_NVM_IOCQES		4
 
 enum {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 716ebe87a2b8..480ea24d8cf4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2701,7 +2701,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
 
 		/* Grab required IO queue size */
-		ctrl->iosqes = id->sqes & 0xf;
+		if (ctrl->quirks & NVME_QUIRK_128_BYTES_SQES)
+			ctrl->iosqes = 7;
+		else
+			ctrl->iosqes = id->sqes & 0xf;
 		if (ctrl->iosqes < NVME_NVM_IOSQES) {
 			dev_err(ctrl->device,
 				"unsupported required IO queue size %d\n", ctrl->iosqes);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 34ef35fcd8a5..b2a78d08b984 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -92,6 +92,16 @@ enum nvme_quirks {
 	 * Broken Write Zeroes.
 	 */
 	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),
+
+	/*
+	 * Use only one interrupt vector for all queues.
+	 */
+	NVME_QUIRK_SINGLE_VECTOR		= (1 << 10),
+
+	/*
+	 * Use non-standard 128-byte SQEs.
+	 */
+	NVME_QUIRK_128_BYTES_SQES		= (1 << 11),
 };
 
 /*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 54b35ea4af88..ab2358137419 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2080,6 +2080,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
 	dev->io_queues[HCTX_TYPE_READ] = 0;
 
+	if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)
+		irq_queues = 1;
+
 	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
 			      PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
 }
@@ -3037,6 +3040,9 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
+		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
+				NVME_QUIRK_128_BYTES_SQES },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, nvme_id_table);