/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);

/**
 * lpfc_nvme_create_queue - Create a queue handle for a given qidx
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
 * @qsize: Requested queue size (not used by this routine).
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
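	/*
	 * Worked example (illustrative): with cfg_nvme_io_channel == 4,
	 * IO-queue qidx values 1..8 map to indexes 0,1,2,3,0,1,2,3
	 * (e.g. qidx 5 -> (5 - 1) % 4 == 0); the admin queue keeps index 0.
	 */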
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			vport->phba->cfg_nvme_io_channel);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
			 "6073 Binding %s HdwQueue %d (cpu %d) to "
			 "io_channel %d qhandle %p\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Delete the handle created for a qidx
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
			lport, qidx, handle);
	kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->lport_unreg_done);
}

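/*
 * Note: lport_unreg_done pairs with a wait_for_completion() in the
 * driver's localport unregister path, so the waiting thread is only
 * released once the transport has finished tearing the localport down.
 */
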
/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;

	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node; this is devloss = 0
	 * semantics.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete complete %p\n",
			 remoteport);
	list_del(&rport->list);
	lpfc_nlp_put(ndlp);

 rport_err:
	/* This call has to execute as long as the rport is valid.
	 * Release any threads waiting for the unreg to complete.
	 */
	complete(&rport->rport_unreg_done);
}

static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	vport->phba->fc4NvmeLsCmpls++;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %p DID %x Xri: %x status %x cmd:%p lsreq:%p "
			 "bmp:%p ndlp:%p\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done call back? "
				 "Data %p DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}
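
	/*
	 * first_len (the first BDE's size) seeds WQE words 0-2 and the
	 * request_payload_len field below; xmit_len (the sum of all
	 * BDEs) is only reported in the debug log.
	 */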
	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
	if (rc == WQE_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}

/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_lsreq
 * @pnvme_lsreq: Pointer to the link service request being issued
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp;
	uint16_t ntype, nstate;

	/* There are two dma bufs in the request: actually there is one, and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct.  When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them.  And we do not have to look at the response data, we only
	 * care that we got a response.  All of the caring is going to happen
	 * in the nvme-fc layer.
	 */

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6051 DID x%06x not an active rport.\n",
				 pnvme_rport->port_id);
		return -ENODEV;
	}

	/* The remote node has to be a mapped nvme target or an
	 * unmapped nvme initiator or it's an error.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6088 DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return -ENODEV;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6044 NVME LS: failed to alloc dma buf "
				 "for DID %x\n",
				 pnvme_rport->port_id);
		return 2;
	}
	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6042 NVME LS: failed to alloc mbuf "
				 "for DID %x\n",
				 pnvme_rport->port_id);
		kfree(bmp);
		return 3;
	}
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
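	/* The BPL now holds two entries: bpl[0] describes the LS request
	 * payload and bpl[1] the response buffer the remote port fills.
	 */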

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	vport->phba->fc4NvmeLsRequests++;

	/* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
	 * This code allows it all to work.
	 */
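	/* num_entry == 2 matches the two BDEs built above (cmd + rsp);
	 * tmo is the hardcoded 30 second wait and retry is passed as 0.
	 */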
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
				ndlp, 2, 30, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6052 EXIT. issue ls wqe failed lport %p, "
				 "rport %p lsreq %p Status %x DID %x\n",
				 pnvme_lport, pnvme_rport, pnvme_lsreq,
				 ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return ret;
	}

	/* Stub in routine and return 0 for now. */
	return ret;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport that issued the @pnvme_lsreq
 * @pnvme_lsreq: Pointer to the link service request being aborted
 *
 * Driver registers this routine to abort a previously issued link
 * service request to a remote nvme-aware port.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	phba = vport->phba;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}

/* Fix up the existing sgls for NVME IO. */
static void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_nvme_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes.  Also, use the
	 * nvme command's command and response DMA addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->nvme_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	/* 128 byte wqe support here */
	wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;

	/* Word 0-2 - NVME CMND IU (embedded payload) */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
	wqe->generic.bde.tus.f.bdeSize = 60;
	wqe->generic.bde.addrHigh = 0;
	wqe->generic.bde.addrLow = 64;  /* Word 16 */
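	/*
	 * bdeSize 60 == 15 words of embedded payload (WQE words 16-30);
	 * addrLow 64 is the byte offset of word 16 within the 128-byte
	 * WQE (16 words * 4 bytes).
	 */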

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 10 */
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/*
	 * Embed the payload in the last half of the WQE
	 * WQE words 16-30 get the NVME CMD IU payload
	 *
	 * WQE words 16-19 get payload Words 1-4
	 * WQE words 20-21 get payload Words 6-7
	 * WQE words 22-29 get payload Words 16-23
	 */
	wptr = &wqe->words[16];		/* WQE ptr */
	dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
	dptr++;				/* Skip Word 0 in payload */

	*wptr++ = *dptr++;		/* Word 1 */
	*wptr++ = *dptr++;		/* Word 2 */
	*wptr++ = *dptr++;		/* Word 3 */
	*wptr++ = *dptr++;		/* Word 4 */
	dptr++;				/* Skip Word 5 in payload */
	*wptr++ = *dptr++;		/* Word 6 */
	*wptr++ = *dptr++;		/* Word 7 */
	dptr += 8;			/* Skip Words 8-15 in payload */
	*wptr++ = *dptr++;		/* Word 16 */
	*wptr++ = *dptr++;		/* Word 17 */
	*wptr++ = *dptr++;		/* Word 18 */
	*wptr++ = *dptr++;		/* Word 19 */
	*wptr++ = *dptr++;		/* Word 20 */
	*wptr++ = *dptr++;		/* Word 21 */
	*wptr++ = *dptr++;		/* Word 22 */
	*wptr = *dptr;			/* Word 23 */
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_nvme_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;

	if (!phba->ktime_on)
		return;
	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;
	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
		return;
	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
		return;
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;
	/*
	 * Segment 1 - Time from Last FCP command cmpl is handed
	 * off to NVME Layer to start of next command.
	 * Segment 2 - Time from Driver receives a IO cmd start
	 * from NVME Layer to WQ put is done on IO cmd.
	 * Segment 3 - Time from Driver WQ put is done on IO cmd
	 * to MSI-X ISR for IO cmpl.
	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
	 * cmpl is handed off to the NVME Layer.
	 */
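	/*
	 * Illustrative timeline (ns timestamps): ts_last_cmd=100,
	 * ts_cmd_start=150, ts_cmd_wqput=160, ts_isr_cmpl=400,
	 * ts_data_nvme=420 gives seg1=50, seg2=10, seg3=240, seg4=20.
	 */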
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)  /* 5 ms - for sequential IOs */
		return;

	/* Calculate times relative to start of IO */
	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
	seg3 = (lpfc_ncmd->ts_isr_cmpl -
		lpfc_ncmd->ts_cmd_start) - seg2;
	seg4 = (lpfc_ncmd->ts_data_nvme -
		lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
	phba->ktime_data_samples++;
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;
	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;
	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;
	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif

/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to the driver's hba data
 * @pwqeIn: Pointer to the command WQE that is completing
 * @wcqe: Pointer to the work-queue completion entry
 *
 * Driver registers this routine as the WQE completion handler for an
 * NVME FCP IO.  It translates the WCQE status into the nvmefc_fcp_req
 * and hands the completed request back to the nvme_fc transport.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvme_buf *lpfc_ncmd =
		(struct lpfc_nvme_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	unsigned long flags;
	uint32_t code;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6071 Completion pointers bad on wqe %p.\n",
				 wcqe);
		return;
	}
	phba->fc4NvmeIoCmpls++;

	nCmd = lpfc_ncmd->nvmeCmd;
	rport = lpfc_ncmd->nrport;

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6061 rport %p, DID x%06x node not ready.\n",
				 rport, rport->remoteport->port_id);

		ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6062 Ignoring NVME cmpl. No ndlp\n");
			goto out_err;
		}
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now let's build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
				     LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = wcqe->parameter;

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
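		/*
		 * Illustration: a read that placed all requested bytes
		 * with a normal all-zero 12/24-byte RSP completes as
		 * IOSTAT_SUCCESS; the same read returning a full ERSP
		 * surfaces as IOSTAT_FCP_RSP_ERROR with rcv_rsplen ==
		 * LPFC_NVME_ERSP_LEN and is still treated as success
		 * by the sanity check below.
		 */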
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
		atomic_dec(&ndlp->cmd_pending);

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		if (lpfc_ncmd->cpu != smp_processor_id())
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 smp_processor_id(), lpfc_ncmd->cpu);
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
	}
#endif
	freqpriv = nCmd->private;
	freqpriv->nvme_buf = NULL;
	nCmd->done(nCmd);

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_ncmd->nrport = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: Pointer to the driver's vport data
 * @lpfc_ncmd: Pointer to the driver's nvme buffer holding the IO
 * @pnode: Pointer to the node (rport) receiving the IO
 *
 * Driver fills in the non-DMA WQE fields of the IO held in
 * @lpfc_ncmd based on the IO direction and the first-burst settings
 * for the node @pnode.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - The node is not usable.
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
	uint32_t req_len;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	wqe->fcp_iwrite.initial_xfer_len = 0;
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			}
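			/* e.g. a write smaller than nvme_fb_size sends
			 * its whole payload as the first burst; larger
			 * writes are capped at nvme_fb_size.
			 */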

			/* Word 7 */
			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
			       CMD_FCP_IWRITE64_WQE);
			bf_set(wqe_pu, &wqe->generic.wqe_com,
			       PARM_READ_CHECK);

			/* Word 10 */
			bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
			bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com,
			       LPFC_WQE_IOD_WRITE);
			bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
			       LPFC_WQE_LENLOC_WORD4);
			if (phba->cfg_nvme_oas)
				bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);

			/* Word 11 */
			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
			       NVME_WRITE_CMD);

			phba->fc4NvmeOutputRequests++;
		} else {
			/* Word 7 */
			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
			       CMD_FCP_IREAD64_WQE);
			bf_set(wqe_pu, &wqe->generic.wqe_com,
			       PARM_READ_CHECK);

			/* Word 10 */
			bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
			bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
			       LPFC_WQE_IOD_READ);
			bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
			       LPFC_WQE_LENLOC_WORD4);
			if (phba->cfg_nvme_oas)
				bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);

			/* Word 11 */
			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
			       NVME_READ_CMD);

			phba->fc4NvmeInputRequests++;
		}
	} else {
		/* Word 4 */
		wqe->fcp_icmd.rsrvd4 = 0;

		/* Word 7 */
		bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE);
		bf_set(wqe_pu, &wqe->generic.wqe_com, 0);

		/* Word 10 */
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);

		phba->fc4NvmeControlRequests++;
	}
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 7 */
	/* Preserve Class data in the ndlp. */
	bf_set(wqe_class, &wqe->generic.wqe_com,
	       (pnode->nlp_fcp_info & 0x0f));

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	pwqeq->vport = vport;
	return 0;
}


/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: Pointer to the driver's vport data
 * @lpfc_ncmd: Pointer to the driver's nvme buffer holding the IO
 *
 * Driver formats the data scatter-gather list supplied by the
 * nvme_fc transport into the SGEs of the IO held in @lpfc_ncmd.
 *
 * Return value :
 *   0 - Success
 *   1 - The sg list is malformed or exceeds the supported segment count.
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport.  Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
		}
	} else {
		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of WQE here
	 */
	wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
	return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
 * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	phba = vport->phba;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6053 rport %p, ndlp %p, DID x%06x "
				 "ndlp not ready.\n",
				 rport, ndlp, pnvme_rport->port_id);

		ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6066 Missing node for DID %x\n",
					 pnvme_rport->port_id);
			ret = -ENODEV;
			goto out_fail;
		}
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6036 rport %p, DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 rport, pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		ret = -ENODEV;
		goto out_fail;

	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
		ret = -EAGAIN;
		goto out_fail;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
	if (lpfc_ncmd == NULL) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 driver's buffer pool is empty, "
				 "IO failed\n");
		ret = -ENOMEM;
		goto out_fail;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever.  There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->nrport = rport;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->start_time = jiffies;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	atomic_inc(&ndlp->cmd_pending);

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine.  The driver now is dependent on the IO queue steering from
	 * the transport.  We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue.  A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
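	/* e.g. with 4 io channels, the handle created for qidx 3 steers
	 * this IO to WQ/EQ/CQ index 2 ((3 - 1) % 4 in
	 * lpfc_nvme_create_queue).
	 */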
	lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_dec(&ndlp->cmd_pending);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6113 FCP could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		ret = -EBUSY;
		goto out_free_nvme_buf;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		lpfc_ncmd->cpu = smp_processor_id();
		if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
			/* Check for admin queue */
			if (lpfc_queue_info->qidx) {
				lpfc_printf_vlog(vport,
						 KERN_ERR, LOG_NVME_IOERR,
						 "6702 CPU Check cmd: "
						 "cpu %d wq %d\n",
						 lpfc_ncmd->cpu,
						 lpfc_queue_info->index);
			}
			lpfc_ncmd->cpu = lpfc_queue_info->index;
		}
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
	}
#endif
	return 0;

 out_free_nvme_buf:
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}

/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @abts_cmpl: Pointer to the abort WQE's completion entry.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport on which the IO was issued
 * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler.  This
 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.  This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_buf *lpfc_nbuf;
	struct lpfc_iocbq *abts_buf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
	union lpfc_wqe *abts_wqe;
	unsigned long flags;
	int ret_val;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	vport = lport->vport;
	phba = vport->phba;

	/* Announce entry to the abort handler. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req %p\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
				 "6054 rport %p, ndlp %p, DID x%06x ndlp "
				 "not ready.\n",
				 rport, ndlp, pnvme_rport->port_id);

		ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
					 "6055 Could not find node for "
					 "DID %x\n",
					 pnvme_rport->port_id);
			return;
		}
	}

	/* The remote node has to be ready to send an abort. */
	if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
	    !(ndlp->nlp_type & NLP_NVME_TARGET)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6048 rport %p, DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 rport, pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return;
	}

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now.  hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer.  Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer.  Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport.  If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf %p nvmeCmd %p, "
				 "pnvme_fcreq %p.  Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6142 NVME IO req %p not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, ndlp->nlp_DID);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq %p, "
				 "lpfc_ncmd %p xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		return;
	}

	abts_buf = __lpfc_sli_get_iocbq(phba);
	if (!abts_buf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6136 No available abort wqes. Skipping "
				 "Abts req for nvme_fcreq %p xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Ready - mark outstanding as aborted by driver. */
	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* Complete prepping the abort wqe and issue to the FW. */
	abts_wqe = &abts_buf->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       nvmereq_wqe->iocb.ulpClass);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_buf->iotag);

	/* word 10 */
	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx);
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_buf->iocb_flag |= LPFC_IO_NVME;
	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
	abts_buf->vport = vport;
	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (ret_val == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq %p.\n",
				 ret_val, pnvme_fcreq);
		lpfc_sli_release_iocbq(phba, abts_buf);
		return;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 nvmereq_wqe->sli4_xritag,
			 abts_buf->iotag);
}

/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete  = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req       = lpfc_nvme_ls_req,
	.fcp_io       = lpfc_nvme_fcp_io_submit,
	.ls_abort     = lpfc_nvme_ls_abort,
	.fcp_abort    = lpfc_nvme_fcp_abort,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * The lsrqst size is unused at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
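
/*
 * The template above is handed to the nvme_fc transport when the driver
 * registers a local port (nvme_fc_register_localport()); the *_priv_sz
 * fields tell the transport how much driver-private space to allocate
 * alongside each of its objects, which is how the lport, rport, and
 * fcpreq private pointers used throughout this file are backed.
 */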

/**
 * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of nvme buffers on the list.
 *
 * This routine is invoked to post a block of @count nvme sgl pages from an
 * NVME buffer list @nblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 *
 **/
static int
lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
			      struct list_head *nblist,
			      int count)
{
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
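	/* One sgl_page_pairs entry per buffer, plus the config header
	 * and (presumably) word0 of the SGL page1 payload; the whole
	 * request must fit in a single SLI4 page, checked below.
	 */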
1680 if (reqlen > SLI4_PAGE_SIZE) {
1681 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1682 "6118 Block sgl registration required DMA "
1683 "size (%d) great than a page\n", reqlen);
1684 return -ENOMEM;
1685 }
1686 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1687 if (!mbox) {
1688 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1689 "6119 Failed to allocate mbox cmd memory\n");
1690 return -ENOMEM;
1691 }
1692
1693 /* Allocate DMA memory and set up the non-embedded mailbox command */
1694 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1695 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
1696 LPFC_SLI4_MBX_NEMBED);
1697
1698 if (alloclen < reqlen) {
1699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1700 "6120 Allocated DMA memory size (%d) is "
1701 "less than the requested DMA memory "
1702 "size (%d)\n", alloclen, reqlen);
1703 lpfc_sli4_mbox_cmd_free(phba, mbox);
1704 return -ENOMEM;
1705 }
1706
1707 /* Get the first SGE entry from the non-embedded DMA memory */
1708 viraddr = mbox->sge_array->addr[0];
1709
1710 /* Set up the SGL pages in the non-embedded DMA pages */
1711 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
1712 sgl_pg_pairs = &sgl->sgl_pg_pairs;
1713
1714 pg_pairs = 0;
1715 list_for_each_entry(lpfc_ncmd, nblist, list) {
1716 /* Set up the sge entry */
1717 sgl_pg_pairs->sgl_pg0_addr_lo =
1718 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
1719 sgl_pg_pairs->sgl_pg0_addr_hi =
1720 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
1721 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
1722 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
1723 SGL_PAGE_SIZE;
1724 else
1725 pdma_phys_bpl1 = 0;
1726 sgl_pg_pairs->sgl_pg1_addr_lo =
1727 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
1728 sgl_pg_pairs->sgl_pg1_addr_hi =
1729 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
1730 /* Keep the first xritag on the list */
1731 if (pg_pairs == 0)
1732 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
1733 sgl_pg_pairs++;
1734 pg_pairs++;
1735 }
1736 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
1737 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
1738 /* Perform endian conversion if necessary */
1739 sgl->word0 = cpu_to_le32(sgl->word0);
1740
1741 if (!phba->sli4_hba.intr_enable)
1742 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1743 else {
1744 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
1745 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
1746 }
1747 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
1748 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1749 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1750 if (rc != MBX_TIMEOUT)
1751 lpfc_sli4_mbox_cmd_free(phba, mbox);
1752 if (shdr_status || shdr_add_status || rc) {
1753 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1754 "6125 POST_SGL_BLOCK mailbox command failed "
1755 "status x%x add_status x%x mbx status x%x\n",
1756 shdr_status, shdr_add_status, rc);
1757 rc = -ENXIO;
1758 }
1759 return rc;
1760}
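
/*
 * Editor's note: the mailbox above is issued with MBX_POLL when interrupts
 * are not yet enabled (early init/reset paths) and with a timed wait
 * otherwise. On MBX_TIMEOUT the mailbox still belongs to the firmware and
 * may complete later, which is why lpfc_sli4_mbox_cmd_free() is skipped in
 * that one case.
 */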
1761
1762/**
1763 * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
1764 * @phba: pointer to lpfc hba data structure.
1765 * @post_nblist: pointer to the nvme buffer list.
1766 *
1767 * This routine walks the list of nvme buffers that was passed in. It attempts
1768 * to construct blocks of nvme buffer sgls which contain contiguous xris and
1769 * uses the non-embedded SGL block post mailbox commands to post them to the
1770 * port. For a single NVME buffer sgl with a non-contiguous xri, if any, it
1771 * uses the embedded SGL post mailbox command for posting. The @post_nblist
1772 * passed in must be a local list, so no lock is needed when manipulating it.
1773 *
1774 * Returns: 0 on failure; otherwise, the number of successfully posted buffers.
1775 **/
1776static int
1777lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
1778 struct list_head *post_nblist, int sb_count)
1779{
1780 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
1781 int status, sgl_size;
1782 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
1783 dma_addr_t pdma_phys_sgl1;
1784 int last_xritag = NO_XRI;
1785 int cur_xritag;
1786 LIST_HEAD(prep_nblist);
1787 LIST_HEAD(blck_nblist);
1788 LIST_HEAD(nvme_nblist);
1789
1790 /* sanity check */
1791 if (sb_count <= 0)
1792 return -EINVAL;
1793
1794 sgl_size = phba->cfg_sg_dma_buf_size;
1795
1796 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
1797 list_del_init(&lpfc_ncmd->list);
1798 block_cnt++;
1799 if ((last_xritag != NO_XRI) &&
1800 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
1801 /* a hole in xri block, form a sgl posting block */
1802 list_splice_init(&prep_nblist, &blck_nblist);
1803 post_cnt = block_cnt - 1;
1804 /* prepare list for next posting block */
1805 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
1806 block_cnt = 1;
1807 } else {
1808 /* prepare list for next posting block */
1809 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
1810 /* enough sgls for non-embed sgl mbox command */
1811 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
1812 list_splice_init(&prep_nblist, &blck_nblist);
1813 post_cnt = block_cnt;
1814 block_cnt = 0;
1815 }
1816 }
1817 num_posting++;
1818 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
1819
1820 /* end of repost sgl list condition for NVME buffers */
1821 if (num_posting == sb_count) {
1822 if (post_cnt == 0) {
1823 /* last sgl posting block */
1824 list_splice_init(&prep_nblist, &blck_nblist);
1825 post_cnt = block_cnt;
1826 } else if (block_cnt == 1) {
1827 /* last single sgl with non-contiguous xri */
1828 if (sgl_size > SGL_PAGE_SIZE)
1829 pdma_phys_sgl1 =
1830 lpfc_ncmd->dma_phys_sgl +
1831 SGL_PAGE_SIZE;
1832 else
1833 pdma_phys_sgl1 = 0;
1834 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
1835 status = lpfc_sli4_post_sgl(phba,
1836 lpfc_ncmd->dma_phys_sgl,
1837 pdma_phys_sgl1, cur_xritag);
1838 if (status) {
1839 /* failure, put on abort nvme list */
1840 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1841 } else {
1842 /* success, put on NVME buffer list */
1843 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
1844 lpfc_ncmd->status = IOSTAT_SUCCESS;
1845 num_posted++;
1846 }
1847				/* success or failure, queue for release below */
1848 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
1849 }
1850 }
1851
1852		/* continue until a non-embedded page's worth of sgls is gathered */
1853 if (post_cnt == 0)
1854 continue;
1855
1856 /* post block of NVME buffer list sgls */
1857 status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
1858 post_cnt);
1859
1860		/* don't reset xritag due to a hole in the xri block */
1861 if (block_cnt == 0)
1862 last_xritag = NO_XRI;
1863
1864 /* reset NVME buffer post count for next round of posting */
1865 post_cnt = 0;
1866
1867		/* put the posted NVME buffer-sgls on the NVME buffer sgl list */
1868 while (!list_empty(&blck_nblist)) {
1869 list_remove_head(&blck_nblist, lpfc_ncmd,
1870 struct lpfc_nvme_buf, list);
1871 if (status) {
1872 /* failure, put on abort nvme list */
1873 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1874 } else {
1875 /* success, put on NVME buffer list */
1876 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
1877 lpfc_ncmd->status = IOSTAT_SUCCESS;
1878 num_posted++;
1879 }
1880 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
1881 }
1882 }
1883 /* Push NVME buffers with sgl posted to the available list */
1884 while (!list_empty(&nvme_nblist)) {
1885 list_remove_head(&nvme_nblist, lpfc_ncmd,
1886 struct lpfc_nvme_buf, list);
1887 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1888 }
1889 return num_posted;
1890}
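
/*
 * Editor's example (illustrative): if the buffers arrive with XRIs
 * 100, 101, 102, 200, 201, the walk above splices {100, 101, 102} onto
 * blck_nblist when the 102->200 hole is detected and posts them as one
 * non-embedded block, then starts a new block at 200. A trailing block
 * of exactly one buffer with a non-contiguous XRI instead takes the
 * single-buffer lpfc_sli4_post_sgl() path.
 */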
1891
1892/**
1893 * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
1894 * @phba: pointer to lpfc hba data structure.
1895 *
1896 * This routine walks the list of nvme buffers that have been allocated and
1897 * reposts them to the port by using SGL block post. This is needed after a
1898 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
1899 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
1900 * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
1901 *
1902 * Returns: 0 = success, non-zero failure.
1903 **/
1904int
1905lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
1906{
1907 LIST_HEAD(post_nblist);
1908 int num_posted, rc = 0;
1909
1910	/* move all NVME buffers that need reposting onto a local list */
1911 spin_lock_irq(&phba->nvme_buf_list_get_lock);
1912 spin_lock(&phba->nvme_buf_list_put_lock);
1913 list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
1914 list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
1915 spin_unlock(&phba->nvme_buf_list_put_lock);
1916 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
1917
1918 /* post the list of nvme buffer sgls to port if available */
1919 if (!list_empty(&post_nblist)) {
1920 num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
1921 phba->sli4_hba.nvme_xri_cnt);
1922 /* failed to post any nvme buffer, return error */
1923 if (num_posted == 0)
1924 rc = -EIO;
1925 }
1926 return rc;
1927}
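
/*
 * Editor's note: the driver keeps two free-buffer lists so allocation
 * (get) and release (put) do not contend on a single lock. When both
 * locks are needed, the ordering above is the rule throughout this file:
 * nvme_buf_list_get_lock is taken first and nvme_buf_list_put_lock is
 * nested inside it (lpfc_get_nvme_buf() below does the same when it
 * swaps the put list onto the get list).
 */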
1928
1929/**
1930 * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
1931 * @vport: The virtual port for which this call is being executed.
1932 * @num_to_alloc: The requested number of buffers to allocate.
1933 *
1934 * This routine allocates nvme buffers for a device with the SLI-4 interface
1935 * spec; each nvme buffer contains all the information needed to initiate an
1936 * NVME I/O. After allocating up to @num_to_alloc NVME buffers and putting
1937 * them on a list, it posts them to the port by using SGL block post.
1938 *
1939 * Return codes:
1940 *   int - number of nvme buffers that were allocated and posted.
1941 *   0 = failure; less than @num_to_alloc indicates a partial failure.
1942 **/
1943static int
1944lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
1945{
1946 struct lpfc_hba *phba = vport->phba;
1947 struct lpfc_nvme_buf *lpfc_ncmd;
1948 struct lpfc_iocbq *pwqeq;
1949 union lpfc_wqe128 *wqe;
1950 struct sli4_sge *sgl;
1951 dma_addr_t pdma_phys_sgl;
1952 uint16_t iotag, lxri = 0;
1953 int bcnt, num_posted, sgl_size;
1954 LIST_HEAD(prep_nblist);
1955 LIST_HEAD(post_nblist);
1956 LIST_HEAD(nvme_nblist);
1957
1958 sgl_size = phba->cfg_sg_dma_buf_size;
1959
1960 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
1961 lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
1962 if (!lpfc_ncmd)
1963 break;
1964 /*
1965 * Get memory from the pci pool to map the virt space to
1966 * pci bus space for an I/O. The DMA buffer includes the
1967 * number of SGE's necessary to support the sg_tablesize.
1968 */
1969 lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
1970 GFP_KERNEL,
1971 &lpfc_ncmd->dma_handle);
1972 if (!lpfc_ncmd->data) {
1973 kfree(lpfc_ncmd);
1974 break;
1975 }
1976 memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
1977
1978 lxri = lpfc_sli4_next_xritag(phba);
1979 if (lxri == NO_XRI) {
1980 pci_pool_free(phba->lpfc_sg_dma_buf_pool,
1981 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
1982 kfree(lpfc_ncmd);
1983 break;
1984 }
1985 pwqeq = &(lpfc_ncmd->cur_iocbq);
1986 wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
1987
1988 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
1989 iotag = lpfc_sli_next_iotag(phba, pwqeq);
1990 if (iotag == 0) {
1991 pci_pool_free(phba->lpfc_sg_dma_buf_pool,
1992 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
1993 kfree(lpfc_ncmd);
1994 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1995					"6121 Failed to allocate IOTAG for"
1996 " XRI:0x%x\n", lxri);
1997 lpfc_sli4_free_xri(phba, lxri);
1998 break;
1999 }
2000 pwqeq->sli4_lxritag = lxri;
2001 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
2002 pwqeq->iocb_flag |= LPFC_IO_NVME;
2003 pwqeq->context1 = lpfc_ncmd;
2004 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
2005
2006 /* Initialize local short-hand pointers. */
2007 lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
2008 sgl = lpfc_ncmd->nvme_sgl;
2009 pdma_phys_sgl = lpfc_ncmd->dma_handle;
2010 lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
2011
2012 /* Rsp SGE will be filled in when we rcv an IO
2013 * from the NVME Layer to be sent.
2014 * The cmd is going to be embedded so we need a SKIP SGE.
2015 */
2016 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2017 bf_set(lpfc_sli4_sge_last, sgl, 0);
2018 sgl->word2 = cpu_to_le32(sgl->word2);
2019 /* Fill in word 3 / sgl_len during cmd submission */
2020
2021 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
2022
2023 /* Word 7 */
2024 bf_set(wqe_erp, &wqe->generic.wqe_com, 0);
2025 /* NVME upper layers will time things out, if needed */
2026 bf_set(wqe_tmo, &wqe->generic.wqe_com, 0);
2027
2028 /* Word 10 */
2029 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
2030 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
2031
2032 /* add the nvme buffer to a post list */
2033 list_add_tail(&lpfc_ncmd->list, &post_nblist);
2034 spin_lock_irq(&phba->nvme_buf_list_get_lock);
2035 phba->sli4_hba.nvme_xri_cnt++;
2036 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
2037 }
2038 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
2039			"6114 Allocated %d out of %d requested new NVME "
2040 "buffers\n", bcnt, num_to_alloc);
2041
2042 /* post the list of nvme buffer sgls to port if available */
2043 if (!list_empty(&post_nblist))
2044 num_posted = lpfc_post_nvme_sgl_list(phba,
2045 &post_nblist, bcnt);
2046 else
2047 num_posted = 0;
2048
2049 return num_posted;
2050}
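
/*
 * Editor's note (illustrative): callers treat the return value as a count,
 * not an errno. Localport creation below, for example, does
 *
 *	len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
 *	vport->phba->total_nvme_bufs += len;
 *
 * so a partial allocation simply yields a smaller buffer pool rather than
 * a hard failure.
 */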
2051
2052/**
2053 * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
2054 * @phba: The HBA for which this call is being executed.
 * @ndlp: The node this buffer will be used with; XRIs with an active RRQ
 *        against this node are skipped.
2055 *
2056 * This routine removes an nvme buffer from the head of the @phba
2057 * lpfc_nvme_buf_list list and returns it to the caller.
2058 *
2059 * Return codes:
2060 * NULL - Error
2061 * Pointer to lpfc_nvme_buf - Success
2062 **/
2063static struct lpfc_nvme_buf *
2064lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2065{
2066 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
2067 unsigned long iflag = 0;
2068 int found = 0;
2069
2070 spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
2071 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2072 &phba->lpfc_nvme_buf_list_get, list) {
2073 if (lpfc_test_rrq_active(phba, ndlp,
2074 lpfc_ncmd->cur_iocbq.sli4_lxritag))
2075 continue;
2076 list_del_init(&lpfc_ncmd->list);
2077 found = 1;
2078 break;
2079 }
2080 if (!found) {
2081 spin_lock(&phba->nvme_buf_list_put_lock);
2082 list_splice(&phba->lpfc_nvme_buf_list_put,
2083 &phba->lpfc_nvme_buf_list_get);
2084 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
2085 spin_unlock(&phba->nvme_buf_list_put_lock);
2086 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2087 &phba->lpfc_nvme_buf_list_get, list) {
2088 if (lpfc_test_rrq_active(
2089 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
2090 continue;
2091 list_del_init(&lpfc_ncmd->list);
2092 found = 1;
2093 break;
2094 }
2095 }
2096 spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
2097 if (!found)
2098 return NULL;
2099 return lpfc_ncmd;
2100}
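
/*
 * Editor's note: lpfc_test_rrq_active() skips any buffer whose XRI still
 * has an active Resource Recovery Qualifier against @ndlp; an aborted
 * XRI may not be reused with that node until RA_TOV expires (the RRQ is
 * armed in lpfc_sli4_nvme_xri_aborted() below).
 */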
2101
2102/**
2103 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list.
2104 * @phba: The HBA for which this call is being executed.
2105 * @lpfc_ncmd: The nvme buffer which is being released.
2106 *
2107 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
2108 * of the @phba lpfc_nvme_buf_list list. For SLI4, XRIs are tied to the nvme
2109 * buffer; an aborted XRI cannot be reused for at least RA_TOV.
2111 **/
2112static void
2113lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
2114{
2115 unsigned long iflag = 0;
2116
2117 lpfc_ncmd->nonsg_phys = 0;
2118 if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
2119 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2120 "6310 XB release deferred for "
2121 "ox_id x%x on reqtag x%x\n",
2122 lpfc_ncmd->cur_iocbq.sli4_xritag,
2123 lpfc_ncmd->cur_iocbq.iotag);
2124
2125 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
2126 iflag);
2127 lpfc_ncmd->nvmeCmd = NULL;
2128 list_add_tail(&lpfc_ncmd->list,
2129 &phba->sli4_hba.lpfc_abts_nvme_buf_list);
2130 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
2131 iflag);
2132 } else {
2133 lpfc_ncmd->nvmeCmd = NULL;
2134 lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
2135 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
2136 list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
2137 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
2138 }
2139}
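
/*
 * Editor's note: a buffer released while LPFC_SBUF_XBUSY is set parks on
 * lpfc_abts_nvme_buf_list and only returns to the free pool once the port
 * confirms the exchange is aborted, via lpfc_sli4_nvme_xri_aborted() at
 * the bottom of this file.
 */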
2140
2141/**
2142 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2143 * @vport: the lpfc_vport instance requesting a localport.
2144 *
2145 * This routine is invoked to create an nvme localport instance to bind
2146 * to the nvme_fc_transport. It is called once during driver load
2147 * (like lpfc_create_shost) after all other services are initialized.
2148 * It requires a vport, vpi, and wwns at call time. Other localport
2149 * parameters are modified as the driver's FCID and the Fabric WWN
2150 * are established.
2151 *
2152 * Return codes
2153 * 0 - successful
2154 * -ENOMEM - no heap memory available
2155 * other values - from nvme registration upcall
2156 **/
2157int
2158lpfc_nvme_create_localport(struct lpfc_vport *vport)
2159{
2160 int ret = 0;
2161 struct lpfc_hba *phba = vport->phba;
2162 struct nvme_fc_port_info nfcp_info;
2163 struct nvme_fc_local_port *localport;
2164 struct lpfc_nvme_lport *lport;
2165 int len;
2166
2167 /* Initialize this localport instance. The vport wwn usage ensures
2168 * that NPIV is accounted for.
2169 */
2170 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2171 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2172 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2173 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2174
2175 /* Limit to LPFC_MAX_NVME_SEG_CNT.
2176	 * For now, + 1 is needed to get around NVME transport logic.
2177 */
2178 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
2179 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
2180 "6300 Reducing sg segment cnt to %d\n",
2181 LPFC_MAX_NVME_SEG_CNT);
2182 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
2183 } else {
2184 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
2185 }
2186 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2187 lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
2188
2189	/* The localport input data is on the stack, but the registration
2190	 * call allocates the localport plus its private area from the heap.
2191 */
2192#if (IS_ENABLED(CONFIG_NVME_FC))
2193 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2194 &vport->phba->pcidev->dev, &localport);
2195#else
2196 ret = -ENOMEM;
2197#endif
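	/*
	 * Editor's note: when CONFIG_NVME_FC is not enabled, the stub above
	 * reports -ENOMEM so that an unconfigured transport flows through
	 * the same failure path as a real registration error.
	 */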
2198 if (!ret) {
2199 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2200 "6005 Successfully registered local "
2201 "NVME port num %d, localP %p, private %p, "
2202 "sg_seg %d\n",
2203 localport->port_num, localport,
2204 localport->private,
2205 lpfc_nvme_template.max_sgl_segments);
2206
2207		/* The private area is our lport, sized as declared in the template. */
2208 lport = (struct lpfc_nvme_lport *)localport->private;
2209 vport->localport = localport;
2210 lport->vport = vport;
2211 INIT_LIST_HEAD(&lport->rport_list);
2212 vport->nvmei_support = 1;
2213 len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
2214 vport->phba->total_nvme_bufs += len;
2215 }
2216
2217 return ret;
2218}
2219
2220/**
2221 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2222 * @vport: pointer to the lpfc vport whose localport is being destroyed.
2223 *
2224 * This routine is invoked to destroy all lports bound to the phba.
2225 * The lport memory was allocated by the nvme fc transport and is
2226 * released there. This routine ensures all rports bound to the
2227 * lport have been disconnected.
2228 *
2229 **/
2230void
2231lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2232{
2233#if (IS_ENABLED(CONFIG_NVME_FC))
2234 struct nvme_fc_local_port *localport;
2235 struct lpfc_nvme_lport *lport;
2236 struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
2237 int ret;
2238
2239 if (vport->nvmei_support == 0)
2240 return;
2241
2242 localport = vport->localport;
2243 vport->localport = NULL;
2244 lport = (struct lpfc_nvme_lport *)localport->private;
2245
2246 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2247 "6011 Destroying NVME localport %p\n",
2248 localport);
2249 list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
2250 /* The last node ref has to get released now before the rport
2251 * private memory area is released by the transport.
2252 */
2253 list_del(&rport->list);
2254
2255 init_completion(&rport->rport_unreg_done);
2256 ret = nvme_fc_unregister_remoteport(rport->remoteport);
2257 if (ret)
2258 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2259 "6008 rport fail destroy %x\n", ret);
2260		wait_for_completion_timeout(&rport->rport_unreg_done,
					    msecs_to_jiffies(5000));
2261 }
2262
2263 /* lport's rport list is clear. Unregister
2264 * lport and release resources.
2265 */
2266 init_completion(&lport->lport_unreg_done);
2267 ret = nvme_fc_unregister_localport(localport);
2268	wait_for_completion_timeout(&lport->lport_unreg_done,
				    msecs_to_jiffies(5000));
2269
2270 /* Regardless of the unregister upcall response, clear
2271 * nvmei_support. All rports are unregistered and the
2272 * driver will clean up.
2273 */
2274 vport->nvmei_support = 0;
2275 if (ret == 0) {
2276 lpfc_printf_vlog(vport,
2277 KERN_INFO, LOG_NVME_DISC,
2278 "6009 Unregistered lport Success\n");
2279 } else {
2280 lpfc_printf_vlog(vport,
2281 KERN_INFO, LOG_NVME_DISC,
2282 "6010 Unregistered lport "
2283 "Failed, status x%x\n",
2284 ret);
2285 }
2286#endif
2287}
2288
2289void
2290lpfc_nvme_update_localport(struct lpfc_vport *vport)
2291{
2292#if (IS_ENABLED(CONFIG_NVME_FC))
2293 struct nvme_fc_local_port *localport;
2294 struct lpfc_nvme_lport *lport;
2295
2296 localport = vport->localport;
2297 if (!localport) {
2298 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2299 "6710 Update NVME fail. No localport\n");
2300 return;
2301 }
2302 lport = (struct lpfc_nvme_lport *)localport->private;
2303 if (!lport) {
2304 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2305 "6171 Update NVME fail. localP %p, No lport\n",
2306 localport);
2307 return;
2308 }
2309 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2310 "6012 Update NVME lport %p did x%x\n",
2311 localport, vport->fc_myDID);
2312
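	/*
	 * Editor's note: a DID of zero indicates the port has not yet been
	 * assigned an address by the fabric, so it is presented to the
	 * transport as discovery-only until a real FCID is bound.
	 */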
2313 localport->port_id = vport->fc_myDID;
2314 if (localport->port_id == 0)
2315 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2316 else
2317 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2318
2319 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2320 "6030 bound lport %p to DID x%06x\n",
2321 lport, localport->port_id);
2322#endif
2323}
2324
2325int
2326lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2327{
2328#if (IS_ENABLED(CONFIG_NVME_FC))
2329 int ret = 0;
2330 struct nvme_fc_local_port *localport;
2331 struct lpfc_nvme_lport *lport;
2332 struct lpfc_nvme_rport *rport;
2333 struct nvme_fc_remote_port *remote_port;
2334 struct nvme_fc_port_info rpinfo;
2335
2336 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2337 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2338 ndlp->nlp_DID, ndlp->nlp_type);
2339
2340 localport = vport->localport;
2341 lport = (struct lpfc_nvme_lport *)localport->private;
2342
2343 if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
2344
2345 /* The driver isn't expecting the rport wwn to change
2346 * but it might get a different DID on a different
2347 * fabric.
2348 */
2349 list_for_each_entry(rport, &lport->rport_list, list) {
2350 if (rport->remoteport->port_name !=
2351 wwn_to_u64(ndlp->nlp_portname.u.wwn))
2352 continue;
2353 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2354 "6035 lport %p, found matching rport "
2355 "at wwpn 0x%llx, Data: x%x x%x x%x "
2356 "x%06x\n",
2357 lport,
2358 rport->remoteport->port_name,
2359 rport->remoteport->port_id,
2360 rport->remoteport->port_role,
2361 ndlp->nlp_type,
2362 ndlp->nlp_DID);
2363 remote_port = rport->remoteport;
2364 if ((remote_port->port_id == 0) &&
2365 (remote_port->port_role ==
2366 FC_PORT_ROLE_NVME_DISCOVERY)) {
2367 remote_port->port_id = ndlp->nlp_DID;
2368 remote_port->port_role &=
2369 ~FC_PORT_ROLE_NVME_DISCOVERY;
2370 if (ndlp->nlp_type & NLP_NVME_TARGET)
2371 remote_port->port_role |=
2372 FC_PORT_ROLE_NVME_TARGET;
2373 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2374 remote_port->port_role |=
2375 FC_PORT_ROLE_NVME_INITIATOR;
2376
2377 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2378 LOG_NVME_DISC,
2379 "6014 Rebinding lport to "
2380 "rport wwpn 0x%llx, "
2381 "Data: x%x x%x x%x x%06x\n",
2382 remote_port->port_name,
2383 remote_port->port_id,
2384 remote_port->port_role,
2385 ndlp->nlp_type,
2386 ndlp->nlp_DID);
2387 }
2388 return 0;
2389 }
2390
2391 /* NVME rports are not preserved across devloss.
2392 * Just register this instance.
2393 */
2394 rpinfo.port_id = ndlp->nlp_DID;
2395 rpinfo.port_role = 0;
2396 if (ndlp->nlp_type & NLP_NVME_TARGET)
2397 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2398 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2399 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2400 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2401 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2402 ret = nvme_fc_register_remoteport(localport, &rpinfo,
2403 &remote_port);
2404 if (!ret) {
2405 rport = remote_port->private;
2406 rport->remoteport = remote_port;
2407 rport->lport = lport;
2408 rport->ndlp = lpfc_nlp_get(ndlp);
2409 if (!rport->ndlp)
2410 return -1;
2411 ndlp->nrport = rport;
2412 INIT_LIST_HEAD(&rport->list);
2413 list_add_tail(&rport->list, &lport->rport_list);
2414 lpfc_printf_vlog(vport, KERN_INFO,
2415 LOG_NVME_DISC | LOG_NODE,
2416 "6022 Binding new rport to lport %p "
2417 "Rport WWNN 0x%llx, Rport WWPN 0x%llx "
2418 "DID x%06x Role x%x\n",
2419 lport,
2420 rpinfo.node_name, rpinfo.port_name,
2421 rpinfo.port_id, rpinfo.port_role);
2422 } else {
2423 lpfc_printf_vlog(vport, KERN_ERR,
2424 LOG_NVME_DISC | LOG_NODE,
2425 "6031 RemotePort Registration failed "
2426 "err: %d, DID x%06x\n",
2427 ret, ndlp->nlp_DID);
2428 }
2429 } else {
2430 ret = -EINVAL;
2431 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2432 "6027 Unknown nlp_type x%x on DID x%06x "
2433 "ndlp %p. Not Registering nvme rport\n",
2434 ndlp->nlp_type, ndlp->nlp_DID, ndlp);
2435 }
2436 return ret;
2437#else
2438 return 0;
2439#endif
2440}
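
/*
 * Editor's sketch of the registration flow above (illustrative):
 *
 *	if an rport on lport->rport_list matches the ndlp's WWPN:
 *		rebind in place - refresh port_id and port_role if the
 *		rport was parked in the discovery state, then return 0
 *	else:
 *		build an nvme_fc_port_info from the ndlp, call
 *		nvme_fc_register_remoteport(), and take an ndlp
 *		reference that lives as long as the rport
 */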
2441
2442/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2443 *
2444 * There is no notion of Devloss or rport recovery from the current
2445 * nvme_transport perspective. Loss of an rport just means IO cannot
2446 * be sent and recovery is completely up to the initiator.
2447 * For now, the driver just unbinds the DID and port_role so that
2448 * no further IO can be issued. Changes are planned for later.
2449 *
2450 * Notes - the ndlp reference count is not decremented here
2451 * since there is no nvme_transport api for devloss. Node ref count
2452 * is only adjusted in driver unload.
2453 */
2454void
2455lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2456{
2457#if (IS_ENABLED(CONFIG_NVME_FC))
2458 int ret;
2459 struct nvme_fc_local_port *localport;
2460 struct lpfc_nvme_lport *lport;
2461 struct lpfc_nvme_rport *rport;
2462 struct nvme_fc_remote_port *remoteport;
2463 unsigned long wait_tmo;
2464
2465 localport = vport->localport;
2466
2467	/* This is a fundamental error. The localport is always
2468 * available until driver unload. Just exit.
2469 */
2470 if (!localport)
2471 return;
2472
2473 lport = (struct lpfc_nvme_lport *)localport->private;
2474 if (!lport)
2475 goto input_err;
2476
2477 rport = ndlp->nrport;
2478 if (!rport)
2479 goto input_err;
2480
2481 remoteport = rport->remoteport;
2482 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2483 "6033 Unreg nvme remoteport %p, portname x%llx, "
2484 "port_id x%06x, portstate x%x port type x%x\n",
2485 remoteport, remoteport->port_name,
2486 remoteport->port_id, remoteport->port_state,
2487 ndlp->nlp_type);
2488
2489 /* Sanity check ndlp type. Only call for NVME ports. Don't
2490 * clear any rport state until the transport calls back.
2491 */
2492 if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
2493 init_completion(&rport->rport_unreg_done);
2494 ret = nvme_fc_unregister_remoteport(remoteport);
2495 if (ret != 0) {
2496 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2497 "6167 NVME unregister failed %d "
2498 "port_state x%x\n",
2499 ret, remoteport->port_state);
2500 }
2501
2502 /* Wait for the driver's delete completion routine to finish
2503 * before proceeding. This guarantees the transport and driver
2504 * have completed the unreg process.
2505 */
2506 wait_tmo = msecs_to_jiffies(5000);
2507 ret = wait_for_completion_timeout(&rport->rport_unreg_done,
2508 wait_tmo);
2509 if (ret == 0) {
2510 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2511 "6169 Unreg nvme wait timeout\n");
2512 }
2513 }
2514 return;
2515
2516 input_err:
2517#endif
2518 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2519			"6168 State error: lport %p, rport %p FCID x%06x\n",
2520 vport->localport, ndlp->rport, ndlp->nlp_DID);
2521}
2522
2523/**
2524 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2525 * @phba: pointer to lpfc hba data structure.
2526 * @axri: pointer to the fcp xri abort wcqe structure.
2527 *
2528 * This routine is invoked by the worker thread to process a SLI4 fast-path
2529 * NVME aborted xri.
2530 **/
2531void
2532lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2533 struct sli4_wcqe_xri_aborted *axri)
2534{
2535 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2536 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
2537 struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
2538 struct lpfc_nodelist *ndlp;
2539 unsigned long iflag = 0;
2540 int rrq_empty = 0;
2541
2542 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2543 return;
2544 spin_lock_irqsave(&phba->hbalock, iflag);
2545 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2546 list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
2547 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
2548 list) {
2549 if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
2550 list_del_init(&lpfc_ncmd->list);
2551 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2552 lpfc_ncmd->status = IOSTAT_SUCCESS;
2553 spin_unlock(
2554 &phba->sli4_hba.abts_nvme_buf_list_lock);
2555
2556 rrq_empty = list_empty(&phba->active_rrq_list);
2557 spin_unlock_irqrestore(&phba->hbalock, iflag);
2558 ndlp = lpfc_ncmd->ndlp;
2559 if (ndlp) {
2560 lpfc_set_rrq_active(
2561 phba, ndlp,
2562 lpfc_ncmd->cur_iocbq.sli4_lxritag,
2563 rxid, 1);
2564 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2565 }
2566
2567 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2568 "6311 XRI Aborted xri x%x tag x%x "
2569 "released\n",
2570 xri, lpfc_ncmd->cur_iocbq.iotag);
2571
2572 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2573 if (rrq_empty)
2574 lpfc_worker_wake_up(phba);
2575 return;
2576 }
2577 }
2578 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2579 spin_unlock_irqrestore(&phba->hbalock, iflag);
2580
2581 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2582 "6312 XRI Aborted xri x%x not found\n", xri);
2583
2584}