/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;
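
/*
 * Note: lpfc_nvme_template is the nvme_fc_port_template that binds the
 * static callbacks below (create_queue, delete_queue, ls_req, fcp_io,
 * aborts, etc.). It is populated and handed to
 * nvme_fc_register_localport() by the localport create code later in
 * this file (outside this excerpt).
 */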

/**
 * lpfc_nvme_create_queue - Allocate and bind a driver queue handle
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	if (!vport || vport->load_flag & FC_UNLOADING ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}
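
	/*
	 * Worked example of the mapping above (illustrative only): with
	 * max_hw_queues == 4, transport qidx 0 (admin) and qidx 1 (first
	 * IO queue) both land on hardware index 0, qidx 2 -> 1,
	 * qidx 3 -> 2, qidx 4 -> 3, and qidx 5 wraps back to 0.
	 */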

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d (cpu %d) to "
			 "hdw_queue %d qhandle x%px\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Free a driver queue handle
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
			lport, qidx, handle);
	kfree(handle);
}

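/* lpfc_nvme_localport_delete
 *
 * @localport: Pointer to an nvme transport localport instance.
 *
 * This is a template downcall. The NVME transport calls this function
 * when it has completed the unregistration of a previously registered
 * localport. Completing lport_unreg_cmp releases any driver thread
 * blocked waiting for the unregister to finish.
 *
 * Return value :
 * None
 */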
static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport x%px delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall. The NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	u32 fc4_xpt_flags;

	ndlp = rport->ndlp;
	if (!ndlp) {
		pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
		       __func__, rport, remoteport);
		goto rport_err;
	}

	vport = ndlp->vport;
	if (!vport) {
		pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
		       __func__, ndlp, ndlp->nlp_state, rport);
		goto rport_err;
	}

	fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete of remoteport x%px, ndlp x%px "
			 "DID x%x xflags x%x\n",
			 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
	spin_lock_irq(&ndlp->lock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
		ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

	/* On a devloss timeout event, one more put is executed provided the
	 * NVME and SCSI rport unregister requests are complete.  If the vport
	 * is unloading, this extra put is executed by lpfc_drop_node.
	 */
	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
	return;
}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns 1 if LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_vport *vport;
	struct lpfc_nvme_rport *lpfc_rport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nvme_lport *lport;
	uint32_t *payload = axchg->payload;
	int rc;

	vport = axchg->ndlp->vport;
	lpfc_rport = axchg->ndlp->nrport;
	if (!lpfc_rport)
		return -EINVAL;

	remoteport = lpfc_rport->remoteport;
	if (!vport->localport ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -EINVAL;

	lport = vport->localport->private;
	if (!lport)
		return -EINVAL;

	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
				axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n",
			axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc)
		return 0;
#endif
	return 1;
}

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME LS request
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		       struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;
	int status;

	pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
	ndlp = cmdwqe->ndlp;
	buf_ptr = cmdwqe->bpl_dmabuf;

	status = bf_get(lpfc_wcqe_c_status, wcqe);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
			 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
			 "ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
			 ndlp);

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (buf_ptr) {
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->bpl_dmabuf = NULL;
	}
	if (pnvme_lsreq->done) {
		if (status != CQE_STATUS_SUCCESS)
			status = -ENXIO;
		pnvme_lsreq->done(pnvme_lsreq, status);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6046 NVMEx cmpl without done call back? "
				 "Data x%px DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	}
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->ndlp = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

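/* lpfc_nvme_ls_req_cmp
 *
 * Completion wrapper for LS requests issued through the transport
 * template entry point. Updates the localport LS completion and error
 * statistics (when the localport is still registered), then defers to
 * __lpfc_nvme_ls_req_cmp for the common teardown and done() upcall.
 */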
static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		     struct lpfc_iocbq *rspwqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	status = bf_get(lpfc_wcqe_c_status, wcqe);

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

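/* lpfc_nvme_gen_req
 *
 * @vport: The local port issuing the LS
 * @bmp: DMA buffer wrapping the BPL (BDE list) for the exchange
 * @inp: LS request payload buffer (not referenced when building the WQE)
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @cmpl: Completion handler invoked when the WQE completes
 * @ndlp: The remote port to send the LS to
 * @num_entry: Number of BDEs in the BPL
 * @tmo: Command timeout in seconds; 0 selects the 3 * R_A_TOV default
 * @retry: Retry count carried in the iocb
 *
 * Builds and issues a GEN_REQUEST64 WQE for an NVME LS exchange on
 * hardware queue 0. Takes a node reference that is released on
 * completion or on a submit failure.
 *
 * Return value :
 * 0 - Success
 * 1 - Failure (no iocb, no node reference, or WQE issue failure)
 */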
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->bpl_dmabuf = bmp;
	genwqe->cmd_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->ndlp = lpfc_nlp_get(ndlp);
	if (!genwqe->ndlp) {
		dev_warn(&phba->pcidev->dev,
			 "Warning: Failed node ref, not sending LS_REQ\n");
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	genwqe->context_un.nvme_lsreq = pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->num_bdes = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	genwqe->cmd_cmpl = cmpl;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x  rc x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state, rc);
		lpfc_nlp_put(ndlp);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
			 "bmp:x%px xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->sli4_xritag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	return 0;
}

/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   struct nvmefc_ls_req *pnvme_lsreq,
		   void (*gen_req_cmp)(struct lpfc_hba *phba,
				       struct lpfc_iocbq *cmdwqe,
				       struct lpfc_iocbq *rspwqe))
{
	struct lpfc_dmabuf *bmp;
	struct ulp_bde64 *bpl;
	int ret;
	uint16_t ntype, nstate;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
				 "LS Req\n",
				 ndlp);
		return -ENODEV;
	}

	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6088 NVMEx LS REQ: Fail DID x%06x not "
				 "ready for IO. Type x%x, State x%x\n",
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}
	if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/*
	 * There are two DMA buffers in the request; actually there is one,
	 * and the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them. And we do not have to look at the response data, we only
	 * care that we got a response. All of the caring is going to happen
	 * in the nvme-fc layer.
	 */
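
	/*
	 * Resulting BPL layout (illustrative): bpl[0] carries the LS
	 * request (rqstdma/rqstlen, 64-bit BDE) and bpl[1] carries the
	 * LS response buffer (rspdma/rsplen, BUFF_TYPE_BDE_64I), which
	 * is why lpfc_nvme_gen_req is called with num_entry == 2 below.
	 */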

	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6044 NVMEx LS REQ: Could not alloc LS buf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6042 NVMEx LS REQ: Could not alloc mbuf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
			 "rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				pnvme_lsreq->timeout, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
				 "lsreq x%px Status %x DID %x\n",
				 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	return 0;
}
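
/*
 * Note: __lpfc_nvme_ls_req is the common LS-issue path; the transport
 * entry point lpfc_nvme_ls_req below wraps it with localport
 * statistics, and the driver's target-mode code is expected to issue
 * its LS requests through this same routine.
 */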

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;
	if (vport->load_flag & FC_UNLOADING ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	atomic_inc(&lport->fc4NvmeLsRequests);

	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
				 lpfc_nvme_ls_req_cmp);
	if (ret)
		atomic_inc(&lport->xmt_ls_err);

	return ret;
}

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 *   0 : if LS found and aborted
 *   non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;
	bool foundit = false;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
				 "x%06x, Failing LS Req\n",
				 ndlp, ndlp ? ndlp->nlp_DID : 0);
		return -EINVAL;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
			 "x%px rqstlen:%d rsplen:%d %pad %pad\n",
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and look for the wqe that matches
	 * this ELS. If found, issue an abort on the wqe.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
			wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
			foundit = true;
			break;
		}
	}
	spin_unlock(&pring->ring_lock);

	if (foundit)
		lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
	spin_unlock_irq(&phba->hbalock);

	if (foundit)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
			 pnvme_lsreq);
	return -EINVAL;
}

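/* lpfc_nvme_xmt_ls_rsp
 *
 * Transport downcall used to transmit a response for a previously
 * received LS. Recovers the driver exchange context embedded around
 * the ls_rsp, then hands off to the common __lpfc_nvme_xmt_ls_rsp
 * helper. On failure (other than -EALREADY, meaning the response was
 * already sent), the common code aborts the exchange, which is counted
 * here in xmt_ls_abort.
 */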
static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
		     struct nvme_fc_remote_port *remoteport,
		     struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvme_lport *lport;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lport = (struct lpfc_nvme_lport *)localport->private;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

	if (rc) {
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&lport->xmt_ls_abort);
		return rc;
	}

	return 0;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes. Also, use the
	 * nvme command's command and response DMA addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;	/* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;				/* Skip Word 0 in payload */

		*wptr++ = *dptr++;		/* Word 1 */
		*wptr++ = *dptr++;		/* Word 2 */
		*wptr++ = *dptr++;		/* Word 3 */
		*wptr++ = *dptr++;		/* Word 4 */
		dptr++;				/* Skip Word 5 in payload */
		*wptr++ = *dptr++;		/* Word 6 */
		*wptr++ = *dptr++;		/* Word 7 */
		dptr += 8;			/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;		/* Word 16 */
		*wptr++ = *dptr++;		/* Word 17 */
		*wptr++ = *dptr++;		/* Word 18 */
		*wptr++ = *dptr++;		/* Word 19 */
		*wptr++ = *dptr++;		/* Word 20 */
		*wptr++ = *dptr++;		/* Word 21 */
		*wptr++ = *dptr++;		/* Word 22 */
		*wptr   = *dptr;		/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}

/*
 * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
 *
 * Completion handler for NVME FCP IO WQEs. The routine rebuilds or
 * validates the response IU, records status, statistics, and timing
 * data, releases the node reference and IO buffer, and calls the
 * transport's done() routine once the exchange is no longer busy.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      struct lpfc_iocbq *pwqeOut)
{
	struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
	struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;
	uint32_t lat;
	bool call_done = false;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif
	int offline = 0;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
				 "nvmeCmd x%px\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6062 Ignoring NVME cmpl.  No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = status;
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;

			/* Get the NVME cmd details for this unique error. */
			cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
			ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

			/* Check if this is really an ERSP */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				lpfc_ncmd->result = 0;

				lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
					"6084 NVME FCP_ERR ERSP: "
					"xri %x placed x%x opcode x%x cmd_id "
					"x%x cqe_status x%x\n",
					lpfc_ncmd->cur_iocbq.sli4_xritag,
					wcqe->total_data_placed,
					cp->sqe.common.opcode,
					cp->sqe.common.command_id,
					ep->cqe.status);
				break;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x opcode x%x cmd_id x%x, "
					 "cqe_status x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed,
					 cp->sqe.common.opcode,
					 cp->sqe.common.command_id,
					 ep->cqe.status);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
					LOG_NVME_IOERR,
					"6032 Delay Aborted cmd x%px "
					"nvme cmd x%px, xri x%x, "
					"xb %d\n",
					lpfc_ncmd, nCmd,
					lpfc_ncmd->cur_iocbq.sli4_xritag,
					bf_get(lpfc_wcqe_c_xb, wcqe));
			fallthrough;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
			offline = pci_channel_offline(vport->phba->pcidev);
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_ncmd);
	}
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (lpfc_ncmd->cpu != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 cpu, lpfc_ncmd->cpu);
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */

	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		call_done = true;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	/* Check if IO qualified for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    nCmd->io_dir == NVMEFC_FCP_READ &&
	    nCmd->payload_length) {
		/* Used when calculating average latency */
		lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
		lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
	}

	if (call_done)
		nCmd->done(nCmd);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
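
/*
 * Note on the exchange-busy handling above: when the WCQE reports XB,
 * the adapter still owns the exchange, so LPFC_SBUF_XBUSY stays set,
 * the transport done() upcall is held off (call_done remains false),
 * and lpfc_release_nvme_buf parks the IO on the abort list until the
 * exchange is finally released.
 */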

/**
 * lpfc_nvme_prep_io_cmd - Prepare an NVME-over-FCP IO WQE
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * Initializes the WQE for the NVME FCP IO described by @lpfc_ncmd from
 * the appropriate iread/iwrite/icmnd template and updates the per-queue
 * request counters.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct nvme_common_command *sqe;
	struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			/* For a CMF Managed port, iod must be zero'ed */
			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
				       LPFC_WQE_IOD_NONE);
			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
		sqe = &((struct nvme_fc_cmd_iu *)
			nCmd->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_async_event)
			bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
	}

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 10 */
	bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

	/* Words 13 14 15 are for PBDE support */

	/* add the VMID tags as per switch response */
	if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_priority_tagging) {
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
		} else {
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
		}
	}

	pwqeq->vport = vport;
	return 0;
}
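
/*
 * Note: the xchg and VMID updates above address wqe->fcp_iwrite.wqe_com
 * even for reads and control commands; the iread/iwrite/icmnd members
 * of union lpfc_wqe128 overlay the same storage, so any of them can be
 * used to reach the shared wqe_com fields.
 */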

/**
 * lpfc_nvme_prep_io_dma - Map the data buffers for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 *
 * Fixes up the command/response SGEs and formats one data SGE per
 * scatter-gather segment supplied by the transport, chaining extra SGL
 * chunks when needed.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr = 0;
	uint32_t dma_len = 0;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	bool lsp_just_set = false;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6058 Too many sg segments from "
					"NVME Transport.  Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		/* for tracking the segment boundaries */
		j = 2;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}

			sgl->word2 = 0;
			if (nseg == 1) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_ncmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_ncmd->seg_cnt = 0;
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
			      LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = sg_dma_address(data_sg);
				dma_len = sg_dma_len(data_sg);
				sgl->addr_lo = cpu_to_le32(
						 putPaddrLow(physaddr));
				sgl->addr_hi = cpu_to_le32(
						 putPaddrHigh(physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				data_sg = sg_next(data_sg);

				sgl++;

				lsp_just_set = false;
			} else {
				sgl->word2 = cpu_to_le32(sgl->word2);

				sgl->sge_len = cpu_to_le32(
						 phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}

		/* PBDE support for first data SGE only */
		if (nseg == 1 && phba->cfg_enable_pbde) {
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 - set PBDE bit */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			/* Word 11 - PBDE bit disabled by default template */
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}

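/*
 * Illustrative note on the LSP chaining above: SLI-4 SGL chunks hold a
 * fixed number of SGEs (phba->border_sge_num marks the boundary). When
 * a command needs more data SGEs than fit in one chunk, the boundary
 * entry is written as an LPFC_SGE_TYPE_LSP SGE pointing at an extra
 * per-hardware-queue SGL chunk (lpfc_get_sgl_per_hdwq), and the walk
 * continues in that new chunk; `i` is decremented so the data segment
 * displaced by the LSP entry is emitted again on the next pass.
 */
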
/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   negative errno - Failure (-EINVAL, -EBUSY, -ENODEV, or -ENOMEM)
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
	uint64_t start = 0;
#if (IS_ENABLED(CONFIG_NVME_FC))
	u8 *uuid = NULL;
	int err;
	enum dma_data_direction iodir;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
	    phba->hba_flag & HBA_IOQ_FLUSH) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Busy IO, ndlp not ready: rport x%px "
				 "ndlp x%px, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->fc4_xpt_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;

	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* Check if IO qualifies for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
	    pnvme_fcreq->payload_length) {
		ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
		if (ret) {
			ret = -EBUSY;
			goto out_fail;
		}
		/* Get start time for IO latency */
		start = ktime_get_ns();
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail1;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail1;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif
	lpfc_ncmd->rx_cmd_start = start;

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever.  There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

#if (IS_ENABLED(CONFIG_NVME_FC))
	/* check the necessary and sufficient condition to support VMID */
	if (lpfc_is_vmid_enabled(phba) &&
	    (ndlp->vmid_support ||
	     phba->pport->vmid_priority_tagging ==
	     LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
		/* is the I/O generated by a VM, get the associated virtual */
		/* entity id */
		uuid = nvme_fc_io_getuuid(pnvme_fcreq);

		if (uuid) {
			if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
				iodir = DMA_TO_DEVICE;
			else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
				iodir = DMA_FROM_DEVICE;
			else
				iodir = DMA_NONE;

			err = lpfc_vmid_get_appid(vport, uuid, iodir,
					(union lpfc_vmid_io_tag *)
						&lpfc_ncmd->cur_iocbq.vmid_tag);
			if (!err)
				lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
		}
	}
#endif

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine.  The driver now is dependent on the IO queue steering from
	 * the transport.  We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		lpfc_ncmd->cpu = cpu;
		if (idx != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6702 CPU Check cmd: "
					 "cpu %d wq %d\n",
					 lpfc_ncmd->cpu,
					 lpfc_queue_info->index);
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail1:
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
			     pnvme_fcreq->payload_length, NULL);
 out_fail:
	return ret;
}

/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
			get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport on which the IO is being aborted
 * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler. This
 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport. This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_io_buf *lpfc_nbuf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	unsigned long flags;
	int ret_val;
	struct nvme_fc_cmd_iu *cp;

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	/* Announce entry to the abort handler. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req x%px\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer. Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer. Skipping abort req.\n");
		return;
	}

	/* Guard against IO completion being called at the same time */
	spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);

	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock(&phba->hbalock);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock(&phba->hbalock);
		spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now. hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport. If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf x%px nvmeCmd x%px, "
				 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6142 NVME IO req x%px not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq x%px, "
				 "lpfc_ncmd x%px xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

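	/* All checks passed - issue the ABTS for this XRI. The completion
	 * is handled in lpfc_nvme_abort_fcreq_cmpl().
	 */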
	ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
					      lpfc_nvme_abort_fcreq_cmpl);

	spin_unlock(&phba->hbalock);
	spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	if (ret_val != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq x%px.\n",
				 ret_val, pnvme_fcreq);
		return;
	}

	/*
	 * Get Command Id from cmd to plug into response. This
	 * code is not needed in the next NVME Transport drop.
	 */
	cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
			 nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
			 cp->sqe.common.command_id);
	return;

out_unlock:
	spin_unlock(&phba->hbalock);
	spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
	return;
}

/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req = lpfc_nvme_ls_req,
	.fcp_io = lpfc_nvme_fcp_io_submit,
	.ls_abort = lpfc_nvme_ls_abort,
	.fcp_abort = lpfc_nvme_fcp_abort,
	.xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * lsrqst_priv_sz is unused at this time; fcprqst_priv_sz carries
	 * the per-IO lpfc_nvme_fcpreq_priv used to track the IO buffer.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};

/**
 * lpfc_get_nvme_buf - Get an nvme buffer from the HBA's io_buf_list
 * @phba: The HBA for which this call is being executed.
 * @ndlp: The node the IO is destined for; used for queue-depth accounting.
 * @idx: Hardware queue index from which to allocate.
 * @expedite: Nonzero to allow allocation even when the pool is low.
 *
 * This routine removes an nvme buffer from the head of the hardware
 * queue's io_buf_list (selected by @idx) and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;

	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);

	if (lpfc_ncmd) {
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Set up key fields in the buffer that may have been changed
		 * if other protocols used this buffer.
		 */
		pwqeq->cmd_flag = LPFC_IO_NVME;
		pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
		lpfc_ncmd->start_time = jiffies;
		lpfc_ncmd->flags = 0;

		/* The rsp SGE will be filled in when we receive an IO
		 * from the NVME layer to be sent.
		 * The cmd is going to be embedded so we need a SKIP SGE.
		 */
		sgl = lpfc_ncmd->dma_sgl;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		/* Fill in word 3 / sgl_len during cmd submission */

		/* Initialize only the first 64 bytes (union lpfc_wqe) */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
			atomic_inc(&ndlp->cmd_pending);
			lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
		}

	} else {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
	}

	return lpfc_ncmd;
}

/**
 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba's buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
 * of the @phba lpfc_io_buf_list. For SLI4, XRIs are tied to the nvme buffer
 * and cannot be reused for at least RA_TOV if the IO was aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
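	/* An exchange still busy on the wire (XB) cannot be recycled yet;
	 * park the buffer on the aborted list until the port releases
	 * the XRI.
	 */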
	if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_io_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}

/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport. It is called once during driver load,
 * like lpfc_create_shost, after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time. Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * are established.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - no heap memory available
 *   other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	/* Initialize this localport instance. The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Tell the transport layer cfg_nvme_seg_cnt + 1 because the
	 * transport takes page alignment into account. When space for the
	 * SGL is allocated we allocate + 3: one for the cmd, one for the
	 * rsp, and one for this alignment.
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
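	/* For example, with cfg_nvme_seg_cnt = 64 the transport is
	 * advertised 65 segments, while the driver's SGL allocation
	 * reserves 67 (cmd + rsp + alignment).
	 */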

	/* Advertise how many hw queues we support based on cfg_hdw_queue,
	 * which will not exceed cpu count.
	 */
	lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	/* The port_info is on the stack; the registration call allocates
	 * the localport and its private area on the heap.
	 */

	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP x%px, private "
				 "x%px, sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* The private area is our lport, sized as declared in the
		 * template.
		 */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);

		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}

	return ret;
}

#if (IS_ENABLED(CONFIG_NVME_FC))
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to call back
 * indicating the localport has successfully unregistered all
 * resources. Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{
	u32 wait_tmo;
	int ret, i, pending = 0;
	struct lpfc_sli_ring *pring;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli4_hdw_queue *qp;
	int abts_scsi, abts_nvme;

	/* Host transport has to clean up and confirm, requiring an
	 * indefinite wait. Print a message if a 10 second wait expires and
	 * renew the wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
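	/* LPFC_NVME_WAIT_TMO is in seconds; convert to jiffies for
	 * wait_for_completion_timeout().
	 */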
	while (true) {
		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
		if (unlikely(!ret)) {
			pending = 0;
			abts_scsi = 0;
			abts_nvme = 0;
			for (i = 0; i < phba->cfg_hdw_queue; i++) {
				qp = &phba->sli4_hba.hdwq[i];
				if (!vport->localport || !qp || !qp->io_wq)
					return;

				pring = qp->io_wq->pring;
				if (!pring)
					continue;
				pending += pring->txcmplq_cnt;
				abts_scsi += qp->abts_scsi_io_bufs;
				abts_nvme += qp->abts_nvme_io_bufs;
			}
			if (!vport->localport ||
			    test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
			    phba->link_state == LPFC_HBA_ERROR ||
			    vport->load_flag & FC_UNLOADING)
				return;

			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6176 Lport x%px Localport x%px wait "
					 "timed out. Pending %d [%d:%d]. "
					 "Renewing.\n",
					 lport, vport->localport, pending,
					 abts_scsi, abts_nvme);
			continue;
		}
		break;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport x%px Localport x%px Complete Success\n",
			 lport, vport->localport);
}
#endif

/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to a host virtual N_Port data structure
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there. This routine ensures all rports bound to the
 * lport have been disconnected.
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int ret;
	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	if (!localport)
		return;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport x%px\n",
			 localport);

	/* lport's rport list is clear. Unregister
	 * lport and release resources.
	 */
	lport->lport_unreg_cmp = &lport_unreg_cmp;
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion. This either blocks
	 * indefinitely or succeeds.
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
	vport->localport = NULL;

	/* Regardless of the unregister upcall response, clear
	 * nvmei_support. All rports are unregistered and the
	 * driver will clean up.
	 */
	vport->nvmei_support = 0;
	if (ret == 0) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}

void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP x%px, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport x%px did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport x%px to DID x%06x\n",
			 lport, localport->port_id);
#endif
}

int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;
	struct fc_rport *srport = ndlp->rport;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance. The dev_loss_tmo is taken
	 * from the scsi rport when one exists, otherwise from the
	 * vport default. The driver communicates port role
	 * capabilities consistent with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	if (srport)
		rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
	else
		rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;

	spin_lock_irq(&ndlp->lock);

	/* If an oldrport exists, so does the ndlp reference. If not,
	 * a new reference is needed because either the node has never
	 * been registered or it's been unregistered and is being deleted.
	 */
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	if (oldrport) {
		prev_ndlp = oldrport->ndlp;
		spin_unlock_irq(&ndlp->lock);
	} else {
		spin_unlock_irq(&ndlp->lock);
		if (!lpfc_nlp_get(ndlp)) {
			dev_warn(&vport->phba->pcidev->dev,
				 "Warning - No node ref - exit register\n");
			return 0;
		}
	}

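	/* Register with the transport. On success the transport returns a
	 * remote_port whose private area is our lpfc_nvme_rport.
	 */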
	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just
		 * a resume of the existing rport. Else this is a
		 * new rport.
		 */
		/* Guard against an unregister/reregister
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
		ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
		spin_unlock_irq(&ndlp->lock);
		rport = remote_port->private;
		if (oldrport) {

			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from
			 * register.
			 */
			spin_lock_irq(&ndlp->lock);
			ndlp->nrport = NULL;
			ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
			spin_unlock_irq(&ndlp->lock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* The reference is removed only if the previous
			 * ndlp is no longer active; this might just be a
			 * swap, and removing the reference then would cause
			 * a premature cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if (!prev_ndlp->nrport)
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&ndlp->lock);
		ndlp->nrport = rport;
		spin_unlock_irq(&ndlp->lock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Bind lport x%px to remoteport x%px "
				 "rport x%px WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}

/*
 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
 *
 * If the ndlp represents an NVME Target that we are logged into,
 * ping the NVME FC Transport layer to initiate a device rescan
 * on this remote NPort.
 */
void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_nvme_rport *nrport;
	struct nvme_fc_remote_port *remoteport = NULL;

	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6170 Rescan NPort DID x%06x type x%x "
			 "state x%x nrport x%px remoteport x%px\n",
			 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
			 nrport, remoteport);

	if (!nrport || !remoteport)
		goto rescan_exit;

	/* Rescan an NVME target in MAPPED state with DISCOVERY role set */
	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		nvme_fc_rescan_remoteport(remoteport);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6172 NVME rescanned DID x%06x "
				 "port_state x%x\n",
				 ndlp->nlp_DID, remoteport->port_state);
	}
	return;
 rescan_exit:
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6169 Skip NVME Rport Rescan, NVME remoteport "
			 "unregistered\n");
#endif
}

/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of devloss or rport recovery from the current
 * nvme_transport perspective. Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued. Changes are planned for later.
 *
 * Notes - the ndlp reference count is not decremented here since
 * there is no nvme_transport api for devloss. The node ref count
 * is only adjusted in driver unload.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is a fundamental error. The localport is always
	 * available until driver unload. Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&ndlp->lock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&ndlp->lock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport x%px, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x "
			 "refcnt %d\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type, kref_read(&ndlp->kref));

	/* Sanity check ndlp type. Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */

	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport.
		 * The transport will update it.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
		spin_unlock_irq(&vport->phba->hbalock);

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport. Vport is unloading, no recovery. The
		 * return value is ignored. The upcall is a courtesy to the
		 * transport.
		 */
		if (vport->load_flag & FC_UNLOADING ||
		    unlikely(vport->phba->link_state == LPFC_HBA_ERROR))
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);

		/* The driver no longer knows if the nrport memory is valid
		 * because the controller teardown process has begun and
		 * is asynchronous. Break the binding in the ndlp. Also
		 * remove the register ndlp reference to set up node release.
		 */
		ndlp->nrport = NULL;
		lpfc_nlp_put(ndlp);
		if (ret != 0) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}

/**
 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
				   struct lpfc_io_buf *lpfc_ncmd)
{
	struct nvmefc_fcp_req *nvme_cmd = NULL;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6533 %s nvme_cmd %p tag x%x abort complete and "
			"xri released\n", __func__,
			lpfc_ncmd->nvmeCmd,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands are required to not complete
	 * before the abort exchange command fully completes.
	 * Once completed, it is available via the put list.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->transferred_length = 0;
		nvme_cmd->rcv_rsplen = 0;
		nvme_cmd->status = NVME_SC_INTERNAL;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri,
			   struct lpfc_io_buf *lpfc_ncmd)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

	if (ndlp)
		lpfc_sli4_abts_err_handler(phba, ndlp, axri);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
			"xri released\n",
			lpfc_ncmd->nvmeCmd, xri,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands are required to not complete
	 * before the abort exchange command fully completes.
	 * Once completed, it is available via the put list.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked from EEH handling when the
 * device's PCI slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all IO rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].io_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;

		if (!pring)
			continue;

		/* Retrieve everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* The sleep is 10 ms. Every ten seconds,
			 * log a message; something is wrong.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);
}

void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      uint32_t stat, uint32_t param)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_io_buf *lpfc_ncmd;
	struct nvmefc_fcp_req *nCmd;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep = &wcqe;

	lpfc_ncmd = pwqeIn->io_buf;
	if (!lpfc_ncmd) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}
	/* For abort iocb just return, IO iocb will do a done call */
	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
	    CMD_ABORT_XRI_CX) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}

	spin_lock(&lpfc_ncmd->buf_lock);
	nCmd = lpfc_ncmd->nvmeCmd;
	if (!nCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
			"6194 NVME Cancel xri %x\n",
			lpfc_ncmd->cur_iocbq.sli4_xritag);

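	/* Fabricate a completion WCQE carrying the caller-supplied status
	 * and parameter so the normal IO completion handler can finish
	 * this command.
	 */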
	wcqep->word0 = 0;
	bf_set(lpfc_wcqe_c_status, wcqep, stat);
	wcqep->parameter = param;
	wcqep->total_data_placed = 0;
	wcqep->word3 = 0; /* xb is 0 */

	/* Call release with XB=1 to queue the IO into the abort list. */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		bf_set(lpfc_wcqe_c_xb, wcqep, 1);

	memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
	(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}