
scsi: lpfc: NVME Target: bind to nvmet_fc api

NVME Target: Tie in to NVME Fabrics nvmet_fc LLDD target api

Adds the routines to:
- register and deregister the FC port as an nvmet-fc targetport
- binding of nvme queues to adapter WQs
- receipt of NVME LS's, passing them to the transport, and sending the
  transport's response
- receipt of NVME FCP CMD IUs, processing of FCP target io data transmission
  commands, and transmission of the FCP io response
- abort operations for tgt io exchanges
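
As a quick orientation before the diff: an LLDD binds to the nvmet_fc api by
filling in a struct nvmet_fc_target_template with its LS/FCP handlers and
registering the FC port's WWNs via nvmet_fc_register_targetport(). The sketch
below condenses that shape under the same api this patch uses; the lldd_*
names and the private struct are placeholders, not lpfc code (the real wiring
is lpfc_nvmet_create_targetport() in the new lpfc_nvmet.c further down):

	#include <linux/nvme-fc-driver.h>

	struct lldd_tgtport_priv { void *hba; };	/* placeholder private area */

	/* handler signatures match the nvmet_fc template ops used below */
	static void lldd_targetport_delete(struct nvmet_fc_target_port *tp);
	static int  lldd_xmt_ls_rsp(struct nvmet_fc_target_port *tp,
				    struct nvmefc_tgt_ls_req *rsp);
	static int  lldd_fcp_op(struct nvmet_fc_target_port *tp,
				struct nvmefc_tgt_fcp_req *req);

	/* Condensed sketch only; error handling elided */
	static struct nvmet_fc_target_template lldd_tgttemplate = {
		.targetport_delete = lldd_targetport_delete, /* unreg completion */
		.xmt_ls_rsp        = lldd_xmt_ls_rsp,        /* send LS response */
		.fcp_op            = lldd_fcp_op,            /* data/rsp/abort ops */
		.max_hw_queues     = 1,                      /* raised per adapter */
		.target_priv_sz    = sizeof(struct lldd_tgtport_priv),
	};

	static int lldd_bind_targetport(struct device *dev, u64 wwnn, u64 wwpn,
					u32 did,
					struct nvmet_fc_target_port **tgtport)
	{
		struct nvmet_fc_port_info pinfo = {
			.node_name = wwnn,
			.port_name = wwpn,
			.port_id   = did,
		};

		/* transport allocates *tgtport plus target_priv_sz private bytes */
		return nvmet_fc_register_targetport(&pinfo, &lldd_tgttemplate,
						    dev, tgtport);
	}

On the receive side, the new lpfc_nvmet.c hands incoming frames to the
transport with nvmet_fc_rcv_ls_req() and nvmet_fc_rcv_fcp_req(); the
transport then calls back into the template's xmt_ls_rsp/fcp_op handlers to
move data and responses.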

[mkp: fixed space at end of file warning]

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by James Smart; committed by Martin K. Petersen (d613b6a7 8c258641)

8 files changed: +1752 -15
drivers/scsi/lpfc/Makefile  +1 -1
···
 lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
 	lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
 	lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
-	lpfc_nvme.o
+	lpfc_nvme.o lpfc_nvmet.o
drivers/scsi/lpfc/lpfc_crtn.h  +10
···
 void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);

 void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
+void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+
 /* Function prototypes. */
 const char* lpfc_info(struct Scsi_Host *);
 int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
···
 int lpfc_nvme_create_localport(struct lpfc_vport *vport);
 void lpfc_nvme_destroy_localport(struct lpfc_vport *vport);
 void lpfc_nvme_update_localport(struct lpfc_vport *vport);
+int lpfc_nvmet_create_targetport(struct lpfc_hba *phba);
+int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
+void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
+void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
+			struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
+void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
+			struct lpfc_sli_ring *pring,
+			struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
 void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
 void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
 			struct lpfc_iocbq *cmdiocb,
drivers/scsi/lpfc/lpfc_ct.c  +1 -1
···
 	if ((vport == phba->pport) && phba->nvmet_support) {
 		CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
 				FC4_FEATURE_NVME_DISC);
-		/* todo: update targetport attributes */
+		lpfc_nvmet_update_targetport(phba);
 	} else {
 		lpfc_nvme_update_localport(vport);
 	}
drivers/scsi/lpfc/lpfc_els.c  +3 -2
···
 
 	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
 	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
-		if (!phba->nvmet_support)
+		if (phba->nvmet_support)
+			lpfc_nvmet_update_targetport(phba);
+		else
 			lpfc_nvme_update_localport(phba->pport);
-		/* todo: tgt: update targetport attributes */
 	}

 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
drivers/scsi/lpfc/lpfc_hbadisc.c  +6 -4
···
 
 	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
 	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
-		if (!phba->nvmet_support)
+		if (phba->nvmet_support)
+			lpfc_nvmet_update_targetport(phba);
+		else
 			lpfc_nvme_update_localport(vports[i]);
 		}
 	}
 }
···
 
 	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
 	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
-		if (!phba->nvmet_support)
+		if (phba->nvmet_support)
+			lpfc_nvmet_update_targetport(phba);
+		else
 			lpfc_nvme_update_localport(vport);
-		/* todo: update targetport attributes */
 	}
 	goto out;
 }
drivers/scsi/lpfc/lpfc_init.c  +10 -4
···
 	/* Remove FC host and then SCSI host with the physical port */
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
+
+	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
+	 * localports are destroyed after to cleanup all transport memory.
+	 */
 	lpfc_cleanup(vport);
+	lpfc_nvmet_destroy_targetport(phba);
+	lpfc_nvme_destroy_localport(vport);

 	/*
 	 * Bring down the SLI Layer. This step disable all interrupts,
···
 	/* Remove FC host and then SCSI host with the physical port */
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
-	/* todo: tgt: remove targetport */

-	/* Perform ndlp cleanup on the physical port. The nvme localport
-	 * is destroyed after to ensure all rports are io-disabled.
+	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
+	 * localports are destroyed after to cleanup all transport memory.
 	 */
-	lpfc_nvme_destroy_localport(vport);
 	lpfc_cleanup(vport);
+	lpfc_nvmet_destroy_targetport(phba);
+	lpfc_nvme_destroy_localport(vport);

 	/*
 	 * Bring down the SLI Layer. This step disables all interrupts,
drivers/scsi/lpfc/lpfc_mem.c  +45
···
 	}
 	return;
 }
+
+/**
+ * lpfc_rq_buf_free - Free a RQ DMA buffer
+ * @phba: HBA buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given DMA buffer in the appropriate way given by
+ * reposting it to its associated RQ so it can be reused.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+	struct lpfc_rqb *rqbp;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+	struct rqb_dmabuf *rqb_entry;
+	unsigned long flags;
+	int rc;
+
+	if (!mp)
+		return;
+
+	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
+	rqbp = rqb_entry->hrq->rqbp;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_del(&rqb_entry->hbuf.list);
+	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
+	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
+	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
+	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
+	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
+	if (rc < 0) {
+		(rqbp->rqb_free_buffer)(phba, rqb_entry);
+	} else {
+		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
+		rqbp->buffer_count++;
+	}
+
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+}
drivers/scsi/lpfc/lpfc_nvmet.c (new file)  +1671
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (status)
		atomic_inc(&tgtp->xmt_ls_rsp_error);
	else
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
	ctxp = cmdwqe->context2;
	rsp = &ctxp->ctx.ls_req;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
			ctxp, status, result);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctxp: context to clean up
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
		   struct lpfc_dmabuf *mp)
{
	if (ctxp) {
		if (ctxp->txrdy) {
			pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
				      ctxp->txrdy_phys);
			ctxp->txrdy = NULL;
			ctxp->txrdy_phys = 0;
		}
		ctxp->state = LPFC_NVMET_STE_FREE;
	}
	lpfc_rq_buf_free(phba, mp);
}

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean;

	ctxp = cmdwqe->context2;
	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp_error);
	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

out:
	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;
		rsp->done(rsp);
		/* Let Abort cmpl repost the context */
		if (!(ctxp->flag & LPFC_NVMET_ABORT_OP))
			lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, wqe);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
		rsp->done(rsp);
	}
}

static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 %s: Entrypoint ctx %p %p\n", __func__,
			ctxp, tgtport);

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		ctxp->rqb_buffer = 0;
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}
	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_iocbq *nvmewqeq;
	unsigned long iflags;
	int rc;

	if (rsp->op == NVMET_FCOP_ABORT) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6103 Abort op: oxri x%x %d cnt %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
		ctxp->entry_cnt++;
		ctxp->flag |= LPFC_NVMET_ABORT_OP;
		if (ctxp->flag & LPFC_NVMET_IO_INP)
			lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						       ctxp->oxid);
		else
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
							 ctxp->oxid);
		return 0;
	}

	/* Sanity check */
	if (ctxp->state == LPFC_NVMET_STE_ABORT) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 Bad state IO x%x aborted\n",
				ctxp->oxid);
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	/* For now we take hbalock */
	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (rc == WQE_SUCCESS) {
		ctxp->flag |= LPFC_NVMET_IO_INP;
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
aerr:
	return -ENXIO;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->tport_unreg_done);
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op = lpfc_nvmet_xmt_fcp_op,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error = 0;

	if (phba->targetport)
		return 0;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
					   NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;

	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport "
				"x%x\n", error);
		phba->targetport = NULL;
	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: %p, private %p "
				"portnm %llx nodenm %llx\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->xmt_abort_cmpl, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port %p did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_tgtport *tgtp;

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		init_completion(&tgtp->tport_unreg_done);
		nvmet_fc_unregister_targetport(phba->targetport);
		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
	}
	phba->targetport = NULL;
}

/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO\n");
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		if (nvmebuf)
			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->wqeq = NULL;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->rqb_buffer = (void *)nvmebuf;
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", __func__, ctxp, size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}
	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
	if (nvmebuf)
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
}

/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    struct lpfc_sli_ring *pring,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	oxid = 0;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 FCP Drop IO\n");
		goto dropit;
	}


	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	size = nvmebuf->bytes_recv;
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6158 FCP Drop IO x%x: Alloc\n",
				oxid);
		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
		/* Cannot send ABTS without context */
		return;
	}
	memset(ctxp, 0, sizeof(ctxp->ctx));
	ctxp->wqeq = NULL;
	ctxp->txrdy = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->rqb_buffer = nvmebuf;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, size);

	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		return;
	}

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
			ctxp->oxid, rc);
dropit:
	if (oxid) {
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		return;
	}

	if (nvmebuf) {
		nvmebuf->iocbq->hba_wqidx = 0;
		/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
	}
}

/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring.
 * The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}

/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   struct lpfc_sli_ring *pring,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp)
{
	if (phba->nvmet_support == 0) {
		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
				    isr_timestamp);
}

/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine is used for allocating a lpfc-WQE data structure from
 * the driver lpfc-WQE free-list and prepare the WQE with the parameters
 * passed into the routine for discovery state machine to issue an Extended
 * Link Service (NVME) commands. It is a generic lpfc-WQE allocation
 * and preparation routine that is used by all the discovery state machine
 * routines and the NVME command-specific fields will be later set up by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic WQE data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the WQE data structure for this WQE to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 lpfc_nvmet_prep_ls_wqe: link err: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 3 */

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid,
	       &wqe->xmit_sequence.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVME response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVME LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}


static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	uint32_t *txrdy;
	dma_addr_t physaddr;
	int i, cnt;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 lpfc_nvmet_prep_fcp_wqe: link err:"
				"NPORT x%x oxid:x%x\n", ctxp->sid,
				ctxp->oxid);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->rqb_buffer->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 lpfc_nvmet_prep_fcp_wqe: No "
					"WQE: NPORT x%x oxid:x%x\n",
					ctxp->sid, ctxp->oxid);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    ((ctxp->state == LPFC_NVMET_STE_DATA) &&
	    (ctxp->entry_cnt > 1))) {
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state %s: %d  cnt %d\n",
				__func__, ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl  = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags =
			BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
		bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
		       FCP_COMMAND_TSEND);

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
			if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
			    (rsp->rsplen == 12)) {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
			} else {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
		}
		ctxp->state = LPFC_NVMET_STE_DATA;
		break;

	case NVMET_FCOP_WRITEDATA:
		/* Words 0 - 2 : The first sg segment */
		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
				       GFP_KERNEL, &physaddr);
		if (!txrdy) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6041 Bad txrdy buffer: oxid x%x\n",
					ctxp->oxid);
			return NULL;
		}
		ctxp->txrdy = txrdy;
		ctxp->txrdy_phys = physaddr;
		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
		wqe->fcp_treceive.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_treceive.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 5 */

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
		       CMD_FCP_TRECEIVE64_WQE);

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
		bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
		       FCP_COMMAND_TRECEIVE);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 1 TXRDY and 1 SKIP SGE */
		txrdy[0] = 0;
		txrdy[1] = cpu_to_be32(rsp->transfer_length);
		txrdy[2] = 0;

		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		ctxp->state = LPFC_NVMET_STE_DATA;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* Words 0 - 2 */
		sgel = &rsp->sg[0];
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len =
			rsp->rsplen;

		/* Word 4 */
		wqe->fcp_trsp.rsvd_4_5[0] = 0;


		/* Word 5 */

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
		       FCP_COMMAND_TRSP);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
		ctxp->state = LPFC_NVMET_STE_RSP;

		if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
			/* Good response - all zero's on wire */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
		} else {
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		sgl++;
		ctxp->offset += cnt;
	}
	return nvmewqe;
}

/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held.
 * This function is the completion handler for NVME ABTS for FCP cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	ctxp->state = LPFC_NVMET_STE_DONE;
	lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

/**
 * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (ctxp) {
		/* Sanity check */
		if (ctxp->state != LPFC_NVMET_STE_ABORT) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6112 ABORT Wrong state:%d oxid x%x\n",
					ctxp->state, ctxp->oxid);
		}
		ctxp->state = LPFC_NVMET_STE_DONE;
		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
		cmdwqe->context2 = NULL;
		cmdwqe->context3 = NULL;
	}
}

/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for LS
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (ctxp) {
		cmdwqe->context2 = NULL;
		cmdwqe->context3 = NULL;
		lpfc_sli_release_iocbq(phba, cmdwqe);
		kfree(ctxp);
	} else
		lpfc_sli_release_iocbq(phba, cmdwqe);
}

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 %s: Entrypoint: sid %x xri %x\n", __func__,
			sid, xri);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				ndlp->nlp_state);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
1394 + */ 1395 + memset(wqe_abts, 0, sizeof(union lpfc_wqe)); 1396 + 1397 + /* Word 5 */ 1398 + bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0); 1399 + bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1); 1400 + bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0); 1401 + bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS); 1402 + bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS); 1403 + 1404 + /* Word 6 */ 1405 + bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com, 1406 + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1407 + bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com, 1408 + abts_wqeq->sli4_xritag); 1409 + 1410 + /* Word 7 */ 1411 + bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com, 1412 + CMD_XMIT_SEQUENCE64_WQE); 1413 + bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI); 1414 + bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3); 1415 + bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0); 1416 + 1417 + /* Word 8 */ 1418 + wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag; 1419 + 1420 + /* Word 9 */ 1421 + bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag); 1422 + /* Needs to be set by caller */ 1423 + bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); 1424 + 1425 + /* Word 10 */ 1426 + bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); 1427 + bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); 1428 + bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, 1429 + LPFC_WQE_LENLOC_WORD12); 1430 + bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0); 1431 + bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0); 1432 + 1433 + /* Word 11 */ 1434 + bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com, 1435 + LPFC_WQE_CQ_ID_DEFAULT); 1436 + bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com, 1437 + OTHER_COMMAND); 1438 + 1439 + abts_wqeq->vport = phba->pport; 1440 + abts_wqeq->context1 = ndlp; 1441 + abts_wqeq->context2 = ctxp; 1442 + abts_wqeq->context3 = NULL; 1443 + abts_wqeq->rsvd2 = 0; 1444 + /* hba_wqidx should already be setup from command we are aborting */ 1445 + abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; 1446 + abts_wqeq->iocb.ulpLe = 1; 1447 + 1448 + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1449 + "6069 Issue ABTS to xri x%x reqtag x%x\n", 1450 + xri, abts_wqeq->iotag); 1451 + return 1; 1452 + } 1453 + 1454 + static int 1455 + lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, 1456 + struct lpfc_nvmet_rcv_ctx *ctxp, 1457 + uint32_t sid, uint16_t xri) 1458 + { 1459 + struct lpfc_nvmet_tgtport *tgtp; 1460 + struct lpfc_iocbq *abts_wqeq; 1461 + union lpfc_wqe *abts_wqe; 1462 + struct lpfc_nodelist *ndlp; 1463 + unsigned long flags; 1464 + int rc; 1465 + 1466 + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1467 + if (!ctxp->wqeq) { 1468 + ctxp->wqeq = ctxp->rqb_buffer->iocbq; 1469 + ctxp->wqeq->hba_wqidx = 0; 1470 + } 1471 + 1472 + ndlp = lpfc_findnode_did(phba->pport, sid); 1473 + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 1474 + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 1475 + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 1476 + atomic_inc(&tgtp->xmt_abort_rsp_error); 1477 + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 1478 + "6160 Drop ABTS - wrong NDLP state x%x.\n", 1479 + ndlp->nlp_state); 1480 + 1481 + /* No failure to an ABTS request. 
*/ 1482 + return 0; 1483 + } 1484 + 1485 + /* Issue ABTS for this WQE based on iotag */ 1486 + ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); 1487 + if (!ctxp->abort_wqeq) { 1488 + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 1489 + "6161 Abort failed: No wqeqs: " 1490 + "xri: x%x\n", ctxp->oxid); 1491 + /* No failure to an ABTS request. */ 1492 + return 0; 1493 + } 1494 + abts_wqeq = ctxp->abort_wqeq; 1495 + abts_wqe = &abts_wqeq->wqe; 1496 + ctxp->state = LPFC_NVMET_STE_ABORT; 1497 + 1498 + /* Announce entry to new IO submit field. */ 1499 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 1500 + "6162 Abort Request to rport DID x%06x " 1501 + "for xri x%x x%x\n", 1502 + ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); 1503 + 1504 + /* If the hba is getting reset, this flag is set. It is 1505 + * cleared when the reset is complete and rings reestablished. 1506 + */ 1507 + spin_lock_irqsave(&phba->hbalock, flags); 1508 + /* driver queued commands are in process of being flushed */ 1509 + if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 1510 + spin_unlock_irqrestore(&phba->hbalock, flags); 1511 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1512 + "6163 Driver in reset cleanup - flushing " 1513 + "NVME Req now. hba_flag x%x oxid x%x\n", 1514 + phba->hba_flag, ctxp->oxid); 1515 + lpfc_sli_release_iocbq(phba, abts_wqeq); 1516 + return 0; 1517 + } 1518 + 1519 + /* Outstanding abort is in progress */ 1520 + if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { 1521 + spin_unlock_irqrestore(&phba->hbalock, flags); 1522 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1523 + "6164 Outstanding NVME I/O Abort Request " 1524 + "still pending on oxid x%x\n", 1525 + ctxp->oxid); 1526 + lpfc_sli_release_iocbq(phba, abts_wqeq); 1527 + return 0; 1528 + } 1529 + 1530 + /* Ready - mark outstanding as aborted by driver. */ 1531 + abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; 1532 + 1533 + /* WQEs are reused. Clear stale data and set key fields to 1534 + * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. 1535 + */ 1536 + memset(abts_wqe, 0, sizeof(union lpfc_wqe)); 1537 + 1538 + /* word 3 */ 1539 + bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 1540 + 1541 + /* word 7 */ 1542 + bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 1543 + bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 1544 + 1545 + /* word 8 - tell the FW to abort the IO associated with this 1546 + * outstanding exchange ID. 1547 + */ 1548 + abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; 1549 + 1550 + /* word 9 - this is the iotag for the abts_wqe completion. 
*/ 1551 + bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 1552 + abts_wqeq->iotag); 1553 + 1554 + /* word 10 */ 1555 + bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 1556 + bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 1557 + 1558 + /* word 11 */ 1559 + bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 1560 + bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 1561 + bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 1562 + 1563 + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 1564 + abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; 1565 + abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; 1566 + abts_wqeq->iocb_cmpl = 0; 1567 + abts_wqeq->iocb_flag |= LPFC_IO_NVME; 1568 + abts_wqeq->context2 = ctxp; 1569 + rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 1570 + spin_unlock_irqrestore(&phba->hbalock, flags); 1571 + if (rc == WQE_SUCCESS) 1572 + return 0; 1573 + 1574 + lpfc_sli_release_iocbq(phba, abts_wqeq); 1575 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1576 + "6166 Failed abts issue_wqe with status x%x " 1577 + "for oxid x%x.\n", 1578 + rc, ctxp->oxid); 1579 + return 1; 1580 + } 1581 + 1582 + 1583 + static int 1584 + lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, 1585 + struct lpfc_nvmet_rcv_ctx *ctxp, 1586 + uint32_t sid, uint16_t xri) 1587 + { 1588 + struct lpfc_nvmet_tgtport *tgtp; 1589 + struct lpfc_iocbq *abts_wqeq; 1590 + unsigned long flags; 1591 + int rc; 1592 + 1593 + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1594 + if (!ctxp->wqeq) { 1595 + ctxp->wqeq = ctxp->rqb_buffer->iocbq; 1596 + ctxp->wqeq->hba_wqidx = 0; 1597 + } 1598 + 1599 + rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); 1600 + if (rc == 0) 1601 + goto aerr; 1602 + 1603 + spin_lock_irqsave(&phba->hbalock, flags); 1604 + abts_wqeq = ctxp->wqeq; 1605 + abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp; 1606 + abts_wqeq->iocb_cmpl = 0; 1607 + abts_wqeq->iocb_flag |= LPFC_IO_NVMET; 1608 + rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 1609 + spin_unlock_irqrestore(&phba->hbalock, flags); 1610 + if (rc == WQE_SUCCESS) { 1611 + atomic_inc(&tgtp->xmt_abort_rsp); 1612 + return 0; 1613 + } 1614 + 1615 + aerr: 1616 + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 1617 + atomic_inc(&tgtp->xmt_abort_rsp_error); 1618 + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 1619 + "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", 1620 + ctxp->oxid, rc); 1621 + return 1; 1622 + } 1623 + 1624 + static int 1625 + lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, 1626 + struct lpfc_nvmet_rcv_ctx *ctxp, 1627 + uint32_t sid, uint16_t xri) 1628 + { 1629 + struct lpfc_nvmet_tgtport *tgtp; 1630 + struct lpfc_iocbq *abts_wqeq; 1631 + union lpfc_wqe *wqe_abts; 1632 + unsigned long flags; 1633 + int rc; 1634 + 1635 + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1636 + if (!ctxp->wqeq) { 1637 + /* Issue ABTS for this WQE based on iotag */ 1638 + ctxp->wqeq = lpfc_sli_get_iocbq(phba); 1639 + if (!ctxp->wqeq) { 1640 + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 1641 + "6068 Abort failed: No wqeqs: " 1642 + "xri: x%x\n", xri); 1643 + /* No failure to an ABTS request. 
*/ 1644 + kfree(ctxp); 1645 + return 0; 1646 + } 1647 + } 1648 + abts_wqeq = ctxp->wqeq; 1649 + wqe_abts = &abts_wqeq->wqe; 1650 + lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); 1651 + 1652 + spin_lock_irqsave(&phba->hbalock, flags); 1653 + abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; 1654 + abts_wqeq->iocb_cmpl = 0; 1655 + abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; 1656 + rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); 1657 + spin_unlock_irqrestore(&phba->hbalock, flags); 1658 + if (rc == WQE_SUCCESS) { 1659 + atomic_inc(&tgtp->xmt_abort_rsp); 1660 + return 0; 1661 + } 1662 + 1663 + atomic_inc(&tgtp->xmt_abort_rsp_error); 1664 + abts_wqeq->context2 = NULL; 1665 + abts_wqeq->context3 = NULL; 1666 + lpfc_sli_release_iocbq(phba, abts_wqeq); 1667 + kfree(ctxp); 1668 + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 1669 + "6056 Failed to Issue ABTS. Status x%x\n", rc); 1670 + return 0; 1671 + }
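
The three *_issue_abort() routines above share one submit shape: claim or allocate an iocbq, build the WQE, hook a wqe_cmpl handler, issue through lpfc_sli4_issue_wqe() under phba->hbalock, and unwind (release the iocbq, bump xmt_abort_rsp_error) if the queue rejects the request; a dropped ABTS is never reported upward as a failure. Below is a minimal standalone sketch of that shape, assuming a pthread mutex in place of hbalock and entirely hypothetical names (abort_req, hw_issue, issue_abort); it is not lpfc API.

    /*
     * Standalone sketch -- NOT lpfc code -- of the abort-submit pattern:
     * mark the request as having an abort outstanding, hook a completion
     * callback, issue under a lock, and unwind if the queue rejects it.
     * All names here are hypothetical stand-ins.
     */
    #include <pthread.h>
    #include <stdio.h>

    enum { WQE_SUCCESS = 0, WQE_ERROR = 1 };

    struct abort_req {
        unsigned int oxid;                      /* exchange being aborted */
        int driver_aborted;                     /* mirrors LPFC_DRIVER_ABORTED */
        void (*cmpl)(struct abort_req *, int);  /* like abts_wqeq->wqe_cmpl */
    };

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for hbalock */

    /* Pretend hardware submit; fails for odd oxids to exercise the error path. */
    static int hw_issue(struct abort_req *req)
    {
        return (req->oxid & 1) ? WQE_ERROR : WQE_SUCCESS;
    }

    static void abort_done(struct abort_req *req, int status)
    {
        printf("abort cmpl: oxid x%x status x%x\n", req->oxid, status);
    }

    static int issue_abort(struct abort_req *req)
    {
        int rc;

        pthread_mutex_lock(&hw_lock);
        if (req->driver_aborted) {      /* an abort is already outstanding */
            pthread_mutex_unlock(&hw_lock);
            return 0;                   /* ABTS requests never fail upward */
        }
        req->driver_aborted = 1;
        req->cmpl = abort_done;
        rc = hw_issue(req);
        pthread_mutex_unlock(&hw_lock);
        if (rc == WQE_SUCCESS) {
            req->cmpl(req, 0);          /* the real driver runs this from the WCQE */
            return 0;
        }

        req->driver_aborted = 0;        /* unwind, as the driver releases the iocbq */
        fprintf(stderr, "failed to issue abort for oxid x%x, rc x%x\n",
                req->oxid, rc);
        return 1;
    }

    int main(void)
    {
        struct abort_req ok = { .oxid = 0x10 }, bad = { .oxid = 0x11 };

        issue_abort(&ok);   /* success path: completion callback fires */
        issue_abort(&bad);  /* failure path: unwind and report */
        return 0;
    }

The per-path differences in the driver sit on top of this skeleton: the solicited FCP path allocates a separate abort iocbq and sends an ABORT_XRI WQE, while the unsolicited paths reuse the receive context's iocbq and send a BLS ABTS sequence.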
+5 -3
drivers/scsi/lpfc/lpfc_sli.c
··· 2518 2518
2519 2519     switch (fch_type) {
2520 2520     case FC_TYPE_NVME:
2521      -        /* todo: tgt: forward NVME LS to transport */
     2521 +        lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2522 2522         return 1;
2523 2523     default:
2524 2524         break;
··· 6867 6867         goto out_destroy_queue;
6868 6868     }
6869 6869     phba->sli4_hba.nvmet_xri_cnt = rc;
6870      -    /* todo: tgt: create targetport */
     6870 +    lpfc_nvmet_create_targetport(phba);
6871 6871     } else {
6872 6872         /* update host scsi xri-sgl sizes and mappings */
6873 6873         rc = lpfc_sli4_scsi_sgl_update(phba);
··· 13137 13137
13138 13138     if (fc_hdr->fh_type == FC_TYPE_FCP) {
13139 13139         dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13140       -        /* todo: tgt: forward cmd iu to transport */
      13140 +        lpfc_nvmet_unsol_fcp_event(
      13141 +            phba, phba->sli4_hba.els_wq->pring, dma_buf,
      13142 +            cq->assoc_qp->isr_timestamp);
13141 13143         return false;
13142 13144     }
13143 13145 drop:
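
These hunks complete the receive-side routing: NVME LS frames seen on the ELS ring are handed to lpfc_nvmet_unsol_ls_event(), and FCP command IUs pulled from the receive queue go to lpfc_nvmet_unsol_fcp_event() together with the ISR timestamp. Below is a minimal sketch of that type-based dispatch; the type codes are the standard FC-FS assignments (FCP 0x08, FC-NVMe 0x28), but the frame struct and handler names are hypothetical, not the driver's.

    /*
     * Minimal sketch -- hypothetical types and handler names -- of the
     * unsolicited-frame routing wired up above: dispatch on the FC
     * header TYPE field to the appropriate nvmet entry point.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define FC_TYPE_FCP  0x08   /* FCP command IU */
    #define FC_TYPE_NVME 0x28   /* FC-NVMe link service */

    struct fc_frame {
        uint8_t fh_type;        /* FC header TYPE field */
    };

    static void nvmet_unsol_ls_event(struct fc_frame *f)
    {
        printf("LS frame (type x%x) -> nvmet LS handler\n", f->fh_type);
    }

    static void nvmet_unsol_fcp_event(struct fc_frame *f, uint64_t isr_ts)
    {
        printf("FCP cmd IU (type x%x) -> nvmet FCP handler, isr_ts=%llu\n",
               f->fh_type, (unsigned long long)isr_ts);
    }

    /* Returns 1 if the frame was consumed by the nvmet paths, 0 otherwise. */
    static int dispatch_unsol(struct fc_frame *f, uint64_t isr_ts)
    {
        switch (f->fh_type) {
        case FC_TYPE_NVME:
            nvmet_unsol_ls_event(f);
            return 1;
        case FC_TYPE_FCP:
            nvmet_unsol_fcp_event(f, isr_ts);
            return 1;
        default:
            return 0;   /* not ours; other protocol handlers get a look */
        }
    }

    int main(void)
    {
        struct fc_frame ls = { FC_TYPE_NVME }, io = { FC_TYPE_FCP };

        dispatch_unsol(&ls, 0);
        dispatch_unsol(&io, 12345);
        return 0;
    }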