Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: lpfc: Fix discovery failures during port failovers with lots of vports

The driver is getting hit with 100s of RSCNs during remote port address
changes. Each of those RSCN's ends up generating UNREG_RPI and REG_RPI
mailbox commands. The discovery engine within the driver doesn't wait for
the mailbox command completions. Instead it sets state flags and moves
forward. At some point, there's a massive backlog of mailbox commands which
take time for the adapter to process. Additionally, it appears there were
duplicate events from the switch so the driver generated duplicate mailbox
commands for the same remote port. During this window, failures on PLOGI
and PRLI ELS's are seen as the adapter is rejecting them as they are for
remote ports that still have pending mailbox commands.

Streamline the discovery engine so that PLOGI logic checks for outstanding
UNREG_RPIs and defer the processing until the commands complete. This
better synchronizes the ELS transmission vs the RPI registrations.

Filter out multiple UNREG_RPIs being queued up for the same remote port.

Beef up log messages in this area.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

James Smart and committed by
Martin K. Petersen
dea16bda 3e1f0718

+207 -34
-1
drivers/scsi/lpfc/lpfc_crtn.h
··· 74 74 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 75 75 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); 76 76 void lpfc_retry_pport_discovery(struct lpfc_hba *); 77 - void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t); 78 77 int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt); 79 78 void lpfc_free_iocb_list(struct lpfc_hba *phba); 80 79 int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+2
drivers/scsi/lpfc/lpfc_debugfs.c
··· 645 645 i, ndlp->cmd_qdepth); 646 646 outio += i; 647 647 } 648 + len += snprintf(buf + len, size - len, "defer:%x ", 649 + ndlp->nlp_defer_did); 648 650 len += snprintf(buf+len, size-len, "\n"); 649 651 } 650 652 spin_unlock_irq(shost->host_lock);
+3 -1
drivers/scsi/lpfc/lpfc_disc.h
··· 138 138 139 139 uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ 140 140 #define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ 141 + uint32_t nlp_defer_did; 141 142 }; 142 143 struct lpfc_node_rrq { 143 144 struct list_head list; ··· 166 165 #define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ 167 166 #define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */ 168 167 #define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */ 168 + #define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */ 169 169 #define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ 170 170 #define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ 171 171 #define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ ··· 295 293 #define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */ 296 294 #define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */ 297 295 #define NLP_EVT_MAX_EVENT 0xd 298 - 296 + #define NLP_EVT_NOTHING_PENDING 0xff
+58 -11
drivers/scsi/lpfc/lpfc_els.c
··· 315 315 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 316 316 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 317 317 "0116 Xmit ELS command x%x to remote " 318 - "NPORT x%x I/O tag: x%x, port state:x%x" 319 - " fc_flag:x%x\n", 318 + "NPORT x%x I/O tag: x%x, port state:x%x " 319 + "rpi x%x fc_flag:x%x\n", 320 320 elscmd, did, elsiocb->iotag, 321 - vport->port_state, 321 + vport->port_state, ndlp->nlp_rpi, 322 322 vport->fc_flag); 323 323 } else { 324 324 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 325 325 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 326 326 "0117 Xmit ELS response x%x to remote " 327 327 "NPORT x%x I/O tag: x%x, size: x%x " 328 - "port_state x%x fc_flag x%x\n", 328 + "port_state x%x rpi x%x fc_flag x%x\n", 329 329 elscmd, ndlp->nlp_DID, elsiocb->iotag, 330 330 cmdSize, vport->port_state, 331 - vport->fc_flag); 331 + ndlp->nlp_rpi, vport->fc_flag); 332 332 } 333 333 return elsiocb; 334 334 ··· 1642 1642 spin_lock_irq(shost->host_lock); 1643 1643 keep_nlp_flag = new_ndlp->nlp_flag; 1644 1644 new_ndlp->nlp_flag = ndlp->nlp_flag; 1645 - ndlp->nlp_flag = keep_nlp_flag; 1645 + 1646 + /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1647 + if (keep_nlp_flag & NLP_UNREG_INP) 1648 + new_ndlp->nlp_flag |= NLP_UNREG_INP; 1649 + else 1650 + new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1651 + 1652 + /* if ndlp had NLP_UNREG_INP set, keep it */ 1653 + if (ndlp->nlp_flag & NLP_UNREG_INP) 1654 + ndlp->nlp_flag = keep_nlp_flag | NLP_UNREG_INP; 1655 + else 1656 + ndlp->nlp_flag = keep_nlp_flag & ~NLP_UNREG_INP; 1657 + 1646 1658 spin_unlock_irq(shost->host_lock); 1647 1659 1648 1660 /* Set nlp_states accordingly */ ··· 1931 1919 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1932 1920 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1933 1921 spin_unlock_irq(shost->host_lock); 1934 - rc = 0; 1922 + rc = 0; 1935 1923 1936 1924 /* PLOGI completes to NPort <nlp_DID> */ 1937 1925 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, ··· 2038 2026 int ret; 2039 2027 2040 2028 ndlp = 
lpfc_findnode_did(vport, did); 2041 - if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 2042 - ndlp = NULL; 2029 + 2030 + if (ndlp) { 2031 + /* Defer the processing of the issue PLOGI until after the 2032 + * outstanding UNREG_RPI mbox command completes, unless we 2033 + * are going offline. This logic does not apply for Fabric DIDs 2034 + */ 2035 + if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2036 + ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2037 + !(vport->fc_flag & FC_OFFLINE_MODE)) { 2038 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2039 + "4110 Issue PLOGI x%x deferred " 2040 + "on NPort x%x rpi x%x Data: %p\n", 2041 + ndlp->nlp_defer_did, ndlp->nlp_DID, 2042 + ndlp->nlp_rpi, ndlp); 2043 + 2044 + /* We can only defer 1st PLOGI */ 2045 + if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2046 + ndlp->nlp_defer_did = did; 2047 + return 0; 2048 + } 2049 + if (!NLP_CHK_NODE_ACT(ndlp)) 2050 + ndlp = NULL; 2051 + } 2043 2052 2044 2053 /* If ndlp is not NULL, we will bump the reference count on it */ 2045 2054 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); ··· 2194 2161 else 2195 2162 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2196 2163 NLP_EVT_CMPL_PRLI); 2197 - } else 2164 + } else { 2198 2165 /* Good status, call state machine. 
However, if another 2199 2166 * PRLI is outstanding, don't call the state machine 2200 2167 * because final disposition to Mapped or Unmapped is ··· 2202 2169 */ 2203 2170 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2204 2171 NLP_EVT_CMPL_PRLI); 2172 + } 2205 2173 2206 2174 out: 2207 2175 lpfc_els_free_iocb(phba, cmdiocb); ··· 2261 2227 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2262 2228 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2263 2229 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2264 - ndlp->nlp_flag &= ~NLP_FIRSTBURST; 2230 + ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2265 2231 ndlp->nvme_fb_size = 0; 2266 2232 2267 2233 send_next_prli: ··· 6145 6111 /* NVME Target mode does not do RSCN Recovery. */ 6146 6112 if (vport->phba->nvmet_support) 6147 6113 continue; 6114 + 6115 + /* If we are in the process of doing discovery on this 6116 + * NPort, let it continue on its own. 6117 + */ 6118 + switch (ndlp->nlp_state) { 6119 + case NLP_STE_PLOGI_ISSUE: 6120 + case NLP_STE_ADISC_ISSUE: 6121 + case NLP_STE_REG_LOGIN_ISSUE: 6122 + case NLP_STE_PRLI_ISSUE: 6123 + case NLP_STE_LOGO_ISSUE: 6124 + continue; 6125 + } 6126 + 6148 6127 6149 6128 lpfc_disc_state_machine(vport, ndlp, NULL, 6150 6129 NLP_EVT_DEVICE_RECOVERY);
+56 -6
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 4444 4444 NLP_INT_NODE_ACT(ndlp); 4445 4445 atomic_set(&ndlp->cmd_pending, 0); 4446 4446 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 4447 + ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 4447 4448 } 4448 4449 4449 4450 struct lpfc_nodelist * ··· 4452 4451 int state) 4453 4452 { 4454 4453 struct lpfc_hba *phba = vport->phba; 4455 - uint32_t did; 4454 + uint32_t did, flag; 4456 4455 unsigned long flags; 4457 4456 unsigned long *active_rrqs_xri_bitmap = NULL; 4458 4457 int rpi = LPFC_RPI_ALLOC_ERROR; 4458 + uint32_t defer_did = 0; 4459 4459 4460 4460 if (!ndlp) 4461 4461 return NULL; ··· 4489 4487 goto free_rpi; 4490 4488 } 4491 4489 4492 - /* Keep the original DID */ 4490 + /* First preserve the orginal DID, xri_bitmap and some flags */ 4493 4491 did = ndlp->nlp_DID; 4492 + flag = (ndlp->nlp_flag & NLP_UNREG_INP); 4493 + if (flag & NLP_UNREG_INP) 4494 + defer_did = ndlp->nlp_defer_did; 4494 4495 if (phba->sli_rev == LPFC_SLI_REV4) 4495 4496 active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap; 4496 4497 4497 - /* re-initialize ndlp except of ndlp linked list pointer */ 4498 + /* Zero ndlp except of ndlp linked list pointer */ 4498 4499 memset((((char *)ndlp) + sizeof (struct list_head)), 0, 4499 4500 sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); 4500 - lpfc_initialize_node(vport, ndlp, did); 4501 4501 4502 + /* Next reinitialize and restore saved objects */ 4503 + lpfc_initialize_node(vport, ndlp, did); 4504 + ndlp->nlp_flag |= flag; 4505 + if (flag & NLP_UNREG_INP) 4506 + ndlp->nlp_defer_did = defer_did; 4502 4507 if (phba->sli_rev == LPFC_SLI_REV4) 4503 4508 ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap; 4504 4509 ··· 4770 4761 return; 4771 4762 lpfc_issue_els_logo(vport, ndlp, 0); 4772 4763 mempool_free(pmb, phba->mbox_mem_pool); 4764 + 4765 + /* Check to see if there are any deferred events to process */ 4766 + if ((ndlp->nlp_flag & NLP_UNREG_INP) && 4767 + (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 4768 + 
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4769 + "1434 UNREG cmpl deferred logo x%x " 4770 + "on NPort x%x Data: x%x %p\n", 4771 + ndlp->nlp_rpi, ndlp->nlp_DID, 4772 + ndlp->nlp_defer_did, ndlp); 4773 + 4774 + ndlp->nlp_flag &= ~NLP_UNREG_INP; 4775 + ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 4776 + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 4777 + } 4773 4778 } 4774 4779 4775 4780 /* ··· 4812 4789 "did x%x\n", 4813 4790 ndlp->nlp_rpi, ndlp->nlp_flag, 4814 4791 ndlp->nlp_DID); 4792 + 4793 + /* If there is already an UNREG in progress for this ndlp, 4794 + * no need to queue up another one. 4795 + */ 4796 + if (ndlp->nlp_flag & NLP_UNREG_INP) { 4797 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4798 + "1436 unreg_rpi SKIP UNREG x%x on " 4799 + "NPort x%x deferred x%x flg x%x " 4800 + "Data: %p\n", 4801 + ndlp->nlp_rpi, ndlp->nlp_DID, 4802 + ndlp->nlp_defer_did, 4803 + ndlp->nlp_flag, ndlp); 4804 + goto out; 4805 + } 4806 + 4815 4807 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4816 4808 if (mbox) { 4817 4809 /* SLI4 ports require the physical rpi value. 
*/ ··· 4853 4815 * accept PLOGIs after unreg_rpi_cmpl 4854 4816 */ 4855 4817 acc_plogi = 0; 4856 - } else 4818 + } else { 4819 + mbox->ctx_ndlp = ndlp; 4857 4820 mbox->mbox_cmpl = 4858 4821 lpfc_sli_def_mbox_cmpl; 4822 + } 4859 4823 } 4824 + if (((ndlp->nlp_DID & Fabric_DID_MASK) != 4825 + Fabric_DID_MASK) && 4826 + (!(vport->fc_flag & FC_OFFLINE_MODE))) 4827 + ndlp->nlp_flag |= NLP_UNREG_INP; 4828 + 4829 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4830 + "1433 unreg_rpi UNREG x%x on " 4831 + "NPort x%x deferred flg x%x Data:%p\n", 4832 + ndlp->nlp_rpi, ndlp->nlp_DID, 4833 + ndlp->nlp_flag, ndlp); 4860 4834 4861 4835 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4862 4836 if (rc == MBX_NOT_FINISHED) { ··· 4877 4827 } 4878 4828 } 4879 4829 lpfc_no_rpi(phba, ndlp); 4880 - 4830 + out: 4881 4831 if (phba->sli_rev != LPFC_SLI_REV4) 4882 4832 ndlp->nlp_rpi = 0; 4883 4833 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+35 -9
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 871 871 * to release a rpi. 872 872 **/ 873 873 void 874 - lpfc_release_rpi(struct lpfc_hba *phba, 875 - struct lpfc_vport *vport, 876 - uint16_t rpi) 874 + lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, 875 + struct lpfc_nodelist *ndlp, uint16_t rpi) 877 876 { 878 877 LPFC_MBOXQ_t *pmb; 879 878 int rc; 879 + 880 + /* If there is already an UNREG in progress for this ndlp, 881 + * no need to queue up another one. 882 + */ 883 + if (ndlp->nlp_flag & NLP_UNREG_INP) { 884 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 885 + "1435 release_rpi SKIP UNREG x%x on " 886 + "NPort x%x deferred x%x flg x%x " 887 + "Data: %p\n", 888 + ndlp->nlp_rpi, ndlp->nlp_DID, 889 + ndlp->nlp_defer_did, 890 + ndlp->nlp_flag, ndlp); 891 + return; 892 + } 880 893 881 894 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 882 895 GFP_KERNEL); ··· 899 886 else { 900 887 lpfc_unreg_login(phba, vport->vpi, rpi, pmb); 901 888 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 889 + pmb->vport = vport; 890 + pmb->ctx_ndlp = ndlp; 891 + 892 + if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 893 + (!(vport->fc_flag & FC_OFFLINE_MODE))) 894 + ndlp->nlp_flag |= NLP_UNREG_INP; 895 + 896 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 897 + "1437 release_rpi UNREG x%x " 898 + "on NPort x%x flg x%x\n", 899 + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); 900 + 902 901 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 903 902 if (rc == MBX_NOT_FINISHED) 904 903 mempool_free(pmb, phba->mbox_mem_pool); ··· 931 906 (evt == NLP_EVT_CMPL_REG_LOGIN) && 932 907 (!pmb->u.mb.mbxStatus)) { 933 908 rpi = pmb->u.mb.un.varWords[0]; 934 - lpfc_release_rpi(phba, vport, rpi); 909 + lpfc_release_rpi(phba, vport, ndlp, rpi); 935 910 } 936 911 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 937 912 "0271 Illegal State Transition: node x%x " ··· 1359 1334 if (!(phba->pport->load_flag & FC_UNLOADING) && 1360 1335 !mb->mbxStatus) { 1361 1336 rpi = pmb->u.mb.un.varWords[0]; 1362 - 
lpfc_release_rpi(phba, vport, rpi); 1337 + lpfc_release_rpi(phba, vport, ndlp, rpi); 1363 1338 } 1364 1339 return ndlp->nlp_state; 1365 1340 } ··· 2900 2875 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ 2901 2876 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2902 2877 "0211 DSM in event x%x on NPort x%x in " 2903 - "state %d Data: x%x x%x\n", 2904 - evt, ndlp->nlp_DID, cur_state, 2878 + "state %d rpi x%x Data: x%x x%x\n", 2879 + evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, 2905 2880 ndlp->nlp_flag, ndlp->nlp_fc4_type); 2906 2881 2907 2882 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, ··· 2914 2889 /* DSM out state <rc> on NPort <nlp_DID> */ 2915 2890 if (got_ndlp) { 2916 2891 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2917 - "0212 DSM out state %d on NPort x%x Data: x%x\n", 2918 - rc, ndlp->nlp_DID, ndlp->nlp_flag); 2892 + "0212 DSM out state %d on NPort x%x " 2893 + "rpi x%x Data: x%x\n", 2894 + rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag); 2919 2895 2920 2896 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 2921 2897 "DSM out: ste:%d did:x%x flg:x%x",
+53 -6
drivers/scsi/lpfc/lpfc_sli.c
··· 2493 2493 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2494 2494 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2495 2495 lpfc_nlp_put(ndlp); 2496 + pmb->ctx_buf = NULL; 2497 + pmb->ctx_ndlp = NULL; 2498 + } 2499 + 2500 + if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2501 + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2502 + 2503 + /* Check to see if there are any deferred events to process */ 2504 + if (ndlp) { 2505 + lpfc_printf_vlog( 2506 + vport, 2507 + KERN_INFO, LOG_MBOX | LOG_DISCOVERY, 2508 + "1438 UNREG cmpl deferred mbox x%x " 2509 + "on NPort x%x Data: x%x x%x %p\n", 2510 + ndlp->nlp_rpi, ndlp->nlp_DID, 2511 + ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); 2512 + 2513 + if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2514 + (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 2515 + ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 2516 + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2517 + } 2518 + ndlp->nlp_flag &= ~NLP_UNREG_INP; 2519 + } 2496 2520 pmb->ctx_ndlp = NULL; 2497 2521 } 2498 2522 ··· 2558 2534 &phba->sli4_hba.sli_intf) >= 2559 2535 LPFC_SLI_INTF_IF_TYPE_2)) { 2560 2536 if (ndlp) { 2561 - lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 2562 - "0010 UNREG_LOGIN vpi:%x " 2563 - "rpi:%x DID:%x map:%x %p\n", 2564 - vport->vpi, ndlp->nlp_rpi, 2565 - ndlp->nlp_DID, 2566 - ndlp->nlp_usg_map, ndlp); 2537 + lpfc_printf_vlog( 2538 + vport, KERN_INFO, LOG_MBOX | LOG_SLI, 2539 + "0010 UNREG_LOGIN vpi:%x " 2540 + "rpi:%x DID:%x defer x%x flg x%x " 2541 + "map:%x %p\n", 2542 + vport->vpi, ndlp->nlp_rpi, 2543 + ndlp->nlp_DID, ndlp->nlp_defer_did, 2544 + ndlp->nlp_flag, 2545 + ndlp->nlp_usg_map, ndlp); 2567 2546 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2568 2547 lpfc_nlp_put(ndlp); 2548 + 2549 + /* Check to see if there are any deferred 2550 + * events to process 2551 + */ 2552 + if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2553 + (ndlp->nlp_defer_did != 2554 + NLP_EVT_NOTHING_PENDING)) { 2555 + lpfc_printf_vlog( 2556 + vport, KERN_INFO, LOG_DISCOVERY, 2557 + "4111 UNREG cmpl 
deferred " 2558 + "clr x%x on " 2559 + "NPort x%x Data: x%x %p\n", 2560 + ndlp->nlp_rpi, ndlp->nlp_DID, 2561 + ndlp->nlp_defer_did, ndlp); 2562 + ndlp->nlp_defer_did = 2563 + NLP_EVT_NOTHING_PENDING; 2564 + lpfc_issue_els_plogi( 2565 + vport, ndlp->nlp_DID, 0); 2566 + } 2567 + ndlp->nlp_flag &= ~NLP_UNREG_INP; 2569 2568 } 2570 2569 } 2571 2570 }