[SCSI] lpfc 8.3.18: Adapter Shutdown and Unregistration cleanup

Adapter Shutdown and Unregistration cleanup

- Correct the logic around HBA shutdown. Prior to the final reset, the
driver must wait for all XRIs to be returned by the adapter. Added logic
to poll for outstanding XRIs, progressively slowing the poll rate as the
wait gets longer (a minimal sketch of this pattern follows the list).
- Correct behavior around the rsvd1 field in UNREG_RPI_ALL mailbox
completion and final rpi cleanup.
- Updated logic so that pending VPI registrations are moved to completion
in cases where a CVL may be received while registration is in progress.
- Added unreg all rpi mailbox command before unreg vpi.
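
For reference, below is a minimal, self-contained userspace C sketch of the poll-with-backoff
pattern described in the first bullet. The names xris_outstanding() and wait_ms are illustrative
stand-ins (not driver symbols), and the 10 ms / 10 s / 30 s figures simply mirror the
LPFC_XRI_EXCH_BUSY_WAIT_* constants introduced below; the driver's actual implementation is
lpfc_sli4_xri_exchange_busy_wait() in the lpfc_init.c hunk.

/* Illustrative sketch only; the real logic is lpfc_sli4_xri_exchange_busy_wait(). */
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for the driver's list_empty() checks on the
 * aborted-SCSI and aborted-ELS sgl lists: pretend a few XRIs drain over time. */
static int xris_outstanding(void)
{
    static int remaining = 5;
    if (remaining > 0)
        remaining--;
    return remaining > 0;
}

int main(void)
{
    int wait_ms = 0;

    while (xris_outstanding()) {
        if (wait_ms > 10000) {
            /* Past the 10 second mark: warn and slow to one poll per 30 s. */
            fprintf(stderr, "XRI exchange busy wait time: %d seconds\n",
                    wait_ms / 1000);
            sleep(30);
            wait_ms += 30000;
        } else {
            /* First 10 seconds: poll every 10 ms. */
            usleep(10 * 1000);
            wait_ms += 10;
        }
    }
    return 0;
}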

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

Authored by James Smart and committed by James Bottomley (5af5eee7, a93ff37a)

+235 -28
+3
drivers/scsi/lpfc/lpfc_crtn.h
···
 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
 void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
            struct lpfc_nodelist *);
···
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
            struct lpfc_dmabuf *);
+10
drivers/scsi/lpfc/lpfc_els.c
···
         lpfc_unreg_rpi(vport, np);
     }
     lpfc_cleanup_pending_mbox(vport);
+
+    if (phba->sli_rev == LPFC_SLI_REV4)
+        lpfc_sli4_unreg_all_rpis(vport);
+
     if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
         lpfc_mbx_unreg_vpi(vport);
         spin_lock_irq(shost->host_lock);
···

     default:
         /* Try to recover from this error */
+        if (phba->sli_rev == LPFC_SLI_REV4)
+            lpfc_sli4_unreg_all_rpis(vport);
         lpfc_mbx_unreg_vpi(vport);
         spin_lock_irq(shost->host_lock);
         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
···
         lpfc_unreg_rpi(vport, np);
     }
     lpfc_cleanup_pending_mbox(vport);
+
+    if (phba->sli_rev == LPFC_SLI_REV4)
+        lpfc_sli4_unreg_all_rpis(vport);
+
     lpfc_mbx_unreg_vpi(vport);
     spin_lock_irq(shost->host_lock);
     vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+9
drivers/scsi/lpfc/lpfc_hbadisc.c
···
                 : NLP_EVT_DEVICE_RECOVERY);
     }
     if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+        if (phba->sli_rev == LPFC_SLI_REV4)
+            lpfc_sli4_unreg_all_rpis(vport);
         lpfc_mbx_unreg_vpi(vport);
         spin_lock_irq(shost->host_lock);
         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
···
     LPFC_MBOXQ_t *mbox;
     int rc;

+    if (phba->sli_rev == LPFC_SLI_REV4) {
+        lpfc_sli4_unreg_all_rpis(vport);
+        return;
+    }
+
     mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
     if (mbox) {
         lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
···
         if (ndlp)
             lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
         lpfc_cleanup_pending_mbox(vports[i]);
+        if (phba->sli_rev == LPFC_SLI_REV4)
+            lpfc_sli4_unreg_all_rpis(vports[i]);
         lpfc_mbx_unreg_vpi(vports[i]);
         shost = lpfc_shost_from_vport(vports[i]);
         spin_lock_irq(shost->host_lock);
+52
drivers/scsi/lpfc/lpfc_init.c
···

     return 0;
 }
+
 /**
  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
  * @phba: pointer to lpfc HBA data structure.
···
 }

 /**
+ * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to wait for completion
+ * of device's XRIs exchange busy. It will check the XRI exchange busy
+ * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
+ * that, it will check the XRI exchange busy on outstanding FCP and ELS
+ * I/Os every 30 seconds, log error message, and wait forever. Only when
+ * all XRI exchange busy complete, the driver unload shall proceed with
+ * invoking the function reset ioctl mailbox command to the CNA and the
+ * rest of the driver unload resource release.
+ **/
+static void
+lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
+{
+    int wait_time = 0;
+    int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+    int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+    while (!fcp_xri_cmpl || !els_xri_cmpl) {
+        if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+            if (!fcp_xri_cmpl)
+                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                        "2877 FCP XRI exchange busy "
+                        "wait time: %d seconds.\n",
+                        wait_time/1000);
+            if (!els_xri_cmpl)
+                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                        "2878 ELS XRI exchange busy "
+                        "wait time: %d seconds.\n",
+                        wait_time/1000);
+            msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
+            wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
+        } else {
+            msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+            wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
+        }
+        fcp_xri_cmpl =
+            list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+        els_xri_cmpl =
+            list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+    }
+}
+
+/**
  * lpfc_sli4_hba_unset - Unset the fcoe hba
  * @phba: Pointer to HBA context object.
  *
···
         phba->sli.mbox_active = NULL;
         spin_unlock_irq(&phba->hbalock);
     }
+
+    /* Abort all iocbs associated with the hba */
+    lpfc_sli_hba_iocb_abort(phba);
+
+    /* Wait for completion of device XRI exchange busy */
+    lpfc_sli4_xri_exchange_busy_wait(phba);

     /* Disable PCI subsystem interrupt */
     lpfc_sli4_disable_intr(phba);
+28
drivers/scsi/lpfc/lpfc_mbox.c
···
 }

 /**
+ * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
+ * @vport: pointer to a vport object.
+ *
+ * This routine sends mailbox command to unregister all active RPIs for
+ * a vport.
+ **/
+void
+lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
+{
+    struct lpfc_hba *phba = vport->phba;
+    LPFC_MBOXQ_t *mbox;
+    int rc;
+
+    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+    if (mbox) {
+        lpfc_unreg_login(phba, vport->vpi,
+                 vport->vpi + phba->vpi_base, mbox);
+        mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
+        mbox->vport = vport;
+        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+        mbox->context1 = NULL;
+        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+        if (rc == MBX_NOT_FINISHED)
+            mempool_free(mbox, phba->mbox_mem_pool);
+    }
+}
+
+/**
  * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
  * @phba: pointer to lpfc hba data structure.
  * @vpi: virtual N_Port identifier.
+130 -28
drivers/scsi/lpfc/lpfc_sli.c
···
     struct lpfc_vport *vport = pmb->vport;
     struct lpfc_dmabuf *mp;
     struct lpfc_nodelist *ndlp;
+    struct Scsi_Host *shost;
     uint16_t rpi, vpi;
     int rc;

···
     }

     if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
-        (phba->sli_rev == LPFC_SLI_REV4))
+        (phba->sli_rev == LPFC_SLI_REV4) &&
+        (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
         lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);

     /*
···
         return;
     }

-    /* Unreg VPI, if the REG_VPI succeed after VLink failure */
     if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
         !(phba->pport->load_flag & FC_UNLOADING) &&
         !pmb->u.mb.mbxStatus) {
-        lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
-        pmb->vport = vport;
-        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-        if (rc != MBX_NOT_FINISHED)
-            return;
+        shost = lpfc_shost_from_vport(vport);
+        spin_lock_irq(shost->host_lock);
+        vport->vpi_state |= LPFC_VPI_REGISTERED;
+        vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+        spin_unlock_irq(shost->host_lock);
     }

     if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
···
 }

 /**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
  * @cmdiocb: Pointer to driver command iocb object.
  *
- * This function issues an abort iocb for the provided command
- * iocb. This function is called with hbalock held.
- * The function returns 0 when it fails due to memory allocation
- * failure or when the command iocb is an abort request.
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Other than the case the outstanding command iocb is an abort
+ * request, this function issues abort out unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
  **/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                struct lpfc_iocbq *cmdiocb)
 {
     struct lpfc_vport *vport = cmdiocb->vport;
     struct lpfc_iocbq *abtsiocbp;
     IOCB_t *icmd = NULL;
     IOCB_t *iabt = NULL;
-    int retval = IOCB_ERROR;
+    int retval;

     /*
      * There are certain command types we don't want to abort. And we
···
         icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
         (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
         return 0;
-
-    /* If we're unloading, don't abort iocb on the ELS ring, but change the
-     * callback so that nothing happens when it finishes.
-     */
-    if ((vport->load_flag & FC_UNLOADING) &&
-        (pring->ringno == LPFC_ELS_RING)) {
-        if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
-            cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
-        else
-            cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
-        goto abort_iotag_exit;
-    }

     /* issue ABTS for this IOCB based on iotag */
     abtsiocbp = __lpfc_sli_get_iocbq(phba);
···

     if (retval)
         __lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+    /*
+     * Caller to this routine should check for IOCB_ERROR
+     * and handle it properly. This routine no longer removes
+     * iocb off txcmplq and call compl in case of IOCB_ERROR.
+     */
+    return retval;
+}
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the callback function shall be changed for those commands
+ * so that nothing happens when they finish. This function is called with
+ * hbalock held. The function returns 0 when the command iocb is an abort
+ * request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+               struct lpfc_iocbq *cmdiocb)
+{
+    struct lpfc_vport *vport = cmdiocb->vport;
+    int retval = IOCB_ERROR;
+    IOCB_t *icmd = NULL;
+
+    /*
+     * There are certain command types we don't want to abort. And we
+     * don't want to abort commands that are already in the process of
+     * being aborted.
+     */
+    icmd = &cmdiocb->iocb;
+    if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+        icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+        (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+        return 0;
+
+    /*
+     * If we're unloading, don't abort iocb on the ELS ring, but change
+     * the callback so that nothing happens when it finishes.
+     */
+    if ((vport->load_flag & FC_UNLOADING) &&
+        (pring->ringno == LPFC_ELS_RING)) {
+        if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+            cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+        else
+            cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+        goto abort_iotag_exit;
+    }
+
+    /* Now, we try to issue the abort to the cmdiocb out */
+    retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
 abort_iotag_exit:
     /*
      * Caller to this routine should check for IOCB_ERROR
···
      * iocb off txcmplq and call compl in case of IOCB_ERROR.
      */
     return retval;
+}
+
+/**
+ * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues abort iocbs unconditionally for all
+ * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
+ * to complete before the return of this function. The caller is not required
+ * to hold any locks.
+ **/
+static void
+lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+    LIST_HEAD(completions);
+    struct lpfc_iocbq *iocb, *next_iocb;
+
+    if (pring->ringno == LPFC_ELS_RING)
+        lpfc_fabric_abort_hba(phba);
+
+    spin_lock_irq(&phba->hbalock);
+
+    /* Take off all the iocbs on txq for cancelling */
+    list_splice_init(&pring->txq, &completions);
+    pring->txq_cnt = 0;
+
+    /* Next issue ABTS for everything on the txcmplq */
+    list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+        lpfc_sli_abort_iotag_issue(phba, pring, iocb);
+
+    spin_unlock_irq(&phba->hbalock);
+
+    /* Cancel all the IOCBs from the completions list */
+    lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+              IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs to an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+    struct lpfc_sli *psli = &phba->sli;
+    struct lpfc_sli_ring *pring;
+    int i;
+
+    for (i = 0; i < psli->num_rings; i++) {
+        pring = &psli->ring[i];
+        lpfc_sli_iocb_ring_abort(phba, pring);
+    }
 }

 /**
+3
drivers/scsi/lpfc/lpfc_sli4.h
···
 *******************************************************************/

 #define LPFC_ACTIVE_MBOX_WAIT_CNT               100
+#define LPFC_XRI_EXCH_BUSY_WAIT_TMO             10000
+#define LPFC_XRI_EXCH_BUSY_WAIT_T1              10
+#define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
 #define LPFC_RELEASE_NOTIFICATION_INTERVAL      32
 #define LPFC_GET_QE_REL_INT                     32
 #define LPFC_RPI_LOW_WATER_MARK                 10