[SCSI] lpfc 8.3.18: Adapter Shutdown and Unregistration cleanup

Adapter Shutdown and Unregistration cleanup

- Correct the logic around hba shutdown. Prior to final reset, the
driver must wait for all XRIs to return from the adapter. Added logic
to poll, progressively slowing the poll rate as delay gets longer.
- Correct behavior around the rsvd1 field in UNREG_RPI_ALL mailbox
completion and final rpi cleanup.
- Updated logic to move pending VPI registrations to their completion
in cases where a CVL may be received while registration is in progress.
- Added unreg all rpi mailbox command before unreg vpi.

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

authored by James Smart and committed by James Bottomley 5af5eee7 a93ff37a

+235 -28
+3
drivers/scsi/lpfc/lpfc_crtn.h
··· 44 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 45 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 46 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 47 void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 48 void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, 49 struct lpfc_nodelist *); ··· 274 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 275 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); 276 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 277 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); 278 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 279 struct lpfc_dmabuf *);
··· 44 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 45 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 46 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 47 + void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *); 48 + 49 void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 50 void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, 51 struct lpfc_nodelist *); ··· 272 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 273 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); 274 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 275 + void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); 276 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); 277 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 278 struct lpfc_dmabuf *);
+10
drivers/scsi/lpfc/lpfc_els.c
··· 580 lpfc_unreg_rpi(vport, np); 581 } 582 lpfc_cleanup_pending_mbox(vport); 583 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 584 lpfc_mbx_unreg_vpi(vport); 585 spin_lock_irq(shost->host_lock); ··· 6486 6487 default: 6488 /* Try to recover from this error */ 6489 lpfc_mbx_unreg_vpi(vport); 6490 spin_lock_irq(shost->host_lock); 6491 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; ··· 6755 lpfc_unreg_rpi(vport, np); 6756 } 6757 lpfc_cleanup_pending_mbox(vport); 6758 lpfc_mbx_unreg_vpi(vport); 6759 spin_lock_irq(shost->host_lock); 6760 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
··· 580 lpfc_unreg_rpi(vport, np); 581 } 582 lpfc_cleanup_pending_mbox(vport); 583 + 584 + if (phba->sli_rev == LPFC_SLI_REV4) 585 + lpfc_sli4_unreg_all_rpis(vport); 586 + 587 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 588 lpfc_mbx_unreg_vpi(vport); 589 spin_lock_irq(shost->host_lock); ··· 6482 6483 default: 6484 /* Try to recover from this error */ 6485 + if (phba->sli_rev == LPFC_SLI_REV4) 6486 + lpfc_sli4_unreg_all_rpis(vport); 6487 lpfc_mbx_unreg_vpi(vport); 6488 spin_lock_irq(shost->host_lock); 6489 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; ··· 6749 lpfc_unreg_rpi(vport, np); 6750 } 6751 lpfc_cleanup_pending_mbox(vport); 6752 + 6753 + if (phba->sli_rev == LPFC_SLI_REV4) 6754 + lpfc_sli4_unreg_all_rpis(vport); 6755 + 6756 lpfc_mbx_unreg_vpi(vport); 6757 spin_lock_irq(shost->host_lock); 6758 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+9
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 794 : NLP_EVT_DEVICE_RECOVERY); 795 } 796 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { 797 lpfc_mbx_unreg_vpi(vport); 798 spin_lock_irq(shost->host_lock); 799 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; ··· 4082 LPFC_MBOXQ_t *mbox; 4083 int rc; 4084 4085 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4086 if (mbox) { 4087 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); ··· 5361 if (ndlp) 5362 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 5363 lpfc_cleanup_pending_mbox(vports[i]); 5364 lpfc_mbx_unreg_vpi(vports[i]); 5365 shost = lpfc_shost_from_vport(vports[i]); 5366 spin_lock_irq(shost->host_lock);
··· 794 : NLP_EVT_DEVICE_RECOVERY); 795 } 796 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { 797 + if (phba->sli_rev == LPFC_SLI_REV4) 798 + lpfc_sli4_unreg_all_rpis(vport); 799 lpfc_mbx_unreg_vpi(vport); 800 spin_lock_irq(shost->host_lock); 801 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; ··· 4080 LPFC_MBOXQ_t *mbox; 4081 int rc; 4082 4083 + if (phba->sli_rev == LPFC_SLI_REV4) { 4084 + lpfc_sli4_unreg_all_rpis(vport); 4085 + return; 4086 + } 4087 + 4088 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4089 if (mbox) { 4090 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); ··· 5354 if (ndlp) 5355 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 5356 lpfc_cleanup_pending_mbox(vports[i]); 5357 + if (phba->sli_rev == LPFC_SLI_REV4) 5358 + lpfc_sli4_unreg_all_rpis(vports[i]); 5359 lpfc_mbx_unreg_vpi(vports[i]); 5360 shost = lpfc_shost_from_vport(vports[i]); 5361 spin_lock_irq(shost->host_lock);
+52
drivers/scsi/lpfc/lpfc_init.c
··· 813 814 return 0; 815 } 816 /** 817 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 818 * @phba: pointer to lpfc HBA data structure. ··· 7268 } 7269 7270 /** 7271 * lpfc_sli4_hba_unset - Unset the fcoe hba 7272 * @phba: Pointer to HBA context object. 7273 * ··· 7356 phba->sli.mbox_active = NULL; 7357 spin_unlock_irq(&phba->hbalock); 7358 } 7359 7360 /* Disable PCI subsystem interrupt */ 7361 lpfc_sli4_disable_intr(phba);
··· 813 814 return 0; 815 } 816 + 817 /** 818 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 819 * @phba: pointer to lpfc HBA data structure. ··· 7267 } 7268 7269 /** 7270 + * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 7271 + * @phba: Pointer to HBA context object. 7272 + * 7273 + * This function is called in the SLI4 code path to wait for completion 7274 + * of device's XRIs exchange busy. It will check the XRI exchange busy 7275 + * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 7276 + * that, it will check the XRI exchange busy on outstanding FCP and ELS 7277 + * I/Os every 30 seconds, log error message, and wait forever. Only when 7278 + * all XRI exchange busy complete, the driver unload shall proceed with 7279 + * invoking the function reset ioctl mailbox command to the CNA and the 7280 + * the rest of the driver unload resource release. 7281 + **/ 7282 + static void 7283 + lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 7284 + { 7285 + int wait_time = 0; 7286 + int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 7287 + int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7288 + 7289 + while (!fcp_xri_cmpl || !els_xri_cmpl) { 7290 + if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 7291 + if (!fcp_xri_cmpl) 7292 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7293 + "2877 FCP XRI exchange busy " 7294 + "wait time: %d seconds.\n", 7295 + wait_time/1000); 7296 + if (!els_xri_cmpl) 7297 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7298 + "2878 ELS XRI exchange busy " 7299 + "wait time: %d seconds.\n", 7300 + wait_time/1000); 7301 + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 7302 + wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 7303 + } else { 7304 + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 7305 + wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 7306 + } 7307 + fcp_xri_cmpl = 7308 + list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 7309 + els_xri_cmpl = 7310 + 
list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7311 + } 7312 + } 7313 + 7314 + /** 7315 * lpfc_sli4_hba_unset - Unset the fcoe hba 7316 * @phba: Pointer to HBA context object. 7317 * ··· 7310 phba->sli.mbox_active = NULL; 7311 spin_unlock_irq(&phba->hbalock); 7312 } 7313 + 7314 + /* Abort all iocbs associated with the hba */ 7315 + lpfc_sli_hba_iocb_abort(phba); 7316 + 7317 + /* Wait for completion of device XRI exchange busy */ 7318 + lpfc_sli4_xri_exchange_busy_wait(phba); 7319 7320 /* Disable PCI subsystem interrupt */ 7321 lpfc_sli4_disable_intr(phba);
+28
drivers/scsi/lpfc/lpfc_mbox.c
··· 797 } 798 799 /** 800 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier 801 * @phba: pointer to lpfc hba data structure. 802 * @vpi: virtual N_Port identifier.
··· 797 } 798 799 /** 800 + * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA. 801 + * @vport: pointer to a vport object. 802 + * 803 + * This routine sends mailbox command to unregister all active RPIs for 804 + * a vport. 805 + **/ 806 + void 807 + lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport) 808 + { 809 + struct lpfc_hba *phba = vport->phba; 810 + LPFC_MBOXQ_t *mbox; 811 + int rc; 812 + 813 + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 814 + if (mbox) { 815 + lpfc_unreg_login(phba, vport->vpi, 816 + vport->vpi + phba->vpi_base, mbox); 817 + mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ; 818 + mbox->vport = vport; 819 + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 820 + mbox->context1 = NULL; 821 + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 822 + if (rc == MBX_NOT_FINISHED) 823 + mempool_free(mbox, phba->mbox_mem_pool); 824 + } 825 + } 826 + 827 + /** 828 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier 829 * @phba: pointer to lpfc hba data structure. 830 * @vpi: virtual N_Port identifier.
+130 -28
drivers/scsi/lpfc/lpfc_sli.c
··· 1735 struct lpfc_vport *vport = pmb->vport; 1736 struct lpfc_dmabuf *mp; 1737 struct lpfc_nodelist *ndlp; 1738 uint16_t rpi, vpi; 1739 int rc; 1740 ··· 1747 } 1748 1749 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && 1750 - (phba->sli_rev == LPFC_SLI_REV4)) 1751 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); 1752 1753 /* ··· 1767 return; 1768 } 1769 1770 - /* Unreg VPI, if the REG_VPI succeed after VLink failure */ 1771 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 1772 !(phba->pport->load_flag & FC_UNLOADING) && 1773 !pmb->u.mb.mbxStatus) { 1774 - lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb); 1775 - pmb->vport = vport; 1776 - pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1777 - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1778 - if (rc != MBX_NOT_FINISHED) 1779 - return; 1780 } 1781 1782 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { ··· 7257 } 7258 7259 /** 7260 - * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 7261 * @phba: Pointer to HBA context object. 7262 * @pring: Pointer to driver SLI ring object. 7263 * @cmdiocb: Pointer to driver command iocb object. 7264 * 7265 - * This function issues an abort iocb for the provided command 7266 - * iocb. This function is called with hbalock held. 7267 - * The function returns 0 when it fails due to memory allocation 7268 - * failure or when the command iocb is an abort request. 7269 **/ 7270 - int 7271 - lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7272 struct lpfc_iocbq *cmdiocb) 7273 { 7274 struct lpfc_vport *vport = cmdiocb->vport; 7275 struct lpfc_iocbq *abtsiocbp; 7276 IOCB_t *icmd = NULL; 7277 IOCB_t *iabt = NULL; 7278 - int retval = IOCB_ERROR; 7279 7280 /* 7281 * There are certain command types we don't want to abort. 
And we ··· 7288 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7289 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7290 return 0; 7291 - 7292 - /* If we're unloading, don't abort iocb on the ELS ring, but change the 7293 - * callback so that nothing happens when it finishes. 7294 - */ 7295 - if ((vport->load_flag & FC_UNLOADING) && 7296 - (pring->ringno == LPFC_ELS_RING)) { 7297 - if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 7298 - cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 7299 - else 7300 - cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 7301 - goto abort_iotag_exit; 7302 - } 7303 7304 /* issue ABTS for this IOCB based on iotag */ 7305 abtsiocbp = __lpfc_sli_get_iocbq(phba); ··· 7333 7334 if (retval) 7335 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7336 abort_iotag_exit: 7337 /* 7338 * Caller to this routine should check for IOCB_ERROR ··· 7397 * iocb off txcmplq and call compl in case of IOCB_ERROR. 7398 */ 7399 return retval; 7400 } 7401 7402 /**
··· 1735 struct lpfc_vport *vport = pmb->vport; 1736 struct lpfc_dmabuf *mp; 1737 struct lpfc_nodelist *ndlp; 1738 + struct Scsi_Host *shost; 1739 uint16_t rpi, vpi; 1740 int rc; 1741 ··· 1746 } 1747 1748 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && 1749 + (phba->sli_rev == LPFC_SLI_REV4) && 1750 + (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0)) 1751 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); 1752 1753 /* ··· 1765 return; 1766 } 1767 1768 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 1769 !(phba->pport->load_flag & FC_UNLOADING) && 1770 !pmb->u.mb.mbxStatus) { 1771 + shost = lpfc_shost_from_vport(vport); 1772 + spin_lock_irq(shost->host_lock); 1773 + vport->vpi_state |= LPFC_VPI_REGISTERED; 1774 + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 1775 + spin_unlock_irq(shost->host_lock); 1776 } 1777 1778 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { ··· 7257 } 7258 7259 /** 7260 + * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 7261 * @phba: Pointer to HBA context object. 7262 * @pring: Pointer to driver SLI ring object. 7263 * @cmdiocb: Pointer to driver command iocb object. 7264 * 7265 + * This function issues an abort iocb for the provided command iocb down to 7266 + * the port. Other than the case the outstanding command iocb is an abort 7267 + * request, this function issues abort out unconditionally. This function is 7268 + * called with hbalock held. The function returns 0 when it fails due to 7269 + * memory allocation failure or when the command iocb is an abort request. 7270 **/ 7271 + static int 7272 + lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7273 struct lpfc_iocbq *cmdiocb) 7274 { 7275 struct lpfc_vport *vport = cmdiocb->vport; 7276 struct lpfc_iocbq *abtsiocbp; 7277 IOCB_t *icmd = NULL; 7278 IOCB_t *iabt = NULL; 7279 + int retval; 7280 7281 /* 7282 * There are certain command types we don't want to abort. 
And we ··· 7287 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7288 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7289 return 0; 7290 7291 /* issue ABTS for this IOCB based on iotag */ 7292 abtsiocbp = __lpfc_sli_get_iocbq(phba); ··· 7344 7345 if (retval) 7346 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7347 + 7348 + /* 7349 + * Caller to this routine should check for IOCB_ERROR 7350 + * and handle it properly. This routine no longer removes 7351 + * iocb off txcmplq and call compl in case of IOCB_ERROR. 7352 + */ 7353 + return retval; 7354 + } 7355 + 7356 + /** 7357 + * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 7358 + * @phba: Pointer to HBA context object. 7359 + * @pring: Pointer to driver SLI ring object. 7360 + * @cmdiocb: Pointer to driver command iocb object. 7361 + * 7362 + * This function issues an abort iocb for the provided command iocb. In case 7363 + * of unloading, the abort iocb will not be issued to commands on the ELS 7364 + * ring. Instead, the callback function shall be changed to those commands 7365 + * so that nothing happens when them finishes. This function is called with 7366 + * hbalock held. The function returns 0 when the command iocb is an abort 7367 + * request. 7368 + **/ 7369 + int 7370 + lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7371 + struct lpfc_iocbq *cmdiocb) 7372 + { 7373 + struct lpfc_vport *vport = cmdiocb->vport; 7374 + int retval = IOCB_ERROR; 7375 + IOCB_t *icmd = NULL; 7376 + 7377 + /* 7378 + * There are certain command types we don't want to abort. And we 7379 + * don't want to abort commands that are already in the process of 7380 + * being aborted. 
7381 + */ 7382 + icmd = &cmdiocb->iocb; 7383 + if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 7384 + icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7385 + (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7386 + return 0; 7387 + 7388 + /* 7389 + * If we're unloading, don't abort iocb on the ELS ring, but change 7390 + * the callback so that nothing happens when it finishes. 7391 + */ 7392 + if ((vport->load_flag & FC_UNLOADING) && 7393 + (pring->ringno == LPFC_ELS_RING)) { 7394 + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 7395 + cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 7396 + else 7397 + cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 7398 + goto abort_iotag_exit; 7399 + } 7400 + 7401 + /* Now, we try to issue the abort to the cmdiocb out */ 7402 + retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 7403 + 7404 abort_iotag_exit: 7405 /* 7406 * Caller to this routine should check for IOCB_ERROR ··· 7351 * iocb off txcmplq and call compl in case of IOCB_ERROR. 7352 */ 7353 return retval; 7354 + } 7355 + 7356 + /** 7357 + * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring 7358 + * @phba: Pointer to HBA context object. 7359 + * @pring: Pointer to driver SLI ring object. 7360 + * 7361 + * This function aborts all iocbs in the given ring and frees all the iocb 7362 + * objects in txq. This function issues abort iocbs unconditionally for all 7363 + * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed 7364 + * to complete before the return of this function. The caller is not required 7365 + * to hold any locks. 
7366 + **/ 7367 + static void 7368 + lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 7369 + { 7370 + LIST_HEAD(completions); 7371 + struct lpfc_iocbq *iocb, *next_iocb; 7372 + 7373 + if (pring->ringno == LPFC_ELS_RING) 7374 + lpfc_fabric_abort_hba(phba); 7375 + 7376 + spin_lock_irq(&phba->hbalock); 7377 + 7378 + /* Take off all the iocbs on txq for cancelling */ 7379 + list_splice_init(&pring->txq, &completions); 7380 + pring->txq_cnt = 0; 7381 + 7382 + /* Next issue ABTS for everything on the txcmplq */ 7383 + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 7384 + lpfc_sli_abort_iotag_issue(phba, pring, iocb); 7385 + 7386 + spin_unlock_irq(&phba->hbalock); 7387 + 7388 + /* Cancel all the IOCBs from the completions list */ 7389 + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7390 + IOERR_SLI_ABORTED); 7391 + } 7392 + 7393 + /** 7394 + * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 7395 + * @phba: pointer to lpfc HBA data structure. 7396 + * 7397 + * This routine will abort all pending and outstanding iocbs to an HBA. 7398 + **/ 7399 + void 7400 + lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 7401 + { 7402 + struct lpfc_sli *psli = &phba->sli; 7403 + struct lpfc_sli_ring *pring; 7404 + int i; 7405 + 7406 + for (i = 0; i < psli->num_rings; i++) { 7407 + pring = &psli->ring[i]; 7408 + lpfc_sli_iocb_ring_abort(phba, pring); 7409 + } 7410 } 7411 7412 /**
+3
drivers/scsi/lpfc/lpfc_sli4.h
··· 19 *******************************************************************/ 20 21 #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 22 #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 23 #define LPFC_GET_QE_REL_INT 32 24 #define LPFC_RPI_LOW_WATER_MARK 10
··· 19 *******************************************************************/ 20 21 #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 22 + #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 23 + #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 24 + #define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 25 #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 26 #define LPFC_GET_QE_REL_INT 32 27 #define LPFC_RPI_LOW_WATER_MARK 10