[SCSI] lpfc 8.3.18: Add logic to detect last devloss timeout

Added driver logic to detect the devloss timeout of the last remote node
that was still using the in-use FCF. At that point, the driver sets the
last in-use remote node devloss timeout flag (HBA_DEVLOSS_TMO) if it is
not already set, and takes the proper action on the in-use FCF and on
recovering an FCF from the firmware, depending on the state the driver's
FIP engine is in.

An eligible FCF is then found through an FCF table rescan, or through the
next new FCF event if the rescan turns up no eligible FCF. A successful
FLOGI into an FCF clears the HBA_DEVLOSS_TMO flag, indicating successful
recovery from the devloss timeout.
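
The recovery side is visible in the lpfc_hbadisc.c and lpfc_els.c hunks
below; again a trimmed sketch, not the verbatim code:

	/* lpfc_sli4_fcf_rr_next_proc(): roundrobin bmask exhausted */
	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE &&
	    (phba->hba_flag & HBA_DEVLOSS_TMO)) {
		/* devloss tmo pending: unregister in-use FCF, rescan table */
		lpfc_unregister_fcf_rescan(phba);
		return 1;	/* stop retrying FLOGI on the current FCF */
	}

	/* FLOGI completion path (lpfc_els.c): FLOGI into an FCF succeeded */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
	spin_unlock_irq(&phba->hbalock);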

[jejb: add delay.h include to lpfc_hbadisc.c to fix ppc compile]
Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>


+328 -205
+5 -3
drivers/scsi/lpfc/lpfc.h
···
  #define ELS_XRI_ABORT_EVENT	0x40
  #define ASYNC_EVENT		0x80
  #define LINK_DISABLED		0x100 /* Link disabled by user */
- #define FCF_DISC_INPROGRESS	0x200 /* FCF discovery in progress */
- #define HBA_FIP_SUPPORT	0x400 /* FIP support in HBA */
- #define HBA_AER_ENABLED	0x800 /* AER enabled with HBA */
+ #define FCF_TS_INPROG		0x200 /* FCF table scan in progress */
+ #define FCF_RR_INPROG		0x400 /* FCF roundrobin flogi in progress */
+ #define HBA_FIP_SUPPORT	0x800 /* FIP support in HBA */
+ #define HBA_AER_ENABLED	0x1000 /* AER enabled with HBA */
+ #define HBA_DEVLOSS_TMO	0x2000 /* HBA in devloss timeout */
  uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
  struct lpfc_dmabuf slim2p;
+1
drivers/scsi/lpfc/lpfc_crtn.h
···
  uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
  int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
  void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);

  int lpfc_mem_alloc(struct lpfc_hba *, int align);
  void lpfc_mem_free(struct lpfc_hba *);
+11 -43
drivers/scsi/lpfc/lpfc_els.c
··· 795 795 796 796 if (irsp->ulpStatus) { 797 797 /* 798 - * In case of FIP mode, perform round robin FCF failover 798 + * In case of FIP mode, perform roundrobin FCF failover 799 799 * due to new FCF discovery 800 800 */ 801 801 if ((phba->hba_flag & HBA_FIP_SUPPORT) && ··· 803 803 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && 804 804 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { 805 805 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 806 - "2611 FLOGI failed on registered " 807 - "FCF record fcf_index(%d), status: " 808 - "x%x/x%x, tmo:x%x, trying to perform " 809 - "round robin failover\n", 806 + "2611 FLOGI failed on FCF (x%x), " 807 + "status:x%x/x%x, tmo:x%x, perform " 808 + "roundrobin FCF failover\n", 810 809 phba->fcf.current_rec.fcf_indx, 811 810 irsp->ulpStatus, irsp->un.ulpWord[4], 812 811 irsp->ulpTimeout); 813 812 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 814 - if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 815 - /* 816 - * Exhausted the eligible FCF record list, 817 - * fail through to retry FLOGI on current 818 - * FCF record. 819 - */ 820 - lpfc_printf_log(phba, KERN_WARNING, 821 - LOG_FIP | LOG_ELS, 822 - "2760 Completed one round " 823 - "of FLOGI FCF round robin " 824 - "failover list, retry FLOGI " 825 - "on currently registered " 826 - "FCF index:%d\n", 827 - phba->fcf.current_rec.fcf_indx); 828 - } else { 829 - lpfc_printf_log(phba, KERN_INFO, 830 - LOG_FIP | LOG_ELS, 831 - "2794 FLOGI FCF round robin " 832 - "failover to FCF index x%x\n", 833 - fcf_index); 834 - rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, 835 - fcf_index); 836 - if (rc) 837 - lpfc_printf_log(phba, KERN_WARNING, 838 - LOG_FIP | LOG_ELS, 839 - "2761 FLOGI round " 840 - "robin FCF failover " 841 - "read FCF failed " 842 - "rc:x%x, fcf_index:" 843 - "%d\n", rc, 844 - phba->fcf.current_rec.fcf_indx); 845 - else 846 - goto out; 847 - } 813 + rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 814 + if (rc) 815 + goto out; 848 816 } 849 817 850 818 /* FLOGI failure */ ··· 902 934 lpfc_nlp_put(ndlp); 903 935 spin_lock_irq(&phba->hbalock); 904 936 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 937 + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 905 938 spin_unlock_irq(&phba->hbalock); 906 939 goto out; 907 940 } ··· 911 942 if (phba->hba_flag & HBA_FIP_SUPPORT) 912 943 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 913 944 LOG_ELS, 914 - "2769 FLOGI successful on FCF " 915 - "record: current_fcf_index:" 916 - "x%x, terminate FCF round " 917 - "robin failover process\n", 945 + "2769 FLOGI to FCF (x%x) " 946 + "completed successfully\n", 918 947 phba->fcf.current_rec.fcf_indx); 919 948 spin_lock_irq(&phba->hbalock); 920 949 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 950 + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 921 951 spin_unlock_irq(&phba->hbalock); 922 952 goto out; 923 953 }
+265 -86
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 20 20 *******************************************************************/ 21 21 22 22 #include <linux/blkdev.h> 23 + #include <linux/delay.h> 23 24 #include <linux/slab.h> 24 25 #include <linux/pci.h> 25 26 #include <linux/kthread.h> ··· 64 63 static void lpfc_disc_timeout_handler(struct lpfc_vport *); 65 64 static void lpfc_disc_flush_list(struct lpfc_vport *vport); 66 65 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 66 + static int lpfc_fcf_inuse(struct lpfc_hba *); 67 67 68 68 void 69 69 lpfc_terminate_rport_io(struct fc_rport *rport) ··· 162 160 return; 163 161 } 164 162 165 - /* 166 - * This function is called from the worker thread when dev_loss_tmo 167 - * expire. 168 - */ 169 - static void 163 + /** 164 + * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler 165 + * @ndlp: Pointer to remote node object. 166 + * 167 + * This function is called from the worker thread when devloss timeout timer 168 + * expires. For SLI4 host, this routine shall return 1 when at lease one 169 + * remote node, including this @ndlp, is still in use of FCF; otherwise, this 170 + * routine shall return 0 when there is no remote node is still in use of FCF 171 + * when devloss timeout happened to this @ndlp. 172 + **/ 173 + static int 170 174 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 171 175 { 172 176 struct lpfc_rport_data *rdata; ··· 183 175 int put_node; 184 176 int put_rport; 185 177 int warn_on = 0; 178 + int fcf_inuse = 0; 186 179 187 180 rport = ndlp->rport; 188 181 189 182 if (!rport) 190 - return; 183 + return fcf_inuse; 191 184 192 185 rdata = rport->dd_data; 193 186 name = (uint8_t *) &ndlp->nlp_portname; 194 187 vport = ndlp->vport; 195 188 phba = vport->phba; 189 + 190 + if (phba->sli_rev == LPFC_SLI_REV4) 191 + fcf_inuse = lpfc_fcf_inuse(phba); 196 192 197 193 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 198 194 "rport devlosstmo:did:x%x type:x%x id:x%x", ··· 221 209 lpfc_nlp_put(ndlp); 222 210 if (put_rport) 223 211 put_device(&rport->dev); 224 - return; 212 + return fcf_inuse; 225 213 } 226 214 227 215 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { ··· 232 220 *name, *(name+1), *(name+2), *(name+3), 233 221 *(name+4), *(name+5), *(name+6), *(name+7), 234 222 ndlp->nlp_DID); 235 - return; 223 + return fcf_inuse; 236 224 } 237 225 238 226 if (ndlp->nlp_type & NLP_FABRIC) { ··· 245 233 lpfc_nlp_put(ndlp); 246 234 if (put_rport) 247 235 put_device(&rport->dev); 248 - return; 236 + return fcf_inuse; 249 237 } 250 238 251 239 if (ndlp->nlp_sid != NLP_NO_SID) { ··· 292 280 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) 293 281 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 294 282 283 + return fcf_inuse; 284 + } 285 + 286 + /** 287 + * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler 288 + * @phba: Pointer to hba context object. 289 + * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. 290 + * @nlp_did: remote node identifer with devloss timeout. 291 + * 292 + * This function is called from the worker thread after invoking devloss 293 + * timeout handler and releasing the reference count for the ndlp with 294 + * which the devloss timeout was handled for SLI4 host. For the devloss 295 + * timeout of the last remote node which had been in use of FCF, when this 296 + * routine is invoked, it shall be guaranteed that none of the remote are 297 + * in-use of FCF. 
When devloss timeout to the last remote using the FCF, 298 + * if the FIP engine is neither in FCF table scan process nor roundrobin 299 + * failover process, the in-use FCF shall be unregistered. If the FIP 300 + * engine is in FCF discovery process, the devloss timeout state shall 301 + * be set for either the FCF table scan process or roundrobin failover 302 + * process to unregister the in-use FCF. 303 + **/ 304 + static void 305 + lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, 306 + uint32_t nlp_did) 307 + { 308 + /* If devloss timeout happened to a remote node when FCF had no 309 + * longer been in-use, do nothing. 310 + */ 311 + if (!fcf_inuse) 312 + return; 313 + 314 + if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { 315 + spin_lock_irq(&phba->hbalock); 316 + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 317 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 318 + spin_unlock_irq(&phba->hbalock); 319 + return; 320 + } 321 + phba->hba_flag |= HBA_DEVLOSS_TMO; 322 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 323 + "2847 Last remote node (x%x) using " 324 + "FCF devloss tmo\n", nlp_did); 325 + } 326 + if (phba->fcf.fcf_flag & FCF_REDISC_PROG) { 327 + spin_unlock_irq(&phba->hbalock); 328 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 329 + "2868 Devloss tmo to FCF rediscovery " 330 + "in progress\n"); 331 + return; 332 + } 333 + if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { 334 + spin_unlock_irq(&phba->hbalock); 335 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 336 + "2869 Devloss tmo to idle FIP engine, " 337 + "unreg in-use FCF and rescan.\n"); 338 + /* Unregister in-use FCF and rescan */ 339 + lpfc_unregister_fcf_rescan(phba); 340 + return; 341 + } 342 + spin_unlock_irq(&phba->hbalock); 343 + if (phba->hba_flag & FCF_TS_INPROG) 344 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 345 + "2870 FCF table scan in progress\n"); 346 + if (phba->hba_flag & FCF_RR_INPROG) 347 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 348 + "2871 FLOGI roundrobin FCF failover " 349 + "in progress\n"); 350 + } 295 351 lpfc_unregister_unused_fcf(phba); 296 352 } 297 353 ··· 488 408 struct lpfc_work_evt *evtp = NULL; 489 409 struct lpfc_nodelist *ndlp; 490 410 int free_evt; 411 + int fcf_inuse; 412 + uint32_t nlp_did; 491 413 492 414 spin_lock_irq(&phba->hbalock); 493 415 while (!list_empty(&phba->work_list)) { ··· 509 427 break; 510 428 case LPFC_EVT_DEV_LOSS: 511 429 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 512 - lpfc_dev_loss_tmo_handler(ndlp); 430 + fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); 513 431 free_evt = 0; 514 432 /* decrement the node reference count held for 515 433 * this queued work 516 434 */ 435 + nlp_did = ndlp->nlp_DID; 517 436 lpfc_nlp_put(ndlp); 437 + if (phba->sli_rev == LPFC_SLI_REV4) 438 + lpfc_sli4_post_dev_loss_tmo_handler(phba, 439 + fcf_inuse, 440 + nlp_did); 518 441 break; 519 442 case LPFC_EVT_ONLINE: 520 443 if (phba->link_state < LPFC_LINK_DOWN) ··· 1108 1021 "2017 REG_FCFI mbxStatus error x%x " 1109 1022 "HBA state x%x\n", 1110 1023 mboxq->u.mb.mbxStatus, vport->port_state); 1111 - mempool_free(mboxq, phba->mbox_mem_pool); 1112 - return; 1024 + goto fail_out; 1113 1025 } 1114 1026 1115 1027 /* Start FCoE discovery by sending a FLOGI. */ ··· 1117 1031 spin_lock_irq(&phba->hbalock); 1118 1032 phba->fcf.fcf_flag |= FCF_REGISTERED; 1119 1033 spin_unlock_irq(&phba->hbalock); 1034 + 1120 1035 /* If there is a pending FCoE event, restart FCF table scan. 
*/ 1121 - if (lpfc_check_pending_fcoe_event(phba, 1)) { 1122 - mempool_free(mboxq, phba->mbox_mem_pool); 1123 - return; 1124 - } 1036 + if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1037 + goto fail_out; 1038 + 1039 + /* Mark successful completion of FCF table scan */ 1125 1040 spin_lock_irq(&phba->hbalock); 1126 1041 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1127 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1128 - spin_unlock_irq(&phba->hbalock); 1129 - if (vport->port_state != LPFC_FLOGI) 1042 + phba->hba_flag &= ~FCF_TS_INPROG; 1043 + if (vport->port_state != LPFC_FLOGI) { 1044 + phba->hba_flag |= FCF_RR_INPROG; 1045 + spin_unlock_irq(&phba->hbalock); 1130 1046 lpfc_initial_flogi(vport); 1047 + goto out; 1048 + } 1049 + spin_unlock_irq(&phba->hbalock); 1050 + goto out; 1131 1051 1052 + fail_out: 1053 + spin_lock_irq(&phba->hbalock); 1054 + phba->hba_flag &= ~FCF_RR_INPROG; 1055 + spin_unlock_irq(&phba->hbalock); 1056 + out: 1132 1057 mempool_free(mboxq, phba->mbox_mem_pool); 1133 - return; 1134 1058 } 1135 1059 1136 1060 /** ··· 1337 1241 int rc; 1338 1242 1339 1243 spin_lock_irq(&phba->hbalock); 1340 - 1341 1244 /* If the FCF is not availabe do nothing. */ 1342 1245 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1343 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1246 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1344 1247 spin_unlock_irq(&phba->hbalock); 1345 1248 return; 1346 1249 } ··· 1347 1252 /* The FCF is already registered, start discovery */ 1348 1253 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1349 1254 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1350 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1351 - spin_unlock_irq(&phba->hbalock); 1352 - if (phba->pport->port_state != LPFC_FLOGI) 1255 + phba->hba_flag &= ~FCF_TS_INPROG; 1256 + if (phba->pport->port_state != LPFC_FLOGI) { 1257 + phba->hba_flag |= FCF_RR_INPROG; 1258 + spin_unlock_irq(&phba->hbalock); 1353 1259 lpfc_initial_flogi(phba->pport); 1260 + return; 1261 + } 1262 + spin_unlock_irq(&phba->hbalock); 1354 1263 return; 1355 1264 } 1356 1265 spin_unlock_irq(&phba->hbalock); 1357 1266 1358 - fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1359 - GFP_KERNEL); 1267 + fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1360 1268 if (!fcf_mbxq) { 1361 1269 spin_lock_irq(&phba->hbalock); 1362 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1270 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1363 1271 spin_unlock_irq(&phba->hbalock); 1364 1272 return; 1365 1273 } ··· 1373 1275 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1374 1276 if (rc == MBX_NOT_FINISHED) { 1375 1277 spin_lock_irq(&phba->hbalock); 1376 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1278 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1377 1279 spin_unlock_irq(&phba->hbalock); 1378 1280 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1379 1281 } ··· 1591 1493 * FCF discovery, no need to restart FCF discovery. 
1592 1494 */ 1593 1495 if ((phba->link_state >= LPFC_LINK_UP) && 1594 - (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1496 + (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1595 1497 return 0; 1596 1498 1597 1499 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, ··· 1615 1517 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 1616 1518 } else { 1617 1519 /* 1618 - * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1520 + * Do not continue FCF discovery and clear FCF_TS_INPROG 1619 1521 * flag 1620 1522 */ 1621 1523 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1622 1524 "2833 Stop FCF discovery process due to link " 1623 1525 "state change (x%x)\n", phba->link_state); 1624 1526 spin_lock_irq(&phba->hbalock); 1625 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1527 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1626 1528 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1627 1529 spin_unlock_irq(&phba->hbalock); 1628 1530 } ··· 1827 1729 } 1828 1730 1829 1731 /** 1732 + * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf 1733 + * @vport: Pointer to vport object. 1734 + * @fcf_index: index to next fcf. 1735 + * 1736 + * This function processing the roundrobin fcf failover to next fcf index. 1737 + * When this function is invoked, there will be a current fcf registered 1738 + * for flogi. 1739 + * Return: 0 for continue retrying flogi on currently registered fcf; 1740 + * 1 for stop flogi on currently registered fcf; 1741 + */ 1742 + int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) 1743 + { 1744 + struct lpfc_hba *phba = vport->phba; 1745 + int rc; 1746 + 1747 + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 1748 + spin_lock_irq(&phba->hbalock); 1749 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 1750 + spin_unlock_irq(&phba->hbalock); 1751 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1752 + "2872 Devloss tmo with no eligible " 1753 + "FCF, unregister in-use FCF (x%x) " 1754 + "and rescan FCF table\n", 1755 + phba->fcf.current_rec.fcf_indx); 1756 + lpfc_unregister_fcf_rescan(phba); 1757 + goto stop_flogi_current_fcf; 1758 + } 1759 + /* Mark the end to FLOGI roundrobin failover */ 1760 + phba->hba_flag &= ~FCF_RR_INPROG; 1761 + /* Allow action to new fcf asynchronous event */ 1762 + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 1763 + spin_unlock_irq(&phba->hbalock); 1764 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1765 + "2865 No FCF available, stop roundrobin FCF " 1766 + "failover and change port state:x%x/x%x\n", 1767 + phba->pport->port_state, LPFC_VPORT_UNKNOWN); 1768 + phba->pport->port_state = LPFC_VPORT_UNKNOWN; 1769 + goto stop_flogi_current_fcf; 1770 + } else { 1771 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, 1772 + "2794 Try FLOGI roundrobin FCF failover to " 1773 + "(x%x)\n", fcf_index); 1774 + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); 1775 + if (rc) 1776 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1777 + "2761 FLOGI roundrobin FCF failover " 1778 + "failed (rc:x%x) to read FCF (x%x)\n", 1779 + rc, phba->fcf.current_rec.fcf_indx); 1780 + else 1781 + goto stop_flogi_current_fcf; 1782 + } 1783 + return 0; 1784 + 1785 + stop_flogi_current_fcf: 1786 + lpfc_can_disctmo(vport); 1787 + return 1; 1788 + } 1789 + 1790 + /** 1830 1791 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 1831 1792 * @phba: pointer to lpfc hba data structure. 1832 1793 * @mboxq: pointer to mailbox object. 
··· 1913 1756 int rc; 1914 1757 1915 1758 /* If there is pending FCoE event restart FCF table scan */ 1916 - if (lpfc_check_pending_fcoe_event(phba, 0)) { 1759 + if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { 1917 1760 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1918 1761 return; 1919 1762 } ··· 1922 1765 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1923 1766 &next_fcf_index); 1924 1767 if (!new_fcf_record) { 1925 - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1768 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1926 1769 "2765 Mailbox command READ_FCF_RECORD " 1927 1770 "failed to retrieve a FCF record.\n"); 1928 1771 /* Let next new FCF event trigger fast failover */ 1929 1772 spin_lock_irq(&phba->hbalock); 1930 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1773 + phba->hba_flag &= ~FCF_TS_INPROG; 1931 1774 spin_unlock_irq(&phba->hbalock); 1932 1775 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1933 1776 return; ··· 1944 1787 /* 1945 1788 * If the fcf record does not match with connect list entries 1946 1789 * read the next entry; otherwise, this is an eligible FCF 1947 - * record for round robin FCF failover. 1790 + * record for roundrobin FCF failover. 1948 1791 */ 1949 1792 if (!rc) { 1950 1793 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1951 - "2781 FCF record (x%x) failed FCF " 1952 - "connection list check, fcf_avail:x%x, " 1953 - "fcf_valid:x%x\n", 1794 + "2781 FCF (x%x) failed connection " 1795 + "list check: (x%x/x%x)\n", 1954 1796 bf_get(lpfc_fcf_record_fcf_index, 1955 1797 new_fcf_record), 1956 1798 bf_get(lpfc_fcf_record_fcf_avail, ··· 1979 1823 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 1980 1824 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1981 1825 "2835 Invalid in-use FCF " 1982 - "record (x%x) reported, " 1983 - "entering fast FCF failover " 1984 - "mode scanning.\n", 1826 + "(x%x), enter FCF failover " 1827 + "table scan.\n", 1985 1828 phba->fcf.current_rec.fcf_indx); 1986 1829 spin_lock_irq(&phba->hbalock); 1987 1830 phba->fcf.fcf_flag |= FCF_REDISC_FOV; ··· 2125 1970 */ 2126 1971 if (fcf_rec) { 2127 1972 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2128 - "2840 Update current FCF record " 2129 - "with initial FCF record (x%x)\n", 1973 + "2840 Update initial FCF candidate " 1974 + "with FCF (x%x)\n", 2130 1975 bf_get(lpfc_fcf_record_fcf_index, 2131 1976 new_fcf_record)); 2132 1977 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, ··· 2156 2001 */ 2157 2002 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2158 2003 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2159 - "2782 No suitable FCF record " 2160 - "found during this round of " 2161 - "post FCF rediscovery scan: " 2162 - "fcf_evt_tag:x%x, fcf_index: " 2163 - "x%x\n", 2004 + "2782 No suitable FCF found: " 2005 + "(x%x/x%x)\n", 2164 2006 phba->fcoe_eventtag_at_fcf_scan, 2165 2007 bf_get(lpfc_fcf_record_fcf_index, 2166 2008 new_fcf_record)); 2167 - /* 2168 - * Let next new FCF event trigger fast 2169 - * failover 2170 - */ 2171 2009 spin_lock_irq(&phba->hbalock); 2172 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 2010 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 2011 + phba->hba_flag &= ~FCF_TS_INPROG; 2012 + spin_unlock_irq(&phba->hbalock); 2013 + /* Unregister in-use FCF and rescan */ 2014 + lpfc_printf_log(phba, KERN_INFO, 2015 + LOG_FIP, 2016 + "2864 On devloss tmo " 2017 + "unreg in-use FCF and " 2018 + "rescan FCF table\n"); 2019 + lpfc_unregister_fcf_rescan(phba); 2020 + return; 2021 + } 2022 + /* 2023 + * Let next new FCF event trigger fast failover 2024 + */ 2025 + phba->hba_flag &= ~FCF_TS_INPROG; 
2173 2026 spin_unlock_irq(&phba->hbalock); 2174 2027 return; 2175 2028 } ··· 2195 2032 2196 2033 /* Replace in-use record with the new record */ 2197 2034 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2198 - "2842 Replace the current in-use " 2199 - "FCF record (x%x) with failover FCF " 2200 - "record (x%x)\n", 2035 + "2842 Replace in-use FCF (x%x) " 2036 + "with failover FCF (x%x)\n", 2201 2037 phba->fcf.current_rec.fcf_indx, 2202 2038 phba->fcf.failover_rec.fcf_indx); 2203 2039 memcpy(&phba->fcf.current_rec, ··· 2208 2046 * FCF failover. 2209 2047 */ 2210 2048 spin_lock_irq(&phba->hbalock); 2211 - phba->fcf.fcf_flag &= 2212 - ~(FCF_REDISC_FOV | FCF_REDISC_RRU); 2049 + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2213 2050 spin_unlock_irq(&phba->hbalock); 2214 - /* 2215 - * Set up the initial registered FCF index for FLOGI 2216 - * round robin FCF failover. 2217 - */ 2218 - phba->fcf.fcf_rr_init_indx = 2219 - phba->fcf.failover_rec.fcf_indx; 2220 2051 /* Register to the new FCF record */ 2221 2052 lpfc_register_fcf(phba); 2222 2053 } else { ··· 2256 2101 } 2257 2102 2258 2103 /** 2259 - * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler 2104 + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler 2260 2105 * @phba: pointer to lpfc hba data structure. 2261 2106 * @mboxq: pointer to mailbox object. 2262 2107 * 2263 - * This is the callback function for FLOGI failure round robin FCF failover 2108 + * This is the callback function for FLOGI failure roundrobin FCF failover 2264 2109 * read FCF record mailbox command from the eligible FCF record bmask for 2265 2110 * performing the failover. If the FCF read back is not valid/available, it 2266 2111 * fails through to retrying FLOGI to the currently registered FCF again. 
··· 2275 2120 { 2276 2121 struct fcf_record *new_fcf_record; 2277 2122 uint32_t boot_flag, addr_mode; 2278 - uint16_t next_fcf_index; 2123 + uint16_t next_fcf_index, fcf_index; 2279 2124 uint16_t current_fcf_index; 2280 2125 uint16_t vlan_id; 2126 + int rc; 2281 2127 2282 - /* If link state is not up, stop the round robin failover process */ 2128 + /* If link state is not up, stop the roundrobin failover process */ 2283 2129 if (phba->link_state < LPFC_LINK_UP) { 2284 2130 spin_lock_irq(&phba->hbalock); 2285 2131 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2132 + phba->hba_flag &= ~FCF_RR_INPROG; 2286 2133 spin_unlock_irq(&phba->hbalock); 2287 - lpfc_sli4_mbox_cmd_free(phba, mboxq); 2288 - return; 2134 + goto out; 2289 2135 } 2290 2136 2291 2137 /* Parse the FCF record from the non-embedded mailbox command */ ··· 2296 2140 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2297 2141 "2766 Mailbox command READ_FCF_RECORD " 2298 2142 "failed to retrieve a FCF record.\n"); 2299 - goto out; 2143 + goto error_out; 2300 2144 } 2301 2145 2302 2146 /* Get the needed parameters from FCF record */ 2303 - lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2304 - &addr_mode, &vlan_id); 2147 + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2148 + &addr_mode, &vlan_id); 2305 2149 2306 2150 /* Log the FCF record information if turned on */ 2307 2151 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2308 2152 next_fcf_index); 2309 2153 2154 + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2155 + if (!rc) { 2156 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2157 + "2848 Remove ineligible FCF (x%x) from " 2158 + "from roundrobin bmask\n", fcf_index); 2159 + /* Clear roundrobin bmask bit for ineligible FCF */ 2160 + lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); 2161 + /* Perform next round of roundrobin FCF failover */ 2162 + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 2163 + rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); 2164 + if (rc) 2165 + goto out; 2166 + goto error_out; 2167 + } 2168 + 2169 + if (fcf_index == phba->fcf.current_rec.fcf_indx) { 2170 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2171 + "2760 Perform FLOGI roundrobin FCF failover: " 2172 + "FCF (x%x) back to FCF (x%x)\n", 2173 + phba->fcf.current_rec.fcf_indx, fcf_index); 2174 + /* Wait 500 ms before retrying FLOGI to current FCF */ 2175 + msleep(500); 2176 + lpfc_initial_flogi(phba->pport); 2177 + goto out; 2178 + } 2179 + 2310 2180 /* Upload new FCF record to the failover FCF record */ 2311 2181 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2312 - "2834 Update the current FCF record (x%x) " 2313 - "with the next FCF record (x%x)\n", 2314 - phba->fcf.failover_rec.fcf_indx, 2315 - bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 2182 + "2834 Update current FCF (x%x) with new FCF (x%x)\n", 2183 + phba->fcf.failover_rec.fcf_indx, fcf_index); 2316 2184 spin_lock_irq(&phba->hbalock); 2317 2185 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2318 2186 new_fcf_record, addr_mode, vlan_id, ··· 2353 2173 sizeof(struct lpfc_fcf_rec)); 2354 2174 2355 2175 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2356 - "2783 FLOGI round robin FCF failover from FCF " 2357 - "(x%x) to FCF (x%x).\n", 2358 - current_fcf_index, 2359 - bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 2176 + "2783 Perform FLOGI roundrobin FCF failover: FCF " 2177 + "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); 2360 2178 2179 + error_out: 2180 + lpfc_register_fcf(phba); 2361 2181 out: 2362 2182 
lpfc_sli4_mbox_cmd_free(phba, mboxq); 2363 - lpfc_register_fcf(phba); 2364 2183 } 2365 2184 2366 2185 /** ··· 2368 2189 * @mboxq: pointer to mailbox object. 2369 2190 * 2370 2191 * This is the callback function of read FCF record mailbox command for 2371 - * updating the eligible FCF bmask for FLOGI failure round robin FCF 2192 + * updating the eligible FCF bmask for FLOGI failure roundrobin FCF 2372 2193 * failover when a new FCF event happened. If the FCF read back is 2373 2194 * valid/available and it passes the connection list check, it updates 2374 - * the bmask for the eligible FCF record for round robin failover. 2195 + * the bmask for the eligible FCF record for roundrobin failover. 2375 2196 */ 2376 2197 void 2377 2198 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) ··· 2813 2634 * and get the FCF Table. 2814 2635 */ 2815 2636 spin_lock_irq(&phba->hbalock); 2816 - if (phba->hba_flag & FCF_DISC_INPROGRESS) { 2637 + if (phba->hba_flag & FCF_TS_INPROG) { 2817 2638 spin_unlock_irq(&phba->hbalock); 2818 2639 return; 2819 2640 }
+18 -23
drivers/scsi/lpfc/lpfc_init.c
··· 2936 2936 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2937 2937 spin_unlock_irq(&phba->hbalock); 2938 2938 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2939 - "2776 FCF rediscover wait timer expired, post " 2940 - "a worker thread event for FCF table scan\n"); 2939 + "2776 FCF rediscover quiescent timer expired\n"); 2941 2940 /* wake up worker thread */ 2942 2941 lpfc_worker_wake_up(phba); 2943 2942 } ··· 3311 3312 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3312 3313 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3313 3314 LOG_DISCOVERY, 3314 - "2546 New FCF found event: " 3315 - "evt_tag:x%x, fcf_index:x%x\n", 3315 + "2546 New FCF event, evt_tag:x%x, " 3316 + "index:x%x\n", 3316 3317 acqe_fcoe->event_tag, 3317 3318 acqe_fcoe->index); 3318 3319 else 3319 3320 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3320 3321 LOG_DISCOVERY, 3321 - "2788 FCF parameter modified event: " 3322 - "evt_tag:x%x, fcf_index:x%x\n", 3322 + "2788 FCF param modified event, " 3323 + "evt_tag:x%x, index:x%x\n", 3323 3324 acqe_fcoe->event_tag, 3324 3325 acqe_fcoe->index); 3325 3326 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3326 3327 /* 3327 3328 * During period of FCF discovery, read the FCF 3328 3329 * table record indexed by the event to update 3329 - * FCF round robin failover eligible FCF bmask. 3330 + * FCF roundrobin failover eligible FCF bmask. 3330 3331 */ 3331 3332 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3332 3333 LOG_DISCOVERY, 3333 - "2779 Read new FCF record with " 3334 - "fcf_index:x%x for updating FCF " 3335 - "round robin failover bmask\n", 3334 + "2779 Read FCF (x%x) for updating " 3335 + "roundrobin FCF failover bmask\n", 3336 3336 acqe_fcoe->index); 3337 3337 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3338 3338 } 3339 3339 3340 3340 /* If the FCF discovery is in progress, do nothing. */ 3341 3341 spin_lock_irq(&phba->hbalock); 3342 - if (phba->hba_flag & FCF_DISC_INPROGRESS) { 3342 + if (phba->hba_flag & FCF_TS_INPROG) { 3343 3343 spin_unlock_irq(&phba->hbalock); 3344 3344 break; 3345 3345 } ··· 3357 3359 3358 3360 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3359 3361 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3360 - "2770 Start FCF table scan due to new FCF " 3361 - "event: evt_tag:x%x, fcf_index:x%x\n", 3362 + "2770 Start FCF table scan per async FCF " 3363 + "event, evt_tag:x%x, index:x%x\n", 3362 3364 acqe_fcoe->event_tag, acqe_fcoe->index); 3363 3365 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3364 3366 LPFC_FCOE_FCF_GET_FIRST); 3365 3367 if (rc) 3366 3368 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3367 3369 "2547 Issue FCF scan read FCF mailbox " 3368 - "command failed 0x%x\n", rc); 3370 + "command failed (x%x)\n", rc); 3369 3371 break; 3370 3372 3371 3373 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: ··· 3377 3379 3378 3380 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3379 3381 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3380 - "2549 FCF disconnected from network index 0x%x" 3381 - " tag 0x%x\n", acqe_fcoe->index, 3382 - acqe_fcoe->event_tag); 3382 + "2549 FCF (x%x) disconnected from network, " 3383 + "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3383 3384 /* 3384 3385 * If we are in the middle of FCF failover process, clear 3385 3386 * the corresponding FCF bit in the roundrobin bitmap. 
··· 3492 3495 spin_unlock_irq(&phba->hbalock); 3493 3496 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3494 3497 LOG_DISCOVERY, 3495 - "2773 Start FCF fast failover due " 3496 - "to CVL event: evt_tag:x%x\n", 3497 - acqe_fcoe->event_tag); 3498 + "2773 Start FCF failover per CVL, " 3499 + "evt_tag:x%x\n", acqe_fcoe->event_tag); 3498 3500 rc = lpfc_sli4_redisc_fcf_table(phba); 3499 3501 if (rc) { 3500 3502 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | ··· 3643 3647 3644 3648 /* Scan FCF table from the first entry to re-discover SAN */ 3645 3649 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3646 - "2777 Start FCF table scan after FCF " 3647 - "rediscovery quiescent period over\n"); 3650 + "2777 Start post-quiescent FCF table scan\n"); 3648 3651 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3649 3652 if (rc) 3650 3653 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, ··· 4161 4166 goto out_free_active_sgl; 4162 4167 } 4163 4168 4164 - /* Allocate eligible FCF bmask memory for FCF round robin failover */ 4169 + /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 4165 4170 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4166 4171 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4167 4172 GFP_KERNEL);
+24 -48
drivers/scsi/lpfc/lpfc_sli.c
··· 5921 5921 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 5922 5922 * @phba: Pointer to HBA context object. 5923 5923 * 5924 - * This routine performs a round robin SCSI command to SLI4 FCP WQ index 5924 + * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 5925 5925 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 5926 5926 * held. 5927 5927 * ··· 12242 12242 /* Issue the mailbox command asynchronously */ 12243 12243 mboxq->vport = phba->pport; 12244 12244 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 12245 + 12246 + spin_lock_irq(&phba->hbalock); 12247 + phba->hba_flag |= FCF_TS_INPROG; 12248 + spin_unlock_irq(&phba->hbalock); 12249 + 12245 12250 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12246 12251 if (rc == MBX_NOT_FINISHED) 12247 12252 error = -EIO; 12248 12253 else { 12249 - spin_lock_irq(&phba->hbalock); 12250 - phba->hba_flag |= FCF_DISC_INPROGRESS; 12251 - spin_unlock_irq(&phba->hbalock); 12252 12254 /* Reset eligible FCF count for new scan */ 12253 12255 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 12254 12256 phba->fcf.eligible_fcf_cnt = 0; ··· 12260 12258 if (error) { 12261 12259 if (mboxq) 12262 12260 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12263 - /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ 12261 + /* FCF scan failed, clear FCF_TS_INPROG flag */ 12264 12262 spin_lock_irq(&phba->hbalock); 12265 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 12263 + phba->hba_flag &= ~FCF_TS_INPROG; 12266 12264 spin_unlock_irq(&phba->hbalock); 12267 12265 } 12268 12266 return error; 12269 12267 } 12270 12268 12271 12269 /** 12272 - * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. 12270 + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 12273 12271 * @phba: pointer to lpfc hba data structure. 12274 12272 * @fcf_index: FCF table entry offset. 12275 12273 * 12276 12274 * This routine is invoked to read an FCF record indicated by @fcf_index 12277 - * and to use it for FLOGI round robin FCF failover. 12275 + * and to use it for FLOGI roundrobin FCF failover. 12278 12276 * 12279 12277 * Return 0 if the mailbox command is submitted sucessfully, none 0 12280 12278 * otherwise. ··· 12320 12318 * @fcf_index: FCF table entry offset. 12321 12319 * 12322 12320 * This routine is invoked to read an FCF record indicated by @fcf_index to 12323 - * determine whether it's eligible for FLOGI round robin failover list. 12321 + * determine whether it's eligible for FLOGI roundrobin failover list. 12324 12322 * 12325 12323 * Return 0 if the mailbox command is submitted sucessfully, none 0 12326 12324 * otherwise. ··· 12366 12364 * 12367 12365 * This routine is to get the next eligible FCF record index in a round 12368 12366 * robin fashion. If the next eligible FCF record index equals to the 12369 - * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12367 + * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12370 12368 * shall be returned, otherwise, the next eligible FCF record's index 12371 12369 * shall be returned. 
12372 12370 **/ ··· 12394 12392 return LPFC_FCOE_FCF_NEXT_NONE; 12395 12393 } 12396 12394 12397 - /* Check roundrobin failover index bmask stop condition */ 12398 - if (next_fcf_index == phba->fcf.fcf_rr_init_indx) { 12399 - if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) { 12400 - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 12401 - "2847 Round robin failover FCF index " 12402 - "search hit stop condition:x%x\n", 12403 - next_fcf_index); 12404 - return LPFC_FCOE_FCF_NEXT_NONE; 12405 - } 12406 - /* The roundrobin failover index bmask updated, start over */ 12407 - lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12408 - "2848 Round robin failover FCF index bmask " 12409 - "updated, start over\n"); 12410 - spin_lock_irq(&phba->hbalock); 12411 - phba->fcf.fcf_flag &= ~FCF_REDISC_RRU; 12412 - spin_unlock_irq(&phba->hbalock); 12413 - return phba->fcf.fcf_rr_init_indx; 12414 - } 12415 - 12416 12395 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12417 - "2845 Get next round robin failover " 12418 - "FCF index x%x\n", next_fcf_index); 12396 + "2845 Get next roundrobin failover FCF (x%x)\n", 12397 + next_fcf_index); 12398 + 12419 12399 return next_fcf_index; 12420 12400 } 12421 12401 ··· 12406 12422 * @phba: pointer to lpfc hba data structure. 12407 12423 * 12408 12424 * This routine sets the FCF record index in to the eligible bmask for 12409 - * round robin failover search. It checks to make sure that the index 12425 + * roundrobin failover search. It checks to make sure that the index 12410 12426 * does not go beyond the range of the driver allocated bmask dimension 12411 12427 * before setting the bit. 12412 12428 * ··· 12418 12434 { 12419 12435 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12420 12436 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12421 - "2610 HBA FCF index reached driver's " 12422 - "book keeping dimension: fcf_index:%d, " 12423 - "driver_bmask_max:%d\n", 12437 + "2610 FCF (x%x) reached driver's book " 12438 + "keeping dimension:x%x\n", 12424 12439 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12425 12440 return -EINVAL; 12426 12441 } 12427 12442 /* Set the eligible FCF record index bmask */ 12428 12443 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12429 12444 12430 - /* Set the roundrobin index bmask updated */ 12431 - spin_lock_irq(&phba->hbalock); 12432 - phba->fcf.fcf_flag |= FCF_REDISC_RRU; 12433 - spin_unlock_irq(&phba->hbalock); 12434 - 12435 12445 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12436 - "2790 Set FCF index x%x to round robin failover " 12446 + "2790 Set FCF (x%x) to roundrobin FCF failover " 12437 12447 "bmask\n", fcf_index); 12438 12448 12439 12449 return 0; ··· 12438 12460 * @phba: pointer to lpfc hba data structure. 12439 12461 * 12440 12462 * This routine clears the FCF record index from the eligible bmask for 12441 - * round robin failover search. It checks to make sure that the index 12463 + * roundrobin failover search. It checks to make sure that the index 12442 12464 * does not go beyond the range of the driver allocated bmask dimension 12443 12465 * before clearing the bit. 
12444 12466 **/ ··· 12447 12469 { 12448 12470 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12449 12471 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12450 - "2762 HBA FCF index goes beyond driver's " 12451 - "book keeping dimension: fcf_index:%d, " 12452 - "driver_bmask_max:%d\n", 12472 + "2762 FCF (x%x) reached driver's book " 12473 + "keeping dimension:x%x\n", 12453 12474 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12454 12475 return; 12455 12476 } ··· 12456 12479 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12457 12480 12458 12481 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12459 - "2791 Clear FCF index x%x from round robin failover " 12482 + "2791 Clear FCF (x%x) from roundrobin failover " 12460 12483 "bmask\n", fcf_index); 12461 12484 } 12462 12485 ··· 12507 12530 } 12508 12531 } else { 12509 12532 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12510 - "2775 Start FCF rediscovery quiescent period " 12511 - "wait timer before scaning FCF table\n"); 12533 + "2775 Start FCF rediscover quiescent timer\n"); 12512 12534 /* 12513 12535 * Start FCF rediscovery wait timer for pending FCF 12514 12536 * before rescan FCF record table.
+4 -2
drivers/scsi/lpfc/lpfc_sli4.h
···
  #define LPFC_GET_QE_REL_INT		32
  #define LPFC_RPI_LOW_WATER_MARK	10

+ #define LPFC_UNREG_FCF		1
+ #define LPFC_SKIP_UNREG_FCF	0
+
  /* Amount of time in seconds for waiting FCF rediscovery to complete */
  #define LPFC_FCF_REDISCOVER_WAIT_TMO	2000 /* msec */

···
  #define FCF_REDISC_PEND	0x80 /* FCF rediscovery pending */
  #define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
  #define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
- #define FCF_REDISC_RRU	0x400 /* Roundrobin bitmap updated */
+ #define FCF_REDISC_PROG	(FCF_REDISC_PEND | FCF_REDISC_EVT)
  uint32_t addr_mode;
- uint16_t fcf_rr_init_indx;
  uint32_t eligible_fcf_cnt;
  struct lpfc_fcf_rec current_rec;
  struct lpfc_fcf_rec failover_rec;