[SCSI] lpfc 8.3.18: Add logic to detect last devloss timeout

Added driver logic to detect the devloss timeout of the last remote node
that was still in use of the FCF. At that point, the driver sets the last
in-use remote node devloss timeout flag if it was not already set, and
performs the proper action on the in-use FCF and recovery of the FCF from
firmware, depending on the state the driver's FIP engine is in.

An eligible FCF is then found through an FCF table rescan, or through the
next new FCF event when the rescan turns up no eligible FCF. A successful
FLOGI into an FCF clears the HBA_DEVLOSS_TMO flag, indicating successful
recovery from the devloss timeout.

[jejb: add delay.h include to lpfc_hbadisc.c to fix ppc compile]
Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

authored by

James Smart and committed by
James Bottomley
a93ff37a 12265f68

+328 -205
+5 -3
drivers/scsi/lpfc/lpfc.h
··· 552 #define ELS_XRI_ABORT_EVENT 0x40 553 #define ASYNC_EVENT 0x80 554 #define LINK_DISABLED 0x100 /* Link disabled by user */ 555 - #define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ 556 - #define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */ 557 - #define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ 558 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 559 struct lpfc_dmabuf slim2p; 560
··· 552 #define ELS_XRI_ABORT_EVENT 0x40 553 #define ASYNC_EVENT 0x80 554 #define LINK_DISABLED 0x100 /* Link disabled by user */ 555 + #define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ 556 + #define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ 557 + #define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ 558 + #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ 559 + #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ 560 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 561 struct lpfc_dmabuf slim2p; 562
+1
drivers/scsi/lpfc/lpfc_crtn.h
··· 229 uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 230 int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 231 void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 232 233 int lpfc_mem_alloc(struct lpfc_hba *, int align); 234 void lpfc_mem_free(struct lpfc_hba *);
··· 229 uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 230 int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 231 void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 232 + int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); 233 234 int lpfc_mem_alloc(struct lpfc_hba *, int align); 235 void lpfc_mem_free(struct lpfc_hba *);
+11 -43
drivers/scsi/lpfc/lpfc_els.c
··· 795 796 if (irsp->ulpStatus) { 797 /* 798 - * In case of FIP mode, perform round robin FCF failover 799 * due to new FCF discovery 800 */ 801 if ((phba->hba_flag & HBA_FIP_SUPPORT) && ··· 803 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && 804 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { 805 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 806 - "2611 FLOGI failed on registered " 807 - "FCF record fcf_index(%d), status: " 808 - "x%x/x%x, tmo:x%x, trying to perform " 809 - "round robin failover\n", 810 phba->fcf.current_rec.fcf_indx, 811 irsp->ulpStatus, irsp->un.ulpWord[4], 812 irsp->ulpTimeout); 813 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 814 - if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 815 - /* 816 - * Exhausted the eligible FCF record list, 817 - * fail through to retry FLOGI on current 818 - * FCF record. 819 - */ 820 - lpfc_printf_log(phba, KERN_WARNING, 821 - LOG_FIP | LOG_ELS, 822 - "2760 Completed one round " 823 - "of FLOGI FCF round robin " 824 - "failover list, retry FLOGI " 825 - "on currently registered " 826 - "FCF index:%d\n", 827 - phba->fcf.current_rec.fcf_indx); 828 - } else { 829 - lpfc_printf_log(phba, KERN_INFO, 830 - LOG_FIP | LOG_ELS, 831 - "2794 FLOGI FCF round robin " 832 - "failover to FCF index x%x\n", 833 - fcf_index); 834 - rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, 835 - fcf_index); 836 - if (rc) 837 - lpfc_printf_log(phba, KERN_WARNING, 838 - LOG_FIP | LOG_ELS, 839 - "2761 FLOGI round " 840 - "robin FCF failover " 841 - "read FCF failed " 842 - "rc:x%x, fcf_index:" 843 - "%d\n", rc, 844 - phba->fcf.current_rec.fcf_indx); 845 - else 846 - goto out; 847 - } 848 } 849 850 /* FLOGI failure */ ··· 902 lpfc_nlp_put(ndlp); 903 spin_lock_irq(&phba->hbalock); 904 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 905 spin_unlock_irq(&phba->hbalock); 906 goto out; 907 } ··· 911 if (phba->hba_flag & HBA_FIP_SUPPORT) 912 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 913 LOG_ELS, 914 - "2769 FLOGI successful on FCF " 915 - "record: 
current_fcf_index:" 916 - "x%x, terminate FCF round " 917 - "robin failover process\n", 918 phba->fcf.current_rec.fcf_indx); 919 spin_lock_irq(&phba->hbalock); 920 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 921 spin_unlock_irq(&phba->hbalock); 922 goto out; 923 }
··· 795 796 if (irsp->ulpStatus) { 797 /* 798 + * In case of FIP mode, perform roundrobin FCF failover 799 * due to new FCF discovery 800 */ 801 if ((phba->hba_flag & HBA_FIP_SUPPORT) && ··· 803 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && 804 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { 805 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 806 + "2611 FLOGI failed on FCF (x%x), " 807 + "status:x%x/x%x, tmo:x%x, perform " 808 + "roundrobin FCF failover\n", 809 phba->fcf.current_rec.fcf_indx, 810 irsp->ulpStatus, irsp->un.ulpWord[4], 811 irsp->ulpTimeout); 812 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 813 + rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 814 + if (rc) 815 + goto out; 816 } 817 818 /* FLOGI failure */ ··· 934 lpfc_nlp_put(ndlp); 935 spin_lock_irq(&phba->hbalock); 936 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 937 + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 938 spin_unlock_irq(&phba->hbalock); 939 goto out; 940 } ··· 942 if (phba->hba_flag & HBA_FIP_SUPPORT) 943 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 944 LOG_ELS, 945 + "2769 FLOGI to FCF (x%x) " 946 + "completed successfully\n", 947 phba->fcf.current_rec.fcf_indx); 948 spin_lock_irq(&phba->hbalock); 949 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 950 + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 951 spin_unlock_irq(&phba->hbalock); 952 goto out; 953 }
+265 -86
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/slab.h> 24 #include <linux/pci.h> 25 #include <linux/kthread.h> ··· 64 static void lpfc_disc_timeout_handler(struct lpfc_vport *); 65 static void lpfc_disc_flush_list(struct lpfc_vport *vport); 66 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 67 68 void 69 lpfc_terminate_rport_io(struct fc_rport *rport) ··· 162 return; 163 } 164 165 - /* 166 - * This function is called from the worker thread when dev_loss_tmo 167 - * expire. 168 - */ 169 - static void 170 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 171 { 172 struct lpfc_rport_data *rdata; ··· 183 int put_node; 184 int put_rport; 185 int warn_on = 0; 186 187 rport = ndlp->rport; 188 189 if (!rport) 190 - return; 191 192 rdata = rport->dd_data; 193 name = (uint8_t *) &ndlp->nlp_portname; 194 vport = ndlp->vport; 195 phba = vport->phba; 196 197 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 198 "rport devlosstmo:did:x%x type:x%x id:x%x", ··· 221 lpfc_nlp_put(ndlp); 222 if (put_rport) 223 put_device(&rport->dev); 224 - return; 225 } 226 227 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { ··· 232 *name, *(name+1), *(name+2), *(name+3), 233 *(name+4), *(name+5), *(name+6), *(name+7), 234 ndlp->nlp_DID); 235 - return; 236 } 237 238 if (ndlp->nlp_type & NLP_FABRIC) { ··· 245 lpfc_nlp_put(ndlp); 246 if (put_rport) 247 put_device(&rport->dev); 248 - return; 249 } 250 251 if (ndlp->nlp_sid != NLP_NO_SID) { ··· 292 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) 293 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 294 295 lpfc_unregister_unused_fcf(phba); 296 } 297 ··· 488 struct lpfc_work_evt *evtp = NULL; 489 struct lpfc_nodelist *ndlp; 490 int free_evt; 491 492 spin_lock_irq(&phba->hbalock); 493 while (!list_empty(&phba->work_list)) { ··· 509 break; 510 case LPFC_EVT_DEV_LOSS: 511 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 512 - 
lpfc_dev_loss_tmo_handler(ndlp); 513 free_evt = 0; 514 /* decrement the node reference count held for 515 * this queued work 516 */ 517 lpfc_nlp_put(ndlp); 518 break; 519 case LPFC_EVT_ONLINE: 520 if (phba->link_state < LPFC_LINK_DOWN) ··· 1108 "2017 REG_FCFI mbxStatus error x%x " 1109 "HBA state x%x\n", 1110 mboxq->u.mb.mbxStatus, vport->port_state); 1111 - mempool_free(mboxq, phba->mbox_mem_pool); 1112 - return; 1113 } 1114 1115 /* Start FCoE discovery by sending a FLOGI. */ ··· 1117 spin_lock_irq(&phba->hbalock); 1118 phba->fcf.fcf_flag |= FCF_REGISTERED; 1119 spin_unlock_irq(&phba->hbalock); 1120 /* If there is a pending FCoE event, restart FCF table scan. */ 1121 - if (lpfc_check_pending_fcoe_event(phba, 1)) { 1122 - mempool_free(mboxq, phba->mbox_mem_pool); 1123 - return; 1124 - } 1125 spin_lock_irq(&phba->hbalock); 1126 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1127 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1128 - spin_unlock_irq(&phba->hbalock); 1129 - if (vport->port_state != LPFC_FLOGI) 1130 lpfc_initial_flogi(vport); 1131 1132 mempool_free(mboxq, phba->mbox_mem_pool); 1133 - return; 1134 } 1135 1136 /** ··· 1337 int rc; 1338 1339 spin_lock_irq(&phba->hbalock); 1340 - 1341 /* If the FCF is not availabe do nothing. 
*/ 1342 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1343 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1344 spin_unlock_irq(&phba->hbalock); 1345 return; 1346 } ··· 1347 /* The FCF is already registered, start discovery */ 1348 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1349 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1350 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1351 - spin_unlock_irq(&phba->hbalock); 1352 - if (phba->pport->port_state != LPFC_FLOGI) 1353 lpfc_initial_flogi(phba->pport); 1354 return; 1355 } 1356 spin_unlock_irq(&phba->hbalock); 1357 1358 - fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1359 - GFP_KERNEL); 1360 if (!fcf_mbxq) { 1361 spin_lock_irq(&phba->hbalock); 1362 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1363 spin_unlock_irq(&phba->hbalock); 1364 return; 1365 } ··· 1373 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1374 if (rc == MBX_NOT_FINISHED) { 1375 spin_lock_irq(&phba->hbalock); 1376 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1377 spin_unlock_irq(&phba->hbalock); 1378 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1379 } ··· 1591 * FCF discovery, no need to restart FCF discovery. 1592 */ 1593 if ((phba->link_state >= LPFC_LINK_UP) && 1594 - (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1595 return 0; 1596 1597 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, ··· 1615 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 1616 } else { 1617 /* 1618 - * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1619 * flag 1620 */ 1621 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1622 "2833 Stop FCF discovery process due to link " 1623 "state change (x%x)\n", phba->link_state); 1624 spin_lock_irq(&phba->hbalock); 1625 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1626 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1627 spin_unlock_irq(&phba->hbalock); 1628 } ··· 1827 } 1828 1829 /** 1830 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 
1831 * @phba: pointer to lpfc hba data structure. 1832 * @mboxq: pointer to mailbox object. ··· 1913 int rc; 1914 1915 /* If there is pending FCoE event restart FCF table scan */ 1916 - if (lpfc_check_pending_fcoe_event(phba, 0)) { 1917 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1918 return; 1919 } ··· 1922 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1923 &next_fcf_index); 1924 if (!new_fcf_record) { 1925 - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1926 "2765 Mailbox command READ_FCF_RECORD " 1927 "failed to retrieve a FCF record.\n"); 1928 /* Let next new FCF event trigger fast failover */ 1929 spin_lock_irq(&phba->hbalock); 1930 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1931 spin_unlock_irq(&phba->hbalock); 1932 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1933 return; ··· 1944 /* 1945 * If the fcf record does not match with connect list entries 1946 * read the next entry; otherwise, this is an eligible FCF 1947 - * record for round robin FCF failover. 1948 */ 1949 if (!rc) { 1950 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1951 - "2781 FCF record (x%x) failed FCF " 1952 - "connection list check, fcf_avail:x%x, " 1953 - "fcf_valid:x%x\n", 1954 bf_get(lpfc_fcf_record_fcf_index, 1955 new_fcf_record), 1956 bf_get(lpfc_fcf_record_fcf_avail, ··· 1979 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 1980 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1981 "2835 Invalid in-use FCF " 1982 - "record (x%x) reported, " 1983 - "entering fast FCF failover " 1984 - "mode scanning.\n", 1985 phba->fcf.current_rec.fcf_indx); 1986 spin_lock_irq(&phba->hbalock); 1987 phba->fcf.fcf_flag |= FCF_REDISC_FOV; ··· 2125 */ 2126 if (fcf_rec) { 2127 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2128 - "2840 Update current FCF record " 2129 - "with initial FCF record (x%x)\n", 2130 bf_get(lpfc_fcf_record_fcf_index, 2131 new_fcf_record)); 2132 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, ··· 2156 */ 2157 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2158 lpfc_printf_log(phba, 
KERN_WARNING, LOG_FIP, 2159 - "2782 No suitable FCF record " 2160 - "found during this round of " 2161 - "post FCF rediscovery scan: " 2162 - "fcf_evt_tag:x%x, fcf_index: " 2163 - "x%x\n", 2164 phba->fcoe_eventtag_at_fcf_scan, 2165 bf_get(lpfc_fcf_record_fcf_index, 2166 new_fcf_record)); 2167 - /* 2168 - * Let next new FCF event trigger fast 2169 - * failover 2170 - */ 2171 spin_lock_irq(&phba->hbalock); 2172 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 2173 spin_unlock_irq(&phba->hbalock); 2174 return; 2175 } ··· 2195 2196 /* Replace in-use record with the new record */ 2197 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2198 - "2842 Replace the current in-use " 2199 - "FCF record (x%x) with failover FCF " 2200 - "record (x%x)\n", 2201 phba->fcf.current_rec.fcf_indx, 2202 phba->fcf.failover_rec.fcf_indx); 2203 memcpy(&phba->fcf.current_rec, ··· 2208 * FCF failover. 2209 */ 2210 spin_lock_irq(&phba->hbalock); 2211 - phba->fcf.fcf_flag &= 2212 - ~(FCF_REDISC_FOV | FCF_REDISC_RRU); 2213 spin_unlock_irq(&phba->hbalock); 2214 - /* 2215 - * Set up the initial registered FCF index for FLOGI 2216 - * round robin FCF failover. 2217 - */ 2218 - phba->fcf.fcf_rr_init_indx = 2219 - phba->fcf.failover_rec.fcf_indx; 2220 /* Register to the new FCF record */ 2221 lpfc_register_fcf(phba); 2222 } else { ··· 2256 } 2257 2258 /** 2259 - * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler 2260 * @phba: pointer to lpfc hba data structure. 2261 * @mboxq: pointer to mailbox object. 2262 * 2263 - * This is the callback function for FLOGI failure round robin FCF failover 2264 * read FCF record mailbox command from the eligible FCF record bmask for 2265 * performing the failover. If the FCF read back is not valid/available, it 2266 * fails through to retrying FLOGI to the currently registered FCF again. 
··· 2275 { 2276 struct fcf_record *new_fcf_record; 2277 uint32_t boot_flag, addr_mode; 2278 - uint16_t next_fcf_index; 2279 uint16_t current_fcf_index; 2280 uint16_t vlan_id; 2281 2282 - /* If link state is not up, stop the round robin failover process */ 2283 if (phba->link_state < LPFC_LINK_UP) { 2284 spin_lock_irq(&phba->hbalock); 2285 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2286 spin_unlock_irq(&phba->hbalock); 2287 - lpfc_sli4_mbox_cmd_free(phba, mboxq); 2288 - return; 2289 } 2290 2291 /* Parse the FCF record from the non-embedded mailbox command */ ··· 2296 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2297 "2766 Mailbox command READ_FCF_RECORD " 2298 "failed to retrieve a FCF record.\n"); 2299 - goto out; 2300 } 2301 2302 /* Get the needed parameters from FCF record */ 2303 - lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2304 - &addr_mode, &vlan_id); 2305 2306 /* Log the FCF record information if turned on */ 2307 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2308 next_fcf_index); 2309 2310 /* Upload new FCF record to the failover FCF record */ 2311 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2312 - "2834 Update the current FCF record (x%x) " 2313 - "with the next FCF record (x%x)\n", 2314 - phba->fcf.failover_rec.fcf_indx, 2315 - bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 2316 spin_lock_irq(&phba->hbalock); 2317 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2318 new_fcf_record, addr_mode, vlan_id, ··· 2353 sizeof(struct lpfc_fcf_rec)); 2354 2355 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2356 - "2783 FLOGI round robin FCF failover from FCF " 2357 - "(x%x) to FCF (x%x).\n", 2358 - current_fcf_index, 2359 - bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 2360 2361 out: 2362 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2363 - lpfc_register_fcf(phba); 2364 } 2365 2366 /** ··· 2368 * @mboxq: pointer to mailbox object. 
2369 * 2370 * This is the callback function of read FCF record mailbox command for 2371 - * updating the eligible FCF bmask for FLOGI failure round robin FCF 2372 * failover when a new FCF event happened. If the FCF read back is 2373 * valid/available and it passes the connection list check, it updates 2374 - * the bmask for the eligible FCF record for round robin failover. 2375 */ 2376 void 2377 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) ··· 2813 * and get the FCF Table. 2814 */ 2815 spin_lock_irq(&phba->hbalock); 2816 - if (phba->hba_flag & FCF_DISC_INPROGRESS) { 2817 spin_unlock_irq(&phba->hbalock); 2818 return; 2819 }
··· 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 + #include <linux/delay.h> 24 #include <linux/slab.h> 25 #include <linux/pci.h> 26 #include <linux/kthread.h> ··· 63 static void lpfc_disc_timeout_handler(struct lpfc_vport *); 64 static void lpfc_disc_flush_list(struct lpfc_vport *vport); 65 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 66 + static int lpfc_fcf_inuse(struct lpfc_hba *); 67 68 void 69 lpfc_terminate_rport_io(struct fc_rport *rport) ··· 160 return; 161 } 162 163 + /** 164 + * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler 165 + * @ndlp: Pointer to remote node object. 166 + * 167 + * This function is called from the worker thread when devloss timeout timer 168 + * expires. For SLI4 host, this routine shall return 1 when at lease one 169 + * remote node, including this @ndlp, is still in use of FCF; otherwise, this 170 + * routine shall return 0 when there is no remote node is still in use of FCF 171 + * when devloss timeout happened to this @ndlp. 
172 + **/ 173 + static int 174 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 175 { 176 struct lpfc_rport_data *rdata; ··· 175 int put_node; 176 int put_rport; 177 int warn_on = 0; 178 + int fcf_inuse = 0; 179 180 rport = ndlp->rport; 181 182 if (!rport) 183 + return fcf_inuse; 184 185 rdata = rport->dd_data; 186 name = (uint8_t *) &ndlp->nlp_portname; 187 vport = ndlp->vport; 188 phba = vport->phba; 189 + 190 + if (phba->sli_rev == LPFC_SLI_REV4) 191 + fcf_inuse = lpfc_fcf_inuse(phba); 192 193 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 194 "rport devlosstmo:did:x%x type:x%x id:x%x", ··· 209 lpfc_nlp_put(ndlp); 210 if (put_rport) 211 put_device(&rport->dev); 212 + return fcf_inuse; 213 } 214 215 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { ··· 220 *name, *(name+1), *(name+2), *(name+3), 221 *(name+4), *(name+5), *(name+6), *(name+7), 222 ndlp->nlp_DID); 223 + return fcf_inuse; 224 } 225 226 if (ndlp->nlp_type & NLP_FABRIC) { ··· 233 lpfc_nlp_put(ndlp); 234 if (put_rport) 235 put_device(&rport->dev); 236 + return fcf_inuse; 237 } 238 239 if (ndlp->nlp_sid != NLP_NO_SID) { ··· 280 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) 281 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 282 283 + return fcf_inuse; 284 + } 285 + 286 + /** 287 + * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler 288 + * @phba: Pointer to hba context object. 289 + * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. 290 + * @nlp_did: remote node identifer with devloss timeout. 291 + * 292 + * This function is called from the worker thread after invoking devloss 293 + * timeout handler and releasing the reference count for the ndlp with 294 + * which the devloss timeout was handled for SLI4 host. For the devloss 295 + * timeout of the last remote node which had been in use of FCF, when this 296 + * routine is invoked, it shall be guaranteed that none of the remote are 297 + * in-use of FCF. 
When devloss timeout to the last remote using the FCF, 298 + * if the FIP engine is neither in FCF table scan process nor roundrobin 299 + * failover process, the in-use FCF shall be unregistered. If the FIP 300 + * engine is in FCF discovery process, the devloss timeout state shall 301 + * be set for either the FCF table scan process or roundrobin failover 302 + * process to unregister the in-use FCF. 303 + **/ 304 + static void 305 + lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, 306 + uint32_t nlp_did) 307 + { 308 + /* If devloss timeout happened to a remote node when FCF had no 309 + * longer been in-use, do nothing. 310 + */ 311 + if (!fcf_inuse) 312 + return; 313 + 314 + if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { 315 + spin_lock_irq(&phba->hbalock); 316 + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 317 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 318 + spin_unlock_irq(&phba->hbalock); 319 + return; 320 + } 321 + phba->hba_flag |= HBA_DEVLOSS_TMO; 322 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 323 + "2847 Last remote node (x%x) using " 324 + "FCF devloss tmo\n", nlp_did); 325 + } 326 + if (phba->fcf.fcf_flag & FCF_REDISC_PROG) { 327 + spin_unlock_irq(&phba->hbalock); 328 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 329 + "2868 Devloss tmo to FCF rediscovery " 330 + "in progress\n"); 331 + return; 332 + } 333 + if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { 334 + spin_unlock_irq(&phba->hbalock); 335 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 336 + "2869 Devloss tmo to idle FIP engine, " 337 + "unreg in-use FCF and rescan.\n"); 338 + /* Unregister in-use FCF and rescan */ 339 + lpfc_unregister_fcf_rescan(phba); 340 + return; 341 + } 342 + spin_unlock_irq(&phba->hbalock); 343 + if (phba->hba_flag & FCF_TS_INPROG) 344 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 345 + "2870 FCF table scan in progress\n"); 346 + if (phba->hba_flag & FCF_RR_INPROG) 347 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 348 + "2871 
FLOGI roundrobin FCF failover " 349 + "in progress\n"); 350 + } 351 lpfc_unregister_unused_fcf(phba); 352 } 353 ··· 408 struct lpfc_work_evt *evtp = NULL; 409 struct lpfc_nodelist *ndlp; 410 int free_evt; 411 + int fcf_inuse; 412 + uint32_t nlp_did; 413 414 spin_lock_irq(&phba->hbalock); 415 while (!list_empty(&phba->work_list)) { ··· 427 break; 428 case LPFC_EVT_DEV_LOSS: 429 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 430 + fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); 431 free_evt = 0; 432 /* decrement the node reference count held for 433 * this queued work 434 */ 435 + nlp_did = ndlp->nlp_DID; 436 lpfc_nlp_put(ndlp); 437 + if (phba->sli_rev == LPFC_SLI_REV4) 438 + lpfc_sli4_post_dev_loss_tmo_handler(phba, 439 + fcf_inuse, 440 + nlp_did); 441 break; 442 case LPFC_EVT_ONLINE: 443 if (phba->link_state < LPFC_LINK_DOWN) ··· 1021 "2017 REG_FCFI mbxStatus error x%x " 1022 "HBA state x%x\n", 1023 mboxq->u.mb.mbxStatus, vport->port_state); 1024 + goto fail_out; 1025 } 1026 1027 /* Start FCoE discovery by sending a FLOGI. */ ··· 1031 spin_lock_irq(&phba->hbalock); 1032 phba->fcf.fcf_flag |= FCF_REGISTERED; 1033 spin_unlock_irq(&phba->hbalock); 1034 + 1035 /* If there is a pending FCoE event, restart FCF table scan. 
*/ 1036 + if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1037 + goto fail_out; 1038 + 1039 + /* Mark successful completion of FCF table scan */ 1040 spin_lock_irq(&phba->hbalock); 1041 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1042 + phba->hba_flag &= ~FCF_TS_INPROG; 1043 + if (vport->port_state != LPFC_FLOGI) { 1044 + phba->hba_flag |= FCF_RR_INPROG; 1045 + spin_unlock_irq(&phba->hbalock); 1046 lpfc_initial_flogi(vport); 1047 + goto out; 1048 + } 1049 + spin_unlock_irq(&phba->hbalock); 1050 + goto out; 1051 1052 + fail_out: 1053 + spin_lock_irq(&phba->hbalock); 1054 + phba->hba_flag &= ~FCF_RR_INPROG; 1055 + spin_unlock_irq(&phba->hbalock); 1056 + out: 1057 mempool_free(mboxq, phba->mbox_mem_pool); 1058 } 1059 1060 /** ··· 1241 int rc; 1242 1243 spin_lock_irq(&phba->hbalock); 1244 /* If the FCF is not availabe do nothing. */ 1245 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1246 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1247 spin_unlock_irq(&phba->hbalock); 1248 return; 1249 } ··· 1252 /* The FCF is already registered, start discovery */ 1253 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1254 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1255 + phba->hba_flag &= ~FCF_TS_INPROG; 1256 + if (phba->pport->port_state != LPFC_FLOGI) { 1257 + phba->hba_flag |= FCF_RR_INPROG; 1258 + spin_unlock_irq(&phba->hbalock); 1259 lpfc_initial_flogi(phba->pport); 1260 + return; 1261 + } 1262 + spin_unlock_irq(&phba->hbalock); 1263 return; 1264 } 1265 spin_unlock_irq(&phba->hbalock); 1266 1267 + fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1268 if (!fcf_mbxq) { 1269 spin_lock_irq(&phba->hbalock); 1270 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1271 spin_unlock_irq(&phba->hbalock); 1272 return; 1273 } ··· 1275 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1276 if (rc == MBX_NOT_FINISHED) { 1277 spin_lock_irq(&phba->hbalock); 1278 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1279 
spin_unlock_irq(&phba->hbalock); 1280 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1281 } ··· 1493 * FCF discovery, no need to restart FCF discovery. 1494 */ 1495 if ((phba->link_state >= LPFC_LINK_UP) && 1496 + (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1497 return 0; 1498 1499 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, ··· 1517 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 1518 } else { 1519 /* 1520 + * Do not continue FCF discovery and clear FCF_TS_INPROG 1521 * flag 1522 */ 1523 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1524 "2833 Stop FCF discovery process due to link " 1525 "state change (x%x)\n", phba->link_state); 1526 spin_lock_irq(&phba->hbalock); 1527 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1528 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1529 spin_unlock_irq(&phba->hbalock); 1530 } ··· 1729 } 1730 1731 /** 1732 + * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf 1733 + * @vport: Pointer to vport object. 1734 + * @fcf_index: index to next fcf. 1735 + * 1736 + * This function processing the roundrobin fcf failover to next fcf index. 1737 + * When this function is invoked, there will be a current fcf registered 1738 + * for flogi. 
1739 + * Return: 0 for continue retrying flogi on currently registered fcf; 1740 + * 1 for stop flogi on currently registered fcf; 1741 + */ 1742 + int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) 1743 + { 1744 + struct lpfc_hba *phba = vport->phba; 1745 + int rc; 1746 + 1747 + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 1748 + spin_lock_irq(&phba->hbalock); 1749 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 1750 + spin_unlock_irq(&phba->hbalock); 1751 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1752 + "2872 Devloss tmo with no eligible " 1753 + "FCF, unregister in-use FCF (x%x) " 1754 + "and rescan FCF table\n", 1755 + phba->fcf.current_rec.fcf_indx); 1756 + lpfc_unregister_fcf_rescan(phba); 1757 + goto stop_flogi_current_fcf; 1758 + } 1759 + /* Mark the end to FLOGI roundrobin failover */ 1760 + phba->hba_flag &= ~FCF_RR_INPROG; 1761 + /* Allow action to new fcf asynchronous event */ 1762 + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 1763 + spin_unlock_irq(&phba->hbalock); 1764 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1765 + "2865 No FCF available, stop roundrobin FCF " 1766 + "failover and change port state:x%x/x%x\n", 1767 + phba->pport->port_state, LPFC_VPORT_UNKNOWN); 1768 + phba->pport->port_state = LPFC_VPORT_UNKNOWN; 1769 + goto stop_flogi_current_fcf; 1770 + } else { 1771 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, 1772 + "2794 Try FLOGI roundrobin FCF failover to " 1773 + "(x%x)\n", fcf_index); 1774 + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); 1775 + if (rc) 1776 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1777 + "2761 FLOGI roundrobin FCF failover " 1778 + "failed (rc:x%x) to read FCF (x%x)\n", 1779 + rc, phba->fcf.current_rec.fcf_indx); 1780 + else 1781 + goto stop_flogi_current_fcf; 1782 + } 1783 + return 0; 1784 + 1785 + stop_flogi_current_fcf: 1786 + lpfc_can_disctmo(vport); 1787 + return 1; 1788 + } 1789 + 1790 + /** 1791 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan 
read_fcf mbox cmpl handler. 1792 * @phba: pointer to lpfc hba data structure. 1793 * @mboxq: pointer to mailbox object. ··· 1756 int rc; 1757 1758 /* If there is pending FCoE event restart FCF table scan */ 1759 + if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { 1760 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1761 return; 1762 } ··· 1765 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1766 &next_fcf_index); 1767 if (!new_fcf_record) { 1768 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1769 "2765 Mailbox command READ_FCF_RECORD " 1770 "failed to retrieve a FCF record.\n"); 1771 /* Let next new FCF event trigger fast failover */ 1772 spin_lock_irq(&phba->hbalock); 1773 + phba->hba_flag &= ~FCF_TS_INPROG; 1774 spin_unlock_irq(&phba->hbalock); 1775 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1776 return; ··· 1787 /* 1788 * If the fcf record does not match with connect list entries 1789 * read the next entry; otherwise, this is an eligible FCF 1790 + * record for roundrobin FCF failover. 
1791 */ 1792 if (!rc) { 1793 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1794 + "2781 FCF (x%x) failed connection " 1795 + "list check: (x%x/x%x)\n", 1796 bf_get(lpfc_fcf_record_fcf_index, 1797 new_fcf_record), 1798 bf_get(lpfc_fcf_record_fcf_avail, ··· 1823 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 1824 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1825 "2835 Invalid in-use FCF " 1826 + "(x%x), enter FCF failover " 1827 + "table scan.\n", 1828 phba->fcf.current_rec.fcf_indx); 1829 spin_lock_irq(&phba->hbalock); 1830 phba->fcf.fcf_flag |= FCF_REDISC_FOV; ··· 1970 */ 1971 if (fcf_rec) { 1972 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1973 + "2840 Update initial FCF candidate " 1974 + "with FCF (x%x)\n", 1975 bf_get(lpfc_fcf_record_fcf_index, 1976 new_fcf_record)); 1977 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, ··· 2001 */ 2002 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2003 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2004 + "2782 No suitable FCF found: " 2005 + "(x%x/x%x)\n", 2006 phba->fcoe_eventtag_at_fcf_scan, 2007 bf_get(lpfc_fcf_record_fcf_index, 2008 new_fcf_record)); 2009 spin_lock_irq(&phba->hbalock); 2010 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 2011 + phba->hba_flag &= ~FCF_TS_INPROG; 2012 + spin_unlock_irq(&phba->hbalock); 2013 + /* Unregister in-use FCF and rescan */ 2014 + lpfc_printf_log(phba, KERN_INFO, 2015 + LOG_FIP, 2016 + "2864 On devloss tmo " 2017 + "unreg in-use FCF and " 2018 + "rescan FCF table\n"); 2019 + lpfc_unregister_fcf_rescan(phba); 2020 + return; 2021 + } 2022 + /* 2023 + * Let next new FCF event trigger fast failover 2024 + */ 2025 + phba->hba_flag &= ~FCF_TS_INPROG; 2026 spin_unlock_irq(&phba->hbalock); 2027 return; 2028 } ··· 2032 2033 /* Replace in-use record with the new record */ 2034 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2035 + "2842 Replace in-use FCF (x%x) " 2036 + "with failover FCF (x%x)\n", 2037 phba->fcf.current_rec.fcf_indx, 2038 phba->fcf.failover_rec.fcf_indx); 2039 
memcpy(&phba->fcf.current_rec, ··· 2046 * FCF failover. 2047 */ 2048 spin_lock_irq(&phba->hbalock); 2049 + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2050 spin_unlock_irq(&phba->hbalock); 2051 /* Register to the new FCF record */ 2052 lpfc_register_fcf(phba); 2053 } else { ··· 2101 } 2102 2103 /** 2104 + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler 2105 * @phba: pointer to lpfc hba data structure. 2106 * @mboxq: pointer to mailbox object. 2107 * 2108 + * This is the callback function for FLOGI failure roundrobin FCF failover 2109 * read FCF record mailbox command from the eligible FCF record bmask for 2110 * performing the failover. If the FCF read back is not valid/available, it 2111 * fails through to retrying FLOGI to the currently registered FCF again. ··· 2120 { 2121 struct fcf_record *new_fcf_record; 2122 uint32_t boot_flag, addr_mode; 2123 + uint16_t next_fcf_index, fcf_index; 2124 uint16_t current_fcf_index; 2125 uint16_t vlan_id; 2126 + int rc; 2127 2128 + /* If link state is not up, stop the roundrobin failover process */ 2129 if (phba->link_state < LPFC_LINK_UP) { 2130 spin_lock_irq(&phba->hbalock); 2131 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2132 + phba->hba_flag &= ~FCF_RR_INPROG; 2133 spin_unlock_irq(&phba->hbalock); 2134 + goto out; 2135 } 2136 2137 /* Parse the FCF record from the non-embedded mailbox command */ ··· 2140 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2141 "2766 Mailbox command READ_FCF_RECORD " 2142 "failed to retrieve a FCF record.\n"); 2143 + goto error_out; 2144 } 2145 2146 /* Get the needed parameters from FCF record */ 2147 + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2148 + &addr_mode, &vlan_id); 2149 2150 /* Log the FCF record information if turned on */ 2151 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2152 next_fcf_index); 2153 2154 + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2155 + if (!rc) { 2156 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 
2157 + "2848 Remove ineligible FCF (x%x) from " 2158 + "from roundrobin bmask\n", fcf_index); 2159 + /* Clear roundrobin bmask bit for ineligible FCF */ 2160 + lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); 2161 + /* Perform next round of roundrobin FCF failover */ 2162 + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 2163 + rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); 2164 + if (rc) 2165 + goto out; 2166 + goto error_out; 2167 + } 2168 + 2169 + if (fcf_index == phba->fcf.current_rec.fcf_indx) { 2170 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2171 + "2760 Perform FLOGI roundrobin FCF failover: " 2172 + "FCF (x%x) back to FCF (x%x)\n", 2173 + phba->fcf.current_rec.fcf_indx, fcf_index); 2174 + /* Wait 500 ms before retrying FLOGI to current FCF */ 2175 + msleep(500); 2176 + lpfc_initial_flogi(phba->pport); 2177 + goto out; 2178 + } 2179 + 2180 /* Upload new FCF record to the failover FCF record */ 2181 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2182 + "2834 Update current FCF (x%x) with new FCF (x%x)\n", 2183 + phba->fcf.failover_rec.fcf_indx, fcf_index); 2184 spin_lock_irq(&phba->hbalock); 2185 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2186 new_fcf_record, addr_mode, vlan_id, ··· 2173 sizeof(struct lpfc_fcf_rec)); 2174 2175 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2176 + "2783 Perform FLOGI roundrobin FCF failover: FCF " 2177 + "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); 2178 2179 + error_out: 2180 + lpfc_register_fcf(phba); 2181 out: 2182 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2183 } 2184 2185 /** ··· 2189 * @mboxq: pointer to mailbox object. 2190 * 2191 * This is the callback function of read FCF record mailbox command for 2192 + * updating the eligible FCF bmask for FLOGI failure roundrobin FCF 2193 * failover when a new FCF event happened. If the FCF read back is 2194 * valid/available and it passes the connection list check, it updates 2195 + * the bmask for the eligible FCF record for roundrobin failover. 
2196 */ 2197 void 2198 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) ··· 2634 * and get the FCF Table. 2635 */ 2636 spin_lock_irq(&phba->hbalock); 2637 + if (phba->hba_flag & FCF_TS_INPROG) { 2638 spin_unlock_irq(&phba->hbalock); 2639 return; 2640 }
+18 -23
drivers/scsi/lpfc/lpfc_init.c
··· 2936 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2937 spin_unlock_irq(&phba->hbalock); 2938 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2939 - "2776 FCF rediscover wait timer expired, post " 2940 - "a worker thread event for FCF table scan\n"); 2941 /* wake up worker thread */ 2942 lpfc_worker_wake_up(phba); 2943 } ··· 3311 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3312 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3313 LOG_DISCOVERY, 3314 - "2546 New FCF found event: " 3315 - "evt_tag:x%x, fcf_index:x%x\n", 3316 acqe_fcoe->event_tag, 3317 acqe_fcoe->index); 3318 else 3319 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3320 LOG_DISCOVERY, 3321 - "2788 FCF parameter modified event: " 3322 - "evt_tag:x%x, fcf_index:x%x\n", 3323 acqe_fcoe->event_tag, 3324 acqe_fcoe->index); 3325 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3326 /* 3327 * During period of FCF discovery, read the FCF 3328 * table record indexed by the event to update 3329 - * FCF round robin failover eligible FCF bmask. 3330 */ 3331 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3332 LOG_DISCOVERY, 3333 - "2779 Read new FCF record with " 3334 - "fcf_index:x%x for updating FCF " 3335 - "round robin failover bmask\n", 3336 acqe_fcoe->index); 3337 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3338 } 3339 3340 /* If the FCF discovery is in progress, do nothing. 
*/ 3341 spin_lock_irq(&phba->hbalock); 3342 - if (phba->hba_flag & FCF_DISC_INPROGRESS) { 3343 spin_unlock_irq(&phba->hbalock); 3344 break; 3345 } ··· 3357 3358 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3359 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3360 - "2770 Start FCF table scan due to new FCF " 3361 - "event: evt_tag:x%x, fcf_index:x%x\n", 3362 acqe_fcoe->event_tag, acqe_fcoe->index); 3363 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3364 LPFC_FCOE_FCF_GET_FIRST); 3365 if (rc) 3366 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3367 "2547 Issue FCF scan read FCF mailbox " 3368 - "command failed 0x%x\n", rc); 3369 break; 3370 3371 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: ··· 3377 3378 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3379 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3380 - "2549 FCF disconnected from network index 0x%x" 3381 - " tag 0x%x\n", acqe_fcoe->index, 3382 - acqe_fcoe->event_tag); 3383 /* 3384 * If we are in the middle of FCF failover process, clear 3385 * the corresponding FCF bit in the roundrobin bitmap. 
··· 3492 spin_unlock_irq(&phba->hbalock); 3493 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3494 LOG_DISCOVERY, 3495 - "2773 Start FCF fast failover due " 3496 - "to CVL event: evt_tag:x%x\n", 3497 - acqe_fcoe->event_tag); 3498 rc = lpfc_sli4_redisc_fcf_table(phba); 3499 if (rc) { 3500 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | ··· 3643 3644 /* Scan FCF table from the first entry to re-discover SAN */ 3645 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3646 - "2777 Start FCF table scan after FCF " 3647 - "rediscovery quiescent period over\n"); 3648 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3649 if (rc) 3650 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, ··· 4161 goto out_free_active_sgl; 4162 } 4163 4164 - /* Allocate eligible FCF bmask memory for FCF round robin failover */ 4165 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4166 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4167 GFP_KERNEL);
··· 2936 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2937 spin_unlock_irq(&phba->hbalock); 2938 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2939 + "2776 FCF rediscover quiescent timer expired\n"); 2940 /* wake up worker thread */ 2941 lpfc_worker_wake_up(phba); 2942 } ··· 3312 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3313 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3314 LOG_DISCOVERY, 3315 + "2546 New FCF event, evt_tag:x%x, " 3316 + "index:x%x\n", 3317 acqe_fcoe->event_tag, 3318 acqe_fcoe->index); 3319 else 3320 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3321 LOG_DISCOVERY, 3322 + "2788 FCF param modified event, " 3323 + "evt_tag:x%x, index:x%x\n", 3324 acqe_fcoe->event_tag, 3325 acqe_fcoe->index); 3326 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3327 /* 3328 * During period of FCF discovery, read the FCF 3329 * table record indexed by the event to update 3330 + * FCF roundrobin failover eligible FCF bmask. 3331 */ 3332 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3333 LOG_DISCOVERY, 3334 + "2779 Read FCF (x%x) for updating " 3335 + "roundrobin FCF failover bmask\n", 3336 acqe_fcoe->index); 3337 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3338 } 3339 3340 /* If the FCF discovery is in progress, do nothing. 
*/ 3341 spin_lock_irq(&phba->hbalock); 3342 + if (phba->hba_flag & FCF_TS_INPROG) { 3343 spin_unlock_irq(&phba->hbalock); 3344 break; 3345 } ··· 3359 3360 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3361 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3362 + "2770 Start FCF table scan per async FCF " 3363 + "event, evt_tag:x%x, index:x%x\n", 3364 acqe_fcoe->event_tag, acqe_fcoe->index); 3365 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3366 LPFC_FCOE_FCF_GET_FIRST); 3367 if (rc) 3368 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3369 "2547 Issue FCF scan read FCF mailbox " 3370 + "command failed (x%x)\n", rc); 3371 break; 3372 3373 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: ··· 3379 3380 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3381 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3382 + "2549 FCF (x%x) disconnected from network, " 3383 + "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3384 /* 3385 * If we are in the middle of FCF failover process, clear 3386 * the corresponding FCF bit in the roundrobin bitmap. 
··· 3495 spin_unlock_irq(&phba->hbalock); 3496 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3497 LOG_DISCOVERY, 3498 + "2773 Start FCF failover per CVL, " 3499 + "evt_tag:x%x\n", acqe_fcoe->event_tag); 3500 rc = lpfc_sli4_redisc_fcf_table(phba); 3501 if (rc) { 3502 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | ··· 3647 3648 /* Scan FCF table from the first entry to re-discover SAN */ 3649 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3650 + "2777 Start post-quiescent FCF table scan\n"); 3651 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3652 if (rc) 3653 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, ··· 4166 goto out_free_active_sgl; 4167 } 4168 4169 + /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 4170 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4171 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4172 GFP_KERNEL);
+24 -48
drivers/scsi/lpfc/lpfc_sli.c
··· 5921 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 5922 * @phba: Pointer to HBA context object. 5923 * 5924 - * This routine performs a round robin SCSI command to SLI4 FCP WQ index 5925 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 5926 * held. 5927 * ··· 12242 /* Issue the mailbox command asynchronously */ 12243 mboxq->vport = phba->pport; 12244 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 12245 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12246 if (rc == MBX_NOT_FINISHED) 12247 error = -EIO; 12248 else { 12249 - spin_lock_irq(&phba->hbalock); 12250 - phba->hba_flag |= FCF_DISC_INPROGRESS; 12251 - spin_unlock_irq(&phba->hbalock); 12252 /* Reset eligible FCF count for new scan */ 12253 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 12254 phba->fcf.eligible_fcf_cnt = 0; ··· 12260 if (error) { 12261 if (mboxq) 12262 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12263 - /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ 12264 spin_lock_irq(&phba->hbalock); 12265 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 12266 spin_unlock_irq(&phba->hbalock); 12267 } 12268 return error; 12269 } 12270 12271 /** 12272 - * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. 12273 * @phba: pointer to lpfc hba data structure. 12274 * @fcf_index: FCF table entry offset. 12275 * 12276 * This routine is invoked to read an FCF record indicated by @fcf_index 12277 - * and to use it for FLOGI round robin FCF failover. 12278 * 12279 * Return 0 if the mailbox command is submitted sucessfully, none 0 12280 * otherwise. ··· 12320 * @fcf_index: FCF table entry offset. 12321 * 12322 * This routine is invoked to read an FCF record indicated by @fcf_index to 12323 - * determine whether it's eligible for FLOGI round robin failover list. 12324 * 12325 * Return 0 if the mailbox command is submitted sucessfully, none 0 12326 * otherwise. 
··· 12366 * 12367 * This routine is to get the next eligible FCF record index in a round 12368 * robin fashion. If the next eligible FCF record index equals to the 12369 - * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12370 * shall be returned, otherwise, the next eligible FCF record's index 12371 * shall be returned. 12372 **/ ··· 12394 return LPFC_FCOE_FCF_NEXT_NONE; 12395 } 12396 12397 - /* Check roundrobin failover index bmask stop condition */ 12398 - if (next_fcf_index == phba->fcf.fcf_rr_init_indx) { 12399 - if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) { 12400 - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 12401 - "2847 Round robin failover FCF index " 12402 - "search hit stop condition:x%x\n", 12403 - next_fcf_index); 12404 - return LPFC_FCOE_FCF_NEXT_NONE; 12405 - } 12406 - /* The roundrobin failover index bmask updated, start over */ 12407 - lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12408 - "2848 Round robin failover FCF index bmask " 12409 - "updated, start over\n"); 12410 - spin_lock_irq(&phba->hbalock); 12411 - phba->fcf.fcf_flag &= ~FCF_REDISC_RRU; 12412 - spin_unlock_irq(&phba->hbalock); 12413 - return phba->fcf.fcf_rr_init_indx; 12414 - } 12415 - 12416 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12417 - "2845 Get next round robin failover " 12418 - "FCF index x%x\n", next_fcf_index); 12419 return next_fcf_index; 12420 } 12421 ··· 12406 * @phba: pointer to lpfc hba data structure. 12407 * 12408 * This routine sets the FCF record index in to the eligible bmask for 12409 - * round robin failover search. It checks to make sure that the index 12410 * does not go beyond the range of the driver allocated bmask dimension 12411 * before setting the bit. 
12412 * ··· 12418 { 12419 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12420 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12421 - "2610 HBA FCF index reached driver's " 12422 - "book keeping dimension: fcf_index:%d, " 12423 - "driver_bmask_max:%d\n", 12424 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12425 return -EINVAL; 12426 } 12427 /* Set the eligible FCF record index bmask */ 12428 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12429 12430 - /* Set the roundrobin index bmask updated */ 12431 - spin_lock_irq(&phba->hbalock); 12432 - phba->fcf.fcf_flag |= FCF_REDISC_RRU; 12433 - spin_unlock_irq(&phba->hbalock); 12434 - 12435 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12436 - "2790 Set FCF index x%x to round robin failover " 12437 "bmask\n", fcf_index); 12438 12439 return 0; ··· 12438 * @phba: pointer to lpfc hba data structure. 12439 * 12440 * This routine clears the FCF record index from the eligible bmask for 12441 - * round robin failover search. It checks to make sure that the index 12442 * does not go beyond the range of the driver allocated bmask dimension 12443 * before clearing the bit. 12444 **/ ··· 12447 { 12448 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12449 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12450 - "2762 HBA FCF index goes beyond driver's " 12451 - "book keeping dimension: fcf_index:%d, " 12452 - "driver_bmask_max:%d\n", 12453 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12454 return; 12455 } ··· 12456 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12457 12458 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12459 - "2791 Clear FCF index x%x from round robin failover " 12460 "bmask\n", fcf_index); 12461 } 12462 ··· 12507 } 12508 } else { 12509 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12510 - "2775 Start FCF rediscovery quiescent period " 12511 - "wait timer before scaning FCF table\n"); 12512 /* 12513 * Start FCF rediscovery wait timer for pending FCF 12514 * before rescan FCF record table.
··· 5921 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 5922 * @phba: Pointer to HBA context object. 5923 * 5924 + * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 5925 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 5926 * held. 5927 * ··· 12242 /* Issue the mailbox command asynchronously */ 12243 mboxq->vport = phba->pport; 12244 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 12245 + 12246 + spin_lock_irq(&phba->hbalock); 12247 + phba->hba_flag |= FCF_TS_INPROG; 12248 + spin_unlock_irq(&phba->hbalock); 12249 + 12250 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12251 if (rc == MBX_NOT_FINISHED) 12252 error = -EIO; 12253 else { 12254 /* Reset eligible FCF count for new scan */ 12255 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 12256 phba->fcf.eligible_fcf_cnt = 0; ··· 12258 if (error) { 12259 if (mboxq) 12260 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12261 + /* FCF scan failed, clear FCF_TS_INPROG flag */ 12262 spin_lock_irq(&phba->hbalock); 12263 + phba->hba_flag &= ~FCF_TS_INPROG; 12264 spin_unlock_irq(&phba->hbalock); 12265 } 12266 return error; 12267 } 12268 12269 /** 12270 + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 12271 * @phba: pointer to lpfc hba data structure. 12272 * @fcf_index: FCF table entry offset. 12273 * 12274 * This routine is invoked to read an FCF record indicated by @fcf_index 12275 + * and to use it for FLOGI roundrobin FCF failover. 12276 * 12277 * Return 0 if the mailbox command is submitted sucessfully, none 0 12278 * otherwise. ··· 12318 * @fcf_index: FCF table entry offset. 12319 * 12320 * This routine is invoked to read an FCF record indicated by @fcf_index to 12321 + * determine whether it's eligible for FLOGI roundrobin failover list. 12322 * 12323 * Return 0 if the mailbox command is submitted sucessfully, none 0 12324 * otherwise. 
··· 12364 * 12365 * This routine is to get the next eligible FCF record index in a round 12366 * robin fashion. If the next eligible FCF record index equals to the 12367 + * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12368 * shall be returned, otherwise, the next eligible FCF record's index 12369 * shall be returned. 12370 **/ ··· 12392 return LPFC_FCOE_FCF_NEXT_NONE; 12393 } 12394 12395 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12396 + "2845 Get next roundrobin failover FCF (x%x)\n", 12397 + next_fcf_index); 12398 + 12399 return next_fcf_index; 12400 } 12401 ··· 12422 * @phba: pointer to lpfc hba data structure. 12423 * 12424 * This routine sets the FCF record index in to the eligible bmask for 12425 + * roundrobin failover search. It checks to make sure that the index 12426 * does not go beyond the range of the driver allocated bmask dimension 12427 * before setting the bit. 12428 * ··· 12434 { 12435 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12436 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12437 + "2610 FCF (x%x) reached driver's book " 12438 + "keeping dimension:x%x\n", 12439 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12440 return -EINVAL; 12441 } 12442 /* Set the eligible FCF record index bmask */ 12443 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12444 12445 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12446 + "2790 Set FCF (x%x) to roundrobin FCF failover " 12447 "bmask\n", fcf_index); 12448 12449 return 0; ··· 12460 * @phba: pointer to lpfc hba data structure. 12461 * 12462 * This routine clears the FCF record index from the eligible bmask for 12463 + * roundrobin failover search. It checks to make sure that the index 12464 * does not go beyond the range of the driver allocated bmask dimension 12465 * before clearing the bit. 
12466 **/ ··· 12469 { 12470 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12471 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12472 + "2762 FCF (x%x) reached driver's book " 12473 + "keeping dimension:x%x\n", 12474 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12475 return; 12476 } ··· 12479 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12480 12481 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12482 + "2791 Clear FCF (x%x) from roundrobin failover " 12483 "bmask\n", fcf_index); 12484 } 12485 ··· 12530 } 12531 } else { 12532 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12533 + "2775 Start FCF rediscover quiescent timer\n"); 12534 /* 12535 * Start FCF rediscovery wait timer for pending FCF 12536 * before rescan FCF record table.
+4 -2
drivers/scsi/lpfc/lpfc_sli4.h
··· 23 #define LPFC_GET_QE_REL_INT 32 24 #define LPFC_RPI_LOW_WATER_MARK 10 25 26 /* Amount of time in seconds for waiting FCF rediscovery to complete */ 27 #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ 28 ··· 166 #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ 167 #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ 168 #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 169 - #define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */ 170 uint32_t addr_mode; 171 - uint16_t fcf_rr_init_indx; 172 uint32_t eligible_fcf_cnt; 173 struct lpfc_fcf_rec current_rec; 174 struct lpfc_fcf_rec failover_rec;
··· 23 #define LPFC_GET_QE_REL_INT 32 24 #define LPFC_RPI_LOW_WATER_MARK 10 25 26 + #define LPFC_UNREG_FCF 1 27 + #define LPFC_SKIP_UNREG_FCF 0 28 + 29 /* Amount of time in seconds for waiting FCF rediscovery to complete */ 30 #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ 31 ··· 163 #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ 164 #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ 165 #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 166 + #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) 167 uint32_t addr_mode; 168 uint32_t eligible_fcf_cnt; 169 struct lpfc_fcf_rec current_rec; 170 struct lpfc_fcf_rec failover_rec;