Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] lpfc 8.3.25: Add FCF priority failover functionality

This patch implements a new FCF failover policy for the lpfc driver. It
allows the driver to choose which FCF to failover to based on the FCF
priority. This patch also introduces a new sysfs parameter
(fcf_failover_policy) to allow the user to choose which FCF failover policy
to use.

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

authored by

James Smart and committed by
James Bottomley
7d791df7 b76f2dc9

+377 -20
+3
drivers/scsi/lpfc/lpfc.h
··· 680 680 uint32_t cfg_enable_rrq; 681 681 uint32_t cfg_topology; 682 682 uint32_t cfg_link_speed; 683 + #define LPFC_FCF_FOV 1 /* Fast fcf failover */ 684 + #define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */ 685 + uint32_t cfg_fcf_failover_policy; 683 686 uint32_t cfg_cr_delay; 684 687 uint32_t cfg_cr_count; 685 688 uint32_t cfg_multi_ring_support;
+5
drivers/scsi/lpfc/lpfc_attr.c
··· 2193 2193 lpfc_param_init(enable_npiv, 1, 0, 1); 2194 2194 static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); 2195 2195 2196 + LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, 2197 + "FCF Fast failover=1 Priority failover=2"); 2198 + 2196 2199 int lpfc_enable_rrq; 2197 2200 module_param(lpfc_enable_rrq, int, S_IRUGO); 2198 2201 MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); ··· 3778 3775 &dev_attr_lpfc_fdmi_on, 3779 3776 &dev_attr_lpfc_max_luns, 3780 3777 &dev_attr_lpfc_enable_npiv, 3778 + &dev_attr_lpfc_fcf_failover_policy, 3781 3779 &dev_attr_lpfc_enable_rrq, 3782 3780 &dev_attr_nport_evt_cnt, 3783 3781 &dev_attr_board_mode, ··· 4999 4995 lpfc_link_speed_init(phba, lpfc_link_speed); 5000 4996 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 5001 4997 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 4998 + lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); 5002 4999 lpfc_enable_rrq_init(phba, lpfc_enable_rrq); 5003 5000 lpfc_use_msi_init(phba, lpfc_use_msi); 5004 5001 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+2
drivers/scsi/lpfc/lpfc_crtn.h
··· 235 235 void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); 236 236 void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); 237 237 uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 238 + void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t); 238 239 int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 239 240 void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 240 241 int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); 242 + void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); 241 243 242 244 int lpfc_mem_alloc(struct lpfc_hba *, int align); 243 245 void lpfc_mem_free(struct lpfc_hba *);
+2
drivers/scsi/lpfc/lpfc_els.c
··· 874 874 phba->fcf.current_rec.fcf_indx, 875 875 irsp->ulpStatus, irsp->un.ulpWord[4], 876 876 irsp->ulpTimeout); 877 + lpfc_sli4_set_fcf_flogi_fail(phba, 878 + phba->fcf.current_rec.fcf_indx); 877 879 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 878 880 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 879 881 if (rc)
+212 -8
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 1109 1109 return; 1110 1110 } 1111 1111 1112 + /** 1113 + * lpfc_sli4_clear_fcf_rr_bmask 1114 + * @phba: pointer to the struct lpfc_hba for this port. 1115 + * This function resets the round robin bit mask and clears the 1116 + * fcf priority list. The list deletions are done while holding the 1117 + * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared 1118 + * from the lpfc_fcf_pri record. 1119 + **/ 1120 + void 1121 + lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba) 1122 + { 1123 + struct lpfc_fcf_pri *fcf_pri; 1124 + struct lpfc_fcf_pri *next_fcf_pri; 1125 + memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); 1126 + spin_lock_irq(&phba->hbalock); 1127 + list_for_each_entry_safe(fcf_pri, next_fcf_pri, 1128 + &phba->fcf.fcf_pri_list, list) { 1129 + list_del_init(&fcf_pri->list); 1130 + fcf_pri->fcf_rec.flag = 0; 1131 + } 1132 + spin_unlock_irq(&phba->hbalock); 1133 + } 1112 1134 static void 1113 1135 lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1114 1136 { ··· 1152 1130 spin_unlock_irq(&phba->hbalock); 1153 1131 1154 1132 /* If there is a pending FCoE event, restart FCF table scan. */ 1155 - if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1133 + if ((!(phba->hba_flag & FCF_RR_INPROG)) && 1134 + lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1156 1135 goto fail_out; 1157 1136 1158 1137 /* Mark successful completion of FCF table scan */ ··· 1273 1250 } 1274 1251 1275 1252 /** 1253 + * lpfc_update_fcf_record - Update driver fcf record 1254 + * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record. 1255 + * @phba: pointer to lpfc hba data structure. 1256 + * @fcf_index: Index for the lpfc_fcf_record. 1257 + * @new_fcf_record: pointer to hba fcf record. 1258 + * 1259 + * This routine updates the driver FCF priority record from the new HBA FCF 1260 + * record. This routine is called with the host lock held. 
1261 + **/ 1262 + static void 1263 + __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index, 1264 + struct fcf_record *new_fcf_record 1265 + ) 1266 + { 1267 + struct lpfc_fcf_pri *fcf_pri; 1268 + 1269 + fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 1270 + fcf_pri->fcf_rec.fcf_index = fcf_index; 1271 + /* FCF record priority */ 1272 + fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; 1273 + 1274 + } 1275 + 1276 + /** 1276 1277 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. 1277 1278 * @fcf: pointer to driver fcf record. 1278 1279 * @new_fcf_record: pointer to fcf record. ··· 1379 1332 fcf_rec->addr_mode = addr_mode; 1380 1333 fcf_rec->vlan_id = vlan_id; 1381 1334 fcf_rec->flag |= (flag | RECORD_VALID); 1335 + __lpfc_update_fcf_record_pri(phba, 1336 + bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), 1337 + new_fcf_record); 1382 1338 } 1383 1339 1384 1340 /** ··· 1884 1834 return false; 1885 1835 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) 1886 1836 return false; 1837 + if (fcf_rec->priority != new_fcf_record->fip_priority) 1838 + return false; 1887 1839 return true; 1888 1840 } 1889 1841 ··· 1946 1894 stop_flogi_current_fcf: 1947 1895 lpfc_can_disctmo(vport); 1948 1896 return 1; 1897 + } 1898 + 1899 + /** 1900 + * lpfc_sli4_fcf_pri_list_del 1901 + * @phba: pointer to lpfc hba data structure. 1902 + * @fcf_index the index of the fcf record to delete 1903 + * This routine checks the on list flag of the fcf_index to be deleted. 1904 + * If it is on the list then it is removed from the list, and the flag 1905 + * is cleared. This routine grabs the hbalock before removing the fcf 1906 + * record from the list. 
1907 + **/ 1908 + static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, 1909 + uint16_t fcf_index) 1910 + { 1911 + struct lpfc_fcf_pri *new_fcf_pri; 1912 + 1913 + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 1914 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1915 + "3058 deleting idx x%x pri x%x flg x%x\n", 1916 + fcf_index, new_fcf_pri->fcf_rec.priority, 1917 + new_fcf_pri->fcf_rec.flag); 1918 + spin_lock_irq(&phba->hbalock); 1919 + if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) { 1920 + if (phba->fcf.current_rec.priority == 1921 + new_fcf_pri->fcf_rec.priority) 1922 + phba->fcf.eligible_fcf_cnt--; 1923 + list_del_init(&new_fcf_pri->list); 1924 + new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST; 1925 + } 1926 + spin_unlock_irq(&phba->hbalock); 1927 + } 1928 + 1929 + /** 1930 + * lpfc_sli4_set_fcf_flogi_fail 1931 + * @phba: pointer to lpfc hba data structure. 1932 + * @fcf_index the index of the fcf record to update 1933 + * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED 1934 + * flag so that the round robin selection for the particular priority level 1935 + * will try a different fcf record that does not have this bit set. 1936 + * If the fcf record is re-read for any reason this flag is cleared before 1937 + * adding it to the priority list. 1938 + **/ 1939 + void 1940 + lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) 1941 + { 1942 + struct lpfc_fcf_pri *new_fcf_pri; 1943 + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 1944 + spin_lock_irq(&phba->hbalock); 1945 + new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED; 1946 + spin_unlock_irq(&phba->hbalock); 1947 + } 1948 + 1949 + /** 1950 + * lpfc_sli4_fcf_pri_list_add 1951 + * @phba: pointer to lpfc hba data structure. 1952 + * @fcf_index the index of the fcf record to add 1953 + * This routine checks the priority of the fcf_index to be added. 
1954 + * If it is a lower priority than the current head of the fcf_pri list 1955 + * then it is added to the list in the right order. 1956 + * If it is the same priority as the current head of the list then it 1957 + * is added to the head of the list and its bit in the rr_bmask is set. 1958 + * If the fcf_index to be added is of a higher priority than the current 1959 + * head of the list then the rr_bmask is cleared, its bit is set in the 1960 + * rr_bmask and it is added to the head of the list. 1961 + * returns: 1962 + * 0=success 1=failure 1963 + **/ 1964 + int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index, 1965 + struct fcf_record *new_fcf_record) 1966 + { 1967 + uint16_t current_fcf_pri; 1968 + uint16_t last_index; 1969 + struct lpfc_fcf_pri *fcf_pri; 1970 + struct lpfc_fcf_pri *next_fcf_pri; 1971 + struct lpfc_fcf_pri *new_fcf_pri; 1972 + int ret; 1973 + 1974 + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 1975 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1976 + "3059 adding idx x%x pri x%x flg x%x\n", 1977 + fcf_index, new_fcf_record->fip_priority, 1978 + new_fcf_pri->fcf_rec.flag); 1979 + spin_lock_irq(&phba->hbalock); 1980 + if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) 1981 + list_del_init(&new_fcf_pri->list); 1982 + new_fcf_pri->fcf_rec.fcf_index = fcf_index; 1983 + new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; 1984 + if (list_empty(&phba->fcf.fcf_pri_list)) { 1985 + list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); 1986 + ret = lpfc_sli4_fcf_rr_index_set(phba, 1987 + new_fcf_pri->fcf_rec.fcf_index); 1988 + goto out; 1989 + } 1990 + 1991 + last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 1992 + LPFC_SLI4_FCF_TBL_INDX_MAX); 1993 + if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 1994 + ret = 0; /* Empty rr list */ 1995 + goto out; 1996 + } 1997 + current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; 1998 + if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { 1999 + 
list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); 2000 + if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { 2001 + memset(phba->fcf.fcf_rr_bmask, 0, 2002 + sizeof(*phba->fcf.fcf_rr_bmask)); 2003 + /* fcfs_at_this_priority_level = 1; */ 2004 + phba->fcf.eligible_fcf_cnt = 1; 2005 + } else 2006 + /* fcfs_at_this_priority_level++; */ 2007 + phba->fcf.eligible_fcf_cnt++; 2008 + ret = lpfc_sli4_fcf_rr_index_set(phba, 2009 + new_fcf_pri->fcf_rec.fcf_index); 2010 + goto out; 2011 + } 2012 + 2013 + list_for_each_entry_safe(fcf_pri, next_fcf_pri, 2014 + &phba->fcf.fcf_pri_list, list) { 2015 + if (new_fcf_pri->fcf_rec.priority <= 2016 + fcf_pri->fcf_rec.priority) { 2017 + if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) 2018 + list_add(&new_fcf_pri->list, 2019 + &phba->fcf.fcf_pri_list); 2020 + else 2021 + list_add(&new_fcf_pri->list, 2022 + &((struct lpfc_fcf_pri *) 2023 + fcf_pri->list.prev)->list); 2024 + ret = 0; 2025 + goto out; 2026 + } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list 2027 + || new_fcf_pri->fcf_rec.priority < 2028 + next_fcf_pri->fcf_rec.priority) { 2029 + list_add(&new_fcf_pri->list, &fcf_pri->list); 2030 + ret = 0; 2031 + goto out; 2032 + } 2033 + if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) 2034 + continue; 2035 + 2036 + } 2037 + ret = 1; 2038 + out: 2039 + /* we use = instead of |= to clear the FLOGI_FAILED flag. */ 2040 + new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; 2041 + spin_unlock_irq(&phba->hbalock); 2042 + return ret; 1949 2043 } 1950 2044 1951 2045 /** ··· 2156 1958 * record for roundrobin FCF failover. 
2157 1959 */ 2158 1960 if (!rc) { 1961 + lpfc_sli4_fcf_pri_list_del(phba, 1962 + bf_get(lpfc_fcf_record_fcf_index, 1963 + new_fcf_record)); 2159 1964 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2160 1965 "2781 FCF (x%x) failed connection " 2161 1966 "list check: (x%x/x%x)\n", ··· 2206 2005 goto read_next_fcf; 2207 2006 } else { 2208 2007 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2209 - rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 2008 + rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, 2009 + new_fcf_record); 2210 2010 if (rc) 2211 2011 goto read_next_fcf; 2212 2012 } ··· 2220 2018 */ 2221 2019 spin_lock_irq(&phba->hbalock); 2222 2020 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2223 - if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2021 + if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2022 + lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2224 2023 new_fcf_record, vlan_id)) { 2225 2024 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 2226 2025 phba->fcf.current_rec.fcf_indx) { ··· 2435 2232 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2436 2233 return; 2437 2234 2438 - if (phba->fcf.fcf_flag & FCF_IN_USE) { 2235 + if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2236 + phba->fcf.fcf_flag & FCF_IN_USE) { 2439 2237 /* 2440 2238 * In case the current in-use FCF record no 2441 2239 * longer existed during FCF discovery that ··· 2627 2423 2628 2424 /* Update the eligible FCF record index bmask */ 2629 2425 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2630 - rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 2426 + 2427 + rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); 2631 2428 2632 2429 out: 2633 2430 lpfc_sli4_mbox_cmd_free(phba, mboxq); ··· 3098 2893 goto out; 3099 2894 } 3100 2895 /* Reset FCF roundrobin bmask for new discovery */ 3101 - memset(phba->fcf.fcf_rr_bmask, 0, 3102 - sizeof(*phba->fcf.fcf_rr_bmask)); 2896 + lpfc_sli4_clear_fcf_rr_bmask(phba); 3103 2897 } 3104 2898 
3105 2899 return; ··· 5796 5592 spin_unlock_irq(&phba->hbalock); 5797 5593 5798 5594 /* Reset FCF roundrobin bmask for new discovery */ 5799 - memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); 5595 + lpfc_sli4_clear_fcf_rr_bmask(phba); 5800 5596 5801 5597 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5802 5598
+3 -4
drivers/scsi/lpfc/lpfc_init.c
··· 3634 3634 lpfc_sli4_fcf_dead_failthrough(phba); 3635 3635 } else { 3636 3636 /* Reset FCF roundrobin bmask for new discovery */ 3637 - memset(phba->fcf.fcf_rr_bmask, 0, 3638 - sizeof(*phba->fcf.fcf_rr_bmask)); 3637 + lpfc_sli4_clear_fcf_rr_bmask(phba); 3639 3638 /* 3640 3639 * Handling fast FCF failover to a DEAD FCF event is 3641 3640 * considered equalivant to receiving CVL to all vports. ··· 3720 3721 * Reset FCF roundrobin bmask for new 3721 3722 * discovery. 3722 3723 */ 3723 - memset(phba->fcf.fcf_rr_bmask, 0, 3724 - sizeof(*phba->fcf.fcf_rr_bmask)); 3724 + lpfc_sli4_clear_fcf_rr_bmask(phba); 3725 3725 } 3726 3726 break; 3727 3727 default: ··· 9044 9046 } 9045 9047 9046 9048 INIT_LIST_HEAD(&phba->active_rrq_list); 9049 + INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 9047 9050 9048 9051 /* Set up common device driver resources */ 9049 9052 error = lpfc_setup_driver_resource_phase2(phba);
+128 -3
drivers/scsi/lpfc/lpfc_sli.c
··· 14635 14635 } 14636 14636 14637 14637 /** 14638 + * lpfc_check_next_fcf_pri_level 14639 + * @phba: pointer to the lpfc_hba struct for this port. 14640 + * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 14641 + * routine when the rr_bmask is empty. The FCF indices are put into the 14642 + * rr_bmask based on their priority level. Starting from the highest priority 14643 + * to the lowest. The most likely FCF candidate will be in the highest 14644 + * priority group. When this routine is called it searches the fcf_pri list for 14645 + * the next lowest priority group and repopulates the rr_bmask with only those 14646 + * fcf_indexes. 14647 + * returns: 14648 + * 1=success 0=failure 14649 + **/ 14650 + int 14651 + lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 14652 + { 14653 + uint16_t next_fcf_pri; 14654 + uint16_t last_index; 14655 + struct lpfc_fcf_pri *fcf_pri; 14656 + int rc; 14657 + int ret = 0; 14658 + 14659 + last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 14660 + LPFC_SLI4_FCF_TBL_INDX_MAX); 14661 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14662 + "3060 Last IDX %d\n", last_index); 14663 + if (list_empty(&phba->fcf.fcf_pri_list)) { 14664 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14665 + "3061 Last IDX %d\n", last_index); 14666 + return 0; /* Empty rr list */ 14667 + } 14668 + next_fcf_pri = 0; 14669 + /* 14670 + * Clear the rr_bmask and set all of the bits that are at this 14671 + * priority. 14672 + */ 14673 + memset(phba->fcf.fcf_rr_bmask, 0, 14674 + sizeof(*phba->fcf.fcf_rr_bmask)); 14675 + spin_lock_irq(&phba->hbalock); 14676 + list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 14677 + if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 14678 + continue; 14679 + /* 14680 + * the 1st priority that has not FLOGI failed 14681 + * will be the highest. 
14682 + */ 14683 + if (!next_fcf_pri) 14684 + next_fcf_pri = fcf_pri->fcf_rec.priority; 14685 + spin_unlock_irq(&phba->hbalock); 14686 + if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 14687 + rc = lpfc_sli4_fcf_rr_index_set(phba, 14688 + fcf_pri->fcf_rec.fcf_index); 14689 + if (rc) 14690 + return 0; 14691 + } 14692 + spin_lock_irq(&phba->hbalock); 14693 + } 14694 + /* 14695 + * if next_fcf_pri was not set above and the list is not empty then 14696 + * we have failed flogis on all of them. So reset flogi failed 14697 + * and start at the beginning. 14698 + */ 14699 + if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 14700 + list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 14701 + fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 14702 + /* 14703 + * the 1st priority that has not FLOGI failed 14704 + * will be the highest. 14705 + */ 14706 + if (!next_fcf_pri) 14707 + next_fcf_pri = fcf_pri->fcf_rec.priority; 14708 + spin_unlock_irq(&phba->hbalock); 14709 + if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 14710 + rc = lpfc_sli4_fcf_rr_index_set(phba, 14711 + fcf_pri->fcf_rec.fcf_index); 14712 + if (rc) 14713 + return 0; 14714 + } 14715 + spin_lock_irq(&phba->hbalock); 14716 + } 14717 + } else 14718 + ret = 1; 14719 + spin_unlock_irq(&phba->hbalock); 14720 + 14721 + return ret; 14722 + } 14723 + /** 14724 + * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 14725 + * @phba: pointer to lpfc hba data structure. 
14640 14726 * ··· 14736 14650 uint16_t next_fcf_index; 14737 14651 14738 14652 /* Search start from next bit of currently registered FCF index */ 14653 + next_priority: 14739 14654 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 14740 14655 LPFC_SLI4_FCF_TBL_INDX_MAX; 14741 14656 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, ··· 14744 14657 next_fcf_index); 14745 14658 14746 14659 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 14747 - if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 14660 + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14661 + /* 14662 + * If we have wrapped then we need to clear the bits that 14663 + * have been tested so that we can detect when we should 14664 + * change the priority level. 14665 + */ 14748 14666 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14749 14667 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 14668 + } 14669 + 14750 14670 14751 14671 /* Check roundrobin failover list empty condition */ 14752 - if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14672 + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 14673 + next_fcf_index == phba->fcf.current_rec.fcf_indx) { 14674 + /* 14675 + * If next fcf index is not found check if there are lower 14676 + * priority level fcf's in the fcf_priority list. 14677 + * Set up the rr_bmask with all of the available fcf bits 14678 + * at that level and continue the selection process. 
14679 + */ 14680 + if (lpfc_check_next_fcf_pri_level(phba)) 14681 + goto next_priority; 14753 14682 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 14754 14683 "2844 No roundrobin failover FCF available\n"); 14755 - return LPFC_FCOE_FCF_NEXT_NONE; 14684 + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 14685 + return LPFC_FCOE_FCF_NEXT_NONE; 14686 + else { 14687 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 14688 + "3063 Only FCF available idx %d, flag %x\n", 14689 + next_fcf_index, 14690 + phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 14691 + return next_fcf_index; 14692 + } 14756 14693 } 14694 + 14695 + if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 14696 + phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 14697 + LPFC_FCF_FLOGI_FAILED) 14698 + goto next_priority; 14757 14699 14758 14700 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14759 14701 "2845 Get next roundrobin failover FCF (x%x)\n", ··· 14835 14719 void 14836 14720 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 14837 14721 { 14722 + struct lpfc_fcf_pri *fcf_pri; 14838 14723 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14839 14724 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14840 14725 "2762 FCF (x%x) reached driver's book " ··· 14844 14727 return; 14845 14728 } 14846 14729 /* Clear the eligible FCF record index bmask */ 14730 + spin_lock_irq(&phba->hbalock); 14731 + list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 14732 + if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 14733 + list_del_init(&fcf_pri->list); 14734 + break; 14735 + } 14736 + } 14737 + spin_unlock_irq(&phba->hbalock); 14847 14738 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 14848 14739 14849 14740 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+22 -5
drivers/scsi/lpfc/lpfc_sli4.h
··· 159 159 #define RECORD_VALID 0x02 160 160 }; 161 161 162 + struct lpfc_fcf_pri_rec { 163 + uint16_t fcf_index; 164 + #define LPFC_FCF_ON_PRI_LIST 0x0001 165 + #define LPFC_FCF_FLOGI_FAILED 0x0002 166 + uint16_t flag; 167 + uint32_t priority; 168 + }; 169 + 170 + struct lpfc_fcf_pri { 171 + struct list_head list; 172 + struct lpfc_fcf_pri_rec fcf_rec; 173 + }; 174 + 175 + /* 176 + * Maximum FCF table index, it is for driver internal book keeping, it 177 + * just needs to be no less than the supported HBA's FCF table size. 178 + */ 179 + #define LPFC_SLI4_FCF_TBL_INDX_MAX 32 180 + 162 181 struct lpfc_fcf { 163 182 uint16_t fcfi; 164 183 uint32_t fcf_flag; ··· 197 178 uint32_t eligible_fcf_cnt; 198 179 struct lpfc_fcf_rec current_rec; 199 180 struct lpfc_fcf_rec failover_rec; 181 + struct list_head fcf_pri_list; 182 + struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX]; 183 + uint32_t current_fcf_scan_pri; 200 184 struct timer_list redisc_wait; 201 185 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ 202 186 }; 203 187 204 - /* 205 - * Maximum FCF table index, it is for driver internal book keeping, it 206 - * just needs to be no less than the supported HBA's FCF table size. 207 - */ 208 - #define LPFC_SLI4_FCF_TBL_INDX_MAX 32 209 188 210 189 #define LPFC_REGION23_SIGNATURE "RG23" 211 190 #define LPFC_REGION23_VERSION 1