Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2020-07-23

This series contains updates to ice driver only.

Jake refactors ice_discover_caps() to reduce the number of AdminQ calls
made. Splits ice_parse_caps() to separate functions to update function
and device capabilities separately to allow for updating outside of
initialization.

Akeem adds power management support.

Paul G refactors FC and FEC code to aid in restoring PHY settings
on media insertion. Implements lenient mode and link override support.
Adds link debug info and formats existing debug info to be more
readable. Adds support to check and report additional autoneg
capabilities. Implements the capability to detect media cage in order to
differentiate AUI types as Direct Attach or backplane.

Bruce implements Total Port Shutdown for devices that support it.

Lev renames low_power_ctrl field to low_power_ctrl_an to be more
descriptive of the field.

Doug reports AOC types as media type fiber.

Paul S adds code to handle 1G SGMII PHY type.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+2342 -667
+11
drivers/net/ethernet/intel/ice/ice.h
··· 222 222 __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ 223 223 __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ 224 224 __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ 225 + __ICE_LINK_DEFAULT_OVERRIDE_PENDING, 226 + __ICE_PHY_INIT_COMPLETE, 225 227 __ICE_STATE_NBITS /* must be last */ 226 228 }; 227 229 ··· 359 357 ICE_FLAG_FD_ENA, 360 358 ICE_FLAG_ADV_FEATURES, 361 359 ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, 360 + ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, 362 361 ICE_FLAG_NO_MEDIA, 363 362 ICE_FLAG_FW_LLDP_AGENT, 364 363 ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ 365 364 ICE_FLAG_LEGACY_RX, 366 365 ICE_FLAG_VF_TRUE_PROMISC_ENA, 367 366 ICE_FLAG_MDD_AUTO_RESET_VF, 367 + ICE_FLAG_LINK_LENIENT_MODE_ENA, 368 368 ICE_PF_FLAGS_NBITS /* must be last */ 369 369 }; 370 370 ··· 427 423 u16 empr_count; /* EMP reset count */ 428 424 u16 pfr_count; /* PF reset count */ 429 425 426 + u8 wol_ena : 1; /* software state of WoL */ 427 + u32 wakeup_reason; /* last wakeup reason */ 430 428 struct ice_hw_port_stats stats; 431 429 struct ice_hw_port_stats stats_prev; 432 430 struct ice_hw hw; ··· 441 435 u32 tx_timeout_recovery_level; 442 436 char int_name[ICE_INT_NAME_STR_LEN]; 443 437 u32 sw_int_count; 438 + 439 + __le64 nvm_phy_type_lo; /* NVM PHY type low */ 440 + __le64 nvm_phy_type_hi; /* NVM PHY type high */ 441 + struct ice_link_default_override_tlv link_dflt_override; 444 442 }; 445 443 446 444 struct ice_netdev_priv { ··· 578 568 void ice_print_link_msg(struct ice_vsi *vsi, bool isup); 579 569 const char *ice_stat_str(enum ice_status stat_err); 580 570 const char *ice_aq_str(enum ice_aq_err aq_err); 571 + bool ice_is_wol_supported(struct ice_pf *pf); 581 572 int 582 573 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, 583 574 bool is_tun);
+63 -4
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 961 961 #define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5) 962 962 #define ICE_AQC_PHY_EN_AUTO_FEC BIT(7) 963 963 #define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0) 964 - u8 low_power_ctrl; 964 + u8 low_power_ctrl_an; 965 965 #define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) 966 + #define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1) 967 + #define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2) 968 + #define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3) 966 969 __le16 eee_cap; 967 970 #define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0) 968 971 #define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1) ··· 986 983 #define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) 987 984 #define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) 988 985 #define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0) 989 - u8 rsvd1; /* Byte 35 reserved */ 986 + u8 module_compliance_enforcement; 987 + #define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0) 990 988 u8 extended_compliance_code; 991 989 #define ICE_MODULE_TYPE_TOTAL_BYTE 3 992 990 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; 993 991 #define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0 994 992 #define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80 993 + #define ICE_AQC_MOD_TYPE_IDENT 1 995 994 #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0) 996 995 #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1) 997 996 #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4) ··· 1037 1032 #define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) 1038 1033 #define ICE_AQ_PHY_ENA_LESM BIT(6) 1039 1034 #define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7) 1040 - u8 low_power_ctrl; 1035 + u8 low_power_ctrl_an; 1041 1036 __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ 1042 1037 __le16 eeer_value; 1043 1038 u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */ 1044 - u8 rsvd1; 1039 + u8 module_compliance_enforcement; 1045 1040 }; 1046 1041 1047 1042 /* Set MAC Config command data structure (direct 0x0603) */ ··· 1152 1147 #define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2 1153 1148 #define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3 1154 1149 __le16 link_speed; 1150 + #define 
ICE_AQ_LINK_SPEED_M 0x7FF 1155 1151 #define ICE_AQ_LINK_SPEED_10MB BIT(0) 1156 1152 #define ICE_AQ_LINK_SPEED_100MB BIT(1) 1157 1153 #define ICE_AQ_LINK_SPEED_1000MB BIT(2) ··· 1193 1187 #define ICE_AQ_MAC_LB_EN BIT(0) 1194 1188 #define ICE_AQ_MAC_LB_OSC_CLK BIT(1) 1195 1189 u8 reserved[15]; 1190 + }; 1191 + 1192 + struct ice_aqc_link_topo_addr { 1193 + u8 lport_num; 1194 + u8 lport_num_valid; 1195 + #define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0) 1196 + u8 node_type_ctx; 1197 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0 1198 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S) 1199 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0 1200 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1 1201 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2 1202 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3 1203 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4 1204 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5 1205 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6 1206 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7 1207 + #define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 1208 + #define ICE_AQC_LINK_TOPO_NODE_CTX_S 4 1209 + #define ICE_AQC_LINK_TOPO_NODE_CTX_M \ 1210 + (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S) 1211 + #define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0 1212 + #define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1 1213 + #define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2 1214 + #define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3 1215 + #define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4 1216 + #define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5 1217 + u8 index; 1218 + __le16 handle; 1219 + #define ICE_AQC_LINK_TOPO_HANDLE_S 0 1220 + #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) 1221 + /* Used to decode the handle field */ 1222 + #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) 1223 + #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) 1224 + #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 1225 + #define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0 1226 + /* In case of a Mezzanine type */ 1227 + 
#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \ 1228 + (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) 1229 + #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6 1230 + #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S) 1231 + /* In case of a LOM type */ 1232 + #define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \ 1233 + (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) 1234 + }; 1235 + 1236 + /* Get Link Topology Handle (direct, 0x06E0) */ 1237 + struct ice_aqc_get_link_topo { 1238 + struct ice_aqc_link_topo_addr addr; 1239 + u8 node_part_num; 1240 + u8 rsvd[9]; 1196 1241 }; 1197 1242 1198 1243 /* Set Port Identification LED (direct, 0x06E9) */ ··· 1816 1759 struct ice_aqc_set_event_mask set_event_mask; 1817 1760 struct ice_aqc_get_link_status get_link_status; 1818 1761 struct ice_aqc_event_lan_overflow lan_overflow; 1762 + struct ice_aqc_get_link_topo get_link_topo; 1819 1763 } params; 1820 1764 }; 1821 1765 ··· 1916 1858 ice_aqc_opc_get_link_status = 0x0607, 1917 1859 ice_aqc_opc_set_event_mask = 0x0613, 1918 1860 ice_aqc_opc_set_mac_lb = 0x0620, 1861 + ice_aqc_opc_get_link_topo = 0x06E0, 1919 1862 ice_aqc_opc_set_port_id_led = 0x06E9, 1920 1863 ice_aqc_opc_sff_eeprom = 0x06EE, 1921 1864
+952 -317
drivers/net/ethernet/intel/ice/ice_common.c
··· 20 20 if (hw->vendor_id != PCI_VENDOR_ID_INTEL) 21 21 return ICE_ERR_DEVICE_NOT_SUPPORTED; 22 22 23 - hw->mac_type = ICE_MAC_GENERIC; 23 + switch (hw->device_id) { 24 + case ICE_DEV_ID_E810C_BACKPLANE: 25 + case ICE_DEV_ID_E810C_QSFP: 26 + case ICE_DEV_ID_E810C_SFP: 27 + case ICE_DEV_ID_E810_XXV_SFP: 28 + hw->mac_type = ICE_MAC_E810; 29 + break; 30 + case ICE_DEV_ID_E823C_10G_BASE_T: 31 + case ICE_DEV_ID_E823C_BACKPLANE: 32 + case ICE_DEV_ID_E823C_QSFP: 33 + case ICE_DEV_ID_E823C_SFP: 34 + case ICE_DEV_ID_E823C_SGMII: 35 + case ICE_DEV_ID_E822C_10G_BASE_T: 36 + case ICE_DEV_ID_E822C_BACKPLANE: 37 + case ICE_DEV_ID_E822C_QSFP: 38 + case ICE_DEV_ID_E822C_SFP: 39 + case ICE_DEV_ID_E822C_SGMII: 40 + case ICE_DEV_ID_E822L_10G_BASE_T: 41 + case ICE_DEV_ID_E822L_BACKPLANE: 42 + case ICE_DEV_ID_E822L_SFP: 43 + case ICE_DEV_ID_E822L_SGMII: 44 + case ICE_DEV_ID_E823L_10G_BASE_T: 45 + case ICE_DEV_ID_E823L_1GBE: 46 + case ICE_DEV_ID_E823L_BACKPLANE: 47 + case ICE_DEV_ID_E823L_QSFP: 48 + case ICE_DEV_ID_E823L_SFP: 49 + hw->mac_type = ICE_MAC_GENERIC; 50 + break; 51 + default: 52 + hw->mac_type = ICE_MAC_UNKNOWN; 53 + break; 54 + } 55 + 56 + ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type); 24 57 return 0; 25 58 } 26 59 ··· 85 52 * is returned in user specified buffer. Please interpret user specified 86 53 * buffer as "manage_mac_read" response. 87 54 * Response such as various MAC addresses are stored in HW struct (port.mac) 88 - * ice_aq_discover_caps is expected to be called before this function is called. 55 + * ice_discover_dev_caps is expected to be called before this function is 56 + * called. 
89 57 */ 90 58 static enum ice_status 91 59 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, ··· 150 116 u16 pcaps_size = sizeof(*pcaps); 151 117 struct ice_aq_desc desc; 152 118 enum ice_status status; 119 + struct ice_hw *hw; 153 120 154 121 cmd = &desc.params.get_phy; 155 122 156 123 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) 157 124 return ICE_ERR_PARAM; 125 + hw = pi->hw; 158 126 159 127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); 160 128 ··· 164 128 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM); 165 129 166 130 cmd->param0 |= cpu_to_le16(report_mode); 167 - status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd); 131 + status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd); 132 + 133 + ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n", 134 + report_mode); 135 + ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 136 + (unsigned long long)le64_to_cpu(pcaps->phy_type_low)); 137 + ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 138 + (unsigned long long)le64_to_cpu(pcaps->phy_type_high)); 139 + ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps); 140 + ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 141 + pcaps->low_power_ctrl_an); 142 + ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap); 143 + ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", 144 + pcaps->eeer_value); 145 + ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n", 146 + pcaps->link_fec_options); 147 + ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n", 148 + pcaps->module_compliance_enforcement); 149 + ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n", 150 + pcaps->extended_compliance_code); 151 + ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n", 152 + pcaps->module_type[0]); 153 + ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n", 154 + pcaps->module_type[1]); 155 + ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 
0x%x\n", 156 + pcaps->module_type[2]); 168 157 169 158 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { 170 159 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); 171 160 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); 161 + memcpy(pi->phy.link_info.module_type, &pcaps->module_type, 162 + sizeof(pi->phy.link_info.module_type)); 172 163 } 173 164 174 165 return status; 166 + } 167 + 168 + /** 169 + * ice_aq_get_link_topo_handle - get link topology node return status 170 + * @pi: port information structure 171 + * @node_type: requested node type 172 + * @cd: pointer to command details structure or NULL 173 + * 174 + * Get link topology node return status for specified node type (0x06E0) 175 + * 176 + * Node type cage can be used to determine if cage is present. If AQC 177 + * returns error (ENOENT), then no cage present. If no cage present, then 178 + * connection type is backplane or BASE-T. 179 + */ 180 + static enum ice_status 181 + ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, 182 + struct ice_sq_cd *cd) 183 + { 184 + struct ice_aqc_get_link_topo *cmd; 185 + struct ice_aq_desc desc; 186 + 187 + cmd = &desc.params.get_link_topo; 188 + 189 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 190 + 191 + cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << 192 + ICE_AQC_LINK_TOPO_NODE_CTX_S); 193 + 194 + /* set node type */ 195 + cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); 196 + 197 + return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 198 + } 199 + 200 + /** 201 + * ice_is_media_cage_present 202 + * @pi: port information structure 203 + * 204 + * Returns true if media cage is present, else false. If no cage, then 205 + * media type is backplane or BASE-T. 206 + */ 207 + static bool ice_is_media_cage_present(struct ice_port_info *pi) 208 + { 209 + /* Node type cage can be used to determine if cage is present. If AQC 210 + * returns error (ENOENT), then no cage present. 
If no cage present then 211 + * connection type is backplane or BASE-T. 212 + */ 213 + return !ice_aq_get_link_topo_handle(pi, 214 + ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE, 215 + NULL); 175 216 } 176 217 177 218 /** ··· 268 155 return ICE_MEDIA_UNKNOWN; 269 156 270 157 if (hw_link_info->phy_type_low) { 158 + /* 1G SGMII is a special case where some DA cable PHYs 159 + * may show this as an option when it really shouldn't 160 + * be since SGMII is meant to be between a MAC and a PHY 161 + * in a backplane. Try to detect this case and handle it 162 + */ 163 + if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII && 164 + (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == 165 + ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE || 166 + hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == 167 + ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) 168 + return ICE_MEDIA_DA; 169 + 271 170 switch (hw_link_info->phy_type_low) { 272 171 case ICE_PHY_TYPE_LOW_1000BASE_SX: 273 172 case ICE_PHY_TYPE_LOW_1000BASE_LX: ··· 288 163 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 289 164 case ICE_PHY_TYPE_LOW_25GBASE_SR: 290 165 case ICE_PHY_TYPE_LOW_25GBASE_LR: 291 - case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 292 166 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 293 167 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 294 168 case ICE_PHY_TYPE_LOW_50GBASE_SR2: ··· 299 175 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 300 176 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 301 177 case ICE_PHY_TYPE_LOW_100GBASE_DR: 178 + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 179 + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 180 + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 181 + case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 182 + case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 183 + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 184 + case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 185 + case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 302 186 return ICE_MEDIA_FIBER; 303 187 case ICE_PHY_TYPE_LOW_100BASE_TX: 304 188 case ICE_PHY_TYPE_LOW_1000BASE_T: ··· 326 194 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 327 195 case 
ICE_PHY_TYPE_LOW_100GBASE_CP2: 328 196 return ICE_MEDIA_DA; 197 + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 198 + case ICE_PHY_TYPE_LOW_40G_XLAUI: 199 + case ICE_PHY_TYPE_LOW_50G_LAUI2: 200 + case ICE_PHY_TYPE_LOW_50G_AUI2: 201 + case ICE_PHY_TYPE_LOW_50G_AUI1: 202 + case ICE_PHY_TYPE_LOW_100G_AUI4: 203 + case ICE_PHY_TYPE_LOW_100G_CAUI4: 204 + if (ice_is_media_cage_present(pi)) 205 + return ICE_MEDIA_DA; 206 + fallthrough; 329 207 case ICE_PHY_TYPE_LOW_1000BASE_KX: 330 208 case ICE_PHY_TYPE_LOW_2500BASE_KX: 331 209 case ICE_PHY_TYPE_LOW_2500BASE_X: ··· 353 211 } 354 212 } else { 355 213 switch (hw_link_info->phy_type_high) { 214 + case ICE_PHY_TYPE_HIGH_100G_AUI2: 215 + case ICE_PHY_TYPE_HIGH_100G_CAUI2: 216 + if (ice_is_media_cage_present(pi)) 217 + return ICE_MEDIA_DA; 218 + fallthrough; 356 219 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 357 220 return ICE_MEDIA_BACKPLANE; 221 + case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 222 + case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 223 + return ICE_MEDIA_FIBER; 358 224 } 359 225 } 360 226 return ICE_MEDIA_UNKNOWN; ··· 442 292 443 293 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); 444 294 445 - ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed); 446 - ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", 295 + ice_debug(hw, ICE_DBG_LINK, "get link info\n"); 296 + ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed); 297 + ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 447 298 (unsigned long long)li->phy_type_low); 448 - ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", 299 + ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 449 300 (unsigned long long)li->phy_type_high); 450 - ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type); 451 - ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info); 452 - ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info); 453 - ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info); 454 - 
ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena); 455 - ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size); 456 - ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing); 301 + ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type); 302 + ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info); 303 + ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info); 304 + ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info); 305 + ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info); 306 + ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena); 307 + ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n", 308 + li->max_frame_size); 309 + ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing); 457 310 458 311 /* save link status information */ 459 312 if (link) ··· 1770 1617 } 1771 1618 1772 1619 /** 1773 - * ice_parse_caps - parse function/device capabilities 1620 + * ice_parse_common_caps - parse common device/function capabilities 1774 1621 * @hw: pointer to the HW struct 1775 - * @buf: pointer to a buffer containing function/device capability records 1776 - * @cap_count: number of capability records in the list 1777 - * @opc: type of capabilities list to parse 1622 + * @caps: pointer to common capabilities structure 1623 + * @elem: the capability element to parse 1624 + * @prefix: message prefix for tracing capabilities 1778 1625 * 1779 - * Helper function to parse function(0x000a)/device(0x000b) capabilities list. 1626 + * Given a capability element, extract relevant details into the common 1627 + * capability structure. 1628 + * 1629 + * Returns: true if the capability matches one of the common capability ids, 1630 + * false otherwise. 
1631 + */ 1632 + static bool 1633 + ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 1634 + struct ice_aqc_list_caps_elem *elem, const char *prefix) 1635 + { 1636 + u32 logical_id = le32_to_cpu(elem->logical_id); 1637 + u32 phys_id = le32_to_cpu(elem->phys_id); 1638 + u32 number = le32_to_cpu(elem->number); 1639 + u16 cap = le16_to_cpu(elem->cap); 1640 + bool found = true; 1641 + 1642 + switch (cap) { 1643 + case ICE_AQC_CAPS_VALID_FUNCTIONS: 1644 + caps->valid_functions = number; 1645 + ice_debug(hw, ICE_DBG_INIT, 1646 + "%s: valid_functions (bitmap) = %d\n", prefix, 1647 + caps->valid_functions); 1648 + break; 1649 + case ICE_AQC_CAPS_SRIOV: 1650 + caps->sr_iov_1_1 = (number == 1); 1651 + ice_debug(hw, ICE_DBG_INIT, 1652 + "%s: sr_iov_1_1 = %d\n", prefix, 1653 + caps->sr_iov_1_1); 1654 + break; 1655 + case ICE_AQC_CAPS_DCB: 1656 + caps->dcb = (number == 1); 1657 + caps->active_tc_bitmap = logical_id; 1658 + caps->maxtc = phys_id; 1659 + ice_debug(hw, ICE_DBG_INIT, 1660 + "%s: dcb = %d\n", prefix, caps->dcb); 1661 + ice_debug(hw, ICE_DBG_INIT, 1662 + "%s: active_tc_bitmap = %d\n", prefix, 1663 + caps->active_tc_bitmap); 1664 + ice_debug(hw, ICE_DBG_INIT, 1665 + "%s: maxtc = %d\n", prefix, caps->maxtc); 1666 + break; 1667 + case ICE_AQC_CAPS_RSS: 1668 + caps->rss_table_size = number; 1669 + caps->rss_table_entry_width = logical_id; 1670 + ice_debug(hw, ICE_DBG_INIT, 1671 + "%s: rss_table_size = %d\n", prefix, 1672 + caps->rss_table_size); 1673 + ice_debug(hw, ICE_DBG_INIT, 1674 + "%s: rss_table_entry_width = %d\n", prefix, 1675 + caps->rss_table_entry_width); 1676 + break; 1677 + case ICE_AQC_CAPS_RXQS: 1678 + caps->num_rxq = number; 1679 + caps->rxq_first_id = phys_id; 1680 + ice_debug(hw, ICE_DBG_INIT, 1681 + "%s: num_rxq = %d\n", prefix, 1682 + caps->num_rxq); 1683 + ice_debug(hw, ICE_DBG_INIT, 1684 + "%s: rxq_first_id = %d\n", prefix, 1685 + caps->rxq_first_id); 1686 + break; 1687 + case ICE_AQC_CAPS_TXQS: 1688 + caps->num_txq = number; 
1689 + caps->txq_first_id = phys_id; 1690 + ice_debug(hw, ICE_DBG_INIT, 1691 + "%s: num_txq = %d\n", prefix, 1692 + caps->num_txq); 1693 + ice_debug(hw, ICE_DBG_INIT, 1694 + "%s: txq_first_id = %d\n", prefix, 1695 + caps->txq_first_id); 1696 + break; 1697 + case ICE_AQC_CAPS_MSIX: 1698 + caps->num_msix_vectors = number; 1699 + caps->msix_vector_first_id = phys_id; 1700 + ice_debug(hw, ICE_DBG_INIT, 1701 + "%s: num_msix_vectors = %d\n", prefix, 1702 + caps->num_msix_vectors); 1703 + ice_debug(hw, ICE_DBG_INIT, 1704 + "%s: msix_vector_first_id = %d\n", prefix, 1705 + caps->msix_vector_first_id); 1706 + break; 1707 + case ICE_AQC_CAPS_MAX_MTU: 1708 + caps->max_mtu = number; 1709 + ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 1710 + prefix, caps->max_mtu); 1711 + break; 1712 + default: 1713 + /* Not one of the recognized common capabilities */ 1714 + found = false; 1715 + } 1716 + 1717 + return found; 1718 + } 1719 + 1720 + /** 1721 + * ice_recalc_port_limited_caps - Recalculate port limited capabilities 1722 + * @hw: pointer to the HW structure 1723 + * @caps: pointer to capabilities structure to fix 1724 + * 1725 + * Re-calculate the capabilities that are dependent on the number of physical 1726 + * ports; i.e. some features are not supported or function differently on 1727 + * devices with more than 4 ports. 
1780 1728 */ 1781 1729 static void 1782 - ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, 1783 - enum ice_adminq_opc opc) 1730 + ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 1784 1731 { 1785 - struct ice_aqc_list_caps_elem *cap_resp; 1786 - struct ice_hw_func_caps *func_p = NULL; 1787 - struct ice_hw_dev_caps *dev_p = NULL; 1788 - struct ice_hw_common_caps *caps; 1789 - char const *prefix; 1790 - u32 i; 1791 - 1792 - if (!buf) 1793 - return; 1794 - 1795 - cap_resp = (struct ice_aqc_list_caps_elem *)buf; 1796 - 1797 - if (opc == ice_aqc_opc_list_dev_caps) { 1798 - dev_p = &hw->dev_caps; 1799 - caps = &dev_p->common_cap; 1800 - prefix = "dev cap"; 1801 - } else if (opc == ice_aqc_opc_list_func_caps) { 1802 - func_p = &hw->func_caps; 1803 - caps = &func_p->common_cap; 1804 - prefix = "func cap"; 1805 - } else { 1806 - ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n"); 1807 - return; 1808 - } 1809 - 1810 - for (i = 0; caps && i < cap_count; i++, cap_resp++) { 1811 - u32 logical_id = le32_to_cpu(cap_resp->logical_id); 1812 - u32 phys_id = le32_to_cpu(cap_resp->phys_id); 1813 - u32 number = le32_to_cpu(cap_resp->number); 1814 - u16 cap = le16_to_cpu(cap_resp->cap); 1815 - 1816 - switch (cap) { 1817 - case ICE_AQC_CAPS_VALID_FUNCTIONS: 1818 - caps->valid_functions = number; 1819 - ice_debug(hw, ICE_DBG_INIT, 1820 - "%s: valid_functions (bitmap) = %d\n", prefix, 1821 - caps->valid_functions); 1822 - 1823 - /* store func count for resource management purposes */ 1824 - if (dev_p) 1825 - dev_p->num_funcs = hweight32(number); 1826 - break; 1827 - case ICE_AQC_CAPS_SRIOV: 1828 - caps->sr_iov_1_1 = (number == 1); 1829 - ice_debug(hw, ICE_DBG_INIT, 1830 - "%s: sr_iov_1_1 = %d\n", prefix, 1831 - caps->sr_iov_1_1); 1832 - break; 1833 - case ICE_AQC_CAPS_VF: 1834 - if (dev_p) { 1835 - dev_p->num_vfs_exposed = number; 1836 - ice_debug(hw, ICE_DBG_INIT, 1837 - "%s: num_vfs_exposed = %d\n", prefix, 1838 - dev_p->num_vfs_exposed); 1839 - } 
else if (func_p) { 1840 - func_p->num_allocd_vfs = number; 1841 - func_p->vf_base_id = logical_id; 1842 - ice_debug(hw, ICE_DBG_INIT, 1843 - "%s: num_allocd_vfs = %d\n", prefix, 1844 - func_p->num_allocd_vfs); 1845 - ice_debug(hw, ICE_DBG_INIT, 1846 - "%s: vf_base_id = %d\n", prefix, 1847 - func_p->vf_base_id); 1848 - } 1849 - break; 1850 - case ICE_AQC_CAPS_VSI: 1851 - if (dev_p) { 1852 - dev_p->num_vsi_allocd_to_host = number; 1853 - ice_debug(hw, ICE_DBG_INIT, 1854 - "%s: num_vsi_allocd_to_host = %d\n", 1855 - prefix, 1856 - dev_p->num_vsi_allocd_to_host); 1857 - } else if (func_p) { 1858 - func_p->guar_num_vsi = 1859 - ice_get_num_per_func(hw, ICE_MAX_VSI); 1860 - ice_debug(hw, ICE_DBG_INIT, 1861 - "%s: guar_num_vsi (fw) = %d\n", 1862 - prefix, number); 1863 - ice_debug(hw, ICE_DBG_INIT, 1864 - "%s: guar_num_vsi = %d\n", 1865 - prefix, func_p->guar_num_vsi); 1866 - } 1867 - break; 1868 - case ICE_AQC_CAPS_DCB: 1869 - caps->dcb = (number == 1); 1870 - caps->active_tc_bitmap = logical_id; 1871 - caps->maxtc = phys_id; 1872 - ice_debug(hw, ICE_DBG_INIT, 1873 - "%s: dcb = %d\n", prefix, caps->dcb); 1874 - ice_debug(hw, ICE_DBG_INIT, 1875 - "%s: active_tc_bitmap = %d\n", prefix, 1876 - caps->active_tc_bitmap); 1877 - ice_debug(hw, ICE_DBG_INIT, 1878 - "%s: maxtc = %d\n", prefix, caps->maxtc); 1879 - break; 1880 - case ICE_AQC_CAPS_RSS: 1881 - caps->rss_table_size = number; 1882 - caps->rss_table_entry_width = logical_id; 1883 - ice_debug(hw, ICE_DBG_INIT, 1884 - "%s: rss_table_size = %d\n", prefix, 1885 - caps->rss_table_size); 1886 - ice_debug(hw, ICE_DBG_INIT, 1887 - "%s: rss_table_entry_width = %d\n", prefix, 1888 - caps->rss_table_entry_width); 1889 - break; 1890 - case ICE_AQC_CAPS_RXQS: 1891 - caps->num_rxq = number; 1892 - caps->rxq_first_id = phys_id; 1893 - ice_debug(hw, ICE_DBG_INIT, 1894 - "%s: num_rxq = %d\n", prefix, 1895 - caps->num_rxq); 1896 - ice_debug(hw, ICE_DBG_INIT, 1897 - "%s: rxq_first_id = %d\n", prefix, 1898 - caps->rxq_first_id); 1899 - 
break; 1900 - case ICE_AQC_CAPS_TXQS: 1901 - caps->num_txq = number; 1902 - caps->txq_first_id = phys_id; 1903 - ice_debug(hw, ICE_DBG_INIT, 1904 - "%s: num_txq = %d\n", prefix, 1905 - caps->num_txq); 1906 - ice_debug(hw, ICE_DBG_INIT, 1907 - "%s: txq_first_id = %d\n", prefix, 1908 - caps->txq_first_id); 1909 - break; 1910 - case ICE_AQC_CAPS_MSIX: 1911 - caps->num_msix_vectors = number; 1912 - caps->msix_vector_first_id = phys_id; 1913 - ice_debug(hw, ICE_DBG_INIT, 1914 - "%s: num_msix_vectors = %d\n", prefix, 1915 - caps->num_msix_vectors); 1916 - ice_debug(hw, ICE_DBG_INIT, 1917 - "%s: msix_vector_first_id = %d\n", prefix, 1918 - caps->msix_vector_first_id); 1919 - break; 1920 - case ICE_AQC_CAPS_FD: 1921 - if (dev_p) { 1922 - dev_p->num_flow_director_fltr = number; 1923 - ice_debug(hw, ICE_DBG_INIT, 1924 - "%s: num_flow_director_fltr = %d\n", 1925 - prefix, 1926 - dev_p->num_flow_director_fltr); 1927 - } 1928 - if (func_p) { 1929 - u32 reg_val, val; 1930 - 1931 - reg_val = rd32(hw, GLQF_FD_SIZE); 1932 - val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> 1933 - GLQF_FD_SIZE_FD_GSIZE_S; 1934 - func_p->fd_fltr_guar = 1935 - ice_get_num_per_func(hw, val); 1936 - val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> 1937 - GLQF_FD_SIZE_FD_BSIZE_S; 1938 - func_p->fd_fltr_best_effort = val; 1939 - ice_debug(hw, ICE_DBG_INIT, 1940 - "%s: fd_fltr_guar = %d\n", 1941 - prefix, func_p->fd_fltr_guar); 1942 - ice_debug(hw, ICE_DBG_INIT, 1943 - "%s: fd_fltr_best_effort = %d\n", 1944 - prefix, func_p->fd_fltr_best_effort); 1945 - } 1946 - break; 1947 - case ICE_AQC_CAPS_MAX_MTU: 1948 - caps->max_mtu = number; 1949 - ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 1950 - prefix, caps->max_mtu); 1951 - break; 1952 - default: 1953 - ice_debug(hw, ICE_DBG_INIT, 1954 - "%s: unknown capability[%d]: 0x%x\n", prefix, 1955 - i, cap); 1956 - break; 1957 - } 1958 - } 1959 - 1960 - /* Re-calculate capabilities that are dependent on the number of 1961 - * physical ports; i.e. 
some features are not supported or function 1962 - * differently on devices with more than 4 ports. 1732 + /* This assumes device capabilities are always scanned before function 1733 + * capabilities during the initialization flow. 1963 1734 */ 1964 1735 if (hw->dev_caps.num_funcs > 4) { 1965 1736 /* Max 4 TCs per port */ 1966 1737 caps->maxtc = 4; 1967 1738 ice_debug(hw, ICE_DBG_INIT, 1968 - "%s: maxtc = %d (based on #ports)\n", prefix, 1739 + "reducing maxtc to %d (based on #ports)\n", 1969 1740 caps->maxtc); 1970 1741 } 1742 + } 1743 + 1744 + /** 1745 + * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 1746 + * @hw: pointer to the HW struct 1747 + * @func_p: pointer to function capabilities structure 1748 + * @cap: pointer to the capability element to parse 1749 + * 1750 + * Extract function capabilities for ICE_AQC_CAPS_VF. 1751 + */ 1752 + static void 1753 + ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 1754 + struct ice_aqc_list_caps_elem *cap) 1755 + { 1756 + u32 logical_id = le32_to_cpu(cap->logical_id); 1757 + u32 number = le32_to_cpu(cap->number); 1758 + 1759 + func_p->num_allocd_vfs = number; 1760 + func_p->vf_base_id = logical_id; 1761 + ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 1762 + func_p->num_allocd_vfs); 1763 + ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 1764 + func_p->vf_base_id); 1765 + } 1766 + 1767 + /** 1768 + * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 1769 + * @hw: pointer to the HW struct 1770 + * @func_p: pointer to function capabilities structure 1771 + * @cap: pointer to the capability element to parse 1772 + * 1773 + * Extract function capabilities for ICE_AQC_CAPS_VSI. 
1774 + */ 1775 + static void 1776 + ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 1777 + struct ice_aqc_list_caps_elem *cap) 1778 + { 1779 + func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 1780 + ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 1781 + le32_to_cpu(cap->number)); 1782 + ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 1783 + func_p->guar_num_vsi); 1784 + } 1785 + 1786 + /** 1787 + * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 1788 + * @hw: pointer to the HW struct 1789 + * @func_p: pointer to function capabilities structure 1790 + * 1791 + * Extract function capabilities for ICE_AQC_CAPS_FD. 1792 + */ 1793 + static void 1794 + ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 1795 + { 1796 + u32 reg_val, val; 1797 + 1798 + reg_val = rd32(hw, GLQF_FD_SIZE); 1799 + val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> 1800 + GLQF_FD_SIZE_FD_GSIZE_S; 1801 + func_p->fd_fltr_guar = 1802 + ice_get_num_per_func(hw, val); 1803 + val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> 1804 + GLQF_FD_SIZE_FD_BSIZE_S; 1805 + func_p->fd_fltr_best_effort = val; 1806 + 1807 + ice_debug(hw, ICE_DBG_INIT, 1808 + "func caps: fd_fltr_guar = %d\n", 1809 + func_p->fd_fltr_guar); 1810 + ice_debug(hw, ICE_DBG_INIT, 1811 + "func caps: fd_fltr_best_effort = %d\n", 1812 + func_p->fd_fltr_best_effort); 1813 + } 1814 + 1815 + /** 1816 + * ice_parse_func_caps - Parse function capabilities 1817 + * @hw: pointer to the HW struct 1818 + * @func_p: pointer to function capabilities structure 1819 + * @buf: buffer containing the function capability records 1820 + * @cap_count: the number of capabilities 1821 + * 1822 + * Helper function to parse function (0x000A) capabilities list. For 1823 + * capabilities shared between device and function, this relies on 1824 + * ice_parse_common_caps. 
1825 + * 1826 + * Loop through the list of provided capabilities and extract the relevant 1827 + * data into the function capabilities structured. 1828 + */ 1829 + static void 1830 + ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 1831 + void *buf, u32 cap_count) 1832 + { 1833 + struct ice_aqc_list_caps_elem *cap_resp; 1834 + u32 i; 1835 + 1836 + cap_resp = (struct ice_aqc_list_caps_elem *)buf; 1837 + 1838 + memset(func_p, 0, sizeof(*func_p)); 1839 + 1840 + for (i = 0; i < cap_count; i++) { 1841 + u16 cap = le16_to_cpu(cap_resp[i].cap); 1842 + bool found; 1843 + 1844 + found = ice_parse_common_caps(hw, &func_p->common_cap, 1845 + &cap_resp[i], "func caps"); 1846 + 1847 + switch (cap) { 1848 + case ICE_AQC_CAPS_VF: 1849 + ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 1850 + break; 1851 + case ICE_AQC_CAPS_VSI: 1852 + ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 1853 + break; 1854 + case ICE_AQC_CAPS_FD: 1855 + ice_parse_fdir_func_caps(hw, func_p); 1856 + break; 1857 + default: 1858 + /* Don't list common capabilities as unknown */ 1859 + if (!found) 1860 + ice_debug(hw, ICE_DBG_INIT, 1861 + "func caps: unknown capability[%d]: 0x%x\n", 1862 + i, cap); 1863 + break; 1864 + } 1865 + } 1866 + 1867 + ice_recalc_port_limited_caps(hw, &func_p->common_cap); 1868 + } 1869 + 1870 + /** 1871 + * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 1872 + * @hw: pointer to the HW struct 1873 + * @dev_p: pointer to device capabilities structure 1874 + * @cap: capability element to parse 1875 + * 1876 + * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 
1877 + */ 1878 + static void 1879 + ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 1880 + struct ice_aqc_list_caps_elem *cap) 1881 + { 1882 + u32 number = le32_to_cpu(cap->number); 1883 + 1884 + dev_p->num_funcs = hweight32(number); 1885 + ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 1886 + dev_p->num_funcs); 1887 + } 1888 + 1889 + /** 1890 + * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 1891 + * @hw: pointer to the HW struct 1892 + * @dev_p: pointer to device capabilities structure 1893 + * @cap: capability element to parse 1894 + * 1895 + * Parse ICE_AQC_CAPS_VF for device capabilities. 1896 + */ 1897 + static void 1898 + ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 1899 + struct ice_aqc_list_caps_elem *cap) 1900 + { 1901 + u32 number = le32_to_cpu(cap->number); 1902 + 1903 + dev_p->num_vfs_exposed = number; 1904 + ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 1905 + dev_p->num_vfs_exposed); 1906 + } 1907 + 1908 + /** 1909 + * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 1910 + * @hw: pointer to the HW struct 1911 + * @dev_p: pointer to device capabilities structure 1912 + * @cap: capability element to parse 1913 + * 1914 + * Parse ICE_AQC_CAPS_VSI for device capabilities. 1915 + */ 1916 + static void 1917 + ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 1918 + struct ice_aqc_list_caps_elem *cap) 1919 + { 1920 + u32 number = le32_to_cpu(cap->number); 1921 + 1922 + dev_p->num_vsi_allocd_to_host = number; 1923 + ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 1924 + dev_p->num_vsi_allocd_to_host); 1925 + } 1926 + 1927 + /** 1928 + * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 1929 + * @hw: pointer to the HW struct 1930 + * @dev_p: pointer to device capabilities structure 1931 + * @cap: capability element to parse 1932 + * 1933 + * Parse ICE_AQC_CAPS_FD for device capabilities. 
1934 + */ 1935 + static void 1936 + ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 1937 + struct ice_aqc_list_caps_elem *cap) 1938 + { 1939 + u32 number = le32_to_cpu(cap->number); 1940 + 1941 + dev_p->num_flow_director_fltr = number; 1942 + ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 1943 + dev_p->num_flow_director_fltr); 1944 + } 1945 + 1946 + /** 1947 + * ice_parse_dev_caps - Parse device capabilities 1948 + * @hw: pointer to the HW struct 1949 + * @dev_p: pointer to device capabilities structure 1950 + * @buf: buffer containing the device capability records 1951 + * @cap_count: the number of capabilities 1952 + * 1953 + * Helper device to parse device (0x000B) capabilities list. For 1954 + * capabilities shared between device and device, this relies on 1955 + * ice_parse_common_caps. 1956 + * 1957 + * Loop through the list of provided capabilities and extract the relevant 1958 + * data into the device capabilities structured. 1959 + */ 1960 + static void 1961 + ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 1962 + void *buf, u32 cap_count) 1963 + { 1964 + struct ice_aqc_list_caps_elem *cap_resp; 1965 + u32 i; 1966 + 1967 + cap_resp = (struct ice_aqc_list_caps_elem *)buf; 1968 + 1969 + memset(dev_p, 0, sizeof(*dev_p)); 1970 + 1971 + for (i = 0; i < cap_count; i++) { 1972 + u16 cap = le16_to_cpu(cap_resp[i].cap); 1973 + bool found; 1974 + 1975 + found = ice_parse_common_caps(hw, &dev_p->common_cap, 1976 + &cap_resp[i], "dev caps"); 1977 + 1978 + switch (cap) { 1979 + case ICE_AQC_CAPS_VALID_FUNCTIONS: 1980 + ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 1981 + break; 1982 + case ICE_AQC_CAPS_VF: 1983 + ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 1984 + break; 1985 + case ICE_AQC_CAPS_VSI: 1986 + ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 1987 + break; 1988 + case ICE_AQC_CAPS_FD: 1989 + ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 1990 + break; 1991 + default: 
1992 + /* Don't list common capabilities as unknown */ 1993 + if (!found) 1994 + ice_debug(hw, ICE_DBG_INIT, 1995 + "dev caps: unknown capability[%d]: 0x%x\n", 1996 + i, cap); 1997 + break; 1998 + } 1999 + } 2000 + 2001 + ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 1971 2002 } 1972 2003 1973 2004 /** ··· 2197 1860 } 2198 1861 2199 1862 /** 2200 - * ice_aq_discover_caps - query function/device capabilities 2201 - * @hw: pointer to the HW struct 2202 - * @buf: a virtual buffer to hold the capabilities 2203 - * @buf_size: Size of the virtual buffer 2204 - * @cap_count: cap count needed if AQ err==ENOMEM 2205 - * @opc: capabilities type to discover - pass in the command opcode 2206 - * @cd: pointer to command details structure or NULL 1863 + * ice_discover_dev_caps - Read and extract device capabilities 1864 + * @hw: pointer to the hardware structure 1865 + * @dev_caps: pointer to device capabilities structure 2207 1866 * 2208 - * Get the function(0x000a)/device(0x000b) capabilities description from 2209 - * the firmware. 2210 - * 2211 - * NOTE: this function has the side effect of updating the hw->dev_caps or 2212 - * hw->func_caps by way of calling ice_parse_caps. 1867 + * Read the device capabilities and extract them into the dev_caps structure 1868 + * for later use. 
2213 1869 */ 2214 1870 static enum ice_status 2215 - ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2216 - enum ice_adminq_opc opc, struct ice_sq_cd *cd) 1871 + ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2217 1872 { 2218 - u32 local_cap_count = 0; 2219 1873 enum ice_status status; 1874 + u32 cap_count = 0; 1875 + void *cbuf; 2220 1876 2221 - status = ice_aq_list_caps(hw, buf, buf_size, &local_cap_count, 2222 - opc, cd); 1877 + cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 1878 + if (!cbuf) 1879 + return ICE_ERR_NO_MEMORY; 1880 + 1881 + /* Although the driver doesn't know the number of capabilities the 1882 + * device will return, we can simply send a 4KB buffer, the maximum 1883 + * possible size that firmware can return. 1884 + */ 1885 + cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 1886 + 1887 + status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 1888 + ice_aqc_opc_list_dev_caps, NULL); 2223 1889 if (!status) 2224 - ice_parse_caps(hw, buf, local_cap_count, opc); 2225 - else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) 2226 - *cap_count = local_cap_count; 1890 + ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 1891 + kfree(cbuf); 2227 1892 2228 1893 return status; 2229 1894 } 2230 1895 2231 1896 /** 2232 - * ice_discover_caps - get info about the HW 1897 + * ice_discover_func_caps - Read and extract function capabilities 2233 1898 * @hw: pointer to the hardware structure 2234 - * @opc: capabilities type to discover - pass in the command opcode 1899 + * @func_caps: pointer to function capabilities structure 1900 + * 1901 + * Read the function capabilities and extract them into the func_caps structure 1902 + * for later use. 
2235 1903 */ 2236 1904 static enum ice_status 2237 - ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) 1905 + ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2238 1906 { 2239 1907 enum ice_status status; 2240 - u32 cap_count; 2241 - u16 cbuf_len; 2242 - u8 retries; 1908 + u32 cap_count = 0; 1909 + void *cbuf; 2243 1910 2244 - /* The driver doesn't know how many capabilities the device will return 2245 - * so the buffer size required isn't known ahead of time. The driver 2246 - * starts with cbuf_len and if this turns out to be insufficient, the 2247 - * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs. 2248 - * The driver then allocates the buffer based on the count and retries 2249 - * the operation. So it follows that the retry count is 2. 1911 + cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 1912 + if (!cbuf) 1913 + return ICE_ERR_NO_MEMORY; 1914 + 1915 + /* Although the driver doesn't know the number of capabilities the 1916 + * device will return, we can simply send a 4KB buffer, the maximum 1917 + * possible size that firmware can return. 
2250 1918 */ 2251 - #define ICE_GET_CAP_BUF_COUNT 40 2252 - #define ICE_GET_CAP_RETRY_COUNT 2 1919 + cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2253 1920 2254 - cap_count = ICE_GET_CAP_BUF_COUNT; 2255 - retries = ICE_GET_CAP_RETRY_COUNT; 2256 - 2257 - do { 2258 - void *cbuf; 2259 - 2260 - cbuf_len = (u16)(cap_count * 2261 - sizeof(struct ice_aqc_list_caps_elem)); 2262 - cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL); 2263 - if (!cbuf) 2264 - return ICE_ERR_NO_MEMORY; 2265 - 2266 - status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count, 2267 - opc, NULL); 2268 - devm_kfree(ice_hw_to_dev(hw), cbuf); 2269 - 2270 - if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM) 2271 - break; 2272 - 2273 - /* If ENOMEM is returned, try again with bigger buffer */ 2274 - } while (--retries); 1921 + status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 1922 + ice_aqc_opc_list_func_caps, NULL); 1923 + if (!status) 1924 + ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 1925 + kfree(cbuf); 2275 1926 2276 1927 return status; 2277 1928 } ··· 2336 2011 { 2337 2012 enum ice_status status; 2338 2013 2339 - status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps); 2340 - if (!status) 2341 - status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps); 2014 + status = ice_discover_dev_caps(hw, &hw->dev_caps); 2015 + if (status) 2016 + return status; 2342 2017 2343 - return status; 2018 + return ice_discover_func_caps(hw, &hw->func_caps); 2344 2019 } 2345 2020 2346 2021 /** ··· 2576 2251 /** 2577 2252 * ice_aq_set_phy_cfg 2578 2253 * @hw: pointer to the HW struct 2579 - * @lport: logical port number 2254 + * @pi: port info structure of the interested logical port 2580 2255 * @cfg: structure with PHY configuration data to be set 2581 2256 * @cd: pointer to command details structure or NULL 2582 2257 * ··· 2586 2261 * parameters. This status will be indicated by the command response (0x0601). 
2587 2262 */ 2588 2263 enum ice_status 2589 - ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, 2264 + ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 2590 2265 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 2591 2266 { 2592 2267 struct ice_aq_desc desc; ··· 2605 2280 } 2606 2281 2607 2282 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 2608 - desc.params.set_phy.lport_num = lport; 2283 + desc.params.set_phy.lport_num = pi->lport; 2609 2284 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2610 2285 2611 - ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", 2286 + ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 2287 + ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 2612 2288 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 2613 - ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", 2289 + ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 2614 2290 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 2615 - ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps); 2616 - ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n", 2617 - cfg->low_power_ctrl); 2618 - ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap); 2619 - ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value); 2620 - ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt); 2291 + ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 2292 + ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 2293 + cfg->low_power_ctrl_an); 2294 + ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 2295 + ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 2296 + ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 2297 + cfg->link_fec_opt); 2621 2298 2622 2299 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 2623 2300 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 2624 2301 status = 0; 2302 + 2303 + if (!status) 2304 + pi->phy.curr_user_phy_cfg = *cfg; 2625 2305 2626 
2306 return status; 2627 2307 } ··· 2661 2331 2662 2332 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 2663 2333 pcaps, NULL); 2664 - if (!status) 2665 - memcpy(li->module_type, &pcaps->module_type, 2666 - sizeof(li->module_type)); 2667 2334 2668 2335 devm_kfree(ice_hw_to_dev(hw), pcaps); 2669 2336 } 2670 2337 2671 2338 return status; 2339 + } 2340 + 2341 + /** 2342 + * ice_cache_phy_user_req 2343 + * @pi: port information structure 2344 + * @cache_data: PHY logging data 2345 + * @cache_mode: PHY logging mode 2346 + * 2347 + * Log the user request on (FC, FEC, SPEED) for later use. 2348 + */ 2349 + static void 2350 + ice_cache_phy_user_req(struct ice_port_info *pi, 2351 + struct ice_phy_cache_mode_data cache_data, 2352 + enum ice_phy_cache_mode cache_mode) 2353 + { 2354 + if (!pi) 2355 + return; 2356 + 2357 + switch (cache_mode) { 2358 + case ICE_FC_MODE: 2359 + pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 2360 + break; 2361 + case ICE_SPEED_MODE: 2362 + pi->phy.curr_user_speed_req = 2363 + cache_data.data.curr_user_speed_req; 2364 + break; 2365 + case ICE_FEC_MODE: 2366 + pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 2367 + break; 2368 + default: 2369 + break; 2370 + } 2371 + } 2372 + 2373 + /** 2374 + * ice_caps_to_fc_mode 2375 + * @caps: PHY capabilities 2376 + * 2377 + * Convert PHY FC capabilities to ice FC mode 2378 + */ 2379 + enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 2380 + { 2381 + if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 2382 + caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 2383 + return ICE_FC_FULL; 2384 + 2385 + if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 2386 + return ICE_FC_TX_PAUSE; 2387 + 2388 + if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 2389 + return ICE_FC_RX_PAUSE; 2390 + 2391 + return ICE_FC_NONE; 2392 + } 2393 + 2394 + /** 2395 + * ice_caps_to_fec_mode 2396 + * @caps: PHY capabilities 2397 + * @fec_options: Link FEC options 2398 + * 2399 + * Convert PHY FEC capabilities to ice FEC mode 2400 + */ 2401 + 
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 2402 + { 2403 + if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 2404 + return ICE_FEC_AUTO; 2405 + 2406 + if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 2407 + ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 2408 + ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 2409 + ICE_AQC_PHY_FEC_25G_KR_REQ)) 2410 + return ICE_FEC_BASER; 2411 + 2412 + if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 2413 + ICE_AQC_PHY_FEC_25G_RS_544_REQ | 2414 + ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 2415 + return ICE_FEC_RS; 2416 + 2417 + return ICE_FEC_NONE; 2418 + } 2419 + 2420 + /** 2421 + * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 2422 + * @pi: port information structure 2423 + * @cfg: PHY configuration data to set FC mode 2424 + * @req_mode: FC mode to configure 2425 + */ 2426 + enum ice_status 2427 + ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 2428 + enum ice_fc_mode req_mode) 2429 + { 2430 + struct ice_phy_cache_mode_data cache_data; 2431 + u8 pause_mask = 0x0; 2432 + 2433 + if (!pi || !cfg) 2434 + return ICE_ERR_BAD_PTR; 2435 + 2436 + switch (req_mode) { 2437 + case ICE_FC_FULL: 2438 + pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2439 + pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2440 + break; 2441 + case ICE_FC_RX_PAUSE: 2442 + pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2443 + break; 2444 + case ICE_FC_TX_PAUSE: 2445 + pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2446 + break; 2447 + default: 2448 + break; 2449 + } 2450 + 2451 + /* clear the old pause settings */ 2452 + cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 2453 + ICE_AQC_PHY_EN_RX_LINK_PAUSE); 2454 + 2455 + /* set the new capabilities */ 2456 + cfg->caps |= pause_mask; 2457 + 2458 + /* Cache user FC request */ 2459 + cache_data.data.curr_user_fc_req = req_mode; 2460 + ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 2461 + 2462 + return 0; 2672 2463 } 2673 2464 2674 2465 /** ··· 2806 2355 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 
2807 2356 struct ice_aqc_get_phy_caps_data *pcaps; 2808 2357 enum ice_status status; 2809 - u8 pause_mask = 0x0; 2810 2358 struct ice_hw *hw; 2811 2359 2812 - if (!pi) 2813 - return ICE_ERR_PARAM; 2814 - hw = pi->hw; 2815 - *aq_failures = ICE_SET_FC_AQ_FAIL_NONE; 2360 + if (!pi || !aq_failures) 2361 + return ICE_ERR_BAD_PTR; 2816 2362 2817 - switch (pi->fc.req_mode) { 2818 - case ICE_FC_FULL: 2819 - pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2820 - pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2821 - break; 2822 - case ICE_FC_RX_PAUSE: 2823 - pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2824 - break; 2825 - case ICE_FC_TX_PAUSE: 2826 - pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2827 - break; 2828 - default: 2829 - break; 2830 - } 2363 + *aq_failures = 0; 2364 + hw = pi->hw; 2831 2365 2832 2366 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 2833 2367 if (!pcaps) ··· 2826 2390 goto out; 2827 2391 } 2828 2392 2829 - /* clear the old pause settings */ 2830 - cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 2831 - ICE_AQC_PHY_EN_RX_LINK_PAUSE); 2393 + ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 2832 2394 2833 - /* set the new capabilities */ 2834 - cfg.caps |= pause_mask; 2395 + /* Configure the set PHY data */ 2396 + status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 2397 + if (status) 2398 + goto out; 2835 2399 2836 2400 /* If the capabilities have changed, then set the new config */ 2837 2401 if (cfg.caps != pcaps->caps) { ··· 2840 2404 /* Auto restart link so settings take effect */ 2841 2405 if (ena_auto_link_update) 2842 2406 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2843 - /* Copy over all the old settings */ 2844 - cfg.phy_type_high = pcaps->phy_type_high; 2845 - cfg.phy_type_low = pcaps->phy_type_low; 2846 - cfg.low_power_ctrl = pcaps->low_power_ctrl; 2847 - cfg.eee_cap = pcaps->eee_cap; 2848 - cfg.eeer_value = pcaps->eeer_value; 2849 - cfg.link_fec_opt = pcaps->link_fec_options; 2850 2407 2851 - status = ice_aq_set_phy_cfg(hw, 
pi->lport, &cfg, NULL); 2408 + status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 2852 2409 if (status) { 2853 2410 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 2854 2411 goto out; ··· 2871 2442 } 2872 2443 2873 2444 /** 2445 + * ice_phy_caps_equals_cfg 2446 + * @phy_caps: PHY capabilities 2447 + * @phy_cfg: PHY configuration 2448 + * 2449 + * Helper function to determine if PHY capabilities matches PHY 2450 + * configuration 2451 + */ 2452 + bool 2453 + ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 2454 + struct ice_aqc_set_phy_cfg_data *phy_cfg) 2455 + { 2456 + u8 caps_mask, cfg_mask; 2457 + 2458 + if (!phy_caps || !phy_cfg) 2459 + return false; 2460 + 2461 + /* These bits are not common between capabilities and configuration. 2462 + * Do not use them to determine equality. 2463 + */ 2464 + caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 2465 + ICE_AQC_GET_PHY_EN_MOD_QUAL); 2466 + cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2467 + 2468 + if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 2469 + phy_caps->phy_type_high != phy_cfg->phy_type_high || 2470 + ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 2471 + phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 2472 + phy_caps->eee_cap != phy_cfg->eee_cap || 2473 + phy_caps->eeer_value != phy_cfg->eeer_value || 2474 + phy_caps->link_fec_options != phy_cfg->link_fec_opt) 2475 + return false; 2476 + 2477 + return true; 2478 + } 2479 + 2480 + /** 2874 2481 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 2482 + * @pi: port information structure 2875 2483 * @caps: PHY ability structure to copy date from 2876 2484 * @cfg: PHY configuration structure to copy data to 2877 2485 * ··· 2916 2450 * data structure 2917 2451 */ 2918 2452 void 2919 - ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, 2453 + ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 2454 + struct ice_aqc_get_phy_caps_data *caps, 2920 
2455 struct ice_aqc_set_phy_cfg_data *cfg) 2921 2456 { 2922 - if (!caps || !cfg) 2457 + if (!pi || !caps || !cfg) 2923 2458 return; 2924 2459 2460 + memset(cfg, 0, sizeof(*cfg)); 2925 2461 cfg->phy_type_low = caps->phy_type_low; 2926 2462 cfg->phy_type_high = caps->phy_type_high; 2927 2463 cfg->caps = caps->caps; 2928 - cfg->low_power_ctrl = caps->low_power_ctrl; 2464 + cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 2929 2465 cfg->eee_cap = caps->eee_cap; 2930 2466 cfg->eeer_value = caps->eeer_value; 2931 2467 cfg->link_fec_opt = caps->link_fec_options; 2468 + cfg->module_compliance_enforcement = 2469 + caps->module_compliance_enforcement; 2470 + 2471 + if (ice_fw_supports_link_override(pi->hw)) { 2472 + struct ice_link_default_override_tlv tlv; 2473 + 2474 + if (ice_get_link_default_override(&tlv, pi)) 2475 + return; 2476 + 2477 + if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) 2478 + cfg->module_compliance_enforcement |= 2479 + ICE_LINK_OVERRIDE_STRICT_MODE; 2480 + } 2932 2481 } 2933 2482 2934 2483 /** 2935 2484 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 2485 + * @pi: port information structure 2936 2486 * @cfg: PHY configuration data to set FEC mode 2937 2487 * @fec: FEC mode to configure 2938 - * 2939 - * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC 2940 - * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps 2941 - * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling. 
2942 2488 */ 2943 - void 2944 - ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) 2489 + enum ice_status 2490 + ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 2491 + enum ice_fec_mode fec) 2945 2492 { 2493 + struct ice_aqc_get_phy_caps_data *pcaps; 2494 + enum ice_status status; 2495 + 2496 + if (!pi || !cfg) 2497 + return ICE_ERR_BAD_PTR; 2498 + 2499 + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2500 + if (!pcaps) 2501 + return ICE_ERR_NO_MEMORY; 2502 + 2503 + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, 2504 + NULL); 2505 + if (status) 2506 + goto out; 2507 + 2508 + cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 2509 + cfg->link_fec_opt = pcaps->link_fec_options; 2510 + 2946 2511 switch (fec) { 2947 2512 case ICE_FEC_BASER: 2948 2513 /* Clear RS bits, and AND BASE-R ability 2949 2514 * bits and OR request bits. 2950 2515 */ 2951 2516 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 2952 - ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 2517 + ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 2953 2518 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 2954 - ICE_AQC_PHY_FEC_25G_KR_REQ; 2519 + ICE_AQC_PHY_FEC_25G_KR_REQ; 2955 2520 break; 2956 2521 case ICE_FEC_RS: 2957 2522 /* Clear BASE-R bits, and AND RS ability ··· 2990 2493 */ 2991 2494 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 2992 2495 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 2993 - ICE_AQC_PHY_FEC_25G_RS_544_REQ; 2496 + ICE_AQC_PHY_FEC_25G_RS_544_REQ; 2994 2497 break; 2995 2498 case ICE_FEC_NONE: 2996 2499 /* Clear all FEC option bits. */ ··· 2999 2502 case ICE_FEC_AUTO: 3000 2503 /* AND auto FEC bit, and all caps bits. 
*/ 3001 2504 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 2505 + cfg->link_fec_opt |= pcaps->link_fec_options; 2506 + break; 2507 + default: 2508 + status = ICE_ERR_PARAM; 3002 2509 break; 3003 2510 } 2511 + 2512 + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { 2513 + struct ice_link_default_override_tlv tlv; 2514 + 2515 + if (ice_get_link_default_override(&tlv, pi)) 2516 + goto out; 2517 + 2518 + if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 2519 + (tlv.options & ICE_LINK_OVERRIDE_EN)) 2520 + cfg->link_fec_opt = tlv.fec_options; 2521 + } 2522 + 2523 + out: 2524 + kfree(pcaps); 2525 + 2526 + return status; 3004 2527 } 3005 2528 3006 2529 /** ··· 4219 3702 if (status || num_elem_ret != 1) 4220 3703 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); 4221 3704 return status; 3705 + } 3706 + 3707 + /** 3708 + * ice_fw_supports_link_override 3709 + * @hw: pointer to the hardware structure 3710 + * 3711 + * Checks if the firmware supports link override 3712 + */ 3713 + bool ice_fw_supports_link_override(struct ice_hw *hw) 3714 + { 3715 + /* Currently, only supported for E810 devices */ 3716 + if (hw->mac_type != ICE_MAC_E810) 3717 + return false; 3718 + 3719 + if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { 3720 + if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) 3721 + return true; 3722 + if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && 3723 + hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) 3724 + return true; 3725 + } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { 3726 + return true; 3727 + } 3728 + 3729 + return false; 3730 + } 3731 + 3732 + /** 3733 + * ice_get_link_default_override 3734 + * @ldo: pointer to the link default override struct 3735 + * @pi: pointer to the port info struct 3736 + * 3737 + * Gets the link default override for a port 3738 + */ 3739 + enum ice_status 3740 + ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, 3741 + struct ice_port_info *pi) 3742 + { 3743 + u16 i, tlv, 
tlv_len, tlv_start, buf, offset; 3744 + struct ice_hw *hw = pi->hw; 3745 + enum ice_status status; 3746 + 3747 + status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, 3748 + ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); 3749 + if (status) { 3750 + ice_debug(hw, ICE_DBG_INIT, 3751 + "Failed to read link override TLV.\n"); 3752 + return status; 3753 + } 3754 + 3755 + /* Each port has its own config; calculate for our port */ 3756 + tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + 3757 + ICE_SR_PFA_LINK_OVERRIDE_OFFSET; 3758 + 3759 + /* link options first */ 3760 + status = ice_read_sr_word(hw, tlv_start, &buf); 3761 + if (status) { 3762 + ice_debug(hw, ICE_DBG_INIT, 3763 + "Failed to read override link options.\n"); 3764 + return status; 3765 + } 3766 + ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; 3767 + ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> 3768 + ICE_LINK_OVERRIDE_PHY_CFG_S; 3769 + 3770 + /* link PHY config */ 3771 + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; 3772 + status = ice_read_sr_word(hw, offset, &buf); 3773 + if (status) { 3774 + ice_debug(hw, ICE_DBG_INIT, 3775 + "Failed to read override phy config.\n"); 3776 + return status; 3777 + } 3778 + ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; 3779 + 3780 + /* PHY types low */ 3781 + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; 3782 + for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 3783 + status = ice_read_sr_word(hw, (offset + i), &buf); 3784 + if (status) { 3785 + ice_debug(hw, ICE_DBG_INIT, 3786 + "Failed to read override link options.\n"); 3787 + return status; 3788 + } 3789 + /* shift 16 bits at a time to fill 64 bits */ 3790 + ldo->phy_type_low |= ((u64)buf << (i * 16)); 3791 + } 3792 + 3793 + /* PHY types high */ 3794 + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + 3795 + ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; 3796 + for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 3797 + status = ice_read_sr_word(hw, (offset + i), 
&buf); 3798 + if (status) { 3799 + ice_debug(hw, ICE_DBG_INIT, 3800 + "Failed to read override link options.\n"); 3801 + return status; 3802 + } 3803 + /* shift 16 bits at a time to fill 64 bits */ 3804 + ldo->phy_type_high |= ((u64)buf << (i * 16)); 3805 + } 3806 + 3807 + return status; 3808 + } 3809 + 3810 + /** 3811 + * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled 3812 + * @caps: get PHY capability data 3813 + */ 3814 + bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) 3815 + { 3816 + if (caps->caps & ICE_AQC_PHY_AN_MODE || 3817 + caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | 3818 + ICE_AQC_PHY_AN_EN_CLAUSE73 | 3819 + ICE_AQC_PHY_AN_EN_CLAUSE37)) 3820 + return true; 3821 + 3822 + return false; 4222 3823 }
+20 -4
drivers/net/ethernet/intel/ice/ice_common.h
··· 98 98 struct ice_sq_cd *cd); 99 99 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); 100 100 enum ice_status 101 - ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, 101 + ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 102 102 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); 103 + bool ice_fw_supports_link_override(struct ice_hw *hw); 104 + enum ice_status 105 + ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, 106 + struct ice_port_info *pi); 107 + bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); 108 + 109 + enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); 110 + enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); 103 111 enum ice_status 104 112 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, 105 113 bool ena_auto_link_update); 114 + enum ice_status 115 + ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 116 + enum ice_fc_mode fc); 117 + bool 118 + ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps, 119 + struct ice_aqc_set_phy_cfg_data *cfg); 106 120 void 107 - ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); 108 - void 109 - ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, 121 + ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 122 + struct ice_aqc_get_phy_caps_data *caps, 110 123 struct ice_aqc_set_phy_cfg_data *cfg); 124 + enum ice_status 125 + ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 126 + enum ice_fec_mode fec); 111 127 enum ice_status 112 128 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 113 129 struct ice_sq_cd *cd);
+454 -284
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 966 966 { 967 967 struct ice_netdev_priv *np = netdev_priv(netdev); 968 968 struct ice_aqc_set_phy_cfg_data config = { 0 }; 969 - struct ice_aqc_get_phy_caps_data *caps; 970 969 struct ice_vsi *vsi = np->vsi; 971 - u8 sw_cfg_caps, sw_cfg_fec; 972 970 struct ice_port_info *pi; 973 - enum ice_status status; 974 - int err = 0; 975 971 976 972 pi = vsi->port_info; 977 973 if (!pi) ··· 979 983 return -EOPNOTSUPP; 980 984 } 981 985 982 - /* Get last SW configuration */ 983 - caps = kzalloc(sizeof(*caps), GFP_KERNEL); 984 - if (!caps) 985 - return -ENOMEM; 986 + /* Proceed only if requesting different FEC mode */ 987 + if (pi->phy.curr_user_fec_req == req_fec) 988 + return 0; 986 989 987 - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 988 - caps, NULL); 989 - if (status) { 990 - err = -EAGAIN; 991 - goto done; 992 - } 990 + /* Copy the current user PHY configuration. The current user PHY 991 + * configuration is initialized during probe from PHY capabilities 992 + * software mode, and updated on set PHY configuration. 
993 + */ 994 + memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config)); 993 995 994 - /* Copy SW configuration returned from PHY caps to PHY config */ 995 - ice_copy_phy_caps_to_cfg(caps, &config); 996 - sw_cfg_caps = caps->caps; 997 - sw_cfg_fec = caps->link_fec_options; 996 + ice_cfg_phy_fec(pi, &config, req_fec); 997 + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 998 998 999 - /* Get toloplogy caps, then copy PHY FEC topoloy caps to PHY config */ 1000 - memset(caps, 0, sizeof(*caps)); 999 + if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL)) 1000 + return -EAGAIN; 1001 1001 1002 - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 1003 - caps, NULL); 1004 - if (status) { 1005 - err = -EAGAIN; 1006 - goto done; 1007 - } 1002 + /* Save requested FEC config */ 1003 + pi->phy.curr_user_fec_req = req_fec; 1008 1004 1009 - config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC); 1010 - config.link_fec_opt = caps->link_fec_options; 1011 - 1012 - ice_cfg_phy_fec(&config, req_fec); 1013 - 1014 - /* If FEC mode has changed, then set PHY configuration and enable AN. */ 1015 - if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) != 1016 - (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) || 1017 - config.link_fec_opt != sw_cfg_fec) { 1018 - if (caps->caps & ICE_AQC_PHY_AN_MODE) 1019 - config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1020 - 1021 - status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL); 1022 - 1023 - if (status) 1024 - err = -EAGAIN; 1025 - } 1026 - 1027 - done: 1028 - kfree(caps); 1029 - return err; 1005 + return 0; 1030 1006 } 1031 1007 1032 1008 /** ··· 1196 1228 1197 1229 bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); 1198 1230 1231 + /* Do not allow change to link-down-on-close when Total Port Shutdown 1232 + * is enabled. 
1233 + */ 1234 + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) && 1235 + test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) { 1236 + dev_err(dev, "Setting link-down-on-close not supported on this port\n"); 1237 + set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 1238 + ret = -EINVAL; 1239 + goto ethtool_exit; 1240 + } 1241 + 1199 1242 if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { 1200 1243 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { 1201 1244 enum ice_status status; ··· 1294 1315 change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags); 1295 1316 ret = -EAGAIN; 1296 1317 } 1318 + ethtool_exit: 1297 1319 clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); 1298 1320 return ret; 1299 1321 } ··· 1399 1419 } 1400 1420 } 1401 1421 1422 + #define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \ 1423 + ICE_PHY_TYPE_LOW_100M_SGMII) 1424 + 1425 + #define ICE_PHY_TYPE_LOW_MASK_MIN_25G (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \ 1426 + ICE_PHY_TYPE_LOW_1000BASE_T | \ 1427 + ICE_PHY_TYPE_LOW_1000BASE_SX | \ 1428 + ICE_PHY_TYPE_LOW_1000BASE_LX | \ 1429 + ICE_PHY_TYPE_LOW_1000BASE_KX | \ 1430 + ICE_PHY_TYPE_LOW_1G_SGMII | \ 1431 + ICE_PHY_TYPE_LOW_2500BASE_T | \ 1432 + ICE_PHY_TYPE_LOW_2500BASE_X | \ 1433 + ICE_PHY_TYPE_LOW_2500BASE_KX | \ 1434 + ICE_PHY_TYPE_LOW_5GBASE_T | \ 1435 + ICE_PHY_TYPE_LOW_5GBASE_KR | \ 1436 + ICE_PHY_TYPE_LOW_10GBASE_T | \ 1437 + ICE_PHY_TYPE_LOW_10G_SFI_DA | \ 1438 + ICE_PHY_TYPE_LOW_10GBASE_SR | \ 1439 + ICE_PHY_TYPE_LOW_10GBASE_LR | \ 1440 + ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \ 1441 + ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \ 1442 + ICE_PHY_TYPE_LOW_10G_SFI_C2C) 1443 + 1444 + #define ICE_PHY_TYPE_LOW_MASK_100G (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \ 1445 + ICE_PHY_TYPE_LOW_100GBASE_SR4 | \ 1446 + ICE_PHY_TYPE_LOW_100GBASE_LR4 | \ 1447 + ICE_PHY_TYPE_LOW_100GBASE_KR4 | \ 1448 + ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \ 1449 + ICE_PHY_TYPE_LOW_100G_CAUI4 | \ 1450 + ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \ 1451 + ICE_PHY_TYPE_LOW_100G_AUI4 
| \ 1452 + ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \ 1453 + ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \ 1454 + ICE_PHY_TYPE_LOW_100GBASE_CP2 | \ 1455 + ICE_PHY_TYPE_LOW_100GBASE_SR2 | \ 1456 + ICE_PHY_TYPE_LOW_100GBASE_DR) 1457 + 1458 + #define ICE_PHY_TYPE_HIGH_MASK_100G (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \ 1459 + ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\ 1460 + ICE_PHY_TYPE_HIGH_100G_CAUI2 | \ 1461 + ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ 1462 + ICE_PHY_TYPE_HIGH_100G_AUI2) 1463 + 1464 + /** 1465 + * ice_mask_min_supported_speeds 1466 + * @phy_types_high: PHY type high 1467 + * @phy_types_low: PHY type low to apply minimum supported speeds mask 1468 + * 1469 + * Apply minimum supported speeds mask to PHY type low. These are the speeds 1470 + * for ethtool supported link mode. 1471 + */ 1472 + static 1473 + void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low) 1474 + { 1475 + /* if QSFP connection with 100G speed, minimum supported speed is 25G */ 1476 + if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G || 1477 + phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) 1478 + *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G; 1479 + else 1480 + *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G; 1481 + } 1482 + 1483 + #define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ 1484 + do { \ 1485 + if (req_speeds & (aq_link_speed) || \ 1486 + (!req_speeds && \ 1487 + (adv_phy_type_lo & phy_type_mask_lo || \ 1488 + adv_phy_type_hi & phy_type_mask_hi))) \ 1489 + ethtool_link_ksettings_add_link_mode(ks, advertising,\ 1490 + ethtool_link_mode); \ 1491 + } while (0) 1492 + 1402 1493 /** 1403 1494 * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes 1404 1495 * @netdev: network interface device structure ··· 1480 1429 struct ethtool_link_ksettings *ks) 1481 1430 { 1482 1431 struct ice_netdev_priv *np = netdev_priv(netdev); 1483 - struct ice_link_status *hw_link_info; 1484 - bool need_add_adv_mode = false; 1485 1432 struct ice_vsi *vsi = 
np->vsi; 1486 - u64 phy_types_high; 1487 - u64 phy_types_low; 1433 + struct ice_pf *pf = vsi->back; 1434 + u64 phy_type_mask_lo = 0; 1435 + u64 phy_type_mask_hi = 0; 1436 + u64 adv_phy_type_lo = 0; 1437 + u64 adv_phy_type_hi = 0; 1438 + u64 phy_types_high = 0; 1439 + u64 phy_types_low = 0; 1440 + u16 req_speeds; 1488 1441 1489 - hw_link_info = &vsi->port_info->phy.link_info; 1490 - phy_types_low = vsi->port_info->phy.phy_type_low; 1491 - phy_types_high = vsi->port_info->phy.phy_type_high; 1442 + req_speeds = vsi->port_info->phy.link_info.req_speeds; 1443 + 1444 + /* Check if lenient mode is supported and enabled, or in strict mode. 1445 + * 1446 + * In lenient mode the Supported link modes are the PHY types without 1447 + * media. The Advertising link mode is either 1. the user requested 1448 + * speed, 2. the override PHY mask, or 3. the PHY types with media. 1449 + * 1450 + * In strict mode Supported link mode are the PHY type with media, 1451 + * and Advertising link modes are the media PHY type or the speed 1452 + * requested by user. 1453 + */ 1454 + if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) { 1455 + struct ice_link_default_override_tlv *ldo; 1456 + 1457 + ldo = &pf->link_dflt_override; 1458 + phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo); 1459 + phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi); 1460 + 1461 + ice_mask_min_supported_speeds(phy_types_high, &phy_types_low); 1462 + 1463 + /* If override enabled and PHY mask set, then 1464 + * Advertising link mode is the intersection of the PHY 1465 + * types without media and the override PHY mask. 
1466 + */ 1467 + if (ldo->options & ICE_LINK_OVERRIDE_EN && 1468 + (ldo->phy_type_low || ldo->phy_type_high)) { 1469 + adv_phy_type_lo = 1470 + le64_to_cpu(pf->nvm_phy_type_lo) & 1471 + ldo->phy_type_low; 1472 + adv_phy_type_hi = 1473 + le64_to_cpu(pf->nvm_phy_type_hi) & 1474 + ldo->phy_type_high; 1475 + } 1476 + } else { 1477 + phy_types_low = vsi->port_info->phy.phy_type_low; 1478 + phy_types_high = vsi->port_info->phy.phy_type_high; 1479 + } 1480 + 1481 + /* If Advertising link mode PHY type is not using override PHY type, 1482 + * then use PHY type with media. 1483 + */ 1484 + if (!adv_phy_type_lo && !adv_phy_type_hi) { 1485 + adv_phy_type_lo = vsi->port_info->phy.phy_type_low; 1486 + adv_phy_type_hi = vsi->port_info->phy.phy_type_high; 1487 + } 1492 1488 1493 1489 ethtool_link_ksettings_zero_link_mode(ks, supported); 1494 1490 ethtool_link_ksettings_zero_link_mode(ks, advertising); 1495 1491 1496 - if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || 1497 - phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) { 1492 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100BASE_TX | 1493 + ICE_PHY_TYPE_LOW_100M_SGMII; 1494 + if (phy_types_low & phy_type_mask_lo) { 1498 1495 ethtool_link_ksettings_add_link_mode(ks, supported, 1499 1496 100baseT_Full); 1500 - if (!hw_link_info->req_speeds || 1501 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) 1502 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1503 - 100baseT_Full); 1497 + 1498 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100MB, 1499 + 100baseT_Full); 1504 1500 } 1505 - if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || 1506 - phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) { 1501 + 1502 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_T | 1503 + ICE_PHY_TYPE_LOW_1G_SGMII; 1504 + if (phy_types_low & phy_type_mask_lo) { 1507 1505 ethtool_link_ksettings_add_link_mode(ks, supported, 1508 1506 1000baseT_Full); 1509 - if (!hw_link_info->req_speeds || 1510 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) 1511 - 
ethtool_link_ksettings_add_link_mode(ks, advertising, 1512 - 1000baseT_Full); 1507 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, 1508 + 1000baseT_Full); 1513 1509 } 1514 - if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) { 1510 + 1511 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_KX; 1512 + if (phy_types_low & phy_type_mask_lo) { 1515 1513 ethtool_link_ksettings_add_link_mode(ks, supported, 1516 1514 1000baseKX_Full); 1517 - if (!hw_link_info->req_speeds || 1518 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) 1519 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1520 - 1000baseKX_Full); 1515 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, 1516 + 1000baseKX_Full); 1521 1517 } 1522 - if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX || 1523 - phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) { 1518 + 1519 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX | 1520 + ICE_PHY_TYPE_LOW_1000BASE_LX; 1521 + if (phy_types_low & phy_type_mask_lo) { 1524 1522 ethtool_link_ksettings_add_link_mode(ks, supported, 1525 1523 1000baseX_Full); 1526 - if (!hw_link_info->req_speeds || 1527 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) 1528 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1529 - 1000baseX_Full); 1524 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, 1525 + 1000baseX_Full); 1530 1526 } 1531 - if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) { 1527 + 1528 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T; 1529 + if (phy_types_low & phy_type_mask_lo) { 1532 1530 ethtool_link_ksettings_add_link_mode(ks, supported, 1533 1531 2500baseT_Full); 1534 - if (!hw_link_info->req_speeds || 1535 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) 1536 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1537 - 2500baseT_Full); 1532 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, 1533 + 2500baseT_Full); 1538 1534 } 1539 - if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X || 1540 - phy_types_low & 
ICE_PHY_TYPE_LOW_2500BASE_KX) { 1535 + 1536 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_X | 1537 + ICE_PHY_TYPE_LOW_2500BASE_KX; 1538 + if (phy_types_low & phy_type_mask_lo) { 1541 1539 ethtool_link_ksettings_add_link_mode(ks, supported, 1542 1540 2500baseX_Full); 1543 - if (!hw_link_info->req_speeds || 1544 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) 1545 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1546 - 2500baseX_Full); 1541 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, 1542 + 2500baseX_Full); 1547 1543 } 1548 - if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || 1549 - phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) { 1544 + 1545 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_5GBASE_T | 1546 + ICE_PHY_TYPE_LOW_5GBASE_KR; 1547 + if (phy_types_low & phy_type_mask_lo) { 1550 1548 ethtool_link_ksettings_add_link_mode(ks, supported, 1551 1549 5000baseT_Full); 1552 - if (!hw_link_info->req_speeds || 1553 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) 1554 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1555 - 5000baseT_Full); 1550 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_5GB, 1551 + 5000baseT_Full); 1556 1552 } 1557 - if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || 1558 - phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA || 1559 - phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC || 1560 - phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) { 1553 + 1554 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_T | 1555 + ICE_PHY_TYPE_LOW_10G_SFI_DA | 1556 + ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | 1557 + ICE_PHY_TYPE_LOW_10G_SFI_C2C; 1558 + if (phy_types_low & phy_type_mask_lo) { 1561 1559 ethtool_link_ksettings_add_link_mode(ks, supported, 1562 1560 10000baseT_Full); 1563 - if (!hw_link_info->req_speeds || 1564 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) 1565 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1566 - 10000baseT_Full); 1561 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, 1562 + 10000baseT_Full); 1567 1563 } 
1568 - if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) { 1564 + 1565 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_KR_CR1; 1566 + if (phy_types_low & phy_type_mask_lo) { 1569 1567 ethtool_link_ksettings_add_link_mode(ks, supported, 1570 1568 10000baseKR_Full); 1571 - if (!hw_link_info->req_speeds || 1572 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) 1573 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1574 - 10000baseKR_Full); 1569 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, 1570 + 10000baseKR_Full); 1575 1571 } 1576 - if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) { 1572 + 1573 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR; 1574 + if (phy_types_low & phy_type_mask_lo) { 1577 1575 ethtool_link_ksettings_add_link_mode(ks, supported, 1578 1576 10000baseSR_Full); 1579 - if (!hw_link_info->req_speeds || 1580 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) 1581 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1582 - 10000baseSR_Full); 1577 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, 1578 + 10000baseSR_Full); 1583 1579 } 1584 - if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) { 1580 + 1581 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_LR; 1582 + if (phy_types_low & phy_type_mask_lo) { 1585 1583 ethtool_link_ksettings_add_link_mode(ks, supported, 1586 1584 10000baseLR_Full); 1587 - if (!hw_link_info->req_speeds || 1588 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) 1589 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1590 - 10000baseLR_Full); 1585 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, 1586 + 10000baseLR_Full); 1591 1587 } 1592 - if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || 1593 - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || 1594 - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || 1595 - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || 1596 - phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC || 1597 - phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) { 1588 + 1589 + 
phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_T | 1590 + ICE_PHY_TYPE_LOW_25GBASE_CR | 1591 + ICE_PHY_TYPE_LOW_25GBASE_CR_S | 1592 + ICE_PHY_TYPE_LOW_25GBASE_CR1 | 1593 + ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | 1594 + ICE_PHY_TYPE_LOW_25G_AUI_C2C; 1595 + if (phy_types_low & phy_type_mask_lo) { 1598 1596 ethtool_link_ksettings_add_link_mode(ks, supported, 1599 1597 25000baseCR_Full); 1600 - if (!hw_link_info->req_speeds || 1601 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) 1602 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1603 - 25000baseCR_Full); 1598 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, 1599 + 25000baseCR_Full); 1604 1600 } 1605 - if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR || 1606 - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) { 1601 + 1602 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_SR | 1603 + ICE_PHY_TYPE_LOW_25GBASE_LR; 1604 + if (phy_types_low & phy_type_mask_lo) { 1607 1605 ethtool_link_ksettings_add_link_mode(ks, supported, 1608 1606 25000baseSR_Full); 1609 - if (!hw_link_info->req_speeds || 1610 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) 1611 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1612 - 25000baseSR_Full); 1607 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, 1608 + 25000baseSR_Full); 1613 1609 } 1614 - if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || 1615 - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || 1616 - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) { 1610 + 1611 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_KR | 1612 + ICE_PHY_TYPE_LOW_25GBASE_KR_S | 1613 + ICE_PHY_TYPE_LOW_25GBASE_KR1; 1614 + if (phy_types_low & phy_type_mask_lo) { 1617 1615 ethtool_link_ksettings_add_link_mode(ks, supported, 1618 1616 25000baseKR_Full); 1619 - if (!hw_link_info->req_speeds || 1620 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) 1621 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1622 - 25000baseKR_Full); 1617 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, 1618 
+ 25000baseKR_Full); 1623 1619 } 1624 - if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { 1620 + 1621 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_KR4; 1622 + if (phy_types_low & phy_type_mask_lo) { 1625 1623 ethtool_link_ksettings_add_link_mode(ks, supported, 1626 1624 40000baseKR4_Full); 1627 - if (!hw_link_info->req_speeds || 1628 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) 1629 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1630 - 40000baseKR4_Full); 1625 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, 1626 + 40000baseKR4_Full); 1631 1627 } 1632 - if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || 1633 - phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC || 1634 - phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) { 1628 + 1629 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_CR4 | 1630 + ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | 1631 + ICE_PHY_TYPE_LOW_40G_XLAUI; 1632 + if (phy_types_low & phy_type_mask_lo) { 1635 1633 ethtool_link_ksettings_add_link_mode(ks, supported, 1636 1634 40000baseCR4_Full); 1637 - if (!hw_link_info->req_speeds || 1638 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) 1639 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1640 - 40000baseCR4_Full); 1635 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, 1636 + 40000baseCR4_Full); 1641 1637 } 1642 - if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) { 1638 + 1639 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_SR4; 1640 + if (phy_types_low & phy_type_mask_lo) { 1643 1641 ethtool_link_ksettings_add_link_mode(ks, supported, 1644 1642 40000baseSR4_Full); 1645 - if (!hw_link_info->req_speeds || 1646 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) 1647 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1648 - 40000baseSR4_Full); 1643 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, 1644 + 40000baseSR4_Full); 1649 1645 } 1650 - if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) { 1646 + 1647 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_LR4; 
1648 + if (phy_types_low & phy_type_mask_lo) { 1651 1649 ethtool_link_ksettings_add_link_mode(ks, supported, 1652 1650 40000baseLR4_Full); 1653 - if (!hw_link_info->req_speeds || 1654 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) 1655 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1656 - 40000baseLR4_Full); 1651 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, 1652 + 40000baseLR4_Full); 1657 1653 } 1658 - if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 || 1659 - phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC || 1660 - phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 || 1661 - phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC || 1662 - phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 || 1663 - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP || 1664 - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR || 1665 - phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC || 1666 - phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) { 1654 + 1655 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_CR2 | 1656 + ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | 1657 + ICE_PHY_TYPE_LOW_50G_LAUI2 | 1658 + ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | 1659 + ICE_PHY_TYPE_LOW_50G_AUI2 | 1660 + ICE_PHY_TYPE_LOW_50GBASE_CP | 1661 + ICE_PHY_TYPE_LOW_50GBASE_SR | 1662 + ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | 1663 + ICE_PHY_TYPE_LOW_50G_AUI1; 1664 + if (phy_types_low & phy_type_mask_lo) { 1667 1665 ethtool_link_ksettings_add_link_mode(ks, supported, 1668 1666 50000baseCR2_Full); 1669 - if (!hw_link_info->req_speeds || 1670 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) 1671 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1672 - 50000baseCR2_Full); 1667 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, 1668 + 50000baseCR2_Full); 1673 1669 } 1674 - if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 || 1675 - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { 1670 + 1671 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_KR2 | 1672 + ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4; 1673 + if (phy_types_low & phy_type_mask_lo) 
{ 1676 1674 ethtool_link_ksettings_add_link_mode(ks, supported, 1677 1675 50000baseKR2_Full); 1678 - if (!hw_link_info->req_speeds || 1679 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) 1680 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1681 - 50000baseKR2_Full); 1676 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, 1677 + 50000baseKR2_Full); 1682 1678 } 1683 - if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 || 1684 - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 || 1685 - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR || 1686 - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) { 1679 + 1680 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_SR2 | 1681 + ICE_PHY_TYPE_LOW_50GBASE_LR2 | 1682 + ICE_PHY_TYPE_LOW_50GBASE_FR | 1683 + ICE_PHY_TYPE_LOW_50GBASE_LR; 1684 + if (phy_types_low & phy_type_mask_lo) { 1687 1685 ethtool_link_ksettings_add_link_mode(ks, supported, 1688 1686 50000baseSR2_Full); 1689 - if (!hw_link_info->req_speeds || 1690 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) 1691 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1692 - 50000baseSR2_Full); 1687 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, 1688 + 50000baseSR2_Full); 1693 1689 } 1694 - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 || 1695 - phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC || 1696 - phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 || 1697 - phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC || 1698 - phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 || 1699 - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 || 1700 - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 || 1701 - phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC || 1702 - phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 || 1703 - phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC || 1704 - phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) { 1690 + 1691 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_CR4 | 1692 + ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | 1693 + ICE_PHY_TYPE_LOW_100G_CAUI4 | 
1694 + ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | 1695 + ICE_PHY_TYPE_LOW_100G_AUI4 | 1696 + ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | 1697 + ICE_PHY_TYPE_LOW_100GBASE_CP2; 1698 + phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | 1699 + ICE_PHY_TYPE_HIGH_100G_CAUI2 | 1700 + ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | 1701 + ICE_PHY_TYPE_HIGH_100G_AUI2; 1702 + if (phy_types_low & phy_type_mask_lo || 1703 + phy_types_high & phy_type_mask_hi) { 1705 1704 ethtool_link_ksettings_add_link_mode(ks, supported, 1706 1705 100000baseCR4_Full); 1707 - if (!hw_link_info->req_speeds || 1708 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) 1709 - need_add_adv_mode = true; 1706 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, 1707 + 100000baseCR4_Full); 1710 1708 } 1711 - if (need_add_adv_mode) { 1712 - need_add_adv_mode = false; 1713 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1714 - 100000baseCR4_Full); 1715 - } 1716 - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 || 1717 - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { 1709 + 1710 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 | 1711 + ICE_PHY_TYPE_LOW_100GBASE_SR2; 1712 + if (phy_types_low & phy_type_mask_lo) { 1718 1713 ethtool_link_ksettings_add_link_mode(ks, supported, 1719 1714 100000baseSR4_Full); 1720 - if (!hw_link_info->req_speeds || 1721 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) 1722 - need_add_adv_mode = true; 1715 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, 1716 + 100000baseSR4_Full); 1723 1717 } 1724 - if (need_add_adv_mode) { 1725 - need_add_adv_mode = false; 1726 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1727 - 100000baseSR4_Full); 1728 - } 1729 - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 || 1730 - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) { 1718 + 1719 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 | 1720 + ICE_PHY_TYPE_LOW_100GBASE_DR; 1721 + if (phy_types_low & phy_type_mask_lo) { 1731 1722 
ethtool_link_ksettings_add_link_mode(ks, supported, 1732 1723 100000baseLR4_ER4_Full); 1733 - if (!hw_link_info->req_speeds || 1734 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) 1735 - need_add_adv_mode = true; 1724 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, 1725 + 100000baseLR4_ER4_Full); 1736 1726 } 1737 - if (need_add_adv_mode) { 1738 - need_add_adv_mode = false; 1739 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1740 - 100000baseLR4_ER4_Full); 1741 - } 1742 - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 || 1743 - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 || 1744 - phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { 1727 + 1728 + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 | 1729 + ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4; 1730 + phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4; 1731 + if (phy_types_low & phy_type_mask_lo || 1732 + phy_types_high & phy_type_mask_hi) { 1745 1733 ethtool_link_ksettings_add_link_mode(ks, supported, 1746 1734 100000baseKR4_Full); 1747 - if (!hw_link_info->req_speeds || 1748 - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) 1749 - need_add_adv_mode = true; 1735 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, 1736 + 100000baseKR4_Full); 1750 1737 } 1751 - if (need_add_adv_mode) 1752 - ethtool_link_ksettings_add_link_mode(ks, advertising, 1753 - 100000baseKR4_Full); 1754 1738 1755 1739 /* Autoneg PHY types */ 1756 1740 if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || ··· 2213 2127 ice_set_link_ksettings(struct net_device *netdev, 2214 2128 const struct ethtool_link_ksettings *ks) 2215 2129 { 2216 - u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0; 2217 2130 struct ice_netdev_priv *np = netdev_priv(netdev); 2218 2131 struct ethtool_link_ksettings safe_ks, copy_ks; 2219 2132 struct ice_aqc_get_phy_caps_data *abilities; 2133 + u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT; 2220 2134 u16 adv_link_speed, curr_link_speed, idx; 2221 2135 struct 
ice_aqc_set_phy_cfg_data config; 2222 2136 struct ice_pf *pf = np->vsi->back; 2223 2137 struct ice_port_info *p; 2224 2138 u8 autoneg_changed = 0; 2225 2139 enum ice_status status; 2226 - u64 phy_type_high; 2227 - u64 phy_type_low; 2140 + u64 phy_type_high = 0; 2141 + u64 phy_type_low = 0; 2228 2142 int err = 0; 2229 2143 bool linkup; 2230 2144 ··· 2248 2162 p->phy.link_info.link_info & ICE_AQ_LINK_UP) 2249 2163 return -EOPNOTSUPP; 2250 2164 2165 + abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); 2166 + if (!abilities) 2167 + return -ENOMEM; 2168 + 2169 + /* Get the PHY capabilities based on media */ 2170 + status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP, 2171 + abilities, NULL); 2172 + if (status) { 2173 + err = -EAGAIN; 2174 + goto done; 2175 + } 2176 + 2251 2177 /* copy the ksettings to copy_ks to avoid modifying the original */ 2252 2178 memcpy(&copy_ks, ks, sizeof(copy_ks)); 2253 2179 ··· 2276 2178 */ 2277 2179 if (!bitmap_subset(copy_ks.link_modes.advertising, 2278 2180 safe_ks.link_modes.supported, 2279 - __ETHTOOL_LINK_MODE_MASK_NBITS)) 2280 - return -EINVAL; 2181 + __ETHTOOL_LINK_MODE_MASK_NBITS)) { 2182 + if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) 2183 + netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); 2184 + err = -EINVAL; 2185 + goto done; 2186 + } 2281 2187 2282 2188 /* get our own copy of the bits to check against */ 2283 2189 memset(&safe_ks, 0, sizeof(safe_ks)); ··· 2298 2196 /* If copy_ks.base and safe_ks.base are not the same now, then they are 2299 2197 * trying to set something that we do not support. 
2300 2198 */ 2301 - if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) 2302 - return -EOPNOTSUPP; 2303 - 2304 - while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { 2305 - timeout--; 2306 - if (!timeout) 2307 - return -EBUSY; 2308 - usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); 2309 - } 2310 - 2311 - abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); 2312 - if (!abilities) 2313 - return -ENOMEM; 2314 - 2315 - /* Get the current PHY config */ 2316 - status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities, 2317 - NULL); 2318 - if (status) { 2319 - err = -EAGAIN; 2199 + if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) { 2200 + err = -EOPNOTSUPP; 2320 2201 goto done; 2321 2202 } 2322 2203 2323 - /* Copy abilities to config in case autoneg is not set below */ 2324 - memset(&config, 0, sizeof(config)); 2325 - config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; 2326 - if (abilities->caps & ICE_AQC_PHY_AN_MODE) 2327 - config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2204 + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { 2205 + timeout--; 2206 + if (!timeout) { 2207 + err = -EBUSY; 2208 + goto done; 2209 + } 2210 + usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); 2211 + } 2212 + 2213 + /* Copy the current user PHY configuration. The current user PHY 2214 + * configuration is initialized during probe from PHY capabilities 2215 + * software mode, and updated on set PHY configuration. 
2216 + */ 2217 + memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config)); 2218 + 2219 + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2328 2220 2329 2221 /* Check autoneg */ 2330 2222 err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, ··· 2353 2257 goto done; 2354 2258 } 2355 2259 2356 - /* copy over the rest of the abilities */ 2357 - config.low_power_ctrl = abilities->low_power_ctrl; 2358 - config.eee_cap = abilities->eee_cap; 2359 - config.eeer_value = abilities->eeer_value; 2360 - config.link_fec_opt = abilities->link_fec_options; 2361 - 2362 2260 /* save the requested speeds */ 2363 2261 p->phy.link_info.req_speeds = adv_link_speed; 2364 2262 2365 2263 /* set link and auto negotiation so changes take effect */ 2366 2264 config.caps |= ICE_AQ_PHY_ENA_LINK; 2367 2265 2368 - if (phy_type_low || phy_type_high) { 2369 - config.phy_type_high = cpu_to_le64(phy_type_high) & 2370 - abilities->phy_type_high; 2371 - config.phy_type_low = cpu_to_le64(phy_type_low) & 2372 - abilities->phy_type_low; 2373 - } else { 2266 + /* check if there is a PHY type for the requested advertised speed */ 2267 + if (!(phy_type_low || phy_type_high)) { 2268 + netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); 2374 2269 err = -EAGAIN; 2375 - netdev_info(netdev, "Nothing changed. 
No PHY_TYPE is corresponded to advertised link speed.\n"); 2376 2270 goto done; 2271 + } 2272 + 2273 + /* intersect requested advertised speed PHY types with media PHY types 2274 + * for set PHY configuration 2275 + */ 2276 + config.phy_type_high = cpu_to_le64(phy_type_high) & 2277 + abilities->phy_type_high; 2278 + config.phy_type_low = cpu_to_le64(phy_type_low) & 2279 + abilities->phy_type_low; 2280 + 2281 + if (!(config.phy_type_high || config.phy_type_low)) { 2282 + /* If there is no intersection and lenient mode is enabled, then 2283 + * intersect the requested advertised speed with NVM media type 2284 + * PHY types. 2285 + */ 2286 + if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) { 2287 + config.phy_type_high = cpu_to_le64(phy_type_high) & 2288 + pf->nvm_phy_type_hi; 2289 + config.phy_type_low = cpu_to_le64(phy_type_low) & 2290 + pf->nvm_phy_type_lo; 2291 + } else { 2292 + netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); 2293 + err = -EAGAIN; 2294 + goto done; 2295 + } 2377 2296 } 2378 2297 2379 2298 /* If link is up put link down */ ··· 2402 2291 } 2403 2292 2404 2293 /* make the aq call */ 2405 - status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL); 2294 + status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL); 2406 2295 if (status) { 2407 2296 netdev_info(netdev, "Set phy config failed,\n"); 2408 2297 err = -EAGAIN; 2298 + goto done; 2409 2299 } 2410 2300 2301 + /* Save speed request */ 2302 + p->phy.curr_user_speed_req = adv_link_speed; 2411 2303 done: 2412 2304 kfree(abilities); 2413 2305 clear_bit(__ICE_CFG_BUSY, pf->state); ··· 2987 2873 if (status) 2988 2874 goto out; 2989 2875 2990 - pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? 2991 - AUTONEG_ENABLE : AUTONEG_DISABLE); 2876 + pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? 
AUTONEG_ENABLE : 2877 + AUTONEG_DISABLE; 2992 2878 2993 2879 if (dcbx_cfg->pfc.pfcena) 2994 2880 /* PFC enabled so report LFC as off */ ··· 3056 2942 return -EIO; 3057 2943 } 3058 2944 3059 - is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? 3060 - AUTONEG_ENABLE : AUTONEG_DISABLE); 2945 + is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE : 2946 + AUTONEG_DISABLE; 3061 2947 3062 2948 kfree(pcaps); 3063 2949 ··· 3432 3318 3433 3319 if (new_rx && !netif_is_rxfh_configured(dev)) 3434 3320 return ice_vsi_set_dflt_rss_lut(vsi, new_rx); 3321 + 3322 + return 0; 3323 + } 3324 + 3325 + /** 3326 + * ice_get_wol - get current Wake on LAN configuration 3327 + * @netdev: network interface device structure 3328 + * @wol: Ethtool structure to retrieve WoL settings 3329 + */ 3330 + static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 3331 + { 3332 + struct ice_netdev_priv *np = netdev_priv(netdev); 3333 + struct ice_pf *pf = np->vsi->back; 3334 + 3335 + if (np->vsi->type != ICE_VSI_PF) 3336 + netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n"); 3337 + 3338 + /* Get WoL settings based on the HW capability */ 3339 + if (ice_is_wol_supported(pf)) { 3340 + wol->supported = WAKE_MAGIC; 3341 + wol->wolopts = pf->wol_ena ? 
WAKE_MAGIC : 0; 3342 + } else { 3343 + wol->supported = 0; 3344 + wol->wolopts = 0; 3345 + } 3346 + } 3347 + 3348 + /** 3349 + * ice_set_wol - set Wake on LAN on supported device 3350 + * @netdev: network interface device structure 3351 + * @wol: Ethtool structure to set WoL 3352 + */ 3353 + static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 3354 + { 3355 + struct ice_netdev_priv *np = netdev_priv(netdev); 3356 + struct ice_vsi *vsi = np->vsi; 3357 + struct ice_pf *pf = vsi->back; 3358 + 3359 + if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf)) 3360 + return -EOPNOTSUPP; 3361 + 3362 + /* only magic packet is supported */ 3363 + if (wol->wolopts && wol->wolopts != WAKE_MAGIC) 3364 + return -EOPNOTSUPP; 3365 + 3366 + /* Set WoL only if there is a new value */ 3367 + if (pf->wol_ena != !!wol->wolopts) { 3368 + pf->wol_ena = !!wol->wolopts; 3369 + device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena); 3370 + netdev_dbg(netdev, "WoL magic packet %sabled\n", 3371 + pf->wol_ena ? "en" : "dis"); 3372 + } 3435 3373 3436 3374 return 0; 3437 3375 } ··· 3971 3805 .get_drvinfo = ice_get_drvinfo, 3972 3806 .get_regs_len = ice_get_regs_len, 3973 3807 .get_regs = ice_get_regs, 3808 + .get_wol = ice_get_wol, 3809 + .set_wol = ice_set_wol, 3974 3810 .get_msglevel = ice_get_msglevel, 3975 3811 .set_msglevel = ice_set_msglevel, 3976 3812 .self_test = ice_self_test, ··· 4015 3847 .get_drvinfo = ice_get_drvinfo, 4016 3848 .get_regs_len = ice_get_regs_len, 4017 3849 .get_regs = ice_get_regs, 3850 + .get_wol = ice_get_wol, 3851 + .set_wol = ice_set_wol, 4018 3852 .get_msglevel = ice_get_msglevel, 4019 3853 .set_msglevel = ice_set_msglevel, 4020 3854 .get_link = ethtool_op_get_link,
+9
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
··· 367 367 #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) 368 368 #define VSIQF_HKEY_MAX_INDEX 12 369 369 #define VSIQF_HLUT_MAX_INDEX 15 370 + #define PFPM_APM 0x000B8080 371 + #define PFPM_APM_APME_M BIT(0) 372 + #define PFPM_WUFC 0x0009DC00 373 + #define PFPM_WUFC_MAG_M BIT(1) 374 + #define PFPM_WUS 0x0009DB80 375 + #define PFPM_WUS_LNKC_M BIT(0) 376 + #define PFPM_WUS_MAG_M BIT(1) 377 + #define PFPM_WUS_MNG_M BIT(3) 378 + #define PFPM_WUS_FW_RST_WK_M BIT(31) 370 379 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) 371 380 #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) 372 381 #define PRTRPB_RDPC 0x000AC260
+24
drivers/net/ethernet/intel/ice/ice_lib.c
··· 1468 1468 } 1469 1469 1470 1470 /** 1471 + * ice_pf_state_is_nominal - checks the PF for nominal state 1472 + * @pf: pointer to PF to check 1473 + * 1474 + * Check the PF's state for a collection of bits that would indicate 1475 + * the PF is in a state that would inhibit normal operation for 1476 + * driver functionality. 1477 + * 1478 + * Returns true if PF is in a nominal state, false otherwise 1479 + */ 1480 + bool ice_pf_state_is_nominal(struct ice_pf *pf) 1481 + { 1482 + DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; 1483 + 1484 + if (!pf) 1485 + return false; 1486 + 1487 + bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); 1488 + if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) 1489 + return false; 1490 + 1491 + return true; 1492 + } 1493 + 1494 + /** 1471 1495 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters 1472 1496 * @vsi: the VSI to be updated 1473 1497 */
+2
drivers/net/ethernet/intel/ice/ice_lib.h
··· 8 8 9 9 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); 10 10 11 + bool ice_pf_state_is_nominal(struct ice_pf *pf); 12 + 11 13 void ice_update_eth_stats(struct ice_vsi *vsi); 12 14 13 15 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
+744 -30
drivers/net/ethernet/intel/ice/ice_main.c
··· 612 612 void ice_print_link_msg(struct ice_vsi *vsi, bool isup) 613 613 { 614 614 struct ice_aqc_get_phy_caps_data *caps; 615 + const char *an_advertised; 615 616 enum ice_status status; 616 617 const char *fec_req; 617 618 const char *speed; ··· 711 710 caps = kzalloc(sizeof(*caps), GFP_KERNEL); 712 711 if (!caps) { 713 712 fec_req = "Unknown"; 713 + an_advertised = "Unknown"; 714 714 goto done; 715 715 } 716 716 ··· 719 717 ICE_AQC_REPORT_SW_CFG, caps, NULL); 720 718 if (status) 721 719 netdev_info(vsi->netdev, "Get phy capability failed.\n"); 720 + 721 + an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off"; 722 722 723 723 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || 724 724 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) ··· 734 730 kfree(caps); 735 731 736 732 done: 737 - netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 738 - speed, fec_req, fec, an, fc); 733 + netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", 734 + speed, fec_req, fec, an_advertised, an, fc); 739 735 ice_print_topo_conflict(vsi); 740 736 } 741 737 ··· 800 796 dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n", 801 797 pi->lport); 802 798 803 - /* if the old link up/down and speed is the same as the new */ 804 - if (link_up == old_link && link_speed == old_link_speed) 805 - return result; 806 - 807 799 vsi = ice_get_main_vsi(pf); 808 800 if (!vsi || !vsi->port_info) 809 801 return -EINVAL; ··· 816 816 return result; 817 817 } 818 818 } 819 + 820 + /* if the old link up/down and speed is the same as the new */ 821 + if (link_up == old_link && link_speed == old_link_speed) 822 + return result; 819 823 820 824 ice_dcb_rebuild(pf); 821 825 ice_vsi_link_event(vsi, link_up); ··· 1133 1129 /** 1134 1130 * ice_service_task_stop - 
stop service task and cancel works 1135 1131 * @pf: board private structure 1132 + * 1133 + * Return 0 if the __ICE_SERVICE_DIS bit was not already set, 1134 + * 1 otherwise. 1136 1135 */ 1137 - static void ice_service_task_stop(struct ice_pf *pf) 1136 + static int ice_service_task_stop(struct ice_pf *pf) 1138 1137 { 1139 - set_bit(__ICE_SERVICE_DIS, pf->state); 1138 + int ret; 1139 + 1140 + ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state); 1140 1141 1141 1142 if (pf->serv_tmr.function) 1142 1143 del_timer_sync(&pf->serv_tmr); ··· 1149 1140 cancel_work_sync(&pf->serv_task); 1150 1141 1151 1142 clear_bit(__ICE_SERVICE_SCHED, pf->state); 1143 + return ret; 1152 1144 } 1153 1145 1154 1146 /** ··· 1384 1374 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 1385 1375 goto out; 1386 1376 1387 - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 1377 + /* Use the current user PHY configuration. The current user PHY 1378 + * configuration is initialized during probe from PHY capabilities 1379 + * software mode, and updated on set PHY configuration. 
1380 + */ 1381 + cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); 1388 1382 if (!cfg) { 1389 1383 retcode = -ENOMEM; 1390 1384 goto out; 1391 1385 } 1392 1386 1393 - cfg->phy_type_low = pcaps->phy_type_low; 1394 - cfg->phy_type_high = pcaps->phy_type_high; 1395 - cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1396 - cfg->low_power_ctrl = pcaps->low_power_ctrl; 1397 - cfg->eee_cap = pcaps->eee_cap; 1398 - cfg->eeer_value = pcaps->eeer_value; 1399 - cfg->link_fec_opt = pcaps->link_fec_options; 1387 + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1400 1388 if (link_up) 1401 1389 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 1402 1390 else 1403 1391 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 1404 1392 1405 - retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); 1393 + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 1406 1394 if (retcode) { 1407 1395 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 1408 1396 vsi->vsi_num, retcode); ··· 1414 1406 } 1415 1407 1416 1408 /** 1417 - * ice_check_media_subtask - Check for media; bring link up if detected. 
1409 + * ice_init_nvm_phy_type - Initialize the NVM PHY type 1410 + * @pi: port info structure 1411 + * 1412 + * Initialize nvm_phy_type_[low|high] for link lenient mode support 1413 + */ 1414 + static int ice_init_nvm_phy_type(struct ice_port_info *pi) 1415 + { 1416 + struct ice_aqc_get_phy_caps_data *pcaps; 1417 + struct ice_pf *pf = pi->hw->back; 1418 + enum ice_status status; 1419 + int err = 0; 1420 + 1421 + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1422 + if (!pcaps) 1423 + return -ENOMEM; 1424 + 1425 + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps, 1426 + NULL); 1427 + 1428 + if (status) { 1429 + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 1430 + err = -EIO; 1431 + goto out; 1432 + } 1433 + 1434 + pf->nvm_phy_type_hi = pcaps->phy_type_high; 1435 + pf->nvm_phy_type_lo = pcaps->phy_type_low; 1436 + 1437 + out: 1438 + kfree(pcaps); 1439 + return err; 1440 + } 1441 + 1442 + /** 1443 + * ice_init_link_dflt_override - Initialize link default override 1444 + * @pi: port info structure 1445 + * 1446 + * Initialize link default override and PHY total port shutdown during probe 1447 + */ 1448 + static void ice_init_link_dflt_override(struct ice_port_info *pi) 1449 + { 1450 + struct ice_link_default_override_tlv *ldo; 1451 + struct ice_pf *pf = pi->hw->back; 1452 + 1453 + ldo = &pf->link_dflt_override; 1454 + if (ice_get_link_default_override(ldo, pi)) 1455 + return; 1456 + 1457 + if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 1458 + return; 1459 + 1460 + /* Enable Total Port Shutdown (override/replace link-down-on-close 1461 + * ethtool private flag) for ports with Port Disable bit set. 
1462 + */ 1463 + set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 1464 + set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 1465 + } 1466 + 1467 + /** 1468 + * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 1469 + * @pi: port info structure 1470 + * 1471 + * If default override is enabled, initialized the user PHY cfg speed and FEC 1472 + * settings using the default override mask from the NVM. 1473 + * 1474 + * The PHY should only be configured with the default override settings the 1475 + * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state 1476 + * is used to indicate that the user PHY cfg default override is initialized 1477 + * and the PHY has not been configured with the default override settings. The 1478 + * state is set here, and cleared in ice_configure_phy the first time the PHY is 1479 + * configured. 1480 + */ 1481 + static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) 1482 + { 1483 + struct ice_link_default_override_tlv *ldo; 1484 + struct ice_aqc_set_phy_cfg_data *cfg; 1485 + struct ice_phy_info *phy = &pi->phy; 1486 + struct ice_pf *pf = pi->hw->back; 1487 + 1488 + ldo = &pf->link_dflt_override; 1489 + 1490 + /* If link default override is enabled, use to mask NVM PHY capabilities 1491 + * for speed and FEC default configuration. 
1492 + */ 1493 + cfg = &phy->curr_user_phy_cfg; 1494 + 1495 + if (ldo->phy_type_low || ldo->phy_type_high) { 1496 + cfg->phy_type_low = pf->nvm_phy_type_lo & 1497 + cpu_to_le64(ldo->phy_type_low); 1498 + cfg->phy_type_high = pf->nvm_phy_type_hi & 1499 + cpu_to_le64(ldo->phy_type_high); 1500 + } 1501 + cfg->link_fec_opt = ldo->fec_options; 1502 + phy->curr_user_fec_req = ICE_FEC_AUTO; 1503 + 1504 + set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); 1505 + } 1506 + 1507 + /** 1508 + * ice_init_phy_user_cfg - Initialize the PHY user configuration 1509 + * @pi: port info structure 1510 + * 1511 + * Initialize the current user PHY configuration, speed, FEC, and FC requested 1512 + * mode to default. The PHY defaults are from get PHY capabilities topology 1513 + * with media so call when media is first available. An error is returned if 1514 + * called when media is not available. The PHY initialization completed state is 1515 + * set here. 1516 + * 1517 + * These configurations are used when setting PHY 1518 + * configuration. The user PHY configuration is updated on set PHY 1519 + * configuration. 
Returns 0 on success, negative on failure 1520 + */ 1521 + static int ice_init_phy_user_cfg(struct ice_port_info *pi) 1522 + { 1523 + struct ice_aqc_get_phy_caps_data *pcaps; 1524 + struct ice_phy_info *phy = &pi->phy; 1525 + struct ice_pf *pf = pi->hw->back; 1526 + enum ice_status status; 1527 + struct ice_vsi *vsi; 1528 + int err = 0; 1529 + 1530 + if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 1531 + return -EIO; 1532 + 1533 + vsi = ice_get_main_vsi(pf); 1534 + if (!vsi) 1535 + return -EINVAL; 1536 + 1537 + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1538 + if (!pcaps) 1539 + return -ENOMEM; 1540 + 1541 + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, 1542 + NULL); 1543 + if (status) { 1544 + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 1545 + err = -EIO; 1546 + goto err_out; 1547 + } 1548 + 1549 + ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); 1550 + 1551 + /* check if lenient mode is supported and enabled */ 1552 + if (ice_fw_supports_link_override(&vsi->back->hw) && 1553 + !(pcaps->module_compliance_enforcement & 1554 + ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { 1555 + set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); 1556 + 1557 + /* if link default override is enabled, initialize user PHY 1558 + * configuration with link default override values 1559 + */ 1560 + if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) { 1561 + ice_init_phy_cfg_dflt_override(pi); 1562 + goto out; 1563 + } 1564 + } 1565 + 1566 + /* if link default override is not enabled, initialize PHY using 1567 + * topology with media 1568 + */ 1569 + phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, 1570 + pcaps->link_fec_options); 1571 + phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); 1572 + 1573 + out: 1574 + phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; 1575 + set_bit(__ICE_PHY_INIT_COMPLETE, pf->state); 1576 + err_out: 1577 + kfree(pcaps); 1578 + return err; 1579 + } 1580 + 1581 + /** 1582 + * 
ice_configure_phy - configure PHY 1583 + * @vsi: VSI of PHY 1584 + * 1585 + * Set the PHY configuration. If the current PHY configuration is the same as 1586 + * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise 1587 + * configure the based get PHY capabilities for topology with media. 1588 + */ 1589 + static int ice_configure_phy(struct ice_vsi *vsi) 1590 + { 1591 + struct device *dev = ice_pf_to_dev(vsi->back); 1592 + struct ice_aqc_get_phy_caps_data *pcaps; 1593 + struct ice_aqc_set_phy_cfg_data *cfg; 1594 + struct ice_port_info *pi; 1595 + enum ice_status status; 1596 + int err = 0; 1597 + 1598 + pi = vsi->port_info; 1599 + if (!pi) 1600 + return -EINVAL; 1601 + 1602 + /* Ensure we have media as we cannot configure a medialess port */ 1603 + if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 1604 + return -EPERM; 1605 + 1606 + ice_print_topo_conflict(vsi); 1607 + 1608 + if (vsi->port_info->phy.link_info.topo_media_conflict == 1609 + ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) 1610 + return -EPERM; 1611 + 1612 + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 1613 + return ice_force_phys_link_state(vsi, true); 1614 + 1615 + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1616 + if (!pcaps) 1617 + return -ENOMEM; 1618 + 1619 + /* Get current PHY config */ 1620 + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 1621 + NULL); 1622 + if (status) { 1623 + dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", 1624 + vsi->vsi_num, ice_stat_str(status)); 1625 + err = -EIO; 1626 + goto done; 1627 + } 1628 + 1629 + /* If PHY enable link is configured and configuration has not changed, 1630 + * there's nothing to do 1631 + */ 1632 + if (pcaps->caps & ICE_AQC_PHY_EN_LINK && 1633 + ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg)) 1634 + goto done; 1635 + 1636 + /* Use PHY topology as baseline for configuration */ 1637 + memset(pcaps, 0, sizeof(*pcaps)); 1638 + status = ice_aq_get_phy_caps(pi, false, 
ICE_AQC_REPORT_TOPO_CAP, pcaps, 1639 + NULL); 1640 + if (status) { 1641 + dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", 1642 + vsi->vsi_num, ice_stat_str(status)); 1643 + err = -EIO; 1644 + goto done; 1645 + } 1646 + 1647 + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 1648 + if (!cfg) { 1649 + err = -ENOMEM; 1650 + goto done; 1651 + } 1652 + 1653 + ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); 1654 + 1655 + /* Speed - If default override pending, use curr_user_phy_cfg set in 1656 + * ice_init_phy_user_cfg_ldo. 1657 + */ 1658 + if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, 1659 + vsi->back->state)) { 1660 + cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low; 1661 + cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high; 1662 + } else { 1663 + u64 phy_low = 0, phy_high = 0; 1664 + 1665 + ice_update_phy_type(&phy_low, &phy_high, 1666 + pi->phy.curr_user_speed_req); 1667 + cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); 1668 + cfg->phy_type_high = pcaps->phy_type_high & 1669 + cpu_to_le64(phy_high); 1670 + } 1671 + 1672 + /* Can't provide what was requested; use PHY capabilities */ 1673 + if (!cfg->phy_type_low && !cfg->phy_type_high) { 1674 + cfg->phy_type_low = pcaps->phy_type_low; 1675 + cfg->phy_type_high = pcaps->phy_type_high; 1676 + } 1677 + 1678 + /* FEC */ 1679 + ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); 1680 + 1681 + /* Can't provide what was requested; use PHY capabilities */ 1682 + if (cfg->link_fec_opt != 1683 + (cfg->link_fec_opt & pcaps->link_fec_options)) { 1684 + cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 1685 + cfg->link_fec_opt = pcaps->link_fec_options; 1686 + } 1687 + 1688 + /* Flow Control - always supported; no need to check against 1689 + * capabilities 1690 + */ 1691 + ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req); 1692 + 1693 + /* Enable link and link update */ 1694 + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 1695 + 1696 + status = 
ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 1697 + if (status) { 1698 + dev_err(dev, "Failed to set phy config, VSI %d error %s\n", 1699 + vsi->vsi_num, ice_stat_str(status)); 1700 + err = -EIO; 1701 + } 1702 + 1703 + kfree(cfg); 1704 + done: 1705 + kfree(pcaps); 1706 + return err; 1707 + } 1708 + 1709 + /** 1710 + * ice_check_media_subtask - Check for media 1418 1711 * @pf: pointer to PF struct 1712 + * 1713 + * If media is available, then initialize PHY user configuration if it is not 1714 + * been, and configure the PHY if the interface is up. 1419 1715 */ 1420 1716 static void ice_check_media_subtask(struct ice_pf *pf) 1421 1717 { ··· 1727 1415 struct ice_vsi *vsi; 1728 1416 int err; 1729 1417 1730 - vsi = ice_get_main_vsi(pf); 1731 - if (!vsi) 1418 + /* No need to check for media if it's already present */ 1419 + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) 1732 1420 return; 1733 1421 1734 - /* No need to check for media if it's already present or the interface 1735 - * is down 1736 - */ 1737 - if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) || 1738 - test_bit(__ICE_DOWN, vsi->state)) 1422 + vsi = ice_get_main_vsi(pf); 1423 + if (!vsi) 1739 1424 return; 1740 1425 1741 1426 /* Refresh link info and check if media is present */ ··· 1742 1433 return; 1743 1434 1744 1435 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 1745 - err = ice_force_phys_link_state(vsi, true); 1746 - if (err) 1436 + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) 1437 + ice_init_phy_user_cfg(pi); 1438 + 1439 + /* PHY settings are reset on media insertion, reconfigure 1440 + * PHY to preserve settings. 
1441 + */ 1442 + if (test_bit(__ICE_DOWN, vsi->state) && 1443 + test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 1747 1444 return; 1748 - clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 1445 + 1446 + err = ice_configure_phy(vsi); 1447 + if (!err) 1448 + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 1749 1449 1750 1450 /* A Link Status Event will be generated; the event handler 1751 1451 * will complete bringing the interface up ··· 3259 2941 } 3260 2942 3261 2943 /** 2944 + * ice_is_wol_supported - get NVM state of WoL 2945 + * @pf: board private structure 2946 + * 2947 + * Check if WoL is supported based on the HW configuration. 2948 + * Returns true if NVM supports and enables WoL for this port, false otherwise 2949 + */ 2950 + bool ice_is_wol_supported(struct ice_pf *pf) 2951 + { 2952 + struct ice_hw *hw = &pf->hw; 2953 + u16 wol_ctrl; 2954 + 2955 + /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 2956 + * word) indicates WoL is not supported on the corresponding PF ID. 
2957 + */ 2958 + if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 2959 + return false; 2960 + 2961 + return !(BIT(hw->pf_id) & wol_ctrl); 2962 + } 2963 + 2964 + /** 3262 2965 * ice_vsi_recfg_qs - Change the number of queues on a VSI 3263 2966 * @vsi: VSI being changed 3264 2967 * @new_rx: new number of Rx queues ··· 3627 3288 } 3628 3289 3629 3290 /** 3291 + * ice_print_wake_reason - show the wake up cause in the log 3292 + * @pf: pointer to the PF struct 3293 + */ 3294 + static void ice_print_wake_reason(struct ice_pf *pf) 3295 + { 3296 + u32 wus = pf->wakeup_reason; 3297 + const char *wake_str; 3298 + 3299 + /* if no wake event, nothing to print */ 3300 + if (!wus) 3301 + return; 3302 + 3303 + if (wus & PFPM_WUS_LNKC_M) 3304 + wake_str = "Link\n"; 3305 + else if (wus & PFPM_WUS_MAG_M) 3306 + wake_str = "Magic Packet\n"; 3307 + else if (wus & PFPM_WUS_MNG_M) 3308 + wake_str = "Management\n"; 3309 + else if (wus & PFPM_WUS_FW_RST_WK_M) 3310 + wake_str = "Firmware Reset\n"; 3311 + else 3312 + wake_str = "Unknown\n"; 3313 + 3314 + dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 3315 + } 3316 + 3317 + /** 3630 3318 * ice_probe - Device initialization routine 3631 3319 * @pdev: PCI device information struct 3632 3320 * @ent: entry in ice_pci_tbl ··· 3834 3468 goto err_alloc_sw_unroll; 3835 3469 } 3836 3470 3471 + err = ice_init_nvm_phy_type(pf->hw.port_info); 3472 + if (err) { 3473 + dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 3474 + goto err_alloc_sw_unroll; 3475 + } 3476 + 3477 + err = ice_update_link_info(pf->hw.port_info); 3478 + if (err) { 3479 + dev_err(dev, "ice_update_link_info failed: %d\n", err); 3480 + goto err_alloc_sw_unroll; 3481 + } 3482 + 3483 + ice_init_link_dflt_override(pf->hw.port_info); 3484 + 3485 + /* if media available, initialize PHY settings */ 3486 + if (pf->hw.port_info->phy.link_info.link_info & 3487 + ICE_AQ_MEDIA_AVAILABLE) { 3488 + err = ice_init_phy_user_cfg(pf->hw.port_info); 3489 + if (err) { 3490 + 
dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 3491 + goto err_alloc_sw_unroll; 3492 + } 3493 + 3494 + if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 3495 + struct ice_vsi *vsi = ice_get_main_vsi(pf); 3496 + 3497 + if (vsi) 3498 + ice_configure_phy(vsi); 3499 + } 3500 + } else { 3501 + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 3502 + } 3503 + 3837 3504 ice_verify_cacheline_size(pf); 3505 + 3506 + /* Save wakeup reason register for later use */ 3507 + pf->wakeup_reason = rd32(hw, PFPM_WUS); 3508 + 3509 + /* check for a power management event */ 3510 + ice_print_wake_reason(pf); 3511 + 3512 + /* clear wake status, all bits */ 3513 + wr32(hw, PFPM_WUS, U32_MAX); 3514 + 3515 + /* Disable WoL at init, wait for user to enable */ 3516 + device_set_wakeup_enable(dev, false); 3838 3517 3839 3518 /* If no DDP driven features have to be setup, we are done with probe */ 3840 3519 if (ice_is_safe_mode(pf)) ··· 3925 3514 err_exit_unroll: 3926 3515 ice_devlink_unregister(pf); 3927 3516 pci_disable_pcie_error_reporting(pdev); 3517 + pci_disable_device(pdev); 3928 3518 return err; 3519 + } 3520 + 3521 + /** 3522 + * ice_set_wake - enable or disable Wake on LAN 3523 + * @pf: pointer to the PF struct 3524 + * 3525 + * Simple helper for WoL control 3526 + */ 3527 + static void ice_set_wake(struct ice_pf *pf) 3528 + { 3529 + struct ice_hw *hw = &pf->hw; 3530 + bool wol = pf->wol_ena; 3531 + 3532 + /* clear wake state, otherwise new wake events won't fire */ 3533 + wr32(hw, PFPM_WUS, U32_MAX); 3534 + 3535 + /* enable / disable APM wake up, no RMW needed */ 3536 + wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 3537 + 3538 + /* set magic packet filter enabled */ 3539 + wr32(hw, PFPM_WUFC, wol ? 
PFPM_WUFC_MAG_M : 0); 3540 + } 3541 + 3542 + /** 3543 + * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet 3544 + * @pf: pointer to the PF struct 3545 + * 3546 + * Issue firmware command to enable multicast magic wake, making 3547 + * sure that any locally administered address (LAA) is used for 3548 + * wake, and that PF reset doesn't undo the LAA. 3549 + */ 3550 + static void ice_setup_mc_magic_wake(struct ice_pf *pf) 3551 + { 3552 + struct device *dev = ice_pf_to_dev(pf); 3553 + struct ice_hw *hw = &pf->hw; 3554 + enum ice_status status; 3555 + u8 mac_addr[ETH_ALEN]; 3556 + struct ice_vsi *vsi; 3557 + u8 flags; 3558 + 3559 + if (!pf->wol_ena) 3560 + return; 3561 + 3562 + vsi = ice_get_main_vsi(pf); 3563 + if (!vsi) 3564 + return; 3565 + 3566 + /* Get current MAC address in case it's an LAA */ 3567 + if (vsi->netdev) 3568 + ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 3569 + else 3570 + ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 3571 + 3572 + flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 3573 + ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 3574 + ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 3575 + 3576 + status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 3577 + if (status) 3578 + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", 3579 + ice_stat_str(status), 3580 + ice_aq_str(hw->adminq.sq_last_status)); 3929 3581 } 3930 3582 3931 3583 /** ··· 4020 3546 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); 4021 3547 if (!ice_is_safe_mode(pf)) 4022 3548 ice_remove_arfs(pf); 3549 + ice_setup_mc_magic_wake(pf); 4023 3550 ice_devlink_destroy_port(pf); 4024 3551 ice_vsi_release_all(pf); 3552 + ice_set_wake(pf); 4025 3553 ice_free_irq_msix_misc(pf); 4026 3554 ice_for_each_vsi(pf, i) { 4027 3555 if (!pf->vsi[i]) ··· 4043 3567 pci_wait_for_pending_transaction(pdev); 4044 3568 ice_clear_interrupt_scheme(pf); 4045 3569 pci_disable_pcie_error_reporting(pdev); 3570 + pci_disable_device(pdev); 4046 3571 } 3572 + 3573 + /** 3574 + * 
ice_shutdown - PCI callback for shutting down device 3575 + * @pdev: PCI device information struct 3576 + */ 3577 + static void ice_shutdown(struct pci_dev *pdev) 3578 + { 3579 + struct ice_pf *pf = pci_get_drvdata(pdev); 3580 + 3581 + ice_remove(pdev); 3582 + 3583 + if (system_state == SYSTEM_POWER_OFF) { 3584 + pci_wake_from_d3(pdev, pf->wol_ena); 3585 + pci_set_power_state(pdev, PCI_D3hot); 3586 + } 3587 + } 3588 + 3589 + #ifdef CONFIG_PM 3590 + /** 3591 + * ice_prepare_for_shutdown - prep for PCI shutdown 3592 + * @pf: board private structure 3593 + * 3594 + * Inform or close all dependent features in prep for PCI device shutdown 3595 + */ 3596 + static void ice_prepare_for_shutdown(struct ice_pf *pf) 3597 + { 3598 + struct ice_hw *hw = &pf->hw; 3599 + u32 v; 3600 + 3601 + /* Notify VFs of impending reset */ 3602 + if (ice_check_sq_alive(hw, &hw->mailboxq)) 3603 + ice_vc_notify_reset(pf); 3604 + 3605 + dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); 3606 + 3607 + /* disable the VSIs and their queues that are not already DOWN */ 3608 + ice_pf_dis_all_vsi(pf, false); 3609 + 3610 + ice_for_each_vsi(pf, v) 3611 + if (pf->vsi[v]) 3612 + pf->vsi[v]->vsi_num = 0; 3613 + 3614 + ice_shutdown_all_ctrlq(hw); 3615 + } 3616 + 3617 + /** 3618 + * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme 3619 + * @pf: board private structure to reinitialize 3620 + * 3621 + * This routine reinitialize interrupt scheme that was cleared during 3622 + * power management suspend callback. 3623 + * 3624 + * This should be called during resume routine to re-allocate the q_vectors 3625 + * and reacquire interrupts. 3626 + */ 3627 + static int ice_reinit_interrupt_scheme(struct ice_pf *pf) 3628 + { 3629 + struct device *dev = ice_pf_to_dev(pf); 3630 + int ret, v; 3631 + 3632 + /* Since we clear MSIX flag during suspend, we need to 3633 + * set it back during resume... 
/**
 * ice_suspend - PM callback to quiesce the device for a D3 transition
 * @dev: generic device information structure
 *
 * Power Management callback to quiesce the device and prepare
 * for D3 transition.
 *
 * Return: 0 on success. The "already suspended" and "down/reset in
 * progress" cases also return 0 so the system suspend can proceed —
 * the device is already inactive in those states. Returns -EBUSY when
 * the PF is not in a nominal state and suspend should not be attempted.
 */
static int ice_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ice_pf *pf;
	int disabled, v;

	pf = pci_get_drvdata(pdev);

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Device is not ready, no need to suspend it\n");
		return -EBUSY;
	}

	/* Stop watchdog tasks until resume completion.
	 * Even though it is most likely that the service task is
	 * disabled if the device is suspended or down, the service task's
	 * state is controlled by a different state bit, and we should
	 * store and honor whatever state that bit is in at this point.
	 * A non-zero return means the task was already disabled before
	 * this call; the early-out paths below only restart it when we
	 * were the ones who stopped it.
	 */
	disabled = ice_service_task_stop(pf);

	/* Already suspended? Then there is nothing to do. */
	if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	/* Device is down or resetting: it is already quiesced, so report
	 * success and let the system suspend continue.
	 */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "can't suspend device in reset or already down\n");
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	/* Quiesce the hardware and arm wake-up sources before cutting
	 * power. NOTE(review): the three helpers below are defined
	 * elsewhere; ordering assumed intentional — confirm against their
	 * definitions before reordering.
	 */
	ice_setup_mc_magic_wake(pf);

	ice_prepare_for_shutdown(pf);

	ice_set_wake(pf);

	/* Free vectors, clear the interrupt scheme and release IRQs
	 * for proper hibernation, especially with large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[v]);
	}
	ice_clear_interrupt_scheme(pf);

	/* wol_ena is the software WoL state tracked in struct ice_pf */
	pci_wake_from_d3(pdev, pf->wol_ena);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

/**
 * ice_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 *
 * Return: 0 on success, -ENODEV if the PCI device is no longer present,
 * or the error from pci_enable_device_mem(). A failure to restore the
 * interrupt scheme or to schedule the PF reset is logged but not
 * treated as fatal here.
 */
static int ice_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	enum ice_reset_req reset_type;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* Re-save the freshly restored config space so subsequent
	 * suspend/error-recovery paths start from current state.
	 */
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;

	ret = pci_enable_device_mem(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable device after suspend\n");
		return ret;
	}

	pf = pci_get_drvdata(pdev);
	hw = &pf->hw;

	/* Latch the wake-up status register so the last wakeup reason can
	 * be reported (pf->wakeup_reason is documented as "last wakeup
	 * reason" in struct ice_pf).
	 */
	pf->wakeup_reason = rd32(hw, PFPM_WUS);
	ice_print_wake_reason(pf);

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality. Failure is logged
	 * but we still attempt the PF reset/rebuild below.
	 */
	ret = ice_reinit_interrupt_scheme(pf);
	if (ret)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);

	clear_bit(__ICE_DOWN, pf->state);
	/* Now perform PF reset and rebuild */
	reset_type = ICE_RESET_PFR;
	/* re-enable service task for reset, but allow reset to schedule it */
	clear_bit(__ICE_SERVICE_DIS, pf->state);

	if (ice_schedule_reset(pf, reset_type))
		dev_err(dev, "Reset during resume failed.\n");

	clear_bit(__ICE_SUSPENDED, pf->state);
	ice_service_task_restart(pf);

	/* Restart the service task */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;
}
(!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) { 5664 + err = ice_init_phy_user_cfg(pi); 5665 + if (err) { 5666 + netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", 5667 + err); 5668 + return err; 5669 + } 5670 + } 5671 + 5672 + err = ice_configure_phy(vsi); 6367 5673 if (err) { 6368 5674 netdev_err(netdev, "Failed to set physical link up, error %d\n", 6369 5675 err); 6370 5676 return err; 6371 5677 } 6372 5678 } else { 5679 + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 6373 5680 err = ice_aq_set_link_restart_an(pi, false, NULL); 6374 5681 if (err) { 6375 5682 netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", 6376 5683 vsi->vsi_num, err); 6377 5684 return err; 6378 5685 } 6379 - set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags); 6380 5686 } 6381 5687 6382 5688 err = ice_vsi_open(vsi);
+2 -3
drivers/net/ethernet/intel/ice/ice_nvm.c
··· 172 172 * 173 173 * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. 174 174 */ 175 - static enum ice_status 176 - ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) 175 + enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) 177 176 { 178 177 enum ice_status status; 179 178 ··· 196 197 * Area (PFA) and returns the TLV pointer and length. The caller can 197 198 * use these to read the variable length TLV value. 198 199 */ 199 - static enum ice_status 200 + enum ice_status 200 201 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, 201 202 u16 module_type) 202 203 {
+4
drivers/net/ethernet/intel/ice/ice_nvm.h
··· 11 11 ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, 12 12 bool read_shadow_ram); 13 13 enum ice_status 14 + ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, 15 + u16 module_type); 16 + enum ice_status 14 17 ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); 15 18 enum ice_status ice_init_nvm(struct ice_hw *hw); 19 + enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); 16 20 #endif /* _ICE_NVM_H_ */
+57
drivers/net/ethernet/intel/ice/ice_type.h
··· 87 87 ICE_FC_DFLT 88 88 }; 89 89 90 + enum ice_phy_cache_mode { 91 + ICE_FC_MODE = 0, 92 + ICE_SPEED_MODE, 93 + ICE_FEC_MODE 94 + }; 95 + 90 96 enum ice_fec_mode { 91 97 ICE_FEC_NONE = 0, 92 98 ICE_FEC_RS, 93 99 ICE_FEC_BASER, 94 100 ICE_FEC_AUTO 101 + }; 102 + 103 + struct ice_phy_cache_mode_data { 104 + union { 105 + enum ice_fec_mode curr_user_fec_req; 106 + enum ice_fc_mode curr_user_fc_req; 107 + u16 curr_user_speed_req; 108 + } data; 95 109 }; 96 110 97 111 enum ice_set_fc_aq_failures { ··· 118 104 /* Various MAC types */ 119 105 enum ice_mac_type { 120 106 ICE_MAC_UNKNOWN = 0, 107 + ICE_MAC_E810, 121 108 ICE_MAC_GENERIC, 122 109 }; 123 110 ··· 175 160 u64 phy_type_high; 176 161 enum ice_media_type media_type; 177 162 u8 get_link_info; 163 + /* Please refer to struct ice_aqc_get_link_status_data to get 164 + * detail of enable bit in curr_user_speed_req 165 + */ 166 + u16 curr_user_speed_req; 167 + enum ice_fec_mode curr_user_fec_req; 168 + enum ice_fc_mode curr_user_fc_req; 169 + struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg; 178 170 }; 179 171 180 172 /* protocol enumeration for filters */ ··· 313 291 u8 major_ver; /* major version of NVM package */ 314 292 u8 minor_ver; /* minor version of dev starter */ 315 293 u8 blank_nvm_mode; /* is NVM empty (no FW present) */ 294 + }; 295 + 296 + struct ice_link_default_override_tlv { 297 + u8 options; 298 + #define ICE_LINK_OVERRIDE_OPT_M 0x3F 299 + #define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0) 300 + #define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1) 301 + #define ICE_LINK_OVERRIDE_PORT_DIS BIT(2) 302 + #define ICE_LINK_OVERRIDE_EN BIT(3) 303 + #define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4) 304 + #define ICE_LINK_OVERRIDE_EEE_EN BIT(5) 305 + u8 phy_config; 306 + #define ICE_LINK_OVERRIDE_PHY_CFG_S 8 307 + #define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S) 308 + #define ICE_LINK_OVERRIDE_PAUSE_M 0x3 309 + #define ICE_LINK_OVERRIDE_LESM_EN BIT(6) 310 + #define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7) 
311 + u8 fec_options; 312 + #define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF 313 + u8 rsvd1; 314 + u64 phy_type_low; 315 + u64 phy_type_high; 316 316 }; 317 317 318 318 #define ICE_NVM_VER_LEN 32 ··· 488 444 #define ICE_APP_SEL_ETHTYPE 0x1 489 445 #define ICE_APP_SEL_TCPIP 0x2 490 446 #define ICE_CEE_APP_SEL_ETHTYPE 0x0 447 + #define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134 491 448 #define ICE_CEE_APP_SEL_TCPIP 0x1 492 449 493 450 struct ice_dcbx_cfg { ··· 754 709 755 710 /* Checksum and Shadow RAM pointers */ 756 711 #define ICE_SR_BOOT_CFG_PTR 0x132 712 + #define ICE_SR_NVM_WOL_CFG 0x19 757 713 #define ICE_NVM_OROM_VER_OFF 0x02 758 714 #define ICE_SR_PBA_BLOCK_PTR 0x16 759 715 #define ICE_SR_NVM_DEV_STARTER_VER 0x18 ··· 772 726 #define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) 773 727 #define ICE_SR_PFA_PTR 0x40 774 728 #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 729 + 730 + /* Link override related */ 731 + #define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10 732 + #define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4 733 + #define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2 734 + #define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1 735 + #define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2 736 + #define ICE_FW_API_LINK_OVERRIDE_MAJ 1 737 + #define ICE_FW_API_LINK_OVERRIDE_MIN 5 738 + #define ICE_FW_API_LINK_OVERRIDE_PATCH 2 739 + 775 740 #define ICE_SR_WORDS_IN_1KB 512 776 741 777 742 /* Hash redirection LUT for VSI - maximum array size */
-25
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
··· 1593 1593 } 1594 1594 1595 1595 /** 1596 - * ice_pf_state_is_nominal - checks the PF for nominal state 1597 - * @pf: pointer to PF to check 1598 - * 1599 - * Check the PF's state for a collection of bits that would indicate 1600 - * the PF is in a state that would inhibit normal operation for 1601 - * driver functionality. 1602 - * 1603 - * Returns true if PF is in a nominal state. 1604 - * Returns false otherwise 1605 - */ 1606 - static bool ice_pf_state_is_nominal(struct ice_pf *pf) 1607 - { 1608 - DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; 1609 - 1610 - if (!pf) 1611 - return false; 1612 - 1613 - bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); 1614 - if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) 1615 - return false; 1616 - 1617 - return true; 1618 - } 1619 - 1620 - /** 1621 1596 * ice_pci_sriov_ena - Enable or change number of VFs 1622 1597 * @pf: pointer to the PF structure 1623 1598 * @num_vfs: number of VFs to allocate