Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ice: Start hardware initialization

This patch implements multiple pieces of the initialization flow
as follows:

1) A reset is issued to ensure a clean device state, followed
by initialization of the admin queue interface.

2) Once the admin queue interface is up, clear the PF config
and transition the device to non-PXE mode.

3) Get the configuration stored in the device's non-volatile
memory (NVM) using ice_init_nvm.

CC: Shannon Nelson <shannon.nelson@oracle.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@oracle.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

authored by

Anirudh Venkataramanan and committed by
Jeff Kirsher
f31e4b6f 7ec59eea

+854 -1
+2 -1
drivers/net/ethernet/intel/ice/Makefile
··· 9 9 10 10 ice-y := ice_main.o \ 11 11 ice_controlq.o \ 12 - ice_common.o 12 + ice_common.o \ 13 + ice_nvm.o
+2
drivers/net/ethernet/intel/ice/ice.h
··· 16 16 #include <linux/bitmap.h> 17 17 #include "ice_devids.h" 18 18 #include "ice_type.h" 19 + #include "ice_common.h" 19 20 20 21 #define ICE_BAR0 0 22 + #define ICE_AQ_LEN 64 21 23 22 24 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 23 25
+79
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 36 36 u8 reserved[12]; 37 37 }; 38 38 39 + /* Request resource ownership (direct 0x0008) 40 + * Release resource ownership (direct 0x0009) 41 + */ 42 + struct ice_aqc_req_res { 43 + __le16 res_id; 44 + #define ICE_AQC_RES_ID_NVM 1 45 + #define ICE_AQC_RES_ID_SDP 2 46 + #define ICE_AQC_RES_ID_CHNG_LOCK 3 47 + #define ICE_AQC_RES_ID_GLBL_LOCK 4 48 + __le16 access_type; 49 + #define ICE_AQC_RES_ACCESS_READ 1 50 + #define ICE_AQC_RES_ACCESS_WRITE 2 51 + 52 + /* Upon successful completion, FW writes this value and driver is 53 + * expected to release resource before timeout. This value is provided 54 + * in milliseconds. 55 + */ 56 + __le32 timeout; 57 + #define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000 58 + #define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000 59 + #define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000 60 + #define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000 61 + /* For SDP: pin id of the SDP */ 62 + __le32 res_number; 63 + /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */ 64 + __le16 status; 65 + #define ICE_AQ_RES_GLBL_SUCCESS 0 66 + #define ICE_AQ_RES_GLBL_IN_PROG 1 67 + #define ICE_AQ_RES_GLBL_DONE 2 68 + u8 reserved[2]; 69 + }; 70 + 71 + /* Clear PXE Command and response (direct 0x0110) */ 72 + struct ice_aqc_clear_pxe { 73 + u8 rx_cnt; 74 + #define ICE_AQC_CLEAR_PXE_RX_CNT 0x2 75 + u8 reserved[15]; 76 + }; 77 + 78 + /* NVM Read command (indirect 0x0701) 79 + * NVM Erase commands (direct 0x0702) 80 + * NVM Update commands (indirect 0x0703) 81 + */ 82 + struct ice_aqc_nvm { 83 + u8 cmd_flags; 84 + #define ICE_AQC_NVM_LAST_CMD BIT(0) 85 + #define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */ 86 + #define ICE_AQC_NVM_PRESERVATION_S 1 87 + #define ICE_AQC_NVM_PRESERVATION_M (3 << CSR_AQ_NVM_PRESERVATION_S) 88 + #define ICE_AQC_NVM_NO_PRESERVATION (0 << CSR_AQ_NVM_PRESERVATION_S) 89 + #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) 90 + #define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S) 91 + #define ICE_AQC_NVM_FLASH_ONLY BIT(7) 
92 + u8 module_typeid; 93 + __le16 length; 94 + #define ICE_AQC_NVM_ERASE_LEN 0xFFFF 95 + __le32 offset; 96 + __le32 addr_high; 97 + __le32 addr_low; 98 + }; 99 + 39 100 /** 40 101 * struct ice_aq_desc - Admin Queue (AQ) descriptor 41 102 * @flags: ICE_AQ_FLAG_* flags ··· 126 65 struct ice_aqc_generic generic; 127 66 struct ice_aqc_get_ver get_ver; 128 67 struct ice_aqc_q_shutdown q_shutdown; 68 + struct ice_aqc_req_res res_owner; 69 + struct ice_aqc_clear_pxe clear_pxe; 70 + struct ice_aqc_nvm nvm; 129 71 } params; 130 72 }; 131 73 ··· 146 82 /* error codes */ 147 83 enum ice_aq_err { 148 84 ICE_AQ_RC_OK = 0, /* success */ 85 + ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ 86 + ICE_AQ_RC_EEXIST = 13, /* object already exists */ 149 87 }; 150 88 151 89 /* Admin Queue command opcodes */ ··· 155 89 /* AQ commands */ 156 90 ice_aqc_opc_get_ver = 0x0001, 157 91 ice_aqc_opc_q_shutdown = 0x0003, 92 + 93 + /* resource ownership */ 94 + ice_aqc_opc_req_res = 0x0008, 95 + ice_aqc_opc_release_res = 0x0009, 96 + 97 + /* PXE */ 98 + ice_aqc_opc_clear_pxe_mode = 0x0110, 99 + 100 + ice_aqc_opc_clear_pf_cfg = 0x02A4, 101 + 102 + /* NVM commands */ 103 + ice_aqc_opc_nvm_read = 0x0701, 104 + 158 105 }; 159 106 160 107 #endif /* _ICE_ADMINQ_CMD_H_ */
+405
drivers/net/ethernet/intel/ice/ice_common.c
··· 4 4 #include "ice_common.h" 5 5 #include "ice_adminq_cmd.h" 6 6 7 + #define ICE_PF_RESET_WAIT_COUNT 200 8 + 9 + /** 10 + * ice_set_mac_type - Sets MAC type 11 + * @hw: pointer to the HW structure 12 + * 13 + * This function sets the MAC type of the adapter based on the 14 + * vendor ID and device ID stored in the hw structure. 15 + */ 16 + static enum ice_status ice_set_mac_type(struct ice_hw *hw) 17 + { 18 + if (hw->vendor_id != PCI_VENDOR_ID_INTEL) 19 + return ICE_ERR_DEVICE_NOT_SUPPORTED; 20 + 21 + hw->mac_type = ICE_MAC_GENERIC; 22 + return 0; 23 + } 24 + 25 + /** 26 + * ice_clear_pf_cfg - Clear PF configuration 27 + * @hw: pointer to the hardware structure 28 + */ 29 + enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) 30 + { 31 + struct ice_aq_desc desc; 32 + 33 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg); 34 + 35 + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 36 + } 37 + 38 + /** 39 + * ice_init_hw - main hardware initialization routine 40 + * @hw: pointer to the hardware structure 41 + */ 42 + enum ice_status ice_init_hw(struct ice_hw *hw) 43 + { 44 + enum ice_status status; 45 + 46 + /* Set MAC type based on DeviceID */ 47 + status = ice_set_mac_type(hw); 48 + if (status) 49 + return status; 50 + 51 + hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & 52 + PF_FUNC_RID_FUNC_NUM_M) >> 53 + PF_FUNC_RID_FUNC_NUM_S; 54 + 55 + status = ice_reset(hw, ICE_RESET_PFR); 56 + if (status) 57 + return status; 58 + 59 + status = ice_init_all_ctrlq(hw); 60 + if (status) 61 + goto err_unroll_cqinit; 62 + 63 + status = ice_clear_pf_cfg(hw); 64 + if (status) 65 + goto err_unroll_cqinit; 66 + 67 + ice_clear_pxe_mode(hw); 68 + 69 + status = ice_init_nvm(hw); 70 + if (status) 71 + goto err_unroll_cqinit; 72 + 73 + return 0; 74 + 75 + err_unroll_cqinit: 76 + ice_shutdown_all_ctrlq(hw); 77 + return status; 78 + } 79 + 80 + /** 81 + * ice_deinit_hw - unroll initialization operations done by ice_init_hw 82 + * @hw: pointer to the hardware structure 83 + */ 84 + 
void ice_deinit_hw(struct ice_hw *hw) 85 + { 86 + ice_shutdown_all_ctrlq(hw); 87 + } 88 + 89 + /** 90 + * ice_check_reset - Check to see if a global reset is complete 91 + * @hw: pointer to the hardware structure 92 + */ 93 + enum ice_status ice_check_reset(struct ice_hw *hw) 94 + { 95 + u32 cnt, reg = 0, grst_delay; 96 + 97 + /* Poll for Device Active state in case a recent CORER, GLOBR, 98 + * or EMPR has occurred. The grst delay value is in 100ms units. 99 + * Add 1sec for outstanding AQ commands that can take a long time. 100 + */ 101 + grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> 102 + GLGEN_RSTCTL_GRSTDEL_S) + 10; 103 + 104 + for (cnt = 0; cnt < grst_delay; cnt++) { 105 + mdelay(100); 106 + reg = rd32(hw, GLGEN_RSTAT); 107 + if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) 108 + break; 109 + } 110 + 111 + if (cnt == grst_delay) { 112 + ice_debug(hw, ICE_DBG_INIT, 113 + "Global reset polling failed to complete.\n"); 114 + return ICE_ERR_RESET_FAILED; 115 + } 116 + 117 + #define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \ 118 + GLNVM_ULD_GLOBR_DONE_M) 119 + 120 + /* Device is Active; check Global Reset processes are done */ 121 + for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { 122 + reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK; 123 + if (reg == ICE_RESET_DONE_MASK) { 124 + ice_debug(hw, ICE_DBG_INIT, 125 + "Global reset processes done. %d\n", cnt); 126 + break; 127 + } 128 + mdelay(10); 129 + } 130 + 131 + if (cnt == ICE_PF_RESET_WAIT_COUNT) { 132 + ice_debug(hw, ICE_DBG_INIT, 133 + "Wait for Reset Done timed out. 
GLNVM_ULD = 0x%x\n", 134 + reg); 135 + return ICE_ERR_RESET_FAILED; 136 + } 137 + 138 + return 0; 139 + } 140 + 141 + /** 142 + * ice_pf_reset - Reset the PF 143 + * @hw: pointer to the hardware structure 144 + * 145 + * If a global reset has been triggered, this function checks 146 + * for its completion and then issues the PF reset 147 + */ 148 + static enum ice_status ice_pf_reset(struct ice_hw *hw) 149 + { 150 + u32 cnt, reg; 151 + 152 + /* If at function entry a global reset was already in progress, i.e. 153 + * state is not 'device active' or any of the reset done bits are not 154 + * set in GLNVM_ULD, there is no need for a PF Reset; poll until the 155 + * global reset is done. 156 + */ 157 + if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || 158 + (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { 159 + /* poll on global reset currently in progress until done */ 160 + if (ice_check_reset(hw)) 161 + return ICE_ERR_RESET_FAILED; 162 + 163 + return 0; 164 + } 165 + 166 + /* Reset the PF */ 167 + reg = rd32(hw, PFGEN_CTRL); 168 + 169 + wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); 170 + 171 + for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { 172 + reg = rd32(hw, PFGEN_CTRL); 173 + if (!(reg & PFGEN_CTRL_PFSWR_M)) 174 + break; 175 + 176 + mdelay(1); 177 + } 178 + 179 + if (cnt == ICE_PF_RESET_WAIT_COUNT) { 180 + ice_debug(hw, ICE_DBG_INIT, 181 + "PF reset polling failed to complete.\n"); 182 + return ICE_ERR_RESET_FAILED; 183 + } 184 + 185 + return 0; 186 + } 187 + 188 + /** 189 + * ice_reset - Perform different types of reset 190 + * @hw: pointer to the hardware structure 191 + * @req: reset request 192 + * 193 + * This function triggers a reset as specified by the req parameter. 194 + * 195 + * Note: 196 + * If anything other than a PF reset is triggered, PXE mode is restored. 197 + * This has to be cleared using ice_clear_pxe_mode again, once the AQ 198 + * interface has been restored in the rebuild flow. 
199 + */ 200 + enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) 201 + { 202 + u32 val = 0; 203 + 204 + switch (req) { 205 + case ICE_RESET_PFR: 206 + return ice_pf_reset(hw); 207 + case ICE_RESET_CORER: 208 + ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); 209 + val = GLGEN_RTRIG_CORER_M; 210 + break; 211 + case ICE_RESET_GLOBR: 212 + ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); 213 + val = GLGEN_RTRIG_GLOBR_M; 214 + break; 215 + } 216 + 217 + val |= rd32(hw, GLGEN_RTRIG); 218 + wr32(hw, GLGEN_RTRIG, val); 219 + ice_flush(hw); 220 + 221 + /* wait for the FW to be ready */ 222 + return ice_check_reset(hw); 223 + } 224 + 7 225 /** 8 226 * ice_debug_cq 9 227 * @hw: pointer to the hardware structure ··· 345 127 cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING); 346 128 347 129 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 130 + } 131 + 132 + /** 133 + * ice_aq_req_res 134 + * @hw: pointer to the hw struct 135 + * @res: resource id 136 + * @access: access type 137 + * @sdp_number: resource number 138 + * @timeout: the maximum time in ms that the driver may hold the resource 139 + * @cd: pointer to command details structure or NULL 140 + * 141 + * requests common resource using the admin queue commands (0x0008) 142 + */ 143 + static enum ice_status 144 + ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 145 + enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 146 + struct ice_sq_cd *cd) 147 + { 148 + struct ice_aqc_req_res *cmd_resp; 149 + struct ice_aq_desc desc; 150 + enum ice_status status; 151 + 152 + cmd_resp = &desc.params.res_owner; 153 + 154 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 155 + 156 + cmd_resp->res_id = cpu_to_le16(res); 157 + cmd_resp->access_type = cpu_to_le16(access); 158 + cmd_resp->res_number = cpu_to_le32(sdp_number); 159 + 160 + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 161 + /* The completion specifies the maximum time in ms that the driver 162 + * may 
hold the resource in the Timeout field. 163 + * If the resource is held by someone else, the command completes with 164 + * busy return value and the timeout field indicates the maximum time 165 + * the current owner of the resource has to free it. 166 + */ 167 + if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 168 + *timeout = le32_to_cpu(cmd_resp->timeout); 169 + 170 + return status; 171 + } 172 + 173 + /** 174 + * ice_aq_release_res 175 + * @hw: pointer to the hw struct 176 + * @res: resource id 177 + * @sdp_number: resource number 178 + * @cd: pointer to command details structure or NULL 179 + * 180 + * release common resource using the admin queue commands (0x0009) 181 + */ 182 + static enum ice_status 183 + ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 184 + struct ice_sq_cd *cd) 185 + { 186 + struct ice_aqc_req_res *cmd; 187 + struct ice_aq_desc desc; 188 + 189 + cmd = &desc.params.res_owner; 190 + 191 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 192 + 193 + cmd->res_id = cpu_to_le16(res); 194 + cmd->res_number = cpu_to_le32(sdp_number); 195 + 196 + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 197 + } 198 + 199 + /** 200 + * ice_acquire_res 201 + * @hw: pointer to the HW structure 202 + * @res: resource id 203 + * @access: access type (read or write) 204 + * 205 + * This function will attempt to acquire the ownership of a resource. 
206 + */ 207 + enum ice_status 208 + ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 209 + enum ice_aq_res_access_type access) 210 + { 211 + #define ICE_RES_POLLING_DELAY_MS 10 212 + u32 delay = ICE_RES_POLLING_DELAY_MS; 213 + enum ice_status status; 214 + u32 time_left = 0; 215 + u32 timeout; 216 + 217 + status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 218 + 219 + /* An admin queue return code of ICE_AQ_RC_EEXIST means that another 220 + * driver has previously acquired the resource and performed any 221 + * necessary updates; in this case the caller does not obtain the 222 + * resource and has no further work to do. 223 + */ 224 + if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { 225 + status = ICE_ERR_AQ_NO_WORK; 226 + goto ice_acquire_res_exit; 227 + } 228 + 229 + if (status) 230 + ice_debug(hw, ICE_DBG_RES, 231 + "resource %d acquire type %d failed.\n", res, access); 232 + 233 + /* If necessary, poll until the current lock owner timeouts */ 234 + timeout = time_left; 235 + while (status && timeout && time_left) { 236 + mdelay(delay); 237 + timeout = (timeout > delay) ? 
timeout - delay : 0; 238 + status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 239 + 240 + if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { 241 + /* lock free, but no work to do */ 242 + status = ICE_ERR_AQ_NO_WORK; 243 + break; 244 + } 245 + 246 + if (!status) 247 + /* lock acquired */ 248 + break; 249 + } 250 + if (status && status != ICE_ERR_AQ_NO_WORK) 251 + ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 252 + 253 + ice_acquire_res_exit: 254 + if (status == ICE_ERR_AQ_NO_WORK) { 255 + if (access == ICE_RES_WRITE) 256 + ice_debug(hw, ICE_DBG_RES, 257 + "resource indicates no work to do.\n"); 258 + else 259 + ice_debug(hw, ICE_DBG_RES, 260 + "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); 261 + } 262 + return status; 263 + } 264 + 265 + /** 266 + * ice_release_res 267 + * @hw: pointer to the HW structure 268 + * @res: resource id 269 + * 270 + * This function will release a resource using the proper Admin Command. 271 + */ 272 + void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 273 + { 274 + enum ice_status status; 275 + u32 total_delay = 0; 276 + 277 + status = ice_aq_release_res(hw, res, 0, NULL); 278 + 279 + /* there are some rare cases when trying to release the resource 280 + * results in an admin Q timeout, so handle them correctly 281 + */ 282 + while ((status == ICE_ERR_AQ_TIMEOUT) && 283 + (total_delay < hw->adminq.sq_cmd_timeout)) { 284 + mdelay(1); 285 + status = ice_aq_release_res(hw, res, 0, NULL); 286 + total_delay++; 287 + } 288 + } 289 + 290 + /** 291 + * ice_aq_clear_pxe_mode 292 + * @hw: pointer to the hw struct 293 + * 294 + * Tell the firmware that the driver is taking over from PXE (0x0110). 
295 + */ 296 + static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) 297 + { 298 + struct ice_aq_desc desc; 299 + 300 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 301 + desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 302 + 303 + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 304 + } 305 + 306 + /** 307 + * ice_clear_pxe_mode - clear pxe operations mode 308 + * @hw: pointer to the hw struct 309 + * 310 + * Make sure all PXE mode settings are cleared, including things 311 + * like descriptor fetch/write-back mode. 312 + */ 313 + void ice_clear_pxe_mode(struct ice_hw *hw) 314 + { 315 + if (ice_check_sq_alive(hw, &hw->adminq)) 316 + ice_aq_clear_pxe_mode(hw); 348 317 }
+11
drivers/net/ethernet/intel/ice/ice_common.h
··· 9 9 10 10 void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, 11 11 u16 buf_len); 12 + enum ice_status ice_init_hw(struct ice_hw *hw); 13 + void ice_deinit_hw(struct ice_hw *hw); 14 + enum ice_status ice_check_reset(struct ice_hw *hw); 15 + enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); 12 16 enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); 13 17 void ice_shutdown_all_ctrlq(struct ice_hw *hw); 18 + enum ice_status 19 + ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 20 + enum ice_aq_res_access_type access); 21 + void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); 22 + enum ice_status ice_init_nvm(struct ice_hw *hw); 14 23 enum ice_status 15 24 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, 16 25 struct ice_aq_desc *desc, void *buf, u16 buf_size, 17 26 struct ice_sq_cd *cd); 27 + void ice_clear_pxe_mode(struct ice_hw *hw); 18 28 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); 19 29 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); 20 30 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); ··· 32 22 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, 33 23 void *buf, u16 buf_size, struct ice_sq_cd *cd); 34 24 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); 25 + enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); 35 26 #endif /* _ICE_COMMON_H_ */
+3
drivers/net/ethernet/intel/ice/ice_controlq.h
··· 6 6 7 7 #include "ice_adminq_cmd.h" 8 8 9 + /* Maximum buffer lengths for all control queue types */ 10 + #define ICE_AQ_MAX_BUF_LEN 4096 11 + 9 12 #define ICE_CTL_Q_DESC(R, i) \ 10 13 (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) 11 14
+30
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
··· 28 28 #define PF_FW_ATQLEN_ATQENABLE_S 31 29 29 #define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) 30 30 #define PF_FW_ATQT 0x00080400 31 + #define GLGEN_RSTAT 0x000B8188 32 + #define GLGEN_RSTAT_DEVSTATE_S 0 33 + #define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S) 34 + #define GLGEN_RSTCTL 0x000B8180 35 + #define GLGEN_RSTCTL_GRSTDEL_S 0 36 + #define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S) 37 + #define GLGEN_RTRIG 0x000B8190 38 + #define GLGEN_RTRIG_CORER_S 0 39 + #define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S) 40 + #define GLGEN_RTRIG_GLOBR_S 1 41 + #define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S) 42 + #define GLGEN_STAT 0x000B612C 43 + #define PFGEN_CTRL 0x00091000 44 + #define PFGEN_CTRL_PFSWR_S 0 45 + #define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S) 46 + #define GLLAN_RCTL_0 0x002941F8 47 + #define GLNVM_FLA 0x000B6108 48 + #define GLNVM_FLA_LOCKED_S 6 49 + #define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S) 50 + #define GLNVM_GENS 0x000B6100 51 + #define GLNVM_GENS_SR_SIZE_S 5 52 + #define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S) 53 + #define GLNVM_ULD 0x000B6008 54 + #define GLNVM_ULD_CORER_DONE_S 3 55 + #define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S) 56 + #define GLNVM_ULD_GLOBR_DONE_S 4 57 + #define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S) 58 + #define PF_FUNC_RID 0x0009E880 59 + #define PF_FUNC_RID_FUNC_NUM_S 0 60 + #define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S) 31 61 32 62 #endif /* _ICE_HW_AUTOGEN_H_ */
+31
drivers/net/ethernet/intel/ice/ice_main.c
··· 27 27 #endif /* !CONFIG_DYNAMIC_DEBUG */ 28 28 29 29 /** 30 + * ice_set_ctrlq_len - helper function to set controlq length 31 + * @hw: pointer to the hw instance 32 + */ 33 + static void ice_set_ctrlq_len(struct ice_hw *hw) 34 + { 35 + hw->adminq.num_rq_entries = ICE_AQ_LEN; 36 + hw->adminq.num_sq_entries = ICE_AQ_LEN; 37 + hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 38 + hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 39 + } 40 + 41 + /** 30 42 * ice_probe - Device initialization routine 31 43 * @pdev: PCI device information struct 32 44 * @ent: entry in ice_pci_tbl ··· 93 81 hw->subsystem_device_id = pdev->subsystem_device; 94 82 hw->bus.device = PCI_SLOT(pdev->devfn); 95 83 hw->bus.func = PCI_FUNC(pdev->devfn); 84 + ice_set_ctrlq_len(hw); 85 + 96 86 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 97 87 98 88 #ifndef CONFIG_DYNAMIC_DEBUG ··· 102 88 hw->debug_mask = debug; 103 89 #endif 104 90 91 + err = ice_init_hw(hw); 92 + if (err) { 93 + dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); 94 + err = -EIO; 95 + goto err_exit_unroll; 96 + } 97 + 98 + dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n", 99 + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, 100 + hw->api_maj_ver, hw->api_min_ver); 101 + 105 102 return 0; 103 + 104 + err_exit_unroll: 105 + pci_disable_pcie_error_reporting(pdev); 106 + return err; 106 107 } 107 108 108 109 /** ··· 132 103 return; 133 104 134 105 set_bit(__ICE_DOWN, pf->state); 106 + 107 + ice_deinit_hw(&pf->hw); 135 108 pci_disable_pcie_error_reporting(pdev); 136 109 } 137 110
+236
drivers/net/ethernet/intel/ice/ice_nvm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2018, Intel Corporation. */ 3 + 4 + #include "ice_common.h" 5 + 6 + /** 7 + * ice_aq_read_nvm 8 + * @hw: pointer to the hw struct 9 + * @module_typeid: module pointer location in words from the NVM beginning 10 + * @offset: byte offset from the module beginning 11 + * @length: length of the section to be read (in bytes from the offset) 12 + * @data: command buffer (size [bytes] = length) 13 + * @last_command: tells if this is the last command in a series 14 + * @cd: pointer to command details structure or NULL 15 + * 16 + * Read the NVM using the admin queue commands (0x0701) 17 + */ 18 + static enum ice_status 19 + ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length, 20 + void *data, bool last_command, struct ice_sq_cd *cd) 21 + { 22 + struct ice_aq_desc desc; 23 + struct ice_aqc_nvm *cmd; 24 + 25 + cmd = &desc.params.nvm; 26 + 27 + /* In offset the highest byte must be zeroed. */ 28 + if (offset & 0xFF000000) 29 + return ICE_ERR_PARAM; 30 + 31 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); 32 + 33 + /* If this is the last command in a series, set the proper flag. */ 34 + if (last_command) 35 + cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; 36 + cmd->module_typeid = module_typeid; 37 + cmd->offset = cpu_to_le32(offset); 38 + cmd->length = cpu_to_le16(length); 39 + 40 + return ice_aq_send_cmd(hw, &desc, data, length, cd); 41 + } 42 + 43 + /** 44 + * ice_check_sr_access_params - verify params for Shadow RAM R/W operations. 
45 + * @hw: pointer to the HW structure 46 + * @offset: offset in words from module start 47 + * @words: number of words to access 48 + */ 49 + static enum ice_status 50 + ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words) 51 + { 52 + if ((offset + words) > hw->nvm.sr_words) { 53 + ice_debug(hw, ICE_DBG_NVM, 54 + "NVM error: offset beyond SR lmt.\n"); 55 + return ICE_ERR_PARAM; 56 + } 57 + 58 + if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) { 59 + /* We can access only up to 4KB (one sector), in one AQ write */ 60 + ice_debug(hw, ICE_DBG_NVM, 61 + "NVM error: tried to access %d words, limit is %d.\n", 62 + words, ICE_SR_SECTOR_SIZE_IN_WORDS); 63 + return ICE_ERR_PARAM; 64 + } 65 + 66 + if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) != 67 + (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) { 68 + /* A single access cannot spread over two sectors */ 69 + ice_debug(hw, ICE_DBG_NVM, 70 + "NVM error: cannot spread over two sectors.\n"); 71 + return ICE_ERR_PARAM; 72 + } 73 + 74 + return 0; 75 + } 76 + 77 + /** 78 + * ice_read_sr_aq - Read Shadow RAM. 79 + * @hw: pointer to the HW structure 80 + * @offset: offset in words from module start 81 + * @words: number of words to read 82 + * @data: buffer for words reads from Shadow RAM 83 + * @last_command: tells the AdminQ that this is the last command 84 + * 85 + * Reads 16-bit word buffers from the Shadow RAM using the admin command. 86 + */ 87 + static enum ice_status 88 + ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data, 89 + bool last_command) 90 + { 91 + enum ice_status status; 92 + 93 + status = ice_check_sr_access_params(hw, offset, words); 94 + 95 + /* values in "offset" and "words" parameters are sized as words 96 + * (16 bits) but ice_aq_read_nvm expects these values in bytes. 97 + * So do this conversion while calling ice_aq_read_nvm. 
98 + */ 99 + if (!status) 100 + status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data, 101 + last_command, NULL); 102 + 103 + return status; 104 + } 105 + 106 + /** 107 + * ice_read_sr_word_aq - Reads Shadow RAM via AQ 108 + * @hw: pointer to the HW structure 109 + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 110 + * @data: word read from the Shadow RAM 111 + * 112 + * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method. 113 + */ 114 + static enum ice_status 115 + ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) 116 + { 117 + enum ice_status status; 118 + 119 + status = ice_read_sr_aq(hw, offset, 1, data, true); 120 + if (!status) 121 + *data = le16_to_cpu(*(__le16 *)data); 122 + 123 + return status; 124 + } 125 + 126 + /** 127 + * ice_acquire_nvm - Generic request for acquiring the NVM ownership 128 + * @hw: pointer to the HW structure 129 + * @access: NVM access type (read or write) 130 + * 131 + * This function will request NVM ownership. 132 + */ 133 + static enum 134 + ice_status ice_acquire_nvm(struct ice_hw *hw, 135 + enum ice_aq_res_access_type access) 136 + { 137 + if (hw->nvm.blank_nvm_mode) 138 + return 0; 139 + 140 + return ice_acquire_res(hw, ICE_NVM_RES_ID, access); 141 + } 142 + 143 + /** 144 + * ice_release_nvm - Generic request for releasing the NVM ownership 145 + * @hw: pointer to the HW structure 146 + * 147 + * This function will release NVM ownership. 148 + */ 149 + static void ice_release_nvm(struct ice_hw *hw) 150 + { 151 + if (hw->nvm.blank_nvm_mode) 152 + return; 153 + 154 + ice_release_res(hw, ICE_NVM_RES_ID); 155 + } 156 + 157 + /** 158 + * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary 159 + * @hw: pointer to the HW structure 160 + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 161 + * @data: word read from the Shadow RAM 162 + * 163 + * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. 
164 + */ 165 + static enum ice_status 166 + ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) 167 + { 168 + enum ice_status status; 169 + 170 + status = ice_acquire_nvm(hw, ICE_RES_READ); 171 + if (!status) { 172 + status = ice_read_sr_word_aq(hw, offset, data); 173 + ice_release_nvm(hw); 174 + } 175 + 176 + return status; 177 + } 178 + 179 + /** 180 + * ice_init_nvm - initializes NVM setting 181 + * @hw: pointer to the hw struct 182 + * 183 + * This function reads and populates NVM settings such as Shadow RAM size, 184 + * max_timeout, and blank_nvm_mode 185 + */ 186 + enum ice_status ice_init_nvm(struct ice_hw *hw) 187 + { 188 + struct ice_nvm_info *nvm = &hw->nvm; 189 + u16 eetrack_lo, eetrack_hi; 190 + enum ice_status status = 0; 191 + u32 fla, gens_stat; 192 + u8 sr_size; 193 + 194 + /* The SR size is stored regardless of the nvm programming mode 195 + * as the blank mode may be used in the factory line. 196 + */ 197 + gens_stat = rd32(hw, GLNVM_GENS); 198 + sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; 199 + 200 + /* Switching to words (sr_size contains power of 2) */ 201 + nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; 202 + 203 + /* Check if we are in the normal or blank NVM programming mode */ 204 + fla = rd32(hw, GLNVM_FLA); 205 + if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */ 206 + nvm->blank_nvm_mode = false; 207 + } else { /* Blank programming mode */ 208 + nvm->blank_nvm_mode = true; 209 + status = ICE_ERR_NVM_BLANK_MODE; 210 + ice_debug(hw, ICE_DBG_NVM, 211 + "NVM init error: unsupported blank mode.\n"); 212 + return status; 213 + } 214 + 215 + status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver); 216 + if (status) { 217 + ice_debug(hw, ICE_DBG_INIT, 218 + "Failed to read DEV starter version.\n"); 219 + return status; 220 + } 221 + 222 + status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); 223 + if (status) { 224 + ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK 
lo.\n"); 225 + return status; 226 + } 227 + status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi); 228 + if (status) { 229 + ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n"); 230 + return status; 231 + } 232 + 233 + hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; 234 + 235 + return status; 236 + }
+1
drivers/net/ethernet/intel/ice/ice_osdep.h
··· 15 15 #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) 16 16 #define rd64(a, reg) readq((a)->hw_addr + (reg)) 17 17 18 + #define ice_flush(a) rd32((a), GLGEN_STAT) 18 19 #define ICE_M(m, s) ((m) << (s)) 19 20 20 21 struct ice_dma_mem {
+5
drivers/net/ethernet/intel/ice/ice_status.h
··· 9 9 ICE_ERR_PARAM = -1, 10 10 ICE_ERR_NOT_READY = -3, 11 11 ICE_ERR_INVAL_SIZE = -6, 12 + ICE_ERR_DEVICE_NOT_SUPPORTED = -8, 13 + ICE_ERR_RESET_FAILED = -9, 12 14 ICE_ERR_FW_API_VER = -10, 13 15 ICE_ERR_NO_MEMORY = -11, 14 16 ICE_ERR_CFG = -12, 17 + ICE_ERR_OUT_OF_RANGE = -13, 18 + ICE_ERR_NVM_BLANK_MODE = -53, 15 19 ICE_ERR_AQ_ERROR = -100, 16 20 ICE_ERR_AQ_TIMEOUT = -101, 17 21 ICE_ERR_AQ_FULL = -102, 22 + ICE_ERR_AQ_NO_WORK = -103, 18 23 ICE_ERR_AQ_EMPTY = -104, 19 24 }; 20 25
+49
drivers/net/ethernet/intel/ice/ice_type.h
··· 10 10 #include "ice_controlq.h" 11 11 12 12 /* debug masks - set these bits in hw->debug_mask to control output */ 13 + #define ICE_DBG_INIT BIT_ULL(1) 14 + #define ICE_DBG_NVM BIT_ULL(7) 15 + #define ICE_DBG_RES BIT_ULL(17) 13 16 #define ICE_DBG_AQ_MSG BIT_ULL(24) 14 17 #define ICE_DBG_AQ_CMD BIT_ULL(27) 18 + 19 + enum ice_aq_res_ids { 20 + ICE_NVM_RES_ID = 1, 21 + ICE_SPD_RES_ID, 22 + ICE_GLOBAL_CFG_LOCK_RES_ID, 23 + ICE_CHANGE_LOCK_RES_ID 24 + }; 25 + 26 + enum ice_aq_res_access_type { 27 + ICE_RES_READ = 1, 28 + ICE_RES_WRITE 29 + }; 30 + 31 + /* Various MAC types */ 32 + enum ice_mac_type { 33 + ICE_MAC_UNKNOWN = 0, 34 + ICE_MAC_GENERIC, 35 + }; 36 + 37 + /* Various RESET request, These are not tied with HW reset types */ 38 + enum ice_reset_req { 39 + ICE_RESET_PFR = 0, 40 + ICE_RESET_CORER = 1, 41 + ICE_RESET_GLOBR = 2, 42 + }; 15 43 16 44 /* Bus parameters */ 17 45 struct ice_bus_info { ··· 47 19 u8 func; 48 20 }; 49 21 22 + /* NVM Information */ 23 + struct ice_nvm_info { 24 + u32 eetrack; /* NVM data version */ 25 + u32 oem_ver; /* OEM version info */ 26 + u16 sr_words; /* Shadow RAM size in words */ 27 + u16 ver; /* NVM package version */ 28 + bool blank_nvm_mode; /* is NVM empty (no FW present) */ 29 + }; 30 + 50 31 /* Port hardware description */ 51 32 struct ice_hw { 52 33 u8 __iomem *hw_addr; 53 34 void *back; 54 35 u64 debug_mask; /* bitmap for debug mask */ 36 + enum ice_mac_type mac_type; 55 37 56 38 /* pci info */ 57 39 u16 device_id; ··· 70 32 u16 subsystem_vendor_id; 71 33 u8 revision_id; 72 34 35 + u8 pf_id; /* device profile info */ 36 + 73 37 struct ice_bus_info bus; 38 + struct ice_nvm_info nvm; 39 + 74 40 /* Control Queue info */ 75 41 struct ice_ctl_q_info adminq; 76 42 ··· 88 46 u8 fw_patch; /* firmware patch version */ 89 47 u32 fw_build; /* firmware build number */ 90 48 }; 49 + 50 + /* Checksum and Shadow RAM pointers */ 51 + #define ICE_SR_NVM_DEV_STARTER_VER 0x18 52 + #define ICE_SR_NVM_EETRACK_LO 0x2D 53 + #define 
ICE_SR_NVM_EETRACK_HI 0x2E 54 + #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 55 + #define ICE_SR_WORDS_IN_1KB 512 91 56 92 57 #endif /* _ICE_TYPE_H_ */