Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

i40e/i40evf: Add nvmupdate support

This implements a state machine intended to support the userland tool for
updating the device eeprom. The state machine implements one-shot reads,
writes, multi-step write sessions, and checksum requests. If we're in the middle
of a multi-step write session, no one should fire off other writes; however,
one-shot reads are valid. The userland tool is expected to keep track of its session
status, arrange the placement and ordering of the writes, and deal with the
checksum requirement.

This patch also adds nvmupdate support to ethtool callbacks.
The get_eeprom() and set_eeprom() services in ethtool are used here to
facilitate the userland NVMUpdate tool. The 'magic' value in the get and
set commands is used to pass additional control information for managing
the read and write steps.

The read operation works both as normally expected in the standard ethtool
method, as well as with the extra NVM controls. The write operation
works only for the expanded NVM functions - the normal ethtool method is
not allowed because of the NVM semaphore management needed for multipart
writes, as well as the checksum requirement.

Change-ID: I1d84a170153a9f437906744e2e350fd68fe7563d
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Tested-by: Jim Young <jamesx.m.young@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

authored by

Shannon Nelson and committed by
Jeff Kirsher
cd552cb4 efe1ac25

+866 -16
+13 -8
drivers/net/ethernet/intel/i40e/i40e_adminq.c
··· 38 38 **/ 39 39 static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) 40 40 { 41 - return (desc->opcode == i40e_aqc_opc_nvm_erase) || 42 - (desc->opcode == i40e_aqc_opc_nvm_update); 41 + return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) || 42 + (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update)); 43 43 } 44 44 45 45 /** ··· 889 889 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; 890 890 } 891 891 892 - if (i40e_is_nvm_update_op(desc)) 893 - hw->aq.nvm_busy = true; 894 - 895 892 if (le16_to_cpu(desc->datalen) == buff_size) { 896 893 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 897 894 "AQTX: desc and buffer writeback:\n"); ··· 903 906 "AQTX: Writeback timeout.\n"); 904 907 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; 905 908 } 909 + 910 + if (!status && i40e_is_nvm_update_op(desc)) 911 + hw->aq.nvm_busy = true; 906 912 907 913 asq_send_command_error: 908 914 mutex_unlock(&hw->aq.asq_mutex); ··· 988 988 e->msg_size); 989 989 } 990 990 991 - if (i40e_is_nvm_update_op(&e->desc)) 992 - hw->aq.nvm_busy = false; 993 - 994 991 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); 995 992 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf); 996 993 ··· 1019 1022 if (pending != NULL) 1020 1023 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 1021 1024 mutex_unlock(&hw->aq.arq_mutex); 1025 + 1026 + if (i40e_is_nvm_update_op(&e->desc)) { 1027 + hw->aq.nvm_busy = false; 1028 + if (hw->aq.nvm_release_on_done) { 1029 + i40e_release_nvm(hw); 1030 + hw->aq.nvm_release_on_done = false; 1031 + } 1032 + } 1022 1033 1023 1034 return ret_code; 1024 1035 }
+36
drivers/net/ethernet/intel/i40e/i40e_adminq.h
··· 94 94 u16 api_maj_ver; /* api major version */ 95 95 u16 api_min_ver; /* api minor version */ 96 96 bool nvm_busy; 97 + bool nvm_release_on_done; 97 98 98 99 struct mutex asq_mutex; /* Send queue lock */ 99 100 struct mutex arq_mutex; /* Receive queue lock */ ··· 103 102 enum i40e_admin_queue_err asq_last_status; 104 103 enum i40e_admin_queue_err arq_last_status; 105 104 }; 105 + 106 + /** 107 + * i40e_aq_rc_to_posix - convert errors to user-land codes 108 + * aq_rc: AdminQ error code to convert 109 + **/ 110 + static inline int i40e_aq_rc_to_posix(u16 aq_rc) 111 + { 112 + int aq_to_posix[] = { 113 + 0, /* I40E_AQ_RC_OK */ 114 + -EPERM, /* I40E_AQ_RC_EPERM */ 115 + -ENOENT, /* I40E_AQ_RC_ENOENT */ 116 + -ESRCH, /* I40E_AQ_RC_ESRCH */ 117 + -EINTR, /* I40E_AQ_RC_EINTR */ 118 + -EIO, /* I40E_AQ_RC_EIO */ 119 + -ENXIO, /* I40E_AQ_RC_ENXIO */ 120 + -E2BIG, /* I40E_AQ_RC_E2BIG */ 121 + -EAGAIN, /* I40E_AQ_RC_EAGAIN */ 122 + -ENOMEM, /* I40E_AQ_RC_ENOMEM */ 123 + -EACCES, /* I40E_AQ_RC_EACCES */ 124 + -EFAULT, /* I40E_AQ_RC_EFAULT */ 125 + -EBUSY, /* I40E_AQ_RC_EBUSY */ 126 + -EEXIST, /* I40E_AQ_RC_EEXIST */ 127 + -EINVAL, /* I40E_AQ_RC_EINVAL */ 128 + -ENOTTY, /* I40E_AQ_RC_ENOTTY */ 129 + -ENOSPC, /* I40E_AQ_RC_ENOSPC */ 130 + -ENOSYS, /* I40E_AQ_RC_ENOSYS */ 131 + -ERANGE, /* I40E_AQ_RC_ERANGE */ 132 + -EPIPE, /* I40E_AQ_RC_EFLUSHED */ 133 + -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ 134 + -EROFS, /* I40E_AQ_RC_EMODE */ 135 + -EFBIG, /* I40E_AQ_RC_EFBIG */ 136 + }; 137 + 138 + return aq_to_posix[aq_rc]; 139 + } 106 140 107 141 /* general information */ 108 142 #define I40E_AQ_LARGE_BUF 512
+88
drivers/net/ethernet/intel/i40e/i40e_common.c
··· 2121 2121 return status; 2122 2122 } 2123 2123 2124 + /** 2125 + * i40e_aq_erase_nvm 2126 + * @hw: pointer to the hw struct 2127 + * @module_pointer: module pointer location in words from the NVM beginning 2128 + * @offset: offset in the module (expressed in 4 KB from module's beginning) 2129 + * @length: length of the section to be erased (expressed in 4 KB) 2130 + * @last_command: tells if this is the last command in a series 2131 + * @cmd_details: pointer to command details structure or NULL 2132 + * 2133 + * Erase the NVM sector using the admin queue commands 2134 + **/ 2135 + i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 2136 + u32 offset, u16 length, bool last_command, 2137 + struct i40e_asq_cmd_details *cmd_details) 2138 + { 2139 + struct i40e_aq_desc desc; 2140 + struct i40e_aqc_nvm_update *cmd = 2141 + (struct i40e_aqc_nvm_update *)&desc.params.raw; 2142 + i40e_status status; 2143 + 2144 + /* In offset the highest byte must be zeroed. */ 2145 + if (offset & 0xFF000000) { 2146 + status = I40E_ERR_PARAM; 2147 + goto i40e_aq_erase_nvm_exit; 2148 + } 2149 + 2150 + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 2151 + 2152 + /* If this is the last command in a series, set the proper flag. 
*/ 2153 + if (last_command) 2154 + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 2155 + cmd->module_pointer = module_pointer; 2156 + cmd->offset = cpu_to_le32(offset); 2157 + cmd->length = cpu_to_le16(length); 2158 + 2159 + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2160 + 2161 + i40e_aq_erase_nvm_exit: 2162 + return status; 2163 + } 2164 + 2124 2165 #define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01 2125 2166 #define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02 2126 2167 #define I40E_DEV_FUNC_CAP_NPAR 0x03 ··· 2388 2347 list_type_opc); 2389 2348 2390 2349 exit: 2350 + return status; 2351 + } 2352 + 2353 + /** 2354 + * i40e_aq_update_nvm 2355 + * @hw: pointer to the hw struct 2356 + * @module_pointer: module pointer location in words from the NVM beginning 2357 + * @offset: byte offset from the module beginning 2358 + * @length: length of the section to be written (in bytes from the offset) 2359 + * @data: command buffer (size [bytes] = length) 2360 + * @last_command: tells if this is the last command in a series 2361 + * @cmd_details: pointer to command details structure or NULL 2362 + * 2363 + * Update the NVM using the admin queue commands 2364 + **/ 2365 + i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 2366 + u32 offset, u16 length, void *data, 2367 + bool last_command, 2368 + struct i40e_asq_cmd_details *cmd_details) 2369 + { 2370 + struct i40e_aq_desc desc; 2371 + struct i40e_aqc_nvm_update *cmd = 2372 + (struct i40e_aqc_nvm_update *)&desc.params.raw; 2373 + i40e_status status; 2374 + 2375 + /* In offset the highest byte must be zeroed. */ 2376 + if (offset & 0xFF000000) { 2377 + status = I40E_ERR_PARAM; 2378 + goto i40e_aq_update_nvm_exit; 2379 + } 2380 + 2381 + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 2382 + 2383 + /* If this is the last command in a series, set the proper flag. 
*/ 2384 + if (last_command) 2385 + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 2386 + cmd->module_pointer = module_pointer; 2387 + cmd->offset = cpu_to_le32(offset); 2388 + cmd->length = cpu_to_le16(length); 2389 + 2390 + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2391 + if (length > I40E_AQ_LARGE_BUF) 2392 + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2393 + 2394 + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 2395 + 2396 + i40e_aq_update_nvm_exit: 2391 2397 return status; 2392 2398 } 2393 2399
+59 -2
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 759 759 u8 *eeprom_buff; 760 760 u16 i, sectors; 761 761 bool last; 762 + u32 magic; 763 + 762 764 #define I40E_NVM_SECTOR_SIZE 4096 763 765 if (eeprom->len == 0) 764 766 return -EINVAL; 765 767 768 + /* check for NVMUpdate access method */ 769 + magic = hw->vendor_id | (hw->device_id << 16); 770 + if (eeprom->magic && eeprom->magic != magic) { 771 + int errno; 772 + 773 + /* make sure it is the right magic for NVMUpdate */ 774 + if ((eeprom->magic >> 16) != hw->device_id) 775 + return -EINVAL; 776 + 777 + ret_val = i40e_nvmupd_command(hw, 778 + (struct i40e_nvm_access *)eeprom, 779 + bytes, &errno); 780 + if (ret_val) 781 + dev_info(&pf->pdev->dev, 782 + "NVMUpdate read failed err=%d status=0x%x\n", 783 + ret_val, hw->aq.asq_last_status); 784 + 785 + return errno; 786 + } 787 + 788 + /* normal ethtool get_eeprom support */ 766 789 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 767 790 768 791 eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); ··· 812 789 ret_val = i40e_aq_read_nvm(hw, 0x0, 813 790 eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), 814 791 len, 815 - eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), 792 + (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), 816 793 last, NULL); 817 794 if (ret_val) { 818 795 dev_info(&pf->pdev->dev, ··· 824 801 825 802 release_nvm: 826 803 i40e_release_nvm(hw); 827 - memcpy(bytes, eeprom_buff, eeprom->len); 804 + memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); 828 805 free_buff: 829 806 kfree(eeprom_buff); 830 807 return ret_val; ··· 842 819 /* register returns value in power of 2, 64Kbyte chunks. 
*/ 843 820 val = (64 * 1024) * (1 << val); 844 821 return val; 822 + } 823 + 824 + static int i40e_set_eeprom(struct net_device *netdev, 825 + struct ethtool_eeprom *eeprom, u8 *bytes) 826 + { 827 + struct i40e_netdev_priv *np = netdev_priv(netdev); 828 + struct i40e_hw *hw = &np->vsi->back->hw; 829 + struct i40e_pf *pf = np->vsi->back; 830 + int ret_val = 0; 831 + int errno; 832 + u32 magic; 833 + 834 + /* normal ethtool set_eeprom is not supported */ 835 + magic = hw->vendor_id | (hw->device_id << 16); 836 + if (eeprom->magic == magic) 837 + return -EOPNOTSUPP; 838 + 839 + /* check for NVMUpdate access method */ 840 + if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) 841 + return -EINVAL; 842 + 843 + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || 844 + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) 845 + return -EBUSY; 846 + 847 + ret_val = i40e_nvmupd_command(hw, (struct i40e_nvm_access *)eeprom, 848 + bytes, &errno); 849 + if (ret_val) 850 + dev_info(&pf->pdev->dev, 851 + "NVMUpdate write failed err=%d status=0x%x\n", 852 + ret_val, hw->aq.asq_last_status); 853 + 854 + return errno; 845 855 } 846 856 847 857 static void i40e_get_drvinfo(struct net_device *netdev, ··· 2150 2094 .get_link = ethtool_op_get_link, 2151 2095 .get_wol = i40e_get_wol, 2152 2096 .set_wol = i40e_set_wol, 2097 + .set_eeprom = i40e_set_eeprom, 2153 2098 .get_eeprom_len = i40e_get_eeprom_len, 2154 2099 .get_eeprom = i40e_get_eeprom, 2155 2100 .get_ringparam = i40e_get_ringparam,
+511
drivers/net/ethernet/intel/i40e/i40e_nvm.c
··· 241 241 } 242 242 243 243 /** 244 + * i40e_write_nvm_aq - Writes Shadow RAM. 245 + * @hw: pointer to the HW structure. 246 + * @module_pointer: module pointer location in words from the NVM beginning 247 + * @offset: offset in words from module start 248 + * @words: number of words to write 249 + * @data: buffer with words to write to the Shadow RAM 250 + * @last_command: tells the AdminQ that this is the last command 251 + * 252 + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 253 + **/ 254 + i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, 255 + u32 offset, u16 words, void *data, 256 + bool last_command) 257 + { 258 + i40e_status ret_code = I40E_ERR_NVM; 259 + 260 + /* Here we are checking the SR limit only for the flat memory model. 261 + * We cannot do it for the module-based model, as we did not acquire 262 + * the NVM resource yet (we cannot get the module pointer value). 263 + * Firmware will check the module-based model. 264 + */ 265 + if ((offset + words) > hw->nvm.sr_size) 266 + hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n"); 267 + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) 268 + /* We can write only up to 4KB (one sector), in one AQ write */ 269 + hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n"); 270 + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) 271 + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) 272 + /* A single write cannot spread over two sectors */ 273 + hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n"); 274 + else 275 + ret_code = i40e_aq_update_nvm(hw, module_pointer, 276 + 2 * offset, /*bytes*/ 277 + 2 * words, /*bytes*/ 278 + data, last_command, NULL); 279 + 280 + return ret_code; 281 + } 282 + 283 + /** 244 284 * i40e_calc_nvm_checksum - Calculates and returns the checksum 245 285 * @hw: pointer to hardware structure 246 286 * @checksum: pointer to the checksum ··· 350 310 } 351 311 352 
312 /** 313 + * i40e_update_nvm_checksum - Updates the NVM checksum 314 + * @hw: pointer to hardware structure 315 + * 316 + * NVM ownership must be acquired before calling this function and released 317 + * on ARQ completion event reception by caller. 318 + * This function will commit SR to NVM. 319 + **/ 320 + i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) 321 + { 322 + i40e_status ret_code = 0; 323 + u16 checksum; 324 + 325 + ret_code = i40e_calc_nvm_checksum(hw, &checksum); 326 + if (!ret_code) 327 + ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, 328 + 1, &checksum, true); 329 + 330 + return ret_code; 331 + } 332 + 333 + /** 353 334 * i40e_validate_nvm_checksum - Validate EEPROM checksum 354 335 * @hw: pointer to hardware structure 355 336 * @checksum: calculated checksum ··· 406 345 407 346 i40e_validate_nvm_checksum_exit: 408 347 return ret_code; 348 + } 349 + 350 + static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, 351 + struct i40e_nvm_access *cmd, 352 + u8 *bytes, int *errno); 353 + static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, 354 + struct i40e_nvm_access *cmd, 355 + u8 *bytes, int *errno); 356 + static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, 357 + struct i40e_nvm_access *cmd, 358 + u8 *bytes, int *errno); 359 + static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, 360 + struct i40e_nvm_access *cmd, 361 + int *errno); 362 + static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, 363 + struct i40e_nvm_access *cmd, 364 + int *errno); 365 + static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, 366 + struct i40e_nvm_access *cmd, 367 + u8 *bytes, int *errno); 368 + static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, 369 + struct i40e_nvm_access *cmd, 370 + u8 *bytes, int *errno); 371 + static inline u8 i40e_nvmupd_get_module(u32 val) 372 + { 373 + return (u8)(val & I40E_NVM_MOD_PNT_MASK); 374 + } 375 + static inline u8 
i40e_nvmupd_get_transaction(u32 val) 376 + { 377 + return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); 378 + } 379 + 380 + /** 381 + * i40e_nvmupd_command - Process an NVM update command 382 + * @hw: pointer to hardware structure 383 + * @cmd: pointer to nvm update command 384 + * @bytes: pointer to the data buffer 385 + * @errno: pointer to return error code 386 + * 387 + * Dispatches command depending on what update state is current 388 + **/ 389 + i40e_status i40e_nvmupd_command(struct i40e_hw *hw, 390 + struct i40e_nvm_access *cmd, 391 + u8 *bytes, int *errno) 392 + { 393 + i40e_status status; 394 + 395 + /* assume success */ 396 + *errno = 0; 397 + 398 + switch (hw->nvmupd_state) { 399 + case I40E_NVMUPD_STATE_INIT: 400 + status = i40e_nvmupd_state_init(hw, cmd, bytes, errno); 401 + break; 402 + 403 + case I40E_NVMUPD_STATE_READING: 404 + status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno); 405 + break; 406 + 407 + case I40E_NVMUPD_STATE_WRITING: 408 + status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno); 409 + break; 410 + 411 + default: 412 + /* invalid state, should never happen */ 413 + status = I40E_NOT_SUPPORTED; 414 + *errno = -ESRCH; 415 + break; 416 + } 417 + return status; 418 + } 419 + 420 + /** 421 + * i40e_nvmupd_state_init - Handle NVM update state Init 422 + * @hw: pointer to hardware structure 423 + * @cmd: pointer to nvm update command buffer 424 + * @bytes: pointer to the data buffer 425 + * @errno: pointer to return error code 426 + * 427 + * Process legitimate commands of the Init state and conditionally set next 428 + * state. Reject all other commands. 
429 + **/ 430 + static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, 431 + struct i40e_nvm_access *cmd, 432 + u8 *bytes, int *errno) 433 + { 434 + i40e_status status = 0; 435 + enum i40e_nvmupd_cmd upd_cmd; 436 + 437 + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); 438 + 439 + switch (upd_cmd) { 440 + case I40E_NVMUPD_READ_SA: 441 + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); 442 + if (status) { 443 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 444 + } else { 445 + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); 446 + i40e_release_nvm(hw); 447 + } 448 + break; 449 + 450 + case I40E_NVMUPD_READ_SNT: 451 + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); 452 + if (status) { 453 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 454 + } else { 455 + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); 456 + hw->nvmupd_state = I40E_NVMUPD_STATE_READING; 457 + } 458 + break; 459 + 460 + case I40E_NVMUPD_WRITE_ERA: 461 + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); 462 + if (status) { 463 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 464 + } else { 465 + status = i40e_nvmupd_nvm_erase(hw, cmd, errno); 466 + if (status) 467 + i40e_release_nvm(hw); 468 + else 469 + hw->aq.nvm_release_on_done = true; 470 + } 471 + break; 472 + 473 + case I40E_NVMUPD_WRITE_SA: 474 + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); 475 + if (status) { 476 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 477 + } else { 478 + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 479 + if (status) 480 + i40e_release_nvm(hw); 481 + else 482 + hw->aq.nvm_release_on_done = true; 483 + } 484 + break; 485 + 486 + case I40E_NVMUPD_WRITE_SNT: 487 + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); 488 + if (status) { 489 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 490 + } else { 491 + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 492 + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; 493 + } 494 + break; 
495 + 496 + case I40E_NVMUPD_CSUM_SA: 497 + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); 498 + if (status) { 499 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 500 + } else { 501 + status = i40e_update_nvm_checksum(hw); 502 + if (status) { 503 + *errno = hw->aq.asq_last_status ? 504 + i40e_aq_rc_to_posix(hw->aq.asq_last_status) : 505 + -EIO; 506 + i40e_release_nvm(hw); 507 + } else { 508 + hw->aq.nvm_release_on_done = true; 509 + } 510 + } 511 + break; 512 + 513 + default: 514 + status = I40E_ERR_NVM; 515 + *errno = -ESRCH; 516 + break; 517 + } 518 + return status; 519 + } 520 + 521 + /** 522 + * i40e_nvmupd_state_reading - Handle NVM update state Reading 523 + * @hw: pointer to hardware structure 524 + * @cmd: pointer to nvm update command buffer 525 + * @bytes: pointer to the data buffer 526 + * @errno: pointer to return error code 527 + * 528 + * NVM ownership is already held. Process legitimate commands and set any 529 + * change in state; reject all other commands. 530 + **/ 531 + static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, 532 + struct i40e_nvm_access *cmd, 533 + u8 *bytes, int *errno) 534 + { 535 + i40e_status status; 536 + enum i40e_nvmupd_cmd upd_cmd; 537 + 538 + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); 539 + 540 + switch (upd_cmd) { 541 + case I40E_NVMUPD_READ_SA: 542 + case I40E_NVMUPD_READ_CON: 543 + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); 544 + break; 545 + 546 + case I40E_NVMUPD_READ_LCB: 547 + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); 548 + i40e_release_nvm(hw); 549 + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 550 + break; 551 + 552 + default: 553 + status = I40E_NOT_SUPPORTED; 554 + *errno = -ESRCH; 555 + break; 556 + } 557 + return status; 558 + } 559 + 560 + /** 561 + * i40e_nvmupd_state_writing - Handle NVM update state Writing 562 + * @hw: pointer to hardware structure 563 + * @cmd: pointer to nvm update command buffer 564 + * @bytes: pointer to the data buffer 565 + * 
@errno: pointer to return error code 566 + * 567 + * NVM ownership is already held. Process legitimate commands and set any 568 + * change in state; reject all other commands 569 + **/ 570 + static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, 571 + struct i40e_nvm_access *cmd, 572 + u8 *bytes, int *errno) 573 + { 574 + i40e_status status; 575 + enum i40e_nvmupd_cmd upd_cmd; 576 + 577 + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); 578 + 579 + switch (upd_cmd) { 580 + case I40E_NVMUPD_WRITE_CON: 581 + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 582 + break; 583 + 584 + case I40E_NVMUPD_WRITE_LCB: 585 + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 586 + if (!status) { 587 + hw->aq.nvm_release_on_done = true; 588 + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 589 + } 590 + break; 591 + 592 + case I40E_NVMUPD_CSUM_CON: 593 + status = i40e_update_nvm_checksum(hw); 594 + if (status) 595 + *errno = hw->aq.asq_last_status ? 596 + i40e_aq_rc_to_posix(hw->aq.asq_last_status) : 597 + -EIO; 598 + break; 599 + 600 + case I40E_NVMUPD_CSUM_LCB: 601 + status = i40e_update_nvm_checksum(hw); 602 + if (status) { 603 + *errno = hw->aq.asq_last_status ? 
604 + i40e_aq_rc_to_posix(hw->aq.asq_last_status) : 605 + -EIO; 606 + } else { 607 + hw->aq.nvm_release_on_done = true; 608 + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 609 + } 610 + break; 611 + 612 + default: 613 + status = I40E_NOT_SUPPORTED; 614 + *errno = -ESRCH; 615 + break; 616 + } 617 + return status; 618 + } 619 + 620 + /** 621 + * i40e_nvmupd_validate_command - Validate given command 622 + * @hw: pointer to hardware structure 623 + * @cmd: pointer to nvm update command buffer 624 + * @errno: pointer to return error code 625 + * 626 + * Return one of the valid command types or I40E_NVMUPD_INVALID 627 + **/ 628 + static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, 629 + struct i40e_nvm_access *cmd, 630 + int *errno) 631 + { 632 + enum i40e_nvmupd_cmd upd_cmd; 633 + u8 transaction, module; 634 + 635 + /* anything that doesn't match a recognized case is an error */ 636 + upd_cmd = I40E_NVMUPD_INVALID; 637 + 638 + transaction = i40e_nvmupd_get_transaction(cmd->config); 639 + module = i40e_nvmupd_get_module(cmd->config); 640 + 641 + /* limits on data size */ 642 + if ((cmd->data_size < 1) || 643 + (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { 644 + hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n", 645 + cmd->data_size); 646 + *errno = -EFAULT; 647 + return I40E_NVMUPD_INVALID; 648 + } 649 + 650 + switch (cmd->command) { 651 + case I40E_NVM_READ: 652 + switch (transaction) { 653 + case I40E_NVM_CON: 654 + upd_cmd = I40E_NVMUPD_READ_CON; 655 + break; 656 + case I40E_NVM_SNT: 657 + upd_cmd = I40E_NVMUPD_READ_SNT; 658 + break; 659 + case I40E_NVM_LCB: 660 + upd_cmd = I40E_NVMUPD_READ_LCB; 661 + break; 662 + case I40E_NVM_SA: 663 + upd_cmd = I40E_NVMUPD_READ_SA; 664 + break; 665 + } 666 + break; 667 + 668 + case I40E_NVM_WRITE: 669 + switch (transaction) { 670 + case I40E_NVM_CON: 671 + upd_cmd = I40E_NVMUPD_WRITE_CON; 672 + break; 673 + case I40E_NVM_SNT: 674 + upd_cmd = I40E_NVMUPD_WRITE_SNT; 675 + break; 676 + case I40E_NVM_LCB: 677 
+ upd_cmd = I40E_NVMUPD_WRITE_LCB; 678 + break; 679 + case I40E_NVM_SA: 680 + upd_cmd = I40E_NVMUPD_WRITE_SA; 681 + break; 682 + case I40E_NVM_ERA: 683 + upd_cmd = I40E_NVMUPD_WRITE_ERA; 684 + break; 685 + case I40E_NVM_CSUM: 686 + upd_cmd = I40E_NVMUPD_CSUM_CON; 687 + break; 688 + case (I40E_NVM_CSUM|I40E_NVM_SA): 689 + upd_cmd = I40E_NVMUPD_CSUM_SA; 690 + break; 691 + case (I40E_NVM_CSUM|I40E_NVM_LCB): 692 + upd_cmd = I40E_NVMUPD_CSUM_LCB; 693 + break; 694 + } 695 + break; 696 + } 697 + 698 + if (upd_cmd == I40E_NVMUPD_INVALID) { 699 + *errno = -EFAULT; 700 + hw_dbg(hw, 701 + "i40e_nvmupd_validate_command returns %d errno: %d\n", 702 + upd_cmd, *errno); 703 + } 704 + return upd_cmd; 705 + } 706 + 707 + /** 708 + * i40e_nvmupd_nvm_read - Read NVM 709 + * @hw: pointer to hardware structure 710 + * @cmd: pointer to nvm update command buffer 711 + * @bytes: pointer to the data buffer 712 + * @errno: pointer to return error code 713 + * 714 + * cmd structure contains identifiers and data buffer 715 + **/ 716 + static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, 717 + struct i40e_nvm_access *cmd, 718 + u8 *bytes, int *errno) 719 + { 720 + i40e_status status; 721 + u8 module, transaction; 722 + bool last; 723 + 724 + transaction = i40e_nvmupd_get_transaction(cmd->config); 725 + module = i40e_nvmupd_get_module(cmd->config); 726 + last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); 727 + hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", 728 + module, cmd->offset, cmd->data_size); 729 + 730 + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, 731 + bytes, last, NULL); 732 + hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status); 733 + if (status) 734 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 735 + 736 + return status; 737 + } 738 + 739 + /** 740 + * i40e_nvmupd_nvm_erase - Erase an NVM module 741 + * @hw: pointer to hardware structure 742 + * @cmd: pointer to nvm update command buffer 743 + * 
@errno: pointer to return error code 744 + * 745 + * module, offset, data_size and data are in cmd structure 746 + **/ 747 + static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, 748 + struct i40e_nvm_access *cmd, 749 + int *errno) 750 + { 751 + i40e_status status = 0; 752 + u8 module, transaction; 753 + bool last; 754 + 755 + transaction = i40e_nvmupd_get_transaction(cmd->config); 756 + module = i40e_nvmupd_get_module(cmd->config); 757 + last = (transaction & I40E_NVM_LCB); 758 + hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", 759 + module, cmd->offset, cmd->data_size); 760 + status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, 761 + last, NULL); 762 + hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status); 763 + if (status) 764 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 765 + 766 + return status; 767 + } 768 + 769 + /** 770 + * i40e_nvmupd_nvm_write - Write NVM 771 + * @hw: pointer to hardware structure 772 + * @cmd: pointer to nvm update command buffer 773 + * @bytes: pointer to the data buffer 774 + * @errno: pointer to return error code 775 + * 776 + * module, offset, data_size and data are in cmd structure 777 + **/ 778 + static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, 779 + struct i40e_nvm_access *cmd, 780 + u8 *bytes, int *errno) 781 + { 782 + i40e_status status = 0; 783 + u8 module, transaction; 784 + bool last; 785 + 786 + transaction = i40e_nvmupd_get_transaction(cmd->config); 787 + module = i40e_nvmupd_get_module(cmd->config); 788 + last = (transaction & I40E_NVM_LCB); 789 + hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", 790 + module, cmd->offset, cmd->data_size); 791 + status = i40e_aq_update_nvm(hw, module, cmd->offset, 792 + (u16)cmd->data_size, bytes, last, NULL); 793 + hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status); 794 + if (status) 795 + *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); 796 + 797 + return status; 409 798 }
+7
drivers/net/ethernet/intel/i40e/i40e_prototype.h
··· 150 150 u32 offset, u16 length, void *data, 151 151 bool last_command, 152 152 struct i40e_asq_cmd_details *cmd_details); 153 + i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 154 + u32 offset, u16 length, bool last_command, 155 + struct i40e_asq_cmd_details *cmd_details); 153 156 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 154 157 void *buff, u16 buff_size, u16 *data_size, 155 158 enum i40e_admin_queue_opc list_type_opc, ··· 248 245 u16 *data); 249 246 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, 250 247 u16 *words, u16 *data); 248 + i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); 251 249 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, 252 250 u16 *checksum); 251 + i40e_status i40e_nvmupd_command(struct i40e_hw *hw, 252 + struct i40e_nvm_access *cmd, 253 + u8 *bytes, int *); 253 254 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); 254 255 255 256 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+58
drivers/net/ethernet/intel/i40e/i40e_type.h
··· 269 269 u32 eetrack; /* NVM data version */ 270 270 }; 271 271 272 + /* definitions used in NVM update support */ 273 + 274 + enum i40e_nvmupd_cmd { 275 + I40E_NVMUPD_INVALID, 276 + I40E_NVMUPD_READ_CON, 277 + I40E_NVMUPD_READ_SNT, 278 + I40E_NVMUPD_READ_LCB, 279 + I40E_NVMUPD_READ_SA, 280 + I40E_NVMUPD_WRITE_ERA, 281 + I40E_NVMUPD_WRITE_CON, 282 + I40E_NVMUPD_WRITE_SNT, 283 + I40E_NVMUPD_WRITE_LCB, 284 + I40E_NVMUPD_WRITE_SA, 285 + I40E_NVMUPD_CSUM_CON, 286 + I40E_NVMUPD_CSUM_SA, 287 + I40E_NVMUPD_CSUM_LCB, 288 + }; 289 + 290 + enum i40e_nvmupd_state { 291 + I40E_NVMUPD_STATE_INIT, 292 + I40E_NVMUPD_STATE_READING, 293 + I40E_NVMUPD_STATE_WRITING 294 + }; 295 + 296 + /* nvm_access definition and its masks/shifts need to be accessible to 297 + * application, core driver, and shared code. Where is the right file? 298 + */ 299 + #define I40E_NVM_READ 0xB 300 + #define I40E_NVM_WRITE 0xC 301 + 302 + #define I40E_NVM_MOD_PNT_MASK 0xFF 303 + 304 + #define I40E_NVM_TRANS_SHIFT 8 305 + #define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) 306 + #define I40E_NVM_CON 0x0 307 + #define I40E_NVM_SNT 0x1 308 + #define I40E_NVM_LCB 0x2 309 + #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) 310 + #define I40E_NVM_ERA 0x4 311 + #define I40E_NVM_CSUM 0x8 312 + 313 + #define I40E_NVM_ADAPT_SHIFT 16 314 + #define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) 315 + 316 + #define I40E_NVMUPD_MAX_DATA 4096 317 + #define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ 318 + 319 + struct i40e_nvm_access { 320 + u32 command; 321 + u32 config; 322 + u32 offset; /* in bytes */ 323 + u32 data_size; /* in bytes */ 324 + u8 data[1]; 325 + }; 326 + 272 327 /* PCI bus types */ 273 328 enum i40e_bus_type { 274 329 i40e_bus_type_unknown = 0, ··· 458 403 459 404 /* Admin Queue info */ 460 405 struct i40e_adminq_info aq; 406 + 407 + /* state of nvm update process */ 408 + enum i40e_nvmupd_state nvmupd_state; 461 409 462 410 /* HMC info */ 463 411 struct i40e_hmc_info hmc; /* HMC info struct 
*/
-6
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
··· 708 708 goto asq_send_command_exit; 709 709 } 710 710 711 - if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) { 712 - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n"); 713 - status = I40E_ERR_NVM; 714 - goto asq_send_command_exit; 715 - } 716 - 717 711 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 718 712 if (cmd_details) { 719 713 *details = *cmd_details;
+36
drivers/net/ethernet/intel/i40evf/i40e_adminq.h
··· 94 94 u16 api_maj_ver; /* api major version */ 95 95 u16 api_min_ver; /* api minor version */ 96 96 bool nvm_busy; 97 + bool nvm_release_on_done; 97 98 98 99 struct mutex asq_mutex; /* Send queue lock */ 99 100 struct mutex arq_mutex; /* Receive queue lock */ ··· 103 102 enum i40e_admin_queue_err asq_last_status; 104 103 enum i40e_admin_queue_err arq_last_status; 105 104 }; 105 + 106 + /** 107 + * i40e_aq_rc_to_posix - convert errors to user-land codes 108 + * aq_rc: AdminQ error code to convert 109 + **/ 110 + static inline int i40e_aq_rc_to_posix(u16 aq_rc) 111 + { 112 + int aq_to_posix[] = { 113 + 0, /* I40E_AQ_RC_OK */ 114 + -EPERM, /* I40E_AQ_RC_EPERM */ 115 + -ENOENT, /* I40E_AQ_RC_ENOENT */ 116 + -ESRCH, /* I40E_AQ_RC_ESRCH */ 117 + -EINTR, /* I40E_AQ_RC_EINTR */ 118 + -EIO, /* I40E_AQ_RC_EIO */ 119 + -ENXIO, /* I40E_AQ_RC_ENXIO */ 120 + -E2BIG, /* I40E_AQ_RC_E2BIG */ 121 + -EAGAIN, /* I40E_AQ_RC_EAGAIN */ 122 + -ENOMEM, /* I40E_AQ_RC_ENOMEM */ 123 + -EACCES, /* I40E_AQ_RC_EACCES */ 124 + -EFAULT, /* I40E_AQ_RC_EFAULT */ 125 + -EBUSY, /* I40E_AQ_RC_EBUSY */ 126 + -EEXIST, /* I40E_AQ_RC_EEXIST */ 127 + -EINVAL, /* I40E_AQ_RC_EINVAL */ 128 + -ENOTTY, /* I40E_AQ_RC_ENOTTY */ 129 + -ENOSPC, /* I40E_AQ_RC_ENOSPC */ 130 + -ENOSYS, /* I40E_AQ_RC_ENOSYS */ 131 + -ERANGE, /* I40E_AQ_RC_ERANGE */ 132 + -EPIPE, /* I40E_AQ_RC_EFLUSHED */ 133 + -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ 134 + -EROFS, /* I40E_AQ_RC_EMODE */ 135 + -EFBIG, /* I40E_AQ_RC_EFBIG */ 136 + }; 137 + 138 + return aq_to_posix[aq_rc]; 139 + } 106 140 107 141 /* general information */ 108 142 #define I40E_AQ_LARGE_BUF 512
+58
drivers/net/ethernet/intel/i40evf/i40e_type.h
··· 268 268 u32 eetrack; /* NVM data version */ 269 269 }; 270 270 271 + /* definitions used in NVM update support */ 272 + 273 + enum i40e_nvmupd_cmd { 274 + I40E_NVMUPD_INVALID, 275 + I40E_NVMUPD_READ_CON, 276 + I40E_NVMUPD_READ_SNT, 277 + I40E_NVMUPD_READ_LCB, 278 + I40E_NVMUPD_READ_SA, 279 + I40E_NVMUPD_WRITE_ERA, 280 + I40E_NVMUPD_WRITE_CON, 281 + I40E_NVMUPD_WRITE_SNT, 282 + I40E_NVMUPD_WRITE_LCB, 283 + I40E_NVMUPD_WRITE_SA, 284 + I40E_NVMUPD_CSUM_CON, 285 + I40E_NVMUPD_CSUM_SA, 286 + I40E_NVMUPD_CSUM_LCB, 287 + }; 288 + 289 + enum i40e_nvmupd_state { 290 + I40E_NVMUPD_STATE_INIT, 291 + I40E_NVMUPD_STATE_READING, 292 + I40E_NVMUPD_STATE_WRITING 293 + }; 294 + 295 + /* nvm_access definition and its masks/shifts need to be accessible to 296 + * application, core driver, and shared code. Where is the right file? 297 + */ 298 + #define I40E_NVM_READ 0xB 299 + #define I40E_NVM_WRITE 0xC 300 + 301 + #define I40E_NVM_MOD_PNT_MASK 0xFF 302 + 303 + #define I40E_NVM_TRANS_SHIFT 8 304 + #define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) 305 + #define I40E_NVM_CON 0x0 306 + #define I40E_NVM_SNT 0x1 307 + #define I40E_NVM_LCB 0x2 308 + #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) 309 + #define I40E_NVM_ERA 0x4 310 + #define I40E_NVM_CSUM 0x8 311 + 312 + #define I40E_NVM_ADAPT_SHIFT 16 313 + #define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) 314 + 315 + #define I40E_NVMUPD_MAX_DATA 4096 316 + #define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ 317 + 318 + struct i40e_nvm_access { 319 + u32 command; 320 + u32 config; 321 + u32 offset; /* in bytes */ 322 + u32 data_size; /* in bytes */ 323 + u8 data[1]; 324 + }; 325 + 271 326 /* PCI bus types */ 272 327 enum i40e_bus_type { 273 328 i40e_bus_type_unknown = 0, ··· 457 402 458 403 /* Admin Queue info */ 459 404 struct i40e_adminq_info aq; 405 + 406 + /* state of nvm update process */ 407 + enum i40e_nvmupd_state nvmupd_state; 460 408 461 409 /* HMC info */ 462 410 struct i40e_hmc_info hmc; /* HMC info struct 
*/