Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2019-08-22

This series contains updates to i40e driver only.

Arnd Bergmann reduces the stack usage which was causing warnings on
32-bit architectures due to large structure sizes for 2 functions
getting inlined, so use noinline_for_stack to prevent the compiler
from inlining the 2 functions into their caller.

Mauro S. M. Rodrigues fixes an issue when reading an EEPROM from SFP
modules that comply with SFF-8472 but do not implement the Digital
Diagnostic Monitoring (DDM) interface for i40e.

Huhai found we were not checking the return value for configuring the
transmit ring and continuing with XDP configuration of the transmit
ring.

Beilei fixes an issue of shifting signed 32-bit integers.

Sylwia adds support for "packet drop mode" in the set MAC config
admin queue command. This bit controls the behavior when a no-drop
packet is blocking a TC queue. She also adds support for persistent
LLDP by checking the LLDP flag and reading the LLDP configuration
from the NVM when enabled.

Adrian fixes the "recovery mode" check to take into account which device
we are on, since x710 devices have 4 register values to check for status
and x722 devices only have 2 register values to check.

Piotr Azarewicz bumps the supported firmware API version to 1.9 which
extends the PHY access admin queue command support.

Jake makes sure the traffic class stats for a VEB are reset when the VEB
stats are reset.

Slawomir fixes a NULL pointer dereference where the VSI pointer was not
updated before passing it to the i40e_set_vf_mac() when the VF is in a
reset state, so wait for the reset to complete.

Grzegorz removes the i40e_update_dcb_config() which was not using the
correct NVM reads, so call i40e_init_dcb() in its place to correctly
update the DCB configuration.

Piotr Kwapulinski expands the scope of i40e_set_mac_type() since this is
needed during probe to determine if we are in recovery mode. Fixed the
driver reset path when in recovery mode.

Marcin fixed an issue where we were breaking out of a loop too early
when trying to get the PHY capabilities.

v2: Combined patch 7 & 9 in the original series, since both patches
bumped firmware API version. Also combined patches 12 & 13 in the
original series, since one increased the scope of checking for MAC
and the follow-on patch made use of function within the new scope.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+322 -134
+3 -1
drivers/net/ethernet/intel/i40e/i40e_adminq.c
··· 610 610 611 611 if (hw->aq.api_maj_ver > 1 || 612 612 (hw->aq.api_maj_ver == 1 && 613 - hw->aq.api_min_ver >= 8)) 613 + hw->aq.api_min_ver >= 8)) { 614 614 hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; 615 + hw->flags |= I40E_HW_FLAG_DROP_MODE; 616 + } 615 617 616 618 if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { 617 619 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+17 -16
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
··· 11 11 */ 12 12 13 13 #define I40E_FW_API_VERSION_MAJOR 0x0001 14 - #define I40E_FW_API_VERSION_MINOR_X722 0x0008 15 - #define I40E_FW_API_VERSION_MINOR_X710 0x0008 14 + #define I40E_FW_API_VERSION_MINOR_X722 0x0009 15 + #define I40E_FW_API_VERSION_MINOR_X710 0x0009 16 16 17 17 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ 18 18 I40E_FW_API_VERSION_MINOR_X710 : \ ··· 2051 2051 struct i40e_aq_set_mac_config { 2052 2052 __le16 max_frame_size; 2053 2053 u8 params; 2054 - #define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 2055 - #define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 2056 - #define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 2057 - #define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 2058 - #define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF 2059 - #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 2060 - #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 2061 - #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 2062 - #define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 2063 - #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 2064 - #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 2065 - #define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 2066 - #define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 2067 - #define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 2054 + #define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 2055 + #define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 2056 + #define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 2057 + #define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 2058 + #define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF 2059 + #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 2060 + #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 2061 + #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 2062 + #define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 2063 + #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 2064 + #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 2065 + #define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 2066 + #define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 
0x2 2067 + #define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 2068 + #define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 2068 2069 u8 tx_timer_priority; /* bitmap */ 2069 2070 __le16 tx_timer_value; 2070 2071 __le16 fc_refresh_threshold;
+62 -48
drivers/net/ethernet/intel/i40e/i40e_common.c
··· 13 13 * This function sets the mac type of the adapter based on the 14 14 * vendor ID and device ID stored in the hw structure. 15 15 **/ 16 - static i40e_status i40e_set_mac_type(struct i40e_hw *hw) 16 + i40e_status i40e_set_mac_type(struct i40e_hw *hw) 17 17 { 18 18 i40e_status status = 0; 19 19 ··· 1577 1577 status = i40e_asq_send_command(hw, &desc, abilities, 1578 1578 abilities_size, cmd_details); 1579 1579 1580 - if (status) 1581 - break; 1582 - 1583 - if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) { 1580 + switch (hw->aq.asq_last_status) { 1581 + case I40E_AQ_RC_EIO: 1584 1582 status = I40E_ERR_UNKNOWN_PHY; 1585 1583 break; 1586 - } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) { 1584 + case I40E_AQ_RC_EAGAIN: 1587 1585 usleep_range(1000, 2000); 1588 1586 total_delay++; 1589 1587 status = I40E_ERR_TIMEOUT; 1588 + break; 1589 + /* also covers I40E_AQ_RC_OK */ 1590 + default: 1591 + break; 1590 1592 } 1591 - } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) && 1592 - (total_delay < max_delay)); 1593 + 1594 + } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && 1595 + (total_delay < max_delay)); 1593 1596 1594 1597 if (status) 1595 1598 return status; ··· 1646 1643 return status; 1647 1644 } 1648 1645 1649 - /** 1650 - * i40e_set_fc 1651 - * @hw: pointer to the hw struct 1652 - * @aq_failures: buffer to return AdminQ failure information 1653 - * @atomic_restart: whether to enable atomic link restart 1654 - * 1655 - * Set the requested flow control mode using set_phy_config. 
1656 - **/ 1657 - enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, 1658 - bool atomic_restart) 1646 + static noinline_for_stack enum i40e_status_code 1647 + i40e_set_fc_status(struct i40e_hw *hw, 1648 + struct i40e_aq_get_phy_abilities_resp *abilities, 1649 + bool atomic_restart) 1659 1650 { 1660 - enum i40e_fc_mode fc_mode = hw->fc.requested_mode; 1661 - struct i40e_aq_get_phy_abilities_resp abilities; 1662 1651 struct i40e_aq_set_phy_config config; 1663 - enum i40e_status_code status; 1652 + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; 1664 1653 u8 pause_mask = 0x0; 1665 - 1666 - *aq_failures = 0x0; 1667 1654 1668 1655 switch (fc_mode) { 1669 1656 case I40E_FC_FULL: ··· 1670 1677 break; 1671 1678 } 1672 1679 1680 + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); 1681 + /* clear the old pause settings */ 1682 + config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & 1683 + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); 1684 + /* set the new abilities */ 1685 + config.abilities |= pause_mask; 1686 + /* If the abilities have changed, then set the new config */ 1687 + if (config.abilities == abilities->abilities) 1688 + return 0; 1689 + 1690 + /* Auto restart link so settings take effect */ 1691 + if (atomic_restart) 1692 + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 1693 + /* Copy over all the old settings */ 1694 + config.phy_type = abilities->phy_type; 1695 + config.phy_type_ext = abilities->phy_type_ext; 1696 + config.link_speed = abilities->link_speed; 1697 + config.eee_capability = abilities->eee_capability; 1698 + config.eeer = abilities->eeer_val; 1699 + config.low_power_ctrl = abilities->d3_lpan; 1700 + config.fec_config = abilities->fec_cfg_curr_mod_ext_info & 1701 + I40E_AQ_PHY_FEC_CONFIG_MASK; 1702 + 1703 + return i40e_aq_set_phy_config(hw, &config, NULL); 1704 + } 1705 + 1706 + /** 1707 + * i40e_set_fc 1708 + * @hw: pointer to the hw struct 1709 + * @aq_failures: buffer to return AdminQ failure information 
1710 + * @atomic_restart: whether to enable atomic link restart 1711 + * 1712 + * Set the requested flow control mode using set_phy_config. 1713 + **/ 1714 + enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, 1715 + bool atomic_restart) 1716 + { 1717 + struct i40e_aq_get_phy_abilities_resp abilities; 1718 + enum i40e_status_code status; 1719 + 1720 + *aq_failures = 0x0; 1721 + 1673 1722 /* Get the current phy config */ 1674 1723 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, 1675 1724 NULL); ··· 1720 1685 return status; 1721 1686 } 1722 1687 1723 - memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); 1724 - /* clear the old pause settings */ 1725 - config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & 1726 - ~(I40E_AQ_PHY_FLAG_PAUSE_RX); 1727 - /* set the new abilities */ 1728 - config.abilities |= pause_mask; 1729 - /* If the abilities have changed, then set the new config */ 1730 - if (config.abilities != abilities.abilities) { 1731 - /* Auto restart link so settings take effect */ 1732 - if (atomic_restart) 1733 - config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 1734 - /* Copy over all the old settings */ 1735 - config.phy_type = abilities.phy_type; 1736 - config.phy_type_ext = abilities.phy_type_ext; 1737 - config.link_speed = abilities.link_speed; 1738 - config.eee_capability = abilities.eee_capability; 1739 - config.eeer = abilities.eeer_val; 1740 - config.low_power_ctrl = abilities.d3_lpan; 1741 - config.fec_config = abilities.fec_cfg_curr_mod_ext_info & 1742 - I40E_AQ_PHY_FEC_CONFIG_MASK; 1743 - status = i40e_aq_set_phy_config(hw, &config, NULL); 1688 + status = i40e_set_fc_status(hw, &abilities, atomic_restart); 1689 + if (status) 1690 + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; 1744 1691 1745 - if (status) 1746 - *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; 1747 - } 1748 1692 /* Update the link info */ 1749 1693 status = i40e_update_link_info(hw); 1750 1694 if (status) { ··· 2551 2537 * 
i40e_updatelink_status - update status of the HW network link 2552 2538 * @hw: pointer to the hw struct 2553 2539 **/ 2554 - i40e_status i40e_update_link_info(struct i40e_hw *hw) 2540 + noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) 2555 2541 { 2556 2542 struct i40e_aq_get_phy_abilities_resp abilities; 2557 2543 i40e_status status = 0;
+17 -1
drivers/net/ethernet/intel/i40e/i40e_dcb.c
··· 877 877 return I40E_NOT_SUPPORTED; 878 878 879 879 /* Read LLDP NVM area */ 880 - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); 880 + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) { 881 + u8 offset = 0; 882 + 883 + if (hw->mac.type == I40E_MAC_XL710) 884 + offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET; 885 + else if (hw->mac.type == I40E_MAC_X722) 886 + offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET; 887 + else 888 + return I40E_NOT_SUPPORTED; 889 + 890 + ret = i40e_read_nvm_module_data(hw, 891 + I40E_SR_EMP_SR_SETTINGS_PTR, 892 + offset, 1, 893 + &lldp_cfg.adminstatus); 894 + } else { 895 + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); 896 + } 881 897 if (ret) 882 898 return I40E_ERR_NOT_READY; 883 899
+2
drivers/net/ethernet/intel/i40e/i40e_dcb.h
··· 30 30 #define I40E_CEE_SUBTYPE_APP_PRI 4 31 31 32 32 #define I40E_CEE_MAX_FEAT_TYPE 3 33 + #define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B 34 + #define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31 33 35 /* Defines for LLDP TLV header */ 34 36 #define I40E_LLDP_TLV_LEN_SHIFT 0 35 37 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+6
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 5137 5137 /* Module is not SFF-8472 compliant */ 5138 5138 modinfo->type = ETH_MODULE_SFF_8079; 5139 5139 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 5140 + } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) { 5141 + /* Module is SFF-8472 compliant but doesn't implement 5142 + * Digital Diagnostic Monitoring (DDM). 5143 + */ 5144 + modinfo->type = ETH_MODULE_SFF_8079; 5145 + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 5140 5146 } else { 5141 5147 modinfo->type = ETH_MODULE_SFF_8472; 5142 5148 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+79 -55
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 534 534 sizeof(pf->veb[i]->stats)); 535 535 memset(&pf->veb[i]->stats_offsets, 0, 536 536 sizeof(pf->veb[i]->stats_offsets)); 537 + memset(&pf->veb[i]->tc_stats, 0, 538 + sizeof(pf->veb[i]->tc_stats)); 539 + memset(&pf->veb[i]->tc_stats_offsets, 0, 540 + sizeof(pf->veb[i]->tc_stats_offsets)); 537 541 pf->veb[i]->stat_offsets_loaded = false; 538 542 } 539 543 } ··· 3368 3364 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 3369 3365 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 3370 3366 3371 - if (!i40e_enabled_xdp_vsi(vsi)) 3367 + if (err || !i40e_enabled_xdp_vsi(vsi)) 3372 3368 return err; 3373 3369 3374 3370 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) ··· 6420 6416 } 6421 6417 6422 6418 /** 6423 - * i40e_update_dcb_config 6424 - * @hw: pointer to the HW struct 6425 - * @enable_mib_change: enable MIB change event 6426 - * 6427 - * Update DCB configuration from the firmware 6428 - **/ 6429 - static enum i40e_status_code 6430 - i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change) 6431 - { 6432 - struct i40e_lldp_variables lldp_cfg; 6433 - i40e_status ret; 6434 - 6435 - if (!hw->func_caps.dcb) 6436 - return I40E_NOT_SUPPORTED; 6437 - 6438 - /* Read LLDP NVM area */ 6439 - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); 6440 - if (ret) 6441 - return I40E_ERR_NOT_READY; 6442 - 6443 - /* Get DCBX status */ 6444 - ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); 6445 - if (ret) 6446 - return ret; 6447 - 6448 - /* Check the DCBX Status */ 6449 - if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || 6450 - hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { 6451 - /* Get current DCBX configuration */ 6452 - ret = i40e_get_dcb_config(hw); 6453 - if (ret) 6454 - return ret; 6455 - } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { 6456 - return I40E_ERR_NOT_READY; 6457 - } 6458 - 6459 - /* Configure the LLDP MIB change event */ 6460 - if (enable_mib_change) 6461 - ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); 6462 - 6463 - return ret; 
6464 - } 6465 - 6466 - /** 6467 6419 * i40e_init_pf_dcb - Initialize DCB configuration 6468 6420 * @pf: PF being configured 6469 6421 * ··· 6441 6481 goto out; 6442 6482 } 6443 6483 6444 - err = i40e_update_dcb_config(hw, true); 6484 + err = i40e_init_dcb(hw, true); 6445 6485 if (!err) { 6446 6486 /* Device/Function is not DCBX capable */ 6447 6487 if ((!hw->func_caps.dcb) || ··· 14538 14578 **/ 14539 14579 static bool i40e_check_recovery_mode(struct i40e_pf *pf) 14540 14580 { 14541 - u32 val = rd32(&pf->hw, I40E_GL_FWSTS); 14581 + u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; 14582 + bool is_recovery_mode = false; 14542 14583 14543 - if (val & I40E_GL_FWSTS_FWS1B_MASK) { 14584 + if (pf->hw.mac.type == I40E_MAC_XL710) 14585 + is_recovery_mode = 14586 + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || 14587 + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || 14588 + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK || 14589 + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK; 14590 + if (pf->hw.mac.type == I40E_MAC_X722) 14591 + is_recovery_mode = 14592 + val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || 14593 + val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK; 14594 + if (is_recovery_mode) { 14544 14595 dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); 14545 14596 dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 14546 14597 set_bit(__I40E_RECOVERY_MODE, pf->state); ··· 14562 14591 dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n"); 14563 14592 14564 14593 return false; 14594 + } 14595 + 14596 + /** 14597 + * i40e_pf_loop_reset - perform reset in a loop. 14598 + * @pf: board private structure 14599 + * 14600 + * This function is useful when a NIC is about to enter recovery mode. 
14601 + * When a NIC's internal data structures are corrupted the NIC's 14602 + * firmware is going to enter recovery mode. 14603 + * Right after a POR it takes about 7 minutes for firmware to enter 14604 + * recovery mode. Until that time a NIC is in some kind of intermediate 14605 + * state. After that time period the NIC almost surely enters 14606 + * recovery mode. The only way for a driver to detect intermediate 14607 + * state is to issue a series of pf-resets and check a return value. 14608 + * If a PF reset returns success then the firmware could be in recovery 14609 + * mode so the caller of this code needs to check for recovery mode 14610 + * if this function returns success. There is a little chance that 14611 + * firmware will hang in intermediate state forever. 14612 + * Since waiting 7 minutes is quite a lot of time this function waits 14613 + * 10 seconds and then gives up by returning an error. 14614 + * 14615 + * Return 0 on success, negative on failure. 14616 + **/ 14617 + static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) 14618 + { 14619 + const unsigned short MAX_CNT = 1000; 14620 + const unsigned short MSECS = 10; 14621 + struct i40e_hw *hw = &pf->hw; 14622 + i40e_status ret; 14623 + int cnt; 14624 + 14625 + for (cnt = 0; cnt < MAX_CNT; ++cnt) { 14626 + ret = i40e_pf_reset(hw); 14627 + if (!ret) 14628 + break; 14629 + msleep(MSECS); 14630 + } 14631 + 14632 + if (cnt == MAX_CNT) { 14633 + dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); 14634 + return ret; 14635 + } 14636 + 14637 + pf->pfr_count++; 14638 + return ret; 14565 14639 } 14566 14640 14567 14641 /** ··· 14837 14821 14838 14822 /* Reset here to make sure all is clean and to define PF 'n' */ 14839 14823 i40e_clear_hw(hw); 14840 - if (!i40e_check_recovery_mode(pf)) { 14841 - err = i40e_pf_reset(hw); 14842 - if (err) { 14843 - dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); 14844 - goto err_pf_reset; 14845 - } 14846 - pf->pfr_count++; 14824 + 14825 + err = 
i40e_set_mac_type(hw); 14826 + if (err) { 14827 + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", 14828 + err); 14829 + goto err_pf_reset; 14847 14830 } 14831 + 14832 + err = i40e_pf_loop_reset(pf); 14833 + if (err) { 14834 + dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); 14835 + goto err_pf_reset; 14836 + } 14837 + 14838 + i40e_check_recovery_mode(pf); 14839 + 14848 14840 hw->aq.num_arq_entries = I40E_AQ_LEN; 14849 14841 hw->aq.num_asq_entries = I40E_AQ_LEN; 14850 14842 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+101
drivers/net/ethernet/intel/i40e/i40e_nvm.c
··· 322 322 } 323 323 324 324 /** 325 + * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location 326 + * @hw: pointer to the HW structure 327 + * @module_ptr: Pointer to module in words with respect to NVM beginning 328 + * @offset: offset in words from module start 329 + * @words_data_size: Words to read from NVM 330 + * @data_ptr: Pointer to memory location where resulting buffer will be stored 331 + **/ 332 + i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, 333 + u8 module_ptr, u16 offset, 334 + u16 words_data_size, 335 + u16 *data_ptr) 336 + { 337 + i40e_status status; 338 + u16 ptr_value = 0; 339 + u32 flat_offset; 340 + 341 + if (module_ptr != 0) { 342 + status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); 343 + if (status) { 344 + i40e_debug(hw, I40E_DEBUG_ALL, 345 + "Reading nvm word failed.Error code: %d.\n", 346 + status); 347 + return I40E_ERR_NVM; 348 + } 349 + } 350 + #define I40E_NVM_INVALID_PTR_VAL 0x7FFF 351 + #define I40E_NVM_INVALID_VAL 0xFFFF 352 + 353 + /* Pointer not initialized */ 354 + if (ptr_value == I40E_NVM_INVALID_PTR_VAL || 355 + ptr_value == I40E_NVM_INVALID_VAL) 356 + return I40E_ERR_BAD_PTR; 357 + 358 + /* Check whether the module is in SR mapped area or outside */ 359 + if (ptr_value & I40E_PTR_TYPE) { 360 + /* Pointer points outside of the Shared RAM mapped area */ 361 + ptr_value &= ~I40E_PTR_TYPE; 362 + 363 + /* PtrValue in 4kB units, need to convert to words */ 364 + ptr_value /= 2; 365 + flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset; 366 + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); 367 + if (!status) { 368 + status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset, 369 + 2 * words_data_size, 370 + data_ptr, true, NULL); 371 + i40e_release_nvm(hw); 372 + if (status) { 373 + i40e_debug(hw, I40E_DEBUG_ALL, 374 + "Reading nvm aq failed.Error code: %d.\n", 375 + status); 376 + return I40E_ERR_NVM; 377 + } 378 + } else { 379 + return I40E_ERR_NVM; 380 + } 381 + } else { 382 + /* Read from the 
Shadow RAM */ 383 + status = i40e_read_nvm_buffer(hw, ptr_value + offset, 384 + &words_data_size, data_ptr); 385 + if (status) { 386 + i40e_debug(hw, I40E_DEBUG_ALL, 387 + "Reading nvm buffer failed.Error code: %d.\n", 388 + status); 389 + } 390 + } 391 + 392 + return status; 393 + } 394 + 395 + /** 325 396 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register 326 397 * @hw: pointer to the HW structure 327 398 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). ··· 498 427 return i40e_read_nvm_buffer_aq(hw, offset, words, data); 499 428 500 429 return i40e_read_nvm_buffer_srctl(hw, offset, words, data); 430 + } 431 + 432 + /** 433 + * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary 434 + * @hw: pointer to the HW structure 435 + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). 436 + * @words: (in) number of words to read; (out) number of words actually read 437 + * @data: words read from the Shadow RAM 438 + * 439 + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() 440 + * method. The buffer read is preceded by the NVM ownership take 441 + * and followed by the release. 442 + **/ 443 + i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, 444 + u16 *words, u16 *data) 445 + { 446 + i40e_status ret_code = 0; 447 + 448 + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { 449 + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); 450 + if (!ret_code) { 451 + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, 452 + data); 453 + i40e_release_nvm(hw); 454 + } 455 + } else { 456 + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); 457 + } 458 + 459 + return ret_code; 501 460 } 502 461 503 462 /**
+8
drivers/net/ethernet/intel/i40e/i40e_prototype.h
··· 315 315 void i40e_release_nvm(struct i40e_hw *hw); 316 316 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, 317 317 u16 *data); 318 + i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, 319 + u8 module_ptr, u16 offset, 320 + u16 words_data_size, 321 + u16 *data_ptr); 322 + i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, 323 + u16 *words, u16 *data); 318 324 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); 319 325 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, 320 326 u16 *checksum); ··· 331 325 struct i40e_aq_desc *desc); 332 326 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); 333 327 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); 328 + 329 + i40e_status i40e_set_mac_type(struct i40e_hw *hw); 334 330 335 331 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; 336 332
+18 -12
drivers/net/ethernet/intel/i40e/i40e_register.h
··· 58 58 #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 59 59 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) 60 60 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 61 - #define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) 61 + #define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) 62 62 #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ 63 63 #define I40E_PF_ARQT_ARQT_SHIFT 0 64 64 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) ··· 81 81 #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 82 82 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) 83 83 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 84 - #define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) 84 + #define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) 85 85 #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ 86 86 #define I40E_PF_ATQT_ATQT_SHIFT 0 87 87 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) ··· 108 108 #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 109 109 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) 110 110 #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 111 - #define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) 111 + #define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) 112 112 #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ 113 113 #define I40E_VF_ARQT_MAX_INDEX 127 114 114 #define I40E_VF_ARQT_ARQT_SHIFT 0 ··· 136 136 #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 137 137 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) 138 138 #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 139 - #define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) 139 + #define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, 
I40E_VF_ATQLEN_ATQENABLE_SHIFT) 140 140 #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ 141 141 #define I40E_VF_ATQT_MAX_INDEX 127 142 142 #define I40E_VF_ATQT_ATQT_SHIFT 0 ··· 259 259 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 260 260 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) 261 261 #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 262 - #define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) 262 + #define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) 263 263 #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ 264 264 #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 265 265 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) ··· 363 363 #define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) 364 364 #define I40E_GL_FWSTS_FWS1B_SHIFT 16 365 365 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) 366 + #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) 367 + #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT) 368 + #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT) 369 + #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT) 370 + #define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT) 371 + #define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT) 366 372 #define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ 367 373 #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 368 374 #define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) ··· 509 503 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 510 504 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, 
I40E_GLGEN_MSCA_MDICMD_SHIFT) 511 505 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 512 - #define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) 506 + #define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) 513 507 #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ 514 508 #define I40E_GLGEN_MSRWD_MAX_INDEX 3 515 509 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 ··· 1248 1242 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 1249 1243 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) 1250 1244 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 1251 - #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) 1245 + #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) 1252 1246 #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ 1253 1247 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 1254 1248 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) 1255 1249 #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 1256 1250 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) 1257 1251 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 1258 - #define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) 1252 + #define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) 1259 1253 #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ 1260 1254 #define I40E_QRX_ENA_MAX_INDEX 1535 1261 1255 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 ··· 1664 1658 #define I40E_GLNVM_SRCTL_START_SHIFT 30 1665 1659 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) 1666 1660 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 1667 - #define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) 1661 + #define 
I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) 1668 1662 #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ 1669 1663 #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 1670 1664 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) ··· 3031 3025 #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 3032 3026 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) 3033 3027 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 3034 - #define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) 3028 + #define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) 3035 3029 #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ 3036 3030 #define I40E_VP_MDET_RX_MAX_INDEX 127 3037 3031 #define I40E_VP_MDET_RX_VALID_SHIFT 0 ··· 3167 3161 #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 3168 3162 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) 3169 3163 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 3170 - #define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) 3164 + #define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) 3171 3165 #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ 3172 3166 #define I40E_VF_ARQT1_ARQT_SHIFT 0 3173 3167 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) ··· 3190 3184 #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 3191 3185 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) 3192 3186 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 3193 - #define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) 3187 + #define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) 3194 3188 #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ 3195 3189 #define I40E_VF_ATQT1_ATQT_SHIFT 0 3196 3190 #define I40E_VF_ATQT1_ATQT_MASK 
I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+3
drivers/net/ethernet/intel/i40e/i40e_type.h
··· 443 443 #define I40E_MODULE_SFF_8472_COMP 0x5E 444 444 #define I40E_MODULE_SFF_8472_SWAP 0x5C 445 445 #define I40E_MODULE_SFF_ADDR_MODE 0x04 446 + #define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40 446 447 #define I40E_MODULE_TYPE_QSFP_PLUS 0x0D 447 448 #define I40E_MODULE_TYPE_QSFP28 0x11 448 449 #define I40E_MODULE_QSFP_MAX_LEN 640 ··· 624 623 #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) 625 624 #define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) 626 625 #define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) 626 + #define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) 627 627 u64 flags; 628 628 629 629 /* Used in set switch config AQ command */ ··· 1318 1316 #define I40E_SR_VPD_PTR 0x2F 1319 1317 #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E 1320 1318 #define I40E_SR_SW_CHECKSUM_WORD 0x3F 1319 + #define I40E_SR_EMP_SR_SETTINGS_PTR 0x48 1321 1320 1322 1321 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ 1323 1322 #define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+6 -1
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 3967 3967 /* When the VF is resetting wait until it is done. 3968 3968 * It can take up to 200 milliseconds, 3969 3969 * but wait for up to 300 milliseconds to be safe. 3970 + * If the VF is indeed in reset, the vsi pointer has 3971 + * to show on the newly loaded vsi under pf->vsi[id]. 3970 3972 */ 3971 3973 for (i = 0; i < 15; i++) { 3972 - if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) 3974 + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 3975 + if (i > 0) 3976 + vsi = pf->vsi[vf->lan_vsi_idx]; 3973 3977 break; 3978 + } 3974 3979 msleep(20); 3975 3980 } 3976 3981 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {