Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-01-09

This series contains updates to ixgbe and ixgbevf only.

Emil fixes an issue with "wake on LAN" (WoL) where we need to ensure we
enable the reception of multicast packets so that WoL works for IPv6
magic packets. Cleaned up code no longer needed with the update to
adaptive ITR.

Paul updates the driver to advertise the highest capable link speed
when a module gets inserted. Also extended the displaying of firmware
version to include the iSCSI and OEM block in the EEPROM to better
identify firmware versions/images.

Tonghao Zhang cleans up a code comment that no longer applies since
InterruptThrottleRate has been removed from the driver.

Alex fixes SR-IOV and MACVLAN offload interaction, where the MACVLAN
offload was incorrectly configuring several filters with the wrong
pool value which resulted in MACVLAN interfaces not being able to
receive traffic that had to pass over the physical interface. Fixed
transmit hangs and dropped receive frames when the number of VFs
changed. Added support for RSS on MACVLAN pools for X550 devices.
Fixed up the MACVLAN limitations so we can now support 63 offloaded
devices. Cleaned up MACVLAN code that is no longer needed with the
recent changes and fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+298 -177
+4 -6
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 333 333 struct net_device *netdev; /* netdev ring belongs to */ 334 334 struct bpf_prog *xdp_prog; 335 335 struct device *dev; /* device for DMA mapping */ 336 - struct ixgbe_fwd_adapter *l2_accel_priv; 337 336 void *desc; /* descriptor ring memory */ 338 337 union { 339 338 struct ixgbe_tx_buffer *tx_buffer_info; ··· 396 397 #define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) 397 398 #define IXGBE_MAX_L2A_QUEUES 4 398 399 #define IXGBE_BAD_L2A_QUEUE 3 399 - #define IXGBE_MAX_MACVLANS 31 400 - #define IXGBE_MAX_DCBMACVLANS 8 400 + #define IXGBE_MAX_MACVLANS 63 401 401 402 402 struct ixgbe_ring_feature { 403 403 u16 limit; /* upper limit on feature indices */ ··· 721 723 722 724 u16 bridge_mode; 723 725 724 - u16 eeprom_verh; 725 - u16 eeprom_verl; 726 + char eeprom_id[NVM_VER_SIZE]; 726 727 u16 eeprom_cap; 727 728 728 729 u32 interrupt_event; ··· 765 768 #endif /*CONFIG_DEBUG_FS*/ 766 769 767 770 u8 default_up; 768 - unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ 771 + /* Bitmask indicating in use pools */ 772 + DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1); 769 773 770 774 #define IXGBE_MAX_LINK_HANDLE 10 771 775 struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
+112
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
··· 4028 4028 return 0; 4029 4029 } 4030 4030 4031 + /** 4032 + * ixgbe_get_orom_version - Return option ROM from EEPROM 4033 + * 4034 + * @hw: pointer to hardware structure 4035 + * @nvm_ver: pointer to output structure 4036 + * 4037 + * if valid option ROM version, nvm_ver->or_valid set to true 4038 + * else nvm_ver->or_valid is false. 4039 + **/ 4040 + void ixgbe_get_orom_version(struct ixgbe_hw *hw, 4041 + struct ixgbe_nvm_version *nvm_ver) 4042 + { 4043 + u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; 4044 + 4045 + nvm_ver->or_valid = false; 4046 + /* Option Rom may or may not be present. Start with pointer */ 4047 + hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); 4048 + 4049 + /* make sure offset is valid */ 4050 + if (offset == 0x0 || offset == NVM_INVALID_PTR) 4051 + return; 4052 + 4053 + hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); 4054 + hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); 4055 + 4056 + /* option rom exists and is valid */ 4057 + if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || 4058 + eeprom_cfg_blkl == NVM_VER_INVALID || 4059 + eeprom_cfg_blkh == NVM_VER_INVALID) 4060 + return; 4061 + 4062 + nvm_ver->or_valid = true; 4063 + nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; 4064 + nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | 4065 + (eeprom_cfg_blkh >> NVM_OROM_SHIFT); 4066 + nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; 4067 + } 4068 + 4069 + /** 4070 + * ixgbe_get_oem_prod_version Etrack ID from EEPROM 4071 + * 4072 + * @hw: pointer to hardware structure 4073 + * @nvm_ver: pointer to output structure 4074 + * 4075 + * if valid OEM product version, nvm_ver->oem_valid set to true 4076 + * else nvm_ver->oem_valid is false. 
4077 + **/ 4078 + void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, 4079 + struct ixgbe_nvm_version *nvm_ver) 4080 + { 4081 + u16 rel_num, prod_ver, mod_len, cap, offset; 4082 + 4083 + nvm_ver->oem_valid = false; 4084 + hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); 4085 + 4086 + /* Return is offset to OEM Product Version block is invalid */ 4087 + if (offset == 0x0 && offset == NVM_INVALID_PTR) 4088 + return; 4089 + 4090 + /* Read product version block */ 4091 + hw->eeprom.ops.read(hw, offset, &mod_len); 4092 + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); 4093 + 4094 + /* Return if OEM product version block is invalid */ 4095 + if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || 4096 + (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) 4097 + return; 4098 + 4099 + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); 4100 + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); 4101 + 4102 + /* Return if version is invalid */ 4103 + if ((rel_num | prod_ver) == 0x0 || 4104 + rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) 4105 + return; 4106 + 4107 + nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; 4108 + nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; 4109 + nvm_ver->oem_release = rel_num; 4110 + nvm_ver->oem_valid = true; 4111 + } 4112 + 4113 + /** 4114 + * ixgbe_get_etk_id - Return Etrack ID from EEPROM 4115 + * 4116 + * @hw: pointer to hardware structure 4117 + * @nvm_ver: pointer to output structure 4118 + * 4119 + * word read errors will return 0xFFFF 4120 + **/ 4121 + void ixgbe_get_etk_id(struct ixgbe_hw *hw, 4122 + struct ixgbe_nvm_version *nvm_ver) 4123 + { 4124 + u16 etk_id_l, etk_id_h; 4125 + 4126 + if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) 4127 + etk_id_l = NVM_VER_INVALID; 4128 + if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) 4129 + etk_id_h = NVM_VER_INVALID; 4130 + 4131 + /* The word order for the version format is determined by high order 4132 + * word bit 15. 
4133 + */ 4134 + if ((etk_id_h & NVM_ETK_VALID) == 0) { 4135 + nvm_ver->etk_id = etk_id_h; 4136 + nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); 4137 + } else { 4138 + nvm_ver->etk_id = etk_id_l; 4139 + nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); 4140 + } 4141 + } 4142 + 4031 4143 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) 4032 4144 { 4033 4145 u32 rxctrl;
+6
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
··· 139 139 140 140 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); 141 141 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); 142 + void ixgbe_get_etk_id(struct ixgbe_hw *hw, 143 + struct ixgbe_nvm_version *nvm_ver); 144 + void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, 145 + struct ixgbe_nvm_version *nvm_ver); 146 + void ixgbe_get_orom_version(struct ixgbe_hw *hw, 147 + struct ixgbe_nvm_version *nvm_ver); 142 148 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); 143 149 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); 144 150 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+2 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 1014 1014 struct ethtool_drvinfo *drvinfo) 1015 1015 { 1016 1016 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1017 - u32 nvm_track_id; 1018 1017 1019 1018 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 1020 1019 strlcpy(drvinfo->version, ixgbe_driver_version, 1021 1020 sizeof(drvinfo->version)); 1022 1021 1023 - nvm_track_id = (adapter->eeprom_verh << 16) | 1024 - adapter->eeprom_verl; 1025 - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", 1026 - nvm_track_id); 1022 + strlcpy(drvinfo->fw_version, adapter->eeprom_id, 1023 + sizeof(drvinfo->fw_version)); 1027 1024 1028 1025 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 1029 1026 sizeof(drvinfo->bus_info));
+2 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
··· 1034 1034 ixgbe_driver_name, 1035 1035 ixgbe_driver_version); 1036 1036 /* Firmware Version */ 1037 - snprintf(info->firmware_version, 1038 - sizeof(info->firmware_version), 1039 - "0x%08x", 1040 - (adapter->eeprom_verh << 16) | 1041 - adapter->eeprom_verl); 1037 + strlcpy(info->firmware_version, adapter->eeprom_id, 1038 + sizeof(info->firmware_version)); 1042 1039 1043 1040 /* Model */ 1044 1041 if (hw->mac.type == ixgbe_mac_82599EB) {
+8 -3
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
··· 350 350 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 351 351 return false; 352 352 353 + /* limit VMDq instances on the PF by number of Tx queues */ 354 + vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs); 355 + 353 356 /* Add starting offset to total pool count */ 354 357 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; 355 358 ··· 515 512 #ifdef IXGBE_FCOE 516 513 u16 fcoe_i = 0; 517 514 #endif 518 - bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); 519 515 520 516 /* only proceed if SR-IOV is enabled */ 521 517 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 522 518 return false; 519 + 520 + /* limit l2fwd RSS based on total Tx queue limit */ 521 + rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i); 523 522 524 523 /* Add starting offset to total pool count */ 525 524 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; ··· 530 525 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); 531 526 532 527 /* 64 pool mode with 2 queues per pool */ 533 - if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) { 528 + if (vmdq_i > 32) { 534 529 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; 535 530 rss_m = IXGBE_RSS_2Q_MASK; 536 531 rss_i = min_t(u16, rss_i, 2); ··· 706 701 adapter->num_rx_queues = 1; 707 702 adapter->num_tx_queues = 1; 708 703 adapter->num_xdp_queues = 0; 709 - adapter->num_rx_pools = adapter->num_rx_queues; 704 + adapter->num_rx_pools = 1; 710 705 adapter->num_rx_queues_per_pool = 1; 711 706 712 707 #ifdef CONFIG_IXGBE_DCB
+113 -120
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 192 192 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); 193 193 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); 194 194 195 + static const struct net_device_ops ixgbe_netdev_ops; 196 + 197 + static bool netif_is_ixgbe(struct net_device *dev) 198 + { 199 + return dev && (dev->netdev_ops == &ixgbe_netdev_ops); 200 + } 201 + 195 202 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, 196 203 u32 reg, u16 *value) 197 204 { ··· 1071 1064 1072 1065 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) 1073 1066 { 1074 - struct ixgbe_adapter *adapter; 1075 - struct ixgbe_hw *hw; 1076 - u32 head, tail; 1067 + unsigned int head, tail; 1077 1068 1078 - if (ring->l2_accel_priv) 1079 - adapter = ring->l2_accel_priv->real_adapter; 1080 - else 1081 - adapter = netdev_priv(ring->netdev); 1069 + head = ring->next_to_clean; 1070 + tail = ring->next_to_use; 1082 1071 1083 - hw = &adapter->hw; 1084 - head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); 1085 - tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); 1086 - 1087 - if (head != tail) 1088 - return (head < tail) ? 1089 - tail - head : (tail + ring->count - head); 1090 - 1091 - return 0; 1072 + return ((head <= tail) ? tail : tail + ring->count) - head; 1092 1073 } 1093 1074 1094 1075 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) ··· 2512 2517 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 2513 2518 } 2514 2519 2515 - enum latency_range { 2516 - lowest_latency = 0, 2517 - low_latency = 1, 2518 - bulk_latency = 2, 2519 - latency_invalid = 255 2520 - }; 2521 - 2522 2520 /** 2523 2521 * ixgbe_update_itr - update the dynamic ITR value based on statistics 2524 2522 * @q_vector: structure containing interrupt and ring information ··· 2524 2536 * based on theoretical maximum wire speed and thresholds were set based 2525 2537 * on testing data as well as attempting to minimize response time 2526 2538 * while increasing bulk throughput. 
2527 - * this functionality is controlled by the InterruptThrottleRate module 2528 - * parameter (see ixgbe_param.c) 2529 2539 **/ 2530 2540 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, 2531 2541 struct ixgbe_ring_container *ring_container) ··· 3841 3855 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3842 3856 struct ixgbe_hw *hw = &adapter->hw; 3843 3857 u32 vfreta = 0; 3844 - unsigned int pf_pool = adapter->num_vfs; 3845 3858 3846 3859 /* Write redirection table to HW */ 3847 3860 for (i = 0; i < reta_entries; i++) { 3861 + u16 pool = adapter->num_rx_pools; 3862 + 3848 3863 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; 3849 - if ((i & 3) == 3) { 3850 - IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), 3864 + if ((i & 3) != 3) 3865 + continue; 3866 + 3867 + while (pool--) 3868 + IXGBE_WRITE_REG(hw, 3869 + IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)), 3851 3870 vfreta); 3852 - vfreta = 0; 3853 - } 3871 + vfreta = 0; 3854 3872 } 3855 3873 } 3856 3874 ··· 3891 3901 { 3892 3902 struct ixgbe_hw *hw = &adapter->hw; 3893 3903 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 3894 - unsigned int pf_pool = adapter->num_vfs; 3895 3904 int i, j; 3896 3905 3897 3906 /* Fill out hash function seeds */ 3898 - for (i = 0; i < 10; i++) 3899 - IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), 3900 - *(adapter->rss_key + i)); 3907 + for (i = 0; i < 10; i++) { 3908 + u16 pool = adapter->num_rx_pools; 3909 + 3910 + while (pool--) 3911 + IXGBE_WRITE_REG(hw, 3912 + IXGBE_PFVFRSSRK(i, VMDQ_P(pool)), 3913 + *(adapter->rss_key + i)); 3914 + } 3901 3915 3902 3916 /* Fill out the redirection table */ 3903 3917 for (i = 0, j = 0; i < 64; i++, j++) { ··· 3967 3973 3968 3974 if ((hw->mac.type >= ixgbe_mac_X550) && 3969 3975 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { 3970 - unsigned int pf_pool = adapter->num_vfs; 3976 + u16 pool = adapter->num_rx_pools; 3971 3977 3972 3978 /* Enable VF RSS mode */ 3973 3979 mrqc |= IXGBE_MRQC_MULTIPLE_RSS; ··· 3977 
3983 ixgbe_setup_vfreta(adapter); 3978 3984 vfmrqc = IXGBE_MRQC_RSSEN; 3979 3985 vfmrqc |= rss_field; 3980 - IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); 3986 + 3987 + while (pool--) 3988 + IXGBE_WRITE_REG(hw, 3989 + IXGBE_PFVFMRQC(VMDQ_P(pool)), 3990 + vfmrqc); 3981 3991 } else { 3982 3992 ixgbe_setup_reta(adapter); 3983 3993 mrqc |= rss_field; ··· 4144 4146 { 4145 4147 struct ixgbe_hw *hw = &adapter->hw; 4146 4148 int rss_i = adapter->ring_feature[RING_F_RSS].indices; 4147 - u16 pool; 4149 + u16 pool = adapter->num_rx_pools; 4148 4150 4149 4151 /* PSRTYPE must be initialized in non 82598 adapters */ 4150 4152 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | ··· 4161 4163 else if (rss_i > 1) 4162 4164 psrtype |= 1u << 29; 4163 4165 4164 - for_each_set_bit(pool, &adapter->fwd_bitmask, 32) 4166 + while (pool--) 4165 4167 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); 4166 4168 } 4167 4169 ··· 4488 4490 for (i = 0; i < adapter->num_rx_queues; i++) { 4489 4491 struct ixgbe_ring *ring = adapter->rx_ring[i]; 4490 4492 4491 - if (ring->l2_accel_priv) 4493 + if (!netif_is_ixgbe(ring->netdev)) 4492 4494 continue; 4495 + 4493 4496 j = ring->reg_idx; 4494 4497 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 4495 4498 vlnctrl &= ~IXGBE_RXDCTL_VME; ··· 4526 4527 for (i = 0; i < adapter->num_rx_queues; i++) { 4527 4528 struct ixgbe_ring *ring = adapter->rx_ring[i]; 4528 4529 4529 - if (ring->l2_accel_priv) 4530 + if (!netif_is_ixgbe(ring->netdev)) 4530 4531 continue; 4532 + 4531 4533 j = ring->reg_idx; 4532 4534 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 4533 4535 vlnctrl |= IXGBE_RXDCTL_VME; ··· 5279 5279 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); 5280 5280 } 5281 5281 5282 - static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) 5283 - { 5284 - struct ixgbe_adapter *adapter = vadapter->real_adapter; 5285 - int rss_i = adapter->num_rx_queues_per_pool; 5286 - struct ixgbe_hw *hw = &adapter->hw; 5287 - u16 pool = vadapter->pool; 5288 - u32 psrtype = 
IXGBE_PSRTYPE_TCPHDR | 5289 - IXGBE_PSRTYPE_UDPHDR | 5290 - IXGBE_PSRTYPE_IPV4HDR | 5291 - IXGBE_PSRTYPE_L2HDR | 5292 - IXGBE_PSRTYPE_IPV6HDR; 5293 - 5294 - if (hw->mac.type == ixgbe_mac_82598EB) 5295 - return; 5296 - 5297 - if (rss_i > 3) 5298 - psrtype |= 2u << 29; 5299 - else if (rss_i > 1) 5300 - psrtype |= 1u << 29; 5301 - 5302 - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); 5303 - } 5304 - 5305 5282 /** 5306 5283 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 5307 5284 * @rx_ring: ring to free buffers from ··· 5342 5365 usleep_range(10000, 20000); 5343 5366 ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); 5344 5367 ixgbe_clean_rx_ring(rx_ring); 5345 - rx_ring->l2_accel_priv = NULL; 5346 5368 } 5347 5369 5348 5370 static int ixgbe_fwd_ring_down(struct net_device *vdev, ··· 5359 5383 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; 5360 5384 } 5361 5385 5362 - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 5363 - adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; 5386 + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) 5364 5387 adapter->tx_ring[txbase + i]->netdev = adapter->netdev; 5365 - } 5366 5388 5367 5389 5368 5390 return 0; ··· 5373 5399 unsigned int rxbase, txbase, queues; 5374 5400 int i, baseq, err = 0; 5375 5401 5376 - if (!test_bit(accel->pool, &adapter->fwd_bitmask)) 5402 + if (!test_bit(accel->pool, adapter->fwd_bitmask)) 5377 5403 return 0; 5378 5404 5379 5405 baseq = accel->pool * adapter->num_rx_queues_per_pool; 5380 - netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", 5406 + netdev_dbg(vdev, "pool %i:%i queues %i:%i\n", 5381 5407 accel->pool, adapter->num_rx_pools, 5382 - baseq, baseq + adapter->num_rx_queues_per_pool, 5383 - adapter->fwd_bitmask); 5408 + baseq, baseq + adapter->num_rx_queues_per_pool); 5384 5409 5385 5410 accel->netdev = vdev; 5386 5411 accel->rx_base_queue = rxbase = baseq; ··· 5390 5417 5391 5418 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 5392 5419 
adapter->rx_ring[rxbase + i]->netdev = vdev; 5393 - adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; 5394 5420 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); 5395 5421 } 5396 5422 5397 - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 5423 + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) 5398 5424 adapter->tx_ring[txbase + i]->netdev = vdev; 5399 - adapter->tx_ring[txbase + i]->l2_accel_priv = accel; 5400 - } 5401 5425 5402 5426 queues = min_t(unsigned int, 5403 5427 adapter->num_rx_queues_per_pool, vdev->num_tx_queues); ··· 5407 5437 goto fwd_queue_err; 5408 5438 5409 5439 if (is_valid_ether_addr(vdev->dev_addr)) 5410 - ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); 5440 + ixgbe_add_mac_filter(adapter, vdev->dev_addr, 5441 + VMDQ_P(accel->pool)); 5411 5442 5412 - ixgbe_fwd_psrtype(accel); 5413 - ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); 5443 + ixgbe_macvlan_set_rx_mode(vdev, VMDQ_P(accel->pool), adapter); 5414 5444 return err; 5415 5445 fwd_queue_err: 5416 5446 ixgbe_fwd_ring_down(vdev, accel); ··· 6274 6304 } 6275 6305 6276 6306 /* PF holds first pool slot */ 6277 - set_bit(0, &adapter->fwd_bitmask); 6307 + set_bit(0, adapter->fwd_bitmask); 6278 6308 set_bit(__IXGBE_DOWN, &adapter->state); 6279 6309 6280 6310 return 0; ··· 6761 6791 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 6762 6792 struct net_device *netdev = adapter->netdev; 6763 6793 struct ixgbe_hw *hw = &adapter->hw; 6764 - u32 ctrl, fctrl; 6794 + u32 ctrl; 6765 6795 u32 wufc = adapter->wol; 6766 6796 #ifdef CONFIG_PM 6767 6797 int retval = 0; ··· 6786 6816 hw->mac.ops.stop_link_on_d3(hw); 6787 6817 6788 6818 if (wufc) { 6819 + u32 fctrl; 6820 + 6789 6821 ixgbe_set_rx_mode(netdev); 6790 6822 6791 6823 /* enable the optics for 82599 SFP+ fiber as we can WoL */ 6792 6824 if (hw->mac.ops.enable_tx_laser) 6793 6825 hw->mac.ops.enable_tx_laser(hw); 6794 6826 6795 - /* turn on all-multi mode if wake on multicast is enabled */ 6796 - if 
(wufc & IXGBE_WUFC_MC) { 6797 - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 6798 - fctrl |= IXGBE_FCTRL_MPE; 6799 - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 6800 - } 6827 + /* enable the reception of multicast packets */ 6828 + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 6829 + fctrl |= IXGBE_FCTRL_MPE; 6830 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 6801 6831 6802 6832 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 6803 6833 ctrl |= IXGBE_CTRL_GIO_DIS; ··· 7633 7663 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) 7634 7664 { 7635 7665 struct ixgbe_hw *hw = &adapter->hw; 7666 + u32 cap_speed; 7636 7667 u32 speed; 7637 7668 bool autoneg = false; 7638 7669 ··· 7646 7675 7647 7676 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 7648 7677 7649 - speed = hw->phy.autoneg_advertised; 7650 - if ((!speed) && (hw->mac.ops.get_link_capabilities)) { 7651 - hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); 7678 + hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); 7652 7679 7653 - /* setup the highest link when no autoneg */ 7654 - if (!autoneg) { 7655 - if (speed & IXGBE_LINK_SPEED_10GB_FULL) 7656 - speed = IXGBE_LINK_SPEED_10GB_FULL; 7657 - } 7658 - } 7680 + /* advertise highest capable link speed */ 7681 + if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL)) 7682 + speed = IXGBE_LINK_SPEED_10GB_FULL; 7683 + else 7684 + speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL | 7685 + IXGBE_LINK_SPEED_1GB_FULL); 7659 7686 7660 7687 if (hw->mac.ops.setup_link) 7661 7688 hw->mac.ops.setup_link(hw, speed, true); ··· 8846 8877 { 8847 8878 struct ixgbe_adapter *adapter = netdev_priv(dev); 8848 8879 struct ixgbe_hw *hw = &adapter->hw; 8849 - bool pools; 8850 8880 8851 8881 /* Hardware supports up to 8 traffic classes */ 8852 8882 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) ··· 8853 8885 8854 8886 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) 8855 8887 return -EINVAL; 8856 - 8857 - pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) 
> 1); 8858 - if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) 8859 - return -EBUSY; 8860 8888 8861 8889 /* Hardware has to reinitialize queues and interrupts to 8862 8890 * match packet buffer alignment. Unfortunately, the ··· 9016 9052 static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, 9017 9053 u8 *queue, u64 *action) 9018 9054 { 9055 + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 9019 9056 unsigned int num_vfs = adapter->num_vfs, vf; 9020 9057 struct upper_walk_data data; 9021 9058 struct net_device *upper; ··· 9025 9060 for (vf = 0; vf < num_vfs; ++vf) { 9026 9061 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); 9027 9062 if (upper->ifindex == ifindex) { 9028 - if (adapter->num_rx_pools > 1) 9029 - *queue = vf * 2; 9030 - else 9031 - *queue = vf * adapter->num_rx_queues_per_pool; 9032 - 9063 + *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); 9033 9064 *action = vf + 1; 9034 9065 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 9035 9066 return 0; ··· 9792 9831 struct ixgbe_fwd_adapter *fwd_adapter = NULL; 9793 9832 struct ixgbe_adapter *adapter = netdev_priv(pdev); 9794 9833 int used_pools = adapter->num_vfs + adapter->num_rx_pools; 9834 + int tcs = netdev_get_num_tc(pdev) ? 
: 1; 9795 9835 unsigned int limit; 9796 9836 int pool, err; 9797 9837 ··· 9820 9858 } 9821 9859 9822 9860 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 9823 - adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || 9861 + adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || 9824 9862 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) 9825 9863 return ERR_PTR(-EBUSY); 9826 9864 ··· 9828 9866 if (!fwd_adapter) 9829 9867 return ERR_PTR(-ENOMEM); 9830 9868 9831 - pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); 9832 - adapter->num_rx_pools++; 9833 - set_bit(pool, &adapter->fwd_bitmask); 9834 - limit = find_last_bit(&adapter->fwd_bitmask, 32); 9869 + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); 9870 + set_bit(pool, adapter->fwd_bitmask); 9871 + limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1); 9835 9872 9836 9873 /* Enable VMDq flag so device will be set in VM mode */ 9837 9874 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; ··· 9856 9895 /* unwind counter and free adapter struct */ 9857 9896 netdev_info(pdev, 9858 9897 "%s: dfwd hardware acceleration failed\n", vdev->name); 9859 - clear_bit(pool, &adapter->fwd_bitmask); 9860 - adapter->num_rx_pools--; 9898 + clear_bit(pool, adapter->fwd_bitmask); 9861 9899 kfree(fwd_adapter); 9862 9900 return ERR_PTR(err); 9863 9901 } ··· 9867 9907 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; 9868 9908 unsigned int limit; 9869 9909 9870 - clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); 9871 - adapter->num_rx_pools--; 9910 + clear_bit(fwd_adapter->pool, adapter->fwd_bitmask); 9872 9911 9873 - limit = find_last_bit(&adapter->fwd_bitmask, 32); 9912 + limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools); 9874 9913 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; 9875 9914 ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); 9876 9915 ··· 9884 9925 } 9885 9926 9886 9927 ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); 9887 - 
netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", 9928 + netdev_dbg(pdev, "pool %i:%i queues %i:%i\n", 9888 9929 fwd_adapter->pool, adapter->num_rx_pools, 9889 9930 fwd_adapter->rx_base_queue, 9890 - fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, 9891 - adapter->fwd_bitmask); 9931 + fwd_adapter->rx_base_queue + 9932 + adapter->num_rx_queues_per_pool); 9892 9933 kfree(fwd_adapter); 9893 9934 } 9894 9935 ··· 10199 10240 } 10200 10241 10201 10242 return false; 10243 + } 10244 + 10245 + /** 10246 + * ixgbe_set_fw_version - Set FW version 10247 + * @adapter: the adapter private structure 10248 + * 10249 + * This function is used by probe and ethtool to determine the FW version to 10250 + * format to display. The FW version is taken from the EEPROM/NVM. 10251 + */ 10252 + static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) 10253 + { 10254 + struct ixgbe_hw *hw = &adapter->hw; 10255 + struct ixgbe_nvm_version nvm_ver; 10256 + 10257 + ixgbe_get_oem_prod_version(hw, &nvm_ver); 10258 + if (nvm_ver.oem_valid) { 10259 + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), 10260 + "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor, 10261 + nvm_ver.oem_release); 10262 + return; 10263 + } 10264 + 10265 + ixgbe_get_etk_id(hw, &nvm_ver); 10266 + ixgbe_get_orom_version(hw, &nvm_ver); 10267 + 10268 + if (nvm_ver.or_valid) { 10269 + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), 10270 + "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major, 10271 + nvm_ver.or_build, nvm_ver.or_patch); 10272 + return; 10273 + } 10274 + 10275 + /* Set ETrack ID format */ 10276 + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), 10277 + "0x%08x", nvm_ver.etk_id); 10202 10278 } 10203 10279 10204 10280 /** ··· 10572 10578 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 10573 10579 10574 10580 /* save off EEPROM version number */ 10575 - hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); 10576 - hw->eeprom.ops.read(hw, 0x2d, 
&adapter->eeprom_verl); 10581 + ixgbe_set_fw_version(adapter); 10577 10582 10578 10583 /* pick up the PCI bus settings for reporting later */ 10579 10584 if (ixgbe_pcie_from_parent(hw))
+12 -34
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
··· 227 227 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) 228 228 { 229 229 unsigned int num_vfs = adapter->num_vfs, vf; 230 - struct ixgbe_hw *hw = &adapter->hw; 231 - u32 gpie; 232 - u32 vmdctl; 233 230 int rss; 234 231 235 232 /* set num VFs to 0 to prevent access to vfinfo */ ··· 268 271 pci_disable_sriov(adapter->pdev); 269 272 #endif 270 273 271 - /* turn off device IOV mode */ 272 - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); 273 - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 274 - gpie &= ~IXGBE_GPIE_VTMODE_MASK; 275 - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 276 - 277 - /* set default pool back to 0 */ 278 - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 279 - vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; 280 - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); 281 - IXGBE_WRITE_FLUSH(hw); 282 - 283 274 /* Disable VMDq flag so device will be set in VM mode */ 284 275 if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { 285 276 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; ··· 290 305 { 291 306 #ifdef CONFIG_PCI_IOV 292 307 struct ixgbe_adapter *adapter = pci_get_drvdata(dev); 293 - int err = 0; 294 - u8 num_tc; 295 - int i; 296 308 int pre_existing_vfs = pci_num_vf(dev); 309 + int err = 0, num_rx_pools, i, limit; 310 + u8 num_tc; 297 311 298 312 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 299 313 err = ixgbe_disable_sriov(adapter); ··· 315 331 * other values out of range. 316 332 */ 317 333 num_tc = netdev_get_num_tc(adapter->netdev); 334 + num_rx_pools = adapter->num_rx_pools; 335 + limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : 336 + (num_tc > 1) ? 
IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; 318 337 319 - if (num_tc > 4) { 320 - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) { 321 - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC); 322 - return -EPERM; 323 - } 324 - } else if ((num_tc > 1) && (num_tc <= 4)) { 325 - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) { 326 - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC); 327 - return -EPERM; 328 - } 329 - } else { 330 - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) { 331 - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC); 332 - return -EPERM; 333 - } 338 + if (num_vfs > (limit - num_rx_pools)) { 339 + e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n", 340 + num_tc, num_rx_pools - 1, limit - num_rx_pools); 341 + return -EPERM; 334 342 } 335 343 336 344 err = __ixgbe_enable_sriov(adapter, num_vfs); ··· 354 378 int err; 355 379 #ifdef CONFIG_PCI_IOV 356 380 u32 current_flags = adapter->flags; 381 + int prev_num_vf = pci_num_vf(dev); 357 382 #endif 358 383 359 384 err = ixgbe_disable_sriov(adapter); 360 385 361 386 /* Only reinit if no error and state changed */ 362 387 #ifdef CONFIG_PCI_IOV 363 - if (!err && current_flags != adapter->flags) 388 + if (!err && (current_flags != adapter->flags || 389 + prev_num_vf != pci_num_vf(dev))) 364 390 ixgbe_sriov_reinit(adapter); 365 391 #endif 366 392
+39
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
··· 235 235 struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; 236 236 }; 237 237 238 + #define NVM_OROM_OFFSET 0x17 239 + #define NVM_OROM_BLK_LOW 0x83 240 + #define NVM_OROM_BLK_HI 0x84 241 + #define NVM_OROM_PATCH_MASK 0xFF 242 + #define NVM_OROM_SHIFT 8 243 + 244 + #define NVM_VER_MASK 0x00FF /* version mask */ 245 + #define NVM_VER_SHIFT 8 /* version bit shift */ 246 + #define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */ 247 + #define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */ 248 + #define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */ 249 + #define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */ 250 + #define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */ 251 + #define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */ 252 + #define NVM_ETK_OFF_LOW 0x2D /* version low order word */ 253 + #define NVM_ETK_OFF_HI 0x2E /* version high order word */ 254 + #define NVM_ETK_SHIFT 16 /* high version word shift */ 255 + #define NVM_VER_INVALID 0xFFFF 256 + #define NVM_ETK_VALID 0x8000 257 + #define NVM_INVALID_PTR 0xFFFF 258 + #define NVM_VER_SIZE 32 /* version sting size */ 259 + 260 + struct ixgbe_nvm_version { 261 + u32 etk_id; 262 + u8 nvm_major; 263 + u16 nvm_minor; 264 + u8 nvm_id; 265 + 266 + bool oem_valid; 267 + u8 oem_major; 268 + u8 oem_minor; 269 + u16 oem_release; 270 + 271 + bool or_valid; 272 + u8 or_major; 273 + u16 or_build; 274 + u8 or_patch; 275 + }; 276 + 238 277 /* Interrupt Registers */ 239 278 #define IXGBE_EICR 0x00800 240 279 #define IXGBE_EICS 0x00808
-4
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 1896 1896 unsigned int flags = netdev->flags; 1897 1897 int xcast_mode; 1898 1898 1899 - xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI : 1900 - (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 1901 - IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; 1902 - 1903 1899 /* request the most inclusive mode we need */ 1904 1900 if (flags & IFF_PROMISC) 1905 1901 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;