Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2025-04-11 (ice, i40e, ixgbe, igc, e1000e)

For ice:
Mateusz and Larysa add support for LLDP packets to be received on a VF
and transmitted by a VF in switchdev mode. Additional information:
https://lore.kernel.org/intel-wired-lan/20250214085215.2846063-1-larysa.zaremba@intel.com/

Karol adds timesync support for E825C devices using 2xNAC (Network
Acceleration Complex) configuration. 2xNAC is the mode in which the
IO die houses two complexes, each of which has its own PHY
connected to it.

Martyna adds messaging to clarify filter errors when recipe space is
exhausted.

Colin Ian King adds static modifier to a const array to avoid stack
usage.

For i40e:
Kyungwook Boo changes variable declaration types to prevent possible
underflow.

For ixgbe:
Rand Deeb adjusts retry values so that retries are attempted.

For igc:
Rui Salvaterra sets VLAN offloads to be enabled as default.

For e1000e:
Piotr Wejman converts driver to use newer hardware timestamping API.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
net: e1000e: convert to ndo_hwtstamp_get() and ndo_hwtstamp_set()
igc: enable HW vlan tag insertion/stripping by default
ixgbe: Fix unreachable retry logic in combined and byte I2C write functions
i40e: fix MMIO write access to an invalid page in i40e_clear_hw
ice: make const read-only array dflt_rules static
ice: improve error message for insufficient filter space
ice: enable timesync operation on 2xNAC E825 devices
ice: refactor ice_sbq_msg_dev enum
ice: remove SW side band access workaround for E825
ice: enable LLDP TX for VFs through tc
ice: support egress drop rules on PF
ice: remove headers argument from ice_tc_count_lkups
ice: receive LLDP on trusted VFs
ice: do not add LLDP-specific filter if not necessary
ice: fix check for existing switch rule
====================

Link: https://patch.msgid.link/20250411204401.3271306-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+685 -192
+1 -1
drivers/net/ethernet/intel/e1000e/e1000.h
··· 319 319 u16 tx_ring_count; 320 320 u16 rx_ring_count; 321 321 322 - struct hwtstamp_config hwtstamp_config; 322 + struct kernel_hwtstamp_config hwtstamp_config; 323 323 struct delayed_work systim_overflow_work; 324 324 struct sk_buff *tx_hwtstamp_skb; 325 325 unsigned long tx_hwtstamp_start;
+37 -38
drivers/net/ethernet/intel/e1000e/netdev.c
··· 3574 3574 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable 3575 3575 * @adapter: board private structure 3576 3576 * @config: timestamp configuration 3577 + * @extack: netlink extended ACK for error report 3577 3578 * 3578 3579 * Outgoing time stamping can be enabled and disabled. Play nice and 3579 3580 * disable it when requested, although it shouldn't cause any overhead ··· 3588 3587 * exception of "all V2 events regardless of level 2 or 4". 3589 3588 **/ 3590 3589 static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, 3591 - struct hwtstamp_config *config) 3590 + struct kernel_hwtstamp_config *config, 3591 + struct netlink_ext_ack *extack) 3592 3592 { 3593 3593 struct e1000_hw *hw = &adapter->hw; 3594 3594 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; ··· 3600 3598 bool is_l2 = false; 3601 3599 u32 regval; 3602 3600 3603 - if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) 3601 + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { 3602 + NL_SET_ERR_MSG(extack, "No HW timestamp support"); 3604 3603 return -EINVAL; 3604 + } 3605 3605 3606 3606 switch (config->tx_type) { 3607 3607 case HWTSTAMP_TX_OFF: ··· 3612 3608 case HWTSTAMP_TX_ON: 3613 3609 break; 3614 3610 default: 3611 + NL_SET_ERR_MSG(extack, "Unsupported TX HW timestamp type"); 3615 3612 return -ERANGE; 3616 3613 } 3617 3614 ··· 3686 3681 config->rx_filter = HWTSTAMP_FILTER_ALL; 3687 3682 break; 3688 3683 default: 3684 + NL_SET_ERR_MSG(extack, "Unsupported RX HW timestamp filter"); 3689 3685 return -ERANGE; 3690 3686 } 3691 3687 ··· 3699 3693 ew32(TSYNCTXCTL, regval); 3700 3694 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != 3701 3695 (regval & E1000_TSYNCTXCTL_ENABLED)) { 3702 - e_err("Timesync Tx Control register not set as expected\n"); 3696 + NL_SET_ERR_MSG(extack, 3697 + "Timesync Tx Control register not set as expected"); 3703 3698 return -EAGAIN; 3704 3699 } 3705 3700 ··· 3713 3706 E1000_TSYNCRXCTL_TYPE_MASK)) != 3714 3707 (regval & 
(E1000_TSYNCRXCTL_ENABLED | 3715 3708 E1000_TSYNCRXCTL_TYPE_MASK))) { 3716 - e_err("Timesync Rx Control register not set as expected\n"); 3709 + NL_SET_ERR_MSG(extack, 3710 + "Timesync Rx Control register not set as expected"); 3717 3711 return -EAGAIN; 3718 3712 } 3719 3713 ··· 3909 3901 { 3910 3902 struct ptp_clock_info *info = &adapter->ptp_clock_info; 3911 3903 struct e1000_hw *hw = &adapter->hw; 3904 + struct netlink_ext_ack extack = {}; 3912 3905 unsigned long flags; 3913 3906 u32 timinca; 3914 3907 s32 ret_val; ··· 3941 3932 spin_unlock_irqrestore(&adapter->systim_lock, flags); 3942 3933 3943 3934 /* restore the previous hwtstamp configuration settings */ 3944 - e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); 3935 + ret_val = e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config, 3936 + &extack); 3937 + if (ret_val) { 3938 + if (extack._msg) 3939 + e_err("%s\n", extack._msg); 3940 + } 3945 3941 } 3946 3942 3947 3943 /** ··· 6093 6079 return 0; 6094 6080 } 6095 6081 6096 - static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 6097 - int cmd) 6082 + static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6098 6083 { 6099 6084 struct e1000_adapter *adapter = netdev_priv(netdev); 6100 6085 struct mii_ioctl_data *data = if_mii(ifr); ··· 6153 6140 /** 6154 6141 * e1000e_hwtstamp_set - control hardware time stamping 6155 6142 * @netdev: network interface device structure 6156 - * @ifr: interface request 6143 + * @config: timestamp configuration 6144 + * @extack: netlink extended ACK report 6157 6145 * 6158 6146 * Outgoing time stamping can be enabled and disabled. Play nice and 6159 6147 * disable it when requested, although it shouldn't cause any overhead ··· 6167 6153 * specified. Matching the kind of event packet is not supported, with the 6168 6154 * exception of "all V2 events regardless of level 2 or 4". 
6169 6155 **/ 6170 - static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 6156 + static int e1000e_hwtstamp_set(struct net_device *netdev, 6157 + struct kernel_hwtstamp_config *config, 6158 + struct netlink_ext_ack *extack) 6171 6159 { 6172 6160 struct e1000_adapter *adapter = netdev_priv(netdev); 6173 - struct hwtstamp_config config; 6174 6161 int ret_val; 6175 6162 6176 - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 6177 - return -EFAULT; 6178 - 6179 - ret_val = e1000e_config_hwtstamp(adapter, &config); 6163 + ret_val = e1000e_config_hwtstamp(adapter, config, extack); 6180 6164 if (ret_val) 6181 6165 return ret_val; 6182 6166 6183 - switch (config.rx_filter) { 6167 + switch (config->rx_filter) { 6184 6168 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 6185 6169 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 6186 6170 case HWTSTAMP_FILTER_PTP_V2_SYNC: ··· 6190 6178 * by hardware so notify the caller the requested packets plus 6191 6179 * some others are time stamped. 6192 6180 */ 6193 - config.rx_filter = HWTSTAMP_FILTER_SOME; 6181 + config->rx_filter = HWTSTAMP_FILTER_SOME; 6194 6182 break; 6195 6183 default: 6196 6184 break; 6197 6185 } 6198 6186 6199 - return copy_to_user(ifr->ifr_data, &config, 6200 - sizeof(config)) ? -EFAULT : 0; 6187 + return 0; 6201 6188 } 6202 6189 6203 - static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) 6190 + static int e1000e_hwtstamp_get(struct net_device *netdev, 6191 + struct kernel_hwtstamp_config *kernel_config) 6204 6192 { 6205 6193 struct e1000_adapter *adapter = netdev_priv(netdev); 6206 6194 6207 - return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, 6208 - sizeof(adapter->hwtstamp_config)) ? 
-EFAULT : 0; 6209 - } 6195 + *kernel_config = adapter->hwtstamp_config; 6210 6196 6211 - static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6212 - { 6213 - switch (cmd) { 6214 - case SIOCGMIIPHY: 6215 - case SIOCGMIIREG: 6216 - case SIOCSMIIREG: 6217 - return e1000_mii_ioctl(netdev, ifr, cmd); 6218 - case SIOCSHWTSTAMP: 6219 - return e1000e_hwtstamp_set(netdev, ifr); 6220 - case SIOCGHWTSTAMP: 6221 - return e1000e_hwtstamp_get(netdev, ifr); 6222 - default: 6223 - return -EOPNOTSUPP; 6224 - } 6197 + return 0; 6225 6198 } 6226 6199 6227 6200 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) ··· 7343 7346 #ifdef CONFIG_NET_POLL_CONTROLLER 7344 7347 .ndo_poll_controller = e1000_netpoll, 7345 7348 #endif 7346 - .ndo_set_features = e1000_set_features, 7347 - .ndo_fix_features = e1000_fix_features, 7349 + .ndo_set_features = e1000_set_features, 7350 + .ndo_fix_features = e1000_fix_features, 7348 7351 .ndo_features_check = passthru_features_check, 7352 + .ndo_hwtstamp_get = e1000e_hwtstamp_get, 7353 + .ndo_hwtstamp_set = e1000e_hwtstamp_set, 7349 7354 }; 7350 7355 7351 7356 /**
+4 -3
drivers/net/ethernet/intel/i40e/i40e_common.c
··· 817 817 void i40e_clear_hw(struct i40e_hw *hw) 818 818 { 819 819 u32 num_queues, base_queue; 820 - u32 num_pf_int; 821 - u32 num_vf_int; 820 + s32 num_pf_int; 821 + s32 num_vf_int; 822 822 u32 num_vfs; 823 - u32 i, j; 823 + s32 i; 824 + u32 j; 824 825 u32 val; 825 826 u32 eol = 0x7ff; 826 827
+59 -2
drivers/net/ethernet/intel/ice/ice.h
··· 193 193 194 194 #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) 195 195 196 - #define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned) 197 - 198 196 enum ice_feature { 199 197 ICE_F_DSCP, 200 198 ICE_F_PHY_RCLK, ··· 513 515 ICE_FLAG_MTU_CHANGED, 514 516 ICE_FLAG_GNSS, /* GNSS successfully initialized */ 515 517 ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */ 518 + ICE_FLAG_LLDP_AQ_FLTR, 516 519 ICE_PF_FLAGS_NBITS /* must be last */ 517 520 }; 518 521 ··· 1044 1045 } 1045 1046 1046 1047 extern const struct xdp_metadata_ops ice_xdp_md_ops; 1048 + 1049 + /** 1050 + * ice_is_dual - Check if given config is multi-NAC 1051 + * @hw: pointer to HW structure 1052 + * 1053 + * Return: true if the device is running in mutli-NAC (Network 1054 + * Acceleration Complex) configuration variant, false otherwise 1055 + * (always false for non-E825 devices). 1056 + */ 1057 + static inline bool ice_is_dual(struct ice_hw *hw) 1058 + { 1059 + return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && 1060 + (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); 1061 + } 1062 + 1063 + /** 1064 + * ice_is_primary - Check if given device belongs to the primary complex 1065 + * @hw: pointer to HW structure 1066 + * 1067 + * Check if given PF/HW is running on primary complex in multi-NAC 1068 + * configuration. 1069 + * 1070 + * Return: true if the device is dual, false otherwise (always true 1071 + * for non-E825 devices). 1072 + */ 1073 + static inline bool ice_is_primary(struct ice_hw *hw) 1074 + { 1075 + return hw->mac_type != ICE_MAC_GENERIC_3K_E825 || 1076 + !ice_is_dual(hw) || 1077 + (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M); 1078 + } 1079 + 1080 + /** 1081 + * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF 1082 + * @pf: pointer to PF structure 1083 + * 1084 + * Return: true if PF owns primary timer, false otherwise. 
1085 + */ 1086 + static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf) 1087 + { 1088 + return pf->hw.func_caps.ts_func_info.src_tmr_owned && 1089 + ice_is_primary(&pf->hw); 1090 + } 1091 + 1092 + /** 1093 + * ice_get_primary_hw - Get pointer to primary ice_hw structure 1094 + * @pf: pointer to PF structure 1095 + * 1096 + * Return: A pointer to ice_hw structure with access to timesync 1097 + * register space. 1098 + */ 1099 + static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf) 1100 + { 1101 + if (!pf->adapter->ctrl_pf) 1102 + return &pf->hw; 1103 + else 1104 + return &pf->adapter->ctrl_pf->hw; 1105 + } 1047 1106 #endif /* _ICE_H_ */
+16 -6
drivers/net/ethernet/intel/ice/ice_common.c
··· 1135 1135 } 1136 1136 } 1137 1137 1138 + hw->lane_num = ice_get_phy_lane_number(hw); 1139 + 1138 1140 return 0; 1139 1141 err_unroll_fltr_mgmt_struct: 1140 1142 ice_cleanup_fltr_mgmt_struct(hw); ··· 3436 3434 msg.msg_addr_low = lower_16_bits(reg_offset); 3437 3435 msg.msg_addr_high = receiver_id; 3438 3436 msg.opcode = ice_sbq_msg_rd; 3439 - msg.dest_dev = rmn_0; 3437 + msg.dest_dev = ice_sbq_dev_phy_0; 3440 3438 3441 3439 err = ice_sbq_rw_reg(hw, &msg, flag); 3442 3440 if (err) ··· 4084 4082 continue; 4085 4083 4086 4084 if (hw->pf_id == lport) { 4085 + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && 4086 + ice_is_dual(hw) && !ice_is_primary(hw)) 4087 + lane += ICE_PORTS_PER_QUAD; 4087 4088 kfree(options); 4088 4089 return lane; 4089 4090 } 4090 - 4091 4091 lport++; 4092 4092 } 4093 4093 ··· 6015 6011 /** 6016 6012 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter 6017 6013 * @hw: pointer to HW struct 6018 - * @vsi_num: absolute HW index for VSI 6014 + * @vsi: VSI to add the filter to 6019 6015 * @add: boolean for if adding or removing a filter 6016 + * 6017 + * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed 6018 + * with this HW or VSI, otherwise an error corresponding to 6019 + * the AQ transaction result. 6020 6020 */ 6021 - int 6022 - ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) 6021 + int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add) 6023 6022 { 6024 6023 struct ice_aqc_lldp_filter_ctrl *cmd; 6025 6024 struct ice_aq_desc desc; 6025 + 6026 + if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw)) 6027 + return -EOPNOTSUPP; 6026 6028 6027 6029 cmd = &desc.params.lldp_filter_ctrl; 6028 6030 ··· 6039 6029 else 6040 6030 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; 6041 6031 6042 - cmd->vsi_num = cpu_to_le16(vsi_num); 6032 + cmd->vsi_num = cpu_to_le16(vsi->vsi_num); 6043 6033 6044 6034 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 6045 6035 }
+1 -2
drivers/net/ethernet/intel/ice/ice_common.h
··· 290 290 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, 291 291 struct ice_sq_cd *cd); 292 292 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); 293 - int 294 - ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); 293 + int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add); 295 294 int ice_lldp_execute_pending_mib(struct ice_hw *hw); 296 295 int 297 296 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+1 -1
drivers/net/ethernet/intel/ice/ice_dcb_lib.c
··· 846 846 goto dcb_init_err; 847 847 } 848 848 849 - ice_cfg_sw_lldp(pf_vsi, false, true); 849 + ice_cfg_sw_rx_lldp(pf, true); 850 850 851 851 pf->dcbx_cap = ice_dcb_get_mode(port_info, true); 852 852 return 0;
+6
drivers/net/ethernet/intel/ice/ice_eswitch.c
··· 29 29 return -ENODEV; 30 30 31 31 ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); 32 + ice_vsi_cfg_sw_lldp(uplink_vsi, true, false); 32 33 33 34 netif_addr_lock_bh(netdev); 34 35 __dev_uc_unsync(netdev, NULL); ··· 246 245 u64 cd_cmd, dst_vsi; 247 246 248 247 if (!dst) { 248 + struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 249 + 250 + if (unlikely(eth->h_proto == htons(ETH_P_LLDP))) 251 + return; 249 252 cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S; 250 253 off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); 251 254 } else { ··· 283 278 ice_fltr_add_mac_and_broadcast(uplink_vsi, 284 279 uplink_vsi->port_info->mac.perm_addr, 285 280 ICE_FWD_TO_VSI); 281 + ice_vsi_cfg_sw_lldp(uplink_vsi, true, true); 286 282 } 287 283 288 284 /**
+1 -1
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 1818 1818 /* Remove rule to direct LLDP packets to default VSI. 1819 1819 * The FW LLDP engine will now be consuming them. 1820 1820 */ 1821 - ice_cfg_sw_lldp(vsi, false, false); 1821 + ice_cfg_sw_rx_lldp(vsi->back, false); 1822 1822 1823 1823 /* AQ command to start FW LLDP agent will return an 1824 1824 * error if the agent is already started
+1 -1
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
··· 1605 1605 */ 1606 1606 int ice_fdir_create_dflt_rules(struct ice_pf *pf) 1607 1607 { 1608 - const enum ice_fltr_ptype dflt_rules[] = { 1608 + static const enum ice_fltr_ptype dflt_rules[] = { 1609 1609 ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1610 1610 ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP, 1611 1611 };
+58 -13
drivers/net/ethernet/intel/ice/ice_lib.c
··· 2065 2065 } 2066 2066 2067 2067 /** 2068 - * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling 2068 + * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling 2069 2069 * @vsi: the VSI being configured 2070 2070 * @tx: bool to determine Tx or Rx rule 2071 2071 * @create: bool to determine create or remove Rule 2072 + * 2073 + * Adding an ethtype Tx rule to the uplink VSI results in it being applied 2074 + * to the whole port, so LLDP transmission for VFs will be blocked too. 2072 2075 */ 2073 - void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) 2076 + void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) 2074 2077 { 2075 2078 int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, 2076 2079 enum ice_sw_fwd_act_type act); ··· 2088 2085 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, 2089 2086 ICE_DROP_PACKET); 2090 2087 } else { 2091 - if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { 2092 - status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, 2093 - create); 2094 - } else { 2088 + if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) { 2095 2089 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, 2096 2090 ICE_FWD_TO_VSI); 2091 + if (!status || !create) 2092 + goto report; 2093 + 2094 + dev_info(dev, 2095 + "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n", 2096 + vsi->vsi_num, status); 2097 2097 } 2098 + 2099 + status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create); 2100 + if (!status) 2101 + set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags); 2102 + 2098 2103 } 2099 2104 2105 + report: 2100 2106 if (status) 2101 - dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", 2102 - create ? "adding" : "removing", tx ? "TX" : "RX", 2103 - vsi->vsi_num, status); 2107 + dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n", 2108 + create ? "add" : "remove", tx ? 
"Tx" : "Rx", 2109 + vsi->vsi_num, status); 2110 + } 2111 + 2112 + /** 2113 + * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP 2114 + * @pf: the PF being configured 2115 + * @enable: enable or disable 2116 + * 2117 + * Configure switch rules to enable/disable LLDP handling by software 2118 + * across PF. 2119 + */ 2120 + void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable) 2121 + { 2122 + struct ice_vsi *vsi; 2123 + struct ice_vf *vf; 2124 + unsigned int bkt; 2125 + 2126 + vsi = ice_get_main_vsi(pf); 2127 + ice_vsi_cfg_sw_lldp(vsi, false, enable); 2128 + 2129 + if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) 2130 + return; 2131 + 2132 + ice_for_each_vf(pf, bkt, vf) { 2133 + vsi = ice_get_vf_vsi(vf); 2134 + 2135 + if (WARN_ON(!vsi)) 2136 + continue; 2137 + 2138 + if (ice_vf_is_lldp_ena(vf)) 2139 + ice_vsi_cfg_sw_lldp(vsi, false, enable); 2140 + } 2104 2141 } 2105 2142 2106 2143 /** ··· 2571 2528 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { 2572 2529 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, 2573 2530 ICE_DROP_PACKET); 2574 - ice_cfg_sw_lldp(vsi, true, true); 2531 + ice_vsi_cfg_sw_lldp(vsi, true, true); 2575 2532 } 2576 2533 2577 2534 if (!vsi->agg_node) ··· 2868 2825 /* The Rx rule will only exist to remove if the LLDP FW 2869 2826 * engine is currently stopped 2870 2827 */ 2871 - if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && 2872 - !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) 2873 - ice_cfg_sw_lldp(vsi, false, false); 2828 + if (!ice_is_safe_mode(pf) && 2829 + !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) && 2830 + (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF && 2831 + ice_vf_is_lldp_ena(vsi->vf)))) 2832 + ice_vsi_cfg_sw_lldp(vsi, false, false); 2874 2833 2875 2834 ice_vsi_decfg(vsi); 2876 2835
+2 -1
drivers/net/ethernet/intel/ice/ice_lib.h
··· 29 29 30 30 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); 31 31 32 - void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); 32 + void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); 33 + void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable); 33 34 34 35 int ice_set_link(struct ice_vsi *vsi, bool ena); 35 36
+55 -8
drivers/net/ethernet/intel/ice/ice_main.c
··· 8330 8330 * @np: net device to configure 8331 8331 * @filter_dev: device on which filter is added 8332 8332 * @cls_flower: offload data 8333 + * @ingress: if the rule is added to an ingress block 8334 + * 8335 + * Return: 0 if the flower was successfully added or deleted, 8336 + * negative error code otherwise. 8333 8337 */ 8334 8338 static int 8335 8339 ice_setup_tc_cls_flower(struct ice_netdev_priv *np, 8336 8340 struct net_device *filter_dev, 8337 - struct flow_cls_offload *cls_flower) 8341 + struct flow_cls_offload *cls_flower, 8342 + bool ingress) 8338 8343 { 8339 8344 struct ice_vsi *vsi = np->vsi; 8340 8345 ··· 8348 8343 8349 8344 switch (cls_flower->command) { 8350 8345 case FLOW_CLS_REPLACE: 8351 - return ice_add_cls_flower(filter_dev, vsi, cls_flower); 8346 + return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress); 8352 8347 case FLOW_CLS_DESTROY: 8353 8348 return ice_del_cls_flower(vsi, cls_flower); 8354 8349 default: ··· 8357 8352 } 8358 8353 8359 8354 /** 8360 - * ice_setup_tc_block_cb - callback handler registered for TC block 8355 + * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block 8361 8356 * @type: TC SETUP type 8362 8357 * @type_data: TC flower offload data that contains user input 8363 8358 * @cb_priv: netdev private data 8359 + * 8360 + * Return: 0 if the setup was successful, negative error code otherwise. 
8364 8361 */ 8365 8362 static int 8366 - ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) 8363 + ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data, 8364 + void *cb_priv) 8367 8365 { 8368 8366 struct ice_netdev_priv *np = cb_priv; 8369 8367 8370 8368 switch (type) { 8371 8369 case TC_SETUP_CLSFLOWER: 8372 8370 return ice_setup_tc_cls_flower(np, np->vsi->netdev, 8373 - type_data); 8371 + type_data, true); 8372 + default: 8373 + return -EOPNOTSUPP; 8374 + } 8375 + } 8376 + 8377 + /** 8378 + * ice_setup_tc_block_cb_egress - callback handler for egress TC block 8379 + * @type: TC SETUP type 8380 + * @type_data: TC flower offload data that contains user input 8381 + * @cb_priv: netdev private data 8382 + * 8383 + * Return: 0 if the setup was successful, negative error code otherwise. 8384 + */ 8385 + static int 8386 + ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data, 8387 + void *cb_priv) 8388 + { 8389 + struct ice_netdev_priv *np = cb_priv; 8390 + 8391 + switch (type) { 8392 + case TC_SETUP_CLSFLOWER: 8393 + return ice_setup_tc_cls_flower(np, np->vsi->netdev, 8394 + type_data, false); 8374 8395 default: 8375 8396 return -EOPNOTSUPP; 8376 8397 } ··· 9341 9310 void *type_data) 9342 9311 { 9343 9312 struct ice_netdev_priv *np = netdev_priv(netdev); 9313 + enum flow_block_binder_type binder_type; 9344 9314 struct ice_pf *pf = np->vsi->back; 9315 + flow_setup_cb_t *flower_handler; 9345 9316 bool locked = false; 9346 9317 int err; 9347 9318 9348 9319 switch (type) { 9349 9320 case TC_SETUP_BLOCK: 9321 + binder_type = 9322 + ((struct flow_block_offload *)type_data)->binder_type; 9323 + 9324 + switch (binder_type) { 9325 + case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: 9326 + flower_handler = ice_setup_tc_block_cb_ingress; 9327 + break; 9328 + case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: 9329 + flower_handler = ice_setup_tc_block_cb_egress; 9330 + break; 9331 + default: 9332 + return -EOPNOTSUPP; 9333 + } 9334 + 
9350 9335 return flow_block_cb_setup_simple(type_data, 9351 9336 &ice_block_cb_list, 9352 - ice_setup_tc_block_cb, 9353 - np, np, true); 9337 + flower_handler, 9338 + np, np, false); 9354 9339 case TC_SETUP_QDISC_MQPRIO: 9355 9340 if (ice_is_eswitch_mode_switchdev(pf)) { 9356 9341 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); ··· 9427 9380 case TC_SETUP_CLSFLOWER: 9428 9381 return ice_setup_tc_cls_flower(np, priv->netdev, 9429 9382 (struct flow_cls_offload *) 9430 - type_data); 9383 + type_data, false); 9431 9384 default: 9432 9385 return -EOPNOTSUPP; 9433 9386 }
+37 -12
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 305 305 u32 hi, lo, lo2; 306 306 u8 tmr_idx; 307 307 308 + if (!ice_is_primary(hw)) 309 + hw = ice_get_primary_hw(pf); 310 + 308 311 tmr_idx = ice_get_ptp_src_clock_index(hw); 309 312 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); 310 313 /* Read the system timestamp pre PHC read */ ··· 2989 2986 } 2990 2987 2991 2988 /** 2989 + * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild 2990 + * @pf: Board private structure 2991 + * @rebuild: rebuild if true, prepare if false 2992 + * @reset_type: the reset type being performed 2993 + */ 2994 + static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild, 2995 + enum ice_reset_req reset_type) 2996 + { 2997 + struct list_head *entry; 2998 + 2999 + list_for_each(entry, &pf->adapter->ports.ports) { 3000 + struct ice_ptp_port *port = list_entry(entry, 3001 + struct ice_ptp_port, 3002 + list_node); 3003 + struct ice_pf *peer_pf = ptp_port_to_pf(port); 3004 + 3005 + if (!ice_is_primary(&peer_pf->hw)) { 3006 + if (rebuild) 3007 + ice_ptp_rebuild(peer_pf, reset_type); 3008 + else 3009 + ice_ptp_prepare_for_reset(peer_pf, reset_type); 3010 + } 3011 + } 3012 + } 3013 + 3014 + /** 2992 3015 * ice_ptp_prepare_for_reset - Prepare PTP for reset 2993 3016 * @pf: Board private structure 2994 3017 * @reset_type: the reset type being performed ··· 3022 2993 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 3023 2994 { 3024 2995 struct ice_ptp *ptp = &pf->ptp; 2996 + struct ice_hw *hw = &pf->hw; 3025 2997 u8 src_tmr; 3026 2998 3027 2999 if (ptp->state != ICE_PTP_READY) ··· 3037 3007 3038 3008 if (reset_type == ICE_RESET_PFR) 3039 3009 return; 3010 + 3011 + if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825) 3012 + ice_ptp_prepare_rebuild_sec(pf, false, reset_type); 3040 3013 3041 3014 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 3042 3015 ··· 3160 3127 err: 3161 3128 ptp->state = ICE_PTP_ERROR; 3162 3129 dev_err(ice_pf_to_dev(pf), "PTP 
reset failed %d\n", err); 3163 - } 3164 - 3165 - static bool ice_is_primary(struct ice_hw *hw) 3166 - { 3167 - return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ? 3168 - !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : 3169 - true; 3170 3130 } 3171 3131 3172 3132 static int ice_ptp_setup_adapter(struct ice_pf *pf) ··· 3381 3355 { 3382 3356 struct ice_ptp *ptp = &pf->ptp; 3383 3357 struct ice_hw *hw = &pf->hw; 3384 - int lane_num, err; 3358 + int err; 3385 3359 3386 3360 ptp->state = ICE_PTP_INITIALIZING; 3387 3361 3388 - lane_num = ice_get_phy_lane_number(hw); 3389 - if (lane_num < 0) { 3390 - err = lane_num; 3362 + if (hw->lane_num < 0) { 3363 + err = hw->lane_num; 3391 3364 goto err_exit; 3392 3365 } 3366 + ptp->port.port_num = hw->lane_num; 3393 3367 3394 - ptp->port.port_num = (u8)lane_num; 3395 3368 ice_ptp_init_hw(hw); 3396 3369 3397 3370 ice_ptp_init_tx_interrupt_mode(pf);
+43 -39
drivers/net/ethernet/intel/ice/ice_ptp_hw.c
··· 240 240 { 241 241 struct ice_sbq_msg_input cgu_msg = { 242 242 .opcode = ice_sbq_msg_rd, 243 - .dest_dev = cgu, 243 + .dest_dev = ice_sbq_dev_cgu, 244 244 .msg_addr_low = addr 245 245 }; 246 246 int err; ··· 272 272 { 273 273 struct ice_sbq_msg_input cgu_msg = { 274 274 .opcode = ice_sbq_msg_wr, 275 - .dest_dev = cgu, 275 + .dest_dev = ice_sbq_dev_cgu, 276 276 .msg_addr_low = addr, 277 277 .data = val 278 278 }; ··· 874 874 */ 875 875 void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 876 876 { 877 + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); 877 878 u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd); 879 + 880 + if (!ice_is_primary(hw)) 881 + hw = ice_get_primary_hw(pf); 878 882 879 883 wr32(hw, GLTSYN_CMD, cmd_val); 880 884 } ··· 894 890 static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) 895 891 { 896 892 struct ice_pf *pf = container_of(hw, struct ice_pf, hw); 893 + 894 + if (!ice_is_primary(hw)) 895 + hw = ice_get_primary_hw(pf); 897 896 898 897 guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); 899 898 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); ··· 926 919 * 927 920 * Return: destination sideband queue PHY device. 928 921 */ 929 - static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw, 930 - u8 port) 922 + static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw, 923 + u8 port) 931 924 { 932 - /* On a single complex E825, PHY 0 is always destination device phy_0 925 + u8 curr_phy, tgt_phy; 926 + 927 + tgt_phy = port >= hw->ptp.ports_per_phy; 928 + curr_phy = hw->lane_num >= hw->ptp.ports_per_phy; 929 + /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY. 930 + * On a single complex E825C, PHY 0 is always destination device phy_0 933 931 * and PHY 1 is phy_0_peer. 932 + * On dual complex E825C, device phy_0 points to PHY on a current 933 + * complex and phy_0_peer to PHY on a different complex. 
934 934 */ 935 - if (port >= hw->ptp.ports_per_phy) 936 - return eth56g_phy_1; 935 + if ((!ice_is_dual(hw) && tgt_phy == 1) || 936 + (ice_is_dual(hw) && tgt_phy != curr_phy)) 937 + return ice_sbq_dev_phy_0_peer; 937 938 else 938 - return eth56g_phy_0; 939 + return ice_sbq_dev_phy_0; 939 940 } 940 941 941 942 /** ··· 2432 2417 static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, 2433 2418 u64 *phy_time, u64 *phc_time) 2434 2419 { 2420 + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); 2435 2421 u64 tx_time, rx_time; 2436 2422 u32 zo, lo; 2437 2423 u8 tmr_idx; ··· 2452 2436 ice_ptp_exec_tmr_cmd(hw); 2453 2437 2454 2438 /* Read the captured PHC time from the shadow time registers */ 2455 - zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); 2456 - lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); 2439 + if (ice_is_primary(hw)) { 2440 + zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); 2441 + lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); 2442 + } else { 2443 + zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx)); 2444 + lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx)); 2445 + } 2457 2446 *phc_time = (u64)lo << 32 | zo; 2458 2447 2459 2448 /* Read the captured PHY time from the PHY shadow registers */ ··· 2595 2574 */ 2596 2575 int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) 2597 2576 { 2577 + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); 2598 2578 u32 lo, hi; 2599 2579 u64 incval; 2600 2580 u8 tmr_idx; ··· 2621 2599 if (err) 2622 2600 return err; 2623 2601 2624 - lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); 2625 - hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); 2602 + if (ice_is_primary(hw)) { 2603 + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); 2604 + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); 2605 + } else { 2606 + lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx)); 2607 + hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx)); 2608 + } 2626 2609 incval = (u64)hi << 32 | lo; 2627 2610 2628 2611 err = ice_write_40b_ptp_reg_eth56g(hw, port, 
PHY_REG_TIMETUS_L, incval); ··· 2658 2631 } 2659 2632 2660 2633 /** 2661 - * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access 2662 - * @hw: pointer to HW struct 2663 - * @enable: Enable or disable access 2664 - * 2665 - * Enable sideband devices (PHY and others) access. 2666 - */ 2667 - static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable) 2668 - { 2669 - u32 val = rd32(hw, PF_SB_REM_DEV_CTL); 2670 - 2671 - if (enable) 2672 - val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1); 2673 - else 2674 - val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1)); 2675 - 2676 - wr32(hw, PF_SB_REM_DEV_CTL, val); 2677 - } 2678 - 2679 - /** 2680 2634 * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization 2681 2635 * @hw: pointer to HW struct 2682 2636 * ··· 2667 2659 */ 2668 2660 static int ice_ptp_init_phc_e825(struct ice_hw *hw) 2669 2661 { 2670 - ice_sb_access_ena_eth56g(hw, true); 2671 - 2672 2662 /* Initialize the Clock Generation Unit */ 2673 2663 return ice_init_cgu_e82x(hw); 2674 2664 } ··· 2753 2747 params->num_phys = 2; 2754 2748 ptp->ports_per_phy = 4; 2755 2749 ptp->num_lports = params->num_phys * ptp->ports_per_phy; 2756 - 2757 - ice_sb_access_ena_eth56g(hw, true); 2758 2750 } 2759 2751 2760 2752 /* E822 family functions ··· 2785 2781 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port); 2786 2782 } 2787 2783 2788 - msg->dest_dev = rmn_0; 2784 + msg->dest_dev = ice_sbq_dev_phy_0; 2789 2785 } 2790 2786 2791 2787 /** ··· 3108 3104 if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports)) 3109 3105 return -EINVAL; 3110 3106 3111 - msg->dest_dev = rmn_0; 3107 + msg->dest_dev = ice_sbq_dev_phy_0; 3112 3108 3113 3109 if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy))) 3114 3110 addr = Q_0_BASE + offset; ··· 4827 4823 msg.msg_addr_low = lower_16_bits(addr); 4828 4824 msg.msg_addr_high = upper_16_bits(addr); 4829 4825 msg.opcode = ice_sbq_msg_rd; 4830 - msg.dest_dev = rmn_0; 4826 + msg.dest_dev = ice_sbq_dev_phy_0; 4831 
4827 4832 4828 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); 4833 4829 if (err) { ··· 4857 4853 msg.msg_addr_low = lower_16_bits(addr); 4858 4854 msg.msg_addr_high = upper_16_bits(addr); 4859 4855 msg.opcode = ice_sbq_msg_wr; 4860 - msg.dest_dev = rmn_0; 4856 + msg.dest_dev = ice_sbq_dev_phy_0; 4861 4857 msg.data = val; 4862 4858 4863 4859 err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
-5
drivers/net/ethernet/intel/ice/ice_ptp_hw.h
··· 444 444 } 445 445 } 446 446 447 - static inline bool ice_is_dual(struct ice_hw *hw) 448 - { 449 - return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); 450 - } 451 - 452 447 #define PFTSYN_SEM_BYTES 4 453 448 454 449 #define ICE_PTP_CLOCK_INDEX_0 0x00
+9 -1
drivers/net/ethernet/intel/ice/ice_repr.c
··· 219 219 { 220 220 switch (flower->command) { 221 221 case FLOW_CLS_REPLACE: 222 - return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower); 222 + return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower, 223 + true); 223 224 case FLOW_CLS_DESTROY: 224 225 return ice_del_cls_flower(repr->src_vsi, flower); 225 226 default: ··· 337 336 static void ice_repr_rem_vf(struct ice_repr *repr) 338 337 { 339 338 ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac); 339 + ice_pass_vf_tx_lldp(repr->src_vsi, true); 340 340 unregister_netdev(repr->netdev); 341 341 ice_devlink_destroy_vf_port(repr->vf); 342 342 ice_virtchnl_set_dflt_ops(repr->vf); ··· 419 417 if (err) 420 418 goto err_netdev; 421 419 420 + err = ice_drop_vf_tx_lldp(repr->src_vsi, true); 421 + if (err) 422 + goto err_drop_lldp; 423 + 422 424 err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac); 423 425 if (err) 424 426 goto err_cfg_vsi; ··· 435 429 return 0; 436 430 437 431 err_cfg_vsi: 432 + ice_pass_vf_tx_lldp(repr->src_vsi, true); 433 + err_drop_lldp: 438 434 unregister_netdev(repr->netdev); 439 435 err_netdev: 440 436 ice_devlink_destroy_vf_port(vf);
+4 -7
drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
··· 46 46 u8 data[24]; 47 47 }; 48 48 49 - enum ice_sbq_msg_dev { 50 - eth56g_phy_0 = 0x02, 51 - rmn_0 = 0x02, 52 - rmn_1 = 0x03, 53 - rmn_2 = 0x04, 54 - cgu = 0x06, 55 - eth56g_phy_1 = 0x0D, 49 + enum ice_sbq_dev_id { 50 + ice_sbq_dev_phy_0 = 0x02, 51 + ice_sbq_dev_cgu = 0x06, 52 + ice_sbq_dev_phy_0_peer = 0x0D, 56 53 }; 57 54 58 55 enum ice_sbq_msg_opcode {
+4
drivers/net/ethernet/intel/ice/ice_sriov.c
··· 63 63 if (vf->lan_vsi_idx != ICE_NO_VSI) { 64 64 ice_vf_vsi_release(vf); 65 65 vf->num_mac = 0; 66 + vf->num_mac_lldp = 0; 66 67 } 67 68 68 69 last_vector_idx = vf->first_vector_idx + vf->num_msix - 1; ··· 1402 1401 } 1403 1402 1404 1403 mutex_lock(&vf->cfg_lock); 1404 + 1405 + while (!trusted && vf->num_mac_lldp) 1406 + ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false); 1405 1407 1406 1408 vf->trusted = trusted; 1407 1409 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+2 -2
drivers/net/ethernet/intel/ice/ice_switch.c
··· 3146 3146 u16 vsi_handle_arr[2]; 3147 3147 3148 3148 /* A rule already exists with the new VSI being added */ 3149 - if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) 3149 + if (cur_fltr->vsi_handle == new_fltr->vsi_handle) 3150 3150 return -EEXIST; 3151 3151 3152 3152 vsi_handle_arr[0] = cur_fltr->vsi_handle; ··· 5978 5978 5979 5979 /* A rule already exists with the new VSI being added */ 5980 5980 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) 5981 - return 0; 5981 + return -EEXIST; 5982 5982 5983 5983 /* Update the previously created VSI list set with 5984 5984 * the new VSI ID passed in
+235 -31
drivers/net/ethernet/intel/ice/ice_tc_lib.c
··· 12 12 /** 13 13 * ice_tc_count_lkups - determine lookup count for switch filter 14 14 * @flags: TC-flower flags 15 - * @headers: Pointer to TC flower filter header structure 16 15 * @fltr: Pointer to outer TC filter structure 17 16 * 18 - * Determine lookup count based on TC flower input for switch filter. 17 + * Return: lookup count based on TC flower input for a switch filter. 19 18 */ 20 - static int 21 - ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, 22 - struct ice_tc_flower_fltr *fltr) 19 + static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr) 23 20 { 24 21 int lkups_cnt = 1; /* 0th lookup is metadata */ 25 22 ··· 681 684 fltr->action.fltr_act = action; 682 685 683 686 if (ice_is_port_repr_netdev(filter_dev) && 684 - ice_is_port_repr_netdev(target_dev)) { 687 + ice_is_port_repr_netdev(target_dev) && 688 + fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { 685 689 repr = ice_netdev_to_repr(target_dev); 686 690 687 691 fltr->dest_vsi = repr->src_vsi; 688 - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; 689 692 } else if (ice_is_port_repr_netdev(filter_dev) && 690 - ice_tc_is_dev_uplink(target_dev)) { 693 + ice_tc_is_dev_uplink(target_dev) && 694 + fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { 691 695 repr = ice_netdev_to_repr(filter_dev); 692 696 693 697 fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; 694 - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; 695 698 } else if (ice_tc_is_dev_uplink(filter_dev) && 696 - ice_is_port_repr_netdev(target_dev)) { 699 + ice_is_port_repr_netdev(target_dev) && 700 + fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { 697 701 repr = ice_netdev_to_repr(target_dev); 698 702 699 703 fltr->dest_vsi = repr->src_vsi; 700 - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; 701 704 } else { 702 705 NL_SET_ERR_MSG_MOD(fltr->extack, 703 - "Unsupported netdevice in switchdev mode"); 706 + "The action is not supported for this netdevice"); 704 707 return -EINVAL; 705 708 } 706 709 ··· 713 716 { 714 717 
fltr->action.fltr_act = ICE_DROP_PACKET; 715 718 716 - if (ice_is_port_repr_netdev(filter_dev)) { 717 - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; 718 - } else if (ice_tc_is_dev_uplink(filter_dev)) { 719 - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; 720 - } else { 719 + if (!ice_tc_is_dev_uplink(filter_dev) && 720 + !(ice_is_port_repr_netdev(filter_dev) && 721 + fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) { 721 722 NL_SET_ERR_MSG_MOD(fltr->extack, 722 - "Unsupported netdevice in switchdev mode"); 723 + "The action is not supported for this netdevice"); 723 724 return -EINVAL; 724 725 } 725 726 ··· 762 767 return 0; 763 768 } 764 769 770 + static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr) 771 + { 772 + return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP); 773 + } 774 + 775 + static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr) 776 + { 777 + struct ice_vsi *vsi = fltr->src_vsi, *uplink; 778 + 779 + if (!ice_is_switchdev_running(vsi->back)) 780 + return false; 781 + 782 + uplink = vsi->back->eswitch.uplink_vsi; 783 + return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET && 784 + ice_is_fltr_lldp(fltr) && 785 + fltr->direction == ICE_ESWITCH_FLTR_EGRESS && 786 + fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID; 787 + } 788 + 789 + static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr) 790 + { 791 + struct ice_vsi *vsi = fltr->src_vsi, *uplink; 792 + 793 + uplink = vsi->back->eswitch.uplink_vsi; 794 + return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) && 795 + fltr->direction == ICE_ESWITCH_FLTR_EGRESS && 796 + fltr->dest_vsi == uplink; 797 + } 798 + 799 + static struct ice_tc_flower_fltr * 800 + ice_find_pf_tx_lldp_fltr(struct ice_pf *pf) 801 + { 802 + struct ice_tc_flower_fltr *fltr; 803 + 804 + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) 805 + if (ice_is_fltr_pf_tx_lldp(fltr)) 806 + return fltr; 807 + 808 + return NULL; 809 + } 810 + 811 + static bool 
ice_any_vf_lldp_tx_ena(struct ice_pf *pf) 812 + { 813 + struct ice_vf *vf; 814 + unsigned int bkt; 815 + 816 + ice_for_each_vf(pf, bkt, vf) 817 + if (vf->lldp_tx_ena) 818 + return true; 819 + 820 + return false; 821 + } 822 + 823 + int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit) 824 + { 825 + struct ice_rule_query_data remove_entry = { 826 + .rid = vsi->vf->lldp_recipe_id, 827 + .rule_id = vsi->vf->lldp_rule_id, 828 + .vsi_handle = vsi->idx, 829 + }; 830 + struct ice_pf *pf = vsi->back; 831 + int err; 832 + 833 + if (vsi->vf->lldp_tx_ena) 834 + return 0; 835 + 836 + if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back)) 837 + return -EINVAL; 838 + 839 + if (!deinit && ice_any_vf_lldp_tx_ena(pf)) 840 + return -EINVAL; 841 + 842 + err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry); 843 + if (!err) 844 + vsi->vf->lldp_tx_ena = true; 845 + 846 + return err; 847 + } 848 + 849 + int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init) 850 + { 851 + struct ice_rule_query_data rule_added; 852 + struct ice_adv_rule_info rinfo = { 853 + .priority = 7, 854 + .src_vsi = vsi->idx, 855 + .sw_act = { 856 + .src = vsi->idx, 857 + .flag = ICE_FLTR_TX, 858 + .fltr_act = ICE_DROP_PACKET, 859 + .vsi_handle = vsi->idx, 860 + }, 861 + .flags_info.act_valid = true, 862 + }; 863 + struct ice_adv_lkup_elem list[3]; 864 + struct ice_pf *pf = vsi->back; 865 + int err; 866 + 867 + if (!init && !vsi->vf->lldp_tx_ena) 868 + return 0; 869 + 870 + memset(list, 0, sizeof(list)); 871 + ice_rule_add_direction_metadata(&list[0]); 872 + ice_rule_add_src_vsi_metadata(&list[1]); 873 + list[2].type = ICE_ETYPE_OL; 874 + list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP); 875 + list[2].m_u.ethertype.ethtype_id = htons(0xFFFF); 876 + 877 + err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo, 878 + &rule_added); 879 + if (err) { 880 + dev_err(&pf->pdev->dev, 881 + "Failed to add an LLDP rule to VSI 0x%X: %d\n", 882 + vsi->idx, err); 883 + } else { 884 + vsi->vf->lldp_recipe_id = 
rule_added.rid; 885 + vsi->vf->lldp_rule_id = rule_added.rule_id; 886 + vsi->vf->lldp_tx_ena = false; 887 + } 888 + 889 + return err; 890 + } 891 + 892 + static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi) 893 + { 894 + struct ice_tc_flower_fltr *fltr; 895 + struct ice_pf *pf = vsi->back; 896 + 897 + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) { 898 + if (!ice_is_fltr_vf_tx_lldp(fltr)) 899 + continue; 900 + ice_pass_vf_tx_lldp(fltr->src_vsi, true); 901 + break; 902 + } 903 + } 904 + 905 + static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf) 906 + { 907 + int i; 908 + 909 + /* Make the VF LLDP fwd to uplink rule dormant */ 910 + ice_for_each_vsi(pf, i) { 911 + struct ice_vsi *vf_vsi = pf->vsi[i]; 912 + 913 + if (vf_vsi && vf_vsi->type == ICE_VSI_VF) 914 + ice_drop_vf_tx_lldp(vf_vsi, false); 915 + } 916 + } 917 + 765 918 static int 766 919 ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) 767 920 { 768 - struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; 769 921 struct ice_adv_rule_info rule_info = { 0 }; 770 922 struct ice_rule_query_data rule_added; 771 923 struct ice_hw *hw = &vsi->back->hw; ··· 927 785 return -EOPNOTSUPP; 928 786 } 929 787 930 - lkups_cnt = ice_tc_count_lkups(flags, headers, fltr); 788 + if (ice_is_fltr_vf_tx_lldp(fltr)) 789 + return ice_pass_vf_tx_lldp(vsi, false); 790 + 791 + lkups_cnt = ice_tc_count_lkups(flags, fltr); 931 792 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); 932 793 if (!list) 933 794 return -ENOMEM; ··· 958 813 rule_info.sw_act.flag |= ICE_FLTR_RX; 959 814 rule_info.sw_act.src = hw->pf_id; 960 815 rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; 816 + } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && 817 + !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) { 818 + /* PF to Uplink */ 819 + rule_info.sw_act.flag |= ICE_FLTR_TX; 820 + rule_info.sw_act.src = vsi->idx; 961 821 } else if (fltr->direction == 
ICE_ESWITCH_FLTR_EGRESS && 962 822 fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) { 963 823 /* VF to Uplink */ ··· 996 846 NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); 997 847 ret = -EINVAL; 998 848 goto exit; 849 + } else if (ret == -ENOSPC) { 850 + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available."); 851 + goto exit; 999 852 } else if (ret) { 1000 853 NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); 1001 854 goto exit; 1002 855 } 856 + 857 + if (ice_is_fltr_pf_tx_lldp(fltr)) 858 + ice_handle_add_pf_lldp_drop_rule(vsi); 1003 859 1004 860 /* store the output params, which are needed later for removing 1005 861 * advanced switch filter ··· 1141 985 ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, 1142 986 struct ice_tc_flower_fltr *tc_fltr) 1143 987 { 1144 - struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; 1145 988 struct ice_adv_rule_info rule_info = {0}; 1146 989 struct ice_rule_query_data rule_added; 1147 990 struct ice_adv_lkup_elem *list; ··· 1176 1021 return PTR_ERR(dest_vsi); 1177 1022 } 1178 1023 1179 - lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); 1024 + lkups_cnt = ice_tc_count_lkups(flags, tc_fltr); 1180 1025 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); 1181 1026 if (!list) 1182 1027 return -ENOMEM; ··· 1211 1056 tc_fltr->action.fwd.q.hw_queue, lkups_cnt); 1212 1057 break; 1213 1058 case ICE_DROP_PACKET: 1214 - rule_info.sw_act.flag |= ICE_FLTR_RX; 1215 - rule_info.sw_act.src = hw->pf_id; 1059 + if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { 1060 + rule_info.sw_act.flag |= ICE_FLTR_TX; 1061 + rule_info.sw_act.src = vsi->idx; 1062 + } else { 1063 + rule_info.sw_act.flag |= ICE_FLTR_RX; 1064 + rule_info.sw_act.src = hw->pf_id; 1065 + } 1216 1066 rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; 1217 1067 break; 1218 1068 default: ··· 1230 1070 NL_SET_ERR_MSG_MOD(tc_fltr->extack, 1231 1071 "Unable to add filter because 
it already exist"); 1232 1072 ret = -EINVAL; 1073 + goto exit; 1074 + } else if (ret == -ENOSPC) { 1075 + NL_SET_ERR_MSG_MOD(tc_fltr->extack, 1076 + "Unable to add filter: insufficient space available."); 1233 1077 goto exit; 1234 1078 } else if (ret) { 1235 1079 NL_SET_ERR_MSG_MOD(tc_fltr->extack, ··· 1627 1463 * @filter_dev: Pointer to device on which filter is being added 1628 1464 * @f: Pointer to struct flow_cls_offload 1629 1465 * @fltr: Pointer to filter structure 1466 + * @ingress: if the rule is added to an ingress block 1467 + * 1468 + * Return: 0 if the flower was parsed successfully, -EINVAL if the flower 1469 + * cannot be parsed, -EOPNOTSUPP if such filter cannot be configured 1470 + * for the given VSI. 1630 1471 */ 1631 1472 static int 1632 1473 ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, 1633 1474 struct flow_cls_offload *f, 1634 - struct ice_tc_flower_fltr *fltr) 1475 + struct ice_tc_flower_fltr *fltr, bool ingress) 1635 1476 { 1636 1477 struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; 1637 1478 struct flow_rule *rule = flow_cls_offload_flow_rule(f); ··· 1718 1549 n_proto_mask = 0; 1719 1550 } else { 1720 1551 fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; 1552 + } 1553 + 1554 + if (!ingress) { 1555 + bool switchdev = 1556 + ice_is_eswitch_mode_switchdev(vsi->back); 1557 + 1558 + if (switchdev != (n_proto_key == ETH_P_LLDP)) { 1559 + NL_SET_ERR_MSG_FMT_MOD(fltr->extack, 1560 + "%sLLDP filtering is not supported on egress in %s mode", 1561 + switchdev ? "Non-" : "", 1562 + switchdev ? "switchdev" : 1563 + "legacy"); 1564 + return -EOPNOTSUPP; 1565 + } 1721 1566 } 1722 1567 1723 1568 headers->l2_key.n_proto = cpu_to_be16(n_proto_key); ··· 1909 1726 return -EINVAL; 1910 1727 } 1911 1728 } 1729 + 1730 + /* Ingress filter on representor results in an egress filter in HW 1731 + * and vice versa 1732 + */ 1733 + ingress = ice_is_port_repr_netdev(filter_dev) ? 
!ingress : ingress; 1734 + fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS : 1735 + ICE_ESWITCH_FLTR_EGRESS; 1736 + 1912 1737 return 0; 1913 1738 } 1914 1739 ··· 2130 1939 struct ice_pf *pf = vsi->back; 2131 1940 int err; 2132 1941 1942 + if (ice_is_fltr_pf_tx_lldp(fltr)) 1943 + ice_handle_del_pf_lldp_drop_rule(pf); 1944 + 1945 + if (ice_is_fltr_vf_tx_lldp(fltr)) 1946 + return ice_drop_vf_tx_lldp(vsi, false); 1947 + 2133 1948 rule_rem.rid = fltr->rid; 2134 1949 rule_rem.rule_id = fltr->rule_id; 2135 1950 rule_rem.vsi_handle = fltr->dest_vsi_handle; ··· 2172 1975 * @vsi: Pointer to VSI 2173 1976 * @f: Pointer to flower offload structure 2174 1977 * @__fltr: Pointer to struct ice_tc_flower_fltr 1978 + * @ingress: if the rule is added to an ingress block 2175 1979 * 2176 1980 * This function parses TC-flower input fields, parses action, 2177 1981 * and adds a filter. 1982 + * 1983 + * Return: 0 if the filter was successfully added, 1984 + * negative error code otherwise. 2178 1985 */ 2179 1986 static int 2180 1987 ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, 2181 1988 struct flow_cls_offload *f, 2182 - struct ice_tc_flower_fltr **__fltr) 1989 + struct ice_tc_flower_fltr **__fltr, bool ingress) 2183 1990 { 2184 1991 struct ice_tc_flower_fltr *fltr; 2185 1992 int err; ··· 2200 1999 fltr->src_vsi = vsi; 2201 2000 INIT_HLIST_NODE(&fltr->tc_flower_node); 2202 2001 2203 - err = ice_parse_cls_flower(netdev, vsi, f, fltr); 2002 + err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress); 2204 2003 if (err < 0) 2205 2004 goto err; 2206 2005 ··· 2243 2042 * @netdev: Pointer to filter device 2244 2043 * @vsi: Pointer to VSI 2245 2044 * @cls_flower: Pointer to flower offload structure 2045 + * @ingress: if the rule is added to an ingress block 2046 + * 2047 + * Return: 0 if the flower was successfully added, 2048 + * negative error code otherwise. 
2246 2049 */ 2247 - int 2248 - ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, 2249 - struct flow_cls_offload *cls_flower) 2050 + int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, 2051 + struct flow_cls_offload *cls_flower, bool ingress) 2250 2052 { 2251 2053 struct netlink_ext_ack *extack = cls_flower->common.extack; 2252 2054 struct net_device *vsi_netdev = vsi->netdev; ··· 2284 2080 } 2285 2081 2286 2082 /* prep and add TC-flower filter in HW */ 2287 - err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr); 2083 + err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress); 2288 2084 if (err) 2289 2085 return err; 2290 2086
+6 -5
drivers/net/ethernet/intel/ice/ice_tc_lib.h
··· 211 211 } 212 212 213 213 struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue); 214 - int 215 - ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, 216 - struct flow_cls_offload *cls_flower); 217 - int 218 - ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); 214 + int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, 215 + struct flow_cls_offload *cls_flower, bool ingress); 216 + int ice_del_cls_flower(struct ice_vsi *vsi, 217 + struct flow_cls_offload *cls_flower); 219 218 void ice_replay_tc_fltrs(struct ice_pf *pf); 220 219 bool ice_is_tunnel_supported(struct net_device *dev); 220 + int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init); 221 + int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit); 221 222 222 223 static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act) 223 224 {
+9 -8
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 2440 2440 2441 2441 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 2442 2442 eth = (struct ethhdr *)skb_mac_header(skb); 2443 - if (unlikely((skb->priority == TC_PRIO_CONTROL || 2444 - eth->h_proto == htons(ETH_P_LLDP)) && 2445 - vsi->type == ICE_VSI_PF && 2446 - vsi->port_info->qos_cfg.is_sw_lldp)) 2443 + 2444 + if ((ice_is_switchdev_running(vsi->back) || 2445 + ice_lag_is_switchdev_running(vsi->back)) && 2446 + vsi->type != ICE_VSI_SF) 2447 + ice_eswitch_set_target_vsi(skb, &offload); 2448 + else if (unlikely((skb->priority == TC_PRIO_CONTROL || 2449 + eth->h_proto == htons(ETH_P_LLDP)) && 2450 + vsi->type == ICE_VSI_PF && 2451 + vsi->port_info->qos_cfg.is_sw_lldp)) 2447 2452 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2448 2453 ICE_TX_CTX_DESC_SWTCH_UPLINK << 2449 2454 ICE_TXD_CTX_QW1_CMD_S); 2450 2455 2451 2456 ice_tstamp(tx_ring, skb, first, &offload); 2452 - if ((ice_is_switchdev_running(vsi->back) || 2453 - ice_lag_is_switchdev_running(vsi->back)) && 2454 - vsi->type != ICE_VSI_SF) 2455 - ice_eswitch_set_target_vsi(skb, &offload); 2456 2457 2457 2458 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2458 2459 struct ice_tx_ctx_desc *cdesc;
+1
drivers/net/ethernet/intel/ice/ice_type.h
··· 970 970 u8 intrl_gran; 971 971 972 972 struct ice_ptp_hw ptp; 973 + s8 lane_num; 973 974 974 975 /* Active package version (currently active) */ 975 976 struct ice_pkg_ver active_pkg_ver;
+26
drivers/net/ethernet/intel/ice/ice_vf_lib.c
··· 226 226 vsi->num_vlan = 0; 227 227 228 228 vf->num_mac = 0; 229 + vf->num_mac_lldp = 0; 229 230 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); 230 231 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); 231 232 } ··· 1401 1400 1402 1401 rcu_read_unlock(); 1403 1402 return ctrl_vsi; 1403 + } 1404 + 1405 + /** 1406 + * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses 1407 + * @vf: a VF to add the address to 1408 + * @vsi: the corresponding VSI 1409 + * @incr: is the rule added or removed 1410 + */ 1411 + void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi, 1412 + bool incr) 1413 + { 1414 + bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags); 1415 + bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw; 1416 + bool is_ena; 1417 + 1418 + if (WARN_ON(!vsi)) { 1419 + vf->num_mac_lldp = 0; 1420 + return; 1421 + } 1422 + 1423 + vf->num_mac_lldp += incr ? 1 : -1; 1424 + is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw; 1425 + 1426 + if (was_ena != is_ena) 1427 + ice_vsi_cfg_sw_lldp(vsi, false, is_ena); 1404 1428 }
+12
drivers/net/ethernet/intel/ice/ice_vf_lib.h
··· 124 124 u8 spoofchk:1; 125 125 u8 link_forced:1; 126 126 u8 link_up:1; /* only valid if VF link is forced */ 127 + u8 lldp_tx_ena:1; 127 128 128 129 u32 ptp_caps; 129 130 ··· 135 134 unsigned long vf_caps; /* VF's adv. capabilities */ 136 135 u8 num_req_qs; /* num of queue pairs requested by VF */ 137 136 u16 num_mac; 137 + u16 num_mac_lldp; 138 138 u16 num_vf_qs; /* num of queue configured per VF */ 139 139 u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */ 140 140 #define ICE_INNER_VLAN_STRIP_ENA BIT(0) ··· 150 148 151 149 /* devlink port data */ 152 150 struct devlink_port devlink_port; 151 + 152 + u16 lldp_recipe_id; 153 + u16 lldp_rule_id; 153 154 154 155 u16 num_msix; /* num of MSI-X configured on this VF */ 155 156 struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF]; ··· 183 178 static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) 184 179 { 185 180 return vf->port_vlan_info.tpid; 181 + } 182 + 183 + static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf) 184 + { 185 + return vf->num_mac_lldp && vf->trusted; 186 186 } 187 187 188 188 /* VF Hash Table access functions ··· 255 245 int ice_reset_vf(struct ice_vf *vf, u32 flags); 256 246 void ice_reset_all_vfs(struct ice_pf *pf); 257 247 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi); 248 + void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi, 249 + bool incr); 258 250 #else /* CONFIG_PCI_IOV */ 259 251 static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) 260 252 {
+50 -3
drivers/net/ethernet/intel/ice/ice_virtchnl.c
··· 2266 2266 } 2267 2267 2268 2268 /** 2269 + * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address 2270 + * @mac: address to check 2271 + * 2272 + * Return: true if the address is one of the three possible LLDP multicast 2273 + * addresses, false otherwise. 2274 + */ 2275 + static bool ice_is_mc_lldp_eth_addr(const u8 *mac) 2276 + { 2277 + const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00}; 2278 + 2279 + if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base))) 2280 + return false; 2281 + 2282 + return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00); 2283 + } 2284 + 2285 + /** 2286 + * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC 2287 + * @vf: a VF to add the address to 2288 + * @mac: address to check 2289 + * 2290 + * Return: true if the VF is allowed to add such MAC address, false otherwise. 2291 + */ 2292 + static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac) 2293 + { 2294 + struct device *dev = ice_pf_to_dev(vf->pf); 2295 + 2296 + if (is_unicast_ether_addr(mac) && 2297 + !ice_can_vf_change_mac((struct ice_vf *)vf)) { 2298 + dev_err(dev, 2299 + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2300 + return false; 2301 + } 2302 + 2303 + if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) { 2304 + dev_warn(dev, 2305 + "An untrusted VF %u is attempting to configure an LLDP multicast address\n", 2306 + vf->vf_id); 2307 + return false; 2308 + } 2309 + 2310 + return true; 2311 + } 2312 + 2313 + /** 2269 2314 * ice_vc_add_mac_addr - attempt to add the MAC address passed in 2270 2315 * @vf: pointer to the VF info 2271 2316 * @vsi: pointer to the VF's VSI ··· 2328 2283 if (ether_addr_equal(mac_addr, vf->dev_lan_addr)) 2329 2284 return 0; 2330 2285 2331 - if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { 2332 - dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up 
the VF interface to resume normal operation\n"); 2286 + if (!ice_vc_can_add_mac(vf, mac_addr)) 2333 2287 return -EPERM; 2334 - } 2335 2288 2336 2289 ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); 2337 2290 if (ret == -EEXIST) { ··· 2344 2301 return ret; 2345 2302 } else { 2346 2303 vf->num_mac++; 2304 + if (ice_is_mc_lldp_eth_addr(mac_addr)) 2305 + ice_vf_update_mac_lldp_num(vf, vsi, true); 2347 2306 } 2348 2307 2349 2308 ice_vfhw_mac_add(vf, vc_ether_addr); ··· 2440 2395 ice_vfhw_mac_del(vf, vc_ether_addr); 2441 2396 2442 2397 vf->num_mac--; 2398 + if (ice_is_mc_lldp_eth_addr(mac_addr)) 2399 + ice_vf_update_mac_lldp_num(vf, vsi, false); 2443 2400 2444 2401 return 0; 2445 2402 }
+3
drivers/net/ethernet/intel/igc/igc_main.c
··· 7125 7125 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 7126 7126 NETDEV_XDP_ACT_XSK_ZEROCOPY; 7127 7127 7128 + /* enable HW vlan tag insertion/stripping by default */ 7129 + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 7130 + 7128 7131 /* MTU range: 68 - 9216 */ 7129 7132 netdev->min_mtu = ETH_MIN_MTU; 7130 7133 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
+2 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
··· 167 167 u16 reg, u16 val, bool lock) 168 168 { 169 169 u32 swfw_mask = hw->phy.phy_semaphore_mask; 170 - int max_retry = 1; 170 + int max_retry = 3; 171 171 int retry = 0; 172 172 u8 reg_high; 173 173 u8 csum; ··· 2285 2285 u8 dev_addr, u8 data, bool lock) 2286 2286 { 2287 2287 u32 swfw_mask = hw->phy.phy_semaphore_mask; 2288 - u32 max_retry = 1; 2288 + u32 max_retry = 3; 2289 2289 u32 retry = 0; 2290 2290 int status; 2291 2291