Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-05-22

This series contains updates to virtchnl and the ice driver.

Geert Uytterhoeven fixes a data structure alignment issue in the
virtchnl structures.

Henry adds Flow Director support, which allows for redirection based on
ntuple rules, over six patches. First, Henry adds the initial
infrastructure for Flow Director, and then later adds IPv4 and IPv6
support, as well as being able to display the ntuple rules.

Bret adds Accelerated Receive Flow Steering (aRFS) support, which is used
to steer receive flows to a specific queue. Fixes a transmit timeout
when the VF link transitions from up/down/up because the transmit and
receive queue interrupts are not enabled as part of VF's link up. Fixes
an issue when the default VF LAN address is changed and after reset the
PF will attempt to add the new MAC, which fails because it already
exists. This causes the VF to be disabled completely until it is removed
and enabled via sysfs.

Anirudh (Ani) makes a fix where the ice driver needs to call set_mac_cfg
to enable jumbo frames, so ensure it gets called during initialization
and after reset. Fixes bad register reads during a register dump in
ethtool by removing the bad registers.

Paul fixes an issue where the receive Malicious Driver Detection (MDD)
auto reset message was not being logged because it occurred after the VF
reset.

Victor adds a check for compatibility between the Dynamic Device
Personalization (DDP) package and the NIC firmware to ensure that
everything aligns.

Jesse fixes an administrative queue string call with the appropriate
error reporting variable. Also fixed the loop variables that are
comparing or assigning signed against unsigned values.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+5407 -113
+3
drivers/net/ethernet/intel/ice/Makefile
··· 18 18 ice_txrx_lib.o \ 19 19 ice_txrx.o \ 20 20 ice_fltr.o \ 21 + ice_fdir.o \ 22 + ice_ethtool_fdir.o \ 21 23 ice_flex_pipe.o \ 22 24 ice_flow.o \ 23 25 ice_devlink.o \ 24 26 ice_ethtool.o 25 27 ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o 26 28 ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o 29 + ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o 27 30 ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
+53
drivers/net/ethernet/intel/ice/ice.h
··· 34 34 #include <linux/ctype.h> 35 35 #include <linux/bpf.h> 36 36 #include <linux/avf/virtchnl.h> 37 + #include <linux/cpu_rmap.h> 37 38 #include <net/devlink.h> 38 39 #include <net/ipv6.h> 39 40 #include <net/xdp_sock.h> ··· 51 50 #include "ice_sched.h" 52 51 #include "ice_virtchnl_pf.h" 53 52 #include "ice_sriov.h" 53 + #include "ice_fdir.h" 54 54 #include "ice_xsk.h" 55 + #include "ice_arfs.h" 55 56 56 57 extern const char ice_drv_ver[]; 57 58 #define ICE_BAR0 0 ··· 69 66 #define ICE_AQ_LEN 64 70 67 #define ICE_MBXSQ_LEN 64 71 68 #define ICE_MIN_MSIX 2 69 + #define ICE_FDIR_MSIX 1 72 70 #define ICE_NO_VSI 0xffff 73 71 #define ICE_VSI_MAP_CONTIG 0 74 72 #define ICE_VSI_MAP_SCATTER 1 ··· 98 94 #define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i])) 99 95 #define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i])) 100 96 #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i])) 97 + #define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i])) 101 98 102 99 /* Macro for each VSI in a PF */ 103 100 #define ice_for_each_vsi(pf, i) \ ··· 219 214 __ICE_CFG_BUSY, 220 215 __ICE_SERVICE_SCHED, 221 216 __ICE_SERVICE_DIS, 217 + __ICE_FD_FLUSH_REQ, 222 218 __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ 223 219 __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ 224 220 __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ ··· 263 257 s16 vf_id; /* VF ID for SR-IOV VSIs */ 264 258 265 259 u16 ethtype; /* Ethernet protocol for pause frame */ 260 + u16 num_gfltr; 261 + u16 num_bfltr; 266 262 267 263 /* RSS config */ 268 264 u16 rss_table_size; /* HW RSS table size */ ··· 272 264 u8 *rss_hkey_user; /* User configured hash keys */ 273 265 u8 *rss_lut_user; /* User configured lookup table entries */ 274 266 u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */ 267 + 268 + /* aRFS members only allocated for the PF VSI */ 269 + #define ICE_MAX_ARFS_LIST 1024 270 + #define 
ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1) 271 + struct hlist_head *arfs_fltr_list; 272 + struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs; 273 + spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ 274 + atomic_t *arfs_last_fltr_id; 275 275 276 276 u16 max_frame; 277 277 u16 rx_buf_len; ··· 355 339 ICE_FLAG_SRIOV_CAPABLE, 356 340 ICE_FLAG_DCB_CAPABLE, 357 341 ICE_FLAG_DCB_ENA, 342 + ICE_FLAG_FD_ENA, 358 343 ICE_FLAG_ADV_FEATURES, 359 344 ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, 360 345 ICE_FLAG_NO_MEDIA, ··· 383 366 * MSIX vectors allowed on this PF. 384 367 */ 385 368 u16 sriov_base_vector; 369 + 370 + u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */ 386 371 387 372 struct ice_vsi **vsi; /* VSIs created by the driver */ 388 373 struct ice_sw *first_sw; /* first switch created by firmware */ ··· 524 505 return NULL; 525 506 } 526 507 508 + /** 509 + * ice_get_ctrl_vsi - Get the control VSI 510 + * @pf: PF instance 511 + */ 512 + static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) 513 + { 514 + /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */ 515 + if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI) 516 + return NULL; 517 + 518 + return pf->vsi[pf->ctrl_vsi_idx]; 519 + } 520 + 521 + #define ICE_FD_STAT_CTR_BLOCK_COUNT 256 522 + #define ICE_FD_STAT_PF_IDX(base_idx) \ 523 + ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT) 524 + #define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx) 525 + 527 526 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); 528 527 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); 528 + int ice_vsi_open_ctrl(struct ice_vsi *vsi); 529 529 void ice_set_ethtool_ops(struct net_device *netdev); 530 530 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); 531 531 u16 ice_get_avail_txq_count(struct ice_pf *pf); ··· 568 530 void ice_print_link_msg(struct ice_vsi *vsi, bool isup); 569 531 const char *ice_stat_str(enum ice_status stat_err); 570 532 const char *ice_aq_str(enum ice_aq_err aq_err); 
533 + int 534 + ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, 535 + bool is_tun); 536 + void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena); 537 + int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); 538 + int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); 539 + int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd); 540 + int 541 + ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd, 542 + u32 *rule_locs); 543 + void ice_fdir_release_flows(struct ice_hw *hw); 544 + void ice_fdir_replay_flows(struct ice_hw *hw); 545 + void ice_fdir_replay_fltrs(struct ice_pf *pf); 546 + int ice_fdir_create_dflt_rules(struct ice_pf *pf); 571 547 int ice_open(struct net_device *netdev); 572 548 int ice_stop(struct net_device *netdev); 549 + void ice_service_task_schedule(struct ice_pf *pf); 573 550 574 551 #endif /* _ICE_H_ */
+33 -1
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 107 107 #define ICE_AQC_CAPS_RXQS 0x0041 108 108 #define ICE_AQC_CAPS_TXQS 0x0042 109 109 #define ICE_AQC_CAPS_MSIX 0x0043 110 + #define ICE_AQC_CAPS_FD 0x0045 110 111 #define ICE_AQC_CAPS_MAX_MTU 0x0047 111 112 112 113 u8 major_ver; ··· 233 232 */ 234 233 #define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 235 234 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 235 + #define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21 236 + #define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22 237 + #define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23 238 + #define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58 239 + #define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59 236 240 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60 237 241 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61 238 242 ··· 245 239 #define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) 246 240 247 241 #define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00 242 + 243 + #define ICE_AQC_RES_TYPE_S 0 244 + #define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S) 248 245 249 246 /* Allocate Resources command (indirect 0x0208) 250 247 * Free Resources command (indirect 0x0209) ··· 1068 1059 u8 rsvd1; 1069 1060 }; 1070 1061 1062 + /* Set MAC Config command data structure (direct 0x0603) */ 1063 + struct ice_aqc_set_mac_cfg { 1064 + __le16 max_frame_size; 1065 + u8 params; 1066 + #define ICE_AQ_SET_MAC_PACE_S 3 1067 + #define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S) 1068 + #define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7) 1069 + #define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0 1070 + #define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M 1071 + u8 tx_tmr_priority; 1072 + __le16 tx_tmr_value; 1073 + __le16 fc_refresh_threshold; 1074 + u8 drop_opts; 1075 + #define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0) 1076 + #define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0 1077 + #define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0) 1078 + u8 reserved[7]; 1079 + }; 1080 + 1071 1081 /* Restart AN command data structure (direct 0x0605) 1072 1082 * Also used for response, with 
only the lport_num field present. 1073 1083 */ ··· 1703 1675 }; 1704 1676 1705 1677 #define ICE_PKG_NAME_SIZE 32 1678 + #define ICE_SEG_NAME_SIZE 28 1706 1679 1707 1680 struct ice_aqc_get_pkg_info { 1708 1681 struct ice_pkg_ver ver; 1709 - char name[ICE_PKG_NAME_SIZE]; 1682 + char name[ICE_SEG_NAME_SIZE]; 1683 + __le32 track_id; 1710 1684 u8 is_in_nvm; 1711 1685 u8 is_active; 1712 1686 u8 is_active_at_boot; ··· 1795 1765 struct ice_aqc_download_pkg download_pkg; 1796 1766 struct ice_aqc_set_mac_lb set_mac_lb; 1797 1767 struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; 1768 + struct ice_aqc_set_mac_cfg set_mac_cfg; 1798 1769 struct ice_aqc_set_event_mask set_event_mask; 1799 1770 struct ice_aqc_get_link_status get_link_status; 1800 1771 struct ice_aqc_event_lan_overflow lan_overflow; ··· 1892 1861 /* PHY commands */ 1893 1862 ice_aqc_opc_get_phy_caps = 0x0600, 1894 1863 ice_aqc_opc_set_phy_cfg = 0x0601, 1864 + ice_aqc_opc_set_mac_cfg = 0x0603, 1895 1865 ice_aqc_opc_restart_an = 0x0605, 1896 1866 ice_aqc_opc_get_link_status = 0x0607, 1897 1867 ice_aqc_opc_set_event_mask = 0x0613,
+663
drivers/net/ethernet/intel/ice/ice_arfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2018-2020, Intel Corporation. */ 3 + 4 + #include "ice.h" 5 + 6 + /** 7 + * ice_is_arfs_active - helper to check is aRFS is active 8 + * @vsi: VSI to check 9 + */ 10 + static bool ice_is_arfs_active(struct ice_vsi *vsi) 11 + { 12 + return !!vsi->arfs_fltr_list; 13 + } 14 + 15 + /** 16 + * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters 17 + * @hw: pointer to the HW structure 18 + * @flow_type: flow type as Flow Director understands it 19 + * 20 + * Flow Director will query this function to see if aRFS is currently using 21 + * the specified flow_type for perfect (4-tuple) filters. 22 + */ 23 + bool 24 + ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type) 25 + { 26 + struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs; 27 + struct ice_pf *pf = hw->back; 28 + struct ice_vsi *vsi; 29 + 30 + vsi = ice_get_main_vsi(pf); 31 + if (!vsi) 32 + return false; 33 + 34 + arfs_fltr_cntrs = vsi->arfs_fltr_cntrs; 35 + 36 + /* active counters can be updated by multiple CPUs */ 37 + smp_mb__before_atomic(); 38 + switch (flow_type) { 39 + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: 40 + return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0; 41 + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: 42 + return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0; 43 + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: 44 + return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0; 45 + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: 46 + return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0; 47 + default: 48 + return false; 49 + } 50 + } 51 + 52 + /** 53 + * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS 54 + * @vsi: VSI that aRFS is active on 55 + * @entry: aRFS entry used to change counters 56 + * @add: true to increment counter, false to decrement 57 + */ 58 + static void 59 + ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi, 60 + struct ice_arfs_entry *entry, bool add) 61 + { 
62 + struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs; 63 + 64 + switch (entry->fltr_info.flow_type) { 65 + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: 66 + if (add) 67 + atomic_inc(&fltr_cntrs->active_tcpv4_cnt); 68 + else 69 + atomic_dec(&fltr_cntrs->active_tcpv4_cnt); 70 + break; 71 + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: 72 + if (add) 73 + atomic_inc(&fltr_cntrs->active_tcpv6_cnt); 74 + else 75 + atomic_dec(&fltr_cntrs->active_tcpv6_cnt); 76 + break; 77 + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: 78 + if (add) 79 + atomic_inc(&fltr_cntrs->active_udpv4_cnt); 80 + else 81 + atomic_dec(&fltr_cntrs->active_udpv4_cnt); 82 + break; 83 + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: 84 + if (add) 85 + atomic_inc(&fltr_cntrs->active_udpv6_cnt); 86 + else 87 + atomic_dec(&fltr_cntrs->active_udpv6_cnt); 88 + break; 89 + default: 90 + dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n", 91 + entry->fltr_info.flow_type); 92 + } 93 + } 94 + 95 + /** 96 + * ice_arfs_del_flow_rules - delete the rules passed in from HW 97 + * @vsi: VSI for the flow rules that need to be deleted 98 + * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion 99 + * 100 + * Loop through the delete list passed in and remove the rules from HW. After 101 + * each rule is deleted, disconnect and free the ice_arfs_entry because it is no 102 + * longer being referenced by the aRFS hash table. 
103 + */ 104 + static void 105 + ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head) 106 + { 107 + struct ice_arfs_entry *e; 108 + struct hlist_node *n; 109 + struct device *dev; 110 + 111 + dev = ice_pf_to_dev(vsi->back); 112 + 113 + hlist_for_each_entry_safe(e, n, del_list_head, list_entry) { 114 + int result; 115 + 116 + result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false, 117 + false); 118 + if (!result) 119 + ice_arfs_update_active_fltr_cntrs(vsi, e, false); 120 + else 121 + dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", 122 + result, e->fltr_state, e->fltr_info.fltr_id, 123 + e->flow_id, e->fltr_info.q_index); 124 + 125 + /* The aRFS hash table is no longer referencing this entry */ 126 + hlist_del(&e->list_entry); 127 + devm_kfree(dev, e); 128 + } 129 + } 130 + 131 + /** 132 + * ice_arfs_add_flow_rules - add the rules passed in from HW 133 + * @vsi: VSI for the flow rules that need to be added 134 + * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition 135 + * 136 + * Loop through the add list passed in and remove the rules from HW. After each 137 + * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free 138 + * the ice_arfs_entry(s) because they are still being referenced in the aRFS 139 + * hash table. 
140 + */ 141 + static void 142 + ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head) 143 + { 144 + struct ice_arfs_entry_ptr *ep; 145 + struct hlist_node *n; 146 + struct device *dev; 147 + 148 + dev = ice_pf_to_dev(vsi->back); 149 + 150 + hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) { 151 + int result; 152 + 153 + result = ice_fdir_write_fltr(vsi->back, 154 + &ep->arfs_entry->fltr_info, true, 155 + false); 156 + if (!result) 157 + ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry, 158 + true); 159 + else 160 + dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", 161 + result, ep->arfs_entry->fltr_state, 162 + ep->arfs_entry->fltr_info.fltr_id, 163 + ep->arfs_entry->flow_id, 164 + ep->arfs_entry->fltr_info.q_index); 165 + 166 + hlist_del(&ep->list_entry); 167 + devm_kfree(dev, ep); 168 + } 169 + } 170 + 171 + /** 172 + * ice_arfs_is_flow_expired - check if the aRFS entry has expired 173 + * @vsi: VSI containing the aRFS entry 174 + * @arfs_entry: aRFS entry that's being checked for expiration 175 + * 176 + * Return true if the flow has expired, else false. This function should be used 177 + * to determine whether or not an aRFS entry should be removed from the hardware 178 + * and software structures. 
179 + */ 180 + static bool 181 + ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry) 182 + { 183 + #define ICE_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000) 184 + if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index, 185 + arfs_entry->flow_id, 186 + arfs_entry->fltr_info.fltr_id)) 187 + return true; 188 + 189 + /* expiration timer only used for UDP filters */ 190 + if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP && 191 + arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP) 192 + return false; 193 + 194 + return time_in_range64(arfs_entry->time_activated + 195 + ICE_ARFS_TIME_DELTA_EXPIRATION, 196 + arfs_entry->time_activated, get_jiffies_64()); 197 + } 198 + 199 + /** 200 + * ice_arfs_update_flow_rules - add/delete aRFS rules in HW 201 + * @vsi: the VSI to be forwarded to 202 + * @idx: index into the table of aRFS filter lists. Obtained from skb->hash 203 + * @add_list: list to populate with filters to be added to Flow Director 204 + * @del_list: list to populate with filters to be deleted from Flow Director 205 + * 206 + * Iterate over the hlist at the index given in the aRFS hash table and 207 + * determine if there are any aRFS entries that need to be either added or 208 + * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the 209 + * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and 210 + * the flow has expired delete the filter from HW. The caller of this function 211 + * is expected to add/delete rules on the add_list/del_list respectively. 
212 + */ 213 + static void 214 + ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx, 215 + struct hlist_head *add_list, 216 + struct hlist_head *del_list) 217 + { 218 + struct ice_arfs_entry *e; 219 + struct hlist_node *n; 220 + struct device *dev; 221 + 222 + dev = ice_pf_to_dev(vsi->back); 223 + 224 + /* go through the aRFS hlist at this idx and check for needed updates */ 225 + hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry) 226 + /* check if filter needs to be added to HW */ 227 + if (e->fltr_state == ICE_ARFS_INACTIVE) { 228 + enum ice_fltr_ptype flow_type = e->fltr_info.flow_type; 229 + struct ice_arfs_entry_ptr *ep = 230 + devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC); 231 + 232 + if (!ep) 233 + continue; 234 + INIT_HLIST_NODE(&ep->list_entry); 235 + /* reference aRFS entry to add HW filter */ 236 + ep->arfs_entry = e; 237 + hlist_add_head(&ep->list_entry, add_list); 238 + e->fltr_state = ICE_ARFS_ACTIVE; 239 + /* expiration timer only used for UDP flows */ 240 + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || 241 + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP) 242 + e->time_activated = get_jiffies_64(); 243 + } else if (e->fltr_state == ICE_ARFS_ACTIVE) { 244 + /* check if filter needs to be removed from HW */ 245 + if (ice_arfs_is_flow_expired(vsi, e)) { 246 + /* remove aRFS entry from hash table for delete 247 + * and to prevent referencing it the next time 248 + * through this hlist index 249 + */ 250 + hlist_del(&e->list_entry); 251 + e->fltr_state = ICE_ARFS_TODEL; 252 + /* save reference to aRFS entry for delete */ 253 + hlist_add_head(&e->list_entry, del_list); 254 + } 255 + } 256 + } 257 + 258 + /** 259 + * ice_sync_arfs_fltrs - update all aRFS filters 260 + * @pf: board private structure 261 + */ 262 + void ice_sync_arfs_fltrs(struct ice_pf *pf) 263 + { 264 + HLIST_HEAD(tmp_del_list); 265 + HLIST_HEAD(tmp_add_list); 266 + struct ice_vsi *pf_vsi; 267 + unsigned int i; 268 + 269 + pf_vsi = ice_get_main_vsi(pf); 270 + if 
(!pf_vsi) 271 + return; 272 + 273 + if (!ice_is_arfs_active(pf_vsi)) 274 + return; 275 + 276 + spin_lock_bh(&pf_vsi->arfs_lock); 277 + /* Once we process aRFS for the PF VSI get out */ 278 + for (i = 0; i < ICE_MAX_ARFS_LIST; i++) 279 + ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list, 280 + &tmp_del_list); 281 + spin_unlock_bh(&pf_vsi->arfs_lock); 282 + 283 + /* use list of ice_arfs_entry(s) for delete */ 284 + ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list); 285 + 286 + /* use list of ice_arfs_entry_ptr(s) for add */ 287 + ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list); 288 + } 289 + 290 + /** 291 + * ice_arfs_build_entry - builds an aRFS entry based on input 292 + * @vsi: destination VSI for this flow 293 + * @fk: flow dissector keys for creating the tuple 294 + * @rxq_idx: Rx queue to steer this flow to 295 + * @flow_id: passed down from the stack and saved for flow expiration 296 + * 297 + * returns an aRFS entry on success and NULL on failure 298 + */ 299 + static struct ice_arfs_entry * 300 + ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk, 301 + u16 rxq_idx, u32 flow_id) 302 + { 303 + struct ice_arfs_entry *arfs_entry; 304 + struct ice_fdir_fltr *fltr_info; 305 + u8 ip_proto; 306 + 307 + arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back), 308 + sizeof(*arfs_entry), 309 + GFP_ATOMIC | __GFP_NOWARN); 310 + if (!arfs_entry) 311 + return NULL; 312 + 313 + fltr_info = &arfs_entry->fltr_info; 314 + fltr_info->q_index = rxq_idx; 315 + fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; 316 + fltr_info->dest_vsi = vsi->idx; 317 + ip_proto = fk->basic.ip_proto; 318 + 319 + if (fk->basic.n_proto == htons(ETH_P_IP)) { 320 + fltr_info->ip.v4.proto = ip_proto; 321 + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? 
322 + ICE_FLTR_PTYPE_NONF_IPV4_TCP : 323 + ICE_FLTR_PTYPE_NONF_IPV4_UDP; 324 + fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src; 325 + fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst; 326 + fltr_info->ip.v4.src_port = fk->ports.src; 327 + fltr_info->ip.v4.dst_port = fk->ports.dst; 328 + } else { /* ETH_P_IPV6 */ 329 + fltr_info->ip.v6.proto = ip_proto; 330 + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? 331 + ICE_FLTR_PTYPE_NONF_IPV6_TCP : 332 + ICE_FLTR_PTYPE_NONF_IPV6_UDP; 333 + memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src, 334 + sizeof(struct in6_addr)); 335 + memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst, 336 + sizeof(struct in6_addr)); 337 + fltr_info->ip.v6.src_port = fk->ports.src; 338 + fltr_info->ip.v6.dst_port = fk->ports.dst; 339 + } 340 + 341 + arfs_entry->flow_id = flow_id; 342 + fltr_info->fltr_id = 343 + atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER; 344 + 345 + return arfs_entry; 346 + } 347 + 348 + /** 349 + * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set 350 + * @hw: pointer to HW structure 351 + * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order 352 + * @l4_proto: IPPROTO_UDP or IPPROTO_TCP 353 + * 354 + * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS 355 + * to check if perfect (4-tuple) flow rules are currently in place by Flow 356 + * Director. 
357 + */ 358 + static bool 359 + ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto) 360 + { 361 + unsigned long *perfect_fltr = hw->fdir_perfect_fltr; 362 + 363 + /* advanced Flow Director disabled, perfect filters always supported */ 364 + if (!perfect_fltr) 365 + return true; 366 + 367 + if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP) 368 + return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr); 369 + else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP) 370 + return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr); 371 + else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP) 372 + return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr); 373 + else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP) 374 + return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr); 375 + 376 + return false; 377 + } 378 + 379 + /** 380 + * ice_rx_flow_steer - steer the Rx flow to where application is being run 381 + * @netdev: ptr to the netdev being adjusted 382 + * @skb: buffer with required header information 383 + * @rxq_idx: queue to which the flow needs to move 384 + * @flow_id: flow identifier provided by the netdev 385 + * 386 + * Based on the skb, rxq_idx, and flow_id passed in add/update an entry in the 387 + * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and 388 + * if the flow_id already exists in the hash table but the rxq_idx has changed 389 + * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW, else 390 + * if the entry is marked as ICE_ARFS_TODEL delete it from the aRFS hash table. 391 + * If neither of the previous conditions are true then add a new entry in the 392 + * aRFS hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be 393 + * added to HW. 
394 + */ 395 + int 396 + ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, 397 + u16 rxq_idx, u32 flow_id) 398 + { 399 + struct ice_netdev_priv *np = netdev_priv(netdev); 400 + struct ice_arfs_entry *arfs_entry; 401 + struct ice_vsi *vsi = np->vsi; 402 + struct flow_keys fk; 403 + struct ice_pf *pf; 404 + __be16 n_proto; 405 + u8 ip_proto; 406 + u16 idx; 407 + int ret; 408 + 409 + /* failed to allocate memory for aRFS so don't crash */ 410 + if (unlikely(!vsi->arfs_fltr_list)) 411 + return -ENODEV; 412 + 413 + pf = vsi->back; 414 + 415 + if (skb->encapsulation) 416 + return -EPROTONOSUPPORT; 417 + 418 + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) 419 + return -EPROTONOSUPPORT; 420 + 421 + n_proto = fk.basic.n_proto; 422 + /* Support only IPV4 and IPV6 */ 423 + if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) || 424 + n_proto == htons(ETH_P_IPV6)) 425 + ip_proto = fk.basic.ip_proto; 426 + else 427 + return -EPROTONOSUPPORT; 428 + 429 + /* Support only TCP and UDP */ 430 + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) 431 + return -EPROTONOSUPPORT; 432 + 433 + /* only support 4-tuple filters for aRFS */ 434 + if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto)) 435 + return -EOPNOTSUPP; 436 + 437 + /* choose the aRFS list bucket based on skb hash */ 438 + idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK; 439 + /* search for entry in the bucket */ 440 + spin_lock_bh(&vsi->arfs_lock); 441 + hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx], 442 + list_entry) { 443 + struct ice_fdir_fltr *fltr_info; 444 + 445 + /* keep searching for the already existing arfs_entry flow */ 446 + if (arfs_entry->flow_id != flow_id) 447 + continue; 448 + 449 + fltr_info = &arfs_entry->fltr_info; 450 + ret = fltr_info->fltr_id; 451 + 452 + if (fltr_info->q_index == rxq_idx || 453 + arfs_entry->fltr_state != ICE_ARFS_ACTIVE) 454 + goto out; 455 + 456 + /* update the queue to forward to on an already existing flow */ 457 + 
fltr_info->q_index = rxq_idx; 458 + arfs_entry->fltr_state = ICE_ARFS_INACTIVE; 459 + ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false); 460 + goto out_schedule_service_task; 461 + } 462 + 463 + arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id); 464 + if (!arfs_entry) { 465 + ret = -ENOMEM; 466 + goto out; 467 + } 468 + 469 + ret = arfs_entry->fltr_info.fltr_id; 470 + INIT_HLIST_NODE(&arfs_entry->list_entry); 471 + hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]); 472 + out_schedule_service_task: 473 + ice_service_task_schedule(pf); 474 + out: 475 + spin_unlock_bh(&vsi->arfs_lock); 476 + return ret; 477 + } 478 + 479 + /** 480 + * ice_init_arfs_cntrs - initialize aRFS counter values 481 + * @vsi: VSI that aRFS counters need to be initialized on 482 + */ 483 + static int ice_init_arfs_cntrs(struct ice_vsi *vsi) 484 + { 485 + if (!vsi || vsi->type != ICE_VSI_PF) 486 + return -EINVAL; 487 + 488 + vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs), 489 + GFP_KERNEL); 490 + if (!vsi->arfs_fltr_cntrs) 491 + return -ENOMEM; 492 + 493 + vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id), 494 + GFP_KERNEL); 495 + if (!vsi->arfs_last_fltr_id) { 496 + kfree(vsi->arfs_fltr_cntrs); 497 + vsi->arfs_fltr_cntrs = NULL; 498 + return -ENOMEM; 499 + } 500 + 501 + return 0; 502 + } 503 + 504 + /** 505 + * ice_init_arfs - initialize aRFS resources 506 + * @vsi: the VSI to be forwarded to 507 + */ 508 + void ice_init_arfs(struct ice_vsi *vsi) 509 + { 510 + struct hlist_head *arfs_fltr_list; 511 + unsigned int i; 512 + 513 + if (!vsi || vsi->type != ICE_VSI_PF) 514 + return; 515 + 516 + arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST, 517 + GFP_KERNEL); 518 + if (!arfs_fltr_list) 519 + return; 520 + 521 + if (ice_init_arfs_cntrs(vsi)) 522 + goto free_arfs_fltr_list; 523 + 524 + for (i = 0; i < ICE_MAX_ARFS_LIST; i++) 525 + INIT_HLIST_HEAD(&arfs_fltr_list[i]); 526 + 527 + spin_lock_init(&vsi->arfs_lock); 
528 + 529 + vsi->arfs_fltr_list = arfs_fltr_list; 530 + 531 + return; 532 + 533 + free_arfs_fltr_list: 534 + kfree(arfs_fltr_list); 535 + } 536 + 537 + /** 538 + * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS 539 + * @vsi: the VSI to be forwarded to 540 + */ 541 + void ice_clear_arfs(struct ice_vsi *vsi) 542 + { 543 + struct device *dev; 544 + unsigned int i; 545 + 546 + if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back || 547 + !vsi->arfs_fltr_list) 548 + return; 549 + 550 + dev = ice_pf_to_dev(vsi->back); 551 + for (i = 0; i < ICE_MAX_ARFS_LIST; i++) { 552 + struct ice_arfs_entry *r; 553 + struct hlist_node *n; 554 + 555 + spin_lock_bh(&vsi->arfs_lock); 556 + hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i], 557 + list_entry) { 558 + hlist_del(&r->list_entry); 559 + devm_kfree(dev, r); 560 + } 561 + spin_unlock_bh(&vsi->arfs_lock); 562 + } 563 + 564 + kfree(vsi->arfs_fltr_list); 565 + vsi->arfs_fltr_list = NULL; 566 + kfree(vsi->arfs_last_fltr_id); 567 + vsi->arfs_last_fltr_id = NULL; 568 + kfree(vsi->arfs_fltr_cntrs); 569 + vsi->arfs_fltr_cntrs = NULL; 570 + } 571 + 572 + /** 573 + * ice_free_cpu_rx_rmap - free setup CPU reverse map 574 + * @vsi: the VSI to be forwarded to 575 + */ 576 + void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) 577 + { 578 + struct net_device *netdev; 579 + 580 + if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list) 581 + return; 582 + 583 + netdev = vsi->netdev; 584 + if (!netdev || !netdev->rx_cpu_rmap || 585 + netdev->reg_state != NETREG_REGISTERED) 586 + return; 587 + 588 + free_irq_cpu_rmap(netdev->rx_cpu_rmap); 589 + netdev->rx_cpu_rmap = NULL; 590 + } 591 + 592 + /** 593 + * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue 594 + * @vsi: the VSI to be forwarded to 595 + */ 596 + int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) 597 + { 598 + struct net_device *netdev; 599 + struct ice_pf *pf; 600 + int base_idx, i; 601 + 602 + if (!vsi || vsi->type != ICE_VSI_PF) 603 + return -EINVAL; 604 
+ 605 + pf = vsi->back; 606 + netdev = vsi->netdev; 607 + if (!pf || !netdev || !vsi->num_q_vectors || 608 + vsi->netdev->reg_state != NETREG_REGISTERED) 609 + return -EINVAL; 610 + 611 + netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n", 612 + vsi->type, netdev->name, vsi->num_q_vectors); 613 + 614 + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors); 615 + if (unlikely(!netdev->rx_cpu_rmap)) 616 + return -EINVAL; 617 + 618 + base_idx = vsi->base_vector; 619 + for (i = 0; i < vsi->num_q_vectors; i++) 620 + if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, 621 + pf->msix_entries[base_idx + i].vector)) { 622 + ice_free_cpu_rx_rmap(vsi); 623 + return -EINVAL; 624 + } 625 + 626 + return 0; 627 + } 628 + 629 + /** 630 + * ice_remove_arfs - remove/clear all aRFS resources 631 + * @pf: device private structure 632 + */ 633 + void ice_remove_arfs(struct ice_pf *pf) 634 + { 635 + struct ice_vsi *pf_vsi; 636 + 637 + pf_vsi = ice_get_main_vsi(pf); 638 + if (!pf_vsi) 639 + return; 640 + 641 + ice_free_cpu_rx_rmap(pf_vsi); 642 + ice_clear_arfs(pf_vsi); 643 + } 644 + 645 + /** 646 + * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset 647 + * @pf: device private structure 648 + */ 649 + void ice_rebuild_arfs(struct ice_pf *pf) 650 + { 651 + struct ice_vsi *pf_vsi; 652 + 653 + pf_vsi = ice_get_main_vsi(pf); 654 + if (!pf_vsi) 655 + return; 656 + 657 + ice_remove_arfs(pf); 658 + if (ice_set_cpu_rx_rmap(pf_vsi)) { 659 + dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n"); 660 + return; 661 + } 662 + ice_init_arfs(pf_vsi); 663 + }
+82
drivers/net/ethernet/intel/ice/ice_arfs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2018-2020, Intel Corporation. */ 3 + 4 + #ifndef _ICE_ARFS_H_ 5 + #define _ICE_ARFS_H_ 6 + enum ice_arfs_fltr_state { 7 + ICE_ARFS_INACTIVE, 8 + ICE_ARFS_ACTIVE, 9 + ICE_ARFS_TODEL, 10 + }; 11 + 12 + struct ice_arfs_entry { 13 + struct ice_fdir_fltr fltr_info; 14 + struct hlist_node list_entry; 15 + u64 time_activated; /* only valid for UDP flows */ 16 + u32 flow_id; 17 + /* fltr_state = 0 - ICE_ARFS_INACTIVE: 18 + * filter needs to be updated or programmed in HW. 19 + * fltr_state = 1 - ICE_ARFS_ACTIVE: 20 + * filter is active and programmed in HW. 21 + * fltr_state = 2 - ICE_ARFS_TODEL: 22 + * filter has been deleted from HW and needs to be removed from 23 + * the aRFS hash table. 24 + */ 25 + u8 fltr_state; 26 + }; 27 + 28 + struct ice_arfs_entry_ptr { 29 + struct ice_arfs_entry *arfs_entry; 30 + struct hlist_node list_entry; 31 + }; 32 + 33 + struct ice_arfs_active_fltr_cntrs { 34 + atomic_t active_tcpv4_cnt; 35 + atomic_t active_tcpv6_cnt; 36 + atomic_t active_udpv4_cnt; 37 + atomic_t active_udpv6_cnt; 38 + }; 39 + 40 + #ifdef CONFIG_RFS_ACCEL 41 + int 42 + ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, 43 + u16 rxq_idx, u32 flow_id); 44 + void ice_clear_arfs(struct ice_vsi *vsi); 45 + void ice_free_cpu_rx_rmap(struct ice_vsi *vsi); 46 + void ice_init_arfs(struct ice_vsi *vsi); 47 + void ice_sync_arfs_fltrs(struct ice_pf *pf); 48 + int ice_set_cpu_rx_rmap(struct ice_vsi *vsi); 49 + void ice_remove_arfs(struct ice_pf *pf); 50 + void ice_rebuild_arfs(struct ice_pf *pf); 51 + bool 52 + ice_is_arfs_using_perfect_flow(struct ice_hw *hw, 53 + enum ice_fltr_ptype flow_type); 54 + #else 55 + #define ice_sync_arfs_fltrs(pf) do {} while (0) 56 + #define ice_init_arfs(vsi) do {} while (0) 57 + #define ice_clear_arfs(vsi) do {} while (0) 58 + #define ice_remove_arfs(pf) do {} while (0) 59 + #define ice_free_cpu_rx_rmap(vsi) do {} while (0) 60 + #define ice_rebuild_arfs(pf) do {} 
while (0) 61 + 62 + static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi) 63 + { 64 + return 0; 65 + } 66 + 67 + static inline int 68 + ice_rx_flow_steer(struct net_device __always_unused *netdev, 69 + const struct sk_buff __always_unused *skb, 70 + u16 __always_unused rxq_idx, u32 __always_unused flow_id) 71 + { 72 + return -EOPNOTSUPP; 73 + } 74 + 75 + static inline bool 76 + ice_is_arfs_using_perfect_flow(struct ice_hw __always_unused *hw, 77 + enum ice_fltr_ptype __always_unused flow_type) 78 + { 79 + return false; 80 + } 81 + #endif /* CONFIG_RFS_ACCEL */ 82 + #endif /* _ICE_ARFS_H_ */
+1
drivers/net/ethernet/intel/ice/ice_base.c
··· 247 247 */ 248 248 switch (vsi->type) { 249 249 case ICE_VSI_LB: 250 + case ICE_VSI_CTRL: 250 251 case ICE_VSI_PF: 251 252 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 252 253 break;
+105
drivers/net/ethernet/intel/ice/ice_common.c
··· 316 316 } 317 317 318 318 /** 319 + * ice_fill_tx_timer_and_fc_thresh 320 + * @hw: pointer to the HW struct 321 + * @cmd: pointer to MAC cfg structure 322 + * 323 + * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command 324 + * descriptor 325 + */ 326 + static void 327 + ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, 328 + struct ice_aqc_set_mac_cfg *cmd) 329 + { 330 + u16 fc_thres_val, tx_timer_val; 331 + u32 val; 332 + 333 + /* We read back the transmit timer and FC threshold value of 334 + * LFC. Thus, we will use index = 335 + * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. 336 + * 337 + * Also, because we are operating on transmit timer and FC 338 + * threshold of LFC, we don't turn on any bit in tx_tmr_priority 339 + */ 340 + #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 341 + 342 + /* Retrieve the transmit timer */ 343 + val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); 344 + tx_timer_val = val & 345 + PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; 346 + cmd->tx_tmr_value = cpu_to_le16(tx_timer_val); 347 + 348 + /* Retrieve the FC threshold */ 349 + val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); 350 + fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M; 351 + 352 + cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val); 353 + } 354 + 355 + /** 356 + * ice_aq_set_mac_cfg 357 + * @hw: pointer to the HW struct 358 + * @max_frame_size: Maximum Frame Size to be supported 359 + * @cd: pointer to command details structure or NULL 360 + * 361 + * Set MAC configuration (0x0603) 362 + */ 363 + enum ice_status 364 + ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) 365 + { 366 + struct ice_aqc_set_mac_cfg *cmd; 367 + struct ice_aq_desc desc; 368 + 369 + cmd = &desc.params.set_mac_cfg; 370 + 371 + if (max_frame_size == 0) 372 + return ICE_ERR_PARAM; 373 + 374 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); 375 + 376 + 
cmd->max_frame_size = cpu_to_le16(max_frame_size); 377 + 378 + ice_fill_tx_timer_and_fc_thresh(hw, cmd); 379 + 380 + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 381 + } 382 + 383 + /** 319 384 * ice_init_fltr_mgmt_struct - initializes filter management list and locks 320 385 * @hw: pointer to the HW struct 321 386 */ ··· 718 653 if (status) 719 654 goto err_unroll_cqinit; 720 655 656 + /* Set bit to enable Flow Director filters */ 657 + wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 658 + INIT_LIST_HEAD(&hw->fdir_list_head); 659 + 721 660 ice_clear_pxe_mode(hw); 722 661 723 662 status = ice_init_nvm(hw); ··· 812 743 813 744 if (status) 814 745 goto err_unroll_fltr_mgmt_struct; 746 + /* enable jumbo frame support at MAC level */ 747 + status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 748 + if (status) 749 + goto err_unroll_fltr_mgmt_struct; 750 + /* Obtain counter base index which would be used by flow director */ 751 + status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base); 752 + if (status) 753 + goto err_unroll_fltr_mgmt_struct; 815 754 status = ice_init_hw_tbls(hw); 816 755 if (status) 817 756 goto err_unroll_fltr_mgmt_struct; ··· 847 770 */ 848 771 void ice_deinit_hw(struct ice_hw *hw) 849 772 { 773 + ice_free_fd_res_cntr(hw, hw->fd_ctr_base); 850 774 ice_cleanup_fltr_mgmt_struct(hw); 851 775 852 776 ice_sched_cleanup_all(hw); ··· 1757 1679 ice_debug(hw, ICE_DBG_INIT, 1758 1680 "%s: msix_vector_first_id = %d\n", prefix, 1759 1681 caps->msix_vector_first_id); 1682 + break; 1683 + case ICE_AQC_CAPS_FD: 1684 + if (dev_p) { 1685 + dev_p->num_flow_director_fltr = number; 1686 + ice_debug(hw, ICE_DBG_INIT, 1687 + "%s: num_flow_director_fltr = %d\n", 1688 + prefix, 1689 + dev_p->num_flow_director_fltr); 1690 + } 1691 + if (func_p) { 1692 + u32 reg_val, val; 1693 + 1694 + reg_val = rd32(hw, GLQF_FD_SIZE); 1695 + val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> 1696 + GLQF_FD_SIZE_FD_GSIZE_S; 1697 + func_p->fd_fltr_guar = 1698 + ice_get_num_per_func(hw, 
val); 1699 + val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> 1700 + GLQF_FD_SIZE_FD_BSIZE_S; 1701 + func_p->fd_fltr_best_effort = val; 1702 + ice_debug(hw, ICE_DBG_INIT, 1703 + "%s: fd_fltr_guar = %d\n", 1704 + prefix, func_p->fd_fltr_guar); 1705 + ice_debug(hw, ICE_DBG_INIT, 1706 + "%s: fd_fltr_best_effort = %d\n", 1707 + prefix, func_p->fd_fltr_best_effort); 1708 + } 1760 1709 break; 1761 1710 case ICE_AQC_CAPS_MAX_MTU: 1762 1711 caps->max_mtu = number;
+2
drivers/net/ethernet/intel/ice/ice_common.h
··· 108 108 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 109 109 struct ice_sq_cd *cd); 110 110 enum ice_status 111 + ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd); 112 + enum ice_status 111 113 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, 112 114 struct ice_link_status *link, struct ice_sq_cd *cd); 113 115 enum ice_status
+5 -4
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
··· 671 671 ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg, 672 672 struct ice_dcb_app_priority_table *app) 673 673 { 674 - int i; 674 + unsigned int i; 675 675 676 676 for (i = 0; i < cfg->numapps; i++) { 677 677 if (app->selector == cfg->app[i].selector && ··· 746 746 { 747 747 struct ice_pf *pf = ice_netdev_to_pf(netdev); 748 748 struct ice_dcbx_cfg *old_cfg, *new_cfg; 749 - int i, j, ret = 0; 749 + unsigned int i, j; 750 + int ret = 0; 750 751 751 752 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) 752 753 return -EINVAL; ··· 870 869 struct ice_port_info *pi; 871 870 struct dcb_app sapp; 872 871 struct ice_pf *pf; 873 - int i; 872 + unsigned int i; 874 873 875 874 if (!netdev) 876 875 return; ··· 942 941 struct ice_dcbx_cfg *new_cfg) 943 942 { 944 943 struct ice_vsi *main_vsi = ice_get_main_vsi(pf); 945 - int i; 944 + unsigned int i; 946 945 947 946 if (!main_vsi) 948 947 return;
+37 -10
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 130 130 ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes), 131 131 ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults), 132 132 ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults), 133 + ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match), 134 + ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status), 133 135 }; 134 136 135 137 static const u32 ice_regs_dump_list[] = { ··· 142 140 QINT_RQCTL(0), 143 141 PFINT_OICR_ENA, 144 142 QRX_ITR(0), 145 - PF0INT_ITR_0(0), 146 - PF0INT_ITR_1(0), 147 - PF0INT_ITR_2(0), 148 143 }; 149 144 150 145 struct ice_priv_flag { ··· 205 206 struct ice_pf *pf = np->vsi->back; 206 207 struct ice_hw *hw = &pf->hw; 207 208 u32 *regs_buf = (u32 *)p; 208 - int i; 209 + unsigned int i; 209 210 210 211 regs->version = 1; 211 212 ··· 308 309 */ 309 310 static bool ice_active_vfs(struct ice_pf *pf) 310 311 { 311 - int i; 312 + unsigned int i; 312 313 313 314 ice_for_each_vf(pf, i) { 314 315 struct ice_vf *vf = &pf->vf[i]; ··· 378 379 0x00000000, 0xFFFFFFFF 379 380 }; 380 381 u32 val, orig_val; 381 - int i; 382 + unsigned int i; 382 383 383 384 orig_val = rd32(hw, reg); 384 385 for (i = 0; i < ARRAY_SIZE(patterns); ++i) { ··· 431 432 GLINT_ITR(2, 1) - GLINT_ITR(2, 0)}, 432 433 {GLINT_CTL, 0xffff0001, 1, 0} 433 434 }; 434 - int i; 435 + unsigned int i; 435 436 436 437 netdev_dbg(netdev, "Register test\n"); 437 438 for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) { ··· 2534 2535 struct ice_vsi *vsi = np->vsi; 2535 2536 2536 2537 switch (cmd->cmd) { 2538 + case ETHTOOL_SRXCLSRLINS: 2539 + return ice_add_fdir_ethtool(vsi, cmd); 2540 + case ETHTOOL_SRXCLSRLDEL: 2541 + return ice_del_fdir_ethtool(vsi, cmd); 2537 2542 case ETHTOOL_SRXFH: 2538 2543 return ice_set_rss_hash_opt(vsi, cmd); 2539 2544 default: ··· 2561 2558 struct ice_netdev_priv *np = netdev_priv(netdev); 2562 2559 struct ice_vsi *vsi = np->vsi; 2563 2560 int ret = -EOPNOTSUPP; 2561 + struct ice_hw *hw; 2562 + 2563 + hw = &vsi->back->hw; 2564 2564 2565 2565 switch 
(cmd->cmd) { 2566 2566 case ETHTOOL_GRXRINGS: 2567 2567 cmd->data = vsi->rss_size; 2568 2568 ret = 0; 2569 + break; 2570 + case ETHTOOL_GRXCLSRLCNT: 2571 + cmd->rule_cnt = hw->fdir_active_fltr; 2572 + /* report total rule count */ 2573 + cmd->data = ice_get_fdir_cnt_all(hw); 2574 + ret = 0; 2575 + break; 2576 + case ETHTOOL_GRXCLSRULE: 2577 + ret = ice_get_ethtool_fdir_entry(hw, cmd); 2578 + break; 2579 + case ETHTOOL_GRXCLSRLALL: 2580 + ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs); 2569 2581 break; 2570 2582 case ETHTOOL_GRXFH: 2571 2583 ice_get_rss_hash_opt(vsi, cmd); ··· 3202 3184 ch->combined_count = ice_get_combined_cnt(vsi); 3203 3185 ch->rx_count = vsi->num_rxq - ch->combined_count; 3204 3186 ch->tx_count = vsi->num_txq - ch->combined_count; 3187 + 3188 + /* report other queues */ 3189 + ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; 3190 + ch->max_other = ch->other_count; 3205 3191 } 3206 3192 3207 3193 /** ··· 3251 3229 if (status) { 3252 3230 dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", 3253 3231 ice_stat_str(status), 3254 - ice_aq_str(hw->adminq.rq_last_status)); 3232 + ice_aq_str(hw->adminq.sq_last_status)); 3255 3233 err = -EIO; 3256 3234 } 3257 3235 ··· 3278 3256 return -EOPNOTSUPP; 3279 3257 } 3280 3258 /* do not support changing other_count */ 3281 - if (ch->other_count) 3259 + if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 
1U : 0U)) 3282 3260 return -EINVAL; 3261 + 3262 + if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) { 3263 + netdev_err(dev, "Cannot set channels when Flow Director filters are active\n"); 3264 + return -EOPNOTSUPP; 3265 + } 3283 3266 3284 3267 curr_combined = ice_get_combined_cnt(vsi); 3285 3268 ··· 3759 3732 struct ice_hw *hw = &pf->hw; 3760 3733 enum ice_status status; 3761 3734 bool is_sfp = false; 3735 + unsigned int i; 3762 3736 u16 offset = 0; 3763 3737 u8 value = 0; 3764 3738 u8 page = 0; 3765 - int i; 3766 3739 3767 3740 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, 3768 3741 &value, 1, 0, NULL);
+1672
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2018-2020, Intel Corporation. */ 3 + 4 + /* flow director ethtool support for ice */ 5 + 6 + #include "ice.h" 7 + #include "ice_lib.h" 8 + #include "ice_flow.h" 9 + 10 + static struct in6_addr full_ipv6_addr_mask = { 11 + .in6_u = { 12 + .u6_addr8 = { 13 + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 14 + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 15 + } 16 + } 17 + }; 18 + 19 + static struct in6_addr zero_ipv6_addr_mask = { 20 + .in6_u = { 21 + .u6_addr8 = { 22 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 23 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 24 + } 25 + } 26 + }; 27 + 28 + /* calls to ice_flow_add_prof require the number of segments in the array 29 + * for segs_cnt. In this code that is one more than the index. 30 + */ 31 + #define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1) 32 + 33 + /** 34 + * ice_fltr_to_ethtool_flow - convert filter type values to ethtool 35 + * flow type values 36 + * @flow: filter type to be converted 37 + * 38 + * Returns the corresponding ethtool flow type. 
39 + */ 40 + static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) 41 + { 42 + switch (flow) { 43 + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: 44 + return TCP_V4_FLOW; 45 + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: 46 + return UDP_V4_FLOW; 47 + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: 48 + return SCTP_V4_FLOW; 49 + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: 50 + return IPV4_USER_FLOW; 51 + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: 52 + return TCP_V6_FLOW; 53 + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: 54 + return UDP_V6_FLOW; 55 + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: 56 + return SCTP_V6_FLOW; 57 + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: 58 + return IPV6_USER_FLOW; 59 + default: 60 + /* 0 is undefined ethtool flow */ 61 + return 0; 62 + } 63 + } 64 + 65 + /** 66 + * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum 67 + * @eth: Ethtool flow type to be converted 68 + * 69 + * Returns flow enum 70 + */ 71 + static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth) 72 + { 73 + switch (eth) { 74 + case TCP_V4_FLOW: 75 + return ICE_FLTR_PTYPE_NONF_IPV4_TCP; 76 + case UDP_V4_FLOW: 77 + return ICE_FLTR_PTYPE_NONF_IPV4_UDP; 78 + case SCTP_V4_FLOW: 79 + return ICE_FLTR_PTYPE_NONF_IPV4_SCTP; 80 + case IPV4_USER_FLOW: 81 + return ICE_FLTR_PTYPE_NONF_IPV4_OTHER; 82 + case TCP_V6_FLOW: 83 + return ICE_FLTR_PTYPE_NONF_IPV6_TCP; 84 + case UDP_V6_FLOW: 85 + return ICE_FLTR_PTYPE_NONF_IPV6_UDP; 86 + case SCTP_V6_FLOW: 87 + return ICE_FLTR_PTYPE_NONF_IPV6_SCTP; 88 + case IPV6_USER_FLOW: 89 + return ICE_FLTR_PTYPE_NONF_IPV6_OTHER; 90 + default: 91 + return ICE_FLTR_PTYPE_NONF_NONE; 92 + } 93 + } 94 + 95 + /** 96 + * ice_is_mask_valid - check mask field set 97 + * @mask: full mask to check 98 + * @field: field for which mask should be valid 99 + * 100 + * If the mask is fully set return true. If it is not valid for field return 101 + * false. 
102 + */ 103 + static bool ice_is_mask_valid(u64 mask, u64 field) 104 + { 105 + return (mask & field) == field; 106 + } 107 + 108 + /** 109 + * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data 110 + * @hw: hardware structure that contains filter list 111 + * @cmd: ethtool command data structure to receive the filter data 112 + * 113 + * Returns 0 on success and -EINVAL on failure 114 + */ 115 + int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd) 116 + { 117 + struct ethtool_rx_flow_spec *fsp; 118 + struct ice_fdir_fltr *rule; 119 + int ret = 0; 120 + u16 idx; 121 + 122 + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; 123 + 124 + mutex_lock(&hw->fdir_fltr_lock); 125 + 126 + rule = ice_fdir_find_fltr_by_idx(hw, fsp->location); 127 + 128 + if (!rule || fsp->location != rule->fltr_id) { 129 + ret = -EINVAL; 130 + goto release_lock; 131 + } 132 + 133 + fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type); 134 + 135 + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); 136 + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); 137 + 138 + switch (fsp->flow_type) { 139 + case IPV4_USER_FLOW: 140 + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 141 + fsp->h_u.usr_ip4_spec.proto = 0; 142 + fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header; 143 + fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos; 144 + fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip; 145 + fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip; 146 + fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip; 147 + fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip; 148 + fsp->m_u.usr_ip4_spec.ip_ver = 0xFF; 149 + fsp->m_u.usr_ip4_spec.proto = 0; 150 + fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header; 151 + fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos; 152 + break; 153 + case TCP_V4_FLOW: 154 + case UDP_V4_FLOW: 155 + case SCTP_V4_FLOW: 156 + fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port; 157 + fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port; 158 + 
fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip; 159 + fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip; 160 + fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port; 161 + fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port; 162 + fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip; 163 + fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip; 164 + break; 165 + case IPV6_USER_FLOW: 166 + fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header; 167 + fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc; 168 + fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto; 169 + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip, 170 + sizeof(struct in6_addr)); 171 + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip, 172 + sizeof(struct in6_addr)); 173 + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip, 174 + sizeof(struct in6_addr)); 175 + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip, 176 + sizeof(struct in6_addr)); 177 + fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header; 178 + fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc; 179 + fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto; 180 + break; 181 + case TCP_V6_FLOW: 182 + case UDP_V6_FLOW: 183 + case SCTP_V6_FLOW: 184 + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip, 185 + sizeof(struct in6_addr)); 186 + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip, 187 + sizeof(struct in6_addr)); 188 + fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port; 189 + fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port; 190 + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, 191 + rule->mask.v6.src_ip, 192 + sizeof(struct in6_addr)); 193 + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, 194 + rule->mask.v6.dst_ip, 195 + sizeof(struct in6_addr)); 196 + fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port; 197 + fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port; 198 + fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc; 199 + fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc; 200 + break; 201 + default: 202 + break; 203 + } 204 
+ 205 + if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) 206 + fsp->ring_cookie = RX_CLS_FLOW_DISC; 207 + else 208 + fsp->ring_cookie = rule->q_index; 209 + 210 + idx = ice_ethtool_flow_to_fltr(fsp->flow_type); 211 + if (idx == ICE_FLTR_PTYPE_NONF_NONE) { 212 + dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n", 213 + rule->flow_type); 214 + ret = -EINVAL; 215 + } 216 + 217 + release_lock: 218 + mutex_unlock(&hw->fdir_fltr_lock); 219 + return ret; 220 + } 221 + 222 + /** 223 + * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters 224 + * @hw: hardware structure containing the filter list 225 + * @cmd: ethtool command data structure 226 + * @rule_locs: ethtool array passed in from OS to receive filter IDs 227 + * 228 + * Returns 0 as expected for success by ethtool 229 + */ 230 + int 231 + ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd, 232 + u32 *rule_locs) 233 + { 234 + struct ice_fdir_fltr *f_rule; 235 + unsigned int cnt = 0; 236 + int val = 0; 237 + 238 + /* report total rule count */ 239 + cmd->data = ice_get_fdir_cnt_all(hw); 240 + 241 + mutex_lock(&hw->fdir_fltr_lock); 242 + 243 + list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) { 244 + if (cnt == cmd->rule_cnt) { 245 + val = -EMSGSIZE; 246 + goto release_lock; 247 + } 248 + rule_locs[cnt] = f_rule->fltr_id; 249 + cnt++; 250 + } 251 + 252 + release_lock: 253 + mutex_unlock(&hw->fdir_fltr_lock); 254 + if (!val) 255 + cmd->rule_cnt = cnt; 256 + return val; 257 + } 258 + 259 + /** 260 + * ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow 261 + * @hw: hardware structure containing the filter list 262 + * @blk: hardware block 263 + * @flow: FDir flow type to release 264 + */ 265 + static struct ice_fd_hw_prof * 266 + ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow) 267 + { 268 + if (blk == ICE_BLK_FD && hw->fdir_prof) 269 + return hw->fdir_prof[flow]; 270 + 271 + return NULL; 272 + } 273 + 274 + /** 
 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 *
 * Removes the flow's profile/entry handles from the HW tables but does NOT
 * free the cached fdir_seg buffers, so ice_fdir_replay_flows() can later
 * reprogram the same flows (see ice_fdir_rem_flow() for the full teardown).
 */
static void
ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
{
	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
	int tun;

	if (!prof)
		return;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		u64 prof_id;
		int j;

		/* one profile per (flow, tunnel/non-tunnel) pair; the ID
		 * encodes both by offsetting tunnel IDs past the ptype range
		 */
		prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
		for (j = 0; j < prof->cnt; j++) {
			u16 vsi_num;

			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
				continue;
			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
			prof->entry_h[j][tun] = 0;
		}
		ice_flow_rem_prof(hw, blk, prof_id);
	}
}

/**
 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow_type: FDir flow type to release
 *
 * Full teardown: erases the flow from the HW tables and also frees the
 * cached flow segments and clears the VSI list, unlike
 * ice_fdir_erase_flow_from_hw() which keeps them for replay.
 */
static void
ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
		  enum ice_fltr_ptype flow_type)
{
	int flow = (int)flow_type & ~FLOW_EXT;
	struct ice_fd_hw_prof *prof;
	int tun, i;

	prof = ice_fdir_get_hw_prof(hw, blk, flow);
	if (!prof)
		return;

	ice_fdir_erase_flow_from_hw(hw, blk, flow);
	for (i = 0; i < prof->cnt; i++)
		prof->vsi_h[i] = 0;
	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		if (!prof->fdir_seg[tun])
			continue;
		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
		prof->fdir_seg[tun] = NULL;
	}
	prof->cnt = 0;
}

/**
 * ice_fdir_release_flows - release all flows in use for later replay
 * @hw: pointer to HW instance
 *
 * Only the HW table entries are erased; the fdir_seg buffers remain
 * allocated so ice_fdir_replay_flows() can rebuild the profiles.
 */
void ice_fdir_release_flows(struct ice_hw *hw)
{
	int flow;

	/* release Flow Director HW table entries */
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
}

/**
 * ice_fdir_replay_flows - replay HW Flow Director filter info
 * @hw: pointer to HW instance
 *
 * Re-adds profiles and entries for every flow that still has a cached
 * profile with a non-zero entry count (i.e. was previously programmed).
 */
void ice_fdir_replay_flows(struct ice_hw *hw)
{
	int flow;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		int tun;

		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
			continue;
		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			struct ice_flow_prof *hw_prof;
			struct ice_fd_hw_prof *prof;
			u64 prof_id;
			int j;

			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			/* NOTE(review): ice_flow_add_prof() status is
			 * ignored here; presumably safe because the
			 * segments were preserved by
			 * ice_fdir_release_flows() - confirm
			 */
			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
					  &hw_prof);
			for (j = 0; j < prof->cnt; j++) {
				enum ice_flow_priority prio;
				u64 entry_h = 0;
				int err;

				prio = ICE_FLOW_PRIO_NORMAL;
				err = ice_flow_add_entry(hw, ICE_BLK_FD,
							 prof_id,
							 prof->vsi_h[0],
							 prof->vsi_h[j],
							 prio, prof->fdir_seg,
							 &entry_h);
				if (err) {
					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
						flow);
					continue;
				}
				prof->entry_h[j][tun] = entry_h;
			}
		}
	}
}

/**
 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 * @fsp: pointer to ethtool Rx flow specification
 * @data: pointer to userdef data structure for storage
 *
 * The 8 bytes of ethtool user-define data encode a flex word (low 16 bits)
 * and a flex offset (bits 16-31). An all-zero mask means "no flex filter".
 *
 * Returns 0 on success, negative error value on failure
 */
static int
ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
			    struct ice_rx_flow_userdef *data)
{
	u64 value, mask;

	memset(data, 0, sizeof(*data));
	if (!(fsp->flow_type & FLOW_EXT))
		return 0;

	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
	if (!mask)
		return 0;

#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
#define ICE_USERDEF_FLEX_OFFS_S	16
#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)

	/* 0x1fe is the maximum value for offsets stored in the internal
	 * filtering tables.
	 */
#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe

	/* mask must cover exactly the 32-bit flex field and the value must
	 * not spill outside it
	 */
	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
	    value > ICE_USERDEF_FLEX_FLTR_M)
		return -EINVAL;

	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
	data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
			    ICE_USERDEF_FLEX_OFFS_S;
	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
		return -EINVAL;

	data->flex_fltr = true;

	return 0;
}

/**
 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 * @hw: pointer to hardware structure
 * @vsi: software VSI structure
 *
 * There are 2 filter pools: guaranteed and best effort(shared). Each VSI can
 * use filters from either pool. The guaranteed pool is divided between VSIs.
 * The best effort filter pool is common to all VSIs and is a device shared
 * resource pool. The number of filters available to this VSI is the sum of
 * the VSIs guaranteed filter pool and the global available best effort
 * filter pool.
 *
 * Returns the number of available flow director filters to this VSI
 */
static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	u16 num_guar;
	u16 num_be;

	/* total guaranteed filters assigned to this VSI */
	num_guar = vsi->num_gfltr;

	/* minus the guaranteed filters programed by this VSI */
	/* NOTE(review): assumes the HW count never exceeds the allocation;
	 * otherwise this u16 subtraction wraps - confirm against HW spec
	 */
	num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
		     VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;

	/* total global best effort filters */
	num_be = hw->func_caps.fd_fltr_best_effort;

	/* minus the global best effort filters programmed */
	num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
		  GLQF_FD_CNT_FD_BCNT_S;

	return num_guar + num_be;
}

/**
 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 * @hw: HW structure containing the FDir flow profile structure(s)
 * @flow: flow type to allocate the flow profile for
 *
 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 * on success and negative on error.
 */
static int
ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
{
	if (!hw)
		return -EINVAL;

	/* lazily allocate the per-ptype pointer array on first use */
	if (!hw->fdir_prof) {
		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
					     ICE_FLTR_PTYPE_MAX,
					     sizeof(*hw->fdir_prof),
					     GFP_KERNEL);
		if (!hw->fdir_prof)
			return -ENOMEM;
	}

	if (!hw->fdir_prof[flow]) {
		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(**hw->fdir_prof),
						   GFP_KERNEL);
		if (!hw->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 * @pf: pointer to the PF structure
 * @seg: protocol header description pointer
 * @flow: filter enum
 * @tun: FDir segment to program
 *
 * Adds one flow profile and two flow entries: one for the main VSI and one
 * for the control VSI (which receives the programming descriptors). On
 * success the profile takes ownership of @seg (stored in fdir_seg[tun]).
 * Returns -EEXIST if an identical input set is already programmed.
 */
static int
ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return -EINVAL;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	err = ice_fdir_alloc_flow_prof(hw, flow);
	if (err)
		return err;

	hw_prof = hw->fdir_prof[flow];
	old_seg = hw_prof->fdir_seg[tun];
	if (old_seg) {
		/* This flow_type already has a changed input set.
		 * If it matches the requested input set then we are
		 * done. Or, if it's different then it's an error.
		 */
		if (!memcmp(old_seg, seg, sizeof(*seg)))
			return -EEXIST;

		/* if there are FDir filters using this flow,
		 * then return error.
		 */
		if (hw->fdir_fltr_cnt[flow]) {
			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
			return -EINVAL;
		}

		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
				flow);
			return -EINVAL;
		}

		/* remove HW filter definition */
		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
	}

	/* Adding a profile, but there is only one header supported.
	 * That is the final parameters are 1 header (segment), no
	 * actions (NULL) and zero actions 0.
	 */
	prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				   TNL_SEG_CNT(tun), &prof);
	if (status)
		return ice_status_to_errno(status);
	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
				    main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				    seg, &entry1_h);
	if (status) {
		err = ice_status_to_errno(status);
		goto err_prof;
	}
	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
				    ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				    seg, &entry2_h);
	if (status) {
		err = ice_status_to_errno(status);
		goto err_entry;
	}

	hw_prof->fdir_seg[tun] = seg;
	hw_prof->entry_h[0][tun] = entry1_h;
	hw_prof->entry_h[1][tun] = entry2_h;
	hw_prof->vsi_h[0] = main_vsi->idx;
	hw_prof->vsi_h[1] = ctrl_vsi->idx;
	if (!hw_prof->cnt)
		hw_prof->cnt = 2;

	return 0;

err_entry:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");

	return err;
}

/**
 * ice_set_init_fdir_seg
 * @seg: flow segment for programming
 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 *
 * Set the configuration for perfect filters to the provided flow segment for
 * programming the HW filter. This is to be called only when initializing
 * filters as this function it assumes no filters exist.
 */
static int
ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
		      enum ice_flow_seg_hdr l3_proto,
		      enum ice_flow_seg_hdr l4_proto)
{
	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;

	if (!seg)
		return -EINVAL;

	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
	} else {
		return -EINVAL;
	}

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else {
		return -EINVAL;
	}

	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);

	/* IP source address */
	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* IP destination address */
	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* Layer 4 source port */
	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* Layer 4 destination port */
	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_create_init_fdir_rule
 * @pf: PF structure
 * @flow: filter enum
 *
 * Creates a default perfect-filter input set for the given flow type,
 * programming both the non-tunneled and (if possible) tunneled HW entries.
 *
 * Return error value or 0 on success.
 */
static int
ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* if there is already a filter rule for kind return -EINVAL */
	if (hw->fdir_prof && hw->fdir_prof[flow] &&
	    hw->fdir_prof[flow]->fdir_seg[0])
		return -EINVAL;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	/* tunnel segments are shifted up one: tun_seg[1] holds the inner
	 * headers; tun_seg[0] is left zeroed for the outer headers
	 */
	tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_UDP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_UDP);
	else
		ret = -EINVAL;
	if (ret)
		goto err_exit;

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
	if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
	if (ret)
		/* could not write tunnel filter, but outer header filter
		 * exists
		 */
		devm_kfree(dev, tun_seg);

	set_bit(flow, hw->fdir_perfect_fltr);
	return ret;
err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	/* NOTE(review): the original ret (-EINVAL/-ENOMEM/...) is discarded
	 * here and replaced with -EOPNOTSUPP - confirm this is intentional
	 */
	return -EOPNOTSUPP;
}

/**
 * ice_set_fdir_ip4_seg
 * @seg: flow segment for programming
 * @tcp_ip4_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv4
 */
static int
ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
		return -EINVAL;

	/* filtering on TOS not supported */
	if (tcp_ip4_spec->tos)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
794 + return -EOPNOTSUPP; 795 + } 796 + 797 + *perfect_fltr = true; 798 + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto); 799 + 800 + /* IP source address */ 801 + if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF)) 802 + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 803 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 804 + ICE_FLOW_FLD_OFF_INVAL, false); 805 + else if (!tcp_ip4_spec->ip4src) 806 + *perfect_fltr = false; 807 + else 808 + return -EOPNOTSUPP; 809 + 810 + /* IP destination address */ 811 + if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) 812 + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 813 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 814 + ICE_FLOW_FLD_OFF_INVAL, false); 815 + else if (!tcp_ip4_spec->ip4dst) 816 + *perfect_fltr = false; 817 + else 818 + return -EOPNOTSUPP; 819 + 820 + /* Layer 4 source port */ 821 + if (tcp_ip4_spec->psrc == htons(0xFFFF)) 822 + ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL, 823 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 824 + false); 825 + else if (!tcp_ip4_spec->psrc) 826 + *perfect_fltr = false; 827 + else 828 + return -EOPNOTSUPP; 829 + 830 + /* Layer 4 destination port */ 831 + if (tcp_ip4_spec->pdst == htons(0xFFFF)) 832 + ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL, 833 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 834 + false); 835 + else if (!tcp_ip4_spec->pdst) 836 + *perfect_fltr = false; 837 + else 838 + return -EOPNOTSUPP; 839 + 840 + return 0; 841 + } 842 + 843 + /** 844 + * ice_set_fdir_ip4_usr_seg 845 + * @seg: flow segment for programming 846 + * @usr_ip4_spec: ethtool userdef packet offset 847 + * @perfect_fltr: only valid on success; returns true if perfect filter, 848 + * false if not 849 + * 850 + * Set the offset data into the flow segment to be used to program HW 851 + * table for IPv4 852 + */ 853 + static int 854 + ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg, 855 + struct ethtool_usrip4_spec *usr_ip4_spec, 856 + bool *perfect_fltr) 
857 + { 858 + /* first 4 bytes of Layer 4 header */ 859 + if (usr_ip4_spec->l4_4_bytes) 860 + return -EINVAL; 861 + if (usr_ip4_spec->tos) 862 + return -EINVAL; 863 + if (usr_ip4_spec->ip_ver) 864 + return -EINVAL; 865 + /* Filtering on Layer 4 protocol not supported */ 866 + if (usr_ip4_spec->proto) 867 + return -EOPNOTSUPP; 868 + /* empty rules are not valid */ 869 + if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst) 870 + return -EINVAL; 871 + 872 + *perfect_fltr = true; 873 + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4); 874 + 875 + /* IP source address */ 876 + if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF)) 877 + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 878 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 879 + ICE_FLOW_FLD_OFF_INVAL, false); 880 + else if (!usr_ip4_spec->ip4src) 881 + *perfect_fltr = false; 882 + else 883 + return -EOPNOTSUPP; 884 + 885 + /* IP destination address */ 886 + if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) 887 + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 888 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 889 + ICE_FLOW_FLD_OFF_INVAL, false); 890 + else if (!usr_ip4_spec->ip4dst) 891 + *perfect_fltr = false; 892 + else 893 + return -EOPNOTSUPP; 894 + 895 + return 0; 896 + } 897 + 898 + /** 899 + * ice_set_fdir_ip6_seg 900 + * @seg: flow segment for programming 901 + * @tcp_ip6_spec: mask data from ethtool 902 + * @l4_proto: Layer 4 protocol to program 903 + * @perfect_fltr: only valid on success; returns true if perfect filter, 904 + * false if not 905 + * 906 + * Set the mask data into the flow segment to be used to program HW 907 + * table based on provided L4 protocol for IPv6 908 + */ 909 + static int 910 + ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg, 911 + struct ethtool_tcpip6_spec *tcp_ip6_spec, 912 + enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr) 913 + { 914 + enum ice_flow_field src_port, dst_port; 915 + 916 + /* make sure we don't have any empty rule */ 917 + if 
(!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask, 918 + sizeof(struct in6_addr)) && 919 + !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask, 920 + sizeof(struct in6_addr)) && 921 + !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst) 922 + return -EINVAL; 923 + 924 + /* filtering on TC not supported */ 925 + if (tcp_ip6_spec->tclass) 926 + return -EOPNOTSUPP; 927 + 928 + if (l4_proto == ICE_FLOW_SEG_HDR_TCP) { 929 + src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT; 930 + dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT; 931 + } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) { 932 + src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT; 933 + dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT; 934 + } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) { 935 + src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT; 936 + dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT; 937 + } else { 938 + return -EINVAL; 939 + } 940 + 941 + *perfect_fltr = true; 942 + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto); 943 + 944 + if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask, 945 + sizeof(struct in6_addr))) 946 + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA, 947 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 948 + ICE_FLOW_FLD_OFF_INVAL, false); 949 + else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask, 950 + sizeof(struct in6_addr))) 951 + *perfect_fltr = false; 952 + else 953 + return -EOPNOTSUPP; 954 + 955 + if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask, 956 + sizeof(struct in6_addr))) 957 + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA, 958 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 959 + ICE_FLOW_FLD_OFF_INVAL, false); 960 + else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask, 961 + sizeof(struct in6_addr))) 962 + *perfect_fltr = false; 963 + else 964 + return -EOPNOTSUPP; 965 + 966 + /* Layer 4 source port */ 967 + if (tcp_ip6_spec->psrc == htons(0xFFFF)) 968 + ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL, 969 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 970 + 
false); 971 + else if (!tcp_ip6_spec->psrc) 972 + *perfect_fltr = false; 973 + else 974 + return -EOPNOTSUPP; 975 + 976 + /* Layer 4 destination port */ 977 + if (tcp_ip6_spec->pdst == htons(0xFFFF)) 978 + ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL, 979 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 980 + false); 981 + else if (!tcp_ip6_spec->pdst) 982 + *perfect_fltr = false; 983 + else 984 + return -EOPNOTSUPP; 985 + 986 + return 0; 987 + } 988 + 989 + /** 990 + * ice_set_fdir_ip6_usr_seg 991 + * @seg: flow segment for programming 992 + * @usr_ip6_spec: ethtool userdef packet offset 993 + * @perfect_fltr: only valid on success; returns true if perfect filter, 994 + * false if not 995 + * 996 + * Set the offset data into the flow segment to be used to program HW 997 + * table for IPv6 998 + */ 999 + static int 1000 + ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg, 1001 + struct ethtool_usrip6_spec *usr_ip6_spec, 1002 + bool *perfect_fltr) 1003 + { 1004 + /* filtering on Layer 4 bytes not supported */ 1005 + if (usr_ip6_spec->l4_4_bytes) 1006 + return -EOPNOTSUPP; 1007 + /* filtering on TC not supported */ 1008 + if (usr_ip6_spec->tclass) 1009 + return -EOPNOTSUPP; 1010 + /* filtering on Layer 4 protocol not supported */ 1011 + if (usr_ip6_spec->l4_proto) 1012 + return -EOPNOTSUPP; 1013 + /* empty rules are not valid */ 1014 + if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask, 1015 + sizeof(struct in6_addr)) && 1016 + !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask, 1017 + sizeof(struct in6_addr))) 1018 + return -EINVAL; 1019 + 1020 + *perfect_fltr = true; 1021 + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6); 1022 + 1023 + if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask, 1024 + sizeof(struct in6_addr))) 1025 + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA, 1026 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 1027 + ICE_FLOW_FLD_OFF_INVAL, false); 1028 + else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask, 1029 + 
sizeof(struct in6_addr))) 1030 + *perfect_fltr = false; 1031 + else 1032 + return -EOPNOTSUPP; 1033 + 1034 + if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask, 1035 + sizeof(struct in6_addr))) 1036 + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA, 1037 + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, 1038 + ICE_FLOW_FLD_OFF_INVAL, false); 1039 + else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask, 1040 + sizeof(struct in6_addr))) 1041 + *perfect_fltr = false; 1042 + else 1043 + return -EOPNOTSUPP; 1044 + 1045 + return 0; 1046 + } 1047 + 1048 + /** 1049 + * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter 1050 + * @pf: PF structure 1051 + * @fsp: pointer to ethtool Rx flow specification 1052 + * @user: user defined data from flow specification 1053 + * 1054 + * Returns 0 on success. 1055 + */ 1056 + static int 1057 + ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, 1058 + struct ice_rx_flow_userdef *user) 1059 + { 1060 + struct ice_flow_seg_info *seg, *tun_seg; 1061 + struct device *dev = ice_pf_to_dev(pf); 1062 + enum ice_fltr_ptype fltr_idx; 1063 + struct ice_hw *hw = &pf->hw; 1064 + bool perfect_filter; 1065 + int ret; 1066 + 1067 + seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); 1068 + if (!seg) 1069 + return -ENOMEM; 1070 + 1071 + tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX, 1072 + GFP_KERNEL); 1073 + if (!tun_seg) { 1074 + devm_kfree(dev, seg); 1075 + return -ENOMEM; 1076 + } 1077 + 1078 + switch (fsp->flow_type & ~FLOW_EXT) { 1079 + case TCP_V4_FLOW: 1080 + ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec, 1081 + ICE_FLOW_SEG_HDR_TCP, 1082 + &perfect_filter); 1083 + break; 1084 + case UDP_V4_FLOW: 1085 + ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec, 1086 + ICE_FLOW_SEG_HDR_UDP, 1087 + &perfect_filter); 1088 + break; 1089 + case SCTP_V4_FLOW: 1090 + ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec, 1091 + ICE_FLOW_SEG_HDR_SCTP, 1092 + 
&perfect_filter); 1093 + break; 1094 + case IPV4_USER_FLOW: 1095 + ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec, 1096 + &perfect_filter); 1097 + break; 1098 + case TCP_V6_FLOW: 1099 + ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec, 1100 + ICE_FLOW_SEG_HDR_TCP, 1101 + &perfect_filter); 1102 + break; 1103 + case UDP_V6_FLOW: 1104 + ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec, 1105 + ICE_FLOW_SEG_HDR_UDP, 1106 + &perfect_filter); 1107 + break; 1108 + case SCTP_V6_FLOW: 1109 + ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec, 1110 + ICE_FLOW_SEG_HDR_SCTP, 1111 + &perfect_filter); 1112 + break; 1113 + case IPV6_USER_FLOW: 1114 + ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec, 1115 + &perfect_filter); 1116 + break; 1117 + default: 1118 + ret = -EINVAL; 1119 + } 1120 + if (ret) 1121 + goto err_exit; 1122 + 1123 + /* tunnel segments are shifted up one. */ 1124 + memcpy(&tun_seg[1], seg, sizeof(*seg)); 1125 + 1126 + if (user && user->flex_fltr) { 1127 + perfect_filter = false; 1128 + ice_flow_add_fld_raw(seg, user->flex_offset, 1129 + ICE_FLTR_PRGM_FLEX_WORD_SIZE, 1130 + ICE_FLOW_FLD_OFF_INVAL, 1131 + ICE_FLOW_FLD_OFF_INVAL); 1132 + ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset, 1133 + ICE_FLTR_PRGM_FLEX_WORD_SIZE, 1134 + ICE_FLOW_FLD_OFF_INVAL, 1135 + ICE_FLOW_FLD_OFF_INVAL); 1136 + } 1137 + 1138 + /* add filter for outer headers */ 1139 + fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT); 1140 + ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx, 1141 + ICE_FD_HW_SEG_NON_TUN); 1142 + if (ret == -EEXIST) 1143 + /* Rule already exists, free memory and continue */ 1144 + devm_kfree(dev, seg); 1145 + else if (ret) 1146 + /* could not write filter, free memory */ 1147 + goto err_exit; 1148 + 1149 + /* make tunneled filter HW entries if possible */ 1150 + memcpy(&tun_seg[1], seg, sizeof(*seg)); 1151 + ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx, 1152 + ICE_FD_HW_SEG_TUN); 1153 + if (ret == -EEXIST) 
{ 1154 + /* Rule already exists, free memory and count as success */ 1155 + devm_kfree(dev, tun_seg); 1156 + ret = 0; 1157 + } else if (ret) { 1158 + /* could not write tunnel filter, but outer filter exists */ 1159 + devm_kfree(dev, tun_seg); 1160 + } 1161 + 1162 + if (perfect_filter) 1163 + set_bit(fltr_idx, hw->fdir_perfect_fltr); 1164 + else 1165 + clear_bit(fltr_idx, hw->fdir_perfect_fltr); 1166 + 1167 + return ret; 1168 + 1169 + err_exit: 1170 + devm_kfree(dev, tun_seg); 1171 + devm_kfree(dev, seg); 1172 + 1173 + return -EOPNOTSUPP; 1174 + } 1175 + 1176 + /** 1177 + * ice_fdir_write_fltr - send a flow director filter to the hardware 1178 + * @pf: PF data structure 1179 + * @input: filter structure 1180 + * @add: true adds filter and false removed filter 1181 + * @is_tun: true adds inner filter on tunnel and false outer headers 1182 + * 1183 + * returns 0 on success and negative value on error 1184 + */ 1185 + int 1186 + ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, 1187 + bool is_tun) 1188 + { 1189 + struct device *dev = ice_pf_to_dev(pf); 1190 + struct ice_hw *hw = &pf->hw; 1191 + struct ice_fltr_desc desc; 1192 + struct ice_vsi *ctrl_vsi; 1193 + enum ice_status status; 1194 + u8 *pkt, *frag_pkt; 1195 + bool has_frag; 1196 + int err; 1197 + 1198 + ctrl_vsi = ice_get_ctrl_vsi(pf); 1199 + if (!ctrl_vsi) 1200 + return -EINVAL; 1201 + 1202 + pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL); 1203 + if (!pkt) 1204 + return -ENOMEM; 1205 + frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL); 1206 + if (!frag_pkt) { 1207 + err = -ENOMEM; 1208 + goto err_free; 1209 + } 1210 + 1211 + ice_fdir_get_prgm_desc(hw, input, &desc, add); 1212 + status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); 1213 + if (status) { 1214 + err = ice_status_to_errno(status); 1215 + goto err_free_all; 1216 + } 1217 + err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); 1218 + if (err) 1219 + goto err_free_all; 1220 + 1221 + 
	/* repeat for fragment packet */
	has_frag = ice_fdir_has_frag(input->flow_type);
	if (has_frag) {
		/* does not return error */
		ice_fdir_get_prgm_desc(hw, input, &desc, add);
		status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
						   is_tun);
		if (status) {
			err = ice_status_to_errno(status);
			goto err_frag;
		}
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
		if (err)
			goto err_frag;
	} else {
		devm_kfree(dev, frag_pkt);
	}

	/* NOTE(review): pkt (and frag_pkt on the fragment path) is not
	 * freed on success or via err_frag - presumably it is owned by the
	 * programmed descriptor until HW completion, and devres reclaims it
	 * at detach; confirm against ice_prgm_fdir_fltr()
	 */
	return 0;

err_free_all:
	devm_kfree(dev, frag_pkt);
err_free:
	devm_kfree(dev, pkt);
	return err;

err_frag:
	devm_kfree(dev, frag_pkt);
	return err;
}

/**
 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds filter and false removed filter
 *
 * Writes the outer-header filter and, when a tunnel port is open, the
 * tunneled variant as well.
 *
 * returns 0 on success and negative value on error
 */
static int
ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
			bool add)
{
	u16 port_num;
	int tun;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
		int err;

		/* skip the tunnel pass when no tunnel port is configured */
		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,
							&port_num))
			continue;
		err = ice_fdir_write_fltr(pf, input, add, is_tun);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ice_fdir_replay_fltrs - replay filters from the HW filter list
 * @pf: board private structure
 */
void ice_fdir_replay_fltrs(struct ice_pf *pf)
{
	struct ice_fdir_fltr *f_rule;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		int err = ice_fdir_write_all_fltr(pf, f_rule, true);

		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
				err, f_rule->fltr_id);
	}
}

/**
 * ice_fdir_create_dflt_rules - create default perfect filters
 * @pf: PF data structure
 *
 * Returns 0 for success or error.
 */
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
	int err;

	/* Create perfect TCP and UDP rules in hardware. */
	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);

	return err;
}

/**
 * ice_vsi_manage_fdir - turn on/off flow director
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * On disable, removes every filter from the list and tears down all HW
 * flow definitions under fdir_fltr_lock.
 */
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
	struct ice_fdir_fltr *f_rule, *tmp;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_fltr_ptype flow;

	if (ena) {
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		ice_fdir_create_dflt_rules(pf);
		return;
	}

	mutex_lock(&hw->fdir_fltr_lock);
	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
		goto release_lock;
	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
		/* ignore return value */
		ice_fdir_write_all_fltr(pf, f_rule, false);
		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
		list_del(&f_rule->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), f_rule);
	}

	if (hw->fdir_prof)
		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
		     flow++)
			if (hw->fdir_prof[flow])
				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
}

/**
 * ice_fdir_update_list_entry - add or delete a filter from the filter list
 * @pf: PF structure
 * @input: filter structure
 * @fltr_idx: ethtool index of filter to modify
 *
 * Passing @input == NULL deletes the entry at @fltr_idx; otherwise the old
 * entry (if any) is replaced with @input. Caller is expected to hold
 * fdir_fltr_lock (both callers in this file do).
 *
 * returns 0 on success and negative on errors
 */
static int
ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
			   int fltr_idx)
{
	struct ice_fdir_fltr *old_fltr;
	struct ice_hw *hw = &pf->hw;
	int err = -ENOENT;

	/* Do not update filters during reset */
	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
	if (old_fltr) {
		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
		if (err)
			return err;
		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
			/* we just deleted the last filter of flow_type so we
			 * should also delete the HW filter info.
			 */
			ice_fdir_rem_flow(hw, ICE_BLK_FD, old_fltr->flow_type);
		list_del(&old_fltr->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), old_fltr);
	}
	if (!input)
		return err;
	ice_fdir_list_add_fltr(hw, input);
	ice_fdir_update_cntrs(hw, input->flow_type, true);
	return 0;
}

/**
 * ice_del_fdir_ethtool - delete Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add or delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int val;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not delete filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	if (test_bit(__ICE_FD_FLUSH_REQ, pf->state))
		return -EBUSY;

	mutex_lock(&hw->fdir_fltr_lock);
	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
	mutex_unlock(&hw->fdir_fltr_lock);

	return val;
}

/**
 * ice_set_fdir_input_set - Set the input set for Flow Director
 * @vsi: pointer to target VSI
 * @fsp: pointer to ethtool Rx flow specification
 * @input: filter structure
 */
static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
		       struct ice_fdir_fltr *input)
{
	u16 dest_vsi, q_index = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int flow_type;
	u8 dest_ctl;

	if (!vsi ||
!fsp || !input) 1458 + return -EINVAL; 1459 + 1460 + pf = vsi->back; 1461 + hw = &pf->hw; 1462 + 1463 + dest_vsi = vsi->idx; 1464 + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { 1465 + dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT; 1466 + } else { 1467 + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); 1468 + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); 1469 + 1470 + if (vf) { 1471 + dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n"); 1472 + return -EINVAL; 1473 + } 1474 + 1475 + if (ring >= vsi->num_rxq) 1476 + return -EINVAL; 1477 + 1478 + dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; 1479 + q_index = ring; 1480 + } 1481 + 1482 + input->fltr_id = fsp->location; 1483 + input->q_index = q_index; 1484 + flow_type = fsp->flow_type & ~FLOW_EXT; 1485 + 1486 + input->dest_vsi = dest_vsi; 1487 + input->dest_ctl = dest_ctl; 1488 + input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID; 1489 + input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base); 1490 + input->flow_type = ice_ethtool_flow_to_fltr(flow_type); 1491 + 1492 + if (fsp->flow_type & FLOW_EXT) { 1493 + memcpy(input->ext_data.usr_def, fsp->h_ext.data, 1494 + sizeof(input->ext_data.usr_def)); 1495 + input->ext_data.vlan_type = fsp->h_ext.vlan_etype; 1496 + input->ext_data.vlan_tag = fsp->h_ext.vlan_tci; 1497 + memcpy(input->ext_mask.usr_def, fsp->m_ext.data, 1498 + sizeof(input->ext_mask.usr_def)); 1499 + input->ext_mask.vlan_type = fsp->m_ext.vlan_etype; 1500 + input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci; 1501 + } 1502 + 1503 + switch (flow_type) { 1504 + case TCP_V4_FLOW: 1505 + case UDP_V4_FLOW: 1506 + case SCTP_V4_FLOW: 1507 + input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst; 1508 + input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc; 1509 + input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst; 1510 + input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src; 1511 + input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst; 1512 + 
input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc; 1513 + input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst; 1514 + input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src; 1515 + break; 1516 + case IPV4_USER_FLOW: 1517 + input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst; 1518 + input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src; 1519 + input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes; 1520 + input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto; 1521 + input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver; 1522 + input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos; 1523 + input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst; 1524 + input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src; 1525 + input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; 1526 + input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto; 1527 + input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver; 1528 + input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos; 1529 + break; 1530 + case TCP_V6_FLOW: 1531 + case UDP_V6_FLOW: 1532 + case SCTP_V6_FLOW: 1533 + memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, 1534 + sizeof(struct in6_addr)); 1535 + memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src, 1536 + sizeof(struct in6_addr)); 1537 + input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst; 1538 + input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc; 1539 + input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass; 1540 + memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst, 1541 + sizeof(struct in6_addr)); 1542 + memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src, 1543 + sizeof(struct in6_addr)); 1544 + input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst; 1545 + input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc; 1546 + input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass; 1547 + break; 1548 + case IPV6_USER_FLOW: 1549 + memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, 1550 + sizeof(struct in6_addr)); 1551 + memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src, 1552 + sizeof(struct 
in6_addr)); 1553 + input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; 1554 + input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass; 1555 + input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto; 1556 + memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, 1557 + sizeof(struct in6_addr)); 1558 + memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src, 1559 + sizeof(struct in6_addr)); 1560 + input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; 1561 + input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass; 1562 + input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto; 1563 + break; 1564 + default: 1565 + /* not doing un-parsed flow types */ 1566 + return -EINVAL; 1567 + } 1568 + 1569 + return 0; 1570 + } 1571 + 1572 + /** 1573 + * ice_add_fdir_ethtool - Add/Remove Flow Director filter 1574 + * @vsi: pointer to target VSI 1575 + * @cmd: command to add or delete Flow Director filter 1576 + * 1577 + * Returns 0 on success and negative values for failure 1578 + */ 1579 + int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) 1580 + { 1581 + struct ice_rx_flow_userdef userdata; 1582 + struct ethtool_rx_flow_spec *fsp; 1583 + struct ice_fdir_fltr *input; 1584 + struct device *dev; 1585 + struct ice_pf *pf; 1586 + struct ice_hw *hw; 1587 + int fltrs_needed; 1588 + u16 tunnel_port; 1589 + int ret; 1590 + 1591 + if (!vsi) 1592 + return -EINVAL; 1593 + 1594 + pf = vsi->back; 1595 + hw = &pf->hw; 1596 + dev = ice_pf_to_dev(pf); 1597 + 1598 + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) 1599 + return -EOPNOTSUPP; 1600 + 1601 + /* Do not program filters during reset */ 1602 + if (ice_is_reset_in_progress(pf->state)) { 1603 + dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n"); 1604 + return -EBUSY; 1605 + } 1606 + 1607 + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; 1608 + 1609 + if (ice_parse_rx_flow_user_data(fsp, &userdata)) 1610 + return -EINVAL; 1611 + 1612 + if (fsp->flow_type & FLOW_MAC_EXT) 1613 + return 
-EINVAL; 1614 + 1615 + ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata); 1616 + if (ret) 1617 + return ret; 1618 + 1619 + if (fsp->location >= ice_get_fdir_cnt_all(hw)) { 1620 + dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); 1621 + return -ENOSPC; 1622 + } 1623 + 1624 + /* return error if not an update and no available filters */ 1625 + fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ? 1626 + 2 : 1; 1627 + if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && 1628 + ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { 1629 + dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); 1630 + return -ENOSPC; 1631 + } 1632 + 1633 + input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL); 1634 + if (!input) 1635 + return -ENOMEM; 1636 + 1637 + ret = ice_set_fdir_input_set(vsi, fsp, input); 1638 + if (ret) 1639 + goto free_input; 1640 + 1641 + mutex_lock(&hw->fdir_fltr_lock); 1642 + if (ice_fdir_is_dup_fltr(hw, input)) { 1643 + ret = -EINVAL; 1644 + goto release_lock; 1645 + } 1646 + 1647 + if (userdata.flex_fltr) { 1648 + input->flex_fltr = true; 1649 + input->flex_word = cpu_to_be16(userdata.flex_word); 1650 + input->flex_offset = userdata.flex_offset; 1651 + } 1652 + 1653 + /* input struct is added to the HW filter list */ 1654 + ice_fdir_update_list_entry(pf, input, fsp->location); 1655 + 1656 + ret = ice_fdir_write_all_fltr(pf, input, true); 1657 + if (ret) 1658 + goto remove_sw_rule; 1659 + 1660 + goto release_lock; 1661 + 1662 + remove_sw_rule: 1663 + ice_fdir_update_cntrs(hw, input->flow_type, false); 1664 + list_del(&input->fltr_node); 1665 + release_lock: 1666 + mutex_unlock(&hw->fdir_fltr_lock); 1667 + free_input: 1668 + if (ret) 1669 + devm_kfree(dev, input); 1670 + 1671 + return ret; 1672 + }
(diffstat: 840 lines added)
New file: drivers/net/ethernet/intel/ice/ice_fdir.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2018-2020, Intel Corporation. */ 3 + 4 + #include "ice_common.h" 5 + 6 + /* These are training packet headers used to program flow director filters. */ 7 + static const u8 ice_fdir_tcpv4_pkt[] = { 8 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 9 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 10 + 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, 11 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 12 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 13 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 14 + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00 15 + }; 16 + 17 + static const u8 ice_fdir_udpv4_pkt[] = { 18 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 19 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 20 + 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 21 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 22 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 23 + 0x00, 0x00, 24 + }; 25 + 26 + static const u8 ice_fdir_sctpv4_pkt[] = { 27 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 28 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 29 + 0x00, 0x20, 0x00, 0x00, 0x40, 0x00, 0x40, 0x84, 30 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 31 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 32 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 33 + }; 34 + 35 + static const u8 ice_fdir_ipv4_pkt[] = { 36 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 37 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 38 + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x10, 39 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 40 + 0x00, 0x00 41 + }; 42 + 43 + static const u8 ice_fdir_tcpv6_pkt[] = { 44 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 45 + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, 46 + 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, 0x00, 0x00, 47 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 48 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 49 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 50 + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 51 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 52 + 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 53 + 0x00, 0x00, 54 + }; 55 + 56 + static const u8 ice_fdir_udpv6_pkt[] = { 57 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 58 + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, 59 + 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00, 60 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 61 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 62 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 63 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 64 + 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 65 + }; 66 + 67 + static const u8 ice_fdir_sctpv6_pkt[] = { 68 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 69 + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, 70 + 0x00, 0x00, 0x00, 0x0C, 0x84, 0x40, 0x00, 0x00, 71 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 72 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 73 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 74 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 75 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 76 + 0x00, 0x00, 77 + }; 78 + 79 + static const u8 ice_fdir_ipv6_pkt[] = { 80 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 81 + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, 82 + 0x00, 0x00, 0x00, 0x00, 0x3B, 0x40, 0x00, 0x00, 83 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 84 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 85 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 86 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 87 + }; 88 + 89 + static const u8 ice_fdir_tcp4_tun_pkt[] = { 90 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 91 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 92 + 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 93 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 94 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 95 + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 96 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 97 + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 98 + 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 99 + 0x40, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 100 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 101 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 102 + 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 103 + }; 104 + 105 + static const u8 ice_fdir_udp4_tun_pkt[] = { 106 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 107 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 108 + 0x00, 0x4e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 109 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 110 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 111 + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 112 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 113 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 114 + 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 115 + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 116 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 117 + 0x00, 0x00, 0x00, 0x00, 118 + }; 119 + 120 + static const u8 ice_fdir_sctp4_tun_pkt[] = { 121 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 122 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 123 + 0x00, 0x52, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 124 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 125 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 126 + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 127 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 128 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 129 + 0x45, 0x00, 0x00, 0x20, 0x00, 0x01, 0x00, 0x00, 130 + 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 131 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 132 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 133 + }; 134 + 135 + static const u8 ice_fdir_ip4_tun_pkt[] = { 136 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 137 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 138 + 0x00, 0x46, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 139 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 140 + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 141 + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 142 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 143 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 144 + 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 145 + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 146 + 0x00, 0x00, 0x00, 0x00, 147 + }; 148 + 149 + static const u8 ice_fdir_tcp6_tun_pkt[] = { 150 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 151 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 152 + 0x00, 0x6e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 153 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 154 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 155 + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 156 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 157 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 158 + 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, 159 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 160 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 161 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 162 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 163 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 164 + 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 165 + 0x00, 0x00, 0x00, 0x00, 166 + }; 167 + 168 + static const u8 ice_fdir_udp6_tun_pkt[] = { 169 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 170 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 171 + 0x00, 0x62, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 172 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 173 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 174 + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 175 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 176 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 177 + 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 178 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 179 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 180 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 181 + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 182 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 183 + }; 184 + 185 + static const u8 ice_fdir_sctp6_tun_pkt[] = { 186 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 187 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 188 + 0x00, 0x66, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 189 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 190 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 191 + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 192 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 193 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 194 + 0x60, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x84, 0x40, 195 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 196 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 197 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 198 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 199 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 200 + 0x00, 0x00, 0x00, 0x00, 201 + }; 202 + 203 + static const u8 ice_fdir_ip6_tun_pkt[] = { 204 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 205 + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 206 + 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 207 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 208 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 209 + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 210 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 211 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 212 + 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40, 213 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 214 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 215 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 216 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 217 + }; 218 + 219 + /* Flow Director no-op training packet table */ 220 + static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { 221 + { 222 + ICE_FLTR_PTYPE_NONF_IPV4_TCP, 223 + sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt, 224 + sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt, 225 + }, 226 
+ { 227 + ICE_FLTR_PTYPE_NONF_IPV4_UDP, 228 + sizeof(ice_fdir_udpv4_pkt), ice_fdir_udpv4_pkt, 229 + sizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt, 230 + }, 231 + { 232 + ICE_FLTR_PTYPE_NONF_IPV4_SCTP, 233 + sizeof(ice_fdir_sctpv4_pkt), ice_fdir_sctpv4_pkt, 234 + sizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt, 235 + }, 236 + { 237 + ICE_FLTR_PTYPE_NONF_IPV4_OTHER, 238 + sizeof(ice_fdir_ipv4_pkt), ice_fdir_ipv4_pkt, 239 + sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt, 240 + }, 241 + { 242 + ICE_FLTR_PTYPE_NONF_IPV6_TCP, 243 + sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt, 244 + sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt, 245 + }, 246 + { 247 + ICE_FLTR_PTYPE_NONF_IPV6_UDP, 248 + sizeof(ice_fdir_udpv6_pkt), ice_fdir_udpv6_pkt, 249 + sizeof(ice_fdir_udp6_tun_pkt), ice_fdir_udp6_tun_pkt, 250 + }, 251 + { 252 + ICE_FLTR_PTYPE_NONF_IPV6_SCTP, 253 + sizeof(ice_fdir_sctpv6_pkt), ice_fdir_sctpv6_pkt, 254 + sizeof(ice_fdir_sctp6_tun_pkt), ice_fdir_sctp6_tun_pkt, 255 + }, 256 + { 257 + ICE_FLTR_PTYPE_NONF_IPV6_OTHER, 258 + sizeof(ice_fdir_ipv6_pkt), ice_fdir_ipv6_pkt, 259 + sizeof(ice_fdir_ip6_tun_pkt), ice_fdir_ip6_tun_pkt, 260 + }, 261 + }; 262 + 263 + #define ICE_FDIR_NUM_PKT ARRAY_SIZE(ice_fdir_pkt) 264 + 265 + /** 266 + * ice_set_dflt_val_fd_desc 267 + * @fd_fltr_ctx: pointer to fd filter descriptor 268 + */ 269 + static void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx) 270 + { 271 + fd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO; 272 + fd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; 273 + fd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST; 274 + fd_fltr_ctx->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; 275 + fd_fltr_ctx->evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE; 276 + fd_fltr_ctx->toq = ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX; 277 + fd_fltr_ctx->toq_prio = ICE_FXD_FLTR_QW0_TO_Q_PRIO1; 278 + fd_fltr_ctx->dpu_recipe = ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT; 279 + fd_fltr_ctx->drop = 
ICE_FXD_FLTR_QW0_DROP_NO; 280 + fd_fltr_ctx->flex_prio = ICE_FXD_FLTR_QW0_FLEX_PRI_NONE; 281 + fd_fltr_ctx->flex_mdid = ICE_FXD_FLTR_QW0_FLEX_MDID0; 282 + fd_fltr_ctx->flex_val = ICE_FXD_FLTR_QW0_FLEX_VAL0; 283 + fd_fltr_ctx->dtype = ICE_TX_DESC_DTYPE_FLTR_PROG; 284 + fd_fltr_ctx->desc_prof_prio = ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO; 285 + fd_fltr_ctx->desc_prof = ICE_FXD_FLTR_QW1_PROF_ZERO; 286 + fd_fltr_ctx->swap = ICE_FXD_FLTR_QW1_SWAP_SET; 287 + fd_fltr_ctx->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE; 288 + fd_fltr_ctx->fdid_mdid = ICE_FXD_FLTR_QW1_FDID_MDID_FD; 289 + fd_fltr_ctx->fdid = ICE_FXD_FLTR_QW1_FDID_ZERO; 290 + } 291 + 292 + /** 293 + * ice_set_fd_desc_val 294 + * @ctx: pointer to fd filter descriptor context 295 + * @fdir_desc: populated with fd filter descriptor values 296 + */ 297 + static void 298 + ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx, 299 + struct ice_fltr_desc *fdir_desc) 300 + { 301 + u64 qword; 302 + 303 + /* prep QW0 of FD filter programming desc */ 304 + qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) & 305 + ICE_FXD_FLTR_QW0_QINDEX_M; 306 + qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) & 307 + ICE_FXD_FLTR_QW0_COMP_Q_M; 308 + qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) & 309 + ICE_FXD_FLTR_QW0_COMP_REPORT_M; 310 + qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) & 311 + ICE_FXD_FLTR_QW0_FD_SPACE_M; 312 + qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) & 313 + ICE_FXD_FLTR_QW0_STAT_CNT_M; 314 + qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) & 315 + ICE_FXD_FLTR_QW0_STAT_ENA_M; 316 + qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) & 317 + ICE_FXD_FLTR_QW0_EVICT_ENA_M; 318 + qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) & 319 + ICE_FXD_FLTR_QW0_TO_Q_M; 320 + qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) & 321 + ICE_FXD_FLTR_QW0_TO_Q_PRI_M; 322 + qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) & 323 + 
ICE_FXD_FLTR_QW0_DPU_RECIPE_M; 324 + qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) & 325 + ICE_FXD_FLTR_QW0_DROP_M; 326 + qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) & 327 + ICE_FXD_FLTR_QW0_FLEX_PRI_M; 328 + qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) & 329 + ICE_FXD_FLTR_QW0_FLEX_MDID_M; 330 + qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) & 331 + ICE_FXD_FLTR_QW0_FLEX_VAL_M; 332 + fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword); 333 + 334 + /* prep QW1 of FD filter programming desc */ 335 + qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) & 336 + ICE_FXD_FLTR_QW1_DTYPE_M; 337 + qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) & 338 + ICE_FXD_FLTR_QW1_PCMD_M; 339 + qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) & 340 + ICE_FXD_FLTR_QW1_PROF_PRI_M; 341 + qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) & 342 + ICE_FXD_FLTR_QW1_PROF_M; 343 + qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) & 344 + ICE_FXD_FLTR_QW1_FD_VSI_M; 345 + qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) & 346 + ICE_FXD_FLTR_QW1_SWAP_M; 347 + qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) & 348 + ICE_FXD_FLTR_QW1_FDID_PRI_M; 349 + qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) & 350 + ICE_FXD_FLTR_QW1_FDID_MDID_M; 351 + qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) & 352 + ICE_FXD_FLTR_QW1_FDID_M; 353 + fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword); 354 + } 355 + 356 + /** 357 + * ice_fdir_get_prgm_desc - set a fdir descriptor from a fdir filter struct 358 + * @hw: pointer to the hardware structure 359 + * @input: filter 360 + * @fdesc: filter descriptor 361 + * @add: if add is true, this is an add operation, false implies delete 362 + */ 363 + void 364 + ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, 365 + struct ice_fltr_desc *fdesc, bool add) 366 + { 367 + struct ice_fd_fltr_desc_ctx fdir_fltr_ctx = { 0 }; 368 
+ 369 + /* set default context info */ 370 + ice_set_dflt_val_fd_desc(&fdir_fltr_ctx); 371 + 372 + /* change sideband filtering values */ 373 + fdir_fltr_ctx.fdid = input->fltr_id; 374 + if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) { 375 + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES; 376 + fdir_fltr_ctx.qindex = 0; 377 + } else { 378 + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO; 379 + fdir_fltr_ctx.qindex = input->q_index; 380 + } 381 + fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; 382 + fdir_fltr_ctx.cnt_index = input->cnt_index; 383 + fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi); 384 + fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE; 385 + fdir_fltr_ctx.toq_prio = 3; 386 + fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD : 387 + ICE_FXD_FLTR_QW1_PCMD_REMOVE; 388 + fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET; 389 + fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO; 390 + fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; 391 + fdir_fltr_ctx.fdid_prio = 3; 392 + fdir_fltr_ctx.desc_prof = 1; 393 + fdir_fltr_ctx.desc_prof_prio = 3; 394 + ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc); 395 + } 396 + 397 + /** 398 + * ice_alloc_fd_res_cntr - obtain counter resource for FD type 399 + * @hw: pointer to the hardware structure 400 + * @cntr_id: returns counter index 401 + */ 402 + enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) 403 + { 404 + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, 405 + ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); 406 + } 407 + 408 + /** 409 + * ice_free_fd_res_cntr - Free counter resource for FD type 410 + * @hw: pointer to the hardware structure 411 + * @cntr_id: counter index to be freed 412 + */ 413 + enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) 414 + { 415 + return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, 416 + ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); 417 + } 418 + 419 + /** 
420 + * ice_alloc_fd_guar_item - allocate resource for FD guaranteed entries 421 + * @hw: pointer to the hardware structure 422 + * @cntr_id: returns counter index 423 + * @num_fltr: number of filter entries to be allocated 424 + */ 425 + enum ice_status 426 + ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) 427 + { 428 + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, 429 + ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, 430 + cntr_id); 431 + } 432 + 433 + /** 434 + * ice_alloc_fd_shrd_item - allocate resource for flow director shared entries 435 + * @hw: pointer to the hardware structure 436 + * @cntr_id: returns counter index 437 + * @num_fltr: number of filter entries to be allocated 438 + */ 439 + enum ice_status 440 + ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) 441 + { 442 + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, 443 + ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, 444 + cntr_id); 445 + } 446 + 447 + /** 448 + * ice_get_fdir_cnt_all - get the number of Flow Director filters 449 + * @hw: hardware data structure 450 + * 451 + * Returns the number of filters available on device 452 + */ 453 + int ice_get_fdir_cnt_all(struct ice_hw *hw) 454 + { 455 + return hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort; 456 + } 457 + 458 + /** 459 + * ice_pkt_insert_ipv6_addr - insert a be32 IPv6 address into a memory buffer 460 + * @pkt: packet buffer 461 + * @offset: offset into buffer 462 + * @addr: IPv6 address to convert and insert into pkt at offset 463 + */ 464 + static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr) 465 + { 466 + int idx; 467 + 468 + for (idx = 0; idx < ICE_IPV6_ADDR_LEN_AS_U32; idx++) 469 + memcpy(pkt + offset + idx * sizeof(*addr), &addr[idx], 470 + sizeof(*addr)); 471 + } 472 + 473 + /** 474 + * ice_pkt_insert_u16 - insert a be16 value into a memory buffer 475 + * @pkt: packet buffer 476 + * @offset: offset into buffer 477 + * 
@data: 16 bit value to convert and insert into pkt at offset 478 + */ 479 + static void ice_pkt_insert_u16(u8 *pkt, int offset, __be16 data) 480 + { 481 + memcpy(pkt + offset, &data, sizeof(data)); 482 + } 483 + 484 + /** 485 + * ice_pkt_insert_u32 - insert a be32 value into a memory buffer 486 + * @pkt: packet buffer 487 + * @offset: offset into buffer 488 + * @data: 32 bit value to convert and insert into pkt at offset 489 + */ 490 + static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data) 491 + { 492 + memcpy(pkt + offset, &data, sizeof(data)); 493 + } 494 + 495 + /** 496 + * ice_fdir_get_gen_prgm_pkt - generate a training packet 497 + * @hw: pointer to the hardware structure 498 + * @input: flow director filter data structure 499 + * @pkt: pointer to return filter packet 500 + * @frag: generate a fragment packet 501 + * @tun: true implies generate a tunnel packet 502 + */ 503 + enum ice_status 504 + ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, 505 + u8 *pkt, bool frag, bool tun) 506 + { 507 + enum ice_fltr_ptype flow; 508 + u16 tnl_port; 509 + u8 *loc; 510 + u16 idx; 511 + 512 + if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { 513 + switch (input->ip.v4.proto) { 514 + case IPPROTO_TCP: 515 + flow = ICE_FLTR_PTYPE_NONF_IPV4_TCP; 516 + break; 517 + case IPPROTO_UDP: 518 + flow = ICE_FLTR_PTYPE_NONF_IPV4_UDP; 519 + break; 520 + case IPPROTO_SCTP: 521 + flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP; 522 + break; 523 + case IPPROTO_IP: 524 + flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; 525 + break; 526 + default: 527 + return ICE_ERR_PARAM; 528 + } 529 + } else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { 530 + switch (input->ip.v6.proto) { 531 + case IPPROTO_TCP: 532 + flow = ICE_FLTR_PTYPE_NONF_IPV6_TCP; 533 + break; 534 + case IPPROTO_UDP: 535 + flow = ICE_FLTR_PTYPE_NONF_IPV6_UDP; 536 + break; 537 + case IPPROTO_SCTP: 538 + flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP; 539 + break; 540 + case IPPROTO_IP: 541 + flow = 
ICE_FLTR_PTYPE_NONF_IPV6_OTHER; 542 + break; 543 + default: 544 + return ICE_ERR_PARAM; 545 + } 546 + } else { 547 + flow = input->flow_type; 548 + } 549 + 550 + for (idx = 0; idx < ICE_FDIR_NUM_PKT; idx++) 551 + if (ice_fdir_pkt[idx].flow == flow) 552 + break; 553 + if (idx == ICE_FDIR_NUM_PKT) 554 + return ICE_ERR_PARAM; 555 + if (!tun) { 556 + memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len); 557 + loc = pkt; 558 + } else { 559 + if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port)) 560 + return ICE_ERR_DOES_NOT_EXIST; 561 + if (!ice_fdir_pkt[idx].tun_pkt) 562 + return ICE_ERR_PARAM; 563 + memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, 564 + ice_fdir_pkt[idx].tun_pkt_len); 565 + ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET, 566 + htons(tnl_port)); 567 + loc = &pkt[ICE_FDIR_TUN_PKT_OFF]; 568 + } 569 + 570 + /* Reverse the src and dst, since the HW expects them to be from Tx 571 + * perspective. The input from user is from Rx filter perspective. 572 + */ 573 + switch (flow) { 574 + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: 575 + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, 576 + input->ip.v4.src_ip); 577 + ice_pkt_insert_u16(loc, ICE_IPV4_TCP_DST_PORT_OFFSET, 578 + input->ip.v4.src_port); 579 + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, 580 + input->ip.v4.dst_ip); 581 + ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET, 582 + input->ip.v4.dst_port); 583 + if (frag) 584 + loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF; 585 + break; 586 + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: 587 + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, 588 + input->ip.v4.src_ip); 589 + ice_pkt_insert_u16(loc, ICE_IPV4_UDP_DST_PORT_OFFSET, 590 + input->ip.v4.src_port); 591 + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, 592 + input->ip.v4.dst_ip); 593 + ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET, 594 + input->ip.v4.dst_port); 595 + break; 596 + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: 597 + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, 598 + input->ip.v4.src_ip); 599 
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_DST_PORT_OFFSET, 600 + input->ip.v4.src_port); 601 + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, 602 + input->ip.v4.dst_ip); 603 + ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET, 604 + input->ip.v4.dst_port); 605 + break; 606 + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: 607 + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, 608 + input->ip.v4.src_ip); 609 + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, 610 + input->ip.v4.dst_ip); 611 + ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0); 612 + break; 613 + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: 614 + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, 615 + input->ip.v6.src_ip); 616 + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, 617 + input->ip.v6.dst_ip); 618 + ice_pkt_insert_u16(loc, ICE_IPV6_TCP_DST_PORT_OFFSET, 619 + input->ip.v6.src_port); 620 + ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET, 621 + input->ip.v6.dst_port); 622 + break; 623 + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: 624 + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, 625 + input->ip.v6.src_ip); 626 + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, 627 + input->ip.v6.dst_ip); 628 + ice_pkt_insert_u16(loc, ICE_IPV6_UDP_DST_PORT_OFFSET, 629 + input->ip.v6.src_port); 630 + ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET, 631 + input->ip.v6.dst_port); 632 + break; 633 + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: 634 + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, 635 + input->ip.v6.src_ip); 636 + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, 637 + input->ip.v6.dst_ip); 638 + ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_DST_PORT_OFFSET, 639 + input->ip.v6.src_port); 640 + ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET, 641 + input->ip.v6.dst_port); 642 + break; 643 + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: 644 + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, 645 + input->ip.v6.src_ip); 646 + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, 647 
+ input->ip.v6.dst_ip); 648 + break; 649 + default: 650 + return ICE_ERR_PARAM; 651 + } 652 + 653 + if (input->flex_fltr) 654 + ice_pkt_insert_u16(loc, input->flex_offset, input->flex_word); 655 + 656 + return 0; 657 + } 658 + 659 + /** 660 + * ice_fdir_has_frag - does flow type have 2 ptypes 661 + * @flow: flow ptype 662 + * 663 + * returns true is there is a fragment packet for this ptype 664 + */ 665 + bool ice_fdir_has_frag(enum ice_fltr_ptype flow) 666 + { 667 + if (flow == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) 668 + return true; 669 + else 670 + return false; 671 + } 672 + 673 + /** 674 + * ice_fdir_find_by_idx - find filter with idx 675 + * @hw: pointer to hardware structure 676 + * @fltr_idx: index to find. 677 + * 678 + * Returns pointer to filter if found or null 679 + */ 680 + struct ice_fdir_fltr * 681 + ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx) 682 + { 683 + struct ice_fdir_fltr *rule; 684 + 685 + list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) { 686 + /* rule ID found in the list */ 687 + if (fltr_idx == rule->fltr_id) 688 + return rule; 689 + if (fltr_idx < rule->fltr_id) 690 + break; 691 + } 692 + return NULL; 693 + } 694 + 695 + /** 696 + * ice_fdir_list_add_fltr - add a new node to the flow director filter list 697 + * @hw: hardware structure 698 + * @fltr: filter node to add to structure 699 + */ 700 + void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr) 701 + { 702 + struct ice_fdir_fltr *rule, *parent = NULL; 703 + 704 + list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) { 705 + /* rule ID found or pass its spot in the list */ 706 + if (rule->fltr_id >= fltr->fltr_id) 707 + break; 708 + parent = rule; 709 + } 710 + 711 + if (parent) 712 + list_add(&fltr->fltr_node, &parent->fltr_node); 713 + else 714 + list_add(&fltr->fltr_node, &hw->fdir_list_head); 715 + } 716 + 717 + /** 718 + * ice_fdir_update_cntrs - increment / decrement filter counter 719 + * @hw: pointer to hardware structure 720 + * 
@flow: filter flow type 721 + * @add: true implies filters added 722 + */ 723 + void 724 + ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add) 725 + { 726 + int incr; 727 + 728 + incr = add ? 1 : -1; 729 + hw->fdir_active_fltr += incr; 730 + 731 + if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX) 732 + ice_debug(hw, ICE_DBG_SW, "Unknown filter type %d\n", flow); 733 + else 734 + hw->fdir_fltr_cnt[flow] += incr; 735 + } 736 + 737 + /** 738 + * ice_cmp_ipv6_addr - compare 2 IP v6 addresses 739 + * @a: IP v6 address 740 + * @b: IP v6 address 741 + * 742 + * Returns 0 on equal, returns non-0 if different 743 + */ 744 + static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b) 745 + { 746 + return memcmp(a, b, 4 * sizeof(__be32)); 747 + } 748 + 749 + /** 750 + * ice_fdir_comp_rules - compare 2 filters 751 + * @a: a Flow Director filter data structure 752 + * @b: a Flow Director filter data structure 753 + * @v6: bool true if v6 filter 754 + * 755 + * Returns true if the filters match 756 + */ 757 + static bool 758 + ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6) 759 + { 760 + enum ice_fltr_ptype flow_type = a->flow_type; 761 + 762 + /* The calling function already checks that the two filters have the 763 + * same flow_type. 
764 + */ 765 + if (!v6) { 766 + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || 767 + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || 768 + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) { 769 + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && 770 + a->ip.v4.src_ip == b->ip.v4.src_ip && 771 + a->ip.v4.dst_port == b->ip.v4.dst_port && 772 + a->ip.v4.src_port == b->ip.v4.src_port) 773 + return true; 774 + } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { 775 + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && 776 + a->ip.v4.src_ip == b->ip.v4.src_ip && 777 + a->ip.v4.l4_header == b->ip.v4.l4_header && 778 + a->ip.v4.proto == b->ip.v4.proto && 779 + a->ip.v4.ip_ver == b->ip.v4.ip_ver && 780 + a->ip.v4.tos == b->ip.v4.tos) 781 + return true; 782 + } 783 + } else { 784 + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP || 785 + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP || 786 + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) { 787 + if (a->ip.v6.dst_port == b->ip.v6.dst_port && 788 + a->ip.v6.src_port == b->ip.v6.src_port && 789 + !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, 790 + b->ip.v6.dst_ip) && 791 + !ice_cmp_ipv6_addr(a->ip.v6.src_ip, 792 + b->ip.v6.src_ip)) 793 + return true; 794 + } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { 795 + if (a->ip.v6.dst_port == b->ip.v6.dst_port && 796 + a->ip.v6.src_port == b->ip.v6.src_port) 797 + return true; 798 + } 799 + } 800 + 801 + return false; 802 + } 803 + 804 + /** 805 + * ice_fdir_is_dup_fltr - test if filter is already in list for PF 806 + * @hw: hardware data structure 807 + * @input: Flow Director filter data structure 808 + * 809 + * Returns true if the filter is found in the list 810 + */ 811 + bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input) 812 + { 813 + struct ice_fdir_fltr *rule; 814 + bool ret = false; 815 + 816 + list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) { 817 + enum ice_fltr_ptype flow_type; 818 + 819 + if (rule->flow_type != input->flow_type) 820 + continue; 821 + 822 + flow_type = 
input->flow_type; 823 + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || 824 + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || 825 + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP || 826 + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) 827 + ret = ice_fdir_comp_rules(rule, input, false); 828 + else 829 + ret = ice_fdir_comp_rules(rule, input, true); 830 + if (ret) { 831 + if (rule->fltr_id == input->fltr_id && 832 + rule->q_index != input->q_index) 833 + ret = false; 834 + else 835 + break; 836 + } 837 + } 838 + 839 + return ret; 840 + }
+166
drivers/net/ethernet/intel/ice/ice_fdir.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2018-2020, Intel Corporation. */ 3 + 4 + #ifndef _ICE_FDIR_H_ 5 + #define _ICE_FDIR_H_ 6 + 7 + #define ICE_FDIR_TUN_PKT_OFF 50 8 + #define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF) 9 + 10 + /* macros for offsets into packets for flow director programming */ 11 + #define ICE_IPV4_SRC_ADDR_OFFSET 26 12 + #define ICE_IPV4_DST_ADDR_OFFSET 30 13 + #define ICE_IPV4_TCP_SRC_PORT_OFFSET 34 14 + #define ICE_IPV4_TCP_DST_PORT_OFFSET 36 15 + #define ICE_IPV4_UDP_SRC_PORT_OFFSET 34 16 + #define ICE_IPV4_UDP_DST_PORT_OFFSET 36 17 + #define ICE_IPV4_SCTP_SRC_PORT_OFFSET 34 18 + #define ICE_IPV4_SCTP_DST_PORT_OFFSET 36 19 + #define ICE_IPV4_PROTO_OFFSET 23 20 + #define ICE_IPV6_SRC_ADDR_OFFSET 22 21 + #define ICE_IPV6_DST_ADDR_OFFSET 38 22 + #define ICE_IPV6_TCP_SRC_PORT_OFFSET 54 23 + #define ICE_IPV6_TCP_DST_PORT_OFFSET 56 24 + #define ICE_IPV6_UDP_SRC_PORT_OFFSET 54 25 + #define ICE_IPV6_UDP_DST_PORT_OFFSET 56 26 + #define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54 27 + #define ICE_IPV6_SCTP_DST_PORT_OFFSET 56 28 + /* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF 29 + * requests that the packet not be fragmented. MF indicates that a packet has 30 + * been fragmented. 
31 + */ 32 + #define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20 33 + 34 + enum ice_fltr_prgm_desc_dest { 35 + ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, 36 + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX, 37 + }; 38 + 39 + enum ice_fltr_prgm_desc_fd_status { 40 + ICE_FLTR_PRGM_DESC_FD_STATUS_NONE, 41 + ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID, 42 + }; 43 + 44 + /* Flow Director (FD) Filter Programming descriptor */ 45 + struct ice_fd_fltr_desc_ctx { 46 + u32 fdid; 47 + u16 qindex; 48 + u16 cnt_index; 49 + u16 fd_vsi; 50 + u16 flex_val; 51 + u8 comp_q; 52 + u8 comp_report; 53 + u8 fd_space; 54 + u8 cnt_ena; 55 + u8 evict_ena; 56 + u8 toq; 57 + u8 toq_prio; 58 + u8 dpu_recipe; 59 + u8 drop; 60 + u8 flex_prio; 61 + u8 flex_mdid; 62 + u8 dtype; 63 + u8 pcmd; 64 + u8 desc_prof_prio; 65 + u8 desc_prof; 66 + u8 swap; 67 + u8 fdid_prio; 68 + u8 fdid_mdid; 69 + }; 70 + 71 + #define ICE_FLTR_PRGM_FLEX_WORD_SIZE sizeof(__be16) 72 + 73 + struct ice_rx_flow_userdef { 74 + u16 flex_word; 75 + u16 flex_offset; 76 + u16 flex_fltr; 77 + }; 78 + 79 + struct ice_fdir_v4 { 80 + __be32 dst_ip; 81 + __be32 src_ip; 82 + __be16 dst_port; 83 + __be16 src_port; 84 + __be32 l4_header; 85 + __be32 sec_parm_idx; /* security parameter index */ 86 + u8 tos; 87 + u8 ip_ver; 88 + u8 proto; 89 + }; 90 + 91 + #define ICE_IPV6_ADDR_LEN_AS_U32 4 92 + 93 + struct ice_fdir_v6 { 94 + __be32 dst_ip[ICE_IPV6_ADDR_LEN_AS_U32]; 95 + __be32 src_ip[ICE_IPV6_ADDR_LEN_AS_U32]; 96 + __be16 dst_port; 97 + __be16 src_port; 98 + __be32 l4_header; /* next header */ 99 + __be32 sec_parm_idx; /* security parameter index */ 100 + u8 tc; 101 + u8 proto; 102 + }; 103 + 104 + struct ice_fdir_extra { 105 + u8 dst_mac[ETH_ALEN]; /* dest MAC address */ 106 + u32 usr_def[2]; /* user data */ 107 + __be16 vlan_type; /* VLAN ethertype */ 108 + __be16 vlan_tag; /* VLAN tag info */ 109 + }; 110 + 111 + struct ice_fdir_fltr { 112 + struct list_head fltr_node; 113 + enum ice_fltr_ptype flow_type; 114 + 115 + union { 116 + struct ice_fdir_v4 v4; 117 + struct 
ice_fdir_v6 v6; 118 + } ip, mask; 119 + 120 + struct ice_fdir_extra ext_data; 121 + struct ice_fdir_extra ext_mask; 122 + 123 + /* flex byte filter data */ 124 + __be16 flex_word; 125 + u16 flex_offset; 126 + u16 flex_fltr; 127 + 128 + /* filter control */ 129 + u16 q_index; 130 + u16 dest_vsi; 131 + u8 dest_ctl; 132 + u8 fltr_status; 133 + u16 cnt_index; 134 + u32 fltr_id; 135 + }; 136 + 137 + /* Dummy packet filter definition structure */ 138 + struct ice_fdir_base_pkt { 139 + enum ice_fltr_ptype flow; 140 + u16 pkt_len; 141 + const u8 *pkt; 142 + u16 tun_pkt_len; 143 + const u8 *tun_pkt; 144 + }; 145 + 146 + enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); 147 + enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); 148 + enum ice_status 149 + ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); 150 + enum ice_status 151 + ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); 152 + void 153 + ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, 154 + struct ice_fltr_desc *fdesc, bool add); 155 + enum ice_status 156 + ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, 157 + u8 *pkt, bool frag, bool tun); 158 + int ice_get_fdir_cnt_all(struct ice_hw *hw); 159 + bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); 160 + bool ice_fdir_has_frag(enum ice_fltr_ptype flow); 161 + struct ice_fdir_fltr * 162 + ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx); 163 + void 164 + ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add); 165 + void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); 166 + #endif /* _ICE_FDIR_H_ */
+351 -25
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
··· 864 864 u32 i; 865 865 866 866 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", 867 - pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor, 868 - pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft); 867 + pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, 868 + pkg_hdr->pkg_format_ver.update, 869 + pkg_hdr->pkg_format_ver.draft); 869 870 870 871 /* Search all package segments for the requested segment type */ 871 872 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { ··· 1036 1035 { 1037 1036 struct ice_buf_table *ice_buf_tbl; 1038 1037 1039 - ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n", 1040 - ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor, 1041 - ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft); 1038 + ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", 1039 + ice_seg->hdr.seg_format_ver.major, 1040 + ice_seg->hdr.seg_format_ver.minor, 1041 + ice_seg->hdr.seg_format_ver.update, 1042 + ice_seg->hdr.seg_format_ver.draft); 1042 1043 1043 1044 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", 1044 1045 le32_to_cpu(ice_seg->hdr.seg_type), 1045 - le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name); 1046 + le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); 1046 1047 1047 1048 ice_buf_tbl = ice_find_buf_table(ice_seg); 1048 1049 ··· 1089 1086 1090 1087 seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); 1091 1088 if (seg_hdr) { 1092 - hw->ice_pkg_ver = seg_hdr->seg_ver; 1093 - memcpy(hw->ice_pkg_name, seg_hdr->seg_name, 1089 + hw->ice_pkg_ver = seg_hdr->seg_format_ver; 1090 + memcpy(hw->ice_pkg_name, seg_hdr->seg_id, 1094 1091 sizeof(hw->ice_pkg_name)); 1095 1092 1096 - ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n", 1097 - seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor, 1098 - seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft, 1099 - seg_hdr->seg_name); 1093 + ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", 1094 + 
seg_hdr->seg_format_ver.major, 1095 + seg_hdr->seg_format_ver.minor, 1096 + seg_hdr->seg_format_ver.update, 1097 + seg_hdr->seg_format_ver.draft, 1098 + seg_hdr->seg_id); 1100 1099 } else { 1101 1100 ice_debug(hw, ICE_DBG_INIT, 1102 1101 "Did not find ice segment in driver package\n"); ··· 1139 1134 if (pkg_info->pkg_info[i].is_active) { 1140 1135 flags[place++] = 'A'; 1141 1136 hw->active_pkg_ver = pkg_info->pkg_info[i].ver; 1137 + hw->active_track_id = 1138 + le32_to_cpu(pkg_info->pkg_info[i].track_id); 1142 1139 memcpy(hw->active_pkg_name, 1143 1140 pkg_info->pkg_info[i].name, 1144 - sizeof(hw->active_pkg_name)); 1141 + sizeof(pkg_info->pkg_info[i].name)); 1145 1142 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; 1146 1143 } 1147 1144 if (pkg_info->pkg_info[i].is_active_at_boot) ··· 1183 1176 if (len < sizeof(*pkg)) 1184 1177 return ICE_ERR_BUF_TOO_SHORT; 1185 1178 1186 - if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ || 1187 - pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR || 1188 - pkg->format_ver.update != ICE_PKG_FMT_VER_UPD || 1189 - pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT) 1179 + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || 1180 + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || 1181 + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || 1182 + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) 1190 1183 return ICE_ERR_CFG; 1191 1184 1192 1185 /* pkg must have at least one segment */ ··· 1268 1261 } 1269 1262 1270 1263 /** 1264 + * ice_chk_pkg_compat 1265 + * @hw: pointer to the hardware structure 1266 + * @ospkg: pointer to the package hdr 1267 + * @seg: pointer to the package segment hdr 1268 + * 1269 + * This function checks the package version compatibility with driver and NVM 1270 + */ 1271 + static enum ice_status 1272 + ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, 1273 + struct ice_seg **seg) 1274 + { 1275 + struct ice_aqc_get_pkg_info_resp *pkg; 1276 + enum ice_status status; 1277 + u16 size; 1278 + 
u32 i; 1279 + 1280 + /* Check package version compatibility */ 1281 + status = ice_chk_pkg_version(&hw->pkg_ver); 1282 + if (status) { 1283 + ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); 1284 + return status; 1285 + } 1286 + 1287 + /* find ICE segment in given package */ 1288 + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, 1289 + ospkg); 1290 + if (!*seg) { 1291 + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); 1292 + return ICE_ERR_CFG; 1293 + } 1294 + 1295 + /* Check if FW is compatible with the OS package */ 1296 + size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1); 1297 + pkg = kzalloc(size, GFP_KERNEL); 1298 + if (!pkg) 1299 + return ICE_ERR_NO_MEMORY; 1300 + 1301 + status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); 1302 + if (status) 1303 + goto fw_ddp_compat_free_alloc; 1304 + 1305 + for (i = 0; i < le32_to_cpu(pkg->count); i++) { 1306 + /* loop till we find the NVM package */ 1307 + if (!pkg->pkg_info[i].is_in_nvm) 1308 + continue; 1309 + if ((*seg)->hdr.seg_format_ver.major != 1310 + pkg->pkg_info[i].ver.major || 1311 + (*seg)->hdr.seg_format_ver.minor > 1312 + pkg->pkg_info[i].ver.minor) { 1313 + status = ICE_ERR_FW_DDP_MISMATCH; 1314 + ice_debug(hw, ICE_DBG_INIT, 1315 + "OS package is not compatible with NVM.\n"); 1316 + } 1317 + /* done processing NVM package so break */ 1318 + break; 1319 + } 1320 + fw_ddp_compat_free_alloc: 1321 + kfree(pkg); 1322 + return status; 1323 + } 1324 + 1325 + /** 1271 1326 * ice_init_pkg - initialize/download package 1272 1327 * @hw: pointer to the hardware structure 1273 1328 * @buf: pointer to the package buffer ··· 1379 1310 /* before downloading the package, check package version for 1380 1311 * compatibility with driver 1381 1312 */ 1382 - status = ice_chk_pkg_version(&hw->pkg_ver); 1313 + status = ice_chk_pkg_compat(hw, pkg, &seg); 1383 1314 if (status) 1384 1315 return status; 1385 - 1386 - /* find segment in given package */ 1387 - seg = (struct ice_seg 
*)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg); 1388 - if (!seg) { 1389 - ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); 1390 - return ICE_ERR_CFG; 1391 - } 1392 1316 1393 1317 /* initialize package hints and then download package */ 1394 1318 ice_init_pkg_hints(hw, seg); ··· 1692 1630 } 1693 1631 1694 1632 return false; 1633 + } 1634 + 1635 + /** 1636 + * ice_get_open_tunnel_port - retrieve an open tunnel port 1637 + * @hw: pointer to the HW structure 1638 + * @type: tunnel type (TNL_ALL will return any open port) 1639 + * @port: returns open port 1640 + */ 1641 + bool 1642 + ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, 1643 + u16 *port) 1644 + { 1645 + bool res = false; 1646 + u16 i; 1647 + 1648 + mutex_lock(&hw->tnl_lock); 1649 + 1650 + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 1651 + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 1652 + (type == TNL_ALL || hw->tnl.tbl[i].type == type)) { 1653 + *port = hw->tnl.tbl[i].port; 1654 + res = true; 1655 + break; 1656 + } 1657 + 1658 + mutex_unlock(&hw->tnl_lock); 1659 + 1660 + return res; 1695 1661 } 1696 1662 1697 1663 /** ··· 2422 2332 u16 off; 2423 2333 u8 i; 2424 2334 2335 + /* For FD, we don't want to re-use a existed profile with the same 2336 + * field vector and mask. This will cause rule interference. 
2337 + */ 2338 + if (blk == ICE_BLK_FD) 2339 + return ICE_ERR_DOES_NOT_EXIST; 2340 + 2425 2341 for (i = 0; i < (u8)es->count; i++) { 2426 2342 off = i * es->fvw; 2427 2343 ··· 2449 2353 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) 2450 2354 { 2451 2355 switch (blk) { 2356 + case ICE_BLK_FD: 2357 + *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID; 2358 + break; 2452 2359 case ICE_BLK_RSS: 2453 2360 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID; 2454 2361 break; ··· 2469 2370 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) 2470 2371 { 2471 2372 switch (blk) { 2373 + case ICE_BLK_FD: 2374 + *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM; 2375 + break; 2472 2376 case ICE_BLK_RSS: 2473 2377 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM; 2474 2378 break; ··· 2915 2813 2916 2814 mutex_lock(&hw->fl_profs_locks[blk_idx]); 2917 2815 list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) { 2816 + struct ice_flow_entry *e, *t; 2817 + 2818 + list_for_each_entry_safe(e, t, &p->entries, l_entry) 2819 + ice_flow_rem_entry(hw, (enum ice_block)blk_idx, 2820 + ICE_FLOW_ENTRY_HNDL(e)); 2821 + 2918 2822 list_del(&p->l_entry); 2919 2823 devm_kfree(ice_hw_to_dev(hw), p); 2920 2824 } ··· 3550 3442 } 3551 3443 3552 3444 /** 3445 + * ice_update_fd_mask - set Flow Director Field Vector mask for a profile 3446 + * @hw: pointer to the HW struct 3447 + * @prof_id: profile ID 3448 + * @mask_sel: mask select 3449 + * 3450 + * This function enable any of the masks selected by the mask select parameter 3451 + * for the profile specified. 
3452 + */ 3453 + static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel) 3454 + { 3455 + wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel); 3456 + 3457 + ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id, 3458 + GLQF_FDMASK_SEL(prof_id), mask_sel); 3459 + } 3460 + 3461 + struct ice_fd_src_dst_pair { 3462 + u8 prot_id; 3463 + u8 count; 3464 + u16 off; 3465 + }; 3466 + 3467 + static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { 3468 + /* These are defined in pairs */ 3469 + { ICE_PROT_IPV4_OF_OR_S, 2, 12 }, 3470 + { ICE_PROT_IPV4_OF_OR_S, 2, 16 }, 3471 + 3472 + { ICE_PROT_IPV4_IL, 2, 12 }, 3473 + { ICE_PROT_IPV4_IL, 2, 16 }, 3474 + 3475 + { ICE_PROT_IPV6_OF_OR_S, 8, 8 }, 3476 + { ICE_PROT_IPV6_OF_OR_S, 8, 24 }, 3477 + 3478 + { ICE_PROT_IPV6_IL, 8, 8 }, 3479 + { ICE_PROT_IPV6_IL, 8, 24 }, 3480 + 3481 + { ICE_PROT_TCP_IL, 1, 0 }, 3482 + { ICE_PROT_TCP_IL, 1, 2 }, 3483 + 3484 + { ICE_PROT_UDP_OF, 1, 0 }, 3485 + { ICE_PROT_UDP_OF, 1, 2 }, 3486 + 3487 + { ICE_PROT_UDP_IL_OR_S, 1, 0 }, 3488 + { ICE_PROT_UDP_IL_OR_S, 1, 2 }, 3489 + 3490 + { ICE_PROT_SCTP_IL, 1, 0 }, 3491 + { ICE_PROT_SCTP_IL, 1, 2 } 3492 + }; 3493 + 3494 + #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs) 3495 + 3496 + /** 3497 + * ice_update_fd_swap - set register appropriately for a FD FV extraction 3498 + * @hw: pointer to the HW struct 3499 + * @prof_id: profile ID 3500 + * @es: extraction sequence (length of array is determined by the block) 3501 + */ 3502 + static enum ice_status 3503 + ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) 3504 + { 3505 + DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); 3506 + u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 }; 3507 + #define ICE_FD_FV_NOT_FOUND (-2) 3508 + s8 first_free = ICE_FD_FV_NOT_FOUND; 3509 + u8 used[ICE_MAX_FV_WORDS] = { 0 }; 3510 + s8 orig_free, si; 3511 + u32 mask_sel = 0; 3512 + u8 i, j, k; 3513 + 3514 + bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); 3515 + 3516 + /* 
This code assumes that the Flow Director field vectors are assigned 3517 + * from the end of the FV indexes working towards the zero index, that 3518 + * only complete fields will be included and will be consecutive, and 3519 + * that there are no gaps between valid indexes. 3520 + */ 3521 + 3522 + /* Determine swap fields present */ 3523 + for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) { 3524 + /* Find the first free entry, assuming right to left population. 3525 + * This is where we can start adding additional pairs if needed. 3526 + */ 3527 + if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id != 3528 + ICE_PROT_INVALID) 3529 + first_free = i - 1; 3530 + 3531 + for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) 3532 + if (es[i].prot_id == ice_fd_pairs[j].prot_id && 3533 + es[i].off == ice_fd_pairs[j].off) { 3534 + set_bit(j, pair_list); 3535 + pair_start[j] = i; 3536 + } 3537 + } 3538 + 3539 + orig_free = first_free; 3540 + 3541 + /* determine missing swap fields that need to be added */ 3542 + for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) { 3543 + u8 bit1 = test_bit(i + 1, pair_list); 3544 + u8 bit0 = test_bit(i, pair_list); 3545 + 3546 + if (bit0 ^ bit1) { 3547 + u8 index; 3548 + 3549 + /* add the appropriate 'paired' entry */ 3550 + if (!bit0) 3551 + index = i; 3552 + else 3553 + index = i + 1; 3554 + 3555 + /* check for room */ 3556 + if (first_free + 1 < (s8)ice_fd_pairs[index].count) 3557 + return ICE_ERR_MAX_LIMIT; 3558 + 3559 + /* place in extraction sequence */ 3560 + for (k = 0; k < ice_fd_pairs[index].count; k++) { 3561 + es[first_free - k].prot_id = 3562 + ice_fd_pairs[index].prot_id; 3563 + es[first_free - k].off = 3564 + ice_fd_pairs[index].off + (k * 2); 3565 + 3566 + if (k > first_free) 3567 + return ICE_ERR_OUT_OF_RANGE; 3568 + 3569 + /* keep track of non-relevant fields */ 3570 + mask_sel |= BIT(first_free - k); 3571 + } 3572 + 3573 + pair_start[index] = first_free; 3574 + first_free -= ice_fd_pairs[index].count; 3575 + } 3576 + } 3577 + 
3578 + /* fill in the swap array */ 3579 + si = hw->blk[ICE_BLK_FD].es.fvw - 1; 3580 + while (si >= 0) { 3581 + u8 indexes_used = 1; 3582 + 3583 + /* assume flat at this index */ 3584 + #define ICE_SWAP_VALID 0x80 3585 + used[si] = si | ICE_SWAP_VALID; 3586 + 3587 + if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) { 3588 + si -= indexes_used; 3589 + continue; 3590 + } 3591 + 3592 + /* check for a swap location */ 3593 + for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) 3594 + if (es[si].prot_id == ice_fd_pairs[j].prot_id && 3595 + es[si].off == ice_fd_pairs[j].off) { 3596 + u8 idx; 3597 + 3598 + /* determine the appropriate matching field */ 3599 + idx = j + ((j % 2) ? -1 : 1); 3600 + 3601 + indexes_used = ice_fd_pairs[idx].count; 3602 + for (k = 0; k < indexes_used; k++) { 3603 + used[si - k] = (pair_start[idx] - k) | 3604 + ICE_SWAP_VALID; 3605 + } 3606 + 3607 + break; 3608 + } 3609 + 3610 + si -= indexes_used; 3611 + } 3612 + 3613 + /* for each set of 4 swap and 4 inset indexes, write the appropriate 3614 + * register 3615 + */ 3616 + for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) { 3617 + u32 raw_swap = 0; 3618 + u32 raw_in = 0; 3619 + 3620 + for (k = 0; k < 4; k++) { 3621 + u8 idx; 3622 + 3623 + idx = (j * 4) + k; 3624 + if (used[idx] && !(mask_sel & BIT(idx))) { 3625 + raw_swap |= used[idx] << (k * BITS_PER_BYTE); 3626 + #define ICE_INSET_DFLT 0x9f 3627 + raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE); 3628 + } 3629 + } 3630 + 3631 + /* write the appropriate swap register set */ 3632 + wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap); 3633 + 3634 + ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n", 3635 + prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap); 3636 + 3637 + /* write the appropriate inset register set */ 3638 + wr32(hw, GLQF_FDINSET(prof_id, j), raw_in); 3639 + 3640 + ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n", 3641 + prof_id, j, GLQF_FDINSET(prof_id, j), raw_in); 3642 + } 3643 + 3644 + /* initially clear the mask 
select for this profile */ 3645 + ice_update_fd_mask(hw, prof_id, 0); 3646 + 3647 + return 0; 3648 + } 3649 + 3650 + /** 3553 3651 * ice_add_prof - add profile 3554 3652 * @hw: pointer to the HW struct 3555 3653 * @blk: hardware block ··· 3790 3476 status = ice_alloc_prof_id(hw, blk, &prof_id); 3791 3477 if (status) 3792 3478 goto err_ice_add_prof; 3479 + if (blk == ICE_BLK_FD) { 3480 + /* For Flow Director block, the extraction sequence may 3481 + * need to be altered in the case where there are paired 3482 + * fields that have no match. This is necessary because 3483 + * for Flow Director, src and dest fields need to paired 3484 + * for filter programming and these values are swapped 3485 + * during Tx. 3486 + */ 3487 + status = ice_update_fd_swap(hw, prof_id, es); 3488 + if (status) 3489 + goto err_ice_add_prof; 3490 + } 3793 3491 3794 3492 /* and write new es */ 3795 3493 ice_write_es(hw, blk, prof_id, es);
+3
drivers/net/ethernet/intel/ice/ice_flex_pipe.h
··· 18 18 19 19 #define ICE_PKG_CNT 4 20 20 21 + bool 22 + ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, 23 + u16 *port); 21 24 enum ice_status 22 25 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port); 23 26 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+4 -4
drivers/net/ethernet/intel/ice/ice_flex_type.h
··· 20 20 21 21 /* Package and segment headers and tables */ 22 22 struct ice_pkg_hdr { 23 - struct ice_pkg_ver format_ver; 23 + struct ice_pkg_ver pkg_format_ver; 24 24 __le32 seg_count; 25 25 __le32 seg_offset[1]; 26 26 }; ··· 30 30 #define SEGMENT_TYPE_METADATA 0x00000001 31 31 #define SEGMENT_TYPE_ICE 0x00000010 32 32 __le32 seg_type; 33 - struct ice_pkg_ver seg_ver; 33 + struct ice_pkg_ver seg_format_ver; 34 34 __le32 seg_size; 35 - char seg_name[ICE_PKG_NAME_SIZE]; 35 + char seg_id[ICE_PKG_NAME_SIZE]; 36 36 }; 37 37 38 38 /* ice specific segment */ ··· 75 75 struct ice_global_metadata_seg { 76 76 struct ice_generic_seg_hdr hdr; 77 77 struct ice_pkg_ver pkg_ver; 78 - __le32 track_id; 78 + __le32 rsvd; 79 79 char pkg_name[ICE_PKG_NAME_SIZE]; 80 80 }; 81 81
+313 -6
drivers/net/ethernet/intel/ice/ice_flow.c
··· 193 193 return 0; 194 194 } 195 195 196 + /* Sizes of fixed known protocol headers without header options */ 197 + #define ICE_FLOW_PROT_HDR_SZ_MAC 14 198 + #define ICE_FLOW_PROT_HDR_SZ_IPV4 20 199 + #define ICE_FLOW_PROT_HDR_SZ_IPV6 40 200 + #define ICE_FLOW_PROT_HDR_SZ_TCP 20 201 + #define ICE_FLOW_PROT_HDR_SZ_UDP 8 202 + #define ICE_FLOW_PROT_HDR_SZ_SCTP 12 203 + 204 + /** 205 + * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers 206 + * @params: information about the flow to be processed 207 + * @seg: index of packet segment whose header size is to be determined 208 + */ 209 + static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) 210 + { 211 + u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC; 212 + 213 + /* L3 headers */ 214 + if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) 215 + sz += ICE_FLOW_PROT_HDR_SZ_IPV4; 216 + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) 217 + sz += ICE_FLOW_PROT_HDR_SZ_IPV6; 218 + 219 + /* L4 headers */ 220 + if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) 221 + sz += ICE_FLOW_PROT_HDR_SZ_TCP; 222 + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) 223 + sz += ICE_FLOW_PROT_HDR_SZ_UDP; 224 + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP) 225 + sz += ICE_FLOW_PROT_HDR_SZ_SCTP; 226 + 227 + return sz; 228 + } 229 + 196 230 /** 197 231 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments 198 232 * @params: information about the flow to be processed ··· 382 348 } 383 349 384 350 /** 351 + * ice_flow_xtract_raws - Create extract sequence entries for raw bytes 352 + * @hw: pointer to the HW struct 353 + * @params: information about the flow to be processed 354 + * @seg: index of packet segment whose raw fields are to be be extracted 355 + */ 356 + static enum ice_status 357 + ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, 358 + u8 seg) 359 + { 360 + u16 fv_words; 361 + u16 hdrs_sz; 362 + 
u8 i; 363 + 364 + if (!params->prof->segs[seg].raws_cnt) 365 + return 0; 366 + 367 + if (params->prof->segs[seg].raws_cnt > 368 + ARRAY_SIZE(params->prof->segs[seg].raws)) 369 + return ICE_ERR_MAX_LIMIT; 370 + 371 + /* Offsets within the segment headers are not supported */ 372 + hdrs_sz = ice_flow_calc_seg_sz(params, seg); 373 + if (!hdrs_sz) 374 + return ICE_ERR_PARAM; 375 + 376 + fv_words = hw->blk[params->blk].es.fvw; 377 + 378 + for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) { 379 + struct ice_flow_seg_fld_raw *raw; 380 + u16 off, cnt, j; 381 + 382 + raw = &params->prof->segs[seg].raws[i]; 383 + 384 + /* Storing extraction information */ 385 + raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S; 386 + raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) * 387 + ICE_FLOW_FV_EXTRACT_SZ; 388 + raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) * 389 + BITS_PER_BYTE; 390 + raw->info.xtrct.idx = params->es_cnt; 391 + 392 + /* Determine the number of field vector entries this raw field 393 + * consumes. 
394 + */ 395 + cnt = DIV_ROUND_UP(raw->info.xtrct.disp + 396 + (raw->info.src.last * BITS_PER_BYTE), 397 + (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE)); 398 + off = raw->info.xtrct.off; 399 + for (j = 0; j < cnt; j++) { 400 + u16 idx; 401 + 402 + /* Make sure the number of extraction sequence required 403 + * does not exceed the block's capability 404 + */ 405 + if (params->es_cnt >= hw->blk[params->blk].es.count || 406 + params->es_cnt >= ICE_MAX_FV_WORDS) 407 + return ICE_ERR_MAX_LIMIT; 408 + 409 + /* some blocks require a reversed field vector layout */ 410 + if (hw->blk[params->blk].es.reverse) 411 + idx = fv_words - params->es_cnt - 1; 412 + else 413 + idx = params->es_cnt; 414 + 415 + params->es[idx].prot_id = raw->info.xtrct.prot_id; 416 + params->es[idx].off = off; 417 + params->es_cnt++; 418 + off += ICE_FLOW_FV_EXTRACT_SZ; 419 + } 420 + } 421 + 422 + return 0; 423 + } 424 + 425 + /** 385 426 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments 386 427 * @hw: pointer to the HW struct 387 428 * @params: information about the flow to be processed ··· 482 373 if (status) 483 374 return status; 484 375 } 376 + 377 + /* Process raw matching bytes */ 378 + status = ice_flow_xtract_raws(hw, params, i); 379 + if (status) 380 + return status; 485 381 } 486 382 487 383 return status; ··· 511 397 return status; 512 398 513 399 switch (params->blk) { 400 + case ICE_BLK_FD: 514 401 case ICE_BLK_RSS: 515 - /* Only header information is provided for RSS configuration. 516 - * No further processing is needed. 
517 - */ 518 402 status = 0; 519 403 break; 520 404 default: ··· 591 479 return p; 592 480 593 481 return NULL; 482 + } 483 + 484 + /** 485 + * ice_dealloc_flow_entry - Deallocate flow entry memory 486 + * @hw: pointer to the HW struct 487 + * @entry: flow entry to be removed 488 + */ 489 + static void 490 + ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry) 491 + { 492 + if (!entry) 493 + return; 494 + 495 + if (entry->entry) 496 + devm_kfree(ice_hw_to_dev(hw), entry->entry); 497 + 498 + devm_kfree(ice_hw_to_dev(hw), entry); 499 + } 500 + 501 + /** 502 + * ice_flow_rem_entry_sync - Remove a flow entry 503 + * @hw: pointer to the HW struct 504 + * @blk: classification stage 505 + * @entry: flow entry to be removed 506 + */ 507 + static enum ice_status 508 + ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, 509 + struct ice_flow_entry *entry) 510 + { 511 + if (!entry) 512 + return ICE_ERR_BAD_PTR; 513 + 514 + list_del(&entry->l_entry); 515 + 516 + ice_dealloc_flow_entry(hw, entry); 517 + 518 + return 0; 594 519 } 595 520 596 521 /** ··· 717 568 { 718 569 enum ice_status status; 719 570 571 + /* Remove all remaining flow entries before removing the flow profile */ 572 + if (!list_empty(&prof->entries)) { 573 + struct ice_flow_entry *e, *t; 574 + 575 + mutex_lock(&prof->entries_lock); 576 + 577 + list_for_each_entry_safe(e, t, &prof->entries, l_entry) { 578 + status = ice_flow_rem_entry_sync(hw, blk, e); 579 + if (status) 580 + break; 581 + } 582 + 583 + mutex_unlock(&prof->entries_lock); 584 + } 585 + 720 586 /* Remove all hardware profiles associated with this flow profile */ 721 587 status = ice_rem_prof(hw, blk, prof->id); 722 588 if (!status) { ··· 817 653 * @segs_cnt: number of packet segments provided 818 654 * @prof: stores the returned flow profile added 819 655 */ 820 - static enum ice_status 656 + enum ice_status 821 657 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, 822 658 
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, 823 659 struct ice_flow_prof **prof) ··· 855 691 * @blk: the block for which the flow profile is to be removed 856 692 * @prof_id: unique ID of the flow profile to be removed 857 693 */ 858 - static enum ice_status 694 + enum ice_status 859 695 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) 860 696 { 861 697 struct ice_flow_prof *prof; ··· 874 710 875 711 out: 876 712 mutex_unlock(&hw->fl_profs_locks[blk]); 713 + 714 + return status; 715 + } 716 + 717 + /** 718 + * ice_flow_add_entry - Add a flow entry 719 + * @hw: pointer to the HW struct 720 + * @blk: classification stage 721 + * @prof_id: ID of the profile to add a new flow entry to 722 + * @entry_id: unique ID to identify this flow entry 723 + * @vsi_handle: software VSI handle for the flow entry 724 + * @prio: priority of the flow entry 725 + * @data: pointer to a data buffer containing flow entry's match values/masks 726 + * @entry_h: pointer to buffer that receives the new flow entry's handle 727 + */ 728 + enum ice_status 729 + ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, 730 + u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio, 731 + void *data, u64 *entry_h) 732 + { 733 + struct ice_flow_entry *e = NULL; 734 + struct ice_flow_prof *prof; 735 + enum ice_status status; 736 + 737 + /* No flow entry data is expected for RSS */ 738 + if (!entry_h || (!data && blk != ICE_BLK_RSS)) 739 + return ICE_ERR_BAD_PTR; 740 + 741 + if (!ice_is_vsi_valid(hw, vsi_handle)) 742 + return ICE_ERR_PARAM; 743 + 744 + mutex_lock(&hw->fl_profs_locks[blk]); 745 + 746 + prof = ice_flow_find_prof_id(hw, blk, prof_id); 747 + if (!prof) { 748 + status = ICE_ERR_DOES_NOT_EXIST; 749 + } else { 750 + /* Allocate memory for the entry being added and associate 751 + * the VSI to the found flow profile 752 + */ 753 + e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL); 754 + if (!e) 755 + status = ICE_ERR_NO_MEMORY; 756 + 
else 757 + status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); 758 + } 759 + 760 + mutex_unlock(&hw->fl_profs_locks[blk]); 761 + if (status) 762 + goto out; 763 + 764 + e->id = entry_id; 765 + e->vsi_handle = vsi_handle; 766 + e->prof = prof; 767 + e->priority = prio; 768 + 769 + switch (blk) { 770 + case ICE_BLK_FD: 771 + case ICE_BLK_RSS: 772 + break; 773 + default: 774 + status = ICE_ERR_NOT_IMPL; 775 + goto out; 776 + } 777 + 778 + mutex_lock(&prof->entries_lock); 779 + list_add(&e->l_entry, &prof->entries); 780 + mutex_unlock(&prof->entries_lock); 781 + 782 + *entry_h = ICE_FLOW_ENTRY_HNDL(e); 783 + 784 + out: 785 + if (status && e) { 786 + if (e->entry) 787 + devm_kfree(ice_hw_to_dev(hw), e->entry); 788 + devm_kfree(ice_hw_to_dev(hw), e); 789 + } 790 + 791 + return status; 792 + } 793 + 794 + /** 795 + * ice_flow_rem_entry - Remove a flow entry 796 + * @hw: pointer to the HW struct 797 + * @blk: classification stage 798 + * @entry_h: handle to the flow entry to be removed 799 + */ 800 + enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, 801 + u64 entry_h) 802 + { 803 + struct ice_flow_entry *entry; 804 + struct ice_flow_prof *prof; 805 + enum ice_status status = 0; 806 + 807 + if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL) 808 + return ICE_ERR_PARAM; 809 + 810 + entry = ICE_FLOW_ENTRY_PTR(entry_h); 811 + 812 + /* Retain the pointer to the flow profile as the entry will be freed */ 813 + prof = entry->prof; 814 + 815 + if (prof) { 816 + mutex_lock(&prof->entries_lock); 817 + status = ice_flow_rem_entry_sync(hw, blk, entry); 818 + mutex_unlock(&prof->entries_lock); 819 + } 877 820 878 821 return status; 879 822 } ··· 1047 776 * create the content of a match entry. This function should only be used for 1048 777 * fixed-size data structures. 
1049 778 */ 1050 - static void 779 + void 1051 780 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 1052 781 u16 val_loc, u16 mask_loc, u16 last_loc, bool range) 1053 782 { ··· 1055 784 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG; 1056 785 1057 786 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc); 787 + } 788 + 789 + /** 790 + * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf 791 + * @seg: packet segment the field being set belongs to 792 + * @off: offset of the raw field from the beginning of the segment in bytes 793 + * @len: length of the raw pattern to be matched 794 + * @val_loc: location of the value to match from entry's input buffer 795 + * @mask_loc: location of mask value from entry's input buffer 796 + * 797 + * This function specifies the offset of the raw field to be match from the 798 + * beginning of the specified packet segment, and the locations, in the form of 799 + * byte offsets from the start of the input buffer for a flow entry, from where 800 + * the value to match and the mask value to be extracted. These locations are 801 + * then stored in the flow profile. When adding flow entries to the associated 802 + * flow profile, these locations can be used to quickly extract the values to 803 + * create the content of a match entry. This function should only be used for 804 + * fixed-size data structures. 
805 + */ 806 + void 807 + ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, 808 + u16 val_loc, u16 mask_loc) 809 + { 810 + if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) { 811 + seg->raws[seg->raws_cnt].off = off; 812 + seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE; 813 + seg->raws[seg->raws_cnt].info.src.val = val_loc; 814 + seg->raws[seg->raws_cnt].info.src.mask = mask_loc; 815 + /* The "last" field is used to store the length of the field */ 816 + seg->raws[seg->raws_cnt].info.src.last = len; 817 + } 818 + 819 + /* Overflows of "raws" will be handled as an error condition later in 820 + * the flow when this information is processed. 821 + */ 822 + seg->raws_cnt++; 1058 823 } 1059 824 1060 825 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
+43 -1
drivers/net/ethernet/intel/ice/ice_flow.h
··· 128 128 }; 129 129 130 130 #define ICE_FLOW_SEG_MAX 2 131 + #define ICE_FLOW_SEG_RAW_FLD_MAX 2 131 132 #define ICE_FLOW_FV_EXTRACT_SZ 2 132 133 133 134 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val)) ··· 165 164 struct ice_flow_seg_xtrct xtrct; 166 165 }; 167 166 167 + struct ice_flow_seg_fld_raw { 168 + struct ice_flow_fld_info info; 169 + u16 off; /* Offset from the start of the segment */ 170 + }; 171 + 168 172 struct ice_flow_seg_info { 169 173 u32 hdrs; /* Bitmask indicating protocol headers present */ 170 174 u64 match; /* Bitmask indicating header fields to be matched */ 171 175 u64 range; /* Bitmask indicating header fields matched as ranges */ 172 176 173 177 struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX]; 178 + 179 + u8 raws_cnt; /* Number of raw fields to be matched */ 180 + struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX]; 174 181 }; 182 + 183 + /* This structure describes a flow entry, and is tracked only in this file */ 184 + struct ice_flow_entry { 185 + struct list_head l_entry; 186 + 187 + u64 id; 188 + struct ice_flow_prof *prof; 189 + /* Flow entry's content */ 190 + void *entry; 191 + enum ice_flow_priority priority; 192 + u16 vsi_handle; 193 + u16 entry_sz; 194 + }; 195 + 196 + #define ICE_FLOW_ENTRY_HNDL(e) ((u64)e) 197 + #define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h)) 175 198 176 199 struct ice_flow_prof { 177 200 struct list_head l_entry; ··· 222 197 u32 packet_hdr; 223 198 }; 224 199 225 - enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h); 200 + enum ice_status 201 + ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, 202 + u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, 203 + struct ice_flow_prof **prof); 204 + enum ice_status 205 + ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); 206 + enum ice_status 207 + ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, 208 + u64 entry_id, u16 vsi, enum 
ice_flow_priority prio, 209 + void *data, u64 *entry_h); 210 + enum ice_status 211 + ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); 212 + void 213 + ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 214 + u16 val_loc, u16 mask_loc, u16 last_loc, bool range); 215 + void 216 + ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, 217 + u16 val_loc, u16 mask_loc); 226 218 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); 227 219 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); 228 220 enum ice_status
+23 -3
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
··· 6 6 #ifndef _ICE_HW_AUTOGEN_H_ 7 7 #define _ICE_HW_AUTOGEN_H_ 8 8 9 - #define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096)) 10 - #define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096)) 11 - #define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096)) 12 9 #define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) 13 10 #define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) 14 11 #define QTX_COMM_HEAD_HEAD_S 0 ··· 216 219 #define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) 217 220 #define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) 218 221 #define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) 222 + #define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) 223 + #define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 224 + #define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0) 225 + #define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) 226 + #define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0) 219 227 #define GL_MDCK_TX_TDPU 0x00049348 220 228 #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) 221 229 #define GL_MDET_RX 0x00294C00 ··· 292 290 #define GL_PWR_MODE_CTL 0x000B820C 293 291 #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 294 292 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) 293 + #define GLQF_FD_CNT 0x00460018 294 + #define GLQF_FD_CNT_FD_BCNT_S 16 295 + #define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16) 296 + #define GLQF_FD_SIZE 0x00460010 297 + #define GLQF_FD_SIZE_FD_GSIZE_S 0 298 + #define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0) 299 + #define GLQF_FD_SIZE_FD_BSIZE_S 16 300 + #define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16) 301 + #define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) 302 + #define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4)) 303 + #define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512)) 304 + #define PFQF_FD_ENA 0x0043A000 305 + #define PFQF_FD_ENA_FD_ENA_M BIT(0) 306 + #define PFQF_FD_SIZE 0x00460100 295 307 #define GLDCB_RTCTQ_RXQNUM_S 0 296 308 
#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0) 297 309 #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) ··· 349 333 #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) 350 334 #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) 351 335 #define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) 336 + #define GLSTAT_FD_CNT0L(_i) (0x003A0000 + ((_i) * 8)) 352 337 #define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) 353 338 #define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) 354 339 #define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) ··· 360 343 #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) 361 344 #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) 362 345 #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) 346 + #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) 347 + #define VSIQF_FD_CNT_FD_GCNT_S 0 348 + #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) 363 349 #define VSIQF_HKEY_MAX_INDEX 12 364 350 #define VSIQF_HLUT_MAX_INDEX 15 365 351 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
+101
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
··· 40 40 } wb; /* writeback */ 41 41 }; 42 42 43 + struct ice_fltr_desc { 44 + __le64 qidx_compq_space_stat; 45 + __le64 dtype_cmd_vsi_fdid; 46 + }; 47 + 48 + #define ICE_FXD_FLTR_QW0_QINDEX_S 0 49 + #define ICE_FXD_FLTR_QW0_QINDEX_M (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S) 50 + #define ICE_FXD_FLTR_QW0_COMP_Q_S 11 51 + #define ICE_FXD_FLTR_QW0_COMP_Q_M BIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S) 52 + #define ICE_FXD_FLTR_QW0_COMP_Q_ZERO 0x0ULL 53 + 54 + #define ICE_FXD_FLTR_QW0_COMP_REPORT_S 12 55 + #define ICE_FXD_FLTR_QW0_COMP_REPORT_M \ 56 + (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S) 57 + #define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL 58 + 59 + #define ICE_FXD_FLTR_QW0_FD_SPACE_S 14 60 + #define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S) 61 + #define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST 0x2ULL 62 + 63 + #define ICE_FXD_FLTR_QW0_STAT_CNT_S 16 64 + #define ICE_FXD_FLTR_QW0_STAT_CNT_M \ 65 + (0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S) 66 + #define ICE_FXD_FLTR_QW0_STAT_ENA_S 29 67 + #define ICE_FXD_FLTR_QW0_STAT_ENA_M (0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S) 68 + #define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS 0x1ULL 69 + 70 + #define ICE_FXD_FLTR_QW0_EVICT_ENA_S 31 71 + #define ICE_FXD_FLTR_QW0_EVICT_ENA_M BIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S) 72 + #define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE 0x0ULL 73 + #define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE 0x1ULL 74 + 75 + #define ICE_FXD_FLTR_QW0_TO_Q_S 32 76 + #define ICE_FXD_FLTR_QW0_TO_Q_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S) 77 + #define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX 0x0ULL 78 + 79 + #define ICE_FXD_FLTR_QW0_TO_Q_PRI_S 35 80 + #define ICE_FXD_FLTR_QW0_TO_Q_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) 81 + #define ICE_FXD_FLTR_QW0_TO_Q_PRIO1 0x1ULL 82 + 83 + #define ICE_FXD_FLTR_QW0_DPU_RECIPE_S 38 84 + #define ICE_FXD_FLTR_QW0_DPU_RECIPE_M \ 85 + (0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) 86 + #define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT 0x0ULL 87 + 88 + #define ICE_FXD_FLTR_QW0_DROP_S 40 89 + #define 
ICE_FXD_FLTR_QW0_DROP_M BIT_ULL(ICE_FXD_FLTR_QW0_DROP_S) 90 + #define ICE_FXD_FLTR_QW0_DROP_NO 0x0ULL 91 + #define ICE_FXD_FLTR_QW0_DROP_YES 0x1ULL 92 + 93 + #define ICE_FXD_FLTR_QW0_FLEX_PRI_S 41 94 + #define ICE_FXD_FLTR_QW0_FLEX_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_FLEX_PRI_S) 95 + #define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE 0x0ULL 96 + 97 + #define ICE_FXD_FLTR_QW0_FLEX_MDID_S 44 98 + #define ICE_FXD_FLTR_QW0_FLEX_MDID_M (0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S) 99 + #define ICE_FXD_FLTR_QW0_FLEX_MDID0 0x0ULL 100 + 101 + #define ICE_FXD_FLTR_QW0_FLEX_VAL_S 48 102 + #define ICE_FXD_FLTR_QW0_FLEX_VAL_M \ 103 + (0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S) 104 + #define ICE_FXD_FLTR_QW0_FLEX_VAL0 0x0ULL 105 + 106 + #define ICE_FXD_FLTR_QW1_DTYPE_S 0 107 + #define ICE_FXD_FLTR_QW1_DTYPE_M (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S) 108 + #define ICE_FXD_FLTR_QW1_PCMD_S 4 109 + #define ICE_FXD_FLTR_QW1_PCMD_M BIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S) 110 + #define ICE_FXD_FLTR_QW1_PCMD_ADD 0x0ULL 111 + #define ICE_FXD_FLTR_QW1_PCMD_REMOVE 0x1ULL 112 + 113 + #define ICE_FXD_FLTR_QW1_PROF_PRI_S 5 114 + #define ICE_FXD_FLTR_QW1_PROF_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S) 115 + #define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO 0x0ULL 116 + 117 + #define ICE_FXD_FLTR_QW1_PROF_S 8 118 + #define ICE_FXD_FLTR_QW1_PROF_M (0x3FULL << ICE_FXD_FLTR_QW1_PROF_S) 119 + #define ICE_FXD_FLTR_QW1_PROF_ZERO 0x0ULL 120 + 121 + #define ICE_FXD_FLTR_QW1_FD_VSI_S 14 122 + #define ICE_FXD_FLTR_QW1_FD_VSI_M (0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S) 123 + #define ICE_FXD_FLTR_QW1_SWAP_S 24 124 + #define ICE_FXD_FLTR_QW1_SWAP_M BIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S) 125 + #define ICE_FXD_FLTR_QW1_SWAP_NOT_SET 0x0ULL 126 + #define ICE_FXD_FLTR_QW1_SWAP_SET 0x1ULL 127 + 128 + #define ICE_FXD_FLTR_QW1_FDID_PRI_S 25 129 + #define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S) 130 + #define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL 131 + 132 + #define ICE_FXD_FLTR_QW1_FDID_MDID_S 28 133 + #define 
ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S) 134 + #define ICE_FXD_FLTR_QW1_FDID_MDID_FD 0x05ULL 135 + 136 + #define ICE_FXD_FLTR_QW1_FDID_S 32 137 + #define ICE_FXD_FLTR_QW1_FDID_M \ 138 + (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S) 139 + #define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL 140 + 43 141 struct ice_rx_ptype_decoded { 44 142 u32 ptype:10; 45 143 u32 known:1; ··· 444 346 enum ice_tx_desc_dtype_value { 445 347 ICE_TX_DESC_DTYPE_DATA = 0x0, 446 348 ICE_TX_DESC_DTYPE_CTX = 0x1, 349 + ICE_TX_DESC_DTYPE_FLTR_PROG = 0x8, 447 350 /* DESC_DONE - HW has completed write-back of descriptor */ 448 351 ICE_TX_DESC_DTYPE_DESC_DONE = 0xF, 449 352 }; ··· 456 357 ICE_TX_DESC_CMD_EOP = 0x0001, 457 358 ICE_TX_DESC_CMD_RS = 0x0002, 458 359 ICE_TX_DESC_CMD_IL2TAG1 = 0x0008, 360 + ICE_TX_DESC_CMD_DUMMY = 0x0010, 459 361 ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, 460 362 ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, 461 363 ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, 462 364 ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, 463 365 ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, 464 366 ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, 367 + ICE_TX_DESC_CMD_RE = 0x0400, 465 368 }; 466 369 467 370 #define ICE_TXD_QW1_OFFSET_S 16
+198 -30
drivers/net/ethernet/intel/ice/ice_lib.c
··· 19 19 return "ICE_VSI_PF"; 20 20 case ICE_VSI_VF: 21 21 return "ICE_VSI_VF"; 22 + case ICE_VSI_CTRL: 23 + return "ICE_VSI_CTRL"; 22 24 case ICE_VSI_LB: 23 25 return "ICE_VSI_LB"; 24 26 default: ··· 125 123 { 126 124 switch (vsi->type) { 127 125 case ICE_VSI_PF: 126 + case ICE_VSI_CTRL: 128 127 case ICE_VSI_LB: 129 128 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; 130 129 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; ··· 189 186 * original vector count 190 187 */ 191 188 vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF; 189 + break; 190 + case ICE_VSI_CTRL: 191 + vsi->alloc_txq = 1; 192 + vsi->alloc_rxq = 1; 193 + vsi->num_q_vectors = 1; 192 194 break; 193 195 case ICE_VSI_LB: 194 196 vsi->alloc_txq = 1; ··· 330 322 /* updates the PF for this cleared VSI */ 331 323 332 324 pf->vsi[vsi->idx] = NULL; 333 - if (vsi->idx < pf->next_vsi) 325 + if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) 334 326 pf->next_vsi = vsi->idx; 335 327 336 328 ice_vsi_free_arrays(vsi); ··· 338 330 devm_kfree(dev, vsi); 339 331 340 332 return 0; 333 + } 334 + 335 + /** 336 + * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI 337 + * @irq: interrupt number 338 + * @data: pointer to a q_vector 339 + */ 340 + static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data) 341 + { 342 + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 343 + 344 + if (!q_vector->tx.ring) 345 + return IRQ_HANDLED; 346 + 347 + #define FDIR_RX_DESC_CLEAN_BUDGET 64 348 + ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET); 349 + ice_clean_ctrl_tx_irq(q_vector->tx.ring); 350 + 351 + return IRQ_HANDLED; 341 352 } 342 353 343 354 /** ··· 410 383 vsi->back = pf; 411 384 set_bit(__ICE_DOWN, vsi->state); 412 385 413 - vsi->idx = pf->next_vsi; 414 - 415 386 if (vsi_type == ICE_VSI_VF) 416 387 ice_vsi_set_num_qs(vsi, vf_id); 417 388 else ··· 422 397 423 398 /* Setup default MSIX irq handler for VSI */ 424 399 vsi->irq_handler = ice_msix_clean_rings; 400 + 
break; 401 + case ICE_VSI_CTRL: 402 + if (ice_vsi_alloc_arrays(vsi)) 403 + goto err_rings; 404 + 405 + /* Setup ctrl VSI MSIX irq handler */ 406 + vsi->irq_handler = ice_msix_clean_ctrl_vsi; 425 407 break; 426 408 case ICE_VSI_VF: 427 409 if (ice_vsi_alloc_arrays(vsi)) ··· 443 411 goto unlock_pf; 444 412 } 445 413 446 - /* fill VSI slot in the PF struct */ 447 - pf->vsi[pf->next_vsi] = vsi; 414 + if (vsi->type == ICE_VSI_CTRL) { 415 + /* Use the last VSI slot as the index for the control VSI */ 416 + vsi->idx = pf->num_alloc_vsi - 1; 417 + pf->ctrl_vsi_idx = vsi->idx; 418 + pf->vsi[vsi->idx] = vsi; 419 + } else { 420 + /* fill slot and make note of the index */ 421 + vsi->idx = pf->next_vsi; 422 + pf->vsi[pf->next_vsi] = vsi; 448 423 449 - /* prepare pf->next_vsi for next use */ 450 - pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, 451 - pf->next_vsi); 424 + /* prepare pf->next_vsi for next use */ 425 + pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, 426 + pf->next_vsi); 427 + } 452 428 goto unlock_pf; 453 429 454 430 err_rings: ··· 465 425 unlock_pf: 466 426 mutex_unlock(&pf->sw_mutex); 467 427 return vsi; 428 + } 429 + 430 + /** 431 + * ice_alloc_fd_res - Allocate FD resource for a VSI 432 + * @vsi: pointer to the ice_vsi 433 + * 434 + * This allocates the FD resources 435 + * 436 + * Returns 0 on success, -EPERM on no-op or -EIO on failure 437 + */ 438 + static int ice_alloc_fd_res(struct ice_vsi *vsi) 439 + { 440 + struct ice_pf *pf = vsi->back; 441 + u32 g_val, b_val; 442 + 443 + /* Flow Director filters are only allocated/assigned to the PF VSI which 444 + * passes the traffic. 
The CTRL VSI is only used to add/delete filters 445 + * so we don't allocate resources to it 446 + */ 447 + 448 + /* FD filters from guaranteed pool per VSI */ 449 + g_val = pf->hw.func_caps.fd_fltr_guar; 450 + if (!g_val) 451 + return -EPERM; 452 + 453 + /* FD filters from best effort pool */ 454 + b_val = pf->hw.func_caps.fd_fltr_best_effort; 455 + if (!b_val) 456 + return -EPERM; 457 + 458 + if (vsi->type != ICE_VSI_PF) 459 + return -EPERM; 460 + 461 + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) 462 + return -EPERM; 463 + 464 + vsi->num_gfltr = g_val / pf->num_alloc_vsi; 465 + 466 + /* each VSI gets same "best_effort" quota */ 467 + vsi->num_bfltr = b_val; 468 + 469 + return 0; 468 470 } 469 471 470 472 /** ··· 665 583 case ICE_VSI_LB: 666 584 break; 667 585 default: 668 - dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", 669 - vsi->type); 586 + dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n", 587 + ice_vsi_type_str(vsi->type)); 670 588 break; 671 589 } 672 590 } ··· 836 754 } 837 755 838 756 /** 757 + * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI 758 + * @ctxt: the VSI context being set 759 + * @vsi: the VSI being configured 760 + */ 761 + static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) 762 + { 763 + u8 dflt_q_group, dflt_q_prio; 764 + u16 dflt_q, report_q, val; 765 + 766 + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL) 767 + return; 768 + 769 + val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; 770 + ctxt->info.valid_sections |= cpu_to_le16(val); 771 + dflt_q = 0; 772 + dflt_q_group = 0; 773 + report_q = 0; 774 + dflt_q_prio = 0; 775 + 776 + /* enable flow director filtering/programming */ 777 + val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; 778 + ctxt->info.fd_options = cpu_to_le16(val); 779 + /* max of allocated flow director filters */ 780 + ctxt->info.max_fd_fltr_dedicated = 781 + cpu_to_le16(vsi->num_gfltr); 782 + /* max of shared flow director filters any VSI may program */ 783 + 
ctxt->info.max_fd_fltr_shared = 784 + cpu_to_le16(vsi->num_bfltr); 785 + /* default queue index within the VSI of the default FD */ 786 + val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) & 787 + ICE_AQ_VSI_FD_DEF_Q_M); 788 + /* target queue or queue group to the FD filter */ 789 + val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) & 790 + ICE_AQ_VSI_FD_DEF_GRP_M); 791 + ctxt->info.fd_def_q = cpu_to_le16(val); 792 + /* queue index on which FD filter completion is reported */ 793 + val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) & 794 + ICE_AQ_VSI_FD_REPORT_Q_M); 795 + /* priority of the default qindex action */ 796 + val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) & 797 + ICE_AQ_VSI_FD_DEF_PRIORITY_M); 798 + ctxt->info.fd_report_opt = cpu_to_le16(val); 799 + } 800 + 801 + /** 839 802 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI 840 803 * @ctxt: the VSI context being set 841 804 * @vsi: the VSI being configured ··· 905 778 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 906 779 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 907 780 break; 908 - case ICE_VSI_LB: 781 + default: 909 782 dev_dbg(dev, "Unsupported VSI type %s\n", 910 783 ice_vsi_type_str(vsi->type)); 911 - return; 912 - default: 913 - dev_warn(dev, "Unknown VSI type %d\n", vsi->type); 914 784 return; 915 785 } 916 786 ··· 940 816 941 817 ctxt->info = vsi->info; 942 818 switch (vsi->type) { 819 + case ICE_VSI_CTRL: 943 820 case ICE_VSI_LB: 944 821 case ICE_VSI_PF: 945 822 ctxt->flags = ICE_AQ_VSI_TYPE_PF; ··· 956 831 } 957 832 958 833 ice_set_dflt_vsi_ctx(ctxt); 834 + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) 835 + ice_set_fd_vsi_ctx(ctxt, vsi); 959 836 /* if the switch is in VEB mode, allow VSI loopback */ 960 837 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) 961 838 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 962 839 963 840 /* Set LUT type and HASH type if RSS is enabled */ 964 - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 841 + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) && 842 + vsi->type != 
ICE_VSI_CTRL) { 965 843 ice_set_rss_vsi_ctx(ctxt, vsi); 966 844 /* if updating VSI context, make sure to set valid_section: 967 845 * to indicate which section of VSI context being updated ··· 2114 1986 if (vsi->type == ICE_VSI_VF) 2115 1987 vsi->vf_id = vf_id; 2116 1988 1989 + ice_alloc_fd_res(vsi); 1990 + 2117 1991 if (ice_vsi_get_qs(vsi)) { 2118 1992 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", 2119 1993 vsi->idx); 2120 - goto unroll_get_qs; 1994 + goto unroll_vsi_alloc; 2121 1995 } 2122 1996 2123 1997 /* set RSS capabilities */ ··· 2134 2004 goto unroll_get_qs; 2135 2005 2136 2006 switch (vsi->type) { 2007 + case ICE_VSI_CTRL: 2137 2008 case ICE_VSI_PF: 2138 2009 ret = ice_vsi_alloc_q_vectors(vsi); 2139 2010 if (ret) ··· 2165 2034 2166 2035 ice_vsi_map_rings_to_vectors(vsi); 2167 2036 2168 - /* Do not exit if configuring RSS had an issue, at least 2169 - * receive traffic on first queue. Hence no need to capture 2170 - * return value 2171 - */ 2172 - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2173 - ice_vsi_cfg_rss_lut_key(vsi); 2174 - ice_vsi_set_rss_flow_fld(vsi); 2175 - } 2037 + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ 2038 + if (vsi->type != ICE_VSI_CTRL) 2039 + /* Do not exit if configuring RSS had an issue, at 2040 + * least receive traffic on first queue. 
Hence no 2041 + * need to capture return value 2042 + */ 2043 + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2044 + ice_vsi_cfg_rss_lut_key(vsi); 2045 + ice_vsi_set_rss_flow_fld(vsi); 2046 + } 2047 + ice_init_arfs(vsi); 2176 2048 break; 2177 2049 case ICE_VSI_VF: 2178 2050 /* VF driver will take care of creating netdev for this type and ··· 2256 2122 ice_vsi_delete(vsi); 2257 2123 unroll_get_qs: 2258 2124 ice_vsi_put_qs(vsi); 2125 + unroll_vsi_alloc: 2259 2126 ice_vsi_clear(vsi); 2260 2127 2261 2128 return NULL; ··· 2409 2274 if (!locked) 2410 2275 rtnl_unlock(); 2411 2276 } 2277 + } else if (vsi->type == ICE_VSI_CTRL) { 2278 + err = ice_vsi_open_ctrl(vsi); 2412 2279 } 2413 2280 2414 2281 return err; ··· 2440 2303 } else { 2441 2304 ice_vsi_close(vsi); 2442 2305 } 2306 + } else if (vsi->type == ICE_VSI_CTRL) { 2307 + ice_vsi_close(vsi); 2443 2308 } 2444 2309 } 2445 2310 ··· 2749 2610 goto err_vsi; 2750 2611 2751 2612 ice_vsi_get_qs(vsi); 2613 + 2614 + ice_alloc_fd_res(vsi); 2752 2615 ice_vsi_set_tc_cfg(vsi); 2753 2616 2754 2617 /* Initialize VSI struct elements and create VSI in FW */ ··· 2759 2618 goto err_vsi; 2760 2619 2761 2620 switch (vsi->type) { 2621 + case ICE_VSI_CTRL: 2762 2622 case ICE_VSI_PF: 2763 2623 ret = ice_vsi_alloc_q_vectors(vsi); 2764 2624 if (ret) ··· 2784 2642 if (ret) 2785 2643 goto err_vectors; 2786 2644 } 2787 - /* Do not exit if configuring RSS had an issue, at least 2788 - * receive traffic on first queue. Hence no need to capture 2789 - * return value 2790 - */ 2791 - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2792 - ice_vsi_cfg_rss_lut_key(vsi); 2645 + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ 2646 + if (vsi->type != ICE_VSI_CTRL) 2647 + /* Do not exit if configuring RSS had an issue, at 2648 + * least receive traffic on first queue. 
Hence no 2649 + * need to capture return value 2650 + */ 2651 + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2652 + ice_vsi_cfg_rss_lut_key(vsi); 2793 2653 break; 2794 2654 case ICE_VSI_VF: 2795 2655 ret = ice_vsi_alloc_q_vectors(vsi); ··· 2990 2846 u64_stats_update_begin(&rx_ring->syncp); 2991 2847 ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes); 2992 2848 u64_stats_update_end(&rx_ring->syncp); 2849 + } 2850 + 2851 + /** 2852 + * ice_status_to_errno - convert from enum ice_status to Linux errno 2853 + * @err: ice_status value to convert 2854 + */ 2855 + int ice_status_to_errno(enum ice_status err) 2856 + { 2857 + switch (err) { 2858 + case ICE_SUCCESS: 2859 + return 0; 2860 + case ICE_ERR_DOES_NOT_EXIST: 2861 + return -ENOENT; 2862 + case ICE_ERR_OUT_OF_RANGE: 2863 + return -ENOTTY; 2864 + case ICE_ERR_PARAM: 2865 + return -EINVAL; 2866 + case ICE_ERR_NO_MEMORY: 2867 + return -ENOMEM; 2868 + case ICE_ERR_MAX_LIMIT: 2869 + return -EAGAIN; 2870 + default: 2871 + return -EINVAL; 2872 + } 2993 2873 } 2994 2874 2995 2875 /**
+2
drivers/net/ethernet/intel/ice/ice_lib.h
··· 92 92 93 93 void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); 94 94 95 + int ice_status_to_errno(enum ice_status err); 96 + 95 97 u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); 96 98 97 99 enum ice_status
+256 -16
drivers/net/ethernet/intel/ice/ice_main.c
··· 452 452 ice_prepare_for_reset(struct ice_pf *pf) 453 453 { 454 454 struct ice_hw *hw = &pf->hw; 455 - int i; 455 + unsigned int i; 456 456 457 457 /* already prepared for reset */ 458 458 if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) ··· 1113 1113 * 1114 1114 * If not already scheduled, this puts the task into the work queue. 1115 1115 */ 1116 - static void ice_service_task_schedule(struct ice_pf *pf) 1116 + void ice_service_task_schedule(struct ice_pf *pf) 1117 1117 { 1118 1118 if (!test_bit(__ICE_SERVICE_DIS, pf->state) && 1119 1119 !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && ··· 1188 1188 { 1189 1189 struct device *dev = ice_pf_to_dev(pf); 1190 1190 struct ice_hw *hw = &pf->hw; 1191 + unsigned int i; 1191 1192 u32 reg; 1192 - int i; 1193 1193 1194 1194 if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) { 1195 1195 /* Since the VF MDD event logging is rate limited, check if ··· 1322 1322 * PF can be configured to reset the VF through ethtool 1323 1323 * private flag mdd-auto-reset-vf. 1324 1324 */ 1325 - if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) 1325 + if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { 1326 + /* VF MDD event counters will be cleared by 1327 + * reset, so print the event prior to reset. 
1328 + */ 1329 + ice_print_vf_rx_mdd_event(vf); 1326 1330 ice_reset_vf(&pf->vf[i], false); 1331 + } 1327 1332 } 1328 1333 } 1329 1334 ··· 1488 1483 1489 1484 ice_process_vflr_event(pf); 1490 1485 ice_clean_mailboxq_subtask(pf); 1491 - 1486 + ice_sync_arfs_fltrs(pf); 1492 1487 /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ 1493 1488 ice_service_task_complete(pf); 1494 1489 ··· 1647 1642 } 1648 1643 1649 1644 /* register for affinity change notifications */ 1650 - q_vector->affinity_notify.notify = ice_irq_affinity_notify; 1651 - q_vector->affinity_notify.release = ice_irq_affinity_release; 1652 - irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 1645 + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { 1646 + struct irq_affinity_notify *affinity_notify; 1647 + 1648 + affinity_notify = &q_vector->affinity_notify; 1649 + affinity_notify->notify = ice_irq_affinity_notify; 1650 + affinity_notify->release = ice_irq_affinity_release; 1651 + irq_set_affinity_notifier(irq_num, affinity_notify); 1652 + } 1653 1653 1654 1654 /* assign the mask for this irq */ 1655 1655 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); ··· 1666 1656 free_q_irqs: 1667 1657 while (vector) { 1668 1658 vector--; 1669 - irq_num = pf->msix_entries[base + vector].vector, 1670 - irq_set_affinity_notifier(irq_num, NULL); 1659 + irq_num = pf->msix_entries[base + vector].vector; 1660 + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 1661 + irq_set_affinity_notifier(irq_num, NULL); 1671 1662 irq_set_affinity_hint(irq_num, NULL); 1672 1663 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); 1673 1664 } ··· 2330 2319 2331 2320 dflt_features = NETIF_F_SG | 2332 2321 NETIF_F_HIGHDMA | 2322 + NETIF_F_NTUPLE | 2333 2323 NETIF_F_RXHASH; 2334 2324 2335 2325 csumo_features = NETIF_F_RXCSUM | ··· 2471 2459 } 2472 2460 2473 2461 /** 2462 + * ice_ctrl_vsi_setup - Set up a control VSI 2463 + * @pf: board private structure 2464 + * @pi: pointer to the port_info instance 2465 + * 2466 + * Returns 
pointer to the successfully allocated VSI software struct 2467 + * on success, otherwise returns NULL on failure. 2468 + */ 2469 + static struct ice_vsi * 2470 + ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 2471 + { 2472 + return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); 2473 + } 2474 + 2475 + /** 2474 2476 * ice_lb_vsi_setup - Set up a loopback VSI 2475 2477 * @pf: board private structure 2476 2478 * @pi: pointer to the port_info instance ··· 2622 2596 */ 2623 2597 ice_napi_add(vsi); 2624 2598 2599 + status = ice_set_cpu_rx_rmap(vsi); 2600 + if (status) { 2601 + dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", 2602 + vsi->vsi_num, status); 2603 + status = -EINVAL; 2604 + goto unroll_napi_add; 2605 + } 2625 2606 status = ice_init_mac_fltr(pf); 2626 2607 if (status) 2627 - goto unroll_napi_add; 2608 + goto free_cpu_rx_map; 2628 2609 2629 2610 return status; 2611 + 2612 + free_cpu_rx_map: 2613 + ice_free_cpu_rx_rmap(vsi); 2630 2614 2631 2615 unroll_napi_add: 2632 2616 if (vsi) { ··· 2742 2706 if (func_caps->common_cap.rss_table_size) 2743 2707 set_bit(ICE_FLAG_RSS_ENA, pf->flags); 2744 2708 2709 + clear_bit(ICE_FLAG_FD_ENA, pf->flags); 2710 + if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 2711 + u16 unused; 2712 + 2713 + /* ctrl_vsi_idx will be set to a valid value when flow director 2714 + * is setup by ice_init_fdir 2715 + */ 2716 + pf->ctrl_vsi_idx = ICE_NO_VSI; 2717 + set_bit(ICE_FLAG_FD_ENA, pf->flags); 2718 + /* force guaranteed filter pool for PF */ 2719 + ice_alloc_fd_guar_item(&pf->hw, &unused, 2720 + func_caps->fd_fltr_guar); 2721 + /* force shared filter pool for PF */ 2722 + ice_alloc_fd_shrd_item(&pf->hw, &unused, 2723 + func_caps->fd_fltr_best_effort); 2724 + } 2725 + 2745 2726 pf->max_pf_txqs = func_caps->common_cap.num_txq; 2746 2727 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 2747 2728 } ··· 2825 2772 v_budget += needed; 2826 2773 v_left -= needed; 2827 2774 2775 + 
/* reserve one vector for flow director */ 2776 + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 2777 + needed = ICE_FDIR_MSIX; 2778 + if (v_left < needed) 2779 + goto no_hw_vecs_left_err; 2780 + v_budget += needed; 2781 + v_left -= needed; 2782 + } 2783 + 2828 2784 pf->msix_entries = devm_kcalloc(dev, v_budget, 2829 2785 sizeof(*pf->msix_entries), GFP_KERNEL); 2830 2786 ··· 2858 2796 if (v_actual < v_budget) { 2859 2797 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", 2860 2798 v_budget, v_actual); 2861 - /* 2 vectors for LAN (traffic + OICR) */ 2799 + /* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */ 2862 2800 #define ICE_MIN_LAN_VECS 2 2801 + #define ICE_MIN_RDMA_VECS 2 2802 + #define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1) 2863 2803 2864 2804 if (v_actual < ICE_MIN_LAN_VECS) { 2865 2805 /* error if we can't get minimum vectors */ ··· 3052 2988 *status = ICE_ERR_NOT_SUPPORTED; 3053 2989 } 3054 2990 break; 2991 + case ICE_ERR_FW_DDP_MISMATCH: 2992 + dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 2993 + break; 3055 2994 case ICE_ERR_BUF_TOO_SHORT: 3056 2995 case ICE_ERR_CFG: 3057 2996 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); ··· 3167 3100 strscpy((char *)dv.driver_string, DRV_VERSION, 3168 3101 sizeof(dv.driver_string)); 3169 3102 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 3103 + } 3104 + 3105 + /** 3106 + * ice_init_fdir - Initialize flow director VSI and configuration 3107 + * @pf: pointer to the PF instance 3108 + * 3109 + * returns 0 on success, negative on error 3110 + */ 3111 + static int ice_init_fdir(struct ice_pf *pf) 3112 + { 3113 + struct device *dev = ice_pf_to_dev(pf); 3114 + struct ice_vsi *ctrl_vsi; 3115 + int err; 3116 + 3117 + /* Side Band Flow Director needs to have a control VSI. 3118 + * Allocate it and store it in the PF. 
3119 + */ 3120 + ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); 3121 + if (!ctrl_vsi) { 3122 + dev_dbg(dev, "could not create control VSI\n"); 3123 + return -ENOMEM; 3124 + } 3125 + 3126 + err = ice_vsi_open_ctrl(ctrl_vsi); 3127 + if (err) { 3128 + dev_dbg(dev, "could not open control VSI\n"); 3129 + goto err_vsi_open; 3130 + } 3131 + 3132 + mutex_init(&pf->hw.fdir_fltr_lock); 3133 + 3134 + err = ice_fdir_create_dflt_rules(pf); 3135 + if (err) 3136 + goto err_fdir_rule; 3137 + 3138 + return 0; 3139 + 3140 + err_fdir_rule: 3141 + ice_fdir_release_flows(&pf->hw); 3142 + ice_vsi_close(ctrl_vsi); 3143 + err_vsi_open: 3144 + ice_vsi_release(ctrl_vsi); 3145 + if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 3146 + pf->vsi[pf->ctrl_vsi_idx] = NULL; 3147 + pf->ctrl_vsi_idx = ICE_NO_VSI; 3148 + } 3149 + return err; 3170 3150 } 3171 3151 3172 3152 /** ··· 3476 3362 3477 3363 /* initialize DDP driven features */ 3478 3364 3365 + /* Note: Flow director init failure is non-fatal to load */ 3366 + if (ice_init_fdir(pf)) 3367 + dev_err(dev, "could not initialize flow director\n"); 3368 + 3479 3369 /* Note: DCB init failure is non-fatal to load */ 3480 3370 if (ice_init_pf_dcb(pf, false)) { 3481 3371 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); ··· 3542 3424 set_bit(__ICE_DOWN, pf->state); 3543 3425 ice_service_task_stop(pf); 3544 3426 3427 + mutex_destroy(&(&pf->hw)->fdir_fltr_lock); 3428 + if (!ice_is_safe_mode(pf)) 3429 + ice_remove_arfs(pf); 3545 3430 ice_devlink_destroy_port(pf); 3546 3431 ice_vsi_release_all(pf); 3547 3432 ice_free_irq_msix_misc(pf); ··· 4061 3940 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 4062 3941 ret = ice_cfg_vlan_pruning(vsi, false, false); 4063 3942 3943 + if ((features & NETIF_F_NTUPLE) && 3944 + !(netdev->features & NETIF_F_NTUPLE)) { 3945 + ice_vsi_manage_fdir(vsi, true); 3946 + ice_init_arfs(vsi); 3947 + } else if (!(features & NETIF_F_NTUPLE) && 3948 + (netdev->features & NETIF_F_NTUPLE)) { 3949 + ice_vsi_manage_fdir(vsi, false); 3950 + 
ice_clear_arfs(vsi); 3951 + } 3952 + 4064 3953 return ret; 4065 3954 } 4066 3955 ··· 4310 4179 { 4311 4180 struct ice_hw_port_stats *prev_ps, *cur_ps; 4312 4181 struct ice_hw *hw = &pf->hw; 4182 + u16 fd_ctr_base; 4313 4183 u8 port; 4314 4184 4315 4185 port = hw->port_info->lport; ··· 4399 4267 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 4400 4268 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 4401 4269 4270 + fd_ctr_base = hw->fd_ctr_base; 4271 + 4272 + ice_stat_update40(hw, 4273 + GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 4274 + pf->stat_prev_loaded, &prev_ps->fd_sb_match, 4275 + &cur_ps->fd_sb_match); 4402 4276 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 4403 4277 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 4404 4278 ··· 4447 4309 4448 4310 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 4449 4311 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 4312 + 4313 + cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; 4450 4314 4451 4315 pf->stat_prev_loaded = true; 4452 4316 } ··· 4630 4490 if (err) 4631 4491 break; 4632 4492 } 4493 + 4494 + return err; 4495 + } 4496 + 4497 + /** 4498 + * ice_vsi_open_ctrl - open control VSI for use 4499 + * @vsi: the VSI to open 4500 + * 4501 + * Initialization of the Control VSI 4502 + * 4503 + * Returns 0 on success, negative value on error 4504 + */ 4505 + int ice_vsi_open_ctrl(struct ice_vsi *vsi) 4506 + { 4507 + char int_name[ICE_INT_NAME_STR_LEN]; 4508 + struct ice_pf *pf = vsi->back; 4509 + struct device *dev; 4510 + int err; 4511 + 4512 + dev = ice_pf_to_dev(pf); 4513 + /* allocate descriptors */ 4514 + err = ice_vsi_setup_tx_rings(vsi); 4515 + if (err) 4516 + goto err_setup_tx; 4517 + 4518 + err = ice_vsi_setup_rx_rings(vsi); 4519 + if (err) 4520 + goto err_setup_rx; 4521 + 4522 + err = ice_vsi_cfg(vsi); 4523 + if (err) 4524 + goto err_setup_rx; 4525 + 4526 + snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 4527 + dev_driver_string(dev), 
dev_name(dev)); 4528 + err = ice_vsi_req_irq_msix(vsi, int_name); 4529 + if (err) 4530 + goto err_setup_rx; 4531 + 4532 + ice_vsi_cfg_msix(vsi); 4533 + 4534 + err = ice_vsi_start_all_rx_rings(vsi); 4535 + if (err) 4536 + goto err_up_complete; 4537 + 4538 + clear_bit(__ICE_DOWN, vsi->state); 4539 + ice_vsi_ena_irq(vsi); 4540 + 4541 + return 0; 4542 + 4543 + err_up_complete: 4544 + ice_down(vsi); 4545 + err_setup_rx: 4546 + ice_vsi_free_rx_rings(vsi); 4547 + err_setup_tx: 4548 + ice_vsi_free_tx_rings(vsi); 4633 4549 4634 4550 return err; 4635 4551 } ··· 4909 4713 goto err_init_ctrlq; 4910 4714 } 4911 4715 4716 + ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 4717 + if (ret) { 4718 + dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); 4719 + goto err_init_ctrlq; 4720 + } 4721 + 4912 4722 err = ice_sched_init_port(hw->port_info); 4913 4723 if (err) 4914 4724 goto err_sched_init_port; ··· 4928 4726 if (err) { 4929 4727 dev_err(dev, "misc vector setup failed: %d\n", err); 4930 4728 goto err_sched_init_port; 4729 + } 4730 + 4731 + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 4732 + wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 4733 + if (!rd32(hw, PFQF_FD_SIZE)) { 4734 + u16 unused, guar, b_effort; 4735 + 4736 + guar = hw->func_caps.fd_fltr_guar; 4737 + b_effort = hw->func_caps.fd_fltr_best_effort; 4738 + 4739 + /* force guaranteed filter pool for PF */ 4740 + ice_alloc_fd_guar_item(hw, &unused, guar); 4741 + /* force shared filter pool for PF */ 4742 + ice_alloc_fd_shrd_item(hw, &unused, b_effort); 4743 + } 4931 4744 } 4932 4745 4933 4746 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) ··· 4961 4744 dev_err(dev, "VF VSI rebuild failed: %d\n", err); 4962 4745 goto err_vsi_rebuild; 4963 4746 } 4747 + } 4748 + 4749 + /* If Flow Director is active */ 4750 + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 4751 + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 4752 + if (err) { 4753 + dev_err(dev, "control VSI rebuild failed: %d\n", err); 4754 + goto 
err_vsi_rebuild; 4755 + } 4756 + 4757 + /* replay HW Flow Director recipes */ 4758 + if (hw->fdir_prof) 4759 + ice_fdir_replay_flows(hw); 4760 + 4761 + /* replay Flow Director filters */ 4762 + ice_fdir_replay_fltrs(pf); 4763 + 4764 + ice_rebuild_arfs(pf); 4964 4765 } 4965 4766 4966 4767 ice_update_pf_netdev_link(pf); ··· 5189 4954 return "ICE_ERR_HW_TABLE"; 5190 4955 case ICE_ERR_DOES_NOT_EXIST: 5191 4956 return "ICE_ERR_DOES_NOT_EXIST"; 4957 + case ICE_ERR_FW_DDP_MISMATCH: 4958 + return "ICE_ERR_FW_DDP_MISMATCH"; 5192 4959 case ICE_ERR_AQ_ERROR: 5193 4960 return "ICE_ERR_AQ_ERROR"; 5194 4961 case ICE_ERR_AQ_TIMEOUT: ··· 5232 4995 if (status) { 5233 4996 dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n", 5234 4997 ice_stat_str(status), 5235 - ice_aq_str(hw->adminq.rq_last_status)); 4998 + ice_aq_str(hw->adminq.sq_last_status)); 5236 4999 return -EIO; 5237 5000 } 5238 5001 } ··· 5243 5006 if (status) { 5244 5007 dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", 5245 5008 ice_stat_str(status), 5246 - ice_aq_str(hw->adminq.rq_last_status)); 5009 + ice_aq_str(hw->adminq.sq_last_status)); 5247 5010 return -EIO; 5248 5011 } 5249 5012 } ··· 5276 5039 if (status) { 5277 5040 dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n", 5278 5041 ice_stat_str(status), 5279 - ice_aq_str(hw->adminq.rq_last_status)); 5042 + ice_aq_str(hw->adminq.sq_last_status)); 5280 5043 return -EIO; 5281 5044 } 5282 5045 } ··· 5287 5050 if (status) { 5288 5051 dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n", 5289 5052 ice_stat_str(status), 5290 - ice_aq_str(hw->adminq.rq_last_status)); 5053 + ice_aq_str(hw->adminq.sq_last_status)); 5291 5054 return -EIO; 5292 5055 } 5293 5056 } ··· 5760 5523 .ndo_bridge_setlink = ice_bridge_setlink, 5761 5524 .ndo_fdb_add = ice_fdb_add, 5762 5525 .ndo_fdb_del = ice_fdb_del, 5526 + #ifdef CONFIG_RFS_ACCEL 5527 + .ndo_rx_flow_steer = ice_rx_flow_steer, 5528 + #endif 5763 5529 .ndo_tx_timeout = ice_tx_timeout, 5764 5530 .ndo_bpf = ice_xdp, 5765 5531 
.ndo_xdp_xmit = ice_xdp_xmit,
+2
drivers/net/ethernet/intel/ice/ice_protocol_type.h
··· 12 12 */ 13 13 enum ice_prot_id { 14 14 ICE_PROT_ID_INVAL = 0, 15 + ICE_PROT_MAC_OF_OR_S = 1, 15 16 ICE_PROT_IPV4_OF_OR_S = 32, 16 17 ICE_PROT_IPV4_IL = 33, 17 18 ICE_PROT_IPV6_OF_OR_S = 40, 18 19 ICE_PROT_IPV6_IL = 41, 19 20 ICE_PROT_TCP_IL = 49, 21 + ICE_PROT_UDP_OF = 52, 20 22 ICE_PROT_UDP_IL_OR_S = 53, 21 23 ICE_PROT_GRE_OF = 64, 22 24 ICE_PROT_SCTP_IL = 96,
+2
drivers/net/ethernet/intel/ice/ice_status.h
··· 27 27 ICE_ERR_MAX_LIMIT = -17, 28 28 ICE_ERR_RESET_ONGOING = -18, 29 29 ICE_ERR_HW_TABLE = -19, 30 + ICE_ERR_FW_DDP_MISMATCH = -20, 31 + 30 32 ICE_ERR_NVM_CHECKSUM = -51, 31 33 ICE_ERR_BUF_TOO_SHORT = -52, 32 34 ICE_ERR_NVM_BLANK_MODE = -53,
+75
drivers/net/ethernet/intel/ice/ice_switch.c
··· 2678 2678 } 2679 2679 2680 2680 /** 2681 + * ice_alloc_res_cntr - allocating resource counter 2682 + * @hw: pointer to the hardware structure 2683 + * @type: type of resource 2684 + * @alloc_shared: if set it is shared else dedicated 2685 + * @num_items: number of entries requested for FD resource type 2686 + * @counter_id: counter index returned by AQ call 2687 + */ 2688 + enum ice_status 2689 + ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, 2690 + u16 *counter_id) 2691 + { 2692 + struct ice_aqc_alloc_free_res_elem *buf; 2693 + enum ice_status status; 2694 + u16 buf_len; 2695 + 2696 + /* Allocate resource */ 2697 + buf_len = sizeof(*buf); 2698 + buf = kzalloc(buf_len, GFP_KERNEL); 2699 + if (!buf) 2700 + return ICE_ERR_NO_MEMORY; 2701 + 2702 + buf->num_elems = cpu_to_le16(num_items); 2703 + buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & 2704 + ICE_AQC_RES_TYPE_M) | alloc_shared); 2705 + 2706 + status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2707 + ice_aqc_opc_alloc_res, NULL); 2708 + if (status) 2709 + goto exit; 2710 + 2711 + *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp); 2712 + 2713 + exit: 2714 + kfree(buf); 2715 + return status; 2716 + } 2717 + 2718 + /** 2719 + * ice_free_res_cntr - free resource counter 2720 + * @hw: pointer to the hardware structure 2721 + * @type: type of resource 2722 + * @alloc_shared: if set it is shared else dedicated 2723 + * @num_items: number of entries to be freed for FD resource type 2724 + * @counter_id: counter ID resource which needs to be freed 2725 + */ 2726 + enum ice_status 2727 + ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, 2728 + u16 counter_id) 2729 + { 2730 + struct ice_aqc_alloc_free_res_elem *buf; 2731 + enum ice_status status; 2732 + u16 buf_len; 2733 + 2734 + /* Free resource */ 2735 + buf_len = sizeof(*buf); 2736 + buf = kzalloc(buf_len, GFP_KERNEL); 2737 + if (!buf) 2738 + return ICE_ERR_NO_MEMORY; 2739 + 2740 + 
buf->num_elems = cpu_to_le16(num_items); 2741 + buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & 2742 + ICE_AQC_RES_TYPE_M) | alloc_shared); 2743 + buf->elem[0].e.sw_resp = cpu_to_le16(counter_id); 2744 + 2745 + status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2746 + ice_aqc_opc_free_res, NULL); 2747 + if (status) 2748 + ice_debug(hw, ICE_DBG_SW, 2749 + "counter resource could not be freed\n"); 2750 + 2751 + kfree(buf); 2752 + return status; 2753 + } 2754 + 2755 + /** 2681 2756 * ice_replay_vsi_fltr - Replay filters for requested VSI 2682 2757 * @hw: pointer to the hardware structure 2683 2758 * @vsi_handle: driver VSI handle
+7
drivers/net/ethernet/intel/ice/ice_switch.h
··· 208 208 /* Switch config */ 209 209 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); 210 210 211 + enum ice_status 212 + ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, 213 + u16 *counter_id); 214 + enum ice_status 215 + ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, 216 + u16 counter_id); 217 + 211 218 /* Switch/bridge related commands */ 212 219 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); 213 220 enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
+179 -3
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 15 15 16 16 #define ICE_RX_HDR_SIZE 256 17 17 18 + #define FDIR_DESC_RXDID 0x40 19 + #define ICE_FDIR_CLEAN_DELAY 10 20 + 21 + /** 22 + * ice_prgm_fdir_fltr - Program a Flow Director filter 23 + * @vsi: VSI to send dummy packet 24 + * @fdir_desc: flow director descriptor 25 + * @raw_packet: allocated buffer for flow director 26 + */ 27 + int 28 + ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, 29 + u8 *raw_packet) 30 + { 31 + struct ice_tx_buf *tx_buf, *first; 32 + struct ice_fltr_desc *f_desc; 33 + struct ice_tx_desc *tx_desc; 34 + struct ice_ring *tx_ring; 35 + struct device *dev; 36 + dma_addr_t dma; 37 + u32 td_cmd; 38 + u16 i; 39 + 40 + /* VSI and Tx ring */ 41 + if (!vsi) 42 + return -ENOENT; 43 + tx_ring = vsi->tx_rings[0]; 44 + if (!tx_ring || !tx_ring->desc) 45 + return -ENOENT; 46 + dev = tx_ring->dev; 47 + 48 + /* we are using two descriptors to add/del a filter and we can wait */ 49 + for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { 50 + if (!i) 51 + return -EAGAIN; 52 + msleep_interruptible(1); 53 + } 54 + 55 + dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE, 56 + DMA_TO_DEVICE); 57 + 58 + if (dma_mapping_error(dev, dma)) 59 + return -EINVAL; 60 + 61 + /* grab the next descriptor */ 62 + i = tx_ring->next_to_use; 63 + first = &tx_ring->tx_buf[i]; 64 + f_desc = ICE_TX_FDIRDESC(tx_ring, i); 65 + memcpy(f_desc, fdir_desc, sizeof(*f_desc)); 66 + 67 + i++; 68 + i = (i < tx_ring->count) ? i : 0; 69 + tx_desc = ICE_TX_DESC(tx_ring, i); 70 + tx_buf = &tx_ring->tx_buf[i]; 71 + 72 + i++; 73 + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 74 + 75 + memset(tx_buf, 0, sizeof(*tx_buf)); 76 + dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE); 77 + dma_unmap_addr_set(tx_buf, dma, dma); 78 + 79 + tx_desc->buf_addr = cpu_to_le64(dma); 80 + td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY | 81 + ICE_TX_DESC_CMD_RE; 82 + 83 + tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT; 84 + tx_buf->raw_buf = raw_packet; 85 + 86 + tx_desc->cmd_type_offset_bsz = 87 + ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0); 88 + 89 + /* Force memory write to complete before letting h/w know 90 + * there are new descriptors to fetch. 91 + */ 92 + wmb(); 93 + 94 + /* mark the data descriptor to be watched */ 95 + first->next_to_watch = tx_desc; 96 + 97 + writel(tx_ring->next_to_use, tx_ring->tail); 98 + 99 + return 0; 100 + } 101 + 18 102 /** 19 103 * ice_unmap_and_free_tx_buf - Release a Tx buffer 20 104 * @ring: the ring that owns the buffer ··· 108 24 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) 109 25 { 110 26 if (tx_buf->skb) { 111 - if (ice_ring_is_xdp(ring)) 27 + if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 28 + devm_kfree(ring->dev, tx_buf->raw_buf); 29 + else if (ice_ring_is_xdp(ring)) 112 30 page_frag_free(tx_buf->raw_buf); 113 31 else 114 32 dev_kfree_skb_any(tx_buf->skb); ··· 685 599 struct ice_rx_buf *bi; 686 600 687 601 /* do nothing if no valid netdev defined */ 688 - if (!rx_ring->netdev || !cleaned_count) 602 + if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || 603 + !cleaned_count) 689 604 return false; 690 605 691 606 /* get the Rx descriptor and buffer based on next_to_use */ ··· 1084 997 * 1085 998 * Returns amount of work completed 1086 999 */ 1087 - static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) 1000 + int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) 1088 1001 { 1089 1002 unsigned int total_rx_bytes = 0, total_rx_pkts = 0; 1090 1003 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); ··· 1126 1039 * DD bit is set. 
1127 1040 */ 1128 1041 dma_rmb(); 1042 + 1043 + if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 1044 + ice_put_rx_buf(rx_ring, NULL); 1045 + cleaned_count++; 1046 + continue; 1047 + } 1129 1048 1130 1049 size = le16_to_cpu(rx_desc->wb.pkt_len) & 1131 1050 ICE_RX_FLX_DESC_PKT_LEN_M; ··· 2470 2377 return NETDEV_TX_OK; 2471 2378 2472 2379 return ice_xmit_frame_ring(skb, tx_ring); 2380 + } 2381 + 2382 + /** 2383 + * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2384 + * @tx_ring: tx_ring to clean 2385 + */ 2386 + void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring) 2387 + { 2388 + struct ice_vsi *vsi = tx_ring->vsi; 2389 + s16 i = tx_ring->next_to_clean; 2390 + int budget = ICE_DFLT_IRQ_WORK; 2391 + struct ice_tx_desc *tx_desc; 2392 + struct ice_tx_buf *tx_buf; 2393 + 2394 + tx_buf = &tx_ring->tx_buf[i]; 2395 + tx_desc = ICE_TX_DESC(tx_ring, i); 2396 + i -= tx_ring->count; 2397 + 2398 + do { 2399 + struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2400 + 2401 + /* if next_to_watch is not set then there is no pending work */ 2402 + if (!eop_desc) 2403 + break; 2404 + 2405 + /* prevent any other reads prior to eop_desc */ 2406 + smp_rmb(); 2407 + 2408 + /* if the descriptor isn't done, no work to do */ 2409 + if (!(eop_desc->cmd_type_offset_bsz & 2410 + cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2411 + break; 2412 + 2413 + /* clear next_to_watch to prevent false hangs */ 2414 + tx_buf->next_to_watch = NULL; 2415 + tx_desc->buf_addr = 0; 2416 + tx_desc->cmd_type_offset_bsz = 0; 2417 + 2418 + /* move past filter desc */ 2419 + tx_buf++; 2420 + tx_desc++; 2421 + i++; 2422 + if (unlikely(!i)) { 2423 + i -= tx_ring->count; 2424 + tx_buf = tx_ring->tx_buf; 2425 + tx_desc = ICE_TX_DESC(tx_ring, 0); 2426 + } 2427 + 2428 + /* unmap the data header */ 2429 + if (dma_unmap_len(tx_buf, len)) 2430 + dma_unmap_single(tx_ring->dev, 2431 + dma_unmap_addr(tx_buf, dma), 2432 + dma_unmap_len(tx_buf, len), 2433 + DMA_TO_DEVICE); 2434 + if 
(tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2435 + devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2436 + 2437 + /* clear next_to_watch to prevent false hangs */ 2438 + tx_buf->raw_buf = NULL; 2439 + tx_buf->tx_flags = 0; 2440 + tx_buf->next_to_watch = NULL; 2441 + dma_unmap_len_set(tx_buf, len, 0); 2442 + tx_desc->buf_addr = 0; 2443 + tx_desc->cmd_type_offset_bsz = 0; 2444 + 2445 + /* move past eop_desc for start of next FD desc */ 2446 + tx_buf++; 2447 + tx_desc++; 2448 + i++; 2449 + if (unlikely(!i)) { 2450 + i -= tx_ring->count; 2451 + tx_buf = tx_ring->tx_buf; 2452 + tx_desc = ICE_TX_DESC(tx_ring, 0); 2453 + } 2454 + 2455 + budget--; 2456 + } while (likely(budget)); 2457 + 2458 + i += tx_ring->count; 2459 + tx_ring->next_to_clean = i; 2460 + 2461 + /* re-enable interrupt if needed */ 2462 + ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2473 2463 }
+9 -1
drivers/net/ethernet/intel/ice/ice_txrx.h
··· 113 113 #define ICE_TX_FLAGS_TSO BIT(0) 114 114 #define ICE_TX_FLAGS_HW_VLAN BIT(1) 115 115 #define ICE_TX_FLAGS_SW_VLAN BIT(2) 116 + /* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be 117 + * freed instead of returned like skb packets. 118 + */ 119 + #define ICE_TX_FLAGS_DUMMY_PKT BIT(3) 116 120 #define ICE_TX_FLAGS_IPV4 BIT(5) 117 121 #define ICE_TX_FLAGS_IPV6 BIT(6) 118 122 #define ICE_TX_FLAGS_TUNNEL BIT(7) ··· 378 374 void ice_free_tx_ring(struct ice_ring *tx_ring); 379 375 void ice_free_rx_ring(struct ice_ring *rx_ring); 380 376 int ice_napi_poll(struct napi_struct *napi, int budget); 381 - 377 + int 378 + ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, 379 + u8 *raw_packet); 380 + int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget); 381 + void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring); 382 382 #endif /* _ICE_TXRX_H_ */
+58 -1
drivers/net/ethernet/intel/ice/ice_type.h
··· 118 118 119 119 enum ice_vsi_type { 120 120 ICE_VSI_PF = 0, 121 - ICE_VSI_VF, 121 + ICE_VSI_VF = 1, 122 + ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ 122 123 ICE_VSI_LB = 6, 123 124 }; 124 125 ··· 162 161 u8 get_link_info; 163 162 }; 164 163 164 + /* protocol enumeration for filters */ 165 + enum ice_fltr_ptype { 166 + /* NONE - used for undef/error */ 167 + ICE_FLTR_PTYPE_NONF_NONE = 0, 168 + ICE_FLTR_PTYPE_NONF_IPV4_UDP, 169 + ICE_FLTR_PTYPE_NONF_IPV4_TCP, 170 + ICE_FLTR_PTYPE_NONF_IPV4_SCTP, 171 + ICE_FLTR_PTYPE_NONF_IPV4_OTHER, 172 + ICE_FLTR_PTYPE_FRAG_IPV4, 173 + ICE_FLTR_PTYPE_NONF_IPV6_UDP, 174 + ICE_FLTR_PTYPE_NONF_IPV6_TCP, 175 + ICE_FLTR_PTYPE_NONF_IPV6_SCTP, 176 + ICE_FLTR_PTYPE_NONF_IPV6_OTHER, 177 + ICE_FLTR_PTYPE_MAX, 178 + }; 179 + 180 + enum ice_fd_hw_seg { 181 + ICE_FD_HW_SEG_NON_TUN = 0, 182 + ICE_FD_HW_SEG_TUN, 183 + ICE_FD_HW_SEG_MAX, 184 + }; 185 + 186 + /* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */ 187 + #define ICE_MAX_FDIR_VSI_PER_FILTER 2 188 + 189 + struct ice_fd_hw_prof { 190 + struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX]; 191 + int cnt; 192 + u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX]; 193 + u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER]; 194 + }; 195 + 165 196 /* Common HW capabilities for SW use */ 166 197 struct ice_hw_common_caps { 167 198 u32 valid_functions; ··· 230 197 u32 num_allocd_vfs; /* Number of allocated VFs */ 231 198 u32 vf_base_id; /* Logical ID of the first VF */ 232 199 u32 guar_num_vsi; 200 + u32 fd_fltr_guar; /* Number of filters guaranteed */ 201 + u32 fd_fltr_best_effort; /* Number of best effort filters */ 233 202 }; 234 203 235 204 /* Device wide capabilities */ ··· 239 204 struct ice_hw_common_caps common_cap; 240 205 u32 num_vfs_exposed; /* Total number of VFs exposed */ 241 206 u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ 207 + u32 num_flow_director_fltr; /* Number of FD filters available */ 242 208 u32 num_funcs; 243 209 }; 244 210 ··· 525 489 u64 debug_mask; /* 
bitmap for debug mask */ 526 490 enum ice_mac_type mac_type; 527 491 492 + u16 fd_ctr_base; /* FD counter base index */ 493 + 528 494 /* pci info */ 529 495 u16 device_id; 530 496 u16 vendor_id; ··· 597 559 598 560 /* Active package version (currently active) */ 599 561 struct ice_pkg_ver active_pkg_ver; 562 + u32 active_track_id; 600 563 u8 active_pkg_name[ICE_PKG_NAME_SIZE]; 601 564 u8 active_pkg_in_nvm; 602 565 ··· 626 587 struct ice_blk_info blk[ICE_BLK_COUNT]; 627 588 struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */ 628 589 struct list_head fl_profs[ICE_BLK_COUNT]; 590 + 591 + /* Flow Director filter info */ 592 + int fdir_active_fltr; 593 + 594 + struct mutex fdir_fltr_lock; /* protect Flow Director */ 595 + struct list_head fdir_list_head; 596 + 597 + /* Book-keeping of side-band filter count per flow-type. 598 + * This is used to detect and handle input set changes for 599 + * respective flow-type. 600 + */ 601 + u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX]; 602 + 603 + struct ice_fd_hw_prof **fdir_prof; 604 + DECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX); 629 605 struct mutex rss_locks; /* protect RSS configuration */ 630 606 struct list_head rss_list_head; 631 607 }; ··· 702 648 u64 tx_size_1023; /* ptc1023 */ 703 649 u64 tx_size_1522; /* ptc1522 */ 704 650 u64 tx_size_big; /* ptc9522 */ 651 + /* flow director stats */ 652 + u32 fd_sb_status; 653 + u64 fd_sb_match; 705 654 }; 706 655 707 656 /* Checksum and Shadow RAM pointers */
+112 -8
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
··· 80 80 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) 81 81 { 82 82 struct ice_hw *hw = &pf->hw; 83 - int i; 83 + unsigned int i; 84 84 85 85 ice_for_each_vf(pf, i) { 86 86 struct ice_vf *vf = &pf->vf[i]; ··· 325 325 { 326 326 struct device *dev = ice_pf_to_dev(pf); 327 327 struct ice_hw *hw = &pf->hw; 328 - int tmp, i; 328 + unsigned int tmp, i; 329 329 330 330 if (!pf->vf) 331 331 return; ··· 2317 2317 } 2318 2318 2319 2319 /** 2320 + * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL 2321 + * @vsi: VSI of the VF to configure 2322 + * @q_idx: VF queue index used to determine the queue in the PF's space 2323 + */ 2324 + static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) 2325 + { 2326 + struct ice_hw *hw = &vsi->back->hw; 2327 + u32 pfq = vsi->txq_map[q_idx]; 2328 + u32 reg; 2329 + 2330 + reg = rd32(hw, QINT_TQCTL(pfq)); 2331 + 2332 + /* MSI-X index 0 in the VF's space is always for the OICR, which means 2333 + * this is most likely a poll mode VF driver, so don't enable an 2334 + * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP 2335 + */ 2336 + if (!(reg & QINT_TQCTL_MSIX_INDX_M)) 2337 + return; 2338 + 2339 + wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M); 2340 + } 2341 + 2342 + /** 2343 + * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL 2344 + * @vsi: VSI of the VF to configure 2345 + * @q_idx: VF queue index used to determine the queue in the PF's space 2346 + */ 2347 + static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) 2348 + { 2349 + struct ice_hw *hw = &vsi->back->hw; 2350 + u32 pfq = vsi->rxq_map[q_idx]; 2351 + u32 reg; 2352 + 2353 + reg = rd32(hw, QINT_RQCTL(pfq)); 2354 + 2355 + /* MSI-X index 0 in the VF's space is always for the OICR, which means 2356 + * this is most likely a poll mode VF driver, so don't enable an 2357 + * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP 2358 + */ 2359 + if (!(reg & 
QINT_RQCTL_MSIX_INDX_M)) 2360 + return; 2361 + 2362 + wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M); 2363 + } 2364 + 2365 + /** 2320 2366 * ice_vc_ena_qs_msg 2321 2367 * @vf: pointer to the VF info 2322 2368 * @msg: pointer to the msg buffer ··· 2422 2376 goto error_param; 2423 2377 } 2424 2378 2379 + ice_vf_ena_rxq_interrupt(vsi, vf_q_id); 2425 2380 set_bit(vf_q_id, vf->rxq_ena); 2426 2381 } 2427 2382 ··· 2438 2391 if (test_bit(vf_q_id, vf->txq_ena)) 2439 2392 continue; 2440 2393 2394 + ice_vf_ena_txq_interrupt(vsi, vf_q_id); 2441 2395 set_bit(vf_q_id, vf->txq_ena); 2442 2396 } 2443 2397 ··· 3641 3593 } 3642 3594 3643 3595 /** 3596 + * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch 3597 + * @pf: PF used to reference the switch's rules 3598 + * @umac: unicast MAC to compare against existing switch rules 3599 + * 3600 + * Return true on the first/any match, else return false 3601 + */ 3602 + static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac) 3603 + { 3604 + struct ice_sw_recipe *mac_recipe_list = 3605 + &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC]; 3606 + struct ice_fltr_mgmt_list_entry *list_itr; 3607 + struct list_head *rule_head; 3608 + struct mutex *rule_lock; /* protect MAC filter list access */ 3609 + 3610 + rule_head = &mac_recipe_list->filt_rules; 3611 + rule_lock = &mac_recipe_list->filt_rule_lock; 3612 + 3613 + mutex_lock(rule_lock); 3614 + list_for_each_entry(list_itr, rule_head, list_entry) { 3615 + u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0]; 3616 + 3617 + if (ether_addr_equal(existing_mac, umac)) { 3618 + mutex_unlock(rule_lock); 3619 + return true; 3620 + } 3621 + } 3622 + 3623 + mutex_unlock(rule_lock); 3624 + 3625 + return false; 3626 + } 3627 + 3628 + /** 3644 3629 * ice_set_vf_mac 3645 3630 * @netdev: network interface device structure 3646 3631 * @vf_id: VF identifier ··· 3696 3615 } 3697 3616 3698 3617 vf = &pf->vf[vf_id]; 3618 + /* nothing left to do, unicast MAC already 
set */ 3619 + if (ether_addr_equal(vf->dflt_lan_addr.addr, mac)) 3620 + return 0; 3621 + 3699 3622 ret = ice_check_vf_ready_for_cfg(vf); 3700 3623 if (ret) 3701 3624 return ret; 3625 + 3626 + if (ice_unicast_mac_exists(pf, mac)) { 3627 + netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n", 3628 + mac, vf_id, mac); 3629 + return -EINVAL; 3630 + } 3702 3631 3703 3632 /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset 3704 3633 * flow will use the updated dflt_lan_addr and add a MAC filter ··· 3848 3757 } 3849 3758 3850 3759 /** 3760 + * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event 3761 + * @vf: pointer to the VF structure 3762 + */ 3763 + void ice_print_vf_rx_mdd_event(struct ice_vf *vf) 3764 + { 3765 + struct ice_pf *pf = vf->pf; 3766 + struct device *dev; 3767 + 3768 + dev = ice_pf_to_dev(pf); 3769 + 3770 + dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", 3771 + vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, 3772 + vf->dflt_lan_addr.addr, 3773 + test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) 3774 + ? "on" : "off"); 3775 + } 3776 + 3777 + /** 3851 3778 * ice_print_vfs_mdd_event - print VFs malicious driver detect event 3852 3779 * @pf: pointer to the PF structure 3853 3780 * ··· 3894 3785 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { 3895 3786 vf->mdd_rx_events.last_printed = 3896 3787 vf->mdd_rx_events.count; 3897 - 3898 - dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", 3899 - vf->mdd_rx_events.count, hw->pf_id, i, 3900 - vf->dflt_lan_addr.addr, 3901 - test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) 3902 - ? "on" : "off"); 3788 + ice_print_vf_rx_mdd_event(vf); 3903 3789 } 3904 3790 3905 3791 /* only print Tx MDD event message if there are new events */
+2
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
··· 132 132 void 133 133 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); 134 134 void ice_print_vfs_mdd_events(struct ice_pf *pf); 135 + void ice_print_vf_rx_mdd_event(struct ice_vf *vf); 135 136 #else /* CONFIG_PCI_IOV */ 136 137 #define ice_process_vflr_event(pf) do {} while (0) 137 138 #define ice_free_vfs(pf) do {} while (0) ··· 142 141 #define ice_set_vf_state_qs_dis(vf) do {} while (0) 143 142 #define ice_vf_lan_overflow_event(pf, event) do {} while (0) 144 143 #define ice_print_vfs_mdd_events(pf) do {} while (0) 144 + #define ice_print_vf_rx_mdd_event(vf) do {} while (0) 145 145 146 146 static inline bool 147 147 ice_reset_all_vfs(struct ice_pf __always_unused *pf,
+5
include/linux/avf/virtchnl.h
··· 476 476 u16 vsi_id; 477 477 u16 key_len; 478 478 u8 key[1]; /* RSS hash key, packed bytes */ 479 + u8 pad[1]; 479 480 }; 480 481 481 482 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); ··· 485 484 u16 vsi_id; 486 485 u16 lut_entries; 487 486 u8 lut[1]; /* RSS lookup table */ 487 + u8 pad[1]; 488 488 }; 489 489 490 490 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); ··· 574 572 enum virtchnl_action action; 575 573 u32 action_meta; 576 574 u8 field_flags; 575 + u8 pad[3]; 577 576 }; 578 577 579 578 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); ··· 613 610 /* link_speed provided in Mbps */ 614 611 u32 link_speed; 615 612 u8 link_status; 613 + u8 pad[3]; 616 614 } link_event_adv; 617 615 } event_data; 618 616 ··· 639 635 u16 ceq_idx; 640 636 u16 aeq_idx; 641 637 u8 itr_idx; 638 + u8 pad[3]; 642 639 }; 643 640 644 641 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);