Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2025-11-06 (i40e, ice, iavf)

Mohammad Heib introduces a new devlink parameter, max_mac_per_vf, for
controlling the maximum number of MAC address filters allowed by a VF. This
allows administrators to control the VF behavior in a more nuanced manner.

Aleksandr and Przemek add support for Receive Side Scaling of GTP to iAVF
for VFs running on E800 series ice hardware. This improves performance and
scalability for virtualized network functions in 5G and LTE deployments.

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
iavf: add RSS support for GTP protocol via ethtool
ice: Extend PTYPE bitmap coverage for GTP encapsulated flows
ice: improve TCAM priority handling for RSS profiles
ice: implement GTP RSS context tracking and configuration
ice: add virtchnl definitions and static data for GTP RSS
ice: add flow parsing for GTP and new protocol field support
i40e: support generic devlink param "max_mac_per_vf"
devlink: Add new "max_mac_per_vf" generic device param
====================

Link: https://patch.msgid.link/20251106225321.1609605-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2124 -137
+34
Documentation/networking/devlink/i40e.rst
··· 7 7 This document describes the devlink features implemented by the ``i40e`` 8 8 device driver. 9 9 10 + Parameters 11 + ========== 12 + 13 + .. list-table:: Generic parameters implemented 14 + :widths: 5 5 90 15 + 16 + * - Name 17 + - Mode 18 + - Notes 19 + * - ``max_mac_per_vf`` 20 + - runtime 21 + - Controls the maximum number of MAC addresses a VF can use 22 + on i40e devices. 23 + 24 + By default (``0``), the driver enforces its internally calculated per-VF 25 + MAC filter limit, which is based on the number of allocated VFs. 26 + 27 + If set to a non-zero value, this parameter acts as a strict cap: 28 + the driver will use the user-provided value instead of its internal 29 + calculation. 30 + 31 + **Important notes:** 32 + 33 + - This value **must be set before enabling SR-IOV**. 34 + Attempting to change it while SR-IOV is enabled will return an error. 35 + - MAC filters are a **shared hardware resource** across all VFs. 36 + Setting a high value may cause other VFs to be starved of filters. 37 + - This value is an **administrative policy**. The hardware may return 38 + errors when its absolute limit is reached, regardless of the value 39 + set here. 40 + 41 + The default value is ``0`` (internal calculation is used). 42 + 43 + 10 44 Info versions 11 45 ============= 12 46
+4
drivers/net/ethernet/intel/i40e/i40e.h
··· 574 574 struct i40e_vf *vf; 575 575 int num_alloc_vfs; /* actual number of VFs allocated */ 576 576 u32 vf_aq_requests; 577 + /* If set to non-zero, the device uses this value 578 + * as maximum number of MAC filters per VF. 579 + */ 580 + u32 max_mac_per_vf; 577 581 u32 arq_overflows; /* Not fatal, possibly indicative of problems */ 578 582 struct ratelimit_state mdd_message_rate_limit; 579 583 /* DCBx/DCBNL capability for PF that indicates
+52 -2
drivers/net/ethernet/intel/i40e/i40e_devlink.c
··· 5 5 #include "i40e.h" 6 6 #include "i40e_devlink.h" 7 7 8 + static int i40e_max_mac_per_vf_set(struct devlink *devlink, 9 + u32 id, 10 + struct devlink_param_gset_ctx *ctx, 11 + struct netlink_ext_ack *extack) 12 + { 13 + struct i40e_pf *pf = devlink_priv(devlink); 14 + 15 + if (pf->num_alloc_vfs > 0) { 16 + NL_SET_ERR_MSG_MOD(extack, 17 + "Cannot change max_mac_per_vf while SR-IOV is enabled"); 18 + return -EBUSY; 19 + } 20 + 21 + pf->max_mac_per_vf = ctx->val.vu32; 22 + return 0; 23 + } 24 + 25 + static int i40e_max_mac_per_vf_get(struct devlink *devlink, 26 + u32 id, 27 + struct devlink_param_gset_ctx *ctx) 28 + { 29 + struct i40e_pf *pf = devlink_priv(devlink); 30 + 31 + ctx->val.vu32 = pf->max_mac_per_vf; 32 + return 0; 33 + } 34 + 35 + static const struct devlink_param i40e_dl_params[] = { 36 + DEVLINK_PARAM_GENERIC(MAX_MAC_PER_VF, 37 + BIT(DEVLINK_PARAM_CMODE_RUNTIME), 38 + i40e_max_mac_per_vf_get, 39 + i40e_max_mac_per_vf_set, 40 + NULL), 41 + }; 42 + 8 43 static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len) 9 44 { 10 45 u8 dsn[8]; ··· 200 165 **/ 201 166 void i40e_devlink_register(struct i40e_pf *pf) 202 167 { 203 - devlink_register(priv_to_devlink(pf)); 168 + struct devlink *dl = priv_to_devlink(pf); 169 + struct device *dev = &pf->pdev->dev; 170 + int err; 171 + 172 + err = devlink_params_register(dl, i40e_dl_params, 173 + ARRAY_SIZE(i40e_dl_params)); 174 + if (err) 175 + dev_err(dev, 176 + "devlink params register failed with error %d", err); 177 + 178 + devlink_register(dl); 179 + 204 180 } 205 181 206 182 /** ··· 222 176 **/ 223 177 void i40e_devlink_unregister(struct i40e_pf *pf) 224 178 { 225 - devlink_unregister(priv_to_devlink(pf)); 179 + struct devlink *dl = priv_to_devlink(pf); 180 + 181 + devlink_unregister(dl); 182 + devlink_params_unregister(dl, i40e_dl_params, 183 + ARRAY_SIZE(i40e_dl_params)); 226 184 } 227 185 228 186 /**
+23 -8
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 2935 2935 if (!f) 2936 2936 ++mac_add_cnt; 2937 2937 } 2938 - 2939 - /* If this VF is not privileged, then we can't add more than a limited 2940 - * number of addresses. 2938 + /* Determine the maximum number of MAC addresses this VF may use. 2941 2939 * 2942 - * If this VF is trusted, it can use more resources than untrusted. 2943 - * However to ensure that every trusted VF has appropriate number of 2944 - * resources, divide whole pool of resources per port and then across 2945 - * all VFs. 2940 + * - For untrusted VFs: use a fixed small limit. 2941 + * 2942 + * - For trusted VFs: limit is calculated by dividing total MAC 2943 + * filter pool across all VFs/ports. 2944 + * 2945 + * - User can override this by devlink param "max_mac_per_vf". 2946 + * If set its value is used as a strict cap for both trusted and 2947 + * untrusted VFs. 2948 + * Note: 2949 + * even when overridden, this is a theoretical maximum; hardware 2950 + * may reject additional MACs if the absolute HW limit is reached. 
2946 2951 */ 2947 2952 if (!vf_trusted) 2948 2953 mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF; 2949 2954 else 2950 2955 mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports); 2956 + 2957 + if (pf->max_mac_per_vf > 0) 2958 + mac_add_max = pf->max_mac_per_vf; 2951 2959 2952 2960 /* VF can replace all its filters in one step, in this case mac_add_max 2953 2961 * will be added as active and another mac_add_max will be in ··· 2963 2955 */ 2964 2956 if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max || 2965 2957 (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) { 2958 + if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) { 2959 + dev_err(&pf->pdev->dev, 2960 + "Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n", 2961 + mac_add_max); 2962 + return -EPERM; 2963 + } 2966 2964 if (!vf_trusted) { 2967 2965 dev_err(&pf->pdev->dev, 2968 2966 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); 2969 2967 return -EPERM; 2970 2968 } else { 2971 2969 dev_err(&pf->pdev->dev, 2972 - "Cannot add more MAC addresses, trusted VF exhausted it's resources\n"); 2970 + "Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n", 2971 + mac_add_max); 2973 2972 return -EPERM; 2974 2973 } 2975 2974 }
+96 -23
drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
··· 91 91 } 92 92 93 93 /** 94 + * iavf_fill_adv_rss_gtp_hdr - Fill GTP-related RSS protocol headers 95 + * @proto_hdrs: pointer to the virtchnl protocol headers structure to populate 96 + * @packet_hdrs: bitmask of packet header types to configure 97 + * @hash_flds: RSS hash field configuration 98 + * 99 + * This function populates the virtchnl protocol header structure with 100 + * appropriate GTP-related header types based on the specified packet_hdrs. 101 + * It supports GTPC, GTPU with extension headers, and uplink/downlink PDU 102 + * types. For certain GTPU types, it also appends an IPv4 header to enable 103 + * hashing on the destination IP address. 104 + * 105 + * Return: 0 on success or -EOPNOTSUPP if the packet_hdrs value is unsupported. 106 + */ 107 + static int 108 + iavf_fill_adv_rss_gtp_hdr(struct virtchnl_proto_hdrs *proto_hdrs, 109 + u32 packet_hdrs, u64 hash_flds) 110 + { 111 + struct virtchnl_proto_hdr *hdr; 112 + 113 + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; 114 + 115 + switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) { 116 + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID: 117 + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC: 118 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPC); 119 + break; 120 + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH: 121 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH); 122 + break; 123 + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP: 124 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP); 125 + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; 126 + iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA); 127 + break; 128 + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN: 129 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN); 130 + fallthrough; 131 + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP: 132 + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; 133 + iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA); 134 + break; 135 + default: 136 + return -EOPNOTSUPP; 137 + } 138 + 139 + return 0; 140 + } 141 + 142 + /** 94 143 * 
iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message 95 144 * @rss_cfg: the virtchnl message to be filled with RSS configuration setting 96 145 * @packet_hdrs: the RSS configuration protocol header types ··· 152 103 iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, 153 104 u32 packet_hdrs, u64 hash_flds, bool symm) 154 105 { 106 + const u32 packet_l3_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3; 107 + const u32 packet_l4_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4; 155 108 struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs; 156 109 struct virtchnl_proto_hdr *hdr; 157 110 ··· 164 113 165 114 proto_hdrs->tunnel_level = 0; /* always outer layer */ 166 115 167 - hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; 168 - switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) { 169 - case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4: 170 - iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); 171 - break; 172 - case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6: 173 - iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); 174 - break; 175 - default: 176 - return -EINVAL; 116 + if (packet_l3_hdrs) { 117 + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; 118 + switch (packet_l3_hdrs) { 119 + case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4: 120 + iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); 121 + break; 122 + case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6: 123 + iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); 124 + break; 125 + default: 126 + return -EINVAL; 127 + } 177 128 } 178 129 179 - hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; 180 - switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) { 181 - case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP: 182 - iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); 183 - break; 184 - case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP: 185 - iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); 186 - break; 187 - case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP: 188 - iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); 189 - break; 190 - default: 191 - return -EINVAL; 130 + if (packet_l4_hdrs) { 131 + hdr = 
&proto_hdrs->proto_hdr[proto_hdrs->count++]; 132 + switch (packet_l4_hdrs) { 133 + case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP: 134 + iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); 135 + break; 136 + case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP: 137 + iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); 138 + break; 139 + case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP: 140 + iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); 141 + break; 142 + default: 143 + return -EINVAL; 144 + } 145 + } 146 + 147 + if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) { 148 + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; 149 + if (iavf_fill_adv_rss_gtp_hdr(proto_hdrs, packet_hdrs, hash_flds)) 150 + return -EINVAL; 192 151 } 193 152 194 153 return 0; ··· 247 186 proto = "UDP"; 248 187 else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) 249 188 proto = "SCTP"; 189 + else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) 190 + proto = "GTP"; 250 191 else 251 192 return; 252 193 ··· 274 211 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT | 275 212 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)) 276 213 strcat(hash_opt, "dst port,"); 214 + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPC_TEID) 215 + strcat(hash_opt, "gtp-c,"); 216 + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID) 217 + strcat(hash_opt, "gtp-u ip,"); 218 + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID) 219 + strcat(hash_opt, "gtp-u ext,"); 220 + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID) 221 + strcat(hash_opt, "gtp-u ul,"); 222 + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID) 223 + strcat(hash_opt, "gtp-u dl,"); 277 224 278 225 if (!action) 279 226 action = "";
+31
drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
··· 22 22 IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004, 23 23 IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008, 24 24 IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010, 25 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC = 0x00000400, 26 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID = 0x00000800, 27 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP = 0x00001000, 28 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH = 0x00002000, 29 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN = 0x00004000, 30 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP = 0x00008000, 25 31 }; 26 32 27 33 #define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \ ··· 38 32 (IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | \ 39 33 IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \ 40 34 IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) 35 + 36 + #define IAVF_ADV_RSS_FLOW_SEG_HDR_GTP \ 37 + (IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | \ 38 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | \ 39 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | \ 40 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | \ 41 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | \ 42 + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP) 41 43 42 44 enum iavf_adv_rss_flow_field { 43 45 /* L3 */ ··· 60 46 IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT, 61 47 IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT, 62 48 IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT, 49 + /* GTPC_TEID */ 50 + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID, 51 + /* GTPU_IP */ 52 + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID, 53 + /* GTPU_EH */ 54 + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID, 55 + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_QFI, 56 + /* GTPU_UP */ 57 + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID, 58 + /* GTPU_DWN */ 59 + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID, 63 60 64 61 /* The total number of enums must not exceed 64 */ 65 62 IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX ··· 97 72 BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT) 98 73 #define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \ 99 74 BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT) 75 + #define IAVF_ADV_RSS_HASH_FLD_GTPC_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID) 76 + #define IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID 
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID) 77 + #define IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID) 78 + #define IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID) 79 + #define IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID \ 80 + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID) 100 81 101 82 /* bookkeeping of advanced RSS configuration */ 102 83 struct iavf_adv_rss {
+89
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
··· 1336 1336 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | 1337 1337 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; 1338 1338 break; 1339 + case GTPU_V4_FLOW: 1340 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | 1341 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; 1342 + break; 1343 + case GTPC_V4_FLOW: 1344 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | 1345 + IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | 1346 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; 1347 + break; 1348 + case GTPC_TEID_V4_FLOW: 1349 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | 1350 + IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | 1351 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; 1352 + break; 1353 + case GTPU_EH_V4_FLOW: 1354 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | 1355 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; 1356 + break; 1357 + case GTPU_UL_V4_FLOW: 1358 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP | 1359 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; 1360 + break; 1361 + case GTPU_DL_V4_FLOW: 1362 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | 1363 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; 1364 + break; 1365 + case GTPU_V6_FLOW: 1366 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | 1367 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; 1368 + break; 1369 + case GTPC_V6_FLOW: 1370 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | 1371 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; 1372 + break; 1373 + case GTPC_TEID_V6_FLOW: 1374 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | 1375 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; 1376 + break; 1377 + case GTPU_EH_V6_FLOW: 1378 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | 1379 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; 1380 + break; 1381 + case GTPU_UL_V6_FLOW: 1382 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP | 1383 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; 1384 + break; 1385 + case GTPU_DL_V6_FLOW: 1386 + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | 1387 + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; 1388 + break; 1339 1389 default: 1340 1390 break; 1341 1391 } ··· 1403 1353 case TCP_V4_FLOW: 1404 1354 case UDP_V4_FLOW: 1405 1355 case SCTP_V4_FLOW: 1356 + case GTPU_V4_FLOW: 1357 + case GTPC_V4_FLOW: 1358 + case GTPC_TEID_V4_FLOW: 
1359 + case GTPU_EH_V4_FLOW: 1360 + case GTPU_UL_V4_FLOW: 1361 + case GTPU_DL_V4_FLOW: 1406 1362 if (cmd->data & RXH_IP_SRC) 1407 1363 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA; 1408 1364 if (cmd->data & RXH_IP_DST) ··· 1417 1361 case TCP_V6_FLOW: 1418 1362 case UDP_V6_FLOW: 1419 1363 case SCTP_V6_FLOW: 1364 + case GTPU_V6_FLOW: 1365 + case GTPC_V6_FLOW: 1366 + case GTPC_TEID_V6_FLOW: 1367 + case GTPU_EH_V6_FLOW: 1368 + case GTPU_UL_V6_FLOW: 1369 + case GTPU_DL_V6_FLOW: 1420 1370 if (cmd->data & RXH_IP_SRC) 1421 1371 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA; 1422 1372 if (cmd->data & RXH_IP_DST) ··· 1444 1382 break; 1445 1383 case UDP_V4_FLOW: 1446 1384 case UDP_V6_FLOW: 1385 + case GTPC_V4_FLOW: 1447 1386 if (cmd->data & RXH_L4_B_0_1) 1448 1387 hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT; 1449 1388 if (cmd->data & RXH_L4_B_2_3) ··· 1456 1393 hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT; 1457 1394 if (cmd->data & RXH_L4_B_2_3) 1458 1395 hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT; 1396 + break; 1397 + default: 1398 + break; 1399 + } 1400 + } 1401 + if (cmd->data & RXH_GTP_TEID) { 1402 + switch (cmd->flow_type) { 1403 + case GTPC_TEID_V4_FLOW: 1404 + case GTPC_TEID_V6_FLOW: 1405 + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPC_TEID; 1406 + break; 1407 + case GTPU_V4_FLOW: 1408 + case GTPU_V6_FLOW: 1409 + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID; 1410 + break; 1411 + case GTPU_EH_V4_FLOW: 1412 + case GTPU_EH_V6_FLOW: 1413 + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID; 1414 + break; 1415 + case GTPU_UL_V4_FLOW: 1416 + case GTPU_UL_V6_FLOW: 1417 + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID; 1418 + break; 1419 + case GTPU_DL_V4_FLOW: 1420 + case GTPU_DL_V6_FLOW: 1421 + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID; 1459 1422 break; 1460 1423 default: 1461 1424 break;
+77 -14
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
··· 3578 3578 } 3579 3579 3580 3580 /** 3581 + * ice_set_tcam_flags - set TCAM flag don't care mask 3582 + * @mask: mask for flags 3583 + * @dc_mask: pointer to the don't care mask 3584 + */ 3585 + static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ]) 3586 + { 3587 + u16 inverted_mask = ~mask; 3588 + 3589 + /* flags are lowest u16 */ 3590 + put_unaligned_le16(inverted_mask, dc_mask); 3591 + } 3592 + 3593 + /** 3581 3594 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list 3582 3595 * @hw: pointer to the HW struct 3583 3596 * @idx: the index of the TCAM entry to remove ··· 3660 3647 if (!p) 3661 3648 return -ENOMEM; 3662 3649 3650 + /* set don't care masks for TCAM flags */ 3651 + ice_set_tcam_flags(tcam->attr.mask, dc_msk); 3652 + 3663 3653 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, 3664 3654 tcam->ptg, vsig, 0, tcam->attr.flags, 3665 3655 vl_msk, dc_msk, nm_msk); ··· 3689 3673 } 3690 3674 3691 3675 /** 3676 + * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use 3677 + * @ptg_attr: pointer to the PTG and attribute pair to check 3678 + * @ptgs_used: bitmap that denotes which PTGs are in use 3679 + * @attr_used: array of PTG and attributes pairs already used 3680 + * @attr_cnt: count of entries in the attr_used array 3681 + * 3682 + * Return: true if the PTG and attribute pair is in use, false otherwise. 
3683 + */ 3684 + static bool 3685 + ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, unsigned long *ptgs_used, 3686 + struct ice_tcam_inf *attr_used[], u16 attr_cnt) 3687 + { 3688 + u16 i; 3689 + 3690 + if (!test_bit(ptg_attr->ptg, ptgs_used)) 3691 + return false; 3692 + 3693 + /* the PTG is used, so now look for correct attributes */ 3694 + for (i = 0; i < attr_cnt; i++) 3695 + if (attr_used[i]->ptg == ptg_attr->ptg && 3696 + attr_used[i]->attr.flags == ptg_attr->attr.flags && 3697 + attr_used[i]->attr.mask == ptg_attr->attr.mask) 3698 + return true; 3699 + 3700 + return false; 3701 + } 3702 + 3703 + /** 3692 3704 * ice_adj_prof_priorities - adjust profile based on priorities 3693 3705 * @hw: pointer to the HW struct 3694 3706 * @blk: hardware block ··· 3728 3684 struct list_head *chg) 3729 3685 { 3730 3686 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); 3687 + struct ice_tcam_inf **attr_used; 3731 3688 struct ice_vsig_prof *t; 3732 - int status; 3689 + u16 attr_used_cnt = 0; 3690 + int status = 0; 3733 3691 u16 idx; 3692 + 3693 + attr_used = kcalloc(ICE_MAX_PTG_ATTRS, sizeof(*attr_used), GFP_KERNEL); 3694 + if (!attr_used) 3695 + return -ENOMEM; 3734 3696 3735 3697 bitmap_zero(ptgs_used, ICE_XLT1_CNT); 3736 3698 idx = vsig & ICE_VSIG_IDX_M; ··· 3755 3705 u16 i; 3756 3706 3757 3707 for (i = 0; i < t->tcam_count; i++) { 3708 + bool used; 3709 + 3758 3710 /* Scan the priorities from newest to oldest. 3759 3711 * Make sure that the newest profiles take priority. 
3760 3712 */ 3761 - if (test_bit(t->tcam[i].ptg, ptgs_used) && 3762 - t->tcam[i].in_use) { 3713 + used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used, 3714 + attr_used, attr_used_cnt); 3715 + 3716 + if (used && t->tcam[i].in_use) { 3763 3717 /* need to mark this PTG as never match, as it 3764 3718 * was already in use and therefore duplicate 3765 3719 * (and lower priority) ··· 3773 3719 &t->tcam[i], 3774 3720 chg); 3775 3721 if (status) 3776 - return status; 3777 - } else if (!test_bit(t->tcam[i].ptg, ptgs_used) && 3778 - !t->tcam[i].in_use) { 3722 + goto free_attr_used; 3723 + } else if (!used && !t->tcam[i].in_use) { 3779 3724 /* need to enable this PTG, as it in not in use 3780 3725 * and not enabled (highest priority) 3781 3726 */ ··· 3783 3730 &t->tcam[i], 3784 3731 chg); 3785 3732 if (status) 3786 - return status; 3733 + goto free_attr_used; 3787 3734 } 3788 3735 3789 3736 /* keep track of used ptgs */ 3790 - __set_bit(t->tcam[i].ptg, ptgs_used); 3737 + set_bit(t->tcam[i].ptg, ptgs_used); 3738 + if (attr_used_cnt < ICE_MAX_PTG_ATTRS) 3739 + attr_used[attr_used_cnt++] = &t->tcam[i]; 3740 + else 3741 + ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n"); 3791 3742 } 3792 3743 } 3793 3744 3794 - return 0; 3745 + free_attr_used: 3746 + kfree(attr_used); 3747 + return status; 3795 3748 } 3796 3749 3797 3750 /** ··· 3880 3821 p->vsig = vsig; 3881 3822 p->tcam_idx = t->tcam[i].tcam_idx; 3882 3823 3824 + /* set don't care masks for TCAM flags */ 3825 + ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk); 3826 + 3883 3827 /* write the TCAM entry */ 3884 3828 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, 3885 3829 t->tcam[i].prof_id, 3886 - t->tcam[i].ptg, vsig, 0, 0, 3887 - vl_msk, dc_msk, nm_msk); 3830 + t->tcam[i].ptg, vsig, 0, 3831 + t->tcam[i].attr.flags, vl_msk, 3832 + dc_msk, nm_msk); 3888 3833 if (status) { 3889 3834 devm_kfree(ice_hw_to_dev(hw), p); 3890 3835 goto err_ice_add_prof_id_vsig; ··· 4202 4139 u16 vsi_num; 4203 4140 int 
status; 4204 4141 4205 - if (blk != ICE_BLK_FD) 4206 - return -EINVAL; 4207 - 4208 4142 vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); 4209 4143 status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); 4210 4144 if (status) { ··· 4209 4149 status); 4210 4150 return status; 4211 4151 } 4152 + 4153 + if (blk != ICE_BLK_FD) 4154 + return 0; 4212 4155 4213 4156 vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi); 4214 4157 status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+1
drivers/net/ethernet/intel/ice/ice_flex_type.h
··· 187 187 }; 188 188 189 189 #define ICE_INVALID_TCAM 0xFFFF 190 + #define ICE_MAX_PTG_ATTRS 1024 190 191 191 192 struct ice_tcam_inf { 192 193 u16 tcam_idx;
+238 -31
drivers/net/ethernet/intel/ice/ice_flow.c
··· 5 5 #include "ice_flow.h" 6 6 #include <net/gre.h> 7 7 8 + /* Size of known protocol header fields */ 9 + #define ICE_FLOW_FLD_SZ_ETH_TYPE 2 10 + #define ICE_FLOW_FLD_SZ_VLAN 2 11 + #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4 12 + #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16 13 + #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4 14 + #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6 15 + #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8 16 + #define ICE_FLOW_FLD_SZ_IPV4_ID 2 17 + #define ICE_FLOW_FLD_SZ_IPV6_ID 4 18 + #define ICE_FLOW_FLD_SZ_IP_CHKSUM 2 19 + #define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2 20 + #define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2 21 + #define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4 22 + #define ICE_FLOW_FLD_SZ_IP_DSCP 1 23 + #define ICE_FLOW_FLD_SZ_IP_TTL 1 24 + #define ICE_FLOW_FLD_SZ_IP_PROT 1 25 + #define ICE_FLOW_FLD_SZ_PORT 2 26 + #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1 27 + #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1 28 + #define ICE_FLOW_FLD_SZ_ICMP_CODE 1 29 + #define ICE_FLOW_FLD_SZ_ARP_OPER 2 30 + #define ICE_FLOW_FLD_SZ_GRE_KEYID 4 31 + #define ICE_FLOW_FLD_SZ_GTP_TEID 4 32 + #define ICE_FLOW_FLD_SZ_GTP_QFI 2 33 + #define ICE_FLOW_FLD_SZ_PFCP_SEID 8 34 + #define ICE_FLOW_FLD_SZ_ESP_SPI 4 35 + #define ICE_FLOW_FLD_SZ_AH_SPI 4 36 + #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4 37 + #define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID 2 38 + #define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2 39 + 8 40 /* Describe properties of a protocol header field */ 9 41 struct ice_flow_field_info { 10 42 enum ice_flow_seg_hdr hdr; ··· 52 20 .mask = 0, \ 53 21 } 54 22 23 + /* QFI: 6-bit field in GTP-U PDU Session Container (3GPP TS 38.415) */ 55 24 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \ 56 25 .hdr = _hdr, \ 57 26 .off = (_offset_bytes) * BITS_PER_BYTE, \ ··· 94 61 /* ICE_FLOW_FIELD_IDX_IPV6_SA */ 95 62 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)), 96 63 /* ICE_FLOW_FIELD_IDX_IPV6_DA */ 97 - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)), 64 + 
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR), 65 + /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */ 66 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM), 67 + /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */ 68 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4, 69 + ICE_FLOW_FLD_SZ_IPV4_ID), 70 + /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */ 71 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4, 72 + ICE_FLOW_FLD_SZ_IPV6_ID), 73 + /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */ 74 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, 75 + ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR), 76 + /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */ 77 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, 78 + ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR), 79 + /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */ 80 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, 81 + ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR), 82 + /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */ 83 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, 84 + ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR), 85 + /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */ 86 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, 87 + ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR), 88 + /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */ 89 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, 90 + ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR), 98 91 /* Transport */ 99 92 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */ 100 93 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)), ··· 135 76 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ 136 77 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)), 137 78 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ 138 - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1), 79 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS), 80 + /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */ 81 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM), 82 + /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */ 83 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM), 84 + /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */ 85 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8, 86 + 
ICE_FLOW_FLD_SZ_SCTP_CHKSUM), 139 87 /* ARP */ 140 88 /* ICE_FLOW_FIELD_IDX_ARP_SIP */ 141 89 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)), ··· 174 108 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16), 175 109 0x3f00), 176 110 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */ 177 - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)), 111 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, 112 + ICE_FLOW_FLD_SZ_GTP_TEID), 113 + /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */ 114 + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22, 115 + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), 178 116 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */ 179 - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)), 117 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, 118 + ICE_FLOW_FLD_SZ_GTP_TEID), 119 + /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */ 120 + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22, 121 + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), 180 122 /* PPPoE */ 181 123 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */ 182 124 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)), ··· 202 128 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)), 203 129 /* NAT_T_ESP */ 204 130 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */ 205 - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)), 131 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, 132 + ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI), 133 + /* L2TPV2 */ 134 + /* ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */ 135 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12, 136 + ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID), 137 + /* L2TPV2_LEN */ 138 + /* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */ 139 + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14, 140 + ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID), 206 141 }; 207 142 208 143 /* Bitmaps indicating relevant packet types for a particular protocol header ··· 220 137 */ 221 138 static const u32 ice_ptypes_mac_ofos[] = { 222 139 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, 223 - 0x0000077E, 0x00000000, 0x00000000, 
0x00000000, 224 - 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000, 225 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 140 + 0x0000077E, 0x000003FF, 0x00000000, 0x00000000, 141 + 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000707, 142 + 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000, 226 143 0x00000000, 0x00000000, 0x00000000, 0x00000000, 227 144 0x00000000, 0x00000000, 0x00000000, 0x00000000, 228 145 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 245 162 * include IPv4 other PTYPEs 246 163 */ 247 164 static const u32 ice_ptypes_ipv4_ofos[] = { 248 - 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, 165 + 0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000, 249 166 0x00000000, 0x00000155, 0x00000000, 0x00000000, 250 - 0x00000000, 0x000FC000, 0x00000000, 0x00000000, 251 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 167 + 0x00000000, 0x000FC000, 0x000002A0, 0x00000000, 168 + 0x00015000, 0x00000000, 0x00000000, 0x00000000, 252 169 0x00000000, 0x00000000, 0x00000000, 0x00000000, 253 170 0x00000000, 0x00000000, 0x00000000, 0x00000000, 254 171 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 259 176 * IPv4 other PTYPEs 260 177 */ 261 178 static const u32 ice_ptypes_ipv4_ofos_all[] = { 262 - 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, 179 + 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000, 263 180 0x00000000, 0x00000155, 0x00000000, 0x00000000, 264 - 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101, 265 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 181 + 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101, 182 + 0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000, 266 183 0x00000000, 0x00000000, 0x00000000, 0x00000000, 267 184 0x00000000, 0x00000000, 0x00000000, 0x00000000, 268 185 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 274 191 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 275 192 0x0000000E, 0x00000000, 0x00000000, 0x00000000, 276 193 0x00000000, 0x00000000, 0x001FF800, 0x00000000, 277 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 194 + 0xC0FC0000, 
0x0000000F, 0xBC0BC0BC, 0x00000BC0, 278 195 0x00000000, 0x00000000, 0x00000000, 0x00000000, 279 196 0x00000000, 0x00000000, 0x00000000, 0x00000000, 280 197 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 285 202 * include IPv6 other PTYPEs 286 203 */ 287 204 static const u32 ice_ptypes_ipv6_ofos[] = { 288 - 0x00000000, 0x00000000, 0x77000000, 0x10002000, 205 + 0x00000000, 0x00000000, 0x76000000, 0x10002000, 289 206 0x00000000, 0x000002AA, 0x00000000, 0x00000000, 290 - 0x00000000, 0x03F00000, 0x00000000, 0x00000000, 291 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 207 + 0x00000000, 0x03F00000, 0x00000540, 0x00000000, 208 + 0x0002A000, 0x00000000, 0x00000000, 0x00000000, 292 209 0x00000000, 0x00000000, 0x00000000, 0x00000000, 293 210 0x00000000, 0x00000000, 0x00000000, 0x00000000, 294 211 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 299 216 * IPv6 other PTYPEs 300 217 */ 301 218 static const u32 ice_ptypes_ipv6_ofos_all[] = { 302 - 0x00000000, 0x00000000, 0x77000000, 0x10002000, 303 - 0x00000000, 0x000002AA, 0x00000000, 0x00000000, 304 - 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206, 305 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 219 + 0x00000000, 0x00000000, 0x76000000, 0xFEFDE000, 220 + 0x0000077E, 0x000002AA, 0x00000000, 0x00000000, 221 + 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206, 222 + 0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF, 306 223 0x00000000, 0x00000000, 0x00000000, 0x00000000, 307 224 0x00000000, 0x00000000, 0x00000000, 0x00000000, 308 225 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 314 231 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 315 232 0x00000770, 0x00000000, 0x00000000, 0x00000000, 316 233 0x00000000, 0x00000000, 0x7FE00000, 0x00000000, 317 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 234 + 0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F, 318 235 0x00000000, 0x00000000, 0x00000000, 0x00000000, 319 236 0x00000000, 0x00000000, 0x00000000, 0x00000000, 320 237 0x00000000, 0x00000000, 0x00000000, 
0x00000000, ··· 387 304 static const u32 ice_ptypes_udp_il[] = { 388 305 0x81000000, 0x20204040, 0x04000010, 0x80810102, 389 306 0x00000040, 0x00000000, 0x00000000, 0x00000000, 390 - 0x00000000, 0x00410000, 0x90842000, 0x00000007, 391 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 307 + 0x00000000, 0x00410000, 0x908427E0, 0x00000007, 308 + 0x0413F000, 0x00000041, 0x10410410, 0x00004104, 392 309 0x00000000, 0x00000000, 0x00000000, 0x00000000, 393 310 0x00000000, 0x00000000, 0x00000000, 0x00000000, 394 311 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 400 317 0x04000000, 0x80810102, 0x10000040, 0x02040408, 401 318 0x00000102, 0x00000000, 0x00000000, 0x00000000, 402 319 0x00000000, 0x00820000, 0x21084000, 0x00000000, 403 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 320 + 0x08200000, 0x00000082, 0x20820820, 0x00008208, 404 321 0x00000000, 0x00000000, 0x00000000, 0x00000000, 405 322 0x00000000, 0x00000000, 0x00000000, 0x00000000, 406 323 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 412 329 0x08000000, 0x01020204, 0x20000081, 0x04080810, 413 330 0x00000204, 0x00000000, 0x00000000, 0x00000000, 414 331 0x00000000, 0x01040000, 0x00000000, 0x00000000, 415 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 332 + 0x10400000, 0x00000104, 0x00000000, 0x00000000, 416 333 0x00000000, 0x00000000, 0x00000000, 0x00000000, 417 334 0x00000000, 0x00000000, 0x00000000, 0x00000000, 418 335 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 436 353 0x00000000, 0x02040408, 0x40000102, 0x08101020, 437 354 0x00000408, 0x00000000, 0x00000000, 0x00000000, 438 355 0x00000000, 0x00000000, 0x42108000, 0x00000000, 439 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 356 + 0x20800000, 0x00000208, 0x00000000, 0x00000000, 440 357 0x00000000, 0x00000000, 0x00000000, 0x00000000, 441 358 0x00000000, 0x00000000, 0x00000000, 0x00000000, 442 359 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 448 365 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, 449 366 0x0000017E, 0x00000000, 
0x00000000, 0x00000000, 450 367 0x00000000, 0x00000000, 0x00000000, 0x00000000, 451 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 368 + 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF, 452 369 0x00000000, 0x00000000, 0x00000000, 0x00000000, 453 370 0x00000000, 0x00000000, 0x00000000, 0x00000000, 454 371 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 457 374 458 375 /* Packet types for packets with an Innermost/Last MAC header */ 459 376 static const u32 ice_ptypes_mac_il[] = { 460 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 377 + 0x00000000, 0x20000000, 0x00000000, 0x00000000, 461 378 0x00000000, 0x00000000, 0x00000000, 0x00000000, 462 379 0x00000000, 0x00000000, 0x00000000, 0x00000000, 463 380 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 471 388 static const u32 ice_ptypes_gtpc[] = { 472 389 0x00000000, 0x00000000, 0x00000000, 0x00000000, 473 390 0x00000000, 0x00000000, 0x00000000, 0x00000000, 474 - 0x00000000, 0x00000000, 0x00000180, 0x00000000, 391 + 0x00000000, 0x00000000, 0x000001E0, 0x00000000, 475 392 0x00000000, 0x00000000, 0x00000000, 0x00000000, 476 393 0x00000000, 0x00000000, 0x00000000, 0x00000000, 477 394 0x00000000, 0x00000000, 0x00000000, 0x00000000, ··· 2405 2322 ice_rss_config_xor(hw, prof_id, 2406 2323 sctp_src->idx, sctp_dst->idx, 1); 2407 2324 } 2325 + } 2326 + 2327 + /** 2328 + * ice_rss_cfg_raw_symm - Configure symmetric RSS for a raw parser profile 2329 + * @hw: device HW 2330 + * @prof: parser profile describing extracted FV (field vector) entries 2331 + * @prof_id: RSS profile identifier used to program symmetry registers 2332 + * 2333 + * The routine scans the parser profile's FV entries and looks for 2334 + * direction-sensitive pairs (L3 src/dst, L4 src/dst). When a pair is found, 2335 + * it programs XOR-based symmetry so that flows hash identically regardless 2336 + * of packet direction. This preserves CPU affinity for the same 5-tuple. 
2337 + * 2338 + * Notes: 2339 + * - The size of each logical field (IPv4/IPv6 address, L4 port) is expressed 2340 + * in units of ICE_FLOW_FV_EXTRACT_SZ so we can step across fv[] correctly. 2341 + * - We guard against out-of-bounds access before looking at fv[i + len]. 2342 + */ 2343 + static void ice_rss_cfg_raw_symm(struct ice_hw *hw, 2344 + const struct ice_parser_profile *prof, 2345 + u64 prof_id) 2346 + { 2347 + for (size_t i = 0; i < prof->fv_num; i++) { 2348 + u8 proto_id = prof->fv[i].proto_id; 2349 + u16 src_off = 0, dst_off = 0; 2350 + size_t src_idx, dst_idx; 2351 + bool is_matched = false; 2352 + unsigned int len = 0; 2353 + 2354 + switch (proto_id) { 2355 + /* IPv4 address pairs (outer/inner variants) */ 2356 + case ICE_PROT_IPV4_OF_OR_S: 2357 + case ICE_PROT_IPV4_IL: 2358 + case ICE_PROT_IPV4_IL_IL: 2359 + len = ICE_FLOW_FLD_SZ_IPV4_ADDR / 2360 + ICE_FLOW_FV_EXTRACT_SZ; 2361 + src_off = ICE_FLOW_FIELD_IPV4_SRC_OFFSET; 2362 + dst_off = ICE_FLOW_FIELD_IPV4_DST_OFFSET; 2363 + break; 2364 + 2365 + /* IPv6 address pairs (outer/inner variants) */ 2366 + case ICE_PROT_IPV6_OF_OR_S: 2367 + case ICE_PROT_IPV6_IL: 2368 + case ICE_PROT_IPV6_IL_IL: 2369 + len = ICE_FLOW_FLD_SZ_IPV6_ADDR / 2370 + ICE_FLOW_FV_EXTRACT_SZ; 2371 + src_off = ICE_FLOW_FIELD_IPV6_SRC_OFFSET; 2372 + dst_off = ICE_FLOW_FIELD_IPV6_DST_OFFSET; 2373 + break; 2374 + 2375 + /* L4 port pairs (TCP/UDP/SCTP) */ 2376 + case ICE_PROT_TCP_IL: 2377 + case ICE_PROT_UDP_IL_OR_S: 2378 + case ICE_PROT_SCTP_IL: 2379 + len = ICE_FLOW_FLD_SZ_PORT / ICE_FLOW_FV_EXTRACT_SZ; 2380 + src_off = ICE_FLOW_FIELD_SRC_PORT_OFFSET; 2381 + dst_off = ICE_FLOW_FIELD_DST_PORT_OFFSET; 2382 + break; 2383 + 2384 + default: 2385 + continue; 2386 + } 2387 + 2388 + /* Bounds check before accessing fv[i + len]. */ 2389 + if (i + len >= prof->fv_num) 2390 + continue; 2391 + 2392 + /* Verify src/dst pairing for this protocol id. 
*/ 2393 + is_matched = prof->fv[i].offset == src_off && 2394 + prof->fv[i + len].proto_id == proto_id && 2395 + prof->fv[i + len].offset == dst_off; 2396 + if (!is_matched) 2397 + continue; 2398 + 2399 + /* Program XOR symmetry for this field pair. */ 2400 + src_idx = i; 2401 + dst_idx = i + len; 2402 + 2403 + ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len); 2404 + 2405 + /* Skip over the pair we just handled; the loop's ++i advances 2406 + * one more element, hence the --i after the jump. 2407 + */ 2408 + i += (2 * len); 2409 + /* not strictly needed; keeps static analyzers happy */ 2410 + if (i == 0) 2411 + break; 2412 + --i; 2413 + } 2414 + } 2415 + 2416 + /* Max registers index per packet profile */ 2417 + #define ICE_SYMM_REG_INDEX_MAX 6 2418 + 2419 + /** 2420 + * ice_rss_update_raw_symm - update symmetric hash configuration 2421 + * for raw pattern 2422 + * @hw: pointer to the hardware structure 2423 + * @cfg: configure parameters for raw pattern 2424 + * @id: profile tracking ID 2425 + * 2426 + * Update symmetric hash configuration for raw pattern if required. 2427 + * Otherwise only clear to default. 2428 + */ 2429 + void 2430 + ice_rss_update_raw_symm(struct ice_hw *hw, 2431 + struct ice_rss_raw_cfg *cfg, u64 id) 2432 + { 2433 + struct ice_prof_map *map; 2434 + u8 prof_id, m; 2435 + 2436 + mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); 2437 + map = ice_search_prof_id(hw, ICE_BLK_RSS, id); 2438 + if (map) 2439 + prof_id = map->prof_id; 2440 + mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); 2441 + if (!map) 2442 + return; 2443 + /* clear to default */ 2444 + for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++) 2445 + wr32(hw, GLQF_HSYMM(prof_id, m), 0); 2446 + 2447 + if (cfg->symm) 2448 + ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id); 2408 2449 } 2409 2450 2410 2451 /**
+90 -4
drivers/net/ethernet/intel/ice/ice_flow.h
··· 22 22 #define ICE_FLOW_HASH_IPV6 \ 23 23 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \ 24 24 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)) 25 + #define ICE_FLOW_HASH_IPV6_PRE32 \ 26 + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA) | \ 27 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA)) 28 + #define ICE_FLOW_HASH_IPV6_PRE48 \ 29 + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA) | \ 30 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA)) 31 + #define ICE_FLOW_HASH_IPV6_PRE64 \ 32 + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | \ 33 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)) 25 34 #define ICE_FLOW_HASH_TCP_PORT \ 26 35 (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \ 27 36 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)) ··· 48 39 #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) 49 40 #define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) 50 41 #define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) 42 + 43 + #define ICE_HASH_TCP_IPV6_PRE32 \ 44 + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_TCP_PORT) 45 + #define ICE_HASH_UDP_IPV6_PRE32 \ 46 + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_UDP_PORT) 47 + #define ICE_HASH_SCTP_IPV6_PRE32 \ 48 + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_SCTP_PORT) 49 + #define ICE_HASH_TCP_IPV6_PRE48 \ 50 + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_TCP_PORT) 51 + #define ICE_HASH_UDP_IPV6_PRE48 \ 52 + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_UDP_PORT) 53 + #define ICE_HASH_SCTP_IPV6_PRE48 \ 54 + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_SCTP_PORT) 55 + #define ICE_HASH_TCP_IPV6_PRE64 \ 56 + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_TCP_PORT) 57 + #define ICE_HASH_UDP_IPV6_PRE64 \ 58 + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_UDP_PORT) 59 + #define ICE_HASH_SCTP_IPV6_PRE64 \ 60 + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_SCTP_PORT) 61 + 62 + #define ICE_FLOW_HASH_GTP_TEID \ 63 + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) 64 + 65 + #define ICE_FLOW_HASH_GTP_IPV4_TEID \ 66 + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID) 67 + 
#define ICE_FLOW_HASH_GTP_IPV6_TEID \ 68 + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID) 51 69 52 70 #define ICE_FLOW_HASH_GTP_C_TEID \ 53 71 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) ··· 164 128 #define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \ 165 129 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI) 166 130 131 + #define ICE_FLOW_HASH_L2TPV2_SESS_ID \ 132 + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)) 133 + #define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \ 134 + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID) 135 + 136 + #define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \ 137 + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)) 138 + #define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \ 139 + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID) 140 + 141 + #define ICE_FLOW_FIELD_IPV4_SRC_OFFSET 12 142 + #define ICE_FLOW_FIELD_IPV4_DST_OFFSET 16 143 + #define ICE_FLOW_FIELD_IPV6_SRC_OFFSET 8 144 + #define ICE_FLOW_FIELD_IPV6_DST_OFFSET 24 145 + #define ICE_FLOW_FIELD_SRC_PORT_OFFSET 0 146 + #define ICE_FLOW_FIELD_DST_PORT_OFFSET 2 147 + 167 148 /* Protocol header fields within a packet segment. A segment consists of one or 168 149 * more protocol headers that make up a logical group of protocol headers. Each 169 150 * logical group of protocol headers encapsulates or is encapsulated using/by ··· 213 160 ICE_FLOW_SEG_HDR_AH = 0x00200000, 214 161 ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000, 215 162 ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000, 163 + ICE_FLOW_SEG_HDR_GTPU_NON_IP = 0x01000000, 164 + ICE_FLOW_SEG_HDR_L2TPV2 = 0x10000000, 216 165 /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and 217 - * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs 166 + * ICE_FLOW_SEG_HDR_IPV6. 
218 167 */ 219 - ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000, 168 + ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000, 169 + ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000, 220 170 }; 221 171 222 172 /* These segments all have the same PTYPES, but are otherwise distinguished by ··· 256 200 ICE_FLOW_FIELD_IDX_IPV4_DA, 257 201 ICE_FLOW_FIELD_IDX_IPV6_SA, 258 202 ICE_FLOW_FIELD_IDX_IPV6_DA, 203 + ICE_FLOW_FIELD_IDX_IPV4_CHKSUM, 204 + ICE_FLOW_FIELD_IDX_IPV4_ID, 205 + ICE_FLOW_FIELD_IDX_IPV6_ID, 206 + ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA, 207 + ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA, 208 + ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA, 209 + ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA, 210 + ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA, 211 + ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA, 259 212 /* L4 */ 260 213 ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 261 214 ICE_FLOW_FIELD_IDX_TCP_DST_PORT, ··· 273 208 ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 274 209 ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 275 210 ICE_FLOW_FIELD_IDX_TCP_FLAGS, 211 + ICE_FLOW_FIELD_IDX_TCP_CHKSUM, 212 + ICE_FLOW_FIELD_IDX_UDP_CHKSUM, 213 + ICE_FLOW_FIELD_IDX_SCTP_CHKSUM, 276 214 /* ARP */ 277 215 ICE_FLOW_FIELD_IDX_ARP_SIP, 278 216 ICE_FLOW_FIELD_IDX_ARP_DIP, ··· 296 228 ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 297 229 /* GTPU_UP */ 298 230 ICE_FLOW_FIELD_IDX_GTPU_UP_TEID, 231 + ICE_FLOW_FIELD_IDX_GTPU_UP_QFI, 299 232 /* GTPU_DWN */ 300 233 ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID, 301 - /* PPPoE */ 234 + ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI, 302 235 ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID, 303 236 /* PFCP */ 304 237 ICE_FLOW_FIELD_IDX_PFCP_SEID, 305 - /* L2TPv3 */ 306 238 ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 307 239 /* ESP */ 308 240 ICE_FLOW_FIELD_IDX_ESP_SPI, ··· 310 242 ICE_FLOW_FIELD_IDX_AH_SPI, 311 243 /* NAT_T ESP */ 312 244 ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, 245 + /* L2TPV2 SESSION ID*/ 246 + ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID, 247 + /* L2TPV2_LEN SESSION ID */ 248 + ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID, 313 249 /* The total number of enums must not exceed 64 */ 314 250 ICE_FLOW_FIELD_IDX_MAX 315 251 }; 252 + 253 + 
static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64, "The total number of enums must not exceed 64"); 316 254 317 255 #define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) 318 256 #define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) ··· 370 296 /* take inner headers as inputset for packet with outer ipv6. */ 371 297 ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, 372 298 /* take outer headers first then inner headers as inputset */ 299 + /* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */ 300 + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE, 301 + /* take inner as inputset for GTPoGRE with outer IPv6 + GRE. */ 302 + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE, 373 303 ICE_RSS_ANY_HEADERS 374 304 }; 375 305 ··· 484 406 bool symm; /* Symmetric Hash for RSS */ 485 407 }; 486 408 409 + struct ice_rss_raw_cfg { 410 + struct ice_parser_profile prof; 411 + bool raw_ena; 412 + bool symm; 413 + }; 414 + 487 415 struct ice_rss_cfg { 488 416 struct list_head l_entry; 489 417 /* bitmap of VSIs added to the RSS entry */ ··· 528 444 int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, 529 445 const struct ice_rss_hash_cfg *cfg); 530 446 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm); 447 + void ice_rss_update_raw_symm(struct ice_hw *hw, 448 + struct ice_rss_raw_cfg *cfg, u64 id); 531 449 #endif /* _ICE_FLOW_H_ */
+20
drivers/net/ethernet/intel/ice/ice_protocol_type.h
··· 82 82 enum ice_prot_id { 83 83 ICE_PROT_ID_INVAL = 0, 84 84 ICE_PROT_MAC_OF_OR_S = 1, 85 + ICE_PROT_MAC_O2 = 2, 85 86 ICE_PROT_MAC_IL = 4, 87 + ICE_PROT_MAC_IN_MAC = 7, 86 88 ICE_PROT_ETYPE_OL = 9, 87 89 ICE_PROT_ETYPE_IL = 10, 90 + ICE_PROT_PAY = 15, 91 + ICE_PROT_EVLAN_O = 16, 92 + ICE_PROT_VLAN_O = 17, 93 + ICE_PROT_VLAN_IF = 18, 94 + ICE_PROT_MPLS_OL_MINUS_1 = 27, 95 + ICE_PROT_MPLS_OL_OR_OS = 28, 96 + ICE_PROT_MPLS_IL = 29, 88 97 ICE_PROT_IPV4_OF_OR_S = 32, 89 98 ICE_PROT_IPV4_IL = 33, 99 + ICE_PROT_IPV4_IL_IL = 34, 90 100 ICE_PROT_IPV6_OF_OR_S = 40, 91 101 ICE_PROT_IPV6_IL = 41, 102 + ICE_PROT_IPV6_IL_IL = 42, 103 + ICE_PROT_IPV6_NEXT_PROTO = 43, 104 + ICE_PROT_IPV6_FRAG = 47, 92 105 ICE_PROT_TCP_IL = 49, 93 106 ICE_PROT_UDP_OF = 52, 94 107 ICE_PROT_UDP_IL_OR_S = 53, 95 108 ICE_PROT_GRE_OF = 64, 109 + ICE_PROT_NSH_F = 84, 96 110 ICE_PROT_ESP_F = 88, 97 111 ICE_PROT_ESP_2 = 89, 98 112 ICE_PROT_SCTP_IL = 96, 99 113 ICE_PROT_ICMP_IL = 98, 100 114 ICE_PROT_ICMPV6_IL = 100, 115 + ICE_PROT_VRRP_F = 101, 116 + ICE_PROT_OSPF = 102, 101 117 ICE_PROT_PPPOE = 103, 102 118 ICE_PROT_L2TPV3 = 104, 119 + ICE_PROT_ATAOE_OF = 114, 120 + ICE_PROT_CTRL_OF = 116, 121 + ICE_PROT_LLDP_OF = 117, 103 122 ICE_PROT_ARP_OF = 118, 104 123 ICE_PROT_META_ID = 255, /* when offset == metadata */ 124 + ICE_PROT_EAPOL_OF = 120, 105 125 ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ 106 126 }; 107 127
+48
drivers/net/ethernet/intel/ice/ice_vf_lib.h
··· 53 53 u16 last_printed; 54 54 }; 55 55 56 + enum ice_hash_ip_ctx_type { 57 + ICE_HASH_IP_CTX_IP = 0, 58 + ICE_HASH_IP_CTX_IP_ESP, 59 + ICE_HASH_IP_CTX_IP_UDP_ESP, 60 + ICE_HASH_IP_CTX_IP_AH, 61 + ICE_HASH_IP_CTX_IP_PFCP, 62 + ICE_HASH_IP_CTX_IP_UDP, 63 + ICE_HASH_IP_CTX_IP_TCP, 64 + ICE_HASH_IP_CTX_IP_SCTP, 65 + ICE_HASH_IP_CTX_MAX, 66 + }; 67 + 68 + struct ice_vf_hash_ip_ctx { 69 + struct ice_rss_hash_cfg ctx[ICE_HASH_IP_CTX_MAX]; 70 + }; 71 + 72 + enum ice_hash_gtpu_ctx_type { 73 + ICE_HASH_GTPU_CTX_EH_IP = 0, 74 + ICE_HASH_GTPU_CTX_EH_IP_UDP, 75 + ICE_HASH_GTPU_CTX_EH_IP_TCP, 76 + ICE_HASH_GTPU_CTX_UP_IP, 77 + ICE_HASH_GTPU_CTX_UP_IP_UDP, 78 + ICE_HASH_GTPU_CTX_UP_IP_TCP, 79 + ICE_HASH_GTPU_CTX_DW_IP, 80 + ICE_HASH_GTPU_CTX_DW_IP_UDP, 81 + ICE_HASH_GTPU_CTX_DW_IP_TCP, 82 + ICE_HASH_GTPU_CTX_MAX, 83 + }; 84 + 85 + struct ice_vf_hash_gtpu_ctx { 86 + struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX]; 87 + }; 88 + 89 + struct ice_vf_hash_ctx { 90 + struct ice_vf_hash_ip_ctx v4; 91 + struct ice_vf_hash_ip_ctx v6; 92 + struct ice_vf_hash_gtpu_ctx ipv4; 93 + struct ice_vf_hash_gtpu_ctx ipv6; 94 + }; 95 + 56 96 /* Structure to store fdir fv entry */ 57 97 struct ice_fdir_prof_info { 58 98 struct ice_parser_profile prof; ··· 104 64 u32 peak; 105 65 u16 queue_id; 106 66 u8 tc; 67 + }; 68 + 69 + /* Structure to store RSS field vector entry */ 70 + struct ice_rss_prof_info { 71 + struct ice_parser_profile prof; 72 + bool symm; 107 73 }; 108 74 109 75 /* VF operations */ ··· 152 106 u16 ctrl_vsi_idx; 153 107 struct ice_vf_fdir fdir; 154 108 struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; 109 + struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS]; 110 + struct ice_vf_hash_ctx hash_ctx; 155 111 u64 rss_hashcfg; /* RSS hash configuration */ 156 112 struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ 157 113 struct virtchnl_version_info vf_ver;
+1258 -55
drivers/net/ethernet/intel/ice/virt/rss.c
··· 36 36 {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP}, 37 37 {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH}, 38 38 {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION}, 39 + {VIRTCHNL_PROTO_HDR_GTPC, ICE_FLOW_SEG_HDR_GTPC}, 40 + {VIRTCHNL_PROTO_HDR_L2TPV2, ICE_FLOW_SEG_HDR_L2TPV2}, 41 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG}, 42 + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG}, 43 + {VIRTCHNL_PROTO_HDR_GRE, ICE_FLOW_SEG_HDR_GRE}, 39 44 }; 40 45 41 46 struct ice_vc_hash_field_match_type { ··· 92 87 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 93 88 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 94 89 ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 95 - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 90 + {VIRTCHNL_PROTO_HDR_IPV4, 91 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID), 92 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)}, 93 + {VIRTCHNL_PROTO_HDR_IPV4, 94 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 95 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 96 + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 97 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 98 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | 99 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 100 + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 101 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 102 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | 103 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 104 + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 105 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 106 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 107 + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 108 + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 109 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | 110 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 111 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | 112 + 
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | 113 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 114 + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 115 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | 116 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 117 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | 118 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | 119 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 120 + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 121 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 122 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | 123 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 124 + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | 125 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 126 + {VIRTCHNL_PROTO_HDR_IPV4, 127 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | 128 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 129 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | 130 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 131 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 132 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 96 133 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 134 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 135 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), 136 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, 137 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 138 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), 139 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, 140 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 141 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 142 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), 143 + ICE_FLOW_HASH_IPV4}, 144 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 145 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 146 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 147 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | 148 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 149 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 150 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 151 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 152 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | 153 + 
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 154 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 155 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 156 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 157 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 158 + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 159 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 160 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 161 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 162 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 163 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID), 164 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)}, 165 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 166 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 167 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 168 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 169 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 170 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 171 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | 172 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 173 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 174 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 175 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 176 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | 177 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 178 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 179 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 180 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 181 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 182 + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 183 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 184 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 185 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | 186 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 187 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | 188 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | 189 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 190 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 191 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 192 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | 193 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 194 + 
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | 195 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | 196 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 197 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 198 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 199 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 200 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | 201 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 202 + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | 203 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 204 + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, 205 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | 206 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), 207 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | 208 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, 97 209 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), 98 210 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, 99 211 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), ··· 232 110 ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 233 111 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), 234 112 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 113 + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, 114 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID), 115 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID)}, 116 + {VIRTCHNL_PROTO_HDR_IPV6, 117 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | 118 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST), 119 + ICE_FLOW_HASH_IPV6_PRE64}, 120 + {VIRTCHNL_PROTO_HDR_IPV6, 121 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC), 122 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA)}, 123 + {VIRTCHNL_PROTO_HDR_IPV6, 124 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST), 125 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)}, 126 + {VIRTCHNL_PROTO_HDR_IPV6, 127 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | 128 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) | 129 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), 130 + ICE_FLOW_HASH_IPV6_PRE64 | 131 + 
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 132 + {VIRTCHNL_PROTO_HDR_IPV6, 133 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | 134 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), 135 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | 136 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 137 + {VIRTCHNL_PROTO_HDR_IPV6, 138 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) | 139 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), 140 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA) | 141 + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 235 142 {VIRTCHNL_PROTO_HDR_TCP, 236 143 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), 237 144 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, ··· 271 120 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | 272 121 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), 273 122 ICE_FLOW_HASH_TCP_PORT}, 123 + {VIRTCHNL_PROTO_HDR_TCP, 124 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), 125 + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, 126 + {VIRTCHNL_PROTO_HDR_TCP, 127 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | 128 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), 129 + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | 130 + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, 131 + {VIRTCHNL_PROTO_HDR_TCP, 132 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) | 133 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), 134 + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) | 135 + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, 136 + {VIRTCHNL_PROTO_HDR_TCP, 137 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | 138 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) | 139 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), 140 + ICE_FLOW_HASH_TCP_PORT | 141 + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, 274 142 {VIRTCHNL_PROTO_HDR_UDP, 275 143 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), 276 144 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, ··· 300 130 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | 301 131 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), 302 132 ICE_FLOW_HASH_UDP_PORT}, 133 + 
{VIRTCHNL_PROTO_HDR_UDP, 134 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), 135 + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, 136 + {VIRTCHNL_PROTO_HDR_UDP, 137 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | 138 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), 139 + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | 140 + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, 141 + {VIRTCHNL_PROTO_HDR_UDP, 142 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) | 143 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), 144 + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) | 145 + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, 146 + {VIRTCHNL_PROTO_HDR_UDP, 147 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | 148 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) | 149 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), 150 + ICE_FLOW_HASH_UDP_PORT | 151 + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, 303 152 {VIRTCHNL_PROTO_HDR_SCTP, 304 153 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), 305 154 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, ··· 329 140 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | 330 141 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), 331 142 ICE_FLOW_HASH_SCTP_PORT}, 143 + {VIRTCHNL_PROTO_HDR_SCTP, 144 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), 145 + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, 146 + {VIRTCHNL_PROTO_HDR_SCTP, 147 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | 148 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), 149 + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | 150 + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, 151 + {VIRTCHNL_PROTO_HDR_SCTP, 152 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) | 153 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), 154 + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) | 155 + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, 156 + {VIRTCHNL_PROTO_HDR_SCTP, 157 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | 158 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) | 159 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), 160 + 
ICE_FLOW_HASH_SCTP_PORT | 161 + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, 332 162 {VIRTCHNL_PROTO_HDR_PPPOE, 333 163 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID), 334 164 BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)}, ··· 363 155 BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)}, 364 156 {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID), 365 157 BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)}, 158 + {VIRTCHNL_PROTO_HDR_GTPC, 159 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPC_TEID), 160 + BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)}, 161 + {VIRTCHNL_PROTO_HDR_L2TPV2, 162 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID), 163 + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)}, 164 + {VIRTCHNL_PROTO_HDR_L2TPV2, 165 + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID), 166 + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)}, 366 167 }; 168 + 169 + static int 170 + ice_vc_rss_hash_update(struct ice_hw *hw, struct ice_vsi *vsi, u8 hash_type) 171 + { 172 + struct ice_vsi_ctx *ctx; 173 + int ret; 174 + 175 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 176 + if (!ctx) 177 + return -ENOMEM; 178 + 179 + /* clear previous hash_type */ 180 + ctx->info.q_opt_rss = vsi->info.q_opt_rss & 181 + ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; 182 + /* hash_type is passed in as ICE_AQ_VSI_Q_OPT_RSS_<XOR|TPLZ|SYM_TPLZ */ 183 + ctx->info.q_opt_rss |= FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, 184 + hash_type); 185 + 186 + /* Preserve existing queueing option setting */ 187 + ctx->info.q_opt_tc = vsi->info.q_opt_tc; 188 + ctx->info.q_opt_flags = vsi->info.q_opt_flags; 189 + 190 + ctx->info.valid_sections = 191 + cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); 192 + 193 + ret = ice_update_vsi(hw, vsi->idx, ctx, NULL); 194 + if (ret) { 195 + dev_err(ice_hw_to_dev(hw), "update VSI for RSS failed, err %d aq_err %s\n", 196 + ret, libie_aq_str(hw->adminq.sq_last_status)); 197 + } else { 198 + vsi->info.q_opt_rss = ctx->info.q_opt_rss; 199 + } 200 + 201 + kfree(ctx); 202 + 203 + return ret; 204 + } 367 205 368 206 /** 369 207 * 
ice_vc_validate_pattern ··· 525 271 const struct ice_vc_hash_field_match_type *hf_list; 526 272 const struct ice_vc_hdr_match_type *hdr_list; 527 273 int i, hf_list_len, hdr_list_len; 274 + bool outer_ipv4 = false; 275 + bool outer_ipv6 = false; 276 + bool inner_hdr = false; 277 + bool has_gre = false; 278 + 528 279 u32 *addl_hdrs = &hash_cfg->addl_hdrs; 529 280 u64 *hash_flds = &hash_cfg->hash_flds; 530 281 ··· 549 290 for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { 550 291 struct virtchnl_proto_hdr *proto_hdr = 551 292 &rss_cfg->proto_hdrs.proto_hdr[i]; 552 - bool hdr_found = false; 293 + u32 hdr_found = 0; 553 294 int j; 554 295 555 - /* Find matched ice headers according to virtchnl headers. */ 296 + /* Find matched ice headers according to virtchnl headers. 297 + * Also figure out the outer type of GTPU headers. 298 + */ 556 299 for (j = 0; j < hdr_list_len; j++) { 557 300 struct ice_vc_hdr_match_type hdr_map = hdr_list[j]; 558 301 559 - if (proto_hdr->type == hdr_map.vc_hdr) { 560 - *addl_hdrs |= hdr_map.ice_hdr; 561 - hdr_found = true; 562 - } 302 + if (proto_hdr->type == hdr_map.vc_hdr) 303 + hdr_found = hdr_map.ice_hdr; 563 304 } 564 305 565 306 if (!hdr_found) ··· 577 318 break; 578 319 } 579 320 } 321 + 322 + if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4 && !inner_hdr) 323 + outer_ipv4 = true; 324 + else if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6 && 325 + !inner_hdr) 326 + outer_ipv6 = true; 327 + /* for GRE and L2TPv2, take inner header as input set if no 328 + * any field is selected from outer headers. 329 + * for GTPU, take inner header and GTPU teid as input set. 
330 + */ 331 + else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP || 332 + proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH || 333 + proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN || 334 + proto_hdr->type == 335 + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) || 336 + ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 || 337 + proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) && 338 + *hash_flds == 0)) { 339 + /* set inner_hdr flag, and clean up outer header */ 340 + inner_hdr = true; 341 + 342 + /* clear outer headers */ 343 + *addl_hdrs = 0; 344 + 345 + if (outer_ipv4 && outer_ipv6) 346 + return false; 347 + 348 + if (outer_ipv4) 349 + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4; 350 + else if (outer_ipv6) 351 + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; 352 + else 353 + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS; 354 + 355 + if (has_gre && outer_ipv4) 356 + hash_cfg->hdr_type = 357 + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE; 358 + if (has_gre && outer_ipv6) 359 + hash_cfg->hdr_type = 360 + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE; 361 + 362 + if (proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) 363 + has_gre = true; 364 + } 365 + 366 + *addl_hdrs |= hdr_found; 367 + 368 + /* refine hash hdrs and fields for IP fragment */ 369 + if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr, 370 + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID) && 371 + proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4_FRAG) { 372 + *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG; 373 + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER); 374 + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID); 375 + VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr, 376 + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID); 377 + } 378 + if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr, 379 + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID) && 380 + proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG) { 381 + *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG; 382 + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER); 383 + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID); 384 + 
			VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
				VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID);
		}
	}

	/* refine gtpu header if we take outer as input set for a no inner
	 * ip gtpu flow.
	 */
	if (hash_cfg->hdr_type == ICE_RSS_OUTER_HEADERS &&
	    *addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
		*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_GTPU_IP);
		*addl_hdrs |= ICE_FLOW_SEG_HDR_GTPU_NON_IP;
	}

	/* refine hash field for esp and nat-t-esp: ESP over UDP is
	 * reclassified as NAT-T ESP and hashes on the NAT-T SPI instead.
	 */
	if ((*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) &&
	    (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP)) {
		*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_UDP);
		*addl_hdrs |= ICE_FLOW_SEG_HDR_NAT_T_ESP;
		*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI));
		*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI);
	}

	/* refine hash hdrs for L4 udp/tcp/sctp: drop the IPV_OTHER marker
	 * once a concrete L4 header is present.
	 */
	if (*addl_hdrs & (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP |
	    ICE_FLOW_SEG_HDR_SCTP) &&
	    *addl_hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)
		*addl_hdrs &= ~ICE_FLOW_SEG_HDR_IPV_OTHER;

	return true;
}

/* ... intervening code elided from this view ... */
}

/**
 * ice_is_hash_cfg_valid - Check whether an RSS hash context is valid
 * @cfg: RSS hash configuration to test
 *
 * Return: true if both @cfg->hash_flds and @cfg->addl_hdrs are non-zero;
 * false otherwise.
 */
static bool ice_is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
{
	return cfg->hash_flds && cfg->addl_hdrs;
}

/**
 * ice_hash_cfg_reset - Reset an RSS hash context
 * @cfg: RSS hash configuration to reset
 *
 * Reset fields of @cfg that store the active rule information.
 */
static void ice_hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
{
	cfg->hash_flds = 0;
	cfg->addl_hdrs = 0;
	cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
	cfg->symm = 0;
}

/**
 * ice_hash_cfg_record - Record an RSS hash context
 * @ctx: destination (global) RSS hash configuration
 * @cfg: source RSS hash configuration to record
 *
 * Copy the active rule information from @cfg into @ctx.
 */
static void ice_hash_cfg_record(struct ice_rss_hash_cfg *ctx,
				struct ice_rss_hash_cfg *cfg)
{
	ctx->hash_flds = cfg->hash_flds;
	ctx->addl_hdrs = cfg->addl_hdrs;
	ctx->hdr_type = cfg->hdr_type;
	ctx->symm = cfg->symm;
}

/**
 * ice_hash_moveout - Delete an RSS configuration (keep context)
 * @vf: VF pointer
 * @cfg: RSS hash configuration
 *
 * Remove the rule from hardware but keep @cfg intact so that
 * ice_hash_moveback() can later re-program the same rule.
 *
 * Return: 0 on success (including when already absent); -ENOENT if @cfg is
 * invalid or VSI is missing; -EBUSY on hardware removal failure.
 */
static int
ice_hash_moveout(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	int ret;

	if (!ice_is_hash_cfg_valid(cfg) || !vsi)
		return -ENOENT;

	/* -ENOENT just means the rule was not programmed; not an error */
	ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
	if (ret && ret != -ENOENT) {
		dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
			vf->vf_id, vf->lan_vsi_idx, ret);
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_hash_moveback - Add an RSS hash configuration for a VF
 * @vf: VF pointer
 * @cfg: RSS hash configuration to apply
 *
 * Add @cfg to @vf if the context is valid and VSI exists; programs HW.
 *
 * Return:
 * * 0 on success
 * * -ENOENT if @cfg is invalid or VSI is missing
 * * -EBUSY if hardware programming fails
 */
static int
ice_hash_moveback(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	int ret;

	if (!ice_is_hash_cfg_valid(cfg) || !vsi)
		return -ENOENT;

	ret = ice_add_rss_cfg(hw, vsi, cfg);
	if (ret) {
		dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
			vf->vf_id, vf->lan_vsi_idx, ret);
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_hash_remove - remove a RSS configuration
 * @vf: pointer to the VF info
 * @cfg: pointer to the RSS hash configuration
 *
 * This function will delete a RSS hash configuration and also delete the
 * hash context which stores the rule info.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int
ice_hash_remove(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
{
	int ret;

	ret = ice_hash_moveout(vf, cfg);
	if (ret && ret != -ENOENT)
		return ret;

	/* unlike moveout, also forget the recorded rule */
	ice_hash_cfg_reset(cfg);

	return 0;
}

/* One conflict-resolution rule for ice_add_rss_cfg_pre_gtpu(): when a rule
 * for @ctx_idx is about to be added, contexts in @remove_list are deleted
 * outright and contexts in @moveout_list are removed from HW but kept
 * recorded so they can be re-applied afterwards.
 */
struct ice_gtpu_ctx_action {
	u32 ctx_idx;			/* context the action applies to */
	const u32 *remove_list;		/* contexts to fully remove */
	int remove_count;
	const u32 *moveout_list;	/* contexts to remove but keep recorded */
	int moveout_count;
};

/**
 * ice_add_rss_cfg_pre_gtpu - Pre-process the GTPU RSS configuration
 * @vf: pointer to the VF info
 * @ctx: pointer to the context of the GTPU hash
 * @ctx_idx: index of the hash context
 *
 * Pre-processes the GTPU hash configuration before adding a new
 * hash context. It removes or reorders existing hash configurations that may
 * conflict with the new one.
 * For example, if a GTPU_UP or GTPU_DWN rule is
 * configured after a GTPU_EH rule, the GTPU_EH hash will be matched first due
 * to TCAM write and match order (top-down). In such cases, the GTPU_EH rule
 * must be moved after the GTPU_UP/DWN rule. Conversely, if a GTPU_EH rule is
 * configured after a GTPU_UP/DWN rule, the UP/DWN rules should be removed to
 * avoid conflict.
 *
 * Return: 0 on success or a negative error code on failure
 */
static int ice_add_rss_cfg_pre_gtpu(struct ice_vf *vf,
				    struct ice_vf_hash_gtpu_ctx *ctx,
				    u32 ctx_idx)
{
	int ret, i;

	/* Contexts shadowed by a new EH_IP rule are removed outright */
	static const u32 remove_eh_ip[] = {
		ICE_HASH_GTPU_CTX_EH_IP_UDP, ICE_HASH_GTPU_CTX_EH_IP_TCP,
		ICE_HASH_GTPU_CTX_UP_IP, ICE_HASH_GTPU_CTX_UP_IP_UDP,
		ICE_HASH_GTPU_CTX_UP_IP_TCP, ICE_HASH_GTPU_CTX_DW_IP,
		ICE_HASH_GTPU_CTX_DW_IP_UDP, ICE_HASH_GTPU_CTX_DW_IP_TCP,
	};

	static const u32 remove_eh_ip_udp[] = {
		ICE_HASH_GTPU_CTX_UP_IP_UDP,
		ICE_HASH_GTPU_CTX_DW_IP_UDP,
	};
	static const u32 moveout_eh_ip_udp[] = {
		ICE_HASH_GTPU_CTX_UP_IP,
		ICE_HASH_GTPU_CTX_UP_IP_TCP,
		ICE_HASH_GTPU_CTX_DW_IP,
		ICE_HASH_GTPU_CTX_DW_IP_TCP,
	};

	static const u32 remove_eh_ip_tcp[] = {
		ICE_HASH_GTPU_CTX_UP_IP_TCP,
		ICE_HASH_GTPU_CTX_DW_IP_TCP,
	};
	static const u32 moveout_eh_ip_tcp[] = {
		ICE_HASH_GTPU_CTX_UP_IP,
		ICE_HASH_GTPU_CTX_UP_IP_UDP,
		ICE_HASH_GTPU_CTX_DW_IP,
		ICE_HASH_GTPU_CTX_DW_IP_UDP,
	};

	static const u32 remove_up_ip[] = {
		ICE_HASH_GTPU_CTX_UP_IP_UDP,
		ICE_HASH_GTPU_CTX_UP_IP_TCP,
	};
	static const u32 moveout_up_ip[] = {
		ICE_HASH_GTPU_CTX_EH_IP,
		ICE_HASH_GTPU_CTX_EH_IP_UDP,
		ICE_HASH_GTPU_CTX_EH_IP_TCP,
	};

	static const u32 moveout_up_ip_udp_tcp[] = {
		ICE_HASH_GTPU_CTX_EH_IP,
		ICE_HASH_GTPU_CTX_EH_IP_UDP,
		ICE_HASH_GTPU_CTX_EH_IP_TCP,
	};

	static const u32 remove_dw_ip[] = {
		ICE_HASH_GTPU_CTX_DW_IP_UDP,
		ICE_HASH_GTPU_CTX_DW_IP_TCP,
	};
	static const u32 moveout_dw_ip[] = {
		ICE_HASH_GTPU_CTX_EH_IP,
		ICE_HASH_GTPU_CTX_EH_IP_UDP,
		ICE_HASH_GTPU_CTX_EH_IP_TCP,
	};

	/* One action entry per GTPU context kind; first (only) match wins */
	static const struct ice_gtpu_ctx_action actions[] = {
		{ ICE_HASH_GTPU_CTX_EH_IP, remove_eh_ip,
		  ARRAY_SIZE(remove_eh_ip), NULL, 0 },
		{ ICE_HASH_GTPU_CTX_EH_IP_UDP, remove_eh_ip_udp,
		  ARRAY_SIZE(remove_eh_ip_udp), moveout_eh_ip_udp,
		  ARRAY_SIZE(moveout_eh_ip_udp) },
		{ ICE_HASH_GTPU_CTX_EH_IP_TCP, remove_eh_ip_tcp,
		  ARRAY_SIZE(remove_eh_ip_tcp), moveout_eh_ip_tcp,
		  ARRAY_SIZE(moveout_eh_ip_tcp) },
		{ ICE_HASH_GTPU_CTX_UP_IP, remove_up_ip,
		  ARRAY_SIZE(remove_up_ip), moveout_up_ip,
		  ARRAY_SIZE(moveout_up_ip) },
		{ ICE_HASH_GTPU_CTX_UP_IP_UDP, NULL, 0, moveout_up_ip_udp_tcp,
		  ARRAY_SIZE(moveout_up_ip_udp_tcp) },
		{ ICE_HASH_GTPU_CTX_UP_IP_TCP, NULL, 0, moveout_up_ip_udp_tcp,
		  ARRAY_SIZE(moveout_up_ip_udp_tcp) },
		{ ICE_HASH_GTPU_CTX_DW_IP, remove_dw_ip,
		  ARRAY_SIZE(remove_dw_ip), moveout_dw_ip,
		  ARRAY_SIZE(moveout_dw_ip) },
		{ ICE_HASH_GTPU_CTX_DW_IP_UDP, NULL, 0, moveout_dw_ip,
		  ARRAY_SIZE(moveout_dw_ip) },
		{ ICE_HASH_GTPU_CTX_DW_IP_TCP, NULL, 0, moveout_dw_ip,
		  ARRAY_SIZE(moveout_dw_ip) },
	};

	for (i = 0; i < ARRAY_SIZE(actions); i++) {
		if (actions[i].ctx_idx != ctx_idx)
			continue;

		if (actions[i].remove_list) {
			for (int j = 0; j < actions[i].remove_count; j++) {
				u16 rm = actions[i].remove_list[j];

				ret = ice_hash_remove(vf, &ctx->ctx[rm]);
				if (ret && ret != -ENOENT)
					return ret;
			}
		}

		if (actions[i].moveout_list) {
			for (int j = 0; j < actions[i].moveout_count; j++) {
				u16 mv = actions[i].moveout_list[j];

				ret = ice_hash_moveout(vf, &ctx->ctx[mv]);
				if (ret && ret != -ENOENT)
					return ret;
			}
		}
		break;
	}

	return 0;
}

/**
 * ice_add_rss_cfg_pre_ip - Pre-process IP-layer RSS configuration
 * @vf: VF pointer
 * @ctx: IP L4 hash context (ESP/UDP-ESP/AH/PFCP and UDP/TCP/SCTP)
 *
 * Remove covered/recorded IP RSS configurations prior to adding a new one.
 *
 * Return: 0 on success; negative error code on failure.
 */
static int
ice_add_rss_cfg_pre_ip(struct ice_vf *vf, struct ice_vf_hash_ip_ctx *ctx)
{
	int i, ret;

	/* starts at 1: slot 0 is presumably ICE_HASH_IP_CTX_IP, which the
	 * caller handles separately — TODO confirm against the enum
	 */
	for (i = 1; i < ICE_HASH_IP_CTX_MAX; i++)
		if (ice_is_hash_cfg_valid(&ctx->ctx[i])) {
			ret = ice_hash_remove(vf, &ctx->ctx[i]);
			if (ret)
				return ret;
		}

	return 0;
}

/**
 * ice_calc_gtpu_ctx_idx - Calculate GTPU hash context index
 * @hdrs: Bitmask of protocol headers prefixed with ICE_FLOW_SEG_HDR_*
 *
 * Determine the GTPU hash context index based on the combination of
 * encapsulation headers (GTPU_EH, GTPU_UP, GTPU_DWN) and transport
 * protocols (UDP, TCP) within IPv4 or IPv6 flows.
 *
 * Return: A valid context index (0-8) if the header combination is supported,
 * or ICE_HASH_GTPU_CTX_MAX if the combination is invalid.
 */
static enum ice_hash_gtpu_ctx_type ice_calc_gtpu_ctx_idx(u32 hdrs)
{
	u32 eh_idx, ip_idx;

	/* row: which GTPU encapsulation variant is present */
	if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH)
		eh_idx = 0;
	else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP)
		eh_idx = 1;
	else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
		eh_idx = 2;
	else
		return ICE_HASH_GTPU_CTX_MAX;

	/* column: L4 protocol (0 = none, 1 = UDP, 2 = TCP) */
	ip_idx = 0;
	if (hdrs & ICE_FLOW_SEG_HDR_UDP)
		ip_idx = 1;
	else if (hdrs & ICE_FLOW_SEG_HDR_TCP)
		ip_idx = 2;

	/* 3x3 grid flattened to indices 0-8; requires an IP header */
	if (hdrs & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
		return eh_idx * 3 + ip_idx;
	else
		return ICE_HASH_GTPU_CTX_MAX;
}

/**
 * ice_map_ip_ctx_idx - map the index of the IP L4 hash context
 * @hdrs: protocol headers prefix with ICE_FLOW_SEG_HDR_XXX.
 *
 * The IP L4 hash context use the index to classify for IPv4/IPv6 with
 * ESP/UDP_ESP/AH/PFCP and non-tunnel UDP/TCP/SCTP
 * this function map the index based on the protocol headers.
 *
 * Return: The mapped IP context index on success, or ICE_HASH_IP_CTX_MAX
 * if no matching context is found.
 */
static u8 ice_map_ip_ctx_idx(u32 hdrs)
{
	u8 i;

	/* exact-match table (hdrs must equal the entry bitmask); entry
	 * order matters — the first match wins
	 */
	static struct {
		u32 hdrs;
		u8 ctx_idx;
	} ip_ctx_idx_map[] = {
		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
			ICE_FLOW_SEG_HDR_ESP,
			ICE_HASH_IP_CTX_IP_ESP },
		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
			ICE_FLOW_SEG_HDR_NAT_T_ESP,
			ICE_HASH_IP_CTX_IP_UDP_ESP },
		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
			ICE_FLOW_SEG_HDR_AH,
			ICE_HASH_IP_CTX_IP_AH },
		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
			ICE_FLOW_SEG_HDR_PFCP_SESSION,
			ICE_HASH_IP_CTX_IP_PFCP },
		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
			ICE_HASH_IP_CTX_IP_UDP },
		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
			ICE_HASH_IP_CTX_IP_TCP },
		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
			ICE_HASH_IP_CTX_IP_SCTP },
		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
			ICE_HASH_IP_CTX_IP },
		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
			ICE_FLOW_SEG_HDR_ESP,
			ICE_HASH_IP_CTX_IP_ESP },
		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
			ICE_FLOW_SEG_HDR_NAT_T_ESP,
			ICE_HASH_IP_CTX_IP_UDP_ESP },
		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
			ICE_FLOW_SEG_HDR_AH,
			ICE_HASH_IP_CTX_IP_AH },
		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
			ICE_FLOW_SEG_HDR_PFCP_SESSION,
			ICE_HASH_IP_CTX_IP_PFCP },
		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
			ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
			ICE_HASH_IP_CTX_IP_UDP },
		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
			ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
			ICE_HASH_IP_CTX_IP_TCP },
		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
			ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
			ICE_HASH_IP_CTX_IP_SCTP },
		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
			ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
			ICE_HASH_IP_CTX_IP },
		/* the remaining mappings are used for default RSS */
		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
			ICE_HASH_IP_CTX_IP_UDP },
		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
			ICE_HASH_IP_CTX_IP_TCP },
		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
			ICE_HASH_IP_CTX_IP_SCTP },
		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
			ICE_HASH_IP_CTX_IP },
		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
			ICE_HASH_IP_CTX_IP_UDP },
		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
			ICE_HASH_IP_CTX_IP_TCP },
		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
			ICE_HASH_IP_CTX_IP_SCTP },
		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
			ICE_HASH_IP_CTX_IP },
	};

	for (i = 0; i < ARRAY_SIZE(ip_ctx_idx_map); i++) {
		if (hdrs == ip_ctx_idx_map[i].hdrs)
			return ip_ctx_idx_map[i].ctx_idx;
	}

	return ICE_HASH_IP_CTX_MAX;
}

/**
 * ice_add_rss_cfg_pre - Prepare RSS configuration context for a VF
 * @vf: pointer to the VF structure
 * @cfg: pointer to the RSS hash configuration
 *
 * Prepare the RSS hash context for a given VF based on the additional
 * protocol headers specified in @cfg. This includes pre-configuration
 * for IP and GTPU-based flows.
 *
 * If the configuration matches a known IP context, the function sets up
 * the appropriate IP hash context. If the configuration includes GTPU
 * headers, it prepares the GTPU-specific context accordingly.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int
ice_add_rss_cfg_pre(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
{
	u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
	u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);

	/* a plain-IP rule supersedes all recorded L4/ESP/AH/PFCP rules for
	 * the same address family
	 */
	if (ip_ctx_idx == ICE_HASH_IP_CTX_IP) {
		int ret = 0;

		if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
			ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v4);
		else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
			ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v6);

		if (ret)
			return ret;
	}

	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
		return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv4,
						ice_gtpu_ctx_idx);
	} else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
		return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv6,
						ice_gtpu_ctx_idx);
	}

	return 0;
}

/**
 * ice_add_rss_cfg_post_gtpu - Post-process GTPU RSS configuration
 * @vf: pointer to the VF info
 * @ctx: pointer to the context of the GTPU hash
 * @cfg: pointer to the RSS hash configuration
 * @ctx_idx: index of the hash context
 *
 * Post-processes the GTPU hash configuration after a new hash
 * context has been successfully added. It updates the context with the new
 * configuration and restores any previously removed hash contexts that need
 * to be re-applied. This ensures proper TCAM rule ordering and avoids
 * conflicts between overlapping GTPU rules.
 *
 * Return: 0 on success or a negative error code on failure
 */
static int ice_add_rss_cfg_post_gtpu(struct ice_vf *vf,
				     struct ice_vf_hash_gtpu_ctx *ctx,
				     struct ice_rss_hash_cfg *cfg, u32 ctx_idx)
{
	/* GTPU hash moveback lookup table indexed by context ID.
	 * Each entry is a bitmap indicating which contexts need moveback
	 * operations when the corresponding context index is processed.
	 */
	static const unsigned long
		ice_gtpu_moveback_tbl[ICE_HASH_GTPU_CTX_MAX] = {
		[ICE_HASH_GTPU_CTX_EH_IP] = 0,
		[ICE_HASH_GTPU_CTX_EH_IP_UDP] =
			BIT(ICE_HASH_GTPU_CTX_UP_IP) |
			BIT(ICE_HASH_GTPU_CTX_UP_IP_TCP) |
			BIT(ICE_HASH_GTPU_CTX_DW_IP) |
			BIT(ICE_HASH_GTPU_CTX_DW_IP_TCP),
		[ICE_HASH_GTPU_CTX_EH_IP_TCP] =
			BIT(ICE_HASH_GTPU_CTX_UP_IP) |
			BIT(ICE_HASH_GTPU_CTX_UP_IP_UDP) |
			BIT(ICE_HASH_GTPU_CTX_DW_IP) |
			BIT(ICE_HASH_GTPU_CTX_DW_IP_UDP),
		[ICE_HASH_GTPU_CTX_UP_IP] =
			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
		[ICE_HASH_GTPU_CTX_UP_IP_UDP] =
			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
		[ICE_HASH_GTPU_CTX_UP_IP_TCP] =
			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
		[ICE_HASH_GTPU_CTX_DW_IP] =
			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
		[ICE_HASH_GTPU_CTX_DW_IP_UDP] =
			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
		[ICE_HASH_GTPU_CTX_DW_IP_TCP] =
			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
	};
	unsigned long moveback_mask;
	int ret;
	int i;

	/* non-GTPU flows have nothing to post-process */
	if (unlikely(ctx_idx >= ICE_HASH_GTPU_CTX_MAX))
		return 0;

	/* record the newly added rule in its context slot */
	ctx->ctx[ctx_idx].addl_hdrs = cfg->addl_hdrs;
	ctx->ctx[ctx_idx].hash_flds = cfg->hash_flds;
	ctx->ctx[ctx_idx].hdr_type = cfg->hdr_type;
	ctx->ctx[ctx_idx].symm = cfg->symm;

	moveback_mask = ice_gtpu_moveback_tbl[ctx_idx];
	for_each_set_bit(i, &moveback_mask, ICE_HASH_GTPU_CTX_MAX) {
		ret = ice_hash_moveback(vf, &ctx->ctx[i]);
		if (ret && ret != -ENOENT)
			return ret;
	}

	return 0;
}

/**
 * ice_add_rss_cfg_post - post-process a newly added RSS configuration
 * @vf: pointer to the VF info
 * @cfg: pointer to the RSS hash configuration that was just added
 *
 * Record @cfg in the per-VF IP hash context and run the GTPU moveback
 * post-processing for the matching address family.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int
ice_add_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
{
	u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
	u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);

	/* slot 0 is skipped; presumably ICE_HASH_IP_CTX_IP — TODO confirm
	 * against the enum
	 */
	if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
		if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
			ice_hash_cfg_record(&vf->hash_ctx.v4.ctx[ip_ctx_idx], cfg);
		else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
			ice_hash_cfg_record(&vf->hash_ctx.v6.ctx[ip_ctx_idx], cfg);
	}

	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
		return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv4,
						 cfg, ice_gtpu_ctx_idx);
	} else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
		return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv6,
						 cfg, ice_gtpu_ctx_idx);
	}

	return 0;
}

/**
 * ice_rem_rss_cfg_post - post-process the RSS configuration
 * @vf: pointer to the VF info
 * @cfg: pointer to the RSS hash configuration
 *
 * Post process the RSS hash configuration after deleting a hash
 * config. Such as, it will reset the hash context for the GTPU hash.
918 + */ 919 + static void 920 + ice_rem_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) 921 + { 922 + u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs); 923 + u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs); 924 + 925 + if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) { 926 + if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) 927 + ice_hash_cfg_reset(&vf->hash_ctx.v4.ctx[ip_ctx_idx]); 928 + else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) 929 + ice_hash_cfg_reset(&vf->hash_ctx.v6.ctx[ip_ctx_idx]); 930 + } 931 + 932 + if (ice_gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX) 933 + return; 934 + 935 + if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) 936 + ice_hash_cfg_reset(&vf->hash_ctx.ipv4.ctx[ice_gtpu_ctx_idx]); 937 + else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) 938 + ice_hash_cfg_reset(&vf->hash_ctx.ipv6.ctx[ice_gtpu_ctx_idx]); 939 + } 940 + 941 + /** 942 + * ice_rem_rss_cfg_wrap - Wrapper for deleting an RSS configuration 943 + * @vf: pointer to the VF info 944 + * @cfg: pointer to the RSS hash configuration 945 + * 946 + * Wrapper function to delete a flow profile base on an RSS configuration, 947 + * and also post process the hash context base on the rollback mechanism 948 + * which handle some rules conflict by ice_add_rss_cfg_wrap. 949 + * 950 + * Return: 0 on success; negative error code on failure. 951 + */ 952 + static int 953 + ice_rem_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) 954 + { 955 + struct device *dev = ice_pf_to_dev(vf->pf); 956 + struct ice_vsi *vsi = ice_get_vf_vsi(vf); 957 + struct ice_hw *hw = &vf->pf->hw; 958 + int ret; 959 + 960 + ret = ice_rem_rss_cfg(hw, vsi->idx, cfg); 961 + /* We just ignore -ENOENT, because if two configurations share the same 962 + * profile remove one of them actually removes both, since the 963 + * profile is deleted. 
964 + */ 965 + if (ret && ret != -ENOENT) { 966 + dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n", 967 + vf->vf_id, vf->lan_vsi_idx, ret); 968 + return ret; 969 + } 970 + 971 + ice_rem_rss_cfg_post(vf, cfg); 972 + 973 + return 0; 974 + } 975 + 976 + /** 977 + * ice_add_rss_cfg_wrap - Wrapper for adding an RSS configuration 978 + * @vf: pointer to the VF info 979 + * @cfg: pointer to the RSS hash configuration 980 + * 981 + * Add a flow profile based on an RSS configuration. Use a rollback 982 + * mechanism to handle rule conflicts due to TCAM 983 + * write sequence from top to down. 984 + * 985 + * Return: 0 on success; negative error code on failure. 986 + */ 987 + static int 988 + ice_add_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) 989 + { 990 + struct device *dev = ice_pf_to_dev(vf->pf); 991 + struct ice_vsi *vsi = ice_get_vf_vsi(vf); 992 + struct ice_hw *hw = &vf->pf->hw; 993 + int ret; 994 + 995 + if (ice_add_rss_cfg_pre(vf, cfg)) 996 + return -EINVAL; 997 + 998 + ret = ice_add_rss_cfg(hw, vsi, cfg); 999 + if (ret) { 1000 + dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n", 1001 + vf->vf_id, vf->lan_vsi_idx, ret); 1002 + return ret; 1003 + } 1004 + 1005 + if (ice_add_rss_cfg_post(vf, cfg)) 1006 + ret = -EINVAL; 1007 + 1008 + return ret; 1009 + } 1010 + 1011 + /** 1012 + * ice_parse_raw_rss_pattern - Parse raw pattern spec and mask for RSS 1013 + * @vf: pointer to the VF info 1014 + * @proto: pointer to the virtchnl protocol header 1015 + * @raw_cfg: pointer to the RSS raw pattern configuration 1016 + * 1017 + * Parser function to get spec and mask from virtchnl message, and parse 1018 + * them to get the corresponding profile and offset. The profile is used 1019 + * to add RSS configuration. 1020 + * 1021 + * Return: 0 on success; negative error code on failure. 
 */
static int
ice_parse_raw_rss_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto,
			  struct ice_rss_raw_cfg *raw_cfg)
{
	struct ice_parser_result pkt_parsed;
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_parser_profile prof;
	struct ice_parser *psr;
	u8 *pkt_buf, *msk_buf;
	u16 pkt_len;
	int ret = 0;

	/* clamp the VF-supplied length to the virtchnl maximum */
	pkt_len = proto->raw.pkt_len;
	if (!pkt_len)
		return -EINVAL;
	if (pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
		pkt_len = VIRTCHNL_MAX_SIZE_RAW_PACKET;

	pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
	msk_buf = kzalloc(pkt_len, GFP_KERNEL);
	if (!pkt_buf || !msk_buf) {
		/* kfree(NULL) is a no-op, so one failed alloc is fine */
		ret = -ENOMEM;
		goto free_alloc;
	}

	memcpy(pkt_buf, proto->raw.spec, pkt_len);
	memcpy(msk_buf, proto->raw.mask, pkt_len);

	psr = ice_parser_create(hw);
	if (IS_ERR(psr)) {
		ret = PTR_ERR(psr);
		goto free_alloc;
	}

	ret = ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed);
	if (ret)
		goto parser_destroy;

	ret = ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf,
				      pkt_len, ICE_BLK_RSS, &prof);
	if (ret)
		goto parser_destroy;

	memcpy(&raw_cfg->prof, &prof, sizeof(prof));

parser_destroy:
	ice_parser_destroy(psr);
free_alloc:
	kfree(pkt_buf);
	kfree(msk_buf);
	return ret;
}

/**
 * ice_add_raw_rss_cfg - add RSS configuration for raw pattern
 * @vf: pointer to the VF info
 * @cfg: pointer to the RSS raw pattern configuration
 *
 * This function adds the RSS configuration for raw pattern.
 * Check if current profile is matched. If not, remove the old
 * one and add the new profile to HW directly. Update the symmetric
 * hash configuration as well.
 *
 * Return: 0 on success; negative error code on failure.
 */
static int
ice_add_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
{
	struct ice_parser_profile *prof = &cfg->prof;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_rss_prof_info *rss_prof;
	struct ice_hw *hw = &vf->pf->hw;
	int i, ptg, ret = 0;
	u16 vsi_handle;
	u64 id;

	vsi_handle = vf->lan_vsi_idx;
	/* profile id is derived from the first ptype the profile covers */
	id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);

	ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
	rss_prof = &vf->rss_prof_info[ptg];

	/* check if ptg already has a profile */
	if (rss_prof->prof.fv_num) {
		/* NOTE(review): compares all ICE_MAX_FV_WORDS fv entries but
		 * not fv_num itself; assumes unused entries are zeroed —
		 * TODO confirm
		 */
		for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
			if (rss_prof->prof.fv[i].proto_id !=
			    prof->fv[i].proto_id ||
			    rss_prof->prof.fv[i].offset !=
			    prof->fv[i].offset)
				break;
		}

		/* current profile is matched, check symmetric hash */
		if (i == ICE_MAX_FV_WORDS) {
			if (rss_prof->symm != cfg->symm)
				goto update_symm;
			return ret;
		}

		/* current profile is not matched, remove it */
		ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS,
					   ice_get_hw_vsi_num(hw, vsi_handle),
					   id);
		if (ret) {
			dev_err(dev, "remove RSS flow failed\n");
			return ret;
		}

		ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
		if (ret) {
			dev_err(dev, "remove RSS profile failed\n");
			return ret;
		}
	}

	/* add new profile */
	ret = ice_flow_set_parser_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS);
	if (ret) {
		dev_err(dev, "HW profile add failed\n");
		return ret;
	}

	memcpy(&rss_prof->prof, prof, sizeof(struct ice_parser_profile));

update_symm:
	rss_prof->symm = cfg->symm;
	ice_rss_update_raw_symm(hw, cfg, id);
	return ret;
}

/**
 * ice_rem_raw_rss_cfg - remove RSS configuration for
raw pattern 1156 + * @vf: pointer to the VF info 1157 + * @cfg: pointer to the RSS raw pattern configuration 1158 + * 1159 + * This function removes the RSS configuration for raw pattern. 1160 + * Check if vsi group is already removed first. If not, remove the 1161 + * profile. 1162 + * 1163 + * Return: 0 on success; negative error code on failure. 1164 + */ 1165 + static int 1166 + ice_rem_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg) 1167 + { 1168 + struct ice_parser_profile *prof = &cfg->prof; 1169 + struct device *dev = ice_pf_to_dev(vf->pf); 1170 + struct ice_hw *hw = &vf->pf->hw; 1171 + int ptg, ret = 0; 1172 + u16 vsig, vsi; 1173 + u64 id; 1174 + 1175 + id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); 1176 + 1177 + ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id]; 1178 + 1179 + memset(&vf->rss_prof_info[ptg], 0, 1180 + sizeof(struct ice_rss_prof_info)); 1181 + 1182 + /* check if vsig is already removed */ 1183 + vsi = ice_get_hw_vsi_num(hw, vf->lan_vsi_idx); 1184 + if (vsi >= ICE_MAX_VSI) { 1185 + ret = -EINVAL; 1186 + goto err; 1187 + } 1188 + 1189 + vsig = hw->blk[ICE_BLK_RSS].xlt2.vsis[vsi].vsig; 1190 + if (vsig) { 1191 + ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id); 1192 + if (ret) 1193 + goto err; 1194 + 1195 + ret = ice_rem_prof(hw, ICE_BLK_RSS, id); 1196 + if (ret) 1197 + goto err; 1198 + } 1199 + 1200 + return ret; 1201 + 1202 + err: 1203 + dev_err(dev, "HW profile remove failed\n"); 1204 + return ret; 1205 + } 1206 + 1207 + /** 689 1208 * ice_vc_handle_rss_cfg 690 1209 * @vf: pointer to the VF info 691 1210 * @msg: pointer to the message buffer ··· 1569 352 struct device *dev = ice_pf_to_dev(vf->pf); 1570 353 struct ice_hw *hw = &vf->pf->hw; 1571 354 struct ice_vsi *vsi; 355 + u8 hash_type; 356 + bool symm; 357 + int ret; 1572 358 1573 359 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 1574 360 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n", ··· 1607 387 goto error_param; 1608 388 } 1609 
389 1610 - if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { 1611 - v_ret = VIRTCHNL_STATUS_ERR_PARAM; 390 + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { 391 + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR : 392 + ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; 393 + 394 + ret = ice_vc_rss_hash_update(hw, vsi, hash_type); 395 + if (ret) 396 + v_ret = ice_err_to_virt_err(ret); 1612 397 goto error_param; 1613 398 } 1614 399 1615 - if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { 1616 - struct ice_vsi_ctx *ctx; 1617 - u8 lut_type, hash_type; 1618 - int status; 400 + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ : 401 + ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; 402 + ret = ice_vc_rss_hash_update(hw, vsi, hash_type); 403 + if (ret) { 404 + v_ret = ice_err_to_virt_err(ret); 405 + goto error_param; 406 + } 1619 407 1620 - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 1621 - hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR : 1622 - ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; 408 + symm = rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; 409 + /* Configure RSS hash for raw pattern */ 410 + if (rss_cfg->proto_hdrs.tunnel_level == 0 && 411 + rss_cfg->proto_hdrs.count == 0) { 412 + struct ice_rss_raw_cfg raw_cfg; 1623 413 1624 - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 1625 - if (!ctx) { 1626 - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 414 + if (ice_parse_raw_rss_pattern(vf, &rss_cfg->proto_hdrs, 415 + &raw_cfg)) { 416 + v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1627 417 goto error_param; 1628 418 } 1629 419 1630 - ctx->info.q_opt_rss = 1631 - FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | 1632 - FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); 1633 - 1634 - /* Preserve existing queueing option setting */ 1635 - ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & 1636 - ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M); 1637 - ctx->info.q_opt_tc = vsi->info.q_opt_tc; 1638 - ctx->info.q_opt_flags = vsi->info.q_opt_rss; 1639 - 1640 - ctx->info.valid_sections = 1641 - 
cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); 1642 - 1643 - status = ice_update_vsi(hw, vsi->idx, ctx, NULL); 1644 - if (status) { 1645 - dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", 1646 - status, libie_aq_str(hw->adminq.sq_last_status)); 1647 - v_ret = VIRTCHNL_STATUS_ERR_PARAM; 420 + if (add) { 421 + raw_cfg.symm = symm; 422 + if (ice_add_raw_rss_cfg(vf, &raw_cfg)) 423 + v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1648 424 } else { 1649 - vsi->info.q_opt_rss = ctx->info.q_opt_rss; 425 + if (ice_rem_raw_rss_cfg(vf, &raw_cfg)) 426 + v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1650 427 } 1651 - 1652 - kfree(ctx); 1653 428 } else { 1654 429 struct ice_rss_hash_cfg cfg; 1655 430 ··· 1663 448 } 1664 449 1665 450 if (add) { 1666 - if (ice_add_rss_cfg(hw, vsi, &cfg)) { 451 + cfg.symm = symm; 452 + if (ice_add_rss_cfg_wrap(vf, &cfg)) 1667 453 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1668 - dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", 1669 - vsi->vsi_num, v_ret); 1670 - } 1671 454 } else { 1672 - int status; 1673 - 1674 - status = ice_rem_rss_cfg(hw, vsi->idx, &cfg); 1675 - /* We just ignore -ENOENT, because if two configurations 1676 - * share the same profile remove one of them actually 1677 - * removes both, since the profile is deleted. 1678 - */ 1679 - if (status && status != -ENOENT) { 455 + if (ice_rem_rss_cfg_wrap(vf, &cfg)) 1680 456 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1681 - dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", 1682 - vf->vf_id, status); 1683 - } 1684 457 } 1685 458 } 1686 459
+50
include/linux/avf/virtchnl.h
··· 1253 1253 VIRTCHNL_PROTO_HDR_ESP, 1254 1254 VIRTCHNL_PROTO_HDR_AH, 1255 1255 VIRTCHNL_PROTO_HDR_PFCP, 1256 + VIRTCHNL_PROTO_HDR_GTPC, 1257 + VIRTCHNL_PROTO_HDR_ECPRI, 1258 + VIRTCHNL_PROTO_HDR_L2TPV2, 1259 + VIRTCHNL_PROTO_HDR_PPP, 1260 + /* IPv4 and IPv6 Fragment header types are only associated to 1261 + * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively, 1262 + * cannot be used independently. 1263 + */ 1264 + VIRTCHNL_PROTO_HDR_IPV4_FRAG, 1265 + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, 1266 + VIRTCHNL_PROTO_HDR_GRE, 1256 1267 }; 1257 1268 1258 1269 /* Protocol header field within a protocol header. */ ··· 1286 1275 VIRTCHNL_PROTO_HDR_IPV4_DSCP, 1287 1276 VIRTCHNL_PROTO_HDR_IPV4_TTL, 1288 1277 VIRTCHNL_PROTO_HDR_IPV4_PROT, 1278 + VIRTCHNL_PROTO_HDR_IPV4_CHKSUM, 1289 1279 /* IPV6 */ 1290 1280 VIRTCHNL_PROTO_HDR_IPV6_SRC = 1291 1281 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6), ··· 1294 1282 VIRTCHNL_PROTO_HDR_IPV6_TC, 1295 1283 VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, 1296 1284 VIRTCHNL_PROTO_HDR_IPV6_PROT, 1285 + /* IPV6 Prefix */ 1286 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC, 1287 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST, 1288 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC, 1289 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST, 1290 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC, 1291 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST, 1292 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC, 1293 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST, 1294 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC, 1295 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST, 1296 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC, 1297 + VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST, 1297 1298 /* TCP */ 1298 1299 VIRTCHNL_PROTO_HDR_TCP_SRC_PORT = 1299 1300 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP), 1300 1301 VIRTCHNL_PROTO_HDR_TCP_DST_PORT, 1302 + VIRTCHNL_PROTO_HDR_TCP_CHKSUM, 1301 1303 /* UDP */ 1302 1304 VIRTCHNL_PROTO_HDR_UDP_SRC_PORT = 1303 1305 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP), 1304 1306 VIRTCHNL_PROTO_HDR_UDP_DST_PORT, 1307 + 
VIRTCHNL_PROTO_HDR_UDP_CHKSUM, 1305 1308 /* SCTP */ 1306 1309 VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT = 1307 1310 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP), 1308 1311 VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, 1312 + VIRTCHNL_PROTO_HDR_SCTP_CHKSUM, 1309 1313 /* GTPU_IP */ 1310 1314 VIRTCHNL_PROTO_HDR_GTPU_IP_TEID = 1311 1315 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP), ··· 1345 1317 VIRTCHNL_PROTO_HDR_PFCP_S_FIELD = 1346 1318 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP), 1347 1319 VIRTCHNL_PROTO_HDR_PFCP_SEID, 1320 + /* GTPC */ 1321 + VIRTCHNL_PROTO_HDR_GTPC_TEID = 1322 + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC), 1323 + /* ECPRI */ 1324 + VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE = 1325 + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI), 1326 + VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID, 1327 + /* IPv4 Dummy Fragment */ 1328 + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID = 1329 + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG), 1330 + /* IPv6 Extension Fragment */ 1331 + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID = 1332 + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG), 1333 + /* GTPU_DWN/UP */ 1334 + VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI = 1335 + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN), 1336 + VIRTCHNL_PROTO_HDR_GTPU_UP_QFI = 1337 + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP), 1338 + /* L2TPv2 */ 1339 + VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID = 1340 + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2), 1341 + VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID, 1348 1342 }; 1349 1343 1350 1344 struct virtchnl_proto_hdr {
+4
include/net/devlink.h
··· 532 532 DEVLINK_PARAM_GENERIC_ID_CLOCK_ID, 533 533 DEVLINK_PARAM_GENERIC_ID_TOTAL_VFS, 534 534 DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS, 535 + DEVLINK_PARAM_GENERIC_ID_MAX_MAC_PER_VF, 535 536 536 537 /* add new param generic ids above here*/ 537 538 __DEVLINK_PARAM_GENERIC_ID_MAX, ··· 602 601 603 602 #define DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_NAME "num_doorbells" 604 603 #define DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_TYPE DEVLINK_PARAM_TYPE_U32 604 + 605 + #define DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_NAME "max_mac_per_vf" 606 + #define DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_TYPE DEVLINK_PARAM_TYPE_U32 605 607 606 608 #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ 607 609 { \
+5
net/devlink/param.c
··· 112 112 .name = DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_NAME, 113 113 .type = DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_TYPE, 114 114 }, 115 + { 116 + .id = DEVLINK_PARAM_GENERIC_ID_MAX_MAC_PER_VF, 117 + .name = DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_NAME, 118 + .type = DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_TYPE, 119 + }, 115 120 }; 116 121 117 122 static int devlink_param_generic_verify(const struct devlink_param *param)