Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: iavf: add support for TC U32 filters on VFs

Ahmed Zaki says:

The Intel Ethernet 800 Series is designed with a pipeline that has
an on-chip programmable capability called Dynamic Device Personalization
(DDP). A DDP package is loaded by the driver during probe time. The DDP
package programs functionality in both the parser and switching blocks in
the pipeline, allowing dynamic support for new and existing protocols.
Once the pipeline is configured, the driver can identify the protocol and
apply any HW action in different stages, for example, direct packets to
desired hardware queues (flow director), queue groups or drop.

Patches 1-8 introduce a DDP package parser API that enables different
pipeline stages in the driver to learn the HW parser capabilities from
the DDP package that is downloaded to HW. The parser library takes raw
packet patterns and masks (in binary) indicating the packet protocol fields
to be matched and generates the final HW profiles that can be applied at
the required stage. With this API, raw flow filtering for FDIR or RSS
could be done on new protocols or headers without any driver or kernel
updates (only need to update the DDP package). These patches were submitted
before [1] but were not accepted mainly due to lack of a user.

Patches 9-11 extend the virtchnl support to allow the VF to request raw
flow director filters. Upon receiving the raw FDIR filter request, the PF
driver allocates and runs a parser lib instance and generates the hardware
profile definitions required to program the FDIR stage. These were also
submitted before [2].

Finally, patches 12 and 13 add TC U32 filter support to the iavf driver.
Using the parser API, the ice driver runs the raw patterns sent by the
user and then adds a new profile to the FDIR stage associated with the VF's
VSI. Refer to examples in patch 13 commit message.

[1]: https://lore.kernel.org/netdev/20230904021455.3944605-1-junfeng.guo@intel.com/
[2]: https://lore.kernel.org/intel-wired-lan/20230818064703.154183-1-junfeng.guo@intel.com/

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
iavf: add support for offloading tc U32 cls filters
iavf: refactor add/del FDIR filters
ice: enable FDIR filters from raw binary patterns for VFs
ice: add method to disable FDIR SWAP option
virtchnl: support raw packet in protocol header
ice: add API for parser profile initialization
ice: add UDP tunnels support to the parser
ice: support turning on/off the parser's double vlan mode
ice: add parser execution main loop
ice: add parser internal helper functions
ice: add debugging functions for the parser sections
ice: parse and init various DDP parser sections
ice: add parser create and destroy skeleton
====================

Link: https://patch.msgid.link/20240813222249.3708070-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+4792 -90
+30
drivers/net/ethernet/intel/iavf/iavf.h
··· 33 33 #include <net/udp.h> 34 34 #include <net/tc_act/tc_gact.h> 35 35 #include <net/tc_act/tc_mirred.h> 36 + #include <net/tc_act/tc_skbedit.h> 36 37 37 38 #include "iavf_type.h" 38 39 #include <linux/avf/virtchnl.h> ··· 394 393 VIRTCHNL_VF_OFFLOAD_VLAN_V2) 395 394 #define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ 396 395 VIRTCHNL_VF_OFFLOAD_CRC) 396 + #define TC_U32_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ 397 + VIRTCHNL_VF_OFFLOAD_TC_U32) 397 398 #define VLAN_V2_FILTERING_ALLOWED(_a) \ 398 399 (VLAN_V2_ALLOWED((_a)) && \ 399 400 ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \ ··· 440 437 441 438 #define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */ 442 439 u16 fdir_active_fltr; 440 + u16 raw_fdir_active_fltr; 443 441 struct list_head fdir_list_head; 444 442 spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */ 445 443 ··· 448 444 spinlock_t adv_rss_lock; /* protect the RSS management list */ 449 445 }; 450 446 447 + /* Must be called with fdir_fltr_lock lock held */ 448 + static inline bool iavf_fdir_max_reached(struct iavf_adapter *adapter) 449 + { 450 + return adapter->fdir_active_fltr + adapter->raw_fdir_active_fltr >= 451 + IAVF_MAX_FDIR_FILTERS; 452 + } 453 + 454 + static inline void 455 + iavf_inc_fdir_active_fltr(struct iavf_adapter *adapter, 456 + struct iavf_fdir_fltr *fltr) 457 + { 458 + if (iavf_is_raw_fdir(fltr)) 459 + adapter->raw_fdir_active_fltr++; 460 + else 461 + adapter->fdir_active_fltr++; 462 + } 463 + 464 + static inline void 465 + iavf_dec_fdir_active_fltr(struct iavf_adapter *adapter, 466 + struct iavf_fdir_fltr *fltr) 467 + { 468 + if (iavf_is_raw_fdir(fltr)) 469 + adapter->raw_fdir_active_fltr--; 470 + else 471 + adapter->fdir_active_fltr--; 472 + } 451 473 452 474 /* Ethtool Private Flags */ 453 475
+9 -50
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
··· 927 927 928 928 spin_lock_bh(&adapter->fdir_fltr_lock); 929 929 930 - rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); 930 + rule = iavf_find_fdir_fltr(adapter, false, fsp->location); 931 931 if (!rule) { 932 932 ret = -EINVAL; 933 933 goto release_lock; ··· 1072 1072 spin_lock_bh(&adapter->fdir_fltr_lock); 1073 1073 1074 1074 list_for_each_entry(fltr, &adapter->fdir_list_head, list) { 1075 + if (iavf_is_raw_fdir(fltr)) 1076 + continue; 1077 + 1075 1078 if (cnt == cmd->rule_cnt) { 1076 1079 val = -EMSGSIZE; 1077 1080 goto release_lock; ··· 1266 1263 return -EINVAL; 1267 1264 1268 1265 spin_lock_bh(&adapter->fdir_fltr_lock); 1269 - if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { 1270 - spin_unlock_bh(&adapter->fdir_fltr_lock); 1271 - dev_err(&adapter->pdev->dev, 1272 - "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", 1273 - IAVF_MAX_FDIR_FILTERS); 1274 - return -ENOSPC; 1275 - } 1276 - 1277 - if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { 1266 + if (iavf_find_fdir_fltr(adapter, false, fsp->location)) { 1278 1267 dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n"); 1279 1268 spin_unlock_bh(&adapter->fdir_fltr_lock); 1280 1269 return -EEXIST; ··· 1286 1291 } 1287 1292 1288 1293 err = iavf_add_fdir_fltr_info(adapter, fsp, fltr); 1294 + if (!err) 1295 + err = iavf_fdir_add_fltr(adapter, fltr); 1296 + 1289 1297 if (err) 1290 - goto ret; 1291 - 1292 - spin_lock_bh(&adapter->fdir_fltr_lock); 1293 - iavf_fdir_list_add_fltr(adapter, fltr); 1294 - adapter->fdir_active_fltr++; 1295 - 1296 - if (adapter->link_up) 1297 - fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; 1298 - else 1299 - fltr->state = IAVF_FDIR_FLTR_INACTIVE; 1300 - spin_unlock_bh(&adapter->fdir_fltr_lock); 1301 - 1302 - if (adapter->link_up) 1303 - iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER); 1304 - ret: 1305 - if (err && fltr) 1306 1298 kfree(fltr); 1307 1299 1308 1300 
mutex_unlock(&adapter->crit_lock); ··· 1306 1324 static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) 1307 1325 { 1308 1326 struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; 1309 - struct iavf_fdir_fltr *fltr = NULL; 1310 - int err = 0; 1311 1327 1312 1328 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) 1313 1329 return -EOPNOTSUPP; 1314 1330 1315 - spin_lock_bh(&adapter->fdir_fltr_lock); 1316 - fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); 1317 - if (fltr) { 1318 - if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { 1319 - fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; 1320 - } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { 1321 - list_del(&fltr->list); 1322 - kfree(fltr); 1323 - adapter->fdir_active_fltr--; 1324 - fltr = NULL; 1325 - } else { 1326 - err = -EBUSY; 1327 - } 1328 - } else if (adapter->fdir_active_fltr) { 1329 - err = -EINVAL; 1330 - } 1331 - spin_unlock_bh(&adapter->fdir_fltr_lock); 1332 - 1333 - if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) 1334 - iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER); 1335 - 1336 - return err; 1331 + return iavf_fdir_del_fltr(adapter, false, fsp->location); 1337 1332 } 1338 1333 1339 1334 /**
+81 -8
drivers/net/ethernet/intel/iavf/iavf_fdir.c
··· 796 796 797 797 spin_lock_bh(&adapter->fdir_fltr_lock); 798 798 list_for_each_entry(tmp, &adapter->fdir_list_head, list) { 799 + if (iavf_is_raw_fdir(fltr)) 800 + continue; 801 + 799 802 if (tmp->flow_type != fltr->flow_type) 800 803 continue; 801 804 ··· 818 815 } 819 816 820 817 /** 821 - * iavf_find_fdir_fltr_by_loc - find filter with location 818 + * iavf_find_fdir_fltr - find FDIR filter 822 819 * @adapter: pointer to the VF adapter structure 823 - * @loc: location to find. 820 + * @is_raw: filter type, is raw (tc u32) or not (ethtool) 821 + * @data: data to ID the filter, type dependent 824 822 * 825 - * Returns pointer to Flow Director filter if found or null 823 + * Returns: pointer to Flow Director filter if found or NULL. Lock must be held. 826 824 */ 827 - struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc) 825 + struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter, 826 + bool is_raw, u32 data) 828 827 { 829 828 struct iavf_fdir_fltr *rule; 830 829 831 - list_for_each_entry(rule, &adapter->fdir_list_head, list) 832 - if (rule->loc == loc) 830 + list_for_each_entry(rule, &adapter->fdir_list_head, list) { 831 + if ((is_raw && rule->cls_u32_handle == data) || 832 + (!is_raw && rule->loc == data)) 833 833 return rule; 834 + } 834 835 835 836 return NULL; 836 837 } 837 838 838 839 /** 839 - * iavf_fdir_list_add_fltr - add a new node to the flow director filter list 840 + * iavf_fdir_add_fltr - add a new node to the flow director filter list 840 841 * @adapter: pointer to the VF adapter structure 841 842 * @fltr: filter node to add to structure 843 + * 844 + * Return: 0 on success or negative errno on failure. 
842 845 */ 843 - void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) 846 + int iavf_fdir_add_fltr(struct iavf_adapter *adapter, 847 + struct iavf_fdir_fltr *fltr) 844 848 { 845 849 struct iavf_fdir_fltr *rule, *parent = NULL; 846 850 851 + spin_lock_bh(&adapter->fdir_fltr_lock); 852 + if (iavf_fdir_max_reached(adapter)) { 853 + spin_unlock_bh(&adapter->fdir_fltr_lock); 854 + dev_err(&adapter->pdev->dev, 855 + "Unable to add Flow Director filter (limit (%u) reached)\n", 856 + IAVF_MAX_FDIR_FILTERS); 857 + return -ENOSPC; 858 + } 859 + 847 860 list_for_each_entry(rule, &adapter->fdir_list_head, list) { 861 + if (iavf_is_raw_fdir(fltr)) 862 + break; 863 + 848 864 if (rule->loc >= fltr->loc) 849 865 break; 850 866 parent = rule; ··· 873 851 list_add(&fltr->list, &parent->list); 874 852 else 875 853 list_add(&fltr->list, &adapter->fdir_list_head); 854 + 855 + iavf_inc_fdir_active_fltr(adapter, fltr); 856 + 857 + if (adapter->link_up) 858 + fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; 859 + else 860 + fltr->state = IAVF_FDIR_FLTR_INACTIVE; 861 + spin_unlock_bh(&adapter->fdir_fltr_lock); 862 + 863 + if (adapter->link_up) 864 + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER); 865 + 866 + return 0; 867 + } 868 + 869 + /** 870 + * iavf_fdir_del_fltr - delete a flow director filter from the list 871 + * @adapter: pointer to the VF adapter structure 872 + * @is_raw: filter type, is raw (tc u32) or not (ethtool) 873 + * @data: data to ID the filter, type dependent 874 + * 875 + * Return: 0 on success or negative errno on failure. 
876 + */ 877 + int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data) 878 + { 879 + struct iavf_fdir_fltr *fltr = NULL; 880 + int err = 0; 881 + 882 + spin_lock_bh(&adapter->fdir_fltr_lock); 883 + fltr = iavf_find_fdir_fltr(adapter, is_raw, data); 884 + 885 + if (fltr) { 886 + if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { 887 + fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; 888 + } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { 889 + list_del(&fltr->list); 890 + iavf_dec_fdir_active_fltr(adapter, fltr); 891 + kfree(fltr); 892 + fltr = NULL; 893 + } else { 894 + err = -EBUSY; 895 + } 896 + } else if (adapter->fdir_active_fltr) { 897 + err = -EINVAL; 898 + } 899 + 900 + if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) 901 + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER); 902 + 903 + spin_unlock_bh(&adapter->fdir_fltr_lock); 904 + return err; 876 905 }
+11 -2
drivers/net/ethernet/intel/iavf/iavf_fdir.h
··· 117 117 118 118 u32 flow_id; 119 119 120 + u32 cls_u32_handle; /* for FDIR added via tc u32 */ 120 121 u32 loc; /* Rule location inside the flow table */ 121 122 u32 q_index; 122 123 123 124 struct virtchnl_fdir_add vc_add_msg; 124 125 }; 125 126 127 + static inline bool iavf_is_raw_fdir(struct iavf_fdir_fltr *fltr) 128 + { 129 + return !fltr->vc_add_msg.rule_cfg.proto_hdrs.count; 130 + } 131 + 126 132 int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, 127 133 struct iavf_fdir_fltr *fltr); 128 134 int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); 129 135 void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); 130 136 bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); 131 - void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); 132 - struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc); 137 + int iavf_fdir_add_fltr(struct iavf_adapter *adapter, 138 + struct iavf_fdir_fltr *fltr); 139 + int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data); 140 + struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter, 141 + bool is_raw, u32 data); 133 142 #endif /* _IAVF_FDIR_H_ */
+156 -4
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 4013 4013 4014 4014 /** 4015 4015 * iavf_setup_tc_cls_flower - flower classifier offloads 4016 - * @adapter: board private structure 4016 + * @adapter: pointer to iavf adapter structure 4017 4017 * @cls_flower: pointer to flow_cls_offload struct with flow info 4018 4018 */ 4019 4019 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, ··· 4026 4026 return iavf_delete_clsflower(adapter, cls_flower); 4027 4027 case FLOW_CLS_STATS: 4028 4028 return -EOPNOTSUPP; 4029 + default: 4030 + return -EOPNOTSUPP; 4031 + } 4032 + } 4033 + 4034 + /** 4035 + * iavf_add_cls_u32 - Add U32 classifier offloads 4036 + * @adapter: pointer to iavf adapter structure 4037 + * @cls_u32: pointer to tc_cls_u32_offload struct with flow info 4038 + * 4039 + * Return: 0 on success or negative errno on failure. 4040 + */ 4041 + static int iavf_add_cls_u32(struct iavf_adapter *adapter, 4042 + struct tc_cls_u32_offload *cls_u32) 4043 + { 4044 + struct netlink_ext_ack *extack = cls_u32->common.extack; 4045 + struct virtchnl_fdir_rule *rule_cfg; 4046 + struct virtchnl_filter_action *vact; 4047 + struct virtchnl_proto_hdrs *hdrs; 4048 + struct ethhdr *spec_h, *mask_h; 4049 + const struct tc_action *act; 4050 + struct iavf_fdir_fltr *fltr; 4051 + struct tcf_exts *exts; 4052 + unsigned int q_index; 4053 + int i, status = 0; 4054 + int off_base = 0; 4055 + 4056 + if (cls_u32->knode.link_handle) { 4057 + NL_SET_ERR_MSG_MOD(extack, "Linking not supported"); 4058 + return -EOPNOTSUPP; 4059 + } 4060 + 4061 + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); 4062 + if (!fltr) 4063 + return -ENOMEM; 4064 + 4065 + rule_cfg = &fltr->vc_add_msg.rule_cfg; 4066 + hdrs = &rule_cfg->proto_hdrs; 4067 + hdrs->count = 0; 4068 + 4069 + /* The parser lib at the PF expects the packet starting with MAC hdr */ 4070 + switch (ntohs(cls_u32->common.protocol)) { 4071 + case ETH_P_802_3: 4072 + break; 4073 + case ETH_P_IP: 4074 + spec_h = (struct ethhdr *)hdrs->raw.spec; 4075 + mask_h = (struct ethhdr *)hdrs->raw.mask; 
4076 + spec_h->h_proto = htons(ETH_P_IP); 4077 + mask_h->h_proto = htons(0xFFFF); 4078 + off_base += ETH_HLEN; 4079 + break; 4080 + default: 4081 + NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported"); 4082 + status = -EOPNOTSUPP; 4083 + goto free_alloc; 4084 + } 4085 + 4086 + for (i = 0; i < cls_u32->knode.sel->nkeys; i++) { 4087 + __be32 val, mask; 4088 + int off; 4089 + 4090 + off = off_base + cls_u32->knode.sel->keys[i].off; 4091 + val = cls_u32->knode.sel->keys[i].val; 4092 + mask = cls_u32->knode.sel->keys[i].mask; 4093 + 4094 + if (off >= sizeof(hdrs->raw.spec)) { 4095 + NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed."); 4096 + status = -EINVAL; 4097 + goto free_alloc; 4098 + } 4099 + 4100 + memcpy(&hdrs->raw.spec[off], &val, sizeof(val)); 4101 + memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask)); 4102 + hdrs->raw.pkt_len = off + sizeof(val); 4103 + } 4104 + 4105 + /* Only one action is allowed */ 4106 + rule_cfg->action_set.count = 1; 4107 + vact = &rule_cfg->action_set.actions[0]; 4108 + exts = cls_u32->knode.exts; 4109 + 4110 + tcf_exts_for_each_action(i, act, exts) { 4111 + /* FDIR queue */ 4112 + if (is_tcf_skbedit_rx_queue_mapping(act)) { 4113 + q_index = tcf_skbedit_rx_queue_mapping(act); 4114 + if (q_index >= adapter->num_active_queues) { 4115 + status = -EINVAL; 4116 + goto free_alloc; 4117 + } 4118 + 4119 + vact->type = VIRTCHNL_ACTION_QUEUE; 4120 + vact->act_conf.queue.index = q_index; 4121 + break; 4122 + } 4123 + 4124 + /* Drop */ 4125 + if (is_tcf_gact_shot(act)) { 4126 + vact->type = VIRTCHNL_ACTION_DROP; 4127 + break; 4128 + } 4129 + 4130 + /* Unsupported action */ 4131 + NL_SET_ERR_MSG_MOD(extack, "Unsupported action."); 4132 + status = -EOPNOTSUPP; 4133 + goto free_alloc; 4134 + } 4135 + 4136 + fltr->vc_add_msg.vsi_id = adapter->vsi.id; 4137 + fltr->cls_u32_handle = cls_u32->knode.handle; 4138 + return iavf_fdir_add_fltr(adapter, fltr); 4139 + 4140 + free_alloc: 4141 + kfree(fltr); 4142 + return status; 
4143 + } 4144 + 4145 + /** 4146 + * iavf_del_cls_u32 - Delete U32 classifier offloads 4147 + * @adapter: pointer to iavf adapter structure 4148 + * @cls_u32: pointer to tc_cls_u32_offload struct with flow info 4149 + * 4150 + * Return: 0 on success or negative errno on failure. 4151 + */ 4152 + static int iavf_del_cls_u32(struct iavf_adapter *adapter, 4153 + struct tc_cls_u32_offload *cls_u32) 4154 + { 4155 + return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle); 4156 + } 4157 + 4158 + /** 4159 + * iavf_setup_tc_cls_u32 - U32 filter offloads 4160 + * @adapter: pointer to iavf adapter structure 4161 + * @cls_u32: pointer to tc_cls_u32_offload struct with flow info 4162 + * 4163 + * Return: 0 on success or negative errno on failure. 4164 + */ 4165 + static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter, 4166 + struct tc_cls_u32_offload *cls_u32) 4167 + { 4168 + if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter)) 4169 + return -EOPNOTSUPP; 4170 + 4171 + switch (cls_u32->command) { 4172 + case TC_CLSU32_NEW_KNODE: 4173 + case TC_CLSU32_REPLACE_KNODE: 4174 + return iavf_add_cls_u32(adapter, cls_u32); 4175 + case TC_CLSU32_DELETE_KNODE: 4176 + return iavf_del_cls_u32(adapter, cls_u32); 4029 4177 default: 4030 4178 return -EOPNOTSUPP; 4031 4179 } ··· 4198 4050 switch (type) { 4199 4051 case TC_SETUP_CLSFLOWER: 4200 4052 return iavf_setup_tc_cls_flower(cb_priv, type_data); 4053 + case TC_SETUP_CLSU32: 4054 + return iavf_setup_tc_cls_u32(cb_priv, type_data); 4201 4055 default: 4202 4056 return -EOPNOTSUPP; 4203 4057 } ··· 4482 4332 fdir->state == IAVF_FDIR_FLTR_INACTIVE) { 4483 4333 /* Delete filters not registered in PF */ 4484 4334 list_del(&fdir->list); 4335 + iavf_dec_fdir_active_fltr(adapter, fdir); 4485 4336 kfree(fdir); 4486 - adapter->fdir_active_fltr--; 4487 4337 } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || 4488 4338 fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST || 4489 4339 fdir->state == IAVF_FDIR_FLTR_ACTIVE) { ··· 4993 4843 /* 
get HW VLAN features that can be toggled */ 4994 4844 hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter); 4995 4845 4996 - /* Enable cloud filter if ADQ is supported */ 4997 - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 4846 + /* Enable HW TC offload if ADQ or tc U32 is supported */ 4847 + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ || 4848 + TC_U32_SUPPORT(adapter)) 4998 4849 hw_features |= NETIF_F_HW_TC; 4850 + 4999 4851 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 5000 4852 hw_features |= NETIF_F_GSO_UDP_L4; 5001 4853
+17 -8
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
··· 142 142 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | 143 143 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | 144 144 VIRTCHNL_VF_OFFLOAD_ENCAP | 145 + VIRTCHNL_VF_OFFLOAD_TC_U32 | 145 146 VIRTCHNL_VF_OFFLOAD_VLAN_V2 | 146 147 VIRTCHNL_VF_OFFLOAD_CRC | 147 148 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | ··· 1962 1961 * list on PF is already cleared after a reset 1963 1962 */ 1964 1963 list_del(&f->list); 1964 + iavf_dec_fdir_active_fltr(adapter, f); 1965 1965 kfree(f); 1966 - adapter->fdir_active_fltr--; 1967 1966 } 1968 1967 } 1969 1968 spin_unlock_bh(&adapter->fdir_fltr_lock); ··· 2136 2135 dev_err(&adapter->pdev->dev, 2137 2136 "%s\n", msg); 2138 2137 list_del(&fdir->list); 2138 + iavf_dec_fdir_active_fltr(adapter, fdir); 2139 2139 kfree(fdir); 2140 - adapter->fdir_active_fltr--; 2141 2140 } 2142 2141 } 2143 2142 spin_unlock_bh(&adapter->fdir_fltr_lock); ··· 2452 2451 list) { 2453 2452 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2454 2453 if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2455 - dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 2456 - fdir->loc); 2454 + if (!iavf_is_raw_fdir(fdir)) 2455 + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 2456 + fdir->loc); 2457 + else 2458 + dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n", 2459 + TC_U32_USERHTID(fdir->cls_u32_handle)); 2457 2460 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2458 2461 fdir->flow_id = add_fltr->flow_id; 2459 2462 } else { ··· 2465 2460 add_fltr->status); 2466 2461 iavf_print_fdir_fltr(adapter, fdir); 2467 2462 list_del(&fdir->list); 2463 + iavf_dec_fdir_active_fltr(adapter, fdir); 2468 2464 kfree(fdir); 2469 - adapter->fdir_active_fltr--; 2470 2465 } 2471 2466 } 2472 2467 } ··· 2484 2479 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || 2485 2480 del_fltr->status == 2486 2481 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { 2487 - dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 2488 - 
fdir->loc); 2482 + if (!iavf_is_raw_fdir(fdir)) 2483 + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 2484 + fdir->loc); 2485 + else 2486 + dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n", 2487 + TC_U32_USERHTID(fdir->cls_u32_handle)); 2489 2488 list_del(&fdir->list); 2489 + iavf_dec_fdir_active_fltr(adapter, fdir); 2490 2490 kfree(fdir); 2491 - adapter->fdir_active_fltr--; 2492 2491 } else { 2493 2492 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2494 2493 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
+2
drivers/net/ethernet/intel/ice/Makefile
··· 28 28 ice_vlan_mode.o \ 29 29 ice_flex_pipe.o \ 30 30 ice_flow.o \ 31 + ice_parser.o \ 32 + ice_parser_rt.o \ 31 33 ice_idc.o \ 32 34 devlink/devlink.o \ 33 35 devlink/devlink_port.o \
+1
drivers/net/ethernet/intel/ice/ice_common.h
··· 10 10 #include "ice_type.h" 11 11 #include "ice_nvm.h" 12 12 #include "ice_flex_pipe.h" 13 + #include "ice_parser.h" 13 14 #include <linux/avf/virtchnl.h> 14 15 #include "ice_switch.h" 15 16 #include "ice_fdir.h"
+5 -5
drivers/net/ethernet/intel/ice/ice_ddp.c
··· 289 289 * indicates a base offset of 10, and the index for the entry is 2, then 290 290 * section handler function should set the offset to 10 + 2 = 12. 291 291 */ 292 - static void *ice_pkg_enum_entry(struct ice_seg *ice_seg, 293 - struct ice_pkg_enum *state, u32 sect_type, 294 - u32 *offset, 295 - void *(*handler)(u32 sect_type, void *section, 296 - u32 index, u32 *offset)) 292 + void *ice_pkg_enum_entry(struct ice_seg *ice_seg, 293 + struct ice_pkg_enum *state, u32 sect_type, 294 + u32 *offset, 295 + void *(*handler)(u32 sect_type, void *section, 296 + u32 index, u32 *offset)) 297 297 { 298 298 void *entry; 299 299
+13
drivers/net/ethernet/intel/ice/ice_ddp.h
··· 261 261 #define ICE_SID_CDID_KEY_BUILDER_RSS 47 262 262 #define ICE_SID_CDID_REDIR_RSS 48 263 263 264 + #define ICE_SID_RXPARSER_CAM 50 265 + #define ICE_SID_RXPARSER_NOMATCH_CAM 51 266 + #define ICE_SID_RXPARSER_IMEM 52 264 267 #define ICE_SID_RXPARSER_MARKER_PTYPE 55 265 268 #define ICE_SID_RXPARSER_BOOST_TCAM 56 269 + #define ICE_SID_RXPARSER_PROTO_GRP 57 266 270 #define ICE_SID_RXPARSER_METADATA_INIT 58 267 271 #define ICE_SID_TXPARSER_BOOST_TCAM 66 272 + #define ICE_SID_RXPARSER_MARKER_GRP 72 273 + #define ICE_SID_RXPARSER_PG_SPILL 76 274 + #define ICE_SID_RXPARSER_NOMATCH_SPILL 78 268 275 269 276 #define ICE_SID_XLT0_PE 80 270 277 #define ICE_SID_XLT_KEY_BUILDER_PE 81 ··· 283 276 #define ICE_SID_CDID_KEY_BUILDER_PE 87 284 277 #define ICE_SID_CDID_REDIR_PE 88 285 278 279 + #define ICE_SID_RXPARSER_FLAG_REDIR 97 286 280 /* Label Metadata section IDs */ 287 281 #define ICE_SID_LBL_FIRST 0x80000010 288 282 #define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 ··· 459 451 460 452 int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); 461 453 u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); 454 + void * 455 + ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, 456 + u32 sect_type, u32 *offset, 457 + void *(*handler)(u32 sect_type, void *section, 458 + u32 index, u32 *offset)); 462 459 void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, 463 460 u32 sect_type); 464 461
+97 -2
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
··· 2981 2981 } 2982 2982 2983 2983 /** 2984 + * ice_disable_fd_swap - set register appropriately to disable FD SWAP 2985 + * @hw: pointer to the HW struct 2986 + * @prof_id: profile ID 2987 + */ 2988 + static void 2989 + ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id) 2990 + { 2991 + u16 swap_val, fvw_num; 2992 + unsigned int i; 2993 + 2994 + swap_val = ICE_SWAP_VALID; 2995 + fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE; 2996 + 2997 + /* Since the SWAP Flag in the Programming Desc doesn't work, 2998 + * here add method to disable the SWAP Option via setting 2999 + * certain SWAP and INSET register sets. 3000 + */ 3001 + for (i = 0; i < fvw_num ; i++) { 3002 + u32 raw_swap, raw_in; 3003 + unsigned int j; 3004 + 3005 + raw_swap = 0; 3006 + raw_in = 0; 3007 + 3008 + for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) { 3009 + raw_swap |= (swap_val++) << (j * BITS_PER_BYTE); 3010 + raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE); 3011 + } 3012 + 3013 + /* write the FDIR swap register set */ 3014 + wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap); 3015 + 3016 + ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n", 3017 + prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap); 3018 + 3019 + /* write the FDIR inset register set */ 3020 + wr32(hw, GLQF_FDINSET(prof_id, i), raw_in); 3021 + 3022 + ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n", 3023 + prof_id, i, GLQF_FDINSET(prof_id, i), raw_in); 3024 + } 3025 + } 3026 + 3027 + /* 2984 3028 * ice_add_prof - add profile 2985 3029 * @hw: pointer to the HW struct 2986 3030 * @blk: hardware block ··· 3035 2991 * @es: extraction sequence (length of array is determined by the block) 3036 2992 * @masks: mask for extraction sequence 3037 2993 * @symm: symmetric setting for RSS profiles 2994 + * @fd_swap: enable/disable FDIR paired src/dst fields swap option 3038 2995 * 3039 2996 * This function registers a profile, which matches a set of PTYPES with a 3040 2997 * particular extraction sequence. 
While the hardware profile is allocated ··· 3045 3000 int 3046 3001 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], 3047 3002 const struct ice_ptype_attributes *attr, u16 attr_cnt, 3048 - struct ice_fv_word *es, u16 *masks, bool symm) 3003 + struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap) 3049 3004 { 3050 3005 u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); 3051 3006 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); ··· 3065 3020 status = ice_alloc_prof_id(hw, blk, &prof_id); 3066 3021 if (status) 3067 3022 goto err_ice_add_prof; 3068 - if (blk == ICE_BLK_FD) { 3023 + if (blk == ICE_BLK_FD && fd_swap) { 3069 3024 /* For Flow Director block, the extraction sequence may 3070 3025 * need to be altered in the case where there are paired 3071 3026 * fields that have no match. This is necessary because ··· 3076 3031 status = ice_update_fd_swap(hw, prof_id, es); 3077 3032 if (status) 3078 3033 goto err_ice_add_prof; 3034 + } else if (blk == ICE_BLK_FD) { 3035 + ice_disable_fd_swap(hw, prof_id); 3079 3036 } 3080 3037 status = ice_update_prof_masking(hw, blk, prof_id, masks); 3081 3038 if (status) ··· 4141 4094 list_del(&del1->list); 4142 4095 devm_kfree(ice_hw_to_dev(hw), del1); 4143 4096 } 4097 + 4098 + return status; 4099 + } 4100 + 4101 + /** 4102 + * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI 4103 + * @hw: pointer to the HW struct 4104 + * @blk: HW block 4105 + * @dest_vsi: dest VSI 4106 + * @fdir_vsi: fdir programming VSI 4107 + * @hdl: profile handle 4108 + * 4109 + * Update the hardware tables to enable the FDIR profile indicated by @hdl for 4110 + * the VSI specified by @dest_vsi. On success, the flow will be enabled. 4111 + * 4112 + * Return: 0 on success or negative errno on failure. 
4113 + */ 4114 + int 4115 + ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, 4116 + u16 dest_vsi, u16 fdir_vsi, u64 hdl) 4117 + { 4118 + u16 vsi_num; 4119 + int status; 4120 + 4121 + if (blk != ICE_BLK_FD) 4122 + return -EINVAL; 4123 + 4124 + vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); 4125 + status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); 4126 + if (status) { 4127 + ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n", 4128 + status); 4129 + return status; 4130 + } 4131 + 4132 + vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi); 4133 + status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); 4134 + if (status) { 4135 + ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n", 4136 + status); 4137 + goto err; 4138 + } 4139 + 4140 + return 0; 4141 + 4142 + err: 4143 + vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); 4144 + ice_rem_prof_id_flow(hw, blk, vsi_num, hdl); 4144 4145 4145 4146 return status; 4146 4147 }
+6 -1
drivers/net/ethernet/intel/ice/ice_flex_pipe.h
··· 6 6 7 7 #include "ice_type.h" 8 8 9 + #define ICE_FDIR_REG_SET_SIZE 4 10 + 9 11 int 10 12 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); 11 13 void ice_release_change_lock(struct ice_hw *hw); ··· 44 42 int 45 43 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], 46 44 const struct ice_ptype_attributes *attr, u16 attr_cnt, 47 - struct ice_fv_word *es, u16 *masks, bool symm); 45 + struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap); 48 46 struct ice_prof_map * 49 47 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); 50 48 int 51 49 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); 52 50 int 53 51 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); 52 + int 53 + ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, 54 + u16 dest_vsi, u16 fdir_vsi, u64 hdl); 54 55 enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); 55 56 enum ice_ddp_state 56 57 ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
+108 -1
drivers/net/ethernet/intel/ice/ice_flow.c
··· 409 409 }; 410 410 411 411 /* Packet types for GTPU */ 412 + static const struct ice_ptype_attributes ice_attr_gtpu_session[] = { 413 + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, 414 + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, 415 + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, 416 + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, 417 + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, 418 + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, 419 + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, 420 + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, 421 + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, 422 + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, 423 + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, 424 + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, 425 + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, 426 + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, 427 + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, 428 + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, 429 + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, 430 + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, 431 + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, 432 + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, 433 + }; 434 + 412 435 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { 413 436 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, 414 437 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, ··· 1423 1400 /* Add a HW profile for this flow profile */ 1424 1401 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, 1425 1402 params->attr, params->attr_cnt, params->es, 1426 - params->mask, symm); 1403 + params->mask, symm, true); 1427 1404 if (status) { 1428 1405 ice_debug(hw, 
ICE_DBG_FLOW, "Error adding a HW flow profile\n"); 1429 1406 goto out; ··· 1542 1519 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n", 1543 1520 status); 1544 1521 } 1522 + 1523 + return status; 1524 + } 1525 + 1526 + #define FLAG_GTP_EH_PDU_LINK BIT_ULL(13) 1527 + #define FLAG_GTP_EH_PDU BIT_ULL(14) 1528 + 1529 + #define HI_BYTE_IN_WORD GENMASK(15, 8) 1530 + #define LO_BYTE_IN_WORD GENMASK(7, 0) 1531 + 1532 + #define FLAG_GTPU_MSK \ 1533 + (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK) 1534 + #define FLAG_GTPU_UP \ 1535 + (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK) 1536 + #define FLAG_GTPU_DW FLAG_GTP_EH_PDU 1537 + 1538 + /** 1539 + * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info 1540 + * @hw: pointer to the HW struct 1541 + * @dest_vsi: dest VSI 1542 + * @fdir_vsi: fdir programming VSI 1543 + * @prof: stores parsed profile info from raw flow 1544 + * @blk: classification blk 1545 + * 1546 + * Return: 0 on success or negative errno on failure. 1547 + */ 1548 + int 1549 + ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi, 1550 + struct ice_parser_profile *prof, enum ice_block blk) 1551 + { 1552 + u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); 1553 + struct ice_flow_prof_params *params __free(kfree); 1554 + u8 fv_words = hw->blk[blk].es.fvw; 1555 + int status; 1556 + int i, idx; 1557 + 1558 + params = kzalloc(sizeof(*params), GFP_KERNEL); 1559 + if (!params) 1560 + return -ENOMEM; 1561 + 1562 + for (i = 0; i < ICE_MAX_FV_WORDS; i++) { 1563 + params->es[i].prot_id = ICE_PROT_INVALID; 1564 + params->es[i].off = ICE_FV_OFFSET_INVAL; 1565 + } 1566 + 1567 + for (i = 0; i < prof->fv_num; i++) { 1568 + if (hw->blk[blk].es.reverse) 1569 + idx = fv_words - i - 1; 1570 + else 1571 + idx = i; 1572 + params->es[idx].prot_id = prof->fv[i].proto_id; 1573 + params->es[idx].off = prof->fv[i].offset; 1574 + params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) & 1575 + HI_BYTE_IN_WORD) | 1576 + 
(((prof->fv[i].msk) >> BITS_PER_BYTE) & 1577 + LO_BYTE_IN_WORD); 1578 + } 1579 + 1580 + switch (prof->flags) { 1581 + case FLAG_GTPU_DW: 1582 + params->attr = ice_attr_gtpu_down; 1583 + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down); 1584 + break; 1585 + case FLAG_GTPU_UP: 1586 + params->attr = ice_attr_gtpu_up; 1587 + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up); 1588 + break; 1589 + default: 1590 + if (prof->flags_msk & FLAG_GTPU_MSK) { 1591 + params->attr = ice_attr_gtpu_session; 1592 + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session); 1593 + } 1594 + break; 1595 + } 1596 + 1597 + status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes, 1598 + params->attr, params->attr_cnt, 1599 + params->es, params->mask, false, false); 1600 + if (status) 1601 + return status; 1602 + 1603 + status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id); 1604 + if (status) 1605 + ice_rem_prof(hw, blk, id); 1545 1606 1546 1607 return status; 1547 1608 }
+5
drivers/net/ethernet/intel/ice/ice_flow.h
··· 5 5 #define _ICE_FLOW_H_ 6 6 7 7 #include "ice_flex_type.h" 8 + #include "ice_parser.h" 8 9 9 10 #define ICE_FLOW_ENTRY_HANDLE_INVAL 0 10 11 #define ICE_FLOW_FLD_OFF_INVAL 0xffff ··· 327 326 ICE_RSS_ANY_HEADERS 328 327 }; 329 328 329 + struct ice_vsi; 330 330 struct ice_rss_hash_cfg { 331 331 u32 addl_hdrs; /* protocol header fields */ 332 332 u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */ ··· 446 444 struct ice_flow_seg_info *segs, u8 segs_cnt, 447 445 bool symm, struct ice_flow_prof **prof); 448 446 int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); 447 + int 448 + ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi, 449 + struct ice_parser_profile *prof, enum ice_block blk); 449 450 int 450 451 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, 451 452 u64 entry_id, u16 vsi, enum ice_flow_priority prio,
+2430
drivers/net/ethernet/intel/ice/ice_parser.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2024 Intel Corporation */ 3 + 4 + #include "ice_common.h" 5 + 6 + struct ice_pkg_sect_hdr { 7 + __le16 count; 8 + __le16 offset; 9 + }; 10 + 11 + /** 12 + * ice_parser_sect_item_get - parse an item from a section 13 + * @sect_type: section type 14 + * @section: section object 15 + * @index: index of the item to get 16 + * @offset: dummy as prototype of ice_pkg_enum_entry's last parameter 17 + * 18 + * Return: a pointer to the item or NULL. 19 + */ 20 + static void *ice_parser_sect_item_get(u32 sect_type, void *section, 21 + u32 index, u32 __maybe_unused *offset) 22 + { 23 + size_t data_off = ICE_SEC_DATA_OFFSET; 24 + struct ice_pkg_sect_hdr *hdr; 25 + size_t size; 26 + 27 + if (!section) 28 + return NULL; 29 + 30 + switch (sect_type) { 31 + case ICE_SID_RXPARSER_IMEM: 32 + size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE; 33 + break; 34 + case ICE_SID_RXPARSER_METADATA_INIT: 35 + size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE; 36 + break; 37 + case ICE_SID_RXPARSER_CAM: 38 + size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE; 39 + break; 40 + case ICE_SID_RXPARSER_PG_SPILL: 41 + size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE; 42 + break; 43 + case ICE_SID_RXPARSER_NOMATCH_CAM: 44 + size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE; 45 + break; 46 + case ICE_SID_RXPARSER_NOMATCH_SPILL: 47 + size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE; 48 + break; 49 + case ICE_SID_RXPARSER_BOOST_TCAM: 50 + size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE; 51 + break; 52 + case ICE_SID_LBL_RXPARSER_TMEM: 53 + data_off = ICE_SEC_LBL_DATA_OFFSET; 54 + size = ICE_SID_LBL_ENTRY_SIZE; 55 + break; 56 + case ICE_SID_RXPARSER_MARKER_PTYPE: 57 + size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE; 58 + break; 59 + case ICE_SID_RXPARSER_MARKER_GRP: 60 + size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE; 61 + break; 62 + case ICE_SID_RXPARSER_PROTO_GRP: 63 + size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE; 64 + break; 65 + case ICE_SID_RXPARSER_FLAG_REDIR: 66 + 
size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE; 67 + break; 68 + default: 69 + return NULL; 70 + } 71 + 72 + hdr = section; 73 + if (index >= le16_to_cpu(hdr->count)) 74 + return NULL; 75 + 76 + return section + data_off + index * size; 77 + } 78 + 79 + /** 80 + * ice_parser_create_table - create an item table from a section 81 + * @hw: pointer to the hardware structure 82 + * @sect_type: section type 83 + * @item_size: item size in bytes 84 + * @length: number of items in the table to create 85 + * @parse_item: the function to parse the item 86 + * @no_offset: ignore header offset, calculate index from 0 87 + * 88 + * Return: a pointer to the allocated table or ERR_PTR. 89 + */ 90 + static void * 91 + ice_parser_create_table(struct ice_hw *hw, u32 sect_type, 92 + u32 item_size, u32 length, 93 + void (*parse_item)(struct ice_hw *hw, u16 idx, 94 + void *item, void *data, 95 + int size), bool no_offset) 96 + { 97 + struct ice_pkg_enum state = {}; 98 + struct ice_seg *seg = hw->seg; 99 + void *table, *data, *item; 100 + u16 idx = 0; 101 + 102 + if (!seg) 103 + return ERR_PTR(-EINVAL); 104 + 105 + table = kzalloc(item_size * length, GFP_KERNEL); 106 + if (!table) 107 + return ERR_PTR(-ENOMEM); 108 + 109 + do { 110 + data = ice_pkg_enum_entry(seg, &state, sect_type, NULL, 111 + ice_parser_sect_item_get); 112 + seg = NULL; 113 + if (data) { 114 + struct ice_pkg_sect_hdr *hdr = state.sect; 115 + 116 + if (!no_offset) 117 + idx = le16_to_cpu(hdr->offset) + 118 + state.entry_idx; 119 + 120 + item = (void *)((uintptr_t)table + idx * item_size); 121 + parse_item(hw, idx, item, data, item_size); 122 + 123 + if (no_offset) 124 + idx++; 125 + } 126 + } while (data); 127 + 128 + return table; 129 + } 130 + 131 + /*** ICE_SID_RXPARSER_IMEM section ***/ 132 + static void ice_imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm) 133 + { 134 + struct device *dev = ice_hw_to_dev(hw); 135 + 136 + dev_info(dev, "boost main:\n"); 137 + dev_info(dev, "\talu0 = %d\n", bm->alu0); 138 
+ dev_info(dev, "\talu1 = %d\n", bm->alu1); 139 + dev_info(dev, "\talu2 = %d\n", bm->alu2); 140 + dev_info(dev, "\tpg = %d\n", bm->pg); 141 + } 142 + 143 + static void ice_imem_bst_kb_dump(struct ice_hw *hw, 144 + struct ice_bst_keybuilder *kb) 145 + { 146 + struct device *dev = ice_hw_to_dev(hw); 147 + 148 + dev_info(dev, "boost key builder:\n"); 149 + dev_info(dev, "\tpriority = %d\n", kb->prio); 150 + dev_info(dev, "\ttsr_ctrl = %d\n", kb->tsr_ctrl); 151 + } 152 + 153 + static void ice_imem_np_kb_dump(struct ice_hw *hw, 154 + struct ice_np_keybuilder *kb) 155 + { 156 + struct device *dev = ice_hw_to_dev(hw); 157 + 158 + dev_info(dev, "next proto key builder:\n"); 159 + dev_info(dev, "\topc = %d\n", kb->opc); 160 + dev_info(dev, "\tstart_or_reg0 = %d\n", kb->start_reg0); 161 + dev_info(dev, "\tlen_or_reg1 = %d\n", kb->len_reg1); 162 + } 163 + 164 + static void ice_imem_pg_kb_dump(struct ice_hw *hw, 165 + struct ice_pg_keybuilder *kb) 166 + { 167 + struct device *dev = ice_hw_to_dev(hw); 168 + 169 + dev_info(dev, "parse graph key builder:\n"); 170 + dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena); 171 + dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena); 172 + dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena); 173 + dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena); 174 + dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx); 175 + dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx); 176 + dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx); 177 + dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx); 178 + dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx); 179 + } 180 + 181 + static void ice_imem_alu_dump(struct ice_hw *hw, 182 + struct ice_alu *alu, int index) 183 + { 184 + struct device *dev = ice_hw_to_dev(hw); 185 + 186 + dev_info(dev, "alu%d:\n", index); 187 + dev_info(dev, "\topc = %d\n", alu->opc); 188 + dev_info(dev, "\tsrc_start = %d\n", alu->src_start); 189 + dev_info(dev, "\tsrc_len = %d\n", alu->src_len); 190 + dev_info(dev, "\tshift_xlate_sel = 
%d\n", alu->shift_xlate_sel); 191 + dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key); 192 + dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id); 193 + dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id); 194 + dev_info(dev, "\tinc0 = %d\n", alu->inc0); 195 + dev_info(dev, "\tinc1 = %d\n", alu->inc1); 196 + dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc); 197 + dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset); 198 + dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr); 199 + dev_info(dev, "\timm = %d\n", alu->imm); 200 + dev_info(dev, "\tdst_start = %d\n", alu->dst_start); 201 + dev_info(dev, "\tdst_len = %d\n", alu->dst_len); 202 + dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm); 203 + dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm); 204 + } 205 + 206 + /** 207 + * ice_imem_dump - dump an imem item info 208 + * @hw: pointer to the hardware structure 209 + * @item: imem item to dump 210 + */ 211 + static void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item) 212 + { 213 + struct device *dev = ice_hw_to_dev(hw); 214 + 215 + dev_info(dev, "index = %d\n", item->idx); 216 + ice_imem_bst_bm_dump(hw, &item->b_m); 217 + ice_imem_bst_kb_dump(hw, &item->b_kb); 218 + dev_info(dev, "pg priority = %d\n", item->pg_prio); 219 + ice_imem_np_kb_dump(hw, &item->np_kb); 220 + ice_imem_pg_kb_dump(hw, &item->pg_kb); 221 + ice_imem_alu_dump(hw, &item->alu0, 0); 222 + ice_imem_alu_dump(hw, &item->alu1, 1); 223 + ice_imem_alu_dump(hw, &item->alu2, 2); 224 + } 225 + 226 + #define ICE_IM_BM_ALU0 BIT(0) 227 + #define ICE_IM_BM_ALU1 BIT(1) 228 + #define ICE_IM_BM_ALU2 BIT(2) 229 + #define ICE_IM_BM_PG BIT(3) 230 + 231 + /** 232 + * ice_imem_bm_init - parse 4 bits of Boost Main 233 + * @bm: pointer to the Boost Main structure 234 + * @data: Boost Main data to be parsed 235 + */ 236 + static void ice_imem_bm_init(struct ice_bst_main *bm, u8 data) 237 + { 238 + bm->alu0 = FIELD_GET(ICE_IM_BM_ALU0, data); 239 
+ bm->alu1 = FIELD_GET(ICE_IM_BM_ALU1, data); 240 + bm->alu2 = FIELD_GET(ICE_IM_BM_ALU2, data); 241 + bm->pg = FIELD_GET(ICE_IM_BM_PG, data); 242 + } 243 + 244 + #define ICE_IM_BKB_PRIO GENMASK(7, 0) 245 + #define ICE_IM_BKB_TSR_CTRL BIT(8) 246 + 247 + /** 248 + * ice_imem_bkb_init - parse 10 bits of Boost Main Build 249 + * @bkb: pointer to the Boost Main Build structure 250 + * @data: Boost Main Build data to be parsed 251 + */ 252 + static void ice_imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data) 253 + { 254 + bkb->prio = FIELD_GET(ICE_IM_BKB_PRIO, data); 255 + bkb->tsr_ctrl = FIELD_GET(ICE_IM_BKB_TSR_CTRL, data); 256 + } 257 + 258 + #define ICE_IM_NPKB_OPC GENMASK(1, 0) 259 + #define ICE_IM_NPKB_S_R0 GENMASK(9, 2) 260 + #define ICE_IM_NPKB_L_R1 GENMASK(17, 10) 261 + 262 + /** 263 + * ice_imem_npkb_init - parse 18 bits of Next Protocol Key Build 264 + * @kb: pointer to the Next Protocol Key Build structure 265 + * @data: Next Protocol Key Build data to be parsed 266 + */ 267 + static void ice_imem_npkb_init(struct ice_np_keybuilder *kb, u32 data) 268 + { 269 + kb->opc = FIELD_GET(ICE_IM_NPKB_OPC, data); 270 + kb->start_reg0 = FIELD_GET(ICE_IM_NPKB_S_R0, data); 271 + kb->len_reg1 = FIELD_GET(ICE_IM_NPKB_L_R1, data); 272 + } 273 + 274 + #define ICE_IM_PGKB_F0_ENA BIT_ULL(0) 275 + #define ICE_IM_PGKB_F0_IDX GENMASK_ULL(6, 1) 276 + #define ICE_IM_PGKB_F1_ENA BIT_ULL(7) 277 + #define ICE_IM_PGKB_F1_IDX GENMASK_ULL(13, 8) 278 + #define ICE_IM_PGKB_F2_ENA BIT_ULL(14) 279 + #define ICE_IM_PGKB_F2_IDX GENMASK_ULL(20, 15) 280 + #define ICE_IM_PGKB_F3_ENA BIT_ULL(21) 281 + #define ICE_IM_PGKB_F3_IDX GENMASK_ULL(27, 22) 282 + #define ICE_IM_PGKB_AR_IDX GENMASK_ULL(34, 28) 283 + 284 + /** 285 + * ice_imem_pgkb_init - parse 35 bits of Parse Graph Key Build 286 + * @kb: pointer to the Parse Graph Key Build structure 287 + * @data: Parse Graph Key Build data to be parsed 288 + */ 289 + static void ice_imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data) 290 + { 291 + 
kb->flag0_ena = FIELD_GET(ICE_IM_PGKB_F0_ENA, data); 292 + kb->flag0_idx = FIELD_GET(ICE_IM_PGKB_F0_IDX, data); 293 + kb->flag1_ena = FIELD_GET(ICE_IM_PGKB_F1_ENA, data); 294 + kb->flag1_idx = FIELD_GET(ICE_IM_PGKB_F1_IDX, data); 295 + kb->flag2_ena = FIELD_GET(ICE_IM_PGKB_F2_ENA, data); 296 + kb->flag2_idx = FIELD_GET(ICE_IM_PGKB_F2_IDX, data); 297 + kb->flag3_ena = FIELD_GET(ICE_IM_PGKB_F3_ENA, data); 298 + kb->flag3_idx = FIELD_GET(ICE_IM_PGKB_F3_IDX, data); 299 + kb->alu_reg_idx = FIELD_GET(ICE_IM_PGKB_AR_IDX, data); 300 + } 301 + 302 + #define ICE_IM_ALU_OPC GENMASK_ULL(5, 0) 303 + #define ICE_IM_ALU_SS GENMASK_ULL(13, 6) 304 + #define ICE_IM_ALU_SL GENMASK_ULL(18, 14) 305 + #define ICE_IM_ALU_SXS BIT_ULL(19) 306 + #define ICE_IM_ALU_SXK GENMASK_ULL(23, 20) 307 + #define ICE_IM_ALU_SRID GENMASK_ULL(30, 24) 308 + #define ICE_IM_ALU_DRID GENMASK_ULL(37, 31) 309 + #define ICE_IM_ALU_INC0 BIT_ULL(38) 310 + #define ICE_IM_ALU_INC1 BIT_ULL(39) 311 + #define ICE_IM_ALU_POO GENMASK_ULL(41, 40) 312 + #define ICE_IM_ALU_PO GENMASK_ULL(49, 42) 313 + #define ICE_IM_ALU_BA_S 50 /* offset for the 2nd 64-bits field */ 314 + #define ICE_IM_ALU_BA GENMASK_ULL(57 - ICE_IM_ALU_BA_S, \ 315 + 50 - ICE_IM_ALU_BA_S) 316 + #define ICE_IM_ALU_IMM GENMASK_ULL(73 - ICE_IM_ALU_BA_S, \ 317 + 58 - ICE_IM_ALU_BA_S) 318 + #define ICE_IM_ALU_DFE BIT_ULL(74 - ICE_IM_ALU_BA_S) 319 + #define ICE_IM_ALU_DS GENMASK_ULL(80 - ICE_IM_ALU_BA_S, \ 320 + 75 - ICE_IM_ALU_BA_S) 321 + #define ICE_IM_ALU_DL GENMASK_ULL(86 - ICE_IM_ALU_BA_S, \ 322 + 81 - ICE_IM_ALU_BA_S) 323 + #define ICE_IM_ALU_FEI BIT_ULL(87 - ICE_IM_ALU_BA_S) 324 + #define ICE_IM_ALU_FSI GENMASK_ULL(95 - ICE_IM_ALU_BA_S, \ 325 + 88 - ICE_IM_ALU_BA_S) 326 + 327 + /** 328 + * ice_imem_alu_init - parse 96 bits of ALU entry 329 + * @alu: pointer to the ALU entry structure 330 + * @data: ALU entry data to be parsed 331 + * @off: offset of the ALU entry data 332 + */ 333 + static void ice_imem_alu_init(struct ice_alu *alu, u8 *data, u8 off) 334 
+ { 335 + u64 d64; 336 + u8 idd; 337 + 338 + d64 = *((u64 *)data) >> off; 339 + 340 + alu->opc = FIELD_GET(ICE_IM_ALU_OPC, d64); 341 + alu->src_start = FIELD_GET(ICE_IM_ALU_SS, d64); 342 + alu->src_len = FIELD_GET(ICE_IM_ALU_SL, d64); 343 + alu->shift_xlate_sel = FIELD_GET(ICE_IM_ALU_SXS, d64); 344 + alu->shift_xlate_key = FIELD_GET(ICE_IM_ALU_SXK, d64); 345 + alu->src_reg_id = FIELD_GET(ICE_IM_ALU_SRID, d64); 346 + alu->dst_reg_id = FIELD_GET(ICE_IM_ALU_DRID, d64); 347 + alu->inc0 = FIELD_GET(ICE_IM_ALU_INC0, d64); 348 + alu->inc1 = FIELD_GET(ICE_IM_ALU_INC1, d64); 349 + alu->proto_offset_opc = FIELD_GET(ICE_IM_ALU_POO, d64); 350 + alu->proto_offset = FIELD_GET(ICE_IM_ALU_PO, d64); 351 + 352 + idd = (ICE_IM_ALU_BA_S + off) / BITS_PER_BYTE; 353 + off = (ICE_IM_ALU_BA_S + off) % BITS_PER_BYTE; 354 + d64 = *((u64 *)(&data[idd])) >> off; 355 + 356 + alu->branch_addr = FIELD_GET(ICE_IM_ALU_BA, d64); 357 + alu->imm = FIELD_GET(ICE_IM_ALU_IMM, d64); 358 + alu->dedicate_flags_ena = FIELD_GET(ICE_IM_ALU_DFE, d64); 359 + alu->dst_start = FIELD_GET(ICE_IM_ALU_DS, d64); 360 + alu->dst_len = FIELD_GET(ICE_IM_ALU_DL, d64); 361 + alu->flags_extr_imm = FIELD_GET(ICE_IM_ALU_FEI, d64); 362 + alu->flags_start_imm = FIELD_GET(ICE_IM_ALU_FSI, d64); 363 + } 364 + 365 + #define ICE_IMEM_BM_S 0 366 + #define ICE_IMEM_BKB_S 4 367 + #define ICE_IMEM_BKB_IDD (ICE_IMEM_BKB_S / BITS_PER_BYTE) 368 + #define ICE_IMEM_BKB_OFF (ICE_IMEM_BKB_S % BITS_PER_BYTE) 369 + #define ICE_IMEM_PGP GENMASK(15, 14) 370 + #define ICE_IMEM_NPKB_S 16 371 + #define ICE_IMEM_NPKB_IDD (ICE_IMEM_NPKB_S / BITS_PER_BYTE) 372 + #define ICE_IMEM_NPKB_OFF (ICE_IMEM_NPKB_S % BITS_PER_BYTE) 373 + #define ICE_IMEM_PGKB_S 34 374 + #define ICE_IMEM_PGKB_IDD (ICE_IMEM_PGKB_S / BITS_PER_BYTE) 375 + #define ICE_IMEM_PGKB_OFF (ICE_IMEM_PGKB_S % BITS_PER_BYTE) 376 + #define ICE_IMEM_ALU0_S 69 377 + #define ICE_IMEM_ALU0_IDD (ICE_IMEM_ALU0_S / BITS_PER_BYTE) 378 + #define ICE_IMEM_ALU0_OFF (ICE_IMEM_ALU0_S % BITS_PER_BYTE) 379 + 
#define ICE_IMEM_ALU1_S 165 380 + #define ICE_IMEM_ALU1_IDD (ICE_IMEM_ALU1_S / BITS_PER_BYTE) 381 + #define ICE_IMEM_ALU1_OFF (ICE_IMEM_ALU1_S % BITS_PER_BYTE) 382 + #define ICE_IMEM_ALU2_S 357 383 + #define ICE_IMEM_ALU2_IDD (ICE_IMEM_ALU2_S / BITS_PER_BYTE) 384 + #define ICE_IMEM_ALU2_OFF (ICE_IMEM_ALU2_S % BITS_PER_BYTE) 385 + 386 + /** 387 + * ice_imem_parse_item - parse 384 bits of IMEM entry 388 + * @hw: pointer to the hardware structure 389 + * @idx: index of IMEM entry 390 + * @item: item of IMEM entry 391 + * @data: IMEM entry data to be parsed 392 + * @size: size of IMEM entry 393 + */ 394 + static void ice_imem_parse_item(struct ice_hw *hw, u16 idx, void *item, 395 + void *data, int __maybe_unused size) 396 + { 397 + struct ice_imem_item *ii = item; 398 + u8 *buf = data; 399 + 400 + ii->idx = idx; 401 + 402 + ice_imem_bm_init(&ii->b_m, *(u8 *)buf); 403 + ice_imem_bkb_init(&ii->b_kb, 404 + *((u16 *)(&buf[ICE_IMEM_BKB_IDD])) >> 405 + ICE_IMEM_BKB_OFF); 406 + 407 + ii->pg_prio = FIELD_GET(ICE_IMEM_PGP, *(u16 *)buf); 408 + 409 + ice_imem_npkb_init(&ii->np_kb, 410 + *((u32 *)(&buf[ICE_IMEM_NPKB_IDD])) >> 411 + ICE_IMEM_NPKB_OFF); 412 + ice_imem_pgkb_init(&ii->pg_kb, 413 + *((u64 *)(&buf[ICE_IMEM_PGKB_IDD])) >> 414 + ICE_IMEM_PGKB_OFF); 415 + 416 + ice_imem_alu_init(&ii->alu0, 417 + &buf[ICE_IMEM_ALU0_IDD], 418 + ICE_IMEM_ALU0_OFF); 419 + ice_imem_alu_init(&ii->alu1, 420 + &buf[ICE_IMEM_ALU1_IDD], 421 + ICE_IMEM_ALU1_OFF); 422 + ice_imem_alu_init(&ii->alu2, 423 + &buf[ICE_IMEM_ALU2_IDD], 424 + ICE_IMEM_ALU2_OFF); 425 + 426 + if (hw->debug_mask & ICE_DBG_PARSER) 427 + ice_imem_dump(hw, ii); 428 + } 429 + 430 + /** 431 + * ice_imem_table_get - create an imem table 432 + * @hw: pointer to the hardware structure 433 + * 434 + * Return: a pointer to the allocated IMEM table. 
435 + */ 436 + static struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw) 437 + { 438 + return ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM, 439 + sizeof(struct ice_imem_item), 440 + ICE_IMEM_TABLE_SIZE, 441 + ice_imem_parse_item, false); 442 + } 443 + 444 + /*** ICE_SID_RXPARSER_METADATA_INIT section ***/ 445 + /** 446 + * ice_metainit_dump - dump an metainit item info 447 + * @hw: pointer to the hardware structure 448 + * @item: metainit item to dump 449 + */ 450 + static void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item) 451 + { 452 + struct device *dev = ice_hw_to_dev(hw); 453 + 454 + dev_info(dev, "index = %d\n", item->idx); 455 + 456 + dev_info(dev, "tsr = %d\n", item->tsr); 457 + dev_info(dev, "ho = %d\n", item->ho); 458 + dev_info(dev, "pc = %d\n", item->pc); 459 + dev_info(dev, "pg_rn = %d\n", item->pg_rn); 460 + dev_info(dev, "cd = %d\n", item->cd); 461 + 462 + dev_info(dev, "gpr_a_ctrl = %d\n", item->gpr_a_ctrl); 463 + dev_info(dev, "gpr_a_data_mdid = %d\n", item->gpr_a_data_mdid); 464 + dev_info(dev, "gpr_a_data_start = %d\n", item->gpr_a_data_start); 465 + dev_info(dev, "gpr_a_data_len = %d\n", item->gpr_a_data_len); 466 + dev_info(dev, "gpr_a_id = %d\n", item->gpr_a_id); 467 + 468 + dev_info(dev, "gpr_b_ctrl = %d\n", item->gpr_b_ctrl); 469 + dev_info(dev, "gpr_b_data_mdid = %d\n", item->gpr_b_data_mdid); 470 + dev_info(dev, "gpr_b_data_start = %d\n", item->gpr_b_data_start); 471 + dev_info(dev, "gpr_b_data_len = %d\n", item->gpr_b_data_len); 472 + dev_info(dev, "gpr_b_id = %d\n", item->gpr_b_id); 473 + 474 + dev_info(dev, "gpr_c_ctrl = %d\n", item->gpr_c_ctrl); 475 + dev_info(dev, "gpr_c_data_mdid = %d\n", item->gpr_c_data_mdid); 476 + dev_info(dev, "gpr_c_data_start = %d\n", item->gpr_c_data_start); 477 + dev_info(dev, "gpr_c_data_len = %d\n", item->gpr_c_data_len); 478 + dev_info(dev, "gpr_c_id = %d\n", item->gpr_c_id); 479 + 480 + dev_info(dev, "gpr_d_ctrl = %d\n", item->gpr_d_ctrl); 481 + dev_info(dev, 
"gpr_d_data_mdid = %d\n", item->gpr_d_data_mdid); 482 + dev_info(dev, "gpr_d_data_start = %d\n", item->gpr_d_data_start); 483 + dev_info(dev, "gpr_d_data_len = %d\n", item->gpr_d_data_len); 484 + dev_info(dev, "gpr_d_id = %d\n", item->gpr_d_id); 485 + 486 + dev_info(dev, "flags = 0x%llx\n", (unsigned long long)(item->flags)); 487 + } 488 + 489 + #define ICE_MI_TSR GENMASK_ULL(7, 0) 490 + #define ICE_MI_HO GENMASK_ULL(16, 8) 491 + #define ICE_MI_PC GENMASK_ULL(24, 17) 492 + #define ICE_MI_PGRN GENMASK_ULL(35, 25) 493 + #define ICE_MI_CD GENMASK_ULL(38, 36) 494 + #define ICE_MI_GAC BIT_ULL(39) 495 + #define ICE_MI_GADM GENMASK_ULL(44, 40) 496 + #define ICE_MI_GADS GENMASK_ULL(48, 45) 497 + #define ICE_MI_GADL GENMASK_ULL(53, 49) 498 + #define ICE_MI_GAI GENMASK_ULL(59, 56) 499 + #define ICE_MI_GBC BIT_ULL(60) 500 + #define ICE_MI_GBDM_S 61 /* offset for the 2nd 64-bits field */ 501 + #define ICE_MI_GBDM_IDD (ICE_MI_GBDM_S / BITS_PER_BYTE) 502 + #define ICE_MI_GBDM_OFF (ICE_MI_GBDM_S % BITS_PER_BYTE) 503 + 504 + #define ICE_MI_GBDM_GENMASK_ULL(high, low) \ 505 + GENMASK_ULL((high) - ICE_MI_GBDM_S, (low) - ICE_MI_GBDM_S) 506 + #define ICE_MI_GBDM ICE_MI_GBDM_GENMASK_ULL(65, 61) 507 + #define ICE_MI_GBDS ICE_MI_GBDM_GENMASK_ULL(69, 66) 508 + #define ICE_MI_GBDL ICE_MI_GBDM_GENMASK_ULL(74, 70) 509 + #define ICE_MI_GBI ICE_MI_GBDM_GENMASK_ULL(80, 77) 510 + #define ICE_MI_GCC BIT_ULL(81 - ICE_MI_GBDM_S) 511 + #define ICE_MI_GCDM ICE_MI_GBDM_GENMASK_ULL(86, 82) 512 + #define ICE_MI_GCDS ICE_MI_GBDM_GENMASK_ULL(90, 87) 513 + #define ICE_MI_GCDL ICE_MI_GBDM_GENMASK_ULL(95, 91) 514 + #define ICE_MI_GCI ICE_MI_GBDM_GENMASK_ULL(101, 98) 515 + #define ICE_MI_GDC BIT_ULL(102 - ICE_MI_GBDM_S) 516 + #define ICE_MI_GDDM ICE_MI_GBDM_GENMASK_ULL(107, 103) 517 + #define ICE_MI_GDDS ICE_MI_GBDM_GENMASK_ULL(111, 108) 518 + #define ICE_MI_GDDL ICE_MI_GBDM_GENMASK_ULL(116, 112) 519 + #define ICE_MI_GDI ICE_MI_GBDM_GENMASK_ULL(122, 119) 520 + #define ICE_MI_FLAG_S 123 /* offset for the 3rd 
64-bits field */ 521 + #define ICE_MI_FLAG_IDD (ICE_MI_FLAG_S / BITS_PER_BYTE) 522 + #define ICE_MI_FLAG_OFF (ICE_MI_FLAG_S % BITS_PER_BYTE) 523 + #define ICE_MI_FLAG GENMASK_ULL(186 - ICE_MI_FLAG_S, \ 524 + 123 - ICE_MI_FLAG_S) 525 + 526 + /** 527 + * ice_metainit_parse_item - parse 192 bits of Metadata Init entry 528 + * @hw: pointer to the hardware structure 529 + * @idx: index of Metadata Init entry 530 + * @item: item of Metadata Init entry 531 + * @data: Metadata Init entry data to be parsed 532 + * @size: size of Metadata Init entry 533 + */ 534 + static void ice_metainit_parse_item(struct ice_hw *hw, u16 idx, void *item, 535 + void *data, int __maybe_unused size) 536 + { 537 + struct ice_metainit_item *mi = item; 538 + u8 *buf = data; 539 + u64 d64; 540 + 541 + mi->idx = idx; 542 + 543 + d64 = *(u64 *)buf; 544 + 545 + mi->tsr = FIELD_GET(ICE_MI_TSR, d64); 546 + mi->ho = FIELD_GET(ICE_MI_HO, d64); 547 + mi->pc = FIELD_GET(ICE_MI_PC, d64); 548 + mi->pg_rn = FIELD_GET(ICE_MI_PGRN, d64); 549 + mi->cd = FIELD_GET(ICE_MI_CD, d64); 550 + 551 + mi->gpr_a_ctrl = FIELD_GET(ICE_MI_GAC, d64); 552 + mi->gpr_a_data_mdid = FIELD_GET(ICE_MI_GADM, d64); 553 + mi->gpr_a_data_start = FIELD_GET(ICE_MI_GADS, d64); 554 + mi->gpr_a_data_len = FIELD_GET(ICE_MI_GADL, d64); 555 + mi->gpr_a_id = FIELD_GET(ICE_MI_GAI, d64); 556 + 557 + mi->gpr_b_ctrl = FIELD_GET(ICE_MI_GBC, d64); 558 + 559 + d64 = *((u64 *)&buf[ICE_MI_GBDM_IDD]) >> ICE_MI_GBDM_OFF; 560 + 561 + mi->gpr_b_data_mdid = FIELD_GET(ICE_MI_GBDM, d64); 562 + mi->gpr_b_data_start = FIELD_GET(ICE_MI_GBDS, d64); 563 + mi->gpr_b_data_len = FIELD_GET(ICE_MI_GBDL, d64); 564 + mi->gpr_b_id = FIELD_GET(ICE_MI_GBI, d64); 565 + 566 + mi->gpr_c_ctrl = FIELD_GET(ICE_MI_GCC, d64); 567 + mi->gpr_c_data_mdid = FIELD_GET(ICE_MI_GCDM, d64); 568 + mi->gpr_c_data_start = FIELD_GET(ICE_MI_GCDS, d64); 569 + mi->gpr_c_data_len = FIELD_GET(ICE_MI_GCDL, d64); 570 + mi->gpr_c_id = FIELD_GET(ICE_MI_GCI, d64); 571 + 572 + mi->gpr_d_ctrl = 
FIELD_GET(ICE_MI_GDC, d64); 573 + mi->gpr_d_data_mdid = FIELD_GET(ICE_MI_GDDM, d64); 574 + mi->gpr_d_data_start = FIELD_GET(ICE_MI_GDDS, d64); 575 + mi->gpr_d_data_len = FIELD_GET(ICE_MI_GDDL, d64); 576 + mi->gpr_d_id = FIELD_GET(ICE_MI_GDI, d64); 577 + 578 + d64 = *((u64 *)&buf[ICE_MI_FLAG_IDD]) >> ICE_MI_FLAG_OFF; 579 + 580 + mi->flags = FIELD_GET(ICE_MI_FLAG, d64); 581 + 582 + if (hw->debug_mask & ICE_DBG_PARSER) 583 + ice_metainit_dump(hw, mi); 584 + } 585 + 586 + /** 587 + * ice_metainit_table_get - create a metainit table 588 + * @hw: pointer to the hardware structure 589 + * 590 + * Return: a pointer to the allocated Metadata initialization table. 591 + */ 592 + static struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw) 593 + { 594 + return ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT, 595 + sizeof(struct ice_metainit_item), 596 + ICE_METAINIT_TABLE_SIZE, 597 + ice_metainit_parse_item, false); 598 + } 599 + 600 + /** 601 + * ice_bst_tcam_search - find a TCAM item with specific type 602 + * @tcam_table: the TCAM table 603 + * @lbl_table: the lbl table to search 604 + * @type: the type we need to match against 605 + * @start: start searching from this index 606 + * 607 + * Return: a pointer to the matching BOOST TCAM item or NULL. 
608 + */ 609 + struct ice_bst_tcam_item * 610 + ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, 611 + struct ice_lbl_item *lbl_table, 612 + enum ice_lbl_type type, u16 *start) 613 + { 614 + u16 i = *start; 615 + 616 + for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) { 617 + if (lbl_table[i].type == type) { 618 + *start = i; 619 + return &tcam_table[lbl_table[i].idx]; 620 + } 621 + } 622 + 623 + return NULL; 624 + } 625 + 626 + /*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL, 627 + * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_CAM 628 + * sections ***/ 629 + static void ice_pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key) 630 + { 631 + struct device *dev = ice_hw_to_dev(hw); 632 + 633 + dev_info(dev, "key:\n"); 634 + dev_info(dev, "\tvalid = %d\n", key->valid); 635 + dev_info(dev, "\tnode_id = %d\n", key->node_id); 636 + dev_info(dev, "\tflag0 = %d\n", key->flag0); 637 + dev_info(dev, "\tflag1 = %d\n", key->flag1); 638 + dev_info(dev, "\tflag2 = %d\n", key->flag2); 639 + dev_info(dev, "\tflag3 = %d\n", key->flag3); 640 + dev_info(dev, "\tboost_idx = %d\n", key->boost_idx); 641 + dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg); 642 + dev_info(dev, "\tnext_proto = 0x%08x\n", key->next_proto); 643 + } 644 + 645 + static void ice_pg_nm_cam_key_dump(struct ice_hw *hw, 646 + struct ice_pg_nm_cam_key *key) 647 + { 648 + struct device *dev = ice_hw_to_dev(hw); 649 + 650 + dev_info(dev, "key:\n"); 651 + dev_info(dev, "\tvalid = %d\n", key->valid); 652 + dev_info(dev, "\tnode_id = %d\n", key->node_id); 653 + dev_info(dev, "\tflag0 = %d\n", key->flag0); 654 + dev_info(dev, "\tflag1 = %d\n", key->flag1); 655 + dev_info(dev, "\tflag2 = %d\n", key->flag2); 656 + dev_info(dev, "\tflag3 = %d\n", key->flag3); 657 + dev_info(dev, "\tboost_idx = %d\n", key->boost_idx); 658 + dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg); 659 + } 660 + 661 + static void ice_pg_cam_action_dump(struct ice_hw *hw, 662 + struct ice_pg_cam_action *action) 663 + { 
664 + struct device *dev = ice_hw_to_dev(hw); 665 + 666 + dev_info(dev, "action:\n"); 667 + dev_info(dev, "\tnext_node = %d\n", action->next_node); 668 + dev_info(dev, "\tnext_pc = %d\n", action->next_pc); 669 + dev_info(dev, "\tis_pg = %d\n", action->is_pg); 670 + dev_info(dev, "\tproto_id = %d\n", action->proto_id); 671 + dev_info(dev, "\tis_mg = %d\n", action->is_mg); 672 + dev_info(dev, "\tmarker_id = %d\n", action->marker_id); 673 + dev_info(dev, "\tis_last_round = %d\n", action->is_last_round); 674 + dev_info(dev, "\tho_polarity = %d\n", action->ho_polarity); 675 + dev_info(dev, "\tho_inc = %d\n", action->ho_inc); 676 + } 677 + 678 + /** 679 + * ice_pg_cam_dump - dump an parse graph cam info 680 + * @hw: pointer to the hardware structure 681 + * @item: parse graph cam to dump 682 + */ 683 + static void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item) 684 + { 685 + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); 686 + ice_pg_cam_key_dump(hw, &item->key); 687 + ice_pg_cam_action_dump(hw, &item->action); 688 + } 689 + 690 + /** 691 + * ice_pg_nm_cam_dump - dump an parse graph no match cam info 692 + * @hw: pointer to the hardware structure 693 + * @item: parse graph no match cam to dump 694 + */ 695 + static void ice_pg_nm_cam_dump(struct ice_hw *hw, 696 + struct ice_pg_nm_cam_item *item) 697 + { 698 + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); 699 + ice_pg_nm_cam_key_dump(hw, &item->key); 700 + ice_pg_cam_action_dump(hw, &item->action); 701 + } 702 + 703 + #define ICE_PGCA_NN GENMASK_ULL(10, 0) 704 + #define ICE_PGCA_NPC GENMASK_ULL(18, 11) 705 + #define ICE_PGCA_IPG BIT_ULL(19) 706 + #define ICE_PGCA_PID GENMASK_ULL(30, 23) 707 + #define ICE_PGCA_IMG BIT_ULL(31) 708 + #define ICE_PGCA_MID GENMASK_ULL(39, 32) 709 + #define ICE_PGCA_ILR BIT_ULL(40) 710 + #define ICE_PGCA_HOP BIT_ULL(41) 711 + #define ICE_PGCA_HOI GENMASK_ULL(50, 42) 712 + 713 + /** 714 + * ice_pg_cam_action_init - parse 55 bits of Parse Graph CAM Action 715 + 
* @action: pointer to the Parse Graph CAM Action structure 716 + * @data: Parse Graph CAM Action data to be parsed 717 + */ 718 + static void ice_pg_cam_action_init(struct ice_pg_cam_action *action, u64 data) 719 + { 720 + action->next_node = FIELD_GET(ICE_PGCA_NN, data); 721 + action->next_pc = FIELD_GET(ICE_PGCA_NPC, data); 722 + action->is_pg = FIELD_GET(ICE_PGCA_IPG, data); 723 + action->proto_id = FIELD_GET(ICE_PGCA_PID, data); 724 + action->is_mg = FIELD_GET(ICE_PGCA_IMG, data); 725 + action->marker_id = FIELD_GET(ICE_PGCA_MID, data); 726 + action->is_last_round = FIELD_GET(ICE_PGCA_ILR, data); 727 + action->ho_polarity = FIELD_GET(ICE_PGCA_HOP, data); 728 + action->ho_inc = FIELD_GET(ICE_PGCA_HOI, data); 729 + } 730 + 731 + #define ICE_PGNCK_VLD BIT_ULL(0) 732 + #define ICE_PGNCK_NID GENMASK_ULL(11, 1) 733 + #define ICE_PGNCK_F0 BIT_ULL(12) 734 + #define ICE_PGNCK_F1 BIT_ULL(13) 735 + #define ICE_PGNCK_F2 BIT_ULL(14) 736 + #define ICE_PGNCK_F3 BIT_ULL(15) 737 + #define ICE_PGNCK_BH BIT_ULL(16) 738 + #define ICE_PGNCK_BI GENMASK_ULL(24, 17) 739 + #define ICE_PGNCK_AR GENMASK_ULL(40, 25) 740 + 741 + /** 742 + * ice_pg_nm_cam_key_init - parse 41 bits of Parse Graph NoMatch CAM Key 743 + * @key: pointer to the Parse Graph NoMatch CAM Key structure 744 + * @data: Parse Graph NoMatch CAM Key data to be parsed 745 + */ 746 + static void ice_pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data) 747 + { 748 + key->valid = FIELD_GET(ICE_PGNCK_VLD, data); 749 + key->node_id = FIELD_GET(ICE_PGNCK_NID, data); 750 + key->flag0 = FIELD_GET(ICE_PGNCK_F0, data); 751 + key->flag1 = FIELD_GET(ICE_PGNCK_F1, data); 752 + key->flag2 = FIELD_GET(ICE_PGNCK_F2, data); 753 + key->flag3 = FIELD_GET(ICE_PGNCK_F3, data); 754 + 755 + if (FIELD_GET(ICE_PGNCK_BH, data)) 756 + key->boost_idx = FIELD_GET(ICE_PGNCK_BI, data); 757 + else 758 + key->boost_idx = 0; 759 + 760 + key->alu_reg = FIELD_GET(ICE_PGNCK_AR, data); 761 + } 762 + 763 + #define ICE_PGCK_VLD BIT_ULL(0) 764 + #define 
ICE_PGCK_NID GENMASK_ULL(11, 1) 765 + #define ICE_PGCK_F0 BIT_ULL(12) 766 + #define ICE_PGCK_F1 BIT_ULL(13) 767 + #define ICE_PGCK_F2 BIT_ULL(14) 768 + #define ICE_PGCK_F3 BIT_ULL(15) 769 + #define ICE_PGCK_BH BIT_ULL(16) 770 + #define ICE_PGCK_BI GENMASK_ULL(24, 17) 771 + #define ICE_PGCK_AR GENMASK_ULL(40, 25) 772 + #define ICE_PGCK_NPK_S 41 /* offset for the 2nd 64-bits field */ 773 + #define ICE_PGCK_NPK_IDD (ICE_PGCK_NPK_S / BITS_PER_BYTE) 774 + #define ICE_PGCK_NPK_OFF (ICE_PGCK_NPK_S % BITS_PER_BYTE) 775 + #define ICE_PGCK_NPK GENMASK_ULL(72 - ICE_PGCK_NPK_S, \ 776 + 41 - ICE_PGCK_NPK_S) 777 + 778 + /** 779 + * ice_pg_cam_key_init - parse 73 bits of Parse Graph CAM Key 780 + * @key: pointer to the Parse Graph CAM Key structure 781 + * @data: Parse Graph CAM Key data to be parsed 782 + */ 783 + static void ice_pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data) 784 + { 785 + u64 d64 = *(u64 *)data; 786 + 787 + key->valid = FIELD_GET(ICE_PGCK_VLD, d64); 788 + key->node_id = FIELD_GET(ICE_PGCK_NID, d64); 789 + key->flag0 = FIELD_GET(ICE_PGCK_F0, d64); 790 + key->flag1 = FIELD_GET(ICE_PGCK_F1, d64); 791 + key->flag2 = FIELD_GET(ICE_PGCK_F2, d64); 792 + key->flag3 = FIELD_GET(ICE_PGCK_F3, d64); 793 + 794 + if (FIELD_GET(ICE_PGCK_BH, d64)) 795 + key->boost_idx = FIELD_GET(ICE_PGCK_BI, d64); 796 + else 797 + key->boost_idx = 0; 798 + 799 + key->alu_reg = FIELD_GET(ICE_PGCK_AR, d64); 800 + 801 + d64 = *((u64 *)&data[ICE_PGCK_NPK_IDD]) >> ICE_PGCK_NPK_OFF; 802 + 803 + key->next_proto = FIELD_GET(ICE_PGCK_NPK, d64); 804 + } 805 + 806 + #define ICE_PG_CAM_ACT_S 73 807 + #define ICE_PG_CAM_ACT_IDD (ICE_PG_CAM_ACT_S / BITS_PER_BYTE) 808 + #define ICE_PG_CAM_ACT_OFF (ICE_PG_CAM_ACT_S % BITS_PER_BYTE) 809 + 810 + /** 811 + * ice_pg_cam_parse_item - parse 128 bits of Parse Graph CAM Entry 812 + * @hw: pointer to the hardware structure 813 + * @idx: index of Parse Graph CAM Entry 814 + * @item: item of Parse Graph CAM Entry 815 + * @data: Parse Graph CAM Entry data to be 
parsed 816 + * @size: size of Parse Graph CAM Entry 817 + */ 818 + static void ice_pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, 819 + void *data, int __maybe_unused size) 820 + { 821 + struct ice_pg_cam_item *ci = item; 822 + u8 *buf = data; 823 + u64 d64; 824 + 825 + ci->idx = idx; 826 + 827 + ice_pg_cam_key_init(&ci->key, buf); 828 + 829 + d64 = *((u64 *)&buf[ICE_PG_CAM_ACT_IDD]) >> ICE_PG_CAM_ACT_OFF; 830 + ice_pg_cam_action_init(&ci->action, d64); 831 + 832 + if (hw->debug_mask & ICE_DBG_PARSER) 833 + ice_pg_cam_dump(hw, ci); 834 + } 835 + 836 + #define ICE_PG_SP_CAM_KEY_S 56 837 + #define ICE_PG_SP_CAM_KEY_IDD (ICE_PG_SP_CAM_KEY_S / BITS_PER_BYTE) 838 + 839 + /** 840 + * ice_pg_sp_cam_parse_item - parse 136 bits of Parse Graph Spill CAM Entry 841 + * @hw: pointer to the hardware structure 842 + * @idx: index of Parse Graph Spill CAM Entry 843 + * @item: item of Parse Graph Spill CAM Entry 844 + * @data: Parse Graph Spill CAM Entry data to be parsed 845 + * @size: size of Parse Graph Spill CAM Entry 846 + */ 847 + static void ice_pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, 848 + void *data, int __maybe_unused size) 849 + { 850 + struct ice_pg_cam_item *ci = item; 851 + u8 *buf = data; 852 + u64 d64; 853 + 854 + ci->idx = idx; 855 + 856 + d64 = *(u64 *)buf; 857 + ice_pg_cam_action_init(&ci->action, d64); 858 + 859 + ice_pg_cam_key_init(&ci->key, &buf[ICE_PG_SP_CAM_KEY_IDD]); 860 + 861 + if (hw->debug_mask & ICE_DBG_PARSER) 862 + ice_pg_cam_dump(hw, ci); 863 + } 864 + 865 + #define ICE_PG_NM_CAM_ACT_S 41 866 + #define ICE_PG_NM_CAM_ACT_IDD (ICE_PG_NM_CAM_ACT_S / BITS_PER_BYTE) 867 + #define ICE_PG_NM_CAM_ACT_OFF (ICE_PG_NM_CAM_ACT_S % BITS_PER_BYTE) 868 + 869 + /** 870 + * ice_pg_nm_cam_parse_item - parse 96 bits of Parse Graph NoMatch CAM Entry 871 + * @hw: pointer to the hardware structure 872 + * @idx: index of Parse Graph NoMatch CAM Entry 873 + * @item: item of Parse Graph NoMatch CAM Entry 874 + * @data: Parse Graph NoMatch CAM 
Entry data to be parsed 875 + * @size: size of Parse Graph NoMatch CAM Entry 876 + */ 877 + static void ice_pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, 878 + void *data, int __maybe_unused size) 879 + { 880 + struct ice_pg_nm_cam_item *ci = item; 881 + u8 *buf = data; 882 + u64 d64; 883 + 884 + ci->idx = idx; 885 + 886 + d64 = *(u64 *)buf; 887 + ice_pg_nm_cam_key_init(&ci->key, d64); 888 + 889 + d64 = *((u64 *)&buf[ICE_PG_NM_CAM_ACT_IDD]) >> ICE_PG_NM_CAM_ACT_OFF; 890 + ice_pg_cam_action_init(&ci->action, d64); 891 + 892 + if (hw->debug_mask & ICE_DBG_PARSER) 893 + ice_pg_nm_cam_dump(hw, ci); 894 + } 895 + 896 + #define ICE_PG_NM_SP_CAM_ACT_S 56 897 + #define ICE_PG_NM_SP_CAM_ACT_IDD (ICE_PG_NM_SP_CAM_ACT_S / BITS_PER_BYTE) 898 + #define ICE_PG_NM_SP_CAM_ACT_OFF (ICE_PG_NM_SP_CAM_ACT_S % BITS_PER_BYTE) 899 + 900 + /** 901 + * ice_pg_nm_sp_cam_parse_item - parse 104 bits of Parse Graph NoMatch Spill 902 + * CAM Entry 903 + * @hw: pointer to the hardware structure 904 + * @idx: index of Parse Graph NoMatch Spill CAM Entry 905 + * @item: item of Parse Graph NoMatch Spill CAM Entry 906 + * @data: Parse Graph NoMatch Spill CAM Entry data to be parsed 907 + * @size: size of Parse Graph NoMatch Spill CAM Entry 908 + */ 909 + static void ice_pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx, 910 + void *item, void *data, 911 + int __maybe_unused size) 912 + { 913 + struct ice_pg_nm_cam_item *ci = item; 914 + u8 *buf = data; 915 + u64 d64; 916 + 917 + ci->idx = idx; 918 + 919 + d64 = *(u64 *)buf; 920 + ice_pg_cam_action_init(&ci->action, d64); 921 + 922 + d64 = *((u64 *)&buf[ICE_PG_NM_SP_CAM_ACT_IDD]) >> 923 + ICE_PG_NM_SP_CAM_ACT_OFF; 924 + ice_pg_nm_cam_key_init(&ci->key, d64); 925 + 926 + if (hw->debug_mask & ICE_DBG_PARSER) 927 + ice_pg_nm_cam_dump(hw, ci); 928 + } 929 + 930 + /** 931 + * ice_pg_cam_table_get - create a parse graph cam table 932 + * @hw: pointer to the hardware structure 933 + * 934 + * Return: a pointer to the allocated Parse Graph 
CAM table. 935 + */ 936 + static struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw) 937 + { 938 + return ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM, 939 + sizeof(struct ice_pg_cam_item), 940 + ICE_PG_CAM_TABLE_SIZE, 941 + ice_pg_cam_parse_item, false); 942 + } 943 + 944 + /** 945 + * ice_pg_sp_cam_table_get - create a parse graph spill cam table 946 + * @hw: pointer to the hardware structure 947 + * 948 + * Return: a pointer to the allocated Parse Graph Spill CAM table. 949 + */ 950 + static struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw) 951 + { 952 + return ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL, 953 + sizeof(struct ice_pg_cam_item), 954 + ICE_PG_SP_CAM_TABLE_SIZE, 955 + ice_pg_sp_cam_parse_item, false); 956 + } 957 + 958 + /** 959 + * ice_pg_nm_cam_table_get - create a parse graph no match cam table 960 + * @hw: pointer to the hardware structure 961 + * 962 + * Return: a pointer to the allocated Parse Graph No Match CAM table. 963 + */ 964 + static struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw) 965 + { 966 + return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM, 967 + sizeof(struct ice_pg_nm_cam_item), 968 + ICE_PG_NM_CAM_TABLE_SIZE, 969 + ice_pg_nm_cam_parse_item, false); 970 + } 971 + 972 + /** 973 + * ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table 974 + * @hw: pointer to the hardware structure 975 + * 976 + * Return: a pointer to the allocated Parse Graph No Match Spill CAM table. 
 */
static struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw)
{
	return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL,
				       sizeof(struct ice_pg_nm_cam_item),
				       ICE_PG_NM_SP_CAM_TABLE_SIZE,
				       ice_pg_nm_sp_cam_parse_item, false);
}

/* An entry matches when it is valid and its comparable portion (key.val)
 * is bytewise equal to the lookup key's comparable portion.
 */
static bool __ice_pg_cam_match(struct ice_pg_cam_item *item,
			       struct ice_pg_cam_key *key)
{
	return (item->key.valid &&
		!memcmp(&item->key.val, &key->val, sizeof(key->val)));
}

/* Same rule as __ice_pg_cam_match(), but the compare length is taken from
 * the no-match entry's own key type.
 * NOTE(review): assumes the leading sizeof(item->key.val) bytes of the
 * regular PG CAM key lay out identically to the no-match key — confirm
 * against the struct definitions in the parser header.
 */
static bool __ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *item,
				  struct ice_pg_cam_key *key)
{
	return (item->key.valid &&
		!memcmp(&item->key.val, &key->val, sizeof(item->key.val)));
}

/**
 * ice_pg_cam_match - search parse graph cam table by key
 * @table: parse graph cam table to search
 * @size: cam table size
 * @key: search key
 *
 * Linear scan; returns the first valid entry whose key matches exactly.
 *
 * Return: a pointer to the matching PG CAM item or NULL.
 */
struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
					 int size, struct ice_pg_cam_key *key)
{
	int i;

	for (i = 0; i < size; i++) {
		struct ice_pg_cam_item *item = &table[i];

		if (__ice_pg_cam_match(item, key))
			return item;
	}

	return NULL;
}

/**
 * ice_pg_nm_cam_match - search parse graph no match cam table by key
 * @table: parse graph no match cam table to search
 * @size: cam table size
 * @key: search key
 *
 * Linear scan; returns the first valid entry whose key matches exactly.
 *
 * Return: a pointer to the matching PG No Match CAM item or NULL.
 */
struct ice_pg_nm_cam_item *
ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
		    struct ice_pg_cam_key *key)
{
	int i;

	for (i = 0; i < size; i++) {
		struct ice_pg_nm_cam_item *item = &table[i];

		if (__ice_pg_nm_cam_match(item, key))
			return item;
	}

	return NULL;
}

/*** Ternary match ***/
/* Perform a ternary match on a 1-byte pattern (@pat) given @key and @key_inv
 * Rules (per bit):
 * Key == 0 and Key_inv == 0 : Never match (Don't care)
 * Key == 0 and Key_inv == 1 : Match on bit == 1
 * Key == 1 and Key_inv == 0 : Match on bit == 0
 * Key == 1 and Key_inv == 1 : Always match (Don't care)
 *
 * Return: true if all bits match, false otherwise.
 */
static bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat)
{
	u8 bit_key, bit_key_inv, bit_pat;
	int i;

	for (i = 0; i < BITS_PER_BYTE; i++) {
		bit_key = key & BIT(i);
		bit_key_inv = key_inv & BIT(i);
		bit_pat = pat & BIT(i);

		/* key == 1 && key_inv == 1: don't care, always matches */
		if (bit_key != 0 && bit_key_inv != 0)
			continue;

		/* key == 0 && key_inv == 0: never matches; otherwise the
		 * bit mismatches exactly when the pattern bit equals the
		 * key bit (see the truth table above).
		 */
		if ((bit_key == 0 && bit_key_inv == 0) || bit_key == bit_pat)
			return false;
	}

	return true;
}

/* Ternary-match @len bytes of @pat against @key/@key_inv; true only if
 * every byte matches.
 */
static bool ice_ternary_match(const u8 *key, const u8 *key_inv,
			      const u8 *pat, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i]))
			return false;

	return true;
}

/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
/* Dump the next-protocol key builder of a boost TCAM entry. */
static void ice_bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb)
{
	struct device *dev = ice_hw_to_dev(hw);

	dev_info(dev, "next proto key builder:\n");
	dev_info(dev, "\topc = %d\n", kb->opc);
	dev_info(dev, "\tstart_reg0 = %d\n", kb->start_reg0);
	dev_info(dev, "\tlen_reg1 = %d\n", kb->len_reg1);
}

/* Dump the parse graph key builder of a boost TCAM entry. */
static void ice_bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb)
{
	struct device *dev = ice_hw_to_dev(hw);

	dev_info(dev, "parse graph key builder:\n");
	dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
	dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
	dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
	dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
	dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
	dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
	dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
	dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
	dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
}

/* Dump one of the three ALU entries (@idx) of a boost TCAM entry. */
static void ice_bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int idx)
{
	struct device *dev = ice_hw_to_dev(hw);

	dev_info(dev, "alu%d:\n", idx);
	dev_info(dev, "\topc = %d\n", alu->opc);
	dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
	dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
	dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
	dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
	dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
	dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
	dev_info(dev, "\tinc0 = %d\n", alu->inc0);
	dev_info(dev, "\tinc1 = %d\n", alu->inc1);
	dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
	dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
	dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
	dev_info(dev, "\timm = %d\n", alu->imm);
	dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
	dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
	dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
	dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm);
}

/**
 * ice_bst_tcam_dump - dump a boost tcam info
 * @hw: pointer to the hardware structure
 * @item: boost tcam to dump
 *
 * NOTE(review): each key byte is emitted via its own dev_info() call, so
 * the hex dump spans multiple log lines rather than forming one row.
 */
static void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item)
{
	struct device *dev = ice_hw_to_dev(hw);
	int i;

	dev_info(dev, "addr = %d\n", item->addr);

	dev_info(dev, "key : ");
	for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
		dev_info(dev, "%02x ", item->key[i]);

	dev_info(dev, "\n");

	dev_info(dev, "key_inv: ");
	for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
		dev_info(dev, "%02x ", item->key_inv[i]);

	dev_info(dev, "\n");

	dev_info(dev, "hit_idx_grp = %d\n", item->hit_idx_grp);
	dev_info(dev, "pg_prio = %d\n", item->pg_prio);

	ice_bst_np_kb_dump(hw, &item->np_kb);
	ice_bst_pg_kb_dump(hw, &item->pg_kb);

	ice_bst_alu_dump(hw, &item->alu0, ICE_ALU0_IDX);
	ice_bst_alu_dump(hw, &item->alu1, ICE_ALU1_IDX);
	ice_bst_alu_dump(hw, &item->alu2, ICE_ALU2_IDX);
}

/* Dump a parsed boost TCAM label item (index, classified type, string). */
static void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item)
{
	struct device *dev = ice_hw_to_dev(hw);

	dev_info(dev, "index = %u\n", item->idx);
	dev_info(dev, "type = %u\n", item->type);
	dev_info(dev, "label = %s\n", item->label);
}

/* Boost TCAM ALU entry layout — first 64-bit window (absolute bit offsets) */
#define ICE_BST_ALU_OPC		GENMASK_ULL(5, 0)
#define ICE_BST_ALU_SS		GENMASK_ULL(13, 6)
#define ICE_BST_ALU_SL		GENMASK_ULL(18, 14)
#define ICE_BST_ALU_SXS		BIT_ULL(19)
#define ICE_BST_ALU_SXK		GENMASK_ULL(23, 20)
#define ICE_BST_ALU_SRID	GENMASK_ULL(30, 24)
#define ICE_BST_ALU_DRID	GENMASK_ULL(37, 31)
#define ICE_BST_ALU_INC0	BIT_ULL(38)
#define ICE_BST_ALU_INC1	BIT_ULL(39)
#define ICE_BST_ALU_POO		GENMASK_ULL(41, 40)
#define ICE_BST_ALU_PO		GENMASK_ULL(49, 42)
#define ICE_BST_ALU_BA_S	50	/* offset for the 2nd 64-bits field */
/* Fields below are expressed relative to the second 64-bit window, which is
 * re-anchored at absolute bit ICE_BST_ALU_BA_S (hence the subtraction).
 */
#define ICE_BST_ALU_BA		GENMASK_ULL(57 - ICE_BST_ALU_BA_S, \
					    50 - ICE_BST_ALU_BA_S)
#define ICE_BST_ALU_IMM		GENMASK_ULL(73 - ICE_BST_ALU_BA_S, \
					    58 - ICE_BST_ALU_BA_S)
#define ICE_BST_ALU_DFE		BIT_ULL(74 - ICE_BST_ALU_BA_S)
#define ICE_BST_ALU_DS		GENMASK_ULL(80 - ICE_BST_ALU_BA_S, \
					    75 - ICE_BST_ALU_BA_S)
#define ICE_BST_ALU_DL		GENMASK_ULL(86 - ICE_BST_ALU_BA_S, \
					    81 - ICE_BST_ALU_BA_S)
#define ICE_BST_ALU_FEI		BIT_ULL(87 - ICE_BST_ALU_BA_S)
#define ICE_BST_ALU_FSI		GENMASK_ULL(95 - ICE_BST_ALU_BA_S, \
					    88 - ICE_BST_ALU_BA_S)

/**
 * ice_bst_alu_init - parse 96 bits of ALU entry
 * @alu: pointer to the ALU entry structure
 * @data: ALU entry data to be parsed
 * @off: offset of the ALU entry data
 *
 * The entry is wider than 64 bits, so it is read in two windows: the first
 * 64-bit load (shifted by @off) covers fields up to proto_offset; the read
 * is then re-anchored at the byte containing bit ICE_BST_ALU_BA_S so the
 * remaining fields still fit in a single u64.
 *
 * NOTE(review): callers pass @off as a sub-byte remainder (< 8, computed
 * with % BITS_PER_BYTE), which keeps both shifts well-defined — confirm if
 * new call sites are added. The unaligned little-endian u64 loads match the
 * rest of this file's section parsing.
 */
static void ice_bst_alu_init(struct ice_alu *alu, u8 *data, u8 off)
{
	u64 d64;
	u8 idd;

	d64 = *((u64 *)data) >> off;

	alu->opc = FIELD_GET(ICE_BST_ALU_OPC, d64);
	alu->src_start = FIELD_GET(ICE_BST_ALU_SS, d64);
	alu->src_len = FIELD_GET(ICE_BST_ALU_SL, d64);
	alu->shift_xlate_sel = FIELD_GET(ICE_BST_ALU_SXS, d64);
	alu->shift_xlate_key = FIELD_GET(ICE_BST_ALU_SXK, d64);
	alu->src_reg_id = FIELD_GET(ICE_BST_ALU_SRID, d64);
	alu->dst_reg_id = FIELD_GET(ICE_BST_ALU_DRID, d64);
	alu->inc0 = FIELD_GET(ICE_BST_ALU_INC0, d64);
	alu->inc1 = FIELD_GET(ICE_BST_ALU_INC1, d64);
	alu->proto_offset_opc = FIELD_GET(ICE_BST_ALU_POO, d64);
	alu->proto_offset = FIELD_GET(ICE_BST_ALU_PO, d64);

	/* re-anchor: byte index and residual bit offset of the 2nd window */
	idd = (ICE_BST_ALU_BA_S + off) / BITS_PER_BYTE;
	off = (ICE_BST_ALU_BA_S + off) % BITS_PER_BYTE;
	d64 = *((u64 *)(&data[idd])) >> off;

	alu->branch_addr = FIELD_GET(ICE_BST_ALU_BA, d64);
	alu->imm = FIELD_GET(ICE_BST_ALU_IMM, d64);
	alu->dedicate_flags_ena = FIELD_GET(ICE_BST_ALU_DFE, d64);
	alu->dst_start = FIELD_GET(ICE_BST_ALU_DS, d64);
	alu->dst_len = FIELD_GET(ICE_BST_ALU_DL, d64);
	alu->flags_extr_imm = FIELD_GET(ICE_BST_ALU_FEI, d64);
	alu->flags_start_imm = FIELD_GET(ICE_BST_ALU_FSI, d64);
}

/* Parse Graph Key Build layout within a boost TCAM entry */
#define ICE_BST_PGKB_F0_ENA	BIT_ULL(0)
#define ICE_BST_PGKB_F0_IDX	GENMASK_ULL(6, 1)
#define ICE_BST_PGKB_F1_ENA	BIT_ULL(7)
#define ICE_BST_PGKB_F1_IDX	GENMASK_ULL(13, 8)
#define ICE_BST_PGKB_F2_ENA	BIT_ULL(14)
#define ICE_BST_PGKB_F2_IDX	GENMASK_ULL(20, 15)
#define ICE_BST_PGKB_F3_ENA	BIT_ULL(21)
#define ICE_BST_PGKB_F3_IDX	GENMASK_ULL(27, 22)
#define ICE_BST_PGKB_AR_IDX	GENMASK_ULL(34, 28)

/**
 * ice_bst_pgkb_init - parse 35 bits of Parse Graph Key Build
 * @kb: pointer to the Parse Graph Key Build structure
 * @data: Parse Graph Key Build data to be parsed
 */
static void ice_bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
{
	kb->flag0_ena = FIELD_GET(ICE_BST_PGKB_F0_ENA, data);
	kb->flag0_idx = FIELD_GET(ICE_BST_PGKB_F0_IDX, data);
	kb->flag1_ena = FIELD_GET(ICE_BST_PGKB_F1_ENA, data);
	kb->flag1_idx = FIELD_GET(ICE_BST_PGKB_F1_IDX, data);
	kb->flag2_ena = FIELD_GET(ICE_BST_PGKB_F2_ENA, data);
	kb->flag2_idx = FIELD_GET(ICE_BST_PGKB_F2_IDX, data);
	kb->flag3_ena = FIELD_GET(ICE_BST_PGKB_F3_ENA, data);
	kb->flag3_idx = FIELD_GET(ICE_BST_PGKB_F3_IDX, data);
	kb->alu_reg_idx = FIELD_GET(ICE_BST_PGKB_AR_IDX, data);
}

/* Next Protocol Key Build layout within a boost TCAM entry */
#define ICE_BST_NPKB_OPC	GENMASK(1, 0)
#define ICE_BST_NPKB_S_R0	GENMASK(9, 2)
#define ICE_BST_NPKB_L_R1	GENMASK(17, 10)

/**
 * ice_bst_npkb_init - parse 18 bits of Next Protocol Key Build
 * @kb: pointer to the Next Protocol
Key Build structure 1282 + * @data: Next Protocol Key Build data to be parsed 1283 + */ 1284 + static void ice_bst_npkb_init(struct ice_np_keybuilder *kb, u32 data) 1285 + { 1286 + kb->opc = FIELD_GET(ICE_BST_NPKB_OPC, data); 1287 + kb->start_reg0 = FIELD_GET(ICE_BST_NPKB_S_R0, data); 1288 + kb->len_reg1 = FIELD_GET(ICE_BST_NPKB_L_R1, data); 1289 + } 1290 + 1291 + #define ICE_BT_KEY_S 32 1292 + #define ICE_BT_KEY_IDD (ICE_BT_KEY_S / BITS_PER_BYTE) 1293 + #define ICE_BT_KIV_S 192 1294 + #define ICE_BT_KIV_IDD (ICE_BT_KIV_S / BITS_PER_BYTE) 1295 + #define ICE_BT_HIG_S 352 1296 + #define ICE_BT_HIG_IDD (ICE_BT_HIG_S / BITS_PER_BYTE) 1297 + #define ICE_BT_PGP_S 360 1298 + #define ICE_BT_PGP_IDD (ICE_BT_PGP_S / BITS_PER_BYTE) 1299 + #define ICE_BT_PGP_M GENMASK(361 - ICE_BT_PGP_S, 360 - ICE_BT_PGP_S) 1300 + #define ICE_BT_NPKB_S 362 1301 + #define ICE_BT_NPKB_IDD (ICE_BT_NPKB_S / BITS_PER_BYTE) 1302 + #define ICE_BT_NPKB_OFF (ICE_BT_NPKB_S % BITS_PER_BYTE) 1303 + #define ICE_BT_PGKB_S 380 1304 + #define ICE_BT_PGKB_IDD (ICE_BT_PGKB_S / BITS_PER_BYTE) 1305 + #define ICE_BT_PGKB_OFF (ICE_BT_PGKB_S % BITS_PER_BYTE) 1306 + #define ICE_BT_ALU0_S 415 1307 + #define ICE_BT_ALU0_IDD (ICE_BT_ALU0_S / BITS_PER_BYTE) 1308 + #define ICE_BT_ALU0_OFF (ICE_BT_ALU0_S % BITS_PER_BYTE) 1309 + #define ICE_BT_ALU1_S 511 1310 + #define ICE_BT_ALU1_IDD (ICE_BT_ALU1_S / BITS_PER_BYTE) 1311 + #define ICE_BT_ALU1_OFF (ICE_BT_ALU1_S % BITS_PER_BYTE) 1312 + #define ICE_BT_ALU2_S 607 1313 + #define ICE_BT_ALU2_IDD (ICE_BT_ALU2_S / BITS_PER_BYTE) 1314 + #define ICE_BT_ALU2_OFF (ICE_BT_ALU2_S % BITS_PER_BYTE) 1315 + 1316 + /** 1317 + * ice_bst_parse_item - parse 704 bits of Boost TCAM entry 1318 + * @hw: pointer to the hardware structure 1319 + * @idx: index of Boost TCAM entry 1320 + * @item: item of Boost TCAM entry 1321 + * @data: Boost TCAM entry data to be parsed 1322 + * @size: size of Boost TCAM entry 1323 + */ 1324 + static void ice_bst_parse_item(struct ice_hw *hw, u16 idx, void *item, 1325 
+ void *data, int __maybe_unused size) 1326 + { 1327 + struct ice_bst_tcam_item *ti = item; 1328 + u8 *buf = (u8 *)data; 1329 + int i; 1330 + 1331 + ti->addr = *(u16 *)buf; 1332 + 1333 + for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) { 1334 + ti->key[i] = buf[ICE_BT_KEY_IDD + i]; 1335 + ti->key_inv[i] = buf[ICE_BT_KIV_IDD + i]; 1336 + } 1337 + ti->hit_idx_grp = buf[ICE_BT_HIG_IDD]; 1338 + ti->pg_prio = buf[ICE_BT_PGP_IDD] & ICE_BT_PGP_M; 1339 + 1340 + ice_bst_npkb_init(&ti->np_kb, 1341 + *((u32 *)(&buf[ICE_BT_NPKB_IDD])) >> 1342 + ICE_BT_NPKB_OFF); 1343 + ice_bst_pgkb_init(&ti->pg_kb, 1344 + *((u64 *)(&buf[ICE_BT_PGKB_IDD])) >> 1345 + ICE_BT_PGKB_OFF); 1346 + 1347 + ice_bst_alu_init(&ti->alu0, &buf[ICE_BT_ALU0_IDD], ICE_BT_ALU0_OFF); 1348 + ice_bst_alu_init(&ti->alu1, &buf[ICE_BT_ALU1_IDD], ICE_BT_ALU1_OFF); 1349 + ice_bst_alu_init(&ti->alu2, &buf[ICE_BT_ALU2_IDD], ICE_BT_ALU2_OFF); 1350 + 1351 + if (hw->debug_mask & ICE_DBG_PARSER) 1352 + ice_bst_tcam_dump(hw, ti); 1353 + } 1354 + 1355 + /** 1356 + * ice_bst_tcam_table_get - create a boost tcam table 1357 + * @hw: pointer to the hardware structure 1358 + * 1359 + * Return: a pointer to the allocated Boost TCAM table. 
1360 + */ 1361 + static struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw) 1362 + { 1363 + return ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM, 1364 + sizeof(struct ice_bst_tcam_item), 1365 + ICE_BST_TCAM_TABLE_SIZE, 1366 + ice_bst_parse_item, true); 1367 + } 1368 + 1369 + static void ice_parse_lbl_item(struct ice_hw *hw, u16 idx, void *item, 1370 + void *data, int __maybe_unused size) 1371 + { 1372 + struct ice_lbl_item *lbl_item = item; 1373 + struct ice_lbl_item *lbl_data = data; 1374 + 1375 + lbl_item->idx = lbl_data->idx; 1376 + memcpy(lbl_item->label, lbl_data->label, sizeof(lbl_item->label)); 1377 + 1378 + if (strstarts(lbl_item->label, ICE_LBL_BST_DVM)) 1379 + lbl_item->type = ICE_LBL_BST_TYPE_DVM; 1380 + else if (strstarts(lbl_item->label, ICE_LBL_BST_SVM)) 1381 + lbl_item->type = ICE_LBL_BST_TYPE_SVM; 1382 + else if (strstarts(lbl_item->label, ICE_LBL_TNL_VXLAN)) 1383 + lbl_item->type = ICE_LBL_BST_TYPE_VXLAN; 1384 + else if (strstarts(lbl_item->label, ICE_LBL_TNL_GENEVE)) 1385 + lbl_item->type = ICE_LBL_BST_TYPE_GENEVE; 1386 + else if (strstarts(lbl_item->label, ICE_LBL_TNL_UDP_ECPRI)) 1387 + lbl_item->type = ICE_LBL_BST_TYPE_UDP_ECPRI; 1388 + 1389 + if (hw->debug_mask & ICE_DBG_PARSER) 1390 + ice_lbl_dump(hw, lbl_item); 1391 + } 1392 + 1393 + /** 1394 + * ice_bst_lbl_table_get - create a boost label table 1395 + * @hw: pointer to the hardware structure 1396 + * 1397 + * Return: a pointer to the allocated Boost label table. 
 */
static struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw)
{
	return ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM,
				       sizeof(struct ice_lbl_item),
				       ICE_BST_TCAM_TABLE_SIZE,
				       ice_parse_lbl_item, true);
}

/**
 * ice_bst_tcam_match - match a pattern on the boost tcam table
 * @tcam_table: boost tcam table to search
 * @pat: pattern to match
 *
 * Return: a pointer to the matching Boost TCAM item or NULL.
 */
struct ice_bst_tcam_item *
ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat)
{
	int i;

	for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
		struct ice_bst_tcam_item *item = &tcam_table[i];

		/* hit_idx_grp == 0 marks an unused TCAM entry */
		if (item->hit_idx_grp == 0)
			continue;
		if (ice_ternary_match(item->key, item->key_inv, pat,
				      ICE_BST_TCAM_KEY_SIZE))
			return item;
	}

	return NULL;
}

/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
/**
 * ice_ptype_mk_tcam_dump - dump a ptype marker tcam info
 * @hw: pointer to the hardware structure
 * @item: ptype marker tcam to dump
 */
static void ice_ptype_mk_tcam_dump(struct ice_hw *hw,
				   struct ice_ptype_mk_tcam_item *item)
{
	struct device *dev = ice_hw_to_dev(hw);
	int i;

	dev_info(dev, "address = %d\n", item->address);
	dev_info(dev, "ptype = %d\n", item->ptype);

	dev_info(dev, "key :");
	for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
		dev_info(dev, "%02x ", item->key[i]);

	dev_info(dev, "\n");

	dev_info(dev, "key_inv:");
	for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
		dev_info(dev, "%02x ", item->key_inv[i]);

	dev_info(dev, "\n");
}

/**
 * ice_parse_ptype_mk_tcam_item - parse a ptype marker tcam section item
 * @hw: pointer to the hardware structure
 * @idx: index of the item (unused; the entry is copied verbatim)
 * @item: &struct ice_ptype_mk_tcam_item to fill
 * @data: raw section data to copy from
 * @size: number of bytes to copy
 */
static void ice_parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx,
					 void *item, void *data, int size)
{
	memcpy(item, data, size);

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_ptype_mk_tcam_dump(hw,
				       (struct ice_ptype_mk_tcam_item *)item);
}

/**
 * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated Marker PType TCAM table.
 */
static
struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw)
{
	return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE,
				       sizeof(struct ice_ptype_mk_tcam_item),
				       ICE_PTYPE_MK_TCAM_TABLE_SIZE,
				       ice_parse_ptype_mk_tcam_item, true);
}

/**
 * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table
 * @table: ptype marker tcam table to search
 * @pat: pattern to match
 * @len: length of the pattern
 *
 * Return: a pointer to the matching Marker PType item or NULL.
 */
struct ice_ptype_mk_tcam_item *
ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
			u8 *pat, int len)
{
	int i;

	for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) {
		struct ice_ptype_mk_tcam_item *item = &table[i];

		if (ice_ternary_match(item->key, item->key_inv, pat, len))
			return item;
	}

	return NULL;
}

/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
/**
 * ice_mk_grp_dump - dump a marker group item info
 * @hw: pointer to the hardware structure
 * @item: marker group item to dump
 */
static void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item)
{
	struct device *dev = ice_hw_to_dev(hw);
	int i;

	dev_info(dev, "index = %d\n", item->idx);

	dev_info(dev, "markers: ");
	for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
		dev_info(dev, "%d ", item->markers[i]);

	dev_info(dev, "\n");
}

/**
 * ice_mk_grp_parse_item - parse a marker group section item
 * @hw: pointer to the hardware structure
 * @idx: index of the marker group entry
 * @item: &struct ice_mk_grp_item to fill
 * @data: raw section data; one byte per marker
 * @size: size of the entry (unused)
 */
static void ice_mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
				  void *data, int __maybe_unused size)
{
	struct ice_mk_grp_item *grp = item;
	u8 *buf = data;
	int i;

	grp->idx = idx;

	for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
		grp->markers[i] = buf[i];

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_mk_grp_dump(hw, grp);
}

/**
 * ice_mk_grp_table_get - create a marker group table
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated Marker Group ID table.
 */
static struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw)
{
	return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP,
				       sizeof(struct ice_mk_grp_item),
				       ICE_MK_GRP_TABLE_SIZE,
				       ice_mk_grp_parse_item, false);
}

/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
static void ice_proto_off_dump(struct ice_hw *hw,
			       struct ice_proto_off *po, int idx)
{
	struct device *dev = ice_hw_to_dev(hw);

	dev_info(dev, "proto %d\n", idx);
	dev_info(dev, "\tpolarity = %d\n", po->polarity);
	dev_info(dev, "\tproto_id = %d\n", po->proto_id);
	dev_info(dev, "\toffset = %d\n", po->offset);
}

/**
 * ice_proto_grp_dump - dump a proto group item info
 * @hw: pointer to the hardware structure
 * @item: proto group item to dump
 */
static void ice_proto_grp_dump(struct ice_hw *hw,
			       struct ice_proto_grp_item *item)
{
	int i;

	dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);

	for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++)
		ice_proto_off_dump(hw, &item->po[i], i);
}

#define ICE_PO_POL	BIT(0)
#define ICE_PO_PID	GENMASK(8, 1)
#define ICE_PO_OFF	GENMASK(21, 12)

/**
 * ice_proto_off_parse - parse 22 bits of Protocol entry
 * @po: pointer to the Protocol entry structure
 * @data: Protocol entry data to be parsed
 */
static void ice_proto_off_parse(struct ice_proto_off *po, u32 data)
{
	po->polarity = FIELD_GET(ICE_PO_POL, data);
	po->proto_id = FIELD_GET(ICE_PO_PID, data);
	po->offset = FIELD_GET(ICE_PO_OFF, data);
}

/**
 * ice_proto_grp_parse_item - parse 192 bits of Protocol Group Table entry
 * @hw: pointer to the hardware structure
 * @idx: index of Protocol Group Table entry
 * @item: item of Protocol Group Table entry
 * @data: Protocol Group Table entry data to be parsed
 * @size: size of Protocol Group Table entry
 */
static void ice_proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
				     void *data, int __maybe_unused size)
{
	struct ice_proto_grp_item *grp = item;
	u8 *buf = (u8 *)data;
	u8 idd, off;
	u32 d32;
	int i;

	grp->idx = idx;

	for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
		/* byte index and bit offset of the i-th packed entry */
		idd = (ICE_PROTO_GRP_ITEM_SIZE * i) / BITS_PER_BYTE;
		off = (ICE_PROTO_GRP_ITEM_SIZE * i) % BITS_PER_BYTE;
		d32 = *((u32 *)&buf[idd]) >> off;
		ice_proto_off_parse(&grp->po[i], d32);
	}

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_proto_grp_dump(hw, grp);
}

/**
 * ice_proto_grp_table_get - create a proto group table
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated Protocol Group table.
 */
static struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw)
{
	return ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP,
				       sizeof(struct ice_proto_grp_item),
				       ICE_PROTO_GRP_TABLE_SIZE,
				       ice_proto_grp_parse_item, false);
}

/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
/**
 * ice_flg_rd_dump - dump a flag redirect item info
 * @hw: pointer to the hardware structure
 * @item: flag redirect item to dump
 */
static void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item)
{
	struct device *dev = ice_hw_to_dev(hw);

	dev_info(dev, "index = %d\n", item->idx);
	dev_info(dev, "expose = %d\n", item->expose);
	dev_info(dev, "intr_flg_id = %d\n", item->intr_flg_id);
}

#define ICE_FRT_EXPO	BIT(0)
#define ICE_FRT_IFID	GENMASK(6, 1)

/**
 * ice_flg_rd_parse_item - parse 8 bits of Flag Redirect Table entry
 * @hw: pointer to the hardware structure
 * @idx: index of Flag Redirect Table entry
 * @item: item of Flag Redirect Table entry
 * @data: Flag Redirect Table entry data to be parsed
 * @size: size of Flag Redirect Table entry
 */
static void ice_flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item,
				  void *data, int __maybe_unused size)
{
	struct ice_flg_rd_item *rdi = item;
	u8 d8 = *(u8 *)data;

	rdi->idx = idx;
	rdi->expose = FIELD_GET(ICE_FRT_EXPO, d8);
	rdi->intr_flg_id = FIELD_GET(ICE_FRT_IFID, d8);

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_flg_rd_dump(hw, rdi);
}

/**
 * ice_flg_rd_table_get - create a flag redirect table
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated Flags Redirection table.
 */
static struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw)
{
	return ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR,
				       sizeof(struct ice_flg_rd_item),
				       ICE_FLG_RD_TABLE_SIZE,
				       ice_flg_rd_parse_item, false);
}

/**
 * ice_flg_redirect - redirect a parser flag to packet flag
 * @table: flag redirect table
 * @psr_flg: parser flag to redirect
 *
 * Return: flag or 0 if @psr_flg = 0.
 */
u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg)
{
	u64 flg = 0;
	int i;

	for (i = 0; i < ICE_FLG_RDT_SIZE; i++) {
		struct ice_flg_rd_item *item = &table[i];

		if (!item->expose)
			continue;

		/* NOTE(review): BIT() is unsigned long; if intr_flg_id or i
		 * can reach >= 32 on 32-bit builds, BIT_ULL() would be
		 * required here -- confirm supported architectures.
		 */
		if (psr_flg & BIT(item->intr_flg_id))
			flg |= BIT(i);
	}

	return flg;
}

/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
 * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
 * sections ***/
static void ice_xlt_kb_entry_dump(struct ice_hw *hw,
				  struct ice_xlt_kb_entry *entry, int idx)
{
	struct device *dev = ice_hw_to_dev(hw);
	int i;

	dev_info(dev, "key builder entry %d\n", idx);
	dev_info(dev, "\txlt1_ad_sel = %d\n", entry->xlt1_ad_sel);
	dev_info(dev, "\txlt2_ad_sel = %d\n", entry->xlt2_ad_sel);

	for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++)
		dev_info(dev, "\tflg%d_sel = %d\n", i, entry->flg0_14_sel[i]);

	dev_info(dev, "\txlt1_md_sel = %d\n", entry->xlt1_md_sel);
	dev_info(dev, "\txlt2_md_sel = %d\n", entry->xlt2_md_sel);
}

/**
 * ice_xlt_kb_dump - dump a xlt key build info
 * @hw: pointer to the hardware structure
 * @kb: key build to dump
 */
static void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb)
{
	struct device *dev = ice_hw_to_dev(hw);
	int i;

	dev_info(dev, "xlt1_pm = %d\n", kb->xlt1_pm);
	dev_info(dev, "xlt2_pm = %d\n", kb->xlt2_pm);
	dev_info(dev, "prof_id_pm = %d\n", kb->prof_id_pm);
	dev_info(dev, "flag15 lo = 0x%08x\n", (u32)kb->flag15);
	dev_info(dev, "flag15 hi = 0x%08x\n",
		 (u32)(kb->flag15 >> (sizeof(u32) * BITS_PER_BYTE)));

	for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
		ice_xlt_kb_entry_dump(hw, &kb->entries[i], i);
}

/* Field masks below are relative to the start of their containing 64-bit
 * load (the *_S defines give the absolute bit offset of each load).
 */
#define ICE_XLT_KB_X1AS_S	32	/* offset for the 1st 64-bits field */
#define ICE_XLT_KB_X1AS_IDD	(ICE_XLT_KB_X1AS_S / BITS_PER_BYTE)
#define ICE_XLT_KB_X1AS_OFF	(ICE_XLT_KB_X1AS_S % BITS_PER_BYTE)
#define ICE_XLT_KB_X1AS		GENMASK_ULL(34 - ICE_XLT_KB_X1AS_S, \
					    32 - ICE_XLT_KB_X1AS_S)
#define ICE_XLT_KB_X2AS		GENMASK_ULL(37 - ICE_XLT_KB_X1AS_S, \
					    35 - ICE_XLT_KB_X1AS_S)
#define ICE_XLT_KB_FL00		GENMASK_ULL(46 - ICE_XLT_KB_X1AS_S, \
					    38 - ICE_XLT_KB_X1AS_S)
#define ICE_XLT_KB_FL01		GENMASK_ULL(55 - ICE_XLT_KB_X1AS_S, \
					    47 - ICE_XLT_KB_X1AS_S)
#define ICE_XLT_KB_FL02		GENMASK_ULL(64 - ICE_XLT_KB_X1AS_S, \
					    56 - ICE_XLT_KB_X1AS_S)
#define ICE_XLT_KB_FL03		GENMASK_ULL(73 - ICE_XLT_KB_X1AS_S, \
					    65 - ICE_XLT_KB_X1AS_S)
#define ICE_XLT_KB_FL04		GENMASK_ULL(82 - ICE_XLT_KB_X1AS_S, \
					    74 - ICE_XLT_KB_X1AS_S)
#define ICE_XLT_KB_FL05		GENMASK_ULL(91 - ICE_XLT_KB_X1AS_S, \
					    83 - ICE_XLT_KB_X1AS_S)
#define ICE_XLT_KB_FL06_S	92	/* offset for the 2nd 64-bits field */
#define ICE_XLT_KB_FL06_IDD	(ICE_XLT_KB_FL06_S / BITS_PER_BYTE)
#define ICE_XLT_KB_FL06_OFF	(ICE_XLT_KB_FL06_S % BITS_PER_BYTE)
#define ICE_XLT_KB_FL06		GENMASK_ULL(100 - ICE_XLT_KB_FL06_S, \
					    92 - ICE_XLT_KB_FL06_S)
#define ICE_XLT_KB_FL07		GENMASK_ULL(109 - ICE_XLT_KB_FL06_S, \
					    101 - ICE_XLT_KB_FL06_S)
#define ICE_XLT_KB_FL08		GENMASK_ULL(118 - ICE_XLT_KB_FL06_S, \
					    110 - ICE_XLT_KB_FL06_S)
#define ICE_XLT_KB_FL09		GENMASK_ULL(127 - ICE_XLT_KB_FL06_S, \
					    119 - ICE_XLT_KB_FL06_S)
#define ICE_XLT_KB_FL10		GENMASK_ULL(136 - ICE_XLT_KB_FL06_S, \
					    128 - ICE_XLT_KB_FL06_S)
#define ICE_XLT_KB_FL11		GENMASK_ULL(145 - ICE_XLT_KB_FL06_S, \
					    137 - ICE_XLT_KB_FL06_S)
#define ICE_XLT_KB_FL12_S	146	/* offset for the 3rd 64-bits field */
#define ICE_XLT_KB_FL12_IDD	(ICE_XLT_KB_FL12_S / BITS_PER_BYTE)
#define ICE_XLT_KB_FL12_OFF	(ICE_XLT_KB_FL12_S % BITS_PER_BYTE)
#define ICE_XLT_KB_FL12		GENMASK_ULL(154 - ICE_XLT_KB_FL12_S, \
					    146 - ICE_XLT_KB_FL12_S)
#define ICE_XLT_KB_FL13		GENMASK_ULL(163 - ICE_XLT_KB_FL12_S, \
					    155 - ICE_XLT_KB_FL12_S)
#define ICE_XLT_KB_FL14		GENMASK_ULL(181 - ICE_XLT_KB_FL12_S, \
					    164 - ICE_XLT_KB_FL12_S)
#define ICE_XLT_KB_X1MS		GENMASK_ULL(186 - ICE_XLT_KB_FL12_S, \
					    182 - ICE_XLT_KB_FL12_S)
#define ICE_XLT_KB_X2MS		GENMASK_ULL(191 - ICE_XLT_KB_FL12_S, \
					    187 - ICE_XLT_KB_FL12_S)

/**
 * ice_kb_entry_init - parse 192 bits of XLT Key Builder entry
 * @entry: pointer to the XLT Key Builder entry structure
 * @data: XLT Key Builder entry data to be parsed
 */
static void ice_kb_entry_init(struct ice_xlt_kb_entry *entry, u8 *data)
{
	u8 i = 0;
	u64 d64;

	d64 = *((u64 *)&data[ICE_XLT_KB_X1AS_IDD]) >> ICE_XLT_KB_X1AS_OFF;

	entry->xlt1_ad_sel = FIELD_GET(ICE_XLT_KB_X1AS, d64);
	entry->xlt2_ad_sel = FIELD_GET(ICE_XLT_KB_X2AS, d64);

	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL00, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL01, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL02, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL03, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL04, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL05, d64);

	d64 = *((u64 *)&data[ICE_XLT_KB_FL06_IDD]) >> ICE_XLT_KB_FL06_OFF;

	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL06, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL07, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL08, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL09, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL10, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL11, d64);

	d64 = *((u64 *)&data[ICE_XLT_KB_FL12_IDD]) >> ICE_XLT_KB_FL12_OFF;

	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL12, d64);
	entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL13, d64);
	entry->flg0_14_sel[i] = FIELD_GET(ICE_XLT_KB_FL14, d64);

	entry->xlt1_md_sel = FIELD_GET(ICE_XLT_KB_X1MS, d64);
	entry->xlt2_md_sel = FIELD_GET(ICE_XLT_KB_X2MS, d64);
}

#define ICE_XLT_KB_X1PM_OFF	0
#define ICE_XLT_KB_X2PM_OFF	1
#define ICE_XLT_KB_PIPM_OFF	2
#define ICE_XLT_KB_FL15_OFF	4
#define ICE_XLT_KB_TBL_OFF	12

/**
 * ice_parse_kb_data - parse 204 bits of XLT Key Build Table
 * @hw: pointer to the hardware structure
 * @kb: pointer to the XLT Key Build Table structure
 * @data: XLT Key Build Table data to be parsed
 */
static void ice_parse_kb_data(struct ice_hw *hw, struct ice_xlt_kb *kb,
			      void *data)
{
	u8 *buf = data;
	int i;

	kb->xlt1_pm = buf[ICE_XLT_KB_X1PM_OFF];
	kb->xlt2_pm = buf[ICE_XLT_KB_X2PM_OFF];
	kb->prof_id_pm = buf[ICE_XLT_KB_PIPM_OFF];

	kb->flag15 = *(u64 *)&buf[ICE_XLT_KB_FL15_OFF];
	for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
		ice_kb_entry_init(&kb->entries[i],
				  &buf[ICE_XLT_KB_TBL_OFF +
				       i * ICE_XLT_KB_TBL_ENTRY_SIZE]);

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_xlt_kb_dump(hw, kb);
}

/**
 * ice_xlt_kb_get - allocate and parse an XLT key build from a DDP section
 * @hw: pointer to the hardware structure
 * @sect_type: section type of the key builder to load
 *
 * Return: a pointer to the allocated Key Builder table or ERR_PTR.
 * The caller owns the returned memory and frees it with kfree().
 */
static struct ice_xlt_kb *ice_xlt_kb_get(struct ice_hw *hw, u32 sect_type)
{
	struct ice_pkg_enum state = {};
	struct ice_seg *seg = hw->seg;
	struct ice_xlt_kb *kb;
	void *data;

	if (!seg)
		return ERR_PTR(-EINVAL);

	kb = kzalloc(sizeof(*kb), GFP_KERNEL);
	if (!kb)
		return ERR_PTR(-ENOMEM);

	data = ice_pkg_enum_section(seg, &state, sect_type);
	if (!data) {
		ice_debug(hw, ICE_DBG_PARSER, "failed to find section type %d.\n",
			  sect_type);
		kfree(kb);
		return ERR_PTR(-EINVAL);
	}

	ice_parse_kb_data(hw, kb, data);

	return kb;
}

/**
 * ice_xlt_kb_get_sw - create switch xlt key build
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated Key Builder table for Switch.
 */
static struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw)
{
	return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_SW);
}

/**
 * ice_xlt_kb_get_acl - create acl xlt key build
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated Key Builder table for ACL.
 */
static struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw)
{
	return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_ACL);
}

/**
 * ice_xlt_kb_get_fd - create fdir xlt key build
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated Key Builder table for Flow Director.
 */
static struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw)
{
	return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_FD);
}

/**
 * ice_xlt_kb_get_rss - create rss xlt key build
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated Key Builder table for RSS.
 */
static struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw)
{
	return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_RSS);
}

#define ICE_XLT_KB_MASK		GENMASK_ULL(5, 0)

/**
 * ice_xlt_kb_flag_get - aggregate 64 bits packet flag into 16 bits xlt flag
 * @kb: xlt key build
 * @pkt_flag: 64 bits packet flag
 *
 * Return: XLT flag or 0 if @pkt_flag = 0.
 */
u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag)
{
	struct ice_xlt_kb_entry *entry = &kb->entries[0];
	u16 flag = 0;
	int i;

	/* check flag 15 */
	if (kb->flag15 & pkt_flag)
		flag = BIT(ICE_XLT_KB_FLAG0_14_CNT);

	/* check flag 0 - 14 */
	for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) {
		/* only check first entry */
		u16 idx = entry->flg0_14_sel[i] & ICE_XLT_KB_MASK;

		/* NOTE(review): idx can be up to 63 and BIT() is unsigned
		 * long; on 32-bit builds BIT_ULL() would be required --
		 * confirm supported architectures.
		 */
		if (pkt_flag & BIT(idx))
			flag |= (u16)BIT(i);
	}

	return flag;
}

/*** Parser API ***/
/**
 * ice_parser_create - create a parser instance
 * @hw: pointer to the hardware structure
 *
 * Return: a pointer to the allocated parser instance or ERR_PTR
 * in case of error.
1997 + */ 1998 + struct ice_parser *ice_parser_create(struct ice_hw *hw) 1999 + { 2000 + struct ice_parser *p; 2001 + void *err; 2002 + 2003 + p = kzalloc(sizeof(*p), GFP_KERNEL); 2004 + if (!p) 2005 + return ERR_PTR(-ENOMEM); 2006 + 2007 + p->hw = hw; 2008 + p->rt.psr = p; 2009 + 2010 + p->imem_table = ice_imem_table_get(hw); 2011 + if (IS_ERR(p->imem_table)) { 2012 + err = p->imem_table; 2013 + goto err; 2014 + } 2015 + 2016 + p->mi_table = ice_metainit_table_get(hw); 2017 + if (IS_ERR(p->mi_table)) { 2018 + err = p->mi_table; 2019 + goto err; 2020 + } 2021 + 2022 + p->pg_cam_table = ice_pg_cam_table_get(hw); 2023 + if (IS_ERR(p->pg_cam_table)) { 2024 + err = p->pg_cam_table; 2025 + goto err; 2026 + } 2027 + 2028 + p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw); 2029 + if (IS_ERR(p->pg_sp_cam_table)) { 2030 + err = p->pg_sp_cam_table; 2031 + goto err; 2032 + } 2033 + 2034 + p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw); 2035 + if (IS_ERR(p->pg_nm_cam_table)) { 2036 + err = p->pg_nm_cam_table; 2037 + goto err; 2038 + } 2039 + 2040 + p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw); 2041 + if (IS_ERR(p->pg_nm_sp_cam_table)) { 2042 + err = p->pg_nm_sp_cam_table; 2043 + goto err; 2044 + } 2045 + 2046 + p->bst_tcam_table = ice_bst_tcam_table_get(hw); 2047 + if (IS_ERR(p->bst_tcam_table)) { 2048 + err = p->bst_tcam_table; 2049 + goto err; 2050 + } 2051 + 2052 + p->bst_lbl_table = ice_bst_lbl_table_get(hw); 2053 + if (IS_ERR(p->bst_lbl_table)) { 2054 + err = p->bst_lbl_table; 2055 + goto err; 2056 + } 2057 + 2058 + p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw); 2059 + if (IS_ERR(p->ptype_mk_tcam_table)) { 2060 + err = p->ptype_mk_tcam_table; 2061 + goto err; 2062 + } 2063 + 2064 + p->mk_grp_table = ice_mk_grp_table_get(hw); 2065 + if (IS_ERR(p->mk_grp_table)) { 2066 + err = p->mk_grp_table; 2067 + goto err; 2068 + } 2069 + 2070 + p->proto_grp_table = ice_proto_grp_table_get(hw); 2071 + if (IS_ERR(p->proto_grp_table)) { 2072 + err = 
p->proto_grp_table; 2073 + goto err; 2074 + } 2075 + 2076 + p->flg_rd_table = ice_flg_rd_table_get(hw); 2077 + if (IS_ERR(p->flg_rd_table)) { 2078 + err = p->flg_rd_table; 2079 + goto err; 2080 + } 2081 + 2082 + p->xlt_kb_sw = ice_xlt_kb_get_sw(hw); 2083 + if (IS_ERR(p->xlt_kb_sw)) { 2084 + err = p->xlt_kb_sw; 2085 + goto err; 2086 + } 2087 + 2088 + p->xlt_kb_acl = ice_xlt_kb_get_acl(hw); 2089 + if (IS_ERR(p->xlt_kb_acl)) { 2090 + err = p->xlt_kb_acl; 2091 + goto err; 2092 + } 2093 + 2094 + p->xlt_kb_fd = ice_xlt_kb_get_fd(hw); 2095 + if (IS_ERR(p->xlt_kb_fd)) { 2096 + err = p->xlt_kb_fd; 2097 + goto err; 2098 + } 2099 + 2100 + p->xlt_kb_rss = ice_xlt_kb_get_rss(hw); 2101 + if (IS_ERR(p->xlt_kb_rss)) { 2102 + err = p->xlt_kb_rss; 2103 + goto err; 2104 + } 2105 + 2106 + return p; 2107 + err: 2108 + ice_parser_destroy(p); 2109 + return err; 2110 + } 2111 + 2112 + /** 2113 + * ice_parser_destroy - destroy a parser instance 2114 + * @psr: pointer to a parser instance 2115 + */ 2116 + void ice_parser_destroy(struct ice_parser *psr) 2117 + { 2118 + kfree(psr->imem_table); 2119 + kfree(psr->mi_table); 2120 + kfree(psr->pg_cam_table); 2121 + kfree(psr->pg_sp_cam_table); 2122 + kfree(psr->pg_nm_cam_table); 2123 + kfree(psr->pg_nm_sp_cam_table); 2124 + kfree(psr->bst_tcam_table); 2125 + kfree(psr->bst_lbl_table); 2126 + kfree(psr->ptype_mk_tcam_table); 2127 + kfree(psr->mk_grp_table); 2128 + kfree(psr->proto_grp_table); 2129 + kfree(psr->flg_rd_table); 2130 + kfree(psr->xlt_kb_sw); 2131 + kfree(psr->xlt_kb_acl); 2132 + kfree(psr->xlt_kb_fd); 2133 + kfree(psr->xlt_kb_rss); 2134 + 2135 + kfree(psr); 2136 + } 2137 + 2138 + /** 2139 + * ice_parser_run - parse on a packet in binary and return the result 2140 + * @psr: pointer to a parser instance 2141 + * @pkt_buf: packet data 2142 + * @pkt_len: packet length 2143 + * @rslt: input/output parameter to save parser result. 2144 + * 2145 + * Return: 0 on success or errno. 
 */
int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
		   int pkt_len, struct ice_parser_result *rslt)
{
	ice_parser_rt_reset(&psr->rt);
	ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len);

	return ice_parser_rt_execute(&psr->rt, rslt);
}

/**
 * ice_parser_result_dump - dump a parser result info
 * @hw: pointer to the hardware structure
 * @rslt: parser result info to dump
 */
void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt)
{
	struct device *dev = ice_hw_to_dev(hw);
	int i;

	dev_info(dev, "ptype = %d\n", rslt->ptype);
	for (i = 0; i < rslt->po_num; i++)
		dev_info(dev, "proto = %d, offset = %d\n",
			 rslt->po[i].proto_id, rslt->po[i].offset);

	dev_info(dev, "flags_psr = 0x%016llx\n", rslt->flags_psr);
	dev_info(dev, "flags_pkt = 0x%016llx\n", rslt->flags_pkt);
	dev_info(dev, "flags_sw = 0x%04x\n", rslt->flags_sw);
	dev_info(dev, "flags_fd = 0x%04x\n", rslt->flags_fd);
	dev_info(dev, "flags_rss = 0x%04x\n", rslt->flags_rss);
}

#define ICE_BT_VLD_KEY	0xFF
#define ICE_BT_INV_KEY	0xFE

/**
 * ice_bst_dvm_set - toggle all boost TCAM entries of a VLAN mode label type
 * @psr: pointer to a parser instance
 * @type: label type to match (DVM or SVM)
 * @on: true to set the entries' VLAN mode key byte valid, false invalid
 */
static void ice_bst_dvm_set(struct ice_parser *psr, enum ice_lbl_type type,
			    bool on)
{
	u16 i = 0;

	while (true) {
		struct ice_bst_tcam_item *item;
		u8 key;

		item = ice_bst_tcam_search(psr->bst_tcam_table,
					   psr->bst_lbl_table,
					   type, &i);
		if (!item)
			break;

		key = on ? ICE_BT_VLD_KEY : ICE_BT_INV_KEY;
		item->key[ICE_BT_VM_OFF] = key;
		item->key_inv[ICE_BT_VM_OFF] = key;
		i++;
	}
}

/**
 * ice_parser_dvm_set - configure double vlan mode for parser
 * @psr: pointer to a parser instance
 * @on: true to turn on; false to turn off
 */
void ice_parser_dvm_set(struct ice_parser *psr, bool on)
{
	ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_DVM, on);
	ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_SVM, !on);
}

/**
 * ice_tunnel_port_set - add or delete a tunnel UDP port in the boost TCAM
 * @psr: pointer to a parser instance
 * @type: label type of the tunnel (VXLAN, GENEVE or UDP eCPRI)
 * @udp_port: tunnel port number; read byte-wise through
 *            ICE_UDP_PORT_OFF_L/H, so the expected byte order depends on
 *            those defines -- TODO confirm caller convention
 * @on: true to program the port into an empty slot; false to clear a
 *      matching slot
 *
 * Return: 0 on success, -EINVAL if no suitable TCAM slot was found.
 */
static int ice_tunnel_port_set(struct ice_parser *psr, enum ice_lbl_type type,
			       u16 udp_port, bool on)
{
	u8 *buf = (u8 *)&udp_port;
	u16 i = 0;

	while (true) {
		struct ice_bst_tcam_item *item;

		item = ice_bst_tcam_search(psr->bst_tcam_table,
					   psr->bst_lbl_table,
					   type, &i);
		if (!item)
			break;

		/* found empty slot to add */
		if (on && item->key[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY &&
		    item->key_inv[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY) {
			item->key_inv[ICE_BT_TUN_PORT_OFF_L] =
						buf[ICE_UDP_PORT_OFF_L];
			item->key_inv[ICE_BT_TUN_PORT_OFF_H] =
						buf[ICE_UDP_PORT_OFF_H];

			item->key[ICE_BT_TUN_PORT_OFF_L] =
				ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_L];
			item->key[ICE_BT_TUN_PORT_OFF_H] =
				ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_H];

			return 0;
		/* found a matched slot to delete */
		} else if (!on &&
			   (item->key_inv[ICE_BT_TUN_PORT_OFF_L] ==
				buf[ICE_UDP_PORT_OFF_L] ||
			    item->key_inv[ICE_BT_TUN_PORT_OFF_H] ==
				buf[ICE_UDP_PORT_OFF_H])) {
			item->key_inv[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
			item->key_inv[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;

			item->key[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
			item->key[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;

			return 0;
		}
		i++;
	}

	return -EINVAL;
}

/**
 * ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser
 * @psr: pointer to a parser instance
 * @udp_port: vxlan tunnel port in UDP header
 * @on: true to turn on; false to turn off
 *
 * Return: 0 on success or errno on failure.
 */
int ice_parser_vxlan_tunnel_set(struct ice_parser *psr,
				u16 udp_port, bool on)
{
	return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_VXLAN, udp_port, on);
}

/**
 * ice_parser_geneve_tunnel_set - configure geneve tunnel for parser
 * @psr: pointer to a parser instance
 * @udp_port: geneve tunnel port in UDP header
 * @on: true to turn on; false to turn off
 *
 * Return: 0 on success or errno on failure.
 */
int ice_parser_geneve_tunnel_set(struct ice_parser *psr,
				 u16 udp_port, bool on)
{
	return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_GENEVE, udp_port, on);
}

/**
 * ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser
 * @psr: pointer to a parser instance
 * @udp_port: ecpri tunnel port in UDP header
 * @on: true to turn on; false to turn off
 *
 * Return: 0 on success or errno on failure.
 */
int ice_parser_ecpri_tunnel_set(struct ice_parser *psr,
				u16 udp_port, bool on)
{
	return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_UDP_ECPRI,
				   udp_port, on);
}

/**
 * ice_nearest_proto_id - find nearest protocol ID
 * @rslt: pointer to a parser result instance
 * @offset: packet offset to resolve into a protocol-relative offset
 * @proto_id: the protocol ID (output)
 * @proto_off: the protocol offset (output)
 *
 * From the protocols in @rslt, find the nearest protocol that has offset
 * no larger than @offset.
 *
 * Return: true if a protocol was found; the protocol's ID and the offset
 *         relative to its header start are stored in the outputs.
 */
static bool ice_nearest_proto_id(struct ice_parser_result *rslt, u16 offset,
				 u8 *proto_id, u16 *proto_off)
{
	u16 dist = U16_MAX;
	u8 proto = 0;
	int i;

	for (i = 0; i < rslt->po_num; i++) {
		/* only consider protocols starting at or before @offset */
		if (offset < rslt->po[i].offset)
			continue;
		if (offset - rslt->po[i].offset < dist) {
			proto = rslt->po[i].proto_id;
			dist = offset - rslt->po[i].offset;
		}
	}

	/* presumably field extraction works on 16-bit words, so an odd
	 * distance cannot be expressed -- TODO confirm
	 */
	if (dist % 2)
		return false;

	*proto_id = proto;
	*proto_off = dist;

	return true;
}

/* default flag mask to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2
 * In future, the flag masks should learn from DDP
 */
#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW	0x4002
#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL	0x0000
#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD	0x6080
#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS	0x6010

/**
 * ice_parser_profile_init - initialize a FXP profile based on parser result
 * @rslt: an instance of a parser result
 * @pkt_buf: packet data buffer
 * @msk_buf: packet mask buffer
 * @buf_len: packet length
 * @blk: FXP pipeline stage
 * @prof: input/output parameter to save the profile
 *
 * Return: 0 on success or errno on failure.
 */
int ice_parser_profile_init(struct ice_parser_result *rslt,
			    const u8 *pkt_buf, const u8 *msk_buf,
			    int buf_len, enum ice_block blk,
			    struct ice_parser_profile *prof)
{
	u8 proto_id = U8_MAX;
	u16 proto_off = 0;
	u16 off;

	memset(prof, 0, sizeof(*prof));
	set_bit(rslt->ptype, prof->ptypes);
	if (blk == ICE_BLK_SW) {
		prof->flags = rslt->flags_sw;
		prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW;
	} else if (blk == ICE_BLK_ACL) {
		prof->flags = rslt->flags_acl;
		prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL;
	} else if (blk == ICE_BLK_FD) {
		prof->flags = rslt->flags_fd;
		prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD;
	} else if (blk == ICE_BLK_RSS) {
		prof->flags = rslt->flags_rss;
		prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS;
	} else {
		return -EINVAL;
	}

	/* field vectors are built from 16-bit windows over the packet */
	for (off = 0; off < buf_len - 1; off++) {
		/* skip windows that are fully masked out */
		if (msk_buf[off] == 0 && msk_buf[off + 1] == 0)
			continue;
		if (!ice_nearest_proto_id(rslt, off, &proto_id, &proto_off))
			continue;
		if (prof->fv_num >= ICE_PARSER_FV_MAX)
			return -EINVAL;

		prof->fv[prof->fv_num].proto_id = proto_id;
		prof->fv[prof->fv_num].offset = proto_off;
		prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off];
		prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off];
		prof->fv_num++;
	}

	return 0;
}

/**
 * ice_parser_profile_dump - dump an FXP profile info
 * @hw: pointer to the hardware structure
 * @prof: profile info to dump
 */
void ice_parser_profile_dump(struct ice_hw *hw,
			     struct ice_parser_profile *prof)
{
	struct device *dev = ice_hw_to_dev(hw);
	u16 i;

	dev_info(dev, "ptypes:\n");
	for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++)
		if (test_bit(i, prof->ptypes))
			dev_info(dev, "\t%u\n", i);

	for (i = 0; i < prof->fv_num; i++)
		dev_info(dev, "proto = %u, offset = %2u, spec = 0x%04x, mask = 0x%04x\n",
			 prof->fv[i].proto_id, prof->fv[i].offset,
			 prof->fv[i].spec, prof->fv[i].msk);

	dev_info(dev, "flags = 0x%04x\n", prof->flags);
	dev_info(dev, "flags_msk = 0x%04x\n", prof->flags_msk);
}
+540
drivers/net/ethernet/intel/ice/ice_parser.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2024 Intel Corporation */ 3 + 4 + #ifndef _ICE_PARSER_H_ 5 + #define _ICE_PARSER_H_ 6 + 7 + #define ICE_SEC_DATA_OFFSET 4 8 + #define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48 9 + #define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24 10 + #define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16 11 + #define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17 12 + #define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12 13 + #define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13 14 + #define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88 15 + #define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24 16 + #define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8 17 + #define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24 18 + #define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1 19 + 20 + #define ICE_SEC_LBL_DATA_OFFSET 2 21 + #define ICE_SID_LBL_ENTRY_SIZE 66 22 + 23 + /*** ICE_SID_RXPARSER_IMEM section ***/ 24 + #define ICE_IMEM_TABLE_SIZE 192 25 + 26 + /* TCAM boost Master; if bit is set, and TCAM hit, TCAM output overrides iMEM 27 + * output. 
28 + */ 29 + struct ice_bst_main { 30 + bool alu0; 31 + bool alu1; 32 + bool alu2; 33 + bool pg; 34 + }; 35 + 36 + struct ice_bst_keybuilder { 37 + u8 prio; /* 0-3: PG precedence within ALUs (3 highest) */ 38 + bool tsr_ctrl; /* TCAM Search Register control */ 39 + }; 40 + 41 + /* Next protocol Key builder */ 42 + struct ice_np_keybuilder { 43 + u8 opc; 44 + u8 start_reg0; 45 + u8 len_reg1; 46 + }; 47 + 48 + enum ice_np_keybuilder_opcode { 49 + ICE_NPKB_OPC_EXTRACT = 0, 50 + ICE_NPKB_OPC_BUILD = 1, 51 + ICE_NPKB_OPC_BYPASS = 2, 52 + }; 53 + 54 + /* Parse Graph Key builder */ 55 + struct ice_pg_keybuilder { 56 + bool flag0_ena; 57 + bool flag1_ena; 58 + bool flag2_ena; 59 + bool flag3_ena; 60 + u8 flag0_idx; 61 + u8 flag1_idx; 62 + u8 flag2_idx; 63 + u8 flag3_idx; 64 + u8 alu_reg_idx; 65 + }; 66 + 67 + enum ice_alu_idx { 68 + ICE_ALU0_IDX = 0, 69 + ICE_ALU1_IDX = 1, 70 + ICE_ALU2_IDX = 2, 71 + }; 72 + 73 + enum ice_alu_opcode { 74 + ICE_ALU_PARK = 0, 75 + ICE_ALU_MOV_ADD = 1, 76 + ICE_ALU_ADD = 2, 77 + ICE_ALU_MOV_AND = 4, 78 + ICE_ALU_AND = 5, 79 + ICE_ALU_AND_IMM = 6, 80 + ICE_ALU_MOV_OR = 7, 81 + ICE_ALU_OR = 8, 82 + ICE_ALU_MOV_XOR = 9, 83 + ICE_ALU_XOR = 10, 84 + ICE_ALU_NOP = 11, 85 + ICE_ALU_BR = 12, 86 + ICE_ALU_BREQ = 13, 87 + ICE_ALU_BRNEQ = 14, 88 + ICE_ALU_BRGT = 15, 89 + ICE_ALU_BRLT = 16, 90 + ICE_ALU_BRGEQ = 17, 91 + ICE_ALU_BRLEG = 18, 92 + ICE_ALU_SETEQ = 19, 93 + ICE_ALU_ANDEQ = 20, 94 + ICE_ALU_OREQ = 21, 95 + ICE_ALU_SETNEQ = 22, 96 + ICE_ALU_ANDNEQ = 23, 97 + ICE_ALU_ORNEQ = 24, 98 + ICE_ALU_SETGT = 25, 99 + ICE_ALU_ANDGT = 26, 100 + ICE_ALU_ORGT = 27, 101 + ICE_ALU_SETLT = 28, 102 + ICE_ALU_ANDLT = 29, 103 + ICE_ALU_ORLT = 30, 104 + ICE_ALU_MOV_SUB = 31, 105 + ICE_ALU_SUB = 32, 106 + ICE_ALU_INVALID = 64, 107 + }; 108 + 109 + enum ice_proto_off_opcode { 110 + ICE_PO_OFF_REMAIN = 0, 111 + ICE_PO_OFF_HDR_ADD = 1, 112 + ICE_PO_OFF_HDR_SUB = 2, 113 + }; 114 + 115 + struct ice_alu { 116 + enum ice_alu_opcode opc; 117 + u8 src_start; 118 + u8 
src_len; 119 + bool shift_xlate_sel; 120 + u8 shift_xlate_key; 121 + u8 src_reg_id; 122 + u8 dst_reg_id; 123 + bool inc0; 124 + bool inc1; 125 + u8 proto_offset_opc; 126 + u8 proto_offset; 127 + u8 branch_addr; 128 + u16 imm; 129 + bool dedicate_flags_ena; 130 + u8 dst_start; 131 + u8 dst_len; 132 + bool flags_extr_imm; 133 + u8 flags_start_imm; 134 + }; 135 + 136 + /* Parser program code (iMEM) */ 137 + struct ice_imem_item { 138 + u16 idx; 139 + struct ice_bst_main b_m; 140 + struct ice_bst_keybuilder b_kb; 141 + u8 pg_prio; 142 + struct ice_np_keybuilder np_kb; 143 + struct ice_pg_keybuilder pg_kb; 144 + struct ice_alu alu0; 145 + struct ice_alu alu1; 146 + struct ice_alu alu2; 147 + }; 148 + 149 + /*** ICE_SID_RXPARSER_METADATA_INIT section ***/ 150 + #define ICE_METAINIT_TABLE_SIZE 16 151 + 152 + /* Metadata Initialization item */ 153 + struct ice_metainit_item { 154 + u16 idx; 155 + 156 + u8 tsr; /* TCAM Search key Register */ 157 + u16 ho; /* Header Offset register */ 158 + u16 pc; /* Program Counter register */ 159 + u16 pg_rn; /* Parse Graph Root Node */ 160 + u8 cd; /* Control Domain ID */ 161 + 162 + /* General Purpose Registers */ 163 + bool gpr_a_ctrl; 164 + u8 gpr_a_data_mdid; 165 + u8 gpr_a_data_start; 166 + u8 gpr_a_data_len; 167 + u8 gpr_a_id; 168 + 169 + bool gpr_b_ctrl; 170 + u8 gpr_b_data_mdid; 171 + u8 gpr_b_data_start; 172 + u8 gpr_b_data_len; 173 + u8 gpr_b_id; 174 + 175 + bool gpr_c_ctrl; 176 + u8 gpr_c_data_mdid; 177 + u8 gpr_c_data_start; 178 + u8 gpr_c_data_len; 179 + u8 gpr_c_id; 180 + 181 + bool gpr_d_ctrl; 182 + u8 gpr_d_data_mdid; 183 + u8 gpr_d_data_start; 184 + u8 gpr_d_data_len; 185 + u8 gpr_d_id; 186 + 187 + u64 flags; /* Initial value for all flags */ 188 + }; 189 + 190 + /*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL, 191 + * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_CAM 192 + * sections ***/ 193 + #define ICE_PG_CAM_TABLE_SIZE 2048 194 + #define ICE_PG_SP_CAM_TABLE_SIZE 128 195 + #define 
ICE_PG_NM_CAM_TABLE_SIZE 1024 196 + #define ICE_PG_NM_SP_CAM_TABLE_SIZE 64 197 + 198 + struct ice_pg_cam_key { 199 + bool valid; 200 + struct_group_attr(val, __packed, 201 + u16 node_id; /* Node ID of protocol in parse graph */ 202 + bool flag0; 203 + bool flag1; 204 + bool flag2; 205 + bool flag3; 206 + u8 boost_idx; /* Boost TCAM match index */ 207 + u16 alu_reg; 208 + u32 next_proto; /* next Protocol value (must be last) */ 209 + ); 210 + }; 211 + 212 + struct ice_pg_nm_cam_key { 213 + bool valid; 214 + struct_group_attr(val, __packed, 215 + u16 node_id; 216 + bool flag0; 217 + bool flag1; 218 + bool flag2; 219 + bool flag3; 220 + u8 boost_idx; 221 + u16 alu_reg; 222 + ); 223 + }; 224 + 225 + struct ice_pg_cam_action { 226 + u16 next_node; /* Parser Node ID for the next round */ 227 + u8 next_pc; /* next Program Counter */ 228 + bool is_pg; /* is protocol group */ 229 + u8 proto_id; /* protocol ID or proto group ID */ 230 + bool is_mg; /* is marker group */ 231 + u8 marker_id; /* marker ID or marker group ID */ 232 + bool is_last_round; 233 + bool ho_polarity; /* header offset polarity */ 234 + u16 ho_inc; 235 + }; 236 + 237 + /* Parse Graph item */ 238 + struct ice_pg_cam_item { 239 + u16 idx; 240 + struct ice_pg_cam_key key; 241 + struct ice_pg_cam_action action; 242 + }; 243 + 244 + /* Parse Graph No Match item */ 245 + struct ice_pg_nm_cam_item { 246 + u16 idx; 247 + struct ice_pg_nm_cam_key key; 248 + struct ice_pg_cam_action action; 249 + }; 250 + 251 + struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table, 252 + int size, struct ice_pg_cam_key *key); 253 + struct ice_pg_nm_cam_item * 254 + ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, 255 + struct ice_pg_cam_key *key); 256 + 257 + /*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/ 258 + #define ICE_BST_TCAM_TABLE_SIZE 256 259 + #define ICE_BST_TCAM_KEY_SIZE 20 260 + #define ICE_BST_KEY_TCAM_SIZE 19 261 + 262 + /* Boost TCAM item */ 263 + struct 
ice_bst_tcam_item { 264 + u16 addr; 265 + u8 key[ICE_BST_TCAM_KEY_SIZE]; 266 + u8 key_inv[ICE_BST_TCAM_KEY_SIZE]; 267 + u8 hit_idx_grp; 268 + u8 pg_prio; 269 + struct ice_np_keybuilder np_kb; 270 + struct ice_pg_keybuilder pg_kb; 271 + struct ice_alu alu0; 272 + struct ice_alu alu1; 273 + struct ice_alu alu2; 274 + }; 275 + 276 + #define ICE_LBL_LEN 64 277 + #define ICE_LBL_BST_DVM "BOOST_MAC_VLAN_DVM" 278 + #define ICE_LBL_BST_SVM "BOOST_MAC_VLAN_SVM" 279 + #define ICE_LBL_TNL_VXLAN "TNL_VXLAN" 280 + #define ICE_LBL_TNL_GENEVE "TNL_GENEVE" 281 + #define ICE_LBL_TNL_UDP_ECPRI "TNL_UDP_ECPRI" 282 + 283 + enum ice_lbl_type { 284 + ICE_LBL_BST_TYPE_UNKNOWN, 285 + ICE_LBL_BST_TYPE_DVM, 286 + ICE_LBL_BST_TYPE_SVM, 287 + ICE_LBL_BST_TYPE_VXLAN, 288 + ICE_LBL_BST_TYPE_GENEVE, 289 + ICE_LBL_BST_TYPE_UDP_ECPRI, 290 + }; 291 + 292 + struct ice_lbl_item { 293 + u16 idx; 294 + char label[ICE_LBL_LEN]; 295 + 296 + /* must be at the end, not part of the DDP section */ 297 + enum ice_lbl_type type; 298 + }; 299 + 300 + struct ice_bst_tcam_item * 301 + ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat); 302 + struct ice_bst_tcam_item * 303 + ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, 304 + struct ice_lbl_item *lbl_table, 305 + enum ice_lbl_type type, u16 *start); 306 + 307 + /*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/ 308 + #define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024 309 + #define ICE_PTYPE_MK_TCAM_KEY_SIZE 10 310 + 311 + struct ice_ptype_mk_tcam_item { 312 + u16 address; 313 + u16 ptype; 314 + u8 key[ICE_PTYPE_MK_TCAM_KEY_SIZE]; 315 + u8 key_inv[ICE_PTYPE_MK_TCAM_KEY_SIZE]; 316 + } __packed; 317 + 318 + struct ice_ptype_mk_tcam_item * 319 + ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table, 320 + u8 *pat, int len); 321 + /*** ICE_SID_RXPARSER_MARKER_GRP section ***/ 322 + #define ICE_MK_GRP_TABLE_SIZE 128 323 + #define ICE_MK_COUNT_PER_GRP 8 324 + 325 + /* Marker Group item */ 326 + struct ice_mk_grp_item { 327 + int idx; 328 + u8 
markers[ICE_MK_COUNT_PER_GRP]; 329 + }; 330 + 331 + /*** ICE_SID_RXPARSER_PROTO_GRP section ***/ 332 + #define ICE_PROTO_COUNT_PER_GRP 8 333 + #define ICE_PROTO_GRP_TABLE_SIZE 192 334 + #define ICE_PROTO_GRP_ITEM_SIZE 22 335 + struct ice_proto_off { 336 + bool polarity; /* true: positive, false: negative */ 337 + u8 proto_id; 338 + u16 offset; /* 10 bit protocol offset */ 339 + }; 340 + 341 + /* Protocol Group item */ 342 + struct ice_proto_grp_item { 343 + u16 idx; 344 + struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP]; 345 + }; 346 + 347 + /*** ICE_SID_RXPARSER_FLAG_REDIR section ***/ 348 + #define ICE_FLG_RD_TABLE_SIZE 64 349 + #define ICE_FLG_RDT_SIZE 64 350 + 351 + /* Flags Redirection item */ 352 + struct ice_flg_rd_item { 353 + u16 idx; 354 + bool expose; 355 + u8 intr_flg_id; /* Internal Flag ID */ 356 + }; 357 + 358 + u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg); 359 + 360 + /*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL, 361 + * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS 362 + * sections ***/ 363 + #define ICE_XLT_KB_FLAG0_14_CNT 15 364 + #define ICE_XLT_KB_TBL_CNT 8 365 + #define ICE_XLT_KB_TBL_ENTRY_SIZE 24 366 + 367 + struct ice_xlt_kb_entry { 368 + u8 xlt1_ad_sel; 369 + u8 xlt2_ad_sel; 370 + u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT]; 371 + u8 xlt1_md_sel; 372 + u8 xlt2_md_sel; 373 + }; 374 + 375 + /* XLT Key Builder */ 376 + struct ice_xlt_kb { 377 + u8 xlt1_pm; /* XLT1 Partition Mode */ 378 + u8 xlt2_pm; /* XLT2 Partition Mode */ 379 + u8 prof_id_pm; /* Profile ID Partition Mode */ 380 + u64 flag15; 381 + 382 + struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT]; 383 + }; 384 + 385 + u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag); 386 + 387 + /*** Parser API ***/ 388 + #define ICE_GPR_HV_IDX 64 389 + #define ICE_GPR_HV_SIZE 32 390 + #define ICE_GPR_ERR_IDX 84 391 + #define ICE_GPR_FLG_IDX 104 392 + #define ICE_GPR_FLG_SIZE 16 393 + 394 + #define ICE_GPR_TSR_IDX 108 /* TSR: TCAM Search 
Register */ 395 + #define ICE_GPR_NN_IDX 109 /* NN: Next Parsing Cycle Node ID */ 396 + #define ICE_GPR_HO_IDX 110 /* HO: Next Parsing Cycle hdr Offset */ 397 + #define ICE_GPR_NP_IDX 111 /* NP: Next Parsing Cycle */ 398 + 399 + #define ICE_PARSER_MAX_PKT_LEN 504 400 + #define ICE_PARSER_PKT_REV 32 401 + #define ICE_PARSER_GPR_NUM 128 402 + #define ICE_PARSER_FLG_NUM 64 403 + #define ICE_PARSER_ERR_NUM 16 404 + #define ICE_BST_KEY_SIZE 10 405 + #define ICE_MARKER_ID_SIZE 9 406 + #define ICE_MARKER_MAX_SIZE \ 407 + (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1) 408 + #define ICE_MARKER_ID_NUM 8 409 + #define ICE_PO_PAIR_SIZE 256 410 + 411 + struct ice_gpr_pu { 412 + /* array of flags to indicate if GRP needs to be updated */ 413 + bool gpr_val_upd[ICE_PARSER_GPR_NUM]; 414 + u16 gpr_val[ICE_PARSER_GPR_NUM]; 415 + u64 flg_msk; 416 + u64 flg_val; 417 + u16 err_msk; 418 + u16 err_val; 419 + }; 420 + 421 + enum ice_pg_prio { 422 + ICE_PG_P0 = 0, 423 + ICE_PG_P1 = 1, 424 + ICE_PG_P2 = 2, 425 + ICE_PG_P3 = 3, 426 + }; 427 + 428 + struct ice_parser_rt { 429 + struct ice_parser *psr; 430 + u16 gpr[ICE_PARSER_GPR_NUM]; 431 + u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV]; 432 + u16 pkt_len; 433 + u16 po; 434 + u8 bst_key[ICE_BST_KEY_SIZE]; 435 + struct ice_pg_cam_key pg_key; 436 + struct ice_alu *alu0; 437 + struct ice_alu *alu1; 438 + struct ice_alu *alu2; 439 + struct ice_pg_cam_action *action; 440 + u8 pg_prio; 441 + struct ice_gpr_pu pu; 442 + u8 markers[ICE_MARKER_ID_SIZE]; 443 + bool protocols[ICE_PO_PAIR_SIZE]; 444 + u16 offsets[ICE_PO_PAIR_SIZE]; 445 + }; 446 + 447 + struct ice_parser_proto_off { 448 + u8 proto_id; /* hardware protocol ID */ 449 + u16 offset; /* offset from the start of the protocol header */ 450 + }; 451 + 452 + #define ICE_PARSER_PROTO_OFF_PAIR_SIZE 16 453 + #define ICE_PARSER_FLAG_PSR_SIZE 8 454 + #define ICE_PARSER_FV_SIZE 48 455 + #define ICE_PARSER_FV_MAX 24 456 + #define ICE_BT_TUN_PORT_OFF_H 16 457 + #define ICE_BT_TUN_PORT_OFF_L 15 458 + 
#define ICE_BT_VM_OFF 0 459 + #define ICE_UDP_PORT_OFF_H 1 460 + #define ICE_UDP_PORT_OFF_L 0 461 + 462 + struct ice_parser_result { 463 + u16 ptype; /* 16 bits hardware PTYPE */ 464 + /* array of protocol and header offset pairs */ 465 + struct ice_parser_proto_off po[ICE_PARSER_PROTO_OFF_PAIR_SIZE]; 466 + int po_num; /* # of protocol-offset pairs must <= 16 */ 467 + u64 flags_psr; /* parser flags */ 468 + u64 flags_pkt; /* packet flags */ 469 + u16 flags_sw; /* key builder flags for SW */ 470 + u16 flags_acl; /* key builder flags for ACL */ 471 + u16 flags_fd; /* key builder flags for FD */ 472 + u16 flags_rss; /* key builder flags for RSS */ 473 + }; 474 + 475 + void ice_parser_rt_reset(struct ice_parser_rt *rt); 476 + void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, 477 + int pkt_len); 478 + int ice_parser_rt_execute(struct ice_parser_rt *rt, 479 + struct ice_parser_result *rslt); 480 + 481 + struct ice_parser { 482 + struct ice_hw *hw; /* pointer to the hardware structure */ 483 + 484 + struct ice_imem_item *imem_table; 485 + struct ice_metainit_item *mi_table; 486 + 487 + struct ice_pg_cam_item *pg_cam_table; 488 + struct ice_pg_cam_item *pg_sp_cam_table; 489 + struct ice_pg_nm_cam_item *pg_nm_cam_table; 490 + struct ice_pg_nm_cam_item *pg_nm_sp_cam_table; 491 + 492 + struct ice_bst_tcam_item *bst_tcam_table; 493 + struct ice_lbl_item *bst_lbl_table; 494 + struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table; 495 + struct ice_mk_grp_item *mk_grp_table; 496 + struct ice_proto_grp_item *proto_grp_table; 497 + struct ice_flg_rd_item *flg_rd_table; 498 + 499 + struct ice_xlt_kb *xlt_kb_sw; 500 + struct ice_xlt_kb *xlt_kb_acl; 501 + struct ice_xlt_kb *xlt_kb_fd; 502 + struct ice_xlt_kb *xlt_kb_rss; 503 + 504 + struct ice_parser_rt rt; 505 + }; 506 + 507 + struct ice_parser *ice_parser_create(struct ice_hw *hw); 508 + void ice_parser_destroy(struct ice_parser *psr); 509 + void ice_parser_dvm_set(struct ice_parser *psr, bool on); 510 + int 
ice_parser_vxlan_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); 511 + int ice_parser_geneve_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); 512 + int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); 513 + int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf, 514 + int pkt_len, struct ice_parser_result *rslt); 515 + void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt); 516 + 517 + struct ice_parser_fv { 518 + u8 proto_id; /* hardware protocol ID */ 519 + u16 offset; /* offset from the start of the protocol header */ 520 + u16 spec; /* pattern to match */ 521 + u16 msk; /* pattern mask */ 522 + }; 523 + 524 + struct ice_parser_profile { 525 + /* array of field vectors */ 526 + struct ice_parser_fv fv[ICE_PARSER_FV_SIZE]; 527 + int fv_num; /* # of field vectors must <= 48 */ 528 + u16 flags; /* key builder flags */ 529 + u16 flags_msk; /* key builder flag mask */ 530 + 531 + DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPE bitmap */ 532 + }; 533 + 534 + int ice_parser_profile_init(struct ice_parser_result *rslt, 535 + const u8 *pkt_buf, const u8 *msk_buf, 536 + int buf_len, enum ice_block blk, 537 + struct ice_parser_profile *prof); 538 + void ice_parser_profile_dump(struct ice_hw *hw, 539 + struct ice_parser_profile *prof); 540 + #endif /* _ICE_PARSER_H_ */
+861
drivers/net/ethernet/intel/ice/ice_parser_rt.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2024 Intel Corporation */ 3 + 4 + #include "ice_common.h" 5 + 6 + static void ice_rt_tsr_set(struct ice_parser_rt *rt, u16 tsr) 7 + { 8 + rt->gpr[ICE_GPR_TSR_IDX] = tsr; 9 + } 10 + 11 + static void ice_rt_ho_set(struct ice_parser_rt *rt, u16 ho) 12 + { 13 + rt->gpr[ICE_GPR_HO_IDX] = ho; 14 + memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE); 15 + } 16 + 17 + static void ice_rt_np_set(struct ice_parser_rt *rt, u16 pc) 18 + { 19 + rt->gpr[ICE_GPR_NP_IDX] = pc; 20 + } 21 + 22 + static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node) 23 + { 24 + rt->gpr[ICE_GPR_NN_IDX] = node; 25 + } 26 + 27 + static void 28 + ice_rt_flag_set(struct ice_parser_rt *rt, unsigned int idx, bool set) 29 + { 30 + struct ice_hw *hw = rt->psr->hw; 31 + unsigned int word, id; 32 + 33 + word = idx / ICE_GPR_FLG_SIZE; 34 + id = idx % ICE_GPR_FLG_SIZE; 35 + 36 + if (set) { 37 + rt->gpr[ICE_GPR_FLG_IDX + word] |= (u16)BIT(id); 38 + ice_debug(hw, ICE_DBG_PARSER, "Set parser flag %u\n", idx); 39 + } else { 40 + rt->gpr[ICE_GPR_FLG_IDX + word] &= ~(u16)BIT(id); 41 + ice_debug(hw, ICE_DBG_PARSER, "Clear parser flag %u\n", idx); 42 + } 43 + } 44 + 45 + static void ice_rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val) 46 + { 47 + struct ice_hw *hw = rt->psr->hw; 48 + 49 + if (idx == ICE_GPR_HO_IDX) 50 + ice_rt_ho_set(rt, val); 51 + else 52 + rt->gpr[idx] = val; 53 + 54 + ice_debug(hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", idx, val); 55 + } 56 + 57 + static void ice_rt_err_set(struct ice_parser_rt *rt, unsigned int idx, bool set) 58 + { 59 + struct ice_hw *hw = rt->psr->hw; 60 + 61 + if (set) { 62 + rt->gpr[ICE_GPR_ERR_IDX] |= (u16)BIT(idx); 63 + ice_debug(hw, ICE_DBG_PARSER, "Set parser error %u\n", idx); 64 + } else { 65 + rt->gpr[ICE_GPR_ERR_IDX] &= ~(u16)BIT(idx); 66 + ice_debug(hw, ICE_DBG_PARSER, "Reset parser error %u\n", idx); 67 + } 68 + } 69 + 70 + /** 71 + * ice_parser_rt_reset - reset the parser 
runtime 72 + * @rt: pointer to the parser runtime 73 + */ 74 + void ice_parser_rt_reset(struct ice_parser_rt *rt) 75 + { 76 + struct ice_parser *psr = rt->psr; 77 + struct ice_metainit_item *mi; 78 + unsigned int i; 79 + 80 + mi = &psr->mi_table[0]; 81 + 82 + memset(rt, 0, sizeof(*rt)); 83 + rt->psr = psr; 84 + 85 + ice_rt_tsr_set(rt, mi->tsr); 86 + ice_rt_ho_set(rt, mi->ho); 87 + ice_rt_np_set(rt, mi->pc); 88 + ice_rt_nn_set(rt, mi->pg_rn); 89 + 90 + for (i = 0; i < ICE_PARSER_FLG_NUM; i++) { 91 + if (mi->flags & BIT(i)) 92 + ice_rt_flag_set(rt, i, true); 93 + } 94 + } 95 + 96 + /** 97 + * ice_parser_rt_pktbuf_set - set a packet into parser runtime 98 + * @rt: pointer to the parser runtime 99 + * @pkt_buf: buffer with packet data 100 + * @pkt_len: packet buffer length 101 + */ 102 + void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, 103 + int pkt_len) 104 + { 105 + int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len); 106 + u16 ho = rt->gpr[ICE_GPR_HO_IDX]; 107 + 108 + memcpy(rt->pkt_buf, pkt_buf, len); 109 + rt->pkt_len = pkt_len; 110 + 111 + memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE); 112 + } 113 + 114 + static void ice_bst_key_init(struct ice_parser_rt *rt, 115 + struct ice_imem_item *imem) 116 + { 117 + u8 tsr = (u8)rt->gpr[ICE_GPR_TSR_IDX]; 118 + u16 ho = rt->gpr[ICE_GPR_HO_IDX]; 119 + u8 *key = rt->bst_key; 120 + int idd, i; 121 + 122 + idd = ICE_BST_TCAM_KEY_SIZE - 1; 123 + if (imem->b_kb.tsr_ctrl) 124 + key[idd] = tsr; 125 + else 126 + key[idd] = imem->b_kb.prio; 127 + 128 + idd = ICE_BST_KEY_TCAM_SIZE - 1; 129 + for (i = idd; i >= 0; i--) { 130 + int j; 131 + 132 + j = ho + idd - i; 133 + if (j < ICE_PARSER_MAX_PKT_LEN) 134 + key[i] = rt->pkt_buf[ho + idd - i]; 135 + else 136 + key[i] = 0; 137 + } 138 + 139 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n"); 140 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", 141 + key[0], key[1], key[2], 
key[3], key[4], 142 + key[5], key[6], key[7], key[8], key[9]); 143 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n"); 144 + } 145 + 146 + static u16 ice_bit_rev_u16(u16 v, int len) 147 + { 148 + return bitrev16(v) >> (BITS_PER_TYPE(v) - len); 149 + } 150 + 151 + static u32 ice_bit_rev_u32(u32 v, int len) 152 + { 153 + return bitrev32(v) >> (BITS_PER_TYPE(v) - len); 154 + } 155 + 156 + static u32 ice_hv_bit_sel(struct ice_parser_rt *rt, int start, int len) 157 + { 158 + int offset; 159 + u32 buf[2]; 160 + u64 val; 161 + 162 + offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16)); 163 + 164 + memcpy(buf, &rt->gpr[offset], sizeof(buf)); 165 + 166 + buf[0] = bitrev8x4(buf[0]); 167 + buf[1] = bitrev8x4(buf[1]); 168 + 169 + val = *(u64 *)buf; 170 + val >>= start % BITS_PER_TYPE(u16); 171 + 172 + return ice_bit_rev_u32(val, len); 173 + } 174 + 175 + static u32 ice_pk_build(struct ice_parser_rt *rt, 176 + struct ice_np_keybuilder *kb) 177 + { 178 + if (kb->opc == ICE_NPKB_OPC_EXTRACT) 179 + return ice_hv_bit_sel(rt, kb->start_reg0, kb->len_reg1); 180 + else if (kb->opc == ICE_NPKB_OPC_BUILD) 181 + return rt->gpr[kb->start_reg0] | 182 + ((u32)rt->gpr[kb->len_reg1] << BITS_PER_TYPE(u16)); 183 + else if (kb->opc == ICE_NPKB_OPC_BYPASS) 184 + return 0; 185 + 186 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported OP Code %u\n", 187 + kb->opc); 188 + return U32_MAX; 189 + } 190 + 191 + static bool ice_flag_get(struct ice_parser_rt *rt, unsigned int index) 192 + { 193 + int word = index / ICE_GPR_FLG_SIZE; 194 + int id = index % ICE_GPR_FLG_SIZE; 195 + 196 + return !!(rt->gpr[ICE_GPR_FLG_IDX + word] & (u16)BIT(id)); 197 + } 198 + 199 + static int ice_imem_pgk_init(struct ice_parser_rt *rt, 200 + struct ice_imem_item *imem) 201 + { 202 + memset(&rt->pg_key, 0, sizeof(rt->pg_key)); 203 + rt->pg_key.next_proto = ice_pk_build(rt, &imem->np_kb); 204 + if (rt->pg_key.next_proto == U32_MAX) 205 + return -EINVAL; 206 + 207 + if (imem->pg_kb.flag0_ena) 208 + rt->pg_key.flag0 = 
ice_flag_get(rt, imem->pg_kb.flag0_idx); 209 + if (imem->pg_kb.flag1_ena) 210 + rt->pg_key.flag1 = ice_flag_get(rt, imem->pg_kb.flag1_idx); 211 + if (imem->pg_kb.flag2_ena) 212 + rt->pg_key.flag2 = ice_flag_get(rt, imem->pg_kb.flag2_idx); 213 + if (imem->pg_kb.flag3_ena) 214 + rt->pg_key.flag3 = ice_flag_get(rt, imem->pg_kb.flag3_idx); 215 + 216 + rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx]; 217 + rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX]; 218 + 219 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n", 220 + rt->pg_key.node_id, 221 + rt->pg_key.flag0, 222 + rt->pg_key.flag1, 223 + rt->pg_key.flag2, 224 + rt->pg_key.flag3, 225 + rt->pg_key.boost_idx, 226 + rt->pg_key.alu_reg, 227 + rt->pg_key.next_proto); 228 + 229 + return 0; 230 + } 231 + 232 + static void ice_imem_alu0_set(struct ice_parser_rt *rt, 233 + struct ice_imem_item *imem) 234 + { 235 + rt->alu0 = &imem->alu0; 236 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n", 237 + imem->idx); 238 + } 239 + 240 + static void ice_imem_alu1_set(struct ice_parser_rt *rt, 241 + struct ice_imem_item *imem) 242 + { 243 + rt->alu1 = &imem->alu1; 244 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n", 245 + imem->idx); 246 + } 247 + 248 + static void ice_imem_alu2_set(struct ice_parser_rt *rt, 249 + struct ice_imem_item *imem) 250 + { 251 + rt->alu2 = &imem->alu2; 252 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n", 253 + imem->idx); 254 + } 255 + 256 + static void ice_imem_pgp_set(struct ice_parser_rt *rt, 257 + struct ice_imem_item *imem) 258 + { 259 + rt->pg_prio = imem->pg_prio; 260 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n", 261 + rt->pg_prio, imem->idx); 262 + } 263 + 264 + static int ice_bst_pgk_init(struct ice_parser_rt *rt, 265 + struct ice_bst_tcam_item *bst) 266 + { 267 + memset(&rt->pg_key, 0, 
sizeof(rt->pg_key)); 268 + rt->pg_key.boost_idx = bst->hit_idx_grp; 269 + rt->pg_key.next_proto = ice_pk_build(rt, &bst->np_kb); 270 + if (rt->pg_key.next_proto == U32_MAX) 271 + return -EINVAL; 272 + 273 + if (bst->pg_kb.flag0_ena) 274 + rt->pg_key.flag0 = ice_flag_get(rt, bst->pg_kb.flag0_idx); 275 + if (bst->pg_kb.flag1_ena) 276 + rt->pg_key.flag1 = ice_flag_get(rt, bst->pg_kb.flag1_idx); 277 + if (bst->pg_kb.flag2_ena) 278 + rt->pg_key.flag2 = ice_flag_get(rt, bst->pg_kb.flag2_idx); 279 + if (bst->pg_kb.flag3_ena) 280 + rt->pg_key.flag3 = ice_flag_get(rt, bst->pg_kb.flag3_idx); 281 + 282 + rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx]; 283 + rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX]; 284 + 285 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n", 286 + rt->pg_key.node_id, 287 + rt->pg_key.flag0, 288 + rt->pg_key.flag1, 289 + rt->pg_key.flag2, 290 + rt->pg_key.flag3, 291 + rt->pg_key.boost_idx, 292 + rt->pg_key.alu_reg, 293 + rt->pg_key.next_proto); 294 + 295 + return 0; 296 + } 297 + 298 + static void ice_bst_alu0_set(struct ice_parser_rt *rt, 299 + struct ice_bst_tcam_item *bst) 300 + { 301 + rt->alu0 = &bst->alu0; 302 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n", 303 + bst->addr); 304 + } 305 + 306 + static void ice_bst_alu1_set(struct ice_parser_rt *rt, 307 + struct ice_bst_tcam_item *bst) 308 + { 309 + rt->alu1 = &bst->alu1; 310 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n", 311 + bst->addr); 312 + } 313 + 314 + static void ice_bst_alu2_set(struct ice_parser_rt *rt, 315 + struct ice_bst_tcam_item *bst) 316 + { 317 + rt->alu2 = &bst->alu2; 318 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n", 319 + bst->addr); 320 + } 321 + 322 + static void ice_bst_pgp_set(struct ice_parser_rt *rt, 323 + struct ice_bst_tcam_item *bst) 324 + { 325 + rt->pg_prio = 
bst->pg_prio; 326 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n", 327 + rt->pg_prio, bst->addr); 328 + } 329 + 330 + static struct ice_pg_cam_item *ice_rt_pg_cam_match(struct ice_parser_rt *rt) 331 + { 332 + struct ice_parser *psr = rt->psr; 333 + struct ice_pg_cam_item *item; 334 + 335 + item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE, 336 + &rt->pg_key); 337 + if (!item) 338 + item = ice_pg_cam_match(psr->pg_sp_cam_table, 339 + ICE_PG_SP_CAM_TABLE_SIZE, &rt->pg_key); 340 + return item; 341 + } 342 + 343 + static 344 + struct ice_pg_nm_cam_item *ice_rt_pg_nm_cam_match(struct ice_parser_rt *rt) 345 + { 346 + struct ice_parser *psr = rt->psr; 347 + struct ice_pg_nm_cam_item *item; 348 + 349 + item = ice_pg_nm_cam_match(psr->pg_nm_cam_table, 350 + ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key); 351 + 352 + if (!item) 353 + item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table, 354 + ICE_PG_NM_SP_CAM_TABLE_SIZE, 355 + &rt->pg_key); 356 + return item; 357 + } 358 + 359 + static void ice_gpr_add(struct ice_parser_rt *rt, int idx, u16 val) 360 + { 361 + rt->pu.gpr_val_upd[idx] = true; 362 + rt->pu.gpr_val[idx] = val; 363 + 364 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n", 365 + idx, val); 366 + } 367 + 368 + static void ice_pg_exe(struct ice_parser_rt *rt) 369 + { 370 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n"); 371 + 372 + ice_gpr_add(rt, ICE_GPR_NP_IDX, rt->action->next_pc); 373 + ice_gpr_add(rt, ICE_GPR_NN_IDX, rt->action->next_node); 374 + 375 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n"); 376 + } 377 + 378 + static void ice_flg_add(struct ice_parser_rt *rt, int idx, bool val) 379 + { 380 + rt->pu.flg_msk |= BIT(idx); 381 + if (val) 382 + rt->pu.flg_val |= BIT(idx); 383 + else 384 + rt->pu.flg_val &= ~BIT(idx); 385 + 386 + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n", 387 + idx, 
		      val);
}

/* Stage the per-bit flag updates requested by an ALU instruction.
 * With flags_extr_imm set, flag values come directly from the
 * flags_start_imm immediate; otherwise each bit is extracted from the
 * header-value GPR window via ice_hv_bit_sel().
 */
static void ice_flg_update(struct ice_parser_rt *rt, struct ice_alu *alu)
{
	u32 hv_bit_sel;
	int i;

	if (!alu->dedicate_flags_ena)
		return;

	if (alu->flags_extr_imm) {
		for (i = 0; i < alu->dst_len; i++)
			ice_flg_add(rt, alu->dst_start + i,
				    !!(alu->flags_start_imm & BIT(i)));
	} else {
		for (i = 0; i < alu->dst_len; i++) {
			hv_bit_sel = ice_hv_bit_sel(rt,
						    alu->flags_start_imm + i,
						    1);
			ice_flg_add(rt, alu->dst_start + i, !!hv_bit_sel);
		}
	}
}

/* Recompute the current protocol offset (rt->po) relative to the
 * header-offset GPR, according to the ALU's proto_offset opcode:
 * add to, subtract from, or take the header offset as-is.
 * Opcodes other than the three handled here leave rt->po unchanged.
 */
static void ice_po_update(struct ice_parser_rt *rt, struct ice_alu *alu)
{
	if (alu->proto_offset_opc == ICE_PO_OFF_HDR_ADD)
		rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] + alu->proto_offset);
	else if (alu->proto_offset_opc == ICE_PO_OFF_HDR_SUB)
		rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] - alu->proto_offset);
	else if (alu->proto_offset_opc == ICE_PO_OFF_REMAIN)
		rt->po = rt->gpr[ICE_GPR_HO_IDX];

	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n",
		  rt->po);
}

/* Extract a @len-bit field starting at bit @start from the GPR array,
 * after a per-byte bit reversal (the parser HW stores bits reversed).
 *
 * NOTE(review): @reg_idx is accepted but never used; the offset is
 * always computed from ICE_GPR_HV_IDX. Confirm against the callers
 * (ice_alu_exe passes alu->src_reg_id) whether this is intentional.
 */
static u16 ice_reg_bit_sel(struct ice_parser_rt *rt, int reg_idx,
			   int start, int len)
{
	int offset;
	u32 val;

	offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16));

	memcpy(&val, &rt->gpr[offset], sizeof(val));

	val = bitrev8x4(val);
	val >>= start % BITS_PER_TYPE(u16);

	return ice_bit_rev_u16(val, len);
}

/* Stage a pending error-bit update in rt->pu for ice_pu_exe() to apply.
 *
 * NOTE(review): the mask is recorded in err_msk but the value is stored
 * in flg_val, while ice_pu_exe() reads err_val for bits set in err_msk.
 * Confirm whether flg_val here should be err_val.
 */
static void ice_err_add(struct ice_parser_rt *rt, int idx, bool val)
{
	rt->pu.err_msk |= (u16)BIT(idx);
	if (val)
		rt->pu.flg_val |= (u64)BIT_ULL(idx);
	else
		rt->pu.flg_val &= ~(u64)BIT_ULL(idx);

	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n",
		  idx, val);
}

/* Route an ALU comparison result to its destination bit: an error bit
 * (dst_reg_id == ICE_GPR_ERR_IDX) or a parser flag bit (dst_reg_id in
 * the flag-register range). Out-of-range destinations are logged and
 * dropped rather than written.
 */
static void ice_dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu,
				bool val)
{
	u16 flg_idx;

	if (alu->dedicate_flags_ena) {
		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n",
			  alu->opc);
		return;
	}

	if (alu->dst_reg_id == ICE_GPR_ERR_IDX) {
		if (alu->dst_start >= ICE_PARSER_ERR_NUM) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n",
				  alu->dst_start);
			return;
		}
		ice_err_add(rt, alu->dst_start, val);
	} else if (alu->dst_reg_id >= ICE_GPR_FLG_IDX) {
		/* Each flag GPR holds 16 flag bits, hence the << 4 */
		flg_idx = (u16)(((alu->dst_reg_id - ICE_GPR_FLG_IDX) << 4) +
				alu->dst_start);

		if (flg_idx >= ICE_PARSER_FLG_NUM) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n",
				  flg_idx);
			return;
		}
		ice_flg_add(rt, flg_idx, val);
	} else {
		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n",
			  alu->dst_reg_id, alu->dst_start);
	}
}

/* Execute one ALU instruction: update the protocol offset and flags,
 * fetch the source field, then apply the opcode. Arithmetic opcodes
 * write a GPR; comparison opcodes (ORLT/OREQ/SETEQ) set a destination
 * bit and branch by writing the next-pc GPR.
 */
static void ice_alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu)
{
	u16 dst, src, shift, imm;

	if (alu->shift_xlate_sel) {
		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_sel != 0 is not expected\n");
		return;
	}

	ice_po_update(rt, alu);
	ice_flg_update(rt, alu);

	dst = rt->gpr[alu->dst_reg_id];
	src = ice_reg_bit_sel(rt, alu->src_reg_id,
			      alu->src_start, alu->src_len);
	shift = alu->shift_xlate_key;
	imm = alu->imm;

	switch (alu->opc) {
	case ICE_ALU_PARK:
		/* No-op instruction */
		break;
	case ICE_ALU_MOV_ADD:
		dst = (src << shift) + imm;
		ice_gpr_add(rt, alu->dst_reg_id, dst);
		break;
	case ICE_ALU_ADD:
		dst += (src << shift) + imm;
		ice_gpr_add(rt, alu->dst_reg_id, dst);
		break;
	case ICE_ALU_ORLT:
		if (src < imm)
			ice_dst_reg_bit_set(rt, alu, true);
		ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
		break;
	case ICE_ALU_OREQ:
		if (src == imm)
			ice_dst_reg_bit_set(rt, alu, true);
		ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
		break;
	case ICE_ALU_SETEQ:
		ice_dst_reg_bit_set(rt, alu, src == imm);
		ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
		break;
	case ICE_ALU_MOV_XOR:
		dst = (src << shift) ^ imm;
		ice_gpr_add(rt, alu->dst_reg_id, dst);
		break;
	default:
		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n",
			  alu->opc);
		break;
	}
}

/* Execute ALU instruction slot 0 of the current parse round */
static void ice_alu0_exe(struct ice_parser_rt *rt)
{
	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n");
	ice_alu_exe(rt, rt->alu0);
	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n");
}

/* Execute ALU instruction slot 1 of the current parse round */
static void ice_alu1_exe(struct ice_parser_rt *rt)
{
	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n");
	ice_alu_exe(rt, rt->alu1);
	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n");
}

/* Execute ALU instruction slot 2 of the current parse round */
static void ice_alu2_exe(struct ice_parser_rt *rt)
{
	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n");
	ice_alu_exe(rt, rt->alu2);
	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n");
}

/* Commit all register updates staged in rt->pu during this round:
 * GPR values, parser flags, and error bits, each gated by its mask.
 */
static void ice_pu_exe(struct ice_parser_rt *rt)
{
	struct ice_gpr_pu *pu = &rt->pu;
	unsigned int i;

	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n");

	for (i = 0; i < ICE_PARSER_GPR_NUM; i++) {
		if (pu->gpr_val_upd[i])
			ice_rt_gpr_set(rt, i, pu->gpr_val[i]);
	}

	for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
		if (pu->flg_msk & BIT(i))
			ice_rt_flag_set(rt, i, pu->flg_val & BIT(i));
	}

	for (i = 0; i < ICE_PARSER_ERR_NUM; i++) {
		if (pu->err_msk & BIT(i))
			ice_rt_err_set(rt, i, pu->err_val & BIT(i));
	}

	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n");
}

/* Run one full parse round: the three ALUs plus the parse-graph step,
 * ordered by pg_prio (the priority selects where ice_pg_exe() is
 * interleaved among ALU0..2). Staged updates are then committed and
 * the header offset advanced/retreated by the action's ho_inc.
 * No default case: pg_prio is presumably limited to ICE_PG_P0..P3 by
 * the DDP package - other values silently skip the round (confirm).
 */
static void ice_alu_pg_exe(struct ice_parser_rt *rt)
{
	memset(&rt->pu, 0, sizeof(rt->pu));

	switch (rt->pg_prio) {
	case (ICE_PG_P0):
		ice_pg_exe(rt);
		ice_alu0_exe(rt);
		ice_alu1_exe(rt);
		ice_alu2_exe(rt);
		break;
	case (ICE_PG_P1):
		ice_alu0_exe(rt);
		ice_pg_exe(rt);
		ice_alu1_exe(rt);
		ice_alu2_exe(rt);
		break;
	case (ICE_PG_P2):
		ice_alu0_exe(rt);
		ice_alu1_exe(rt);
		ice_pg_exe(rt);
		ice_alu2_exe(rt);
		break;
	case (ICE_PG_P3):
		ice_alu0_exe(rt);
		ice_alu1_exe(rt);
		ice_alu2_exe(rt);
		ice_pg_exe(rt);
		break;
	}

	ice_pu_exe(rt);

	if (rt->action->ho_inc == 0)
		return;

	/* ho_polarity selects whether the header offset grows or shrinks */
	if (rt->action->ho_polarity)
		ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] + rt->action->ho_inc);
	else
		ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] - rt->action->ho_inc);
}

/* Record the protocol(s) recognized this round and their offsets.
 * For a protocol group (is_pg), every entry of the group table is
 * recorded relative to rt->po until a U8_MAX terminator; otherwise a
 * single protocol is recorded at rt->po.
 */
static void ice_proto_off_update(struct ice_parser_rt *rt)
{
	struct ice_parser *psr = rt->psr;

	if (rt->action->is_pg) {
		struct ice_proto_grp_item *proto_grp =
			&psr->proto_grp_table[rt->action->proto_id];
		u16 po;
		int i;

		for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
			struct ice_proto_off *entry = &proto_grp->po[i];

			/* U8_MAX terminates the group's entry list */
			if (entry->proto_id == U8_MAX)
				break;

			if (!entry->polarity)
				po = rt->po + entry->offset;
			else
				po = rt->po - entry->offset;

			rt->protocols[entry->proto_id] = true;
			rt->offsets[entry->proto_id] = po;

			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
				  entry->proto_id, po);
		}
	} else {
		rt->protocols[rt->action->proto_id] = true;
		rt->offsets[rt->action->proto_id] = rt->po;

		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
			  rt->action->proto_id, rt->po);
	}
}

/* Set marker bit @idx in the runtime's marker bitmap */
static void ice_marker_set(struct ice_parser_rt *rt, int idx)
{
	unsigned int byte = idx / BITS_PER_BYTE;
	unsigned int bit = idx % BITS_PER_BYTE;

	rt->markers[byte] |= (u8)BIT(bit);
}

/* Apply the current action's marker(s): a whole marker group (is_mg,
 * terminated by ICE_MARKER_MAX_SIZE) or a single marker id, used later
 * by ice_ptype_resolve() to look up the packet type.
 */
static void ice_marker_update(struct ice_parser_rt *rt)
{
	struct ice_parser *psr = rt->psr;

	if (rt->action->is_mg) {
		struct ice_mk_grp_item *mk_grp =
			&psr->mk_grp_table[rt->action->marker_id];
		int i;

		for (i = 0; i < ICE_MARKER_ID_NUM; i++) {
			u8 marker = mk_grp->markers[i];

			/* ICE_MARKER_MAX_SIZE terminates the group */
			if (marker == ICE_MARKER_MAX_SIZE)
				break;

			ice_marker_set(rt, marker);
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
				  marker);
		}
	} else {
		if (rt->action->marker_id != ICE_MARKER_MAX_SIZE)
			ice_marker_set(rt, rt->action->marker_id);

		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
			  rt->action->marker_id);
	}
}

/* Translate the accumulated marker bitmap into a packet type via the
 * ptype marker TCAM. Returns U16_MAX when no TCAM entry matches.
 */
static u16 ice_ptype_resolve(struct ice_parser_rt *rt)
{
	struct ice_ptype_mk_tcam_item *item;
	struct ice_parser *psr = rt->psr;

	item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table,
				       rt->markers, ICE_MARKER_ID_SIZE);
	if (item)
		return item->ptype;

	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Could not resolve PTYPE\n");
	return U16_MAX;
}

/* Copy every recognized protocol id and its offset into the result's
 * protocol/offset pair array, counting them in rslt->po_num.
 */
static void ice_proto_off_resolve(struct ice_parser_rt *rt,
				  struct ice_parser_result *rslt)
{
	int i;

	for (i = 0; i < ICE_PO_PAIR_SIZE - 1; i++) {
		if (rt->protocols[i]) {
			rslt->po[rslt->po_num].proto_id = (u8)i;
			rslt->po[rslt->po_num].offset = rt->offsets[i];
			rslt->po_num++;
		}
	}
}

/* Assemble the final parser result: raw parser flags, their redirected
 * packet flags, the per-block (switch/FD/RSS) flag translations, the
 * protocol/offset pairs, and the resolved packet type.
 */
static void ice_result_resolve(struct ice_parser_rt *rt,
			       struct ice_parser_result *rslt)
{
	struct ice_parser *psr = rt->psr;

	memset(rslt, 0, sizeof(*rslt));

	memcpy(&rslt->flags_psr, &rt->gpr[ICE_GPR_FLG_IDX],
	       ICE_PARSER_FLAG_PSR_SIZE);
	rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr);
	rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt);
	rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt);
	rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt);

	ice_proto_off_resolve(rt, rslt);
	rslt->ptype = ice_ptype_resolve(rt);
}

/**
 * ice_parser_rt_execute - parser execution routine
 * @rt: pointer to the parser runtime
 * @rslt: input/output parameter to save parser result
 *
 * Main parse loop. Each iteration: load the imem instruction at the
 * current pc, overlay any matching Boost TCAM entry's key/ALU/priority
 * fields (per the imem boost-enable bits), look up the next action in
 * the parse-graph CAM (falling back to the no-match CAM), then run the
 * round's ALUs and parse-graph step and record markers and protocol
 * offsets. The loop ends on an is_last_round action, when the header
 * offset reaches the packet length, or on error (no CAM match / key
 * init failure). The result is resolved even on early exit.
 *
 * Return: 0 on success or negative errno on failure.
 */
int ice_parser_rt_execute(struct ice_parser_rt *rt,
			  struct ice_parser_result *rslt)
{
	struct ice_pg_nm_cam_item *pg_nm_cam;
	struct ice_parser *psr = rt->psr;
	struct ice_pg_cam_item *pg_cam;
	int status = 0;
	u16 node;
	u16 pc;

	node = rt->gpr[ICE_GPR_NN_IDX];
	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node);

	while (true) {
		struct ice_bst_tcam_item *bst;
		struct ice_imem_item *imem;

		pc = rt->gpr[ICE_GPR_NP_IDX];
		imem = &psr->imem_table[pc];
		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %u\n",
			  pc);

		ice_bst_key_init(rt, imem);
		bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key);
		if (!bst) {
			/* No boost entry: take everything from imem */
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n");
			status = ice_imem_pgk_init(rt, imem);
			if (status)
				break;
			ice_imem_alu0_set(rt, imem);
			ice_imem_alu1_set(rt, imem);
			ice_imem_alu2_set(rt, imem);
			ice_imem_pgp_set(rt, imem);
		} else {
			/* Boost entry found: imem's b_m bits select, per
			 * field, whether the boost entry overrides imem.
			 */
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %u\n",
				  bst->addr);
			if (imem->b_m.pg) {
				status = ice_bst_pgk_init(rt, bst);
				if (status)
					break;
				ice_bst_pgp_set(rt, bst);
			} else {
				status = ice_imem_pgk_init(rt, imem);
				if (status)
					break;
				ice_imem_pgp_set(rt, imem);
			}

			if (imem->b_m.alu0)
				ice_bst_alu0_set(rt, bst);
			else
				ice_imem_alu0_set(rt, imem);

			if (imem->b_m.alu1)
				ice_bst_alu1_set(rt, bst);
			else
				ice_imem_alu1_set(rt, imem);

			if (imem->b_m.alu2)
				ice_bst_alu2_set(rt, bst);
			else
				ice_imem_alu2_set(rt, imem);
		}

		rt->action = NULL;
		pg_cam = ice_rt_pg_cam_match(rt);
		if (!pg_cam) {
			/* Fall back to the no-match CAM */
			pg_nm_cam = ice_rt_pg_nm_cam_match(rt);
			if (pg_nm_cam) {
				ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %u\n",
					  pg_nm_cam->idx);
				rt->action = &pg_nm_cam->action;
			}
		} else {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %u\n",
				  pg_cam->idx);
			rt->action = &pg_cam->action;
		}

		if (!rt->action) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n");
			status = -EINVAL;
			break;
		}

		ice_alu_pg_exe(rt);
		ice_marker_update(rt);
		ice_proto_off_update(rt);

		ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %u\n",
			  rt->action->next_node);

		if (rt->action->is_last_round) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n");
			break;
		}

		if (rt->gpr[ICE_GPR_HO_IDX] >= rt->pkt_len) {
			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset (%u) is larger than packet len (%u), stop parsing\n",
				  rt->gpr[ICE_GPR_HO_IDX], rt->pkt_len);
			break;
		}
	}

	/* Resolve whatever was parsed, even when the loop broke early */
	ice_result_resolve(rt, rslt);

	return status;
}
+1
drivers/net/ethernet/intel/ice/ice_type.h
··· 61 61 ICE_DBG_AQ_DESC | \ 62 62 ICE_DBG_AQ_DESC_BUF | \ 63 63 ICE_DBG_AQ_CMD) 64 + #define ICE_DBG_PARSER BIT_ULL(28) 64 65 65 66 #define ICE_DBG_USER BIT_ULL(31) 66 67
+8
drivers/net/ethernet/intel/ice/ice_vf_lib.h
··· 12 12 #include <net/devlink.h> 13 13 #include <linux/avf/virtchnl.h> 14 14 #include "ice_type.h" 15 + #include "ice_flow.h" 15 16 #include "ice_virtchnl_fdir.h" 16 17 #include "ice_vsi_vlan_ops.h" 17 18 ··· 51 50 u16 count; /* total count of Rx|Tx events */ 52 51 /* count number of the last printed event */ 53 52 u16 last_printed; 53 + }; 54 + 55 + /* Structure to store fdir fv entry */ 56 + struct ice_fdir_prof_info { 57 + struct ice_parser_profile prof; 58 + u64 fdir_active_cnt; 54 59 }; 55 60 56 61 /* VF operations */ ··· 98 91 u16 lan_vsi_idx; /* index into PF struct */ 99 92 u16 ctrl_vsi_idx; 100 93 struct ice_vf_fdir fdir; 94 + struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; 101 95 /* first vector index of this VF in the PF space */ 102 96 int first_vector_idx; 103 97 struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
+4
drivers/net/ethernet/intel/ice/ice_virtchnl.c
··· 461 461 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) 462 462 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; 463 463 464 + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 && 465 + vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) 466 + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32; 467 + 464 468 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 465 469 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 466 470
+395 -8
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
··· 26 26 ICE_FDIR_TUNNEL_TYPE_NONE = 0, 27 27 ICE_FDIR_TUNNEL_TYPE_GTPU, 28 28 ICE_FDIR_TUNNEL_TYPE_GTPU_EH, 29 + ICE_FDIR_TUNNEL_TYPE_ECPRI, 30 + ICE_FDIR_TUNNEL_TYPE_GTPU_INNER, 31 + ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER, 32 + ICE_FDIR_TUNNEL_TYPE_GRE, 33 + ICE_FDIR_TUNNEL_TYPE_GTPOGRE, 34 + ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER, 35 + ICE_FDIR_TUNNEL_TYPE_GRE_INNER, 36 + ICE_FDIR_TUNNEL_TYPE_L2TPV2, 37 + ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER, 29 38 }; 30 39 31 40 struct virtchnl_fdir_fltr_conf { ··· 42 33 enum ice_fdir_tunnel_type ttype; 43 34 u64 inset_flag; 44 35 u32 flow_id; 36 + 37 + struct ice_parser_profile *prof; 38 + bool parser_ena; 39 + u8 *pkt_buf; 40 + u8 pkt_len; 45 41 }; 46 42 47 43 struct virtchnl_fdir_inset_map { ··· 801 787 } 802 788 803 789 /** 790 + * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary) 791 + * @proto: virtchnl protocol headers 792 + * 793 + * Check if the FDIR rule is raw flow (protocol agnostic flow) or not. Note 794 + * that common FDIR rule must have non-zero proto->count. Thus, we choose the 795 + * tunnel_level and count of proto as the indicators. If both tunnel_level and 796 + * count of proto are zero, this FDIR rule will be regarded as raw flow. 797 + * 798 + * Returns: true if headers describe raw flow, false otherwise. 799 + */ 800 + static bool 801 + ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto) 802 + { 803 + return (proto->tunnel_level == 0 && proto->count == 0); 804 + } 805 + 806 + /** 807 + * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule 808 + * @vf: pointer to the VF info 809 + * @proto: virtchnl protocol headers 810 + * @conf: FDIR configuration for each filter 811 + * 812 + * Parse the virtual channel filter's raw flow and store it in @conf 813 + * 814 + * Return: 0 on success or negative errno on failure. 
815 + */ 816 + static int 817 + ice_vc_fdir_parse_raw(struct ice_vf *vf, 818 + struct virtchnl_proto_hdrs *proto, 819 + struct virtchnl_fdir_fltr_conf *conf) 820 + { 821 + u8 *pkt_buf, *msk_buf __free(kfree); 822 + struct ice_parser_result rslt; 823 + struct ice_pf *pf = vf->pf; 824 + struct ice_parser *psr; 825 + int status = -ENOMEM; 826 + struct ice_hw *hw; 827 + u16 udp_port = 0; 828 + 829 + pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); 830 + msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); 831 + if (!pkt_buf || !msk_buf) 832 + goto err_mem_alloc; 833 + 834 + memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len); 835 + memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len); 836 + 837 + hw = &pf->hw; 838 + 839 + /* Get raw profile info via Parser Lib */ 840 + psr = ice_parser_create(hw); 841 + if (IS_ERR(psr)) { 842 + status = PTR_ERR(psr); 843 + goto err_mem_alloc; 844 + } 845 + 846 + ice_parser_dvm_set(psr, ice_is_dvm_ena(hw)); 847 + 848 + if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN)) 849 + ice_parser_vxlan_tunnel_set(psr, udp_port, true); 850 + 851 + status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt); 852 + if (status) 853 + goto err_parser_destroy; 854 + 855 + if (hw->debug_mask & ICE_DBG_PARSER) 856 + ice_parser_result_dump(hw, &rslt); 857 + 858 + conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL); 859 + if (!conf->prof) { 860 + status = -ENOMEM; 861 + goto err_parser_destroy; 862 + } 863 + 864 + status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, 865 + proto->raw.pkt_len, ICE_BLK_FD, 866 + conf->prof); 867 + if (status) 868 + goto err_parser_profile_init; 869 + 870 + if (hw->debug_mask & ICE_DBG_PARSER) 871 + ice_parser_profile_dump(hw, conf->prof); 872 + 873 + /* Store raw flow info into @conf */ 874 + conf->pkt_len = proto->raw.pkt_len; 875 + conf->pkt_buf = pkt_buf; 876 + conf->parser_ena = true; 877 + 878 + ice_parser_destroy(psr); 879 + return 0; 880 + 881 + err_parser_profile_init: 882 + kfree(conf->prof); 883 + 
err_parser_destroy: 884 + ice_parser_destroy(psr); 885 + err_mem_alloc: 886 + kfree(pkt_buf); 887 + return status; 888 + } 889 + 890 + /** 804 891 * ice_vc_fdir_parse_pattern 805 892 * @vf: pointer to the VF info 806 893 * @fltr: virtual channel add cmd buffer ··· 927 812 proto->count, vf->vf_id); 928 813 return -EINVAL; 929 814 } 815 + 816 + /* For raw FDIR filters created by the parser */ 817 + if (ice_vc_fdir_is_raw_flow(proto)) 818 + return ice_vc_fdir_parse_raw(vf, proto, conf); 930 819 931 820 for (i = 0; i < proto->count; i++) { 932 821 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; ··· 1220 1101 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; 1221 1102 int ret; 1222 1103 1223 - if (!ice_vc_validate_pattern(vf, proto)) 1224 - return -EINVAL; 1104 + /* For raw FDIR filters created by the parser */ 1105 + if (!ice_vc_fdir_is_raw_flow(proto)) 1106 + if (!ice_vc_validate_pattern(vf, proto)) 1107 + return -EINVAL; 1225 1108 1226 1109 ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); 1227 1110 if (ret) ··· 1416 1295 return -ENOMEM; 1417 1296 1418 1297 ice_fdir_get_prgm_desc(hw, input, &desc, add); 1419 - ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); 1420 - if (ret) { 1421 - dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", 1422 - vf->vf_id, input->flow_type); 1423 - goto err_free_pkt; 1298 + if (conf->parser_ena) { 1299 + memcpy(pkt, conf->pkt_buf, conf->pkt_len); 1300 + } else { 1301 + ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); 1302 + if (ret) { 1303 + dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", 1304 + vf->vf_id, input->flow_type); 1305 + goto err_free_pkt; 1306 + } 1424 1307 } 1425 1308 1426 1309 ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); ··· 1644 1519 err_exit: 1645 1520 ice_vf_fdir_dump_info(vf); 1646 1521 return ret; 1522 + } 1523 + 1524 + static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype) 1525 + { 1526 + return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER 
|| 1527 + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER || 1528 + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER || 1529 + ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER || 1530 + ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI || 1531 + ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER); 1647 1532 } 1648 1533 1649 1534 /** ··· 1917 1782 } 1918 1783 1919 1784 /** 1785 + * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv context 1786 + * @fv_a: struct of parsed FDIR profile field vector 1787 + * @fv_b: struct of parsed FDIR profile field vector 1788 + * 1789 + * Check if the two parsed FDIR profile field vector context are different, 1790 + * including proto_id, offset and mask. 1791 + * 1792 + * Return: true on different, false on otherwise. 1793 + */ 1794 + static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a, 1795 + struct ice_parser_fv *fv_b) 1796 + { 1797 + return (fv_a->proto_id != fv_b->proto_id || 1798 + fv_a->offset != fv_b->offset || 1799 + fv_a->msk != fv_b->msk); 1800 + } 1801 + 1802 + /** 1803 + * ice_vc_parser_fv_save - save parsed FDIR profile fv context 1804 + * @fv: struct of parsed FDIR profile field vector 1805 + * @fv_src: parsed FDIR profile field vector context to save 1806 + * 1807 + * Save the parsed FDIR profile field vector context, including proto_id, 1808 + * offset and mask. 1809 + * 1810 + * Return: Void. 1811 + */ 1812 + static void ice_vc_parser_fv_save(struct ice_parser_fv *fv, 1813 + struct ice_parser_fv *fv_src) 1814 + { 1815 + fv->proto_id = fv_src->proto_id; 1816 + fv->offset = fv_src->offset; 1817 + fv->msk = fv_src->msk; 1818 + fv->spec = 0; 1819 + } 1820 + 1821 + /** 1822 + * ice_vc_add_fdir_raw - add a raw FDIR filter for VF 1823 + * @vf: pointer to the VF info 1824 + * @conf: FDIR configuration for each filter 1825 + * @v_ret: the final VIRTCHNL code 1826 + * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER 1827 + * @len: length of the stat 1828 + * 1829 + * Return: 0 on success or negative errno on failure. 
1830 + */ 1831 + static int 1832 + ice_vc_add_fdir_raw(struct ice_vf *vf, 1833 + struct virtchnl_fdir_fltr_conf *conf, 1834 + enum virtchnl_status_code *v_ret, 1835 + struct virtchnl_fdir_add *stat, int len) 1836 + { 1837 + struct ice_vsi *vf_vsi, *ctrl_vsi; 1838 + struct ice_fdir_prof_info *pi; 1839 + struct ice_pf *pf = vf->pf; 1840 + int ret, ptg, id, i; 1841 + struct device *dev; 1842 + struct ice_hw *hw; 1843 + bool fv_found; 1844 + 1845 + dev = ice_pf_to_dev(pf); 1846 + hw = &pf->hw; 1847 + *v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1848 + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; 1849 + 1850 + id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX); 1851 + ptg = hw->blk[ICE_BLK_FD].xlt1.t[id]; 1852 + 1853 + vf_vsi = ice_get_vf_vsi(vf); 1854 + if (!vf_vsi) { 1855 + dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id); 1856 + return -ENODEV; 1857 + } 1858 + 1859 + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; 1860 + if (!ctrl_vsi) { 1861 + dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n", 1862 + vf->vf_id); 1863 + return -ENODEV; 1864 + } 1865 + 1866 + fv_found = false; 1867 + 1868 + /* Check if profile info already exists, then update the counter */ 1869 + pi = &vf->fdir_prof_info[ptg]; 1870 + if (pi->fdir_active_cnt != 0) { 1871 + for (i = 0; i < ICE_MAX_FV_WORDS; i++) 1872 + if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i], 1873 + &conf->prof->fv[i])) 1874 + break; 1875 + if (i == ICE_MAX_FV_WORDS) { 1876 + fv_found = true; 1877 + pi->fdir_active_cnt++; 1878 + } 1879 + } 1880 + 1881 + /* HW profile setting is only required for the first time */ 1882 + if (!fv_found) { 1883 + ret = ice_flow_set_parser_prof(hw, vf_vsi->idx, 1884 + ctrl_vsi->idx, conf->prof, 1885 + ICE_BLK_FD); 1886 + 1887 + if (ret) { 1888 + *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 1889 + dev_dbg(dev, "VF %d: insert hw prof failed\n", 1890 + vf->vf_id); 1891 + return ret; 1892 + } 1893 + } 1894 + 1895 + ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id); 1896 + if (ret) { 
1897 + *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 1898 + dev_dbg(dev, "VF %d: insert FDIR list failed\n", 1899 + vf->vf_id); 1900 + return ret; 1901 + } 1902 + 1903 + ret = ice_vc_fdir_set_irq_ctx(vf, conf, 1904 + VIRTCHNL_OP_ADD_FDIR_FILTER); 1905 + if (ret) { 1906 + dev_dbg(dev, "VF %d: set FDIR context failed\n", 1907 + vf->vf_id); 1908 + goto err_rem_entry; 1909 + } 1910 + 1911 + ret = ice_vc_fdir_write_fltr(vf, conf, true, false); 1912 + if (ret) { 1913 + dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n", 1914 + vf->vf_id, ret); 1915 + goto err_clr_irq; 1916 + } 1917 + 1918 + /* Save parsed profile fv info of the FDIR rule for the first time */ 1919 + if (!fv_found) { 1920 + for (i = 0; i < conf->prof->fv_num; i++) 1921 + ice_vc_parser_fv_save(&pi->prof.fv[i], 1922 + &conf->prof->fv[i]); 1923 + pi->prof.fv_num = conf->prof->fv_num; 1924 + pi->fdir_active_cnt = 1; 1925 + } 1926 + 1927 + return 0; 1928 + 1929 + err_clr_irq: 1930 + ice_vc_fdir_clear_irq_ctx(vf); 1931 + err_rem_entry: 1932 + ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); 1933 + return ret; 1934 + } 1935 + 1936 + /** 1920 1937 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer 1921 1938 * @vf: pointer to the VF info 1922 1939 * @msg: pointer to the msg buffer ··· 2133 1846 len = sizeof(*stat); 2134 1847 ret = ice_vc_validate_fdir_fltr(vf, fltr, conf); 2135 1848 if (ret) { 2136 - v_ret = VIRTCHNL_STATUS_SUCCESS; 1849 + v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2137 1850 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; 2138 1851 dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id); 2139 1852 goto err_free_conf; ··· 2148 1861 goto exit; 2149 1862 } 2150 1863 1864 + /* For raw FDIR filters created by the parser */ 1865 + if (conf->parser_ena) { 1866 + ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len); 1867 + if (ret) 1868 + goto err_free_conf; 1869 + goto exit; 1870 + } 1871 + 1872 + is_tun = ice_fdir_is_tunnel(conf->ttype); 2151 1873 ret = 
ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun); 2152 1874 if (ret) { 2153 1875 v_ret = VIRTCHNL_STATUS_SUCCESS; ··· 2218 1922 } 2219 1923 2220 1924 /** 1925 + * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF 1926 + * @vf: pointer to the VF info 1927 + * @conf: FDIR configuration for each filter 1928 + * @v_ret: the final VIRTCHNL code 1929 + * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER 1930 + * @len: length of the stat 1931 + * 1932 + * Return: 0 on success or negative errno on failure. 1933 + */ 1934 + static int 1935 + ice_vc_del_fdir_raw(struct ice_vf *vf, 1936 + struct virtchnl_fdir_fltr_conf *conf, 1937 + enum virtchnl_status_code *v_ret, 1938 + struct virtchnl_fdir_del *stat, int len) 1939 + { 1940 + struct ice_vsi *vf_vsi, *ctrl_vsi; 1941 + enum ice_block blk = ICE_BLK_FD; 1942 + struct ice_fdir_prof_info *pi; 1943 + struct ice_pf *pf = vf->pf; 1944 + struct device *dev; 1945 + struct ice_hw *hw; 1946 + unsigned long id; 1947 + u16 vsi_num; 1948 + int ptg; 1949 + int ret; 1950 + 1951 + dev = ice_pf_to_dev(pf); 1952 + hw = &pf->hw; 1953 + *v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1954 + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; 1955 + 1956 + id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX); 1957 + ptg = hw->blk[ICE_BLK_FD].xlt1.t[id]; 1958 + 1959 + ret = ice_vc_fdir_write_fltr(vf, conf, false, false); 1960 + if (ret) { 1961 + dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n", 1962 + vf->vf_id, ret); 1963 + return ret; 1964 + } 1965 + 1966 + vf_vsi = ice_get_vf_vsi(vf); 1967 + if (!vf_vsi) { 1968 + dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); 1969 + return -ENODEV; 1970 + } 1971 + 1972 + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; 1973 + if (!ctrl_vsi) { 1974 + dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n", 1975 + vf->vf_id); 1976 + return -ENODEV; 1977 + } 1978 + 1979 + pi = &vf->fdir_prof_info[ptg]; 1980 + if (pi->fdir_active_cnt != 0) { 1981 + pi->fdir_active_cnt--; 1982 + /* Remove the 
profile id flow if no active FDIR rule left */ 1983 + if (!pi->fdir_active_cnt) { 1984 + vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx); 1985 + ice_rem_prof_id_flow(hw, blk, vsi_num, id); 1986 + 1987 + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); 1988 + ice_rem_prof_id_flow(hw, blk, vsi_num, id); 1989 + } 1990 + } 1991 + 1992 + conf->parser_ena = false; 1993 + return 0; 1994 + } 1995 + 1996 + /** 2221 1997 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer 2222 1998 * @vf: pointer to the VF info 2223 1999 * @msg: pointer to the msg buffer ··· 2301 1933 struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg; 2302 1934 struct virtchnl_fdir_del *stat = NULL; 2303 1935 struct virtchnl_fdir_fltr_conf *conf; 1936 + struct ice_vf_fdir *fdir = &vf->fdir; 2304 1937 enum virtchnl_status_code v_ret; 1938 + struct ice_fdir_fltr *input; 1939 + enum ice_fltr_ptype flow; 2305 1940 struct device *dev; 2306 1941 struct ice_pf *pf; 2307 1942 int is_tun = 0; ··· 2354 1983 goto err_exit; 2355 1984 } 2356 1985 1986 + /* For raw FDIR filters created by the parser */ 1987 + if (conf->parser_ena) { 1988 + ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len); 1989 + if (ret) 1990 + goto err_del_tmr; 1991 + goto exit; 1992 + } 1993 + 1994 + is_tun = ice_fdir_is_tunnel(conf->ttype); 2357 1995 ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun); 2358 1996 if (ret) { 2359 1997 v_ret = VIRTCHNL_STATUS_SUCCESS; ··· 2372 1992 goto err_del_tmr; 2373 1993 } 2374 1994 1995 + /* Remove unused profiles to avoid unexpected behaviors */ 1996 + input = &conf->input; 1997 + flow = input->flow_type; 1998 + if (fdir->fdir_fltr_cnt[flow][is_tun] == 1) 1999 + ice_vc_fdir_rem_prof(vf, flow, is_tun); 2000 + 2001 + exit: 2375 2002 kfree(stat); 2376 2003 2377 2004 return ret;
+12 -1
include/linux/avf/virtchnl.h
··· 247 247 /* used to negotiate communicating link speeds in Mbps */ 248 248 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7) 249 249 #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10) 250 + #define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11) 250 251 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15) 251 252 #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16) 252 253 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17) ··· 1122 1121 }; 1123 1122 1124 1123 #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32 1124 + #define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024 1125 1125 #define PROTO_HDR_SHIFT 5 1126 1126 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT) 1127 1127 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1) ··· 1268 1266 u8 pad[3]; 1269 1267 /** 1270 1268 * specify where protocol header start from. 1269 + * must be 0 when sending a raw packet request. 1271 1270 * 0 - from the outer layer 1272 1271 * 1 - from the first inner layer 1273 1272 * 2 - from the second inner layer 1274 1273 * .... 1275 1274 **/ 1276 1275 int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */ 1277 - struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; 1276 + union { 1277 + struct virtchnl_proto_hdr 1278 + proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; 1279 + struct { 1280 + u16 pkt_len; 1281 + u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET]; 1282 + u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET]; 1283 + } raw; 1284 + }; 1278 1285 }; 1279 1286 1280 1287 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);