Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: microchip: vcap: Add tc flower keys for lan966x

Add the following TC flower filter keys to lan966x for IS2:
- ipv4_addr (sip and dip)
- ipv6_addr (sip and dip)
- control (IPv4 fragments)
- portnum (tcp and udp port numbers)
- basic (L3 and L4 protocol)
- vlan (outer vlan tag info)
- tcp (tcp flags)
- ip (tos field)

As the parsing of these keys is similar between lan966x and sparx5, move
the code into a separate file to be shared by these two chips, and keep
the chip-specific parsing outside of the common functions.

Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Horatiu Vultur and committed by
David S. Miller
47400aae 21119e2c

+623 -492
+111 -29
drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
··· 3 3 #include "lan966x_main.h" 4 4 #include "vcap_api.h" 5 5 #include "vcap_api_client.h" 6 + #include "vcap_tc.h" 6 7 7 - struct lan966x_tc_flower_parse_usage { 8 - struct flow_cls_offload *f; 9 - struct flow_rule *frule; 10 - struct vcap_rule *vrule; 11 - unsigned int used_keys; 12 - u16 l3_proto; 13 - }; 14 - 15 - static int lan966x_tc_flower_handler_ethaddr_usage(struct lan966x_tc_flower_parse_usage *st) 8 + static bool lan966x_tc_is_known_etype(u16 etype) 16 9 { 17 - enum vcap_key_field smac_key = VCAP_KF_L2_SMAC; 18 - enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC; 19 - struct flow_match_eth_addrs match; 20 - struct vcap_u48_key smac, dmac; 10 + switch (etype) { 11 + case ETH_P_ALL: 12 + case ETH_P_ARP: 13 + case ETH_P_IP: 14 + case ETH_P_IPV6: 15 + return true; 16 + } 17 + 18 + return false; 19 + } 20 + 21 + static int 22 + lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) 23 + { 24 + struct flow_match_control match; 21 25 int err = 0; 22 26 23 - flow_rule_match_eth_addrs(st->frule, &match); 24 - 25 - if (!is_zero_ether_addr(match.mask->src)) { 26 - vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN); 27 - vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN); 28 - err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac); 27 + flow_rule_match_control(st->frule, &match); 28 + if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 29 + if (match.key->flags & FLOW_DIS_IS_FRAGMENT) 30 + err = vcap_rule_add_key_bit(st->vrule, 31 + VCAP_KF_L3_FRAGMENT, 32 + VCAP_BIT_1); 33 + else 34 + err = vcap_rule_add_key_bit(st->vrule, 35 + VCAP_KF_L3_FRAGMENT, 36 + VCAP_BIT_0); 29 37 if (err) 30 38 goto out; 31 39 } 32 40 33 - if (!is_zero_ether_addr(match.mask->dst)) { 34 - vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN); 35 - vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN); 36 - err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac); 41 + if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 42 + if (match.key->flags & 
FLOW_DIS_FIRST_FRAG) 43 + err = vcap_rule_add_key_bit(st->vrule, 44 + VCAP_KF_L3_FRAG_OFS_GT0, 45 + VCAP_BIT_0); 46 + else 47 + err = vcap_rule_add_key_bit(st->vrule, 48 + VCAP_KF_L3_FRAG_OFS_GT0, 49 + VCAP_BIT_1); 37 50 if (err) 38 51 goto out; 39 52 } 40 53 41 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS); 54 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL); 42 55 43 56 return err; 44 57 45 58 out: 46 - NL_SET_ERR_MSG_MOD(st->f->common.extack, "eth_addr parse error"); 59 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error"); 47 60 return err; 48 61 } 49 62 50 63 static int 51 - (*lan966x_tc_flower_handlers_usage[])(struct lan966x_tc_flower_parse_usage *st) = { 52 - [FLOW_DISSECTOR_KEY_ETH_ADDRS] = lan966x_tc_flower_handler_ethaddr_usage, 64 + lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st) 65 + { 66 + struct flow_match_basic match; 67 + int err = 0; 68 + 69 + flow_rule_match_basic(st->frule, &match); 70 + if (match.mask->n_proto) { 71 + st->l3_proto = be16_to_cpu(match.key->n_proto); 72 + if (!lan966x_tc_is_known_etype(st->l3_proto)) { 73 + err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE, 74 + st->l3_proto, ~0); 75 + if (err) 76 + goto out; 77 + } else if (st->l3_proto == ETH_P_IP) { 78 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS, 79 + VCAP_BIT_1); 80 + if (err) 81 + goto out; 82 + } 83 + } 84 + if (match.mask->ip_proto) { 85 + st->l4_proto = match.key->ip_proto; 86 + 87 + if (st->l4_proto == IPPROTO_TCP) { 88 + err = vcap_rule_add_key_bit(st->vrule, 89 + VCAP_KF_TCP_IS, 90 + VCAP_BIT_1); 91 + if (err) 92 + goto out; 93 + } else if (st->l4_proto == IPPROTO_UDP) { 94 + err = vcap_rule_add_key_bit(st->vrule, 95 + VCAP_KF_TCP_IS, 96 + VCAP_BIT_0); 97 + if (err) 98 + goto out; 99 + } else { 100 + err = vcap_rule_add_key_u32(st->vrule, 101 + VCAP_KF_L3_IP_PROTO, 102 + st->l4_proto, ~0); 103 + if (err) 104 + goto out; 105 + } 106 + } 107 + 108 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC); 109 + 
return err; 110 + out: 111 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error"); 112 + return err; 113 + } 114 + 115 + static int 116 + lan966x_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st) 117 + { 118 + return vcap_tc_flower_handler_vlan_usage(st, 119 + VCAP_KF_8021Q_VID_CLS, 120 + VCAP_KF_8021Q_PCP_CLS); 121 + } 122 + 123 + static int 124 + (*lan966x_tc_flower_handlers_usage[])(struct vcap_tc_flower_parse_usage *st) = { 125 + [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage, 126 + [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage, 127 + [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage, 128 + [FLOW_DISSECTOR_KEY_CONTROL] = lan966x_tc_flower_handler_control_usage, 129 + [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage, 130 + [FLOW_DISSECTOR_KEY_BASIC] = lan966x_tc_flower_handler_basic_usage, 131 + [FLOW_DISSECTOR_KEY_VLAN] = lan966x_tc_flower_handler_vlan_usage, 132 + [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage, 133 + [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage, 134 + [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage, 53 135 }; 54 136 55 137 static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f, ··· 139 57 struct vcap_rule *vrule, 140 58 u16 *l3_proto) 141 59 { 142 - struct lan966x_tc_flower_parse_usage state = { 143 - .f = f, 60 + struct vcap_tc_flower_parse_usage state = { 61 + .fco = f, 144 62 .vrule = vrule, 145 63 .l3_proto = ETH_P_ALL, 146 64 };
+71 -462
drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
··· 10 10 #include "sparx5_tc.h" 11 11 #include "vcap_api.h" 12 12 #include "vcap_api_client.h" 13 + #include "vcap_tc.h" 13 14 #include "sparx5_main.h" 14 15 #include "sparx5_vcap_impl.h" 15 16 ··· 28 27 struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE]; 29 28 }; 30 29 31 - struct sparx5_tc_flower_parse_usage { 32 - struct flow_cls_offload *fco; 33 - struct flow_rule *frule; 34 - struct vcap_rule *vrule; 35 - struct vcap_admin *admin; 36 - u16 l3_proto; 37 - u8 l4_proto; 38 - unsigned int used_keys; 39 - }; 40 - 41 - enum sparx5_is2_arp_opcode { 42 - SPX5_IS2_ARP_REQUEST, 43 - SPX5_IS2_ARP_REPLY, 44 - SPX5_IS2_RARP_REQUEST, 45 - SPX5_IS2_RARP_REPLY, 46 - }; 47 - 48 - enum tc_arp_opcode { 49 - TC_ARP_OP_RESERVED, 50 - TC_ARP_OP_REQUEST, 51 - TC_ARP_OP_REPLY, 52 - }; 53 - 54 - static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st) 55 - { 56 - enum vcap_key_field smac_key = VCAP_KF_L2_SMAC; 57 - enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC; 58 - struct flow_match_eth_addrs match; 59 - struct vcap_u48_key smac, dmac; 60 - int err = 0; 61 - 62 - flow_rule_match_eth_addrs(st->frule, &match); 63 - 64 - if (!is_zero_ether_addr(match.mask->src)) { 65 - vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN); 66 - vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN); 67 - err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac); 68 - if (err) 69 - goto out; 70 - } 71 - 72 - if (!is_zero_ether_addr(match.mask->dst)) { 73 - vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN); 74 - vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN); 75 - err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac); 76 - if (err) 77 - goto out; 78 - } 79 - 80 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS); 81 - 82 - return err; 83 - 84 - out: 85 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error"); 86 - return err; 87 - } 88 - 89 30 static int 90 - sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage 
*st) 31 + sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st) 91 32 { 33 + struct flow_match_basic mt; 92 34 int err = 0; 93 35 94 - if (st->l3_proto == ETH_P_IP) { 95 - struct flow_match_ipv4_addrs mt; 36 + flow_rule_match_basic(st->frule, &mt); 96 37 97 - flow_rule_match_ipv4_addrs(st->frule, &mt); 98 - if (mt.mask->src) { 38 + if (mt.mask->n_proto) { 39 + st->l3_proto = be16_to_cpu(mt.key->n_proto); 40 + if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) { 41 + err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE, 42 + st->l3_proto, ~0); 43 + if (err) 44 + goto out; 45 + } else if (st->l3_proto == ETH_P_IP) { 46 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS, 47 + VCAP_BIT_1); 48 + if (err) 49 + goto out; 50 + } else if (st->l3_proto == ETH_P_IPV6) { 51 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS, 52 + VCAP_BIT_0); 53 + if (err) 54 + goto out; 55 + if (st->admin->vtype == VCAP_TYPE_IS0) { 56 + err = vcap_rule_add_key_bit(st->vrule, 57 + VCAP_KF_IP_SNAP_IS, 58 + VCAP_BIT_1); 59 + if (err) 60 + goto out; 61 + } 62 + } 63 + } 64 + 65 + if (mt.mask->ip_proto) { 66 + st->l4_proto = mt.key->ip_proto; 67 + if (st->l4_proto == IPPROTO_TCP) { 68 + err = vcap_rule_add_key_bit(st->vrule, 69 + VCAP_KF_TCP_IS, 70 + VCAP_BIT_1); 71 + if (err) 72 + goto out; 73 + } else if (st->l4_proto == IPPROTO_UDP) { 74 + err = vcap_rule_add_key_bit(st->vrule, 75 + VCAP_KF_TCP_IS, 76 + VCAP_BIT_0); 77 + if (err) 78 + goto out; 79 + if (st->admin->vtype == VCAP_TYPE_IS0) { 80 + err = vcap_rule_add_key_bit(st->vrule, 81 + VCAP_KF_TCP_UDP_IS, 82 + VCAP_BIT_1); 83 + if (err) 84 + goto out; 85 + } 86 + } else { 99 87 err = vcap_rule_add_key_u32(st->vrule, 100 - VCAP_KF_L3_IP4_SIP, 101 - be32_to_cpu(mt.key->src), 102 - be32_to_cpu(mt.mask->src)); 103 - if (err) 104 - goto out; 105 - } 106 - if (mt.mask->dst) { 107 - err = vcap_rule_add_key_u32(st->vrule, 108 - VCAP_KF_L3_IP4_DIP, 109 - be32_to_cpu(mt.key->dst), 110 - be32_to_cpu(mt.mask->dst)); 
88 + VCAP_KF_L3_IP_PROTO, 89 + st->l4_proto, ~0); 111 90 if (err) 112 91 goto out; 113 92 } 114 93 } 115 94 116 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS); 95 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC); 117 96 118 97 return err; 119 98 120 99 out: 121 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error"); 100 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error"); 122 101 return err; 123 102 } 124 103 125 104 static int 126 - sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st) 127 - { 128 - int err = 0; 129 - 130 - if (st->l3_proto == ETH_P_IPV6) { 131 - struct flow_match_ipv6_addrs mt; 132 - struct vcap_u128_key sip; 133 - struct vcap_u128_key dip; 134 - 135 - flow_rule_match_ipv6_addrs(st->frule, &mt); 136 - /* Check if address masks are non-zero */ 137 - if (!ipv6_addr_any(&mt.mask->src)) { 138 - vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16); 139 - vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16); 140 - err = vcap_rule_add_key_u128(st->vrule, 141 - VCAP_KF_L3_IP6_SIP, &sip); 142 - if (err) 143 - goto out; 144 - } 145 - if (!ipv6_addr_any(&mt.mask->dst)) { 146 - vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16); 147 - vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16); 148 - err = vcap_rule_add_key_u128(st->vrule, 149 - VCAP_KF_L3_IP6_DIP, &dip); 150 - if (err) 151 - goto out; 152 - } 153 - } 154 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS); 155 - return err; 156 - out: 157 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error"); 158 - return err; 159 - } 160 - 161 - static int 162 - sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st) 105 + sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) 163 106 { 164 107 struct flow_match_control mt; 165 108 u32 value, mask; ··· 152 207 } 153 208 154 209 static int 155 - sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st) 210 + 
sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st) 156 211 { 157 - struct flow_match_ports mt; 158 - u16 value, mask; 159 - int err = 0; 160 - 161 - flow_rule_match_ports(st->frule, &mt); 162 - 163 - if (mt.mask->src) { 164 - value = be16_to_cpu(mt.key->src); 165 - mask = be16_to_cpu(mt.mask->src); 166 - err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value, 167 - mask); 168 - if (err) 169 - goto out; 170 - } 171 - 172 - if (mt.mask->dst) { 173 - value = be16_to_cpu(mt.key->dst); 174 - mask = be16_to_cpu(mt.mask->dst); 175 - err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value, 176 - mask); 177 - if (err) 178 - goto out; 179 - } 180 - 181 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS); 182 - 183 - return err; 184 - 185 - out: 186 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error"); 187 - return err; 188 - } 189 - 190 - static int 191 - sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st) 192 - { 193 - struct flow_match_basic mt; 194 - int err = 0; 195 - 196 - flow_rule_match_basic(st->frule, &mt); 197 - 198 - if (mt.mask->n_proto) { 199 - st->l3_proto = be16_to_cpu(mt.key->n_proto); 200 - if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) { 201 - err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE, 202 - st->l3_proto, ~0); 203 - if (err) 204 - goto out; 205 - } else if (st->l3_proto == ETH_P_IP) { 206 - err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS, 207 - VCAP_BIT_1); 208 - if (err) 209 - goto out; 210 - } else if (st->l3_proto == ETH_P_IPV6) { 211 - err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS, 212 - VCAP_BIT_0); 213 - if (err) 214 - goto out; 215 - if (st->admin->vtype == VCAP_TYPE_IS0) { 216 - err = vcap_rule_add_key_bit(st->vrule, 217 - VCAP_KF_IP_SNAP_IS, 218 - VCAP_BIT_1); 219 - if (err) 220 - goto out; 221 - } 222 - 223 - } 224 - } 225 - 226 - if (mt.mask->ip_proto) { 227 - st->l4_proto = mt.key->ip_proto; 228 - if (st->l4_proto == IPPROTO_TCP) { 229 
- err = vcap_rule_add_key_bit(st->vrule, 230 - VCAP_KF_TCP_IS, 231 - VCAP_BIT_1); 232 - if (err) 233 - goto out; 234 - } else if (st->l4_proto == IPPROTO_UDP) { 235 - err = vcap_rule_add_key_bit(st->vrule, 236 - VCAP_KF_TCP_IS, 237 - VCAP_BIT_0); 238 - if (err) 239 - goto out; 240 - if (st->admin->vtype == VCAP_TYPE_IS0) { 241 - err = vcap_rule_add_key_bit(st->vrule, 242 - VCAP_KF_TCP_UDP_IS, 243 - VCAP_BIT_1); 244 - if (err) 245 - goto out; 246 - } 247 - } else { 248 - err = vcap_rule_add_key_u32(st->vrule, 249 - VCAP_KF_L3_IP_PROTO, 250 - st->l4_proto, ~0); 251 - if (err) 252 - goto out; 253 - } 254 - } 255 - 256 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC); 257 - 258 - return err; 259 - 260 - out: 261 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error"); 262 - return err; 263 - } 264 - 265 - static int 266 - sparx5_tc_flower_handler_cvlan_usage(struct sparx5_tc_flower_parse_usage *st) 267 - { 268 - enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0; 269 - enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0; 270 - struct flow_match_vlan mt; 271 - u16 tpid; 272 - int err; 273 - 274 212 if (st->admin->vtype != VCAP_TYPE_IS0) { 275 213 NL_SET_ERR_MSG_MOD(st->fco->common.extack, 276 214 "cvlan not supported in this VCAP"); 277 215 return -EINVAL; 278 216 } 279 217 280 - flow_rule_match_cvlan(st->frule, &mt); 281 - 282 - tpid = be16_to_cpu(mt.key->vlan_tpid); 283 - 284 - if (tpid == ETH_P_8021Q) { 285 - vid_key = VCAP_KF_8021Q_VID1; 286 - pcp_key = VCAP_KF_8021Q_PCP1; 287 - } 288 - 289 - if (mt.mask->vlan_id) { 290 - err = vcap_rule_add_key_u32(st->vrule, vid_key, 291 - mt.key->vlan_id, 292 - mt.mask->vlan_id); 293 - if (err) 294 - goto out; 295 - } 296 - 297 - if (mt.mask->vlan_priority) { 298 - err = vcap_rule_add_key_u32(st->vrule, pcp_key, 299 - mt.key->vlan_priority, 300 - mt.mask->vlan_priority); 301 - if (err) 302 - goto out; 303 - } 304 - 305 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN); 306 - 307 - return 0; 308 - out: 309 - 
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error"); 310 - return err; 218 + return vcap_tc_flower_handler_cvlan_usage(st); 311 219 } 312 220 313 221 static int 314 - sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st) 222 + sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st) 315 223 { 316 224 enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS; 317 225 enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS; 318 - struct flow_match_vlan mt; 319 - int err; 320 - 321 - flow_rule_match_vlan(st->frule, &mt); 322 226 323 227 if (st->admin->vtype == VCAP_TYPE_IS0) { 324 228 vid_key = VCAP_KF_8021Q_VID0; 325 229 pcp_key = VCAP_KF_8021Q_PCP0; 326 230 } 327 231 328 - if (mt.mask->vlan_id) { 329 - err = vcap_rule_add_key_u32(st->vrule, vid_key, 330 - mt.key->vlan_id, 331 - mt.mask->vlan_id); 332 - if (err) 333 - goto out; 334 - } 335 - 336 - if (mt.mask->vlan_priority) { 337 - err = vcap_rule_add_key_u32(st->vrule, pcp_key, 338 - mt.key->vlan_priority, 339 - mt.mask->vlan_priority); 340 - if (err) 341 - goto out; 342 - } 343 - 344 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN); 345 - 346 - return 0; 347 - out: 348 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error"); 349 - return err; 232 + return vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key); 350 233 } 351 234 352 - static int 353 - sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st) 354 - { 355 - struct flow_match_tcp mt; 356 - u16 tcp_flags_mask; 357 - u16 tcp_flags_key; 358 - enum vcap_bit val; 359 - int err = 0; 360 - 361 - flow_rule_match_tcp(st->frule, &mt); 362 - tcp_flags_key = be16_to_cpu(mt.key->flags); 363 - tcp_flags_mask = be16_to_cpu(mt.mask->flags); 364 - 365 - if (tcp_flags_mask & TCPHDR_FIN) { 366 - val = VCAP_BIT_0; 367 - if (tcp_flags_key & TCPHDR_FIN) 368 - val = VCAP_BIT_1; 369 - err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val); 370 - if (err) 371 - goto out; 372 - } 373 - 374 - if 
(tcp_flags_mask & TCPHDR_SYN) { 375 - val = VCAP_BIT_0; 376 - if (tcp_flags_key & TCPHDR_SYN) 377 - val = VCAP_BIT_1; 378 - err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val); 379 - if (err) 380 - goto out; 381 - } 382 - 383 - if (tcp_flags_mask & TCPHDR_RST) { 384 - val = VCAP_BIT_0; 385 - if (tcp_flags_key & TCPHDR_RST) 386 - val = VCAP_BIT_1; 387 - err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val); 388 - if (err) 389 - goto out; 390 - } 391 - 392 - if (tcp_flags_mask & TCPHDR_PSH) { 393 - val = VCAP_BIT_0; 394 - if (tcp_flags_key & TCPHDR_PSH) 395 - val = VCAP_BIT_1; 396 - err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val); 397 - if (err) 398 - goto out; 399 - } 400 - 401 - if (tcp_flags_mask & TCPHDR_ACK) { 402 - val = VCAP_BIT_0; 403 - if (tcp_flags_key & TCPHDR_ACK) 404 - val = VCAP_BIT_1; 405 - err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val); 406 - if (err) 407 - goto out; 408 - } 409 - 410 - if (tcp_flags_mask & TCPHDR_URG) { 411 - val = VCAP_BIT_0; 412 - if (tcp_flags_key & TCPHDR_URG) 413 - val = VCAP_BIT_1; 414 - err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val); 415 - if (err) 416 - goto out; 417 - } 418 - 419 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP); 420 - 421 - return err; 422 - 423 - out: 424 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error"); 425 - return err; 426 - } 427 - 428 - static int 429 - sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st) 430 - { 431 - struct flow_match_arp mt; 432 - u16 value, mask; 433 - u32 ipval, ipmsk; 434 - int err; 435 - 436 - flow_rule_match_arp(st->frule, &mt); 437 - 438 - if (mt.mask->op) { 439 - mask = 0x3; 440 - if (st->l3_proto == ETH_P_ARP) { 441 - value = mt.key->op == TC_ARP_OP_REQUEST ? 442 - SPX5_IS2_ARP_REQUEST : 443 - SPX5_IS2_ARP_REPLY; 444 - } else { /* RARP */ 445 - value = mt.key->op == TC_ARP_OP_REQUEST ? 
446 - SPX5_IS2_RARP_REQUEST : 447 - SPX5_IS2_RARP_REPLY; 448 - } 449 - err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE, 450 - value, mask); 451 - if (err) 452 - goto out; 453 - } 454 - 455 - /* The IS2 ARP keyset does not support ARP hardware addresses */ 456 - if (!is_zero_ether_addr(mt.mask->sha) || 457 - !is_zero_ether_addr(mt.mask->tha)) { 458 - err = -EINVAL; 459 - goto out; 460 - } 461 - 462 - if (mt.mask->sip) { 463 - ipval = be32_to_cpu((__force __be32)mt.key->sip); 464 - ipmsk = be32_to_cpu((__force __be32)mt.mask->sip); 465 - 466 - err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP, 467 - ipval, ipmsk); 468 - if (err) 469 - goto out; 470 - } 471 - 472 - if (mt.mask->tip) { 473 - ipval = be32_to_cpu((__force __be32)mt.key->tip); 474 - ipmsk = be32_to_cpu((__force __be32)mt.mask->tip); 475 - 476 - err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP, 477 - ipval, ipmsk); 478 - if (err) 479 - goto out; 480 - } 481 - 482 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP); 483 - 484 - return 0; 485 - 486 - out: 487 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error"); 488 - return err; 489 - } 490 - 491 - static int 492 - sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st) 493 - { 494 - struct flow_match_ip mt; 495 - int err = 0; 496 - 497 - flow_rule_match_ip(st->frule, &mt); 498 - 499 - if (mt.mask->tos) { 500 - err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS, 501 - mt.key->tos, 502 - mt.mask->tos); 503 - if (err) 504 - goto out; 505 - } 506 - 507 - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP); 508 - 509 - return err; 510 - 511 - out: 512 - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error"); 513 - return err; 514 - } 515 - 516 - static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = { 517 - [FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage, 518 - [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage, 519 - 
[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage, 235 + static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = { 236 + [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage, 237 + [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage, 238 + [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage, 520 239 [FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage, 521 - [FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage, 240 + [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage, 522 241 [FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage, 523 242 [FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage, 524 243 [FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage, 525 - [FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage, 526 - [FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage, 527 - [FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage, 244 + [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage, 245 + [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage, 246 + [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage, 528 247 }; 529 248 530 249 static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco, ··· 196 587 struct vcap_rule *vrule, 197 588 u16 *l3_proto) 198 589 { 199 - struct sparx5_tc_flower_parse_usage state = { 590 + struct vcap_tc_flower_parse_usage state = { 200 591 .fco = fco, 201 592 .vrule = vrule, 202 593 .l3_proto = ETH_P_ALL,
+1 -1
drivers/net/ethernet/microchip/vcap/Makefile
··· 7 7 obj-$(CONFIG_VCAP_KUNIT_TEST) += vcap_model_kunit.o 8 8 vcap-$(CONFIG_DEBUG_FS) += vcap_api_debugfs.o 9 9 10 - vcap-y += vcap_api.o 10 + vcap-y += vcap_api.o vcap_tc.o
+409
drivers/net/ethernet/microchip/vcap/vcap_tc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Microchip VCAP TC 3 + * 4 + * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries. 5 + */ 6 + 7 + #include <net/flow_offload.h> 8 + #include <net/ipv6.h> 9 + #include <net/tcp.h> 10 + 11 + #include "vcap_api_client.h" 12 + #include "vcap_tc.h" 13 + 14 + enum vcap_is2_arp_opcode { 15 + VCAP_IS2_ARP_REQUEST, 16 + VCAP_IS2_ARP_REPLY, 17 + VCAP_IS2_RARP_REQUEST, 18 + VCAP_IS2_RARP_REPLY, 19 + }; 20 + 21 + enum vcap_arp_opcode { 22 + VCAP_ARP_OP_RESERVED, 23 + VCAP_ARP_OP_REQUEST, 24 + VCAP_ARP_OP_REPLY, 25 + }; 26 + 27 + int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st) 28 + { 29 + enum vcap_key_field smac_key = VCAP_KF_L2_SMAC; 30 + enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC; 31 + struct flow_match_eth_addrs match; 32 + struct vcap_u48_key smac, dmac; 33 + int err = 0; 34 + 35 + flow_rule_match_eth_addrs(st->frule, &match); 36 + 37 + if (!is_zero_ether_addr(match.mask->src)) { 38 + vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN); 39 + vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN); 40 + err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac); 41 + if (err) 42 + goto out; 43 + } 44 + 45 + if (!is_zero_ether_addr(match.mask->dst)) { 46 + vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN); 47 + vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN); 48 + err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac); 49 + if (err) 50 + goto out; 51 + } 52 + 53 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS); 54 + 55 + return err; 56 + 57 + out: 58 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error"); 59 + return err; 60 + } 61 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ethaddr_usage); 62 + 63 + int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st) 64 + { 65 + int err = 0; 66 + 67 + if (st->l3_proto == ETH_P_IP) { 68 + struct flow_match_ipv4_addrs mt; 69 + 70 + flow_rule_match_ipv4_addrs(st->frule, &mt); 71 + 
if (mt.mask->src) { 72 + err = vcap_rule_add_key_u32(st->vrule, 73 + VCAP_KF_L3_IP4_SIP, 74 + be32_to_cpu(mt.key->src), 75 + be32_to_cpu(mt.mask->src)); 76 + if (err) 77 + goto out; 78 + } 79 + if (mt.mask->dst) { 80 + err = vcap_rule_add_key_u32(st->vrule, 81 + VCAP_KF_L3_IP4_DIP, 82 + be32_to_cpu(mt.key->dst), 83 + be32_to_cpu(mt.mask->dst)); 84 + if (err) 85 + goto out; 86 + } 87 + } 88 + 89 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS); 90 + 91 + return err; 92 + 93 + out: 94 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error"); 95 + return err; 96 + } 97 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv4_usage); 98 + 99 + int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st) 100 + { 101 + int err = 0; 102 + 103 + if (st->l3_proto == ETH_P_IPV6) { 104 + struct flow_match_ipv6_addrs mt; 105 + struct vcap_u128_key sip; 106 + struct vcap_u128_key dip; 107 + 108 + flow_rule_match_ipv6_addrs(st->frule, &mt); 109 + /* Check if address masks are non-zero */ 110 + if (!ipv6_addr_any(&mt.mask->src)) { 111 + vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16); 112 + vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16); 113 + err = vcap_rule_add_key_u128(st->vrule, 114 + VCAP_KF_L3_IP6_SIP, &sip); 115 + if (err) 116 + goto out; 117 + } 118 + if (!ipv6_addr_any(&mt.mask->dst)) { 119 + vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16); 120 + vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16); 121 + err = vcap_rule_add_key_u128(st->vrule, 122 + VCAP_KF_L3_IP6_DIP, &dip); 123 + if (err) 124 + goto out; 125 + } 126 + } 127 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS); 128 + return err; 129 + out: 130 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error"); 131 + return err; 132 + } 133 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv6_usage); 134 + 135 + int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st) 136 + { 137 + struct flow_match_ports mt; 138 + u16 value, mask; 
139 + int err = 0; 140 + 141 + flow_rule_match_ports(st->frule, &mt); 142 + 143 + if (mt.mask->src) { 144 + value = be16_to_cpu(mt.key->src); 145 + mask = be16_to_cpu(mt.mask->src); 146 + err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value, 147 + mask); 148 + if (err) 149 + goto out; 150 + } 151 + 152 + if (mt.mask->dst) { 153 + value = be16_to_cpu(mt.key->dst); 154 + mask = be16_to_cpu(mt.mask->dst); 155 + err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value, 156 + mask); 157 + if (err) 158 + goto out; 159 + } 160 + 161 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS); 162 + 163 + return err; 164 + 165 + out: 166 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error"); 167 + return err; 168 + } 169 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_portnum_usage); 170 + 171 + int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st) 172 + { 173 + enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0; 174 + enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0; 175 + struct flow_match_vlan mt; 176 + u16 tpid; 177 + int err; 178 + 179 + flow_rule_match_cvlan(st->frule, &mt); 180 + 181 + tpid = be16_to_cpu(mt.key->vlan_tpid); 182 + 183 + if (tpid == ETH_P_8021Q) { 184 + vid_key = VCAP_KF_8021Q_VID1; 185 + pcp_key = VCAP_KF_8021Q_PCP1; 186 + } 187 + 188 + if (mt.mask->vlan_id) { 189 + err = vcap_rule_add_key_u32(st->vrule, vid_key, 190 + mt.key->vlan_id, 191 + mt.mask->vlan_id); 192 + if (err) 193 + goto out; 194 + } 195 + 196 + if (mt.mask->vlan_priority) { 197 + err = vcap_rule_add_key_u32(st->vrule, pcp_key, 198 + mt.key->vlan_priority, 199 + mt.mask->vlan_priority); 200 + if (err) 201 + goto out; 202 + } 203 + 204 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN); 205 + 206 + return 0; 207 + out: 208 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error"); 209 + return err; 210 + } 211 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_cvlan_usage); 212 + 213 + int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage 
*st, 214 + enum vcap_key_field vid_key, 215 + enum vcap_key_field pcp_key) 216 + { 217 + struct flow_match_vlan mt; 218 + int err; 219 + 220 + flow_rule_match_vlan(st->frule, &mt); 221 + 222 + if (mt.mask->vlan_id) { 223 + err = vcap_rule_add_key_u32(st->vrule, vid_key, 224 + mt.key->vlan_id, 225 + mt.mask->vlan_id); 226 + if (err) 227 + goto out; 228 + } 229 + 230 + if (mt.mask->vlan_priority) { 231 + err = vcap_rule_add_key_u32(st->vrule, pcp_key, 232 + mt.key->vlan_priority, 233 + mt.mask->vlan_priority); 234 + if (err) 235 + goto out; 236 + } 237 + 238 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN); 239 + 240 + return 0; 241 + out: 242 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error"); 243 + return err; 244 + } 245 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_vlan_usage); 246 + 247 + int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st) 248 + { 249 + struct flow_match_tcp mt; 250 + u16 tcp_flags_mask; 251 + u16 tcp_flags_key; 252 + enum vcap_bit val; 253 + int err = 0; 254 + 255 + flow_rule_match_tcp(st->frule, &mt); 256 + tcp_flags_key = be16_to_cpu(mt.key->flags); 257 + tcp_flags_mask = be16_to_cpu(mt.mask->flags); 258 + 259 + if (tcp_flags_mask & TCPHDR_FIN) { 260 + val = VCAP_BIT_0; 261 + if (tcp_flags_key & TCPHDR_FIN) 262 + val = VCAP_BIT_1; 263 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val); 264 + if (err) 265 + goto out; 266 + } 267 + 268 + if (tcp_flags_mask & TCPHDR_SYN) { 269 + val = VCAP_BIT_0; 270 + if (tcp_flags_key & TCPHDR_SYN) 271 + val = VCAP_BIT_1; 272 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val); 273 + if (err) 274 + goto out; 275 + } 276 + 277 + if (tcp_flags_mask & TCPHDR_RST) { 278 + val = VCAP_BIT_0; 279 + if (tcp_flags_key & TCPHDR_RST) 280 + val = VCAP_BIT_1; 281 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val); 282 + if (err) 283 + goto out; 284 + } 285 + 286 + if (tcp_flags_mask & TCPHDR_PSH) { 287 + val = VCAP_BIT_0; 288 + if (tcp_flags_key & 
TCPHDR_PSH) 289 + val = VCAP_BIT_1; 290 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val); 291 + if (err) 292 + goto out; 293 + } 294 + 295 + if (tcp_flags_mask & TCPHDR_ACK) { 296 + val = VCAP_BIT_0; 297 + if (tcp_flags_key & TCPHDR_ACK) 298 + val = VCAP_BIT_1; 299 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val); 300 + if (err) 301 + goto out; 302 + } 303 + 304 + if (tcp_flags_mask & TCPHDR_URG) { 305 + val = VCAP_BIT_0; 306 + if (tcp_flags_key & TCPHDR_URG) 307 + val = VCAP_BIT_1; 308 + err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val); 309 + if (err) 310 + goto out; 311 + } 312 + 313 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP); 314 + 315 + return err; 316 + 317 + out: 318 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error"); 319 + return err; 320 + } 321 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_tcp_usage); 322 + 323 + int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st) 324 + { 325 + struct flow_match_arp mt; 326 + u16 value, mask; 327 + u32 ipval, ipmsk; 328 + int err; 329 + 330 + flow_rule_match_arp(st->frule, &mt); 331 + 332 + if (mt.mask->op) { 333 + mask = 0x3; 334 + if (st->l3_proto == ETH_P_ARP) { 335 + value = mt.key->op == VCAP_ARP_OP_REQUEST ? 336 + VCAP_IS2_ARP_REQUEST : 337 + VCAP_IS2_ARP_REPLY; 338 + } else { /* RARP */ 339 + value = mt.key->op == VCAP_ARP_OP_REQUEST ? 
340 + VCAP_IS2_RARP_REQUEST : 341 + VCAP_IS2_RARP_REPLY; 342 + } 343 + err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE, 344 + value, mask); 345 + if (err) 346 + goto out; 347 + } 348 + 349 + /* The IS2 ARP keyset does not support ARP hardware addresses */ 350 + if (!is_zero_ether_addr(mt.mask->sha) || 351 + !is_zero_ether_addr(mt.mask->tha)) { 352 + err = -EINVAL; 353 + goto out; 354 + } 355 + 356 + if (mt.mask->sip) { 357 + ipval = be32_to_cpu((__force __be32)mt.key->sip); 358 + ipmsk = be32_to_cpu((__force __be32)mt.mask->sip); 359 + 360 + err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP, 361 + ipval, ipmsk); 362 + if (err) 363 + goto out; 364 + } 365 + 366 + if (mt.mask->tip) { 367 + ipval = be32_to_cpu((__force __be32)mt.key->tip); 368 + ipmsk = be32_to_cpu((__force __be32)mt.mask->tip); 369 + 370 + err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP, 371 + ipval, ipmsk); 372 + if (err) 373 + goto out; 374 + } 375 + 376 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP); 377 + 378 + return 0; 379 + 380 + out: 381 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error"); 382 + return err; 383 + } 384 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_arp_usage); 385 + 386 + int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st) 387 + { 388 + struct flow_match_ip mt; 389 + int err = 0; 390 + 391 + flow_rule_match_ip(st->frule, &mt); 392 + 393 + if (mt.mask->tos) { 394 + err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS, 395 + mt.key->tos, 396 + mt.mask->tos); 397 + if (err) 398 + goto out; 399 + } 400 + 401 + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP); 402 + 403 + return err; 404 + 405 + out: 406 + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error"); 407 + return err; 408 + } 409 + EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ip_usage);
+31
drivers/net/ethernet/microchip/vcap/vcap_tc.h
··· 1 + /* SPDX-License-Identifier: BSD-3-Clause */ 2 + /* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries. 3 + * Microchip VCAP TC 4 + */ 5 + 6 + #ifndef __VCAP_TC__ 7 + #define __VCAP_TC__ 8 + 9 + struct vcap_tc_flower_parse_usage { 10 + struct flow_cls_offload *fco; 11 + struct flow_rule *frule; 12 + struct vcap_rule *vrule; 13 + struct vcap_admin *admin; 14 + u16 l3_proto; 15 + u8 l4_proto; 16 + unsigned int used_keys; 17 + }; 18 + 19 + int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st); 20 + int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st); 21 + int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st); 22 + int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st); 23 + int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st); 24 + int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st, 25 + enum vcap_key_field vid_key, 26 + enum vcap_key_field pcp_key); 27 + int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st); 28 + int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st); 29 + int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st); 30 + 31 + #endif /* __VCAP_TC__ */