1// SPDX-License-Identifier: GPL-2.0-only
2/****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2019 Solarflare Communications Inc.
5 * Copyright 2020-2022 Xilinx Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation, incorporated herein by reference.
10 */
11
12#include <net/pkt_cls.h>
13#include <net/vxlan.h>
14#include <net/geneve.h>
15#include "tc.h"
16#include "tc_bindings.h"
17#include "tc_encap_actions.h"
18#include "mae.h"
19#include "ef100_rep.h"
20#include "efx.h"
21
22enum efx_encap_type efx_tc_indr_netdev_type(struct net_device *net_dev)
23{
24 if (netif_is_vxlan(net_dev))
25 return EFX_ENCAP_TYPE_VXLAN;
26 if (netif_is_geneve(net_dev))
27 return EFX_ENCAP_TYPE_GENEVE;
28
29 return EFX_ENCAP_TYPE_NONE;
30}
31
32#define EFX_EFV_PF NULL
33/* Look up the representor information (efv) for a device.
34 * May return NULL for the PF (us), or an error pointer for a device that
35 * isn't supported as a TC offload endpoint
36 */
37struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
38 struct net_device *dev)
39{
40 struct efx_rep *efv;
41
42 if (!dev)
43 return ERR_PTR(-EOPNOTSUPP);
44 /* Is it us (the PF)? */
45 if (dev == efx->net_dev)
46 return EFX_EFV_PF;
47 /* Is it an efx vfrep at all? */
48 if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
49 return ERR_PTR(-EOPNOTSUPP);
50 /* Is it ours? We don't support TC rules that include another
51 * EF100's netdevices (not even on another port of the same NIC).
52 */
53 efv = netdev_priv(dev);
54 if (efv->parent != efx)
55 return ERR_PTR(-EOPNOTSUPP);
56 return efv;
57}
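/* Typical caller pattern (illustrative): since EFX_EFV_PF is NULL, callers
 * must check for an error pointer before the NULL (PF) case, e.g.
 *	efv = efx_tc_flower_lookup_efv(efx, dev);
 *	if (IS_ERR(efv))
 *		return PTR_ERR(efv);
 *	if (!efv)
 *		... handle the PF (us) ...
 */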
58
/* Convert a device handle (efv) into the m-port of the internal endpoint (PF uplink or VF) */
60static s64 efx_tc_flower_internal_mport(struct efx_nic *efx, struct efx_rep *efv)
61{
62 u32 mport;
63
64 if (IS_ERR(efv))
65 return PTR_ERR(efv);
66 if (!efv) /* device is PF (us) */
67 efx_mae_mport_uplink(efx, &mport);
68 else /* device is repr */
69 efx_mae_mport_mport(efx, efv->mport, &mport);
70 return mport;
71}
72
/* Convert a device handle (efv) into the m-port of the external endpoint (wire or VF) */
74s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
75{
76 u32 mport;
77
78 if (IS_ERR(efv))
79 return PTR_ERR(efv);
80 if (!efv) /* device is PF (us) */
81 efx_mae_mport_wire(efx, &mport);
82 else /* device is repr */
83 efx_mae_mport_mport(efx, efv->mport, &mport);
84 return mport;
85}
86
87static const struct rhashtable_params efx_tc_encap_match_ht_params = {
88 .key_len = offsetof(struct efx_tc_encap_match, linkage),
89 .key_offset = 0,
90 .head_offset = offsetof(struct efx_tc_encap_match, linkage),
91};
92
93static const struct rhashtable_params efx_tc_match_action_ht_params = {
94 .key_len = sizeof(unsigned long),
95 .key_offset = offsetof(struct efx_tc_flow_rule, cookie),
96 .head_offset = offsetof(struct efx_tc_flow_rule, linkage),
97};
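/* Note: encap matches are keyed on every field of the entry that precedes
 * 'linkage' (i.e. the whole tuple being matched), whereas flow rules are
 * keyed solely on the unsigned long TC cookie stored at 'cookie'.
 */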
98
99static void efx_tc_free_action_set(struct efx_nic *efx,
100 struct efx_tc_action_set *act, bool in_hw)
101{
	/* Failure paths that call this on the 'cursor' action set pass
	 * in_hw=false, because if the alloc had succeeded we'd have put it in
	 * acts.list and would not still have it in act.
	 */
106 if (in_hw) {
107 efx_mae_free_action_set(efx, act->fw_id);
108 /* in_hw is true iff we are on an acts.list; make sure to
109 * remove ourselves from that list before we are freed.
110 */
111 list_del(&act->list);
112 }
113 if (act->count) {
114 spin_lock_bh(&act->count->cnt->lock);
115 if (!list_empty(&act->count_user))
116 list_del(&act->count_user);
117 spin_unlock_bh(&act->count->cnt->lock);
118 efx_tc_flower_put_counter_index(efx, act->count);
119 }
120 if (act->encap_md) {
121 list_del(&act->encap_user);
122 efx_tc_flower_release_encap_md(efx, act->encap_md);
123 }
124 kfree(act);
125}
126
127static void efx_tc_free_action_set_list(struct efx_nic *efx,
128 struct efx_tc_action_set_list *acts,
129 bool in_hw)
130{
131 struct efx_tc_action_set *act, *next;
132
133 /* Failure paths set in_hw=false, because usually the acts didn't get
134 * to efx_mae_alloc_action_set_list(); if they did, the failure tree
135 * has a separate efx_mae_free_action_set_list() before calling us.
136 */
137 if (in_hw)
138 efx_mae_free_action_set_list(efx, acts);
139 /* Any act that's on the list will be in_hw even if the list isn't */
140 list_for_each_entry_safe(act, next, &acts->list, list)
141 efx_tc_free_action_set(efx, act, true);
142 /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
143}
144
145/* Boilerplate for the simple 'copy a field' cases */
146#define _MAP_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \
147if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_##_name)) { \
148 struct flow_match_##_type fm; \
149 \
150 flow_rule_match_##_tcget(rule, &fm); \
151 match->value._field = fm.key->_tcfield; \
152 match->mask._field = fm.mask->_tcfield; \
153}
154#define MAP_KEY_AND_MASK(_name, _type, _tcfield, _field) \
155 _MAP_KEY_AND_MASK(_name, _type, _type, _tcfield, _field)
156#define MAP_ENC_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \
157 _MAP_KEY_AND_MASK(ENC_##_name, _type, _tcget, _tcfield, _field)
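
/* For illustration: MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto), as
 * used below, expands to
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		struct flow_match_basic fm;
 *
 *		flow_rule_match_basic(rule, &fm);
 *		match->value.eth_proto = fm.key->n_proto;
 *		match->mask.eth_proto = fm.mask->n_proto;
 *	}
 */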
158
159static int efx_tc_flower_parse_match(struct efx_nic *efx,
160 struct flow_rule *rule,
161 struct efx_tc_match *match,
162 struct netlink_ext_ack *extack)
163{
164 struct flow_dissector *dissector = rule->match.dissector;
165 unsigned char ipv = 0;
166
167 /* Owing to internal TC infelicities, the IPV6_ADDRS key might be set
168 * even on IPv4 filters; so rather than relying on dissector->used_keys
169 * we check the addr_type in the CONTROL key. If we don't find it (or
170 * it's masked, which should never happen), we treat both IPV4_ADDRS
171 * and IPV6_ADDRS as absent.
172 */
173 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
174 struct flow_match_control fm;
175
176 flow_rule_match_control(rule, &fm);
177 if (IS_ALL_ONES(fm.mask->addr_type))
178 switch (fm.key->addr_type) {
179 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
180 ipv = 4;
181 break;
182 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
183 ipv = 6;
184 break;
185 default:
186 break;
187 }
188
189 if (fm.mask->flags & FLOW_DIS_IS_FRAGMENT) {
190 match->value.ip_frag = fm.key->flags & FLOW_DIS_IS_FRAGMENT;
191 match->mask.ip_frag = true;
192 }
193 if (fm.mask->flags & FLOW_DIS_FIRST_FRAG) {
194 match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG;
195 match->mask.ip_firstfrag = true;
196 }
197 if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
198 NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x",
199 fm.mask->flags);
200 return -EOPNOTSUPP;
201 }
202 }
203 if (dissector->used_keys &
204 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
205 BIT(FLOW_DISSECTOR_KEY_BASIC) |
206 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
207 BIT(FLOW_DISSECTOR_KEY_VLAN) |
208 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
209 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
210 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
211 BIT(FLOW_DISSECTOR_KEY_PORTS) |
212 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
213 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
214 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
215 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
216 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
217 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
218 BIT(FLOW_DISSECTOR_KEY_TCP) |
219 BIT(FLOW_DISSECTOR_KEY_IP))) {
220 NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x",
221 dissector->used_keys);
222 return -EOPNOTSUPP;
223 }
224
225 MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto);
226 /* Make sure we're IP if any L3/L4 keys used. */
227 if (!IS_ALL_ONES(match->mask.eth_proto) ||
228 !(match->value.eth_proto == htons(ETH_P_IP) ||
229 match->value.eth_proto == htons(ETH_P_IPV6)))
230 if (dissector->used_keys &
231 (BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
232 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
233 BIT(FLOW_DISSECTOR_KEY_PORTS) |
234 BIT(FLOW_DISSECTOR_KEY_IP) |
235 BIT(FLOW_DISSECTOR_KEY_TCP))) {
236 NL_SET_ERR_MSG_FMT_MOD(extack, "L3/L4 flower keys %#x require protocol ipv[46]",
237 dissector->used_keys);
238 return -EINVAL;
239 }
240
241 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
242 struct flow_match_vlan fm;
243
244 flow_rule_match_vlan(rule, &fm);
245 if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) {
246 match->value.vlan_proto[0] = fm.key->vlan_tpid;
247 match->mask.vlan_proto[0] = fm.mask->vlan_tpid;
248 match->value.vlan_tci[0] = cpu_to_be16(fm.key->vlan_priority << 13 |
249 fm.key->vlan_id);
250 match->mask.vlan_tci[0] = cpu_to_be16(fm.mask->vlan_priority << 13 |
251 fm.mask->vlan_id);
252 }
253 }
254
255 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
256 struct flow_match_vlan fm;
257
258 flow_rule_match_cvlan(rule, &fm);
259 if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) {
260 match->value.vlan_proto[1] = fm.key->vlan_tpid;
261 match->mask.vlan_proto[1] = fm.mask->vlan_tpid;
262 match->value.vlan_tci[1] = cpu_to_be16(fm.key->vlan_priority << 13 |
263 fm.key->vlan_id);
264 match->mask.vlan_tci[1] = cpu_to_be16(fm.mask->vlan_priority << 13 |
265 fm.mask->vlan_id);
266 }
267 }
268
269 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
270 struct flow_match_eth_addrs fm;
271
272 flow_rule_match_eth_addrs(rule, &fm);
273 ether_addr_copy(match->value.eth_saddr, fm.key->src);
274 ether_addr_copy(match->value.eth_daddr, fm.key->dst);
275 ether_addr_copy(match->mask.eth_saddr, fm.mask->src);
276 ether_addr_copy(match->mask.eth_daddr, fm.mask->dst);
277 }
278
279 MAP_KEY_AND_MASK(BASIC, basic, ip_proto, ip_proto);
280 /* Make sure we're TCP/UDP if any L4 keys used. */
281 if ((match->value.ip_proto != IPPROTO_UDP &&
282 match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto))
283 if (dissector->used_keys &
284 (BIT(FLOW_DISSECTOR_KEY_PORTS) |
285 BIT(FLOW_DISSECTOR_KEY_TCP))) {
286 NL_SET_ERR_MSG_FMT_MOD(extack, "L4 flower keys %#x require ipproto udp or tcp",
287 dissector->used_keys);
288 return -EINVAL;
289 }
290 MAP_KEY_AND_MASK(IP, ip, tos, ip_tos);
291 MAP_KEY_AND_MASK(IP, ip, ttl, ip_ttl);
292 if (ipv == 4) {
293 MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, src, src_ip);
294 MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, dst, dst_ip);
295 }
296#ifdef CONFIG_IPV6
297 else if (ipv == 6) {
298 MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, src, src_ip6);
299 MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, dst, dst_ip6);
300 }
301#endif
302 MAP_KEY_AND_MASK(PORTS, ports, src, l4_sport);
303 MAP_KEY_AND_MASK(PORTS, ports, dst, l4_dport);
304 MAP_KEY_AND_MASK(TCP, tcp, flags, tcp_flags);
305 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
306 struct flow_match_control fm;
307
308 flow_rule_match_enc_control(rule, &fm);
309 if (fm.mask->flags) {
310 NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on enc_control.flags %#x",
311 fm.mask->flags);
312 return -EOPNOTSUPP;
313 }
314 if (!IS_ALL_ONES(fm.mask->addr_type)) {
315 NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported enc addr_type mask %u (key %u)",
316 fm.mask->addr_type,
317 fm.key->addr_type);
318 return -EOPNOTSUPP;
319 }
320 switch (fm.key->addr_type) {
321 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
322 MAP_ENC_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, enc_ipv4_addrs,
323 src, enc_src_ip);
324 MAP_ENC_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, enc_ipv4_addrs,
325 dst, enc_dst_ip);
326 break;
327#ifdef CONFIG_IPV6
328 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
329 MAP_ENC_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, enc_ipv6_addrs,
330 src, enc_src_ip6);
331 MAP_ENC_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, enc_ipv6_addrs,
332 dst, enc_dst_ip6);
333 break;
334#endif
335 default:
336 NL_SET_ERR_MSG_FMT_MOD(extack,
337 "Unsupported enc addr_type %u (supported are IPv4, IPv6)",
338 fm.key->addr_type);
339 return -EOPNOTSUPP;
340 }
341 MAP_ENC_KEY_AND_MASK(IP, ip, enc_ip, tos, enc_ip_tos);
342 MAP_ENC_KEY_AND_MASK(IP, ip, enc_ip, ttl, enc_ip_ttl);
343 MAP_ENC_KEY_AND_MASK(PORTS, ports, enc_ports, src, enc_sport);
344 MAP_ENC_KEY_AND_MASK(PORTS, ports, enc_ports, dst, enc_dport);
345 MAP_ENC_KEY_AND_MASK(KEYID, enc_keyid, enc_keyid, keyid, enc_keyid);
346 } else if (dissector->used_keys &
347 (BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
348 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
349 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
350 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
351 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
352 NL_SET_ERR_MSG_FMT_MOD(extack, "Flower enc keys require enc_control (keys: %#x)",
353 dissector->used_keys);
354 return -EOPNOTSUPP;
355 }
356
357 return 0;
358}
359
360static void efx_tc_flower_release_encap_match(struct efx_nic *efx,
361 struct efx_tc_encap_match *encap)
362{
363 int rc;
364
365 if (!refcount_dec_and_test(&encap->ref))
366 return; /* still in use */
367
368 if (encap->type == EFX_TC_EM_DIRECT) {
369 rc = efx_mae_unregister_encap_match(efx, encap);
370 if (rc)
371 /* Display message but carry on and remove entry from our
372 * SW tables, because there's not much we can do about it.
373 */
374 netif_err(efx, drv, efx->net_dev,
375 "Failed to release encap match %#x, rc %d\n",
376 encap->fw_id, rc);
377 }
378 rhashtable_remove_fast(&efx->tc->encap_match_ht, &encap->linkage,
379 efx_tc_encap_match_ht_params);
380 if (encap->pseudo)
381 efx_tc_flower_release_encap_match(efx, encap->pseudo);
382 kfree(encap);
383}
384
385static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
386 struct efx_tc_match *match,
387 enum efx_encap_type type,
388 enum efx_tc_em_pseudo_type em_type,
389 u8 child_ip_tos_mask,
390 __be16 child_udp_sport_mask,
391 struct netlink_ext_ack *extack)
392{
393 struct efx_tc_encap_match *encap, *old, *pseudo = NULL;
394 bool ipv6 = false;
395 int rc;
396
397 /* We require that the socket-defining fields (IP addrs and UDP dest
398 * port) are present and exact-match. Other fields may only be used
399 * if the field-set (and any masks) are the same for all encap
400 * matches on the same <sip,dip,dport> tuple; this is enforced by
401 * pseudo encap matches.
402 */
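	/* For example (illustrative): two decap rules that differ only in
	 * enc_ip_tos (both with mask 0xff) on the same <sip,dip,dport> tuple
	 * will share one pseudo(MASK) entry recording ip_tos_mask == 0xff;
	 * a third rule using a different ToS mask on that tuple is then
	 * rejected as conflicting.
	 */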
403 if (match->mask.enc_dst_ip | match->mask.enc_src_ip) {
404 if (!IS_ALL_ONES(match->mask.enc_dst_ip)) {
405 NL_SET_ERR_MSG_MOD(extack,
406 "Egress encap match is not exact on dst IP address");
407 return -EOPNOTSUPP;
408 }
409 if (!IS_ALL_ONES(match->mask.enc_src_ip)) {
410 NL_SET_ERR_MSG_MOD(extack,
411 "Egress encap match is not exact on src IP address");
412 return -EOPNOTSUPP;
413 }
414#ifdef CONFIG_IPV6
415 if (!ipv6_addr_any(&match->mask.enc_dst_ip6) ||
416 !ipv6_addr_any(&match->mask.enc_src_ip6)) {
417 NL_SET_ERR_MSG_MOD(extack,
418 "Egress encap match on both IPv4 and IPv6, don't understand");
419 return -EOPNOTSUPP;
420 }
421 } else {
422 ipv6 = true;
423 if (!efx_ipv6_addr_all_ones(&match->mask.enc_dst_ip6)) {
424 NL_SET_ERR_MSG_MOD(extack,
425 "Egress encap match is not exact on dst IP address");
426 return -EOPNOTSUPP;
427 }
428 if (!efx_ipv6_addr_all_ones(&match->mask.enc_src_ip6)) {
429 NL_SET_ERR_MSG_MOD(extack,
430 "Egress encap match is not exact on src IP address");
431 return -EOPNOTSUPP;
432 }
433#endif
434 }
435 if (!IS_ALL_ONES(match->mask.enc_dport)) {
436 NL_SET_ERR_MSG_MOD(extack, "Egress encap match is not exact on dst UDP port");
437 return -EOPNOTSUPP;
438 }
439 if (match->mask.enc_sport || match->mask.enc_ip_tos) {
440 struct efx_tc_match pmatch = *match;
441
442 if (em_type == EFX_TC_EM_PSEUDO_MASK) { /* can't happen */
443 NL_SET_ERR_MSG_MOD(extack, "Bad recursion in egress encap match handler");
444 return -EOPNOTSUPP;
445 }
446 pmatch.value.enc_ip_tos = 0;
447 pmatch.mask.enc_ip_tos = 0;
448 pmatch.value.enc_sport = 0;
449 pmatch.mask.enc_sport = 0;
450 rc = efx_tc_flower_record_encap_match(efx, &pmatch, type,
451 EFX_TC_EM_PSEUDO_MASK,
452 match->mask.enc_ip_tos,
453 match->mask.enc_sport,
454 extack);
455 if (rc)
456 return rc;
457 pseudo = pmatch.encap;
458 }
459 if (match->mask.enc_ip_ttl) {
460 NL_SET_ERR_MSG_MOD(extack, "Egress encap match on IP TTL not supported");
461 rc = -EOPNOTSUPP;
462 goto fail_pseudo;
463 }
464
465 rc = efx_mae_check_encap_match_caps(efx, ipv6, match->mask.enc_ip_tos,
466 match->mask.enc_sport, extack);
467 if (rc)
468 goto fail_pseudo;
469
470 encap = kzalloc(sizeof(*encap), GFP_USER);
471 if (!encap) {
472 rc = -ENOMEM;
473 goto fail_pseudo;
474 }
475 encap->src_ip = match->value.enc_src_ip;
476 encap->dst_ip = match->value.enc_dst_ip;
477#ifdef CONFIG_IPV6
478 encap->src_ip6 = match->value.enc_src_ip6;
479 encap->dst_ip6 = match->value.enc_dst_ip6;
480#endif
481 encap->udp_dport = match->value.enc_dport;
482 encap->tun_type = type;
483 encap->ip_tos = match->value.enc_ip_tos;
484 encap->ip_tos_mask = match->mask.enc_ip_tos;
485 encap->child_ip_tos_mask = child_ip_tos_mask;
486 encap->udp_sport = match->value.enc_sport;
487 encap->udp_sport_mask = match->mask.enc_sport;
488 encap->child_udp_sport_mask = child_udp_sport_mask;
489 encap->type = em_type;
490 encap->pseudo = pseudo;
491 old = rhashtable_lookup_get_insert_fast(&efx->tc->encap_match_ht,
492 &encap->linkage,
493 efx_tc_encap_match_ht_params);
494 if (old) {
495 /* don't need our new entry */
496 kfree(encap);
497 if (pseudo) /* don't need our new pseudo either */
498 efx_tc_flower_release_encap_match(efx, pseudo);
499 /* check old and new em_types are compatible */
500 switch (old->type) {
501 case EFX_TC_EM_DIRECT:
502 /* old EM is in hardware, so mustn't overlap with a
503 * pseudo, but may be shared with another direct EM
504 */
505 if (em_type == EFX_TC_EM_DIRECT)
506 break;
507 NL_SET_ERR_MSG_MOD(extack, "Pseudo encap match conflicts with existing direct entry");
508 return -EEXIST;
509 case EFX_TC_EM_PSEUDO_MASK:
510 /* old EM is protecting a ToS- or src port-qualified
511 * filter, so may only be shared with another pseudo
512 * for the same ToS and src port masks.
513 */
514 if (em_type != EFX_TC_EM_PSEUDO_MASK) {
515 NL_SET_ERR_MSG_FMT_MOD(extack,
516 "%s encap match conflicts with existing pseudo(MASK) entry",
517 em_type ? "Pseudo" : "Direct");
518 return -EEXIST;
519 }
520 if (child_ip_tos_mask != old->child_ip_tos_mask) {
521 NL_SET_ERR_MSG_FMT_MOD(extack,
522 "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x",
523 child_ip_tos_mask,
524 old->child_ip_tos_mask);
525 return -EEXIST;
526 }
527 if (child_udp_sport_mask != old->child_udp_sport_mask) {
528 NL_SET_ERR_MSG_FMT_MOD(extack,
529 "Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x",
530 child_udp_sport_mask,
531 old->child_udp_sport_mask);
532 return -EEXIST;
533 }
534 break;
535 default: /* Unrecognised pseudo-type. Just say no */
536 NL_SET_ERR_MSG_FMT_MOD(extack,
537 "%s encap match conflicts with existing pseudo(%d) entry",
538 em_type ? "Pseudo" : "Direct",
539 old->type);
540 return -EEXIST;
541 }
542 /* check old and new tun_types are compatible */
543 if (old->tun_type != type) {
544 NL_SET_ERR_MSG_FMT_MOD(extack,
545 "Egress encap match with conflicting tun_type %u != %u",
546 old->tun_type, type);
547 return -EEXIST;
548 }
549 if (!refcount_inc_not_zero(&old->ref))
550 return -EAGAIN;
551 /* existing entry found */
552 encap = old;
553 } else {
554 if (em_type == EFX_TC_EM_DIRECT) {
555 rc = efx_mae_register_encap_match(efx, encap);
556 if (rc) {
557 NL_SET_ERR_MSG_MOD(extack, "Failed to record egress encap match in HW");
558 goto fail;
559 }
560 }
561 refcount_set(&encap->ref, 1);
562 }
563 match->encap = encap;
564 return 0;
565fail:
566 rhashtable_remove_fast(&efx->tc->encap_match_ht, &encap->linkage,
567 efx_tc_encap_match_ht_params);
568 kfree(encap);
569fail_pseudo:
570 if (pseudo)
571 efx_tc_flower_release_encap_match(efx, pseudo);
572 return rc;
573}
574
575static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
576{
577 efx_mae_delete_rule(efx, rule->fw_id);
578
579 /* Release entries in subsidiary tables */
580 efx_tc_free_action_set_list(efx, &rule->acts, true);
581 if (rule->match.encap)
582 efx_tc_flower_release_encap_match(efx, rule->match.encap);
583 rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
584}
585
586static const char *efx_tc_encap_type_name(enum efx_encap_type typ)
587{
588 switch (typ) {
589 case EFX_ENCAP_TYPE_NONE:
590 return "none";
591 case EFX_ENCAP_TYPE_VXLAN:
592 return "vxlan";
593 case EFX_ENCAP_TYPE_GENEVE:
594 return "geneve";
595 default:
596 pr_warn_once("Unknown efx_encap_type %d encountered\n", typ);
597 return "unknown";
598 }
599}
600
601/* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */
602enum efx_tc_action_order {
603 EFX_TC_AO_DECAP,
604 EFX_TC_AO_VLAN_POP,
605 EFX_TC_AO_VLAN_PUSH,
606 EFX_TC_AO_COUNT,
607 EFX_TC_AO_ENCAP,
608 EFX_TC_AO_DELIVER
609};
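/* For example (illustrative): once an action set already contains a VLAN
 * push, a further EFX_TC_AO_VLAN_POP is refused (pops must precede pushes
 * within one action set), while EFX_TC_AO_ENCAP or EFX_TC_AO_DELIVER is
 * still allowed provided no encap or deliver has been recorded yet.
 */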
610/* Determine whether we can add @new action without violating order */
611static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
612 enum efx_tc_action_order new)
613{
614 switch (new) {
615 case EFX_TC_AO_DECAP:
616 if (act->decap)
617 return false;
618 fallthrough;
619 case EFX_TC_AO_VLAN_POP:
620 if (act->vlan_pop >= 2)
621 return false;
622 /* If we've already pushed a VLAN, we can't then pop it;
623 * the hardware would instead try to pop an existing VLAN
624 * before pushing the new one.
625 */
626 if (act->vlan_push)
627 return false;
628 fallthrough;
629 case EFX_TC_AO_VLAN_PUSH:
630 if (act->vlan_push >= 2)
631 return false;
632 fallthrough;
633 case EFX_TC_AO_COUNT:
634 if (act->count)
635 return false;
636 fallthrough;
637 case EFX_TC_AO_ENCAP:
638 if (act->encap_md)
639 return false;
640 fallthrough;
641 case EFX_TC_AO_DELIVER:
642 return !act->deliver;
643 default:
644 /* Bad caller. Whatever they wanted to do, say they can't. */
645 WARN_ON_ONCE(1);
646 return false;
647 }
648}
649
650static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
651 struct net_device *net_dev,
652 struct flow_cls_offload *tc)
653{
654 struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
655 struct netlink_ext_ack *extack = tc->common.extack;
656 struct efx_tc_flow_rule *rule = NULL, *old = NULL;
657 struct efx_tc_action_set *act = NULL;
658 bool found = false, uplinked = false;
659 const struct flow_action_entry *fa;
660 struct efx_tc_match match;
661 struct efx_rep *to_efv;
662 s64 rc;
663 int i;
664
665 /* Parse match */
666 memset(&match, 0, sizeof(match));
667 rc = efx_tc_flower_parse_match(efx, fr, &match, NULL);
668 if (rc)
669 return rc;
670 /* The rule as given to us doesn't specify a source netdevice.
671 * But, determining whether packets from a VF should match it is
672 * complicated, so leave those to the software slowpath: qualify
673 * the filter with source m-port == wire.
674 */
675 rc = efx_tc_flower_external_mport(efx, EFX_EFV_PF);
676 if (rc < 0) {
677 NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port for foreign filter");
678 return rc;
679 }
680 match.value.ingress_port = rc;
681 match.mask.ingress_port = ~0;
682
683 if (tc->common.chain_index) {
684 NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
685 return -EOPNOTSUPP;
686 }
687 match.mask.recirc_id = 0xff;
688
689 flow_action_for_each(i, fa, &fr->action) {
690 switch (fa->id) {
691 case FLOW_ACTION_REDIRECT:
692 case FLOW_ACTION_MIRRED: /* mirred means mirror here */
693 to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
694 if (IS_ERR(to_efv))
695 continue;
696 found = true;
697 break;
698 default:
699 break;
700 }
701 }
702 if (!found) { /* We don't care. */
703 netif_dbg(efx, drv, efx->net_dev,
704 "Ignoring foreign filter that doesn't egdev us\n");
705 return -EOPNOTSUPP;
706 }
707
708 rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
709 if (rc)
710 return rc;
711
712 if (efx_tc_match_is_encap(&match.mask)) {
713 enum efx_encap_type type;
714
715 type = efx_tc_indr_netdev_type(net_dev);
716 if (type == EFX_ENCAP_TYPE_NONE) {
717 NL_SET_ERR_MSG_MOD(extack,
718 "Egress encap match on unsupported tunnel device");
719 return -EOPNOTSUPP;
720 }
721
722 rc = efx_mae_check_encap_type_supported(efx, type);
723 if (rc) {
724 NL_SET_ERR_MSG_FMT_MOD(extack,
725 "Firmware reports no support for %s encap match",
726 efx_tc_encap_type_name(type));
727 return rc;
728 }
729
730 rc = efx_tc_flower_record_encap_match(efx, &match, type,
731 EFX_TC_EM_DIRECT, 0, 0,
732 extack);
733 if (rc)
734 return rc;
735 } else {
736 /* This is not a tunnel decap rule, ignore it */
737 netif_dbg(efx, drv, efx->net_dev,
738 "Ignoring foreign filter without encap match\n");
739 return -EOPNOTSUPP;
740 }
741
742 rule = kzalloc(sizeof(*rule), GFP_USER);
743 if (!rule) {
744 rc = -ENOMEM;
745 goto out_free;
746 }
747 INIT_LIST_HEAD(&rule->acts.list);
748 rule->cookie = tc->cookie;
749 old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
750 &rule->linkage,
751 efx_tc_match_action_ht_params);
752 if (old) {
753 netif_dbg(efx, drv, efx->net_dev,
754 "Ignoring already-offloaded rule (cookie %lx)\n",
755 tc->cookie);
756 rc = -EEXIST;
757 goto out_free;
758 }
759
760 act = kzalloc(sizeof(*act), GFP_USER);
761 if (!act) {
762 rc = -ENOMEM;
763 goto release;
764 }
765
	/* Parse actions. For foreign rules we only support decap, counters
	 * and deliver (redirect/mirror). See the corresponding code in
	 * efx_tc_flower_replace() for the theory of operation and how the
	 * 'act' cursor is used.
	 */
770 flow_action_for_each(i, fa, &fr->action) {
771 struct efx_tc_action_set save;
772
773 switch (fa->id) {
774 case FLOW_ACTION_REDIRECT:
775 case FLOW_ACTION_MIRRED:
776 /* See corresponding code in efx_tc_flower_replace() for
777 * long explanations of what's going on here.
778 */
779 save = *act;
780 if (fa->hw_stats) {
781 struct efx_tc_counter_index *ctr;
782
783 if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
784 NL_SET_ERR_MSG_FMT_MOD(extack,
785 "hw_stats_type %u not supported (only 'delayed')",
786 fa->hw_stats);
787 rc = -EOPNOTSUPP;
788 goto release;
789 }
790 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) {
791 rc = -EOPNOTSUPP;
792 goto release;
793 }
794
795 ctr = efx_tc_flower_get_counter_index(efx,
796 tc->cookie,
797 EFX_TC_COUNTER_TYPE_AR);
798 if (IS_ERR(ctr)) {
799 rc = PTR_ERR(ctr);
800 NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
801 goto release;
802 }
803 act->count = ctr;
804 INIT_LIST_HEAD(&act->count_user);
805 }
806
807 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) {
808 /* can't happen */
809 rc = -EOPNOTSUPP;
810 NL_SET_ERR_MSG_MOD(extack,
811 "Deliver action violates action order (can't happen)");
812 goto release;
813 }
814 to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
815 /* PF implies egdev is us, in which case we really
816 * want to deliver to the uplink (because this is an
817 * ingress filter). If we don't recognise the egdev
818 * at all, then we'd better trap so SW can handle it.
819 */
820 if (IS_ERR(to_efv))
821 to_efv = EFX_EFV_PF;
822 if (to_efv == EFX_EFV_PF) {
823 if (uplinked)
824 break;
825 uplinked = true;
826 }
827 rc = efx_tc_flower_internal_mport(efx, to_efv);
828 if (rc < 0) {
829 NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port");
830 goto release;
831 }
832 act->dest_mport = rc;
833 act->deliver = 1;
834 rc = efx_mae_alloc_action_set(efx, act);
835 if (rc) {
836 NL_SET_ERR_MSG_MOD(extack,
837 "Failed to write action set to hw (mirred)");
838 goto release;
839 }
840 list_add_tail(&act->list, &rule->acts.list);
841 act = NULL;
842 if (fa->id == FLOW_ACTION_REDIRECT)
843 break; /* end of the line */
844 /* Mirror, so continue on with saved act */
845 act = kzalloc(sizeof(*act), GFP_USER);
846 if (!act) {
847 rc = -ENOMEM;
848 goto release;
849 }
850 *act = save;
851 break;
852 case FLOW_ACTION_TUNNEL_DECAP:
853 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DECAP)) {
854 rc = -EINVAL;
855 NL_SET_ERR_MSG_MOD(extack, "Decap action violates action order");
856 goto release;
857 }
858 act->decap = 1;
859 /* If we previously delivered/trapped to uplink, now
860 * that we've decapped we'll want another copy if we
861 * try to deliver/trap to uplink again.
862 */
863 uplinked = false;
864 break;
865 default:
866 NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
867 fa->id);
868 rc = -EOPNOTSUPP;
869 goto release;
870 }
871 }
872
873 if (act) {
874 if (!uplinked) {
875 /* Not shot/redirected, so deliver to default dest (which is
876 * the uplink, as this is an ingress filter)
877 */
878 efx_mae_mport_uplink(efx, &act->dest_mport);
879 act->deliver = 1;
880 }
881 rc = efx_mae_alloc_action_set(efx, act);
882 if (rc) {
883 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)");
884 goto release;
885 }
886 list_add_tail(&act->list, &rule->acts.list);
887 act = NULL; /* Prevent double-free in error path */
888 }
889
890 rule->match = match;
891
892 netif_dbg(efx, drv, efx->net_dev,
893 "Successfully parsed foreign filter (cookie %lx)\n",
894 tc->cookie);
895
896 rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
897 if (rc) {
898 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw");
899 goto release;
900 }
901 rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
902 rule->acts.fw_id, &rule->fw_id);
903 if (rc) {
904 NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
905 goto release_acts;
906 }
907 return 0;
908
909release_acts:
910 efx_mae_free_action_set_list(efx, &rule->acts);
911release:
912 /* We failed to insert the rule, so free up any entries we created in
913 * subsidiary tables.
914 */
915 if (act)
916 efx_tc_free_action_set(efx, act, false);
917 if (rule) {
918 rhashtable_remove_fast(&efx->tc->match_action_ht,
919 &rule->linkage,
920 efx_tc_match_action_ht_params);
921 efx_tc_free_action_set_list(efx, &rule->acts, false);
922 }
923out_free:
924 kfree(rule);
925 if (match.encap)
926 efx_tc_flower_release_encap_match(efx, match.encap);
927 return rc;
928}
929
930static int efx_tc_flower_replace(struct efx_nic *efx,
931 struct net_device *net_dev,
932 struct flow_cls_offload *tc,
933 struct efx_rep *efv)
934{
935 struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
936 struct netlink_ext_ack *extack = tc->common.extack;
937 const struct ip_tunnel_info *encap_info = NULL;
938 struct efx_tc_flow_rule *rule = NULL, *old;
939 struct efx_tc_action_set *act = NULL;
940 const struct flow_action_entry *fa;
941 struct efx_rep *from_efv, *to_efv;
942 struct efx_tc_match match;
943 u32 acts_id;
944 s64 rc;
945 int i;
946
947 if (!tc_can_offload_extack(efx->net_dev, extack))
948 return -EOPNOTSUPP;
949 if (WARN_ON(!efx->tc))
950 return -ENETDOWN;
951 if (WARN_ON(!efx->tc->up))
952 return -ENETDOWN;
953
954 from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
955 if (IS_ERR(from_efv)) {
956 /* Not from our PF or representors, so probably a tunnel dev */
957 return efx_tc_flower_replace_foreign(efx, net_dev, tc);
958 }
959
960 if (efv != from_efv) {
961 /* can't happen */
962 NL_SET_ERR_MSG_FMT_MOD(extack, "for %s efv is %snull but from_efv is %snull (can't happen)",
963 netdev_name(net_dev), efv ? "non-" : "",
964 from_efv ? "non-" : "");
965 return -EINVAL;
966 }
967
968 /* Parse match */
969 memset(&match, 0, sizeof(match));
970 rc = efx_tc_flower_external_mport(efx, from_efv);
971 if (rc < 0) {
972 NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port");
973 return rc;
974 }
975 match.value.ingress_port = rc;
976 match.mask.ingress_port = ~0;
977 rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
978 if (rc)
979 return rc;
980 if (efx_tc_match_is_encap(&match.mask)) {
981 NL_SET_ERR_MSG_MOD(extack, "Ingress enc_key matches not supported");
982 return -EOPNOTSUPP;
983 }
984
985 if (tc->common.chain_index) {
986 NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
987 return -EOPNOTSUPP;
988 }
989 match.mask.recirc_id = 0xff;
990
991 rc = efx_mae_match_check_caps(efx, &match.mask, extack);
992 if (rc)
993 return rc;
994
995 rule = kzalloc(sizeof(*rule), GFP_USER);
996 if (!rule)
997 return -ENOMEM;
998 INIT_LIST_HEAD(&rule->acts.list);
999 rule->cookie = tc->cookie;
1000 old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
1001 &rule->linkage,
1002 efx_tc_match_action_ht_params);
1003 if (old) {
1004 netif_dbg(efx, drv, efx->net_dev,
1005 "Already offloaded rule (cookie %lx)\n", tc->cookie);
1006 NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
1007 kfree(rule);
1008 return -EEXIST;
1009 }
1010
1011 /* Parse actions */
1012 act = kzalloc(sizeof(*act), GFP_USER);
1013 if (!act) {
1014 rc = -ENOMEM;
1015 goto release;
1016 }
1017
1018 /**
1019 * DOC: TC action translation
1020 *
1021 * Actions in TC are sequential and cumulative, with delivery actions
1022 * potentially anywhere in the order. The EF100 MAE, however, takes
1023 * an 'action set list' consisting of 'action sets', each of which is
1024 * applied to the _original_ packet, and consists of a set of optional
1025 * actions in a fixed order with delivery at the end.
1026 * To translate between these two models, we maintain a 'cursor', @act,
1027 * which describes the cumulative effect of all the packet-mutating
1028 * actions encountered so far; on handling a delivery (mirred or drop)
1029 * action, once the action-set has been inserted into hardware, we
1030 * append @act to the action-set list (@rule->acts); if this is a pipe
1031 * action (mirred mirror) we then allocate a new @act with a copy of
1032 * the cursor state _before_ the delivery action, otherwise we set @act
1033 * to %NULL.
1034 * This ensures that every allocated action-set is either attached to
1035 * @rule->acts or pointed to by @act (and never both), and that only
1036 * those action-sets in @rule->acts exist in hardware. Consequently,
1037 * in the failure path, @act only needs to be freed in memory, whereas
1038 * for @rule->acts we remove each action-set from hardware before
1039 * freeing it (efx_tc_free_action_set_list()), even if the action-set
1040 * list itself is not in hardware.
1041 */
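	/* For example (illustrative): the TC action list
	 *	vlan pop, mirred mirror dev R1, vlan push id 10, mirred redirect dev R2
	 * (R1/R2 being hypothetical representors) becomes two action sets,
	 * each applied to the original packet:
	 *	1) pop VLAN, deliver to R1's m-port
	 *	2) pop VLAN, push VLAN 10, deliver to R2's m-port
	 * because the cursor state saved before the mirror's delivery is
	 * carried forward into the redirect's action set.
	 */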
1042 flow_action_for_each(i, fa, &fr->action) {
1043 struct efx_tc_action_set save;
1044 u16 tci;
1045
1046 if (!act) {
1047 /* more actions after a non-pipe action */
1048 NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action");
1049 rc = -EINVAL;
1050 goto release;
1051 }
1052
1053 if ((fa->id == FLOW_ACTION_REDIRECT ||
1054 fa->id == FLOW_ACTION_MIRRED ||
1055 fa->id == FLOW_ACTION_DROP) && fa->hw_stats) {
1056 struct efx_tc_counter_index *ctr;
1057
1058 /* Currently the only actions that want stats are
1059 * mirred and gact (ok, shot, trap, goto-chain), which
1060 * means we want stats just before delivery. Also,
1061 * note that tunnel_key set shouldn't change the length
1062 * — it's only the subsequent mirred that does that,
1063 * and the stats are taken _before_ the mirred action
1064 * happens.
1065 */
1066 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) {
1067 /* All supported actions that count either steal
1068 * (gact shot, mirred redirect) or clone act
1069 * (mirred mirror), so we should never get two
1070 * count actions on one action_set.
1071 */
1072 NL_SET_ERR_MSG_MOD(extack, "Count-action conflict (can't happen)");
1073 rc = -EOPNOTSUPP;
1074 goto release;
1075 }
1076
1077 if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
1078 NL_SET_ERR_MSG_FMT_MOD(extack, "hw_stats_type %u not supported (only 'delayed')",
1079 fa->hw_stats);
1080 rc = -EOPNOTSUPP;
1081 goto release;
1082 }
1083
1084 ctr = efx_tc_flower_get_counter_index(efx, tc->cookie,
1085 EFX_TC_COUNTER_TYPE_AR);
1086 if (IS_ERR(ctr)) {
1087 rc = PTR_ERR(ctr);
1088 NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
1089 goto release;
1090 }
1091 act->count = ctr;
1092 INIT_LIST_HEAD(&act->count_user);
1093 }
1094
1095 switch (fa->id) {
1096 case FLOW_ACTION_DROP:
1097 rc = efx_mae_alloc_action_set(efx, act);
1098 if (rc) {
1099 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (drop)");
1100 goto release;
1101 }
1102 list_add_tail(&act->list, &rule->acts.list);
1103 act = NULL; /* end of the line */
1104 break;
1105 case FLOW_ACTION_REDIRECT:
1106 case FLOW_ACTION_MIRRED:
1107 save = *act;
1108
1109 if (encap_info) {
1110 struct efx_tc_encap_action *encap;
1111
1112 if (!efx_tc_flower_action_order_ok(act,
1113 EFX_TC_AO_ENCAP)) {
1114 rc = -EOPNOTSUPP;
1115 NL_SET_ERR_MSG_MOD(extack, "Encap action violates action order");
1116 goto release;
1117 }
1118 encap = efx_tc_flower_create_encap_md(
1119 efx, encap_info, fa->dev, extack);
1120 if (IS_ERR_OR_NULL(encap)) {
1121 rc = PTR_ERR(encap);
1122 if (!rc)
1123 rc = -EIO; /* arbitrary */
1124 goto release;
1125 }
1126 act->encap_md = encap;
1127 list_add_tail(&act->encap_user, &encap->users);
1128 act->dest_mport = encap->dest_mport;
1129 act->deliver = 1;
1130 if (act->count && !WARN_ON(!act->count->cnt)) {
					/* This counter is used by an encap
					 * action, which needs a reference back
					 * so it can prod neighbour resolution
					 * (keep the tunnel's neighbour entry
					 * fresh) whenever traffic is seen.
					 */
1136 spin_lock_bh(&act->count->cnt->lock);
1137 list_add_tail(&act->count_user,
1138 &act->count->cnt->users);
1139 spin_unlock_bh(&act->count->cnt->lock);
1140 }
1141 rc = efx_mae_alloc_action_set(efx, act);
1142 if (rc) {
1143 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (encap)");
1144 goto release;
1145 }
1146 list_add_tail(&act->list, &rule->acts.list);
1147 act->user = &rule->acts;
1148 act = NULL;
1149 if (fa->id == FLOW_ACTION_REDIRECT)
1150 break; /* end of the line */
1151 /* Mirror, so continue on with saved act */
1152 save.count = NULL;
1153 act = kzalloc(sizeof(*act), GFP_USER);
1154 if (!act) {
1155 rc = -ENOMEM;
1156 goto release;
1157 }
1158 *act = save;
1159 break;
1160 }
1161
1162 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) {
1163 /* can't happen */
1164 rc = -EOPNOTSUPP;
1165 NL_SET_ERR_MSG_MOD(extack, "Deliver action violates action order (can't happen)");
1166 goto release;
1167 }
1168
1169 to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
1170 if (IS_ERR(to_efv)) {
1171 NL_SET_ERR_MSG_MOD(extack, "Mirred egress device not on switch");
1172 rc = PTR_ERR(to_efv);
1173 goto release;
1174 }
1175 rc = efx_tc_flower_external_mport(efx, to_efv);
1176 if (rc < 0) {
1177 NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port");
1178 goto release;
1179 }
1180 act->dest_mport = rc;
1181 act->deliver = 1;
1182 rc = efx_mae_alloc_action_set(efx, act);
1183 if (rc) {
1184 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (mirred)");
1185 goto release;
1186 }
1187 list_add_tail(&act->list, &rule->acts.list);
1188 act = NULL;
1189 if (fa->id == FLOW_ACTION_REDIRECT)
1190 break; /* end of the line */
1191 /* Mirror, so continue on with saved act */
1192 save.count = NULL;
1193 act = kzalloc(sizeof(*act), GFP_USER);
1194 if (!act) {
1195 rc = -ENOMEM;
1196 goto release;
1197 }
1198 *act = save;
1199 break;
1200 case FLOW_ACTION_VLAN_POP:
1201 if (act->vlan_push) {
1202 act->vlan_push--;
1203 } else if (efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN_POP)) {
1204 act->vlan_pop++;
1205 } else {
1206 NL_SET_ERR_MSG_MOD(extack,
1207 "More than two VLAN pops, or action order violated");
1208 rc = -EINVAL;
1209 goto release;
1210 }
1211 break;
1212 case FLOW_ACTION_VLAN_PUSH:
1213 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN_PUSH)) {
1214 rc = -EINVAL;
1215 NL_SET_ERR_MSG_MOD(extack,
1216 "More than two VLAN pushes, or action order violated");
1217 goto release;
1218 }
1219 tci = fa->vlan.vid & VLAN_VID_MASK;
1220 tci |= fa->vlan.prio << VLAN_PRIO_SHIFT;
1221 act->vlan_tci[act->vlan_push] = cpu_to_be16(tci);
1222 act->vlan_proto[act->vlan_push] = fa->vlan.proto;
1223 act->vlan_push++;
1224 break;
1225 case FLOW_ACTION_TUNNEL_ENCAP:
1226 if (encap_info) {
1227 /* Can't specify encap multiple times.
1228 * If you want to overwrite an existing
1229 * encap_info, use an intervening
1230 * FLOW_ACTION_TUNNEL_DECAP to clear it.
1231 */
1232 NL_SET_ERR_MSG_MOD(extack, "Tunnel key set when already set");
1233 rc = -EINVAL;
1234 goto release;
1235 }
1236 if (!fa->tunnel) {
1237 NL_SET_ERR_MSG_MOD(extack, "Tunnel key set is missing key");
1238 rc = -EOPNOTSUPP;
1239 goto release;
1240 }
1241 encap_info = fa->tunnel;
1242 break;
1243 case FLOW_ACTION_TUNNEL_DECAP:
1244 if (encap_info) {
1245 encap_info = NULL;
1246 break;
1247 }
1248 /* Since we don't support enc_key matches on ingress
1249 * (and if we did there'd be no tunnel-device to give
1250 * us a type), we can't offload a decap that's not
1251 * just undoing a previous encap action.
1252 */
1253 NL_SET_ERR_MSG_MOD(extack, "Cannot offload tunnel decap action without tunnel device");
1254 rc = -EOPNOTSUPP;
1255 goto release;
1256 default:
1257 NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
1258 fa->id);
1259 rc = -EOPNOTSUPP;
1260 goto release;
1261 }
1262 }
1263
1264 if (act) {
1265 /* Not shot/redirected, so deliver to default dest */
1266 if (from_efv == EFX_EFV_PF)
1267 /* Rule applies to traffic from the wire,
1268 * and default dest is thus the PF
1269 */
1270 efx_mae_mport_uplink(efx, &act->dest_mport);
1271 else
1272 /* Representor, so rule applies to traffic from
1273 * representee, and default dest is thus the rep.
1274 * All reps use the same mport for delivery
1275 */
1276 efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
1277 &act->dest_mport);
1278 act->deliver = 1;
1279 rc = efx_mae_alloc_action_set(efx, act);
1280 if (rc) {
1281 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)");
1282 goto release;
1283 }
1284 list_add_tail(&act->list, &rule->acts.list);
1285 act = NULL; /* Prevent double-free in error path */
1286 }
1287
1288 netif_dbg(efx, drv, efx->net_dev,
1289 "Successfully parsed filter (cookie %lx)\n",
1290 tc->cookie);
1291
1292 rule->match = match;
1293
1294 rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
1295 if (rc) {
1296 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw");
1297 goto release;
1298 }
1299 if (from_efv == EFX_EFV_PF)
1300 /* PF netdev, so rule applies to traffic from wire */
1301 rule->fallback = &efx->tc->facts.pf;
1302 else
1303 /* repdev, so rule applies to traffic from representee */
1304 rule->fallback = &efx->tc->facts.reps;
1305 if (!efx_tc_check_ready(efx, rule)) {
1306 netif_dbg(efx, drv, efx->net_dev, "action not ready for hw\n");
1307 acts_id = rule->fallback->fw_id;
1308 } else {
1309 netif_dbg(efx, drv, efx->net_dev, "ready for hw\n");
1310 acts_id = rule->acts.fw_id;
1311 }
1312 rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
1313 acts_id, &rule->fw_id);
1314 if (rc) {
1315 NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
1316 goto release_acts;
1317 }
1318 return 0;
1319
1320release_acts:
1321 efx_mae_free_action_set_list(efx, &rule->acts);
1322release:
1323 /* We failed to insert the rule, so free up any entries we created in
1324 * subsidiary tables.
1325 */
1326 if (act)
1327 efx_tc_free_action_set(efx, act, false);
1328 if (rule) {
1329 rhashtable_remove_fast(&efx->tc->match_action_ht,
1330 &rule->linkage,
1331 efx_tc_match_action_ht_params);
1332 efx_tc_free_action_set_list(efx, &rule->acts, false);
1333 }
1334 kfree(rule);
1335 return rc;
1336}
1337
1338static int efx_tc_flower_destroy(struct efx_nic *efx,
1339 struct net_device *net_dev,
1340 struct flow_cls_offload *tc)
1341{
1342 struct netlink_ext_ack *extack = tc->common.extack;
1343 struct efx_tc_flow_rule *rule;
1344
1345 rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
1346 efx_tc_match_action_ht_params);
1347 if (!rule) {
1348 /* Only log a message if we're the ingress device. Otherwise
1349 * it's a foreign filter and we might just not have been
1350 * interested (e.g. we might not have been the egress device
1351 * either).
1352 */
1353 if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
1354 netif_warn(efx, drv, efx->net_dev,
1355 "Filter %lx not found to remove\n", tc->cookie);
1356 NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
1357 return -ENOENT;
1358 }
1359
1360 /* Remove it from HW */
1361 efx_tc_delete_rule(efx, rule);
1362 /* Delete it from SW */
1363 rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
1364 efx_tc_match_action_ht_params);
1365 netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
1366 kfree(rule);
1367 return 0;
1368}
1369
1370static int efx_tc_flower_stats(struct efx_nic *efx, struct net_device *net_dev,
1371 struct flow_cls_offload *tc)
1372{
1373 struct netlink_ext_ack *extack = tc->common.extack;
1374 struct efx_tc_counter_index *ctr;
1375 struct efx_tc_counter *cnt;
1376 u64 packets, bytes;
1377
1378 ctr = efx_tc_flower_find_counter_index(efx, tc->cookie);
1379 if (!ctr) {
1380 /* See comment in efx_tc_flower_destroy() */
1381 if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
1382 if (net_ratelimit())
1383 netif_warn(efx, drv, efx->net_dev,
1384 "Filter %lx not found for stats\n",
1385 tc->cookie);
1386 NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
1387 return -ENOENT;
1388 }
1389 if (WARN_ON(!ctr->cnt)) /* can't happen */
1390 return -EIO;
1391 cnt = ctr->cnt;
1392
1393 spin_lock_bh(&cnt->lock);
1394 /* Report only new pkts/bytes since last time TC asked */
1395 packets = cnt->packets;
1396 bytes = cnt->bytes;
1397 flow_stats_update(&tc->stats, bytes - cnt->old_bytes,
1398 packets - cnt->old_packets, 0, cnt->touched,
1399 FLOW_ACTION_HW_STATS_DELAYED);
1400 cnt->old_packets = packets;
1401 cnt->old_bytes = bytes;
1402 spin_unlock_bh(&cnt->lock);
1403 return 0;
1404}
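/* For example (illustrative): if a counter has accumulated 100 packets in
 * total and 60 had already been reported on the previous FLOW_CLS_STATS call,
 * only the 40 new packets (and the corresponding byte delta) are passed to
 * flow_stats_update() above; old_packets/old_bytes then advance to the new
 * totals.
 */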
1405
1406int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
1407 struct flow_cls_offload *tc, struct efx_rep *efv)
1408{
1409 int rc;
1410
1411 if (!efx->tc)
1412 return -EOPNOTSUPP;
1413
1414 mutex_lock(&efx->tc->mutex);
1415 switch (tc->command) {
1416 case FLOW_CLS_REPLACE:
1417 rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
1418 break;
1419 case FLOW_CLS_DESTROY:
1420 rc = efx_tc_flower_destroy(efx, net_dev, tc);
1421 break;
1422 case FLOW_CLS_STATS:
1423 rc = efx_tc_flower_stats(efx, net_dev, tc);
1424 break;
1425 default:
1426 rc = -EOPNOTSUPP;
1427 break;
1428 }
1429 mutex_unlock(&efx->tc->mutex);
1430 return rc;
1431}
1432
1433static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
1434 u32 eg_port, struct efx_tc_flow_rule *rule)
1435{
1436 struct efx_tc_action_set_list *acts = &rule->acts;
1437 struct efx_tc_match *match = &rule->match;
1438 struct efx_tc_action_set *act;
1439 int rc;
1440
1441 match->value.ingress_port = ing_port;
1442 match->mask.ingress_port = ~0;
1443 act = kzalloc(sizeof(*act), GFP_KERNEL);
1444 if (!act)
1445 return -ENOMEM;
1446 act->deliver = 1;
1447 act->dest_mport = eg_port;
1448 rc = efx_mae_alloc_action_set(efx, act);
1449 if (rc)
1450 goto fail1;
1451 EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
1452 list_add_tail(&act->list, &acts->list);
1453 rc = efx_mae_alloc_action_set_list(efx, acts);
1454 if (rc)
1455 goto fail2;
1456 rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
1457 acts->fw_id, &rule->fw_id);
1458 if (rc)
1459 goto fail3;
1460 return 0;
1461fail3:
1462 efx_mae_free_action_set_list(efx, acts);
1463fail2:
1464 list_del(&act->list);
1465 efx_mae_free_action_set(efx, act->fw_id);
1466fail1:
1467 kfree(act);
1468 return rc;
1469}
1470
1471static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
1472{
1473 struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
1474 u32 ing_port, eg_port;
1475
1476 efx_mae_mport_uplink(efx, &ing_port);
1477 efx_mae_mport_wire(efx, &eg_port);
1478 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
1479}
1480
1481static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
1482{
1483 struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
1484 u32 ing_port, eg_port;
1485
1486 efx_mae_mport_wire(efx, &ing_port);
1487 efx_mae_mport_uplink(efx, &eg_port);
1488 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
1489}
1490
1491int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
1492{
1493 struct efx_tc_flow_rule *rule = &efv->dflt;
1494 struct efx_nic *efx = efv->parent;
1495 u32 ing_port, eg_port;
1496
1497 efx_mae_mport_mport(efx, efv->mport, &ing_port);
1498 efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
1499 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
1500}
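/* Taken together, the default rules above give plain-NIC behaviour when no
 * TC rule matches: traffic from the PF's uplink m-port is delivered to the
 * wire, traffic from the wire to the PF, and traffic from each representee's
 * m-port to the shared representors' m-port (and thence to its representor).
 */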
1501
1502void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
1503 struct efx_tc_flow_rule *rule)
1504{
1505 if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
1506 efx_tc_delete_rule(efx, rule);
1507 rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
1508}
1509
1510static int efx_tc_configure_fallback_acts(struct efx_nic *efx, u32 eg_port,
1511 struct efx_tc_action_set_list *acts)
1512{
1513 struct efx_tc_action_set *act;
1514 int rc;
1515
1516 act = kzalloc(sizeof(*act), GFP_KERNEL);
1517 if (!act)
1518 return -ENOMEM;
1519 act->deliver = 1;
1520 act->dest_mport = eg_port;
1521 rc = efx_mae_alloc_action_set(efx, act);
1522 if (rc)
1523 goto fail1;
1524 EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
1525 list_add_tail(&act->list, &acts->list);
1526 rc = efx_mae_alloc_action_set_list(efx, acts);
1527 if (rc)
1528 goto fail2;
1529 return 0;
1530fail2:
1531 list_del(&act->list);
1532 efx_mae_free_action_set(efx, act->fw_id);
1533fail1:
1534 kfree(act);
1535 return rc;
1536}
1537
1538static int efx_tc_configure_fallback_acts_pf(struct efx_nic *efx)
1539{
1540 struct efx_tc_action_set_list *acts = &efx->tc->facts.pf;
1541 u32 eg_port;
1542
1543 efx_mae_mport_uplink(efx, &eg_port);
1544 return efx_tc_configure_fallback_acts(efx, eg_port, acts);
1545}
1546
1547static int efx_tc_configure_fallback_acts_reps(struct efx_nic *efx)
1548{
1549 struct efx_tc_action_set_list *acts = &efx->tc->facts.reps;
1550 u32 eg_port;
1551
1552 efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
1553 return efx_tc_configure_fallback_acts(efx, eg_port, acts);
1554}
1555
1556static void efx_tc_deconfigure_fallback_acts(struct efx_nic *efx,
1557 struct efx_tc_action_set_list *acts)
1558{
1559 efx_tc_free_action_set_list(efx, acts, true);
1560}
1561
1562static int efx_tc_configure_rep_mport(struct efx_nic *efx)
1563{
1564 u32 rep_mport_label;
1565 int rc;
1566
1567 rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
1568 if (rc)
1569 return rc;
1570 pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
1571 efx->tc->reps_mport_id, rep_mport_label);
1572 /* Use mport *selector* as vport ID */
1573 efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
1574 &efx->tc->reps_mport_vport_id);
1575 return 0;
1576}
1577
1578static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
1579{
1580 efx_mae_free_mport(efx, efx->tc->reps_mport_id);
1581 efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
1582}
1583
1584int efx_tc_insert_rep_filters(struct efx_nic *efx)
1585{
1586 struct efx_filter_spec promisc, allmulti;
1587 int rc;
1588
1589 if (efx->type->is_vf)
1590 return 0;
1591 if (!efx->tc)
1592 return 0;
1593 efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
1594 efx_filter_set_uc_def(&promisc);
1595 efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
1596 rc = efx_filter_insert_filter(efx, &promisc, false);
1597 if (rc < 0)
1598 return rc;
1599 efx->tc->reps_filter_uc = rc;
1600 efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
1601 efx_filter_set_mc_def(&allmulti);
1602 efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
1603 rc = efx_filter_insert_filter(efx, &allmulti, false);
1604 if (rc < 0)
1605 return rc;
1606 efx->tc->reps_filter_mc = rc;
1607 return 0;
1608}
1609
1610void efx_tc_remove_rep_filters(struct efx_nic *efx)
1611{
1612 if (efx->type->is_vf)
1613 return;
1614 if (!efx->tc)
1615 return;
1616 if (efx->tc->reps_filter_mc >= 0)
1617 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
1618 efx->tc->reps_filter_mc = -1;
1619 if (efx->tc->reps_filter_uc >= 0)
1620 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
1621 efx->tc->reps_filter_uc = -1;
1622}
1623
1624int efx_init_tc(struct efx_nic *efx)
1625{
1626 int rc;
1627
1628 rc = efx_mae_get_caps(efx, efx->tc->caps);
1629 if (rc)
1630 return rc;
1631 if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
		/* Firmware supports some match fields the driver doesn't know
		 * about. Not fatal, unless any of those fields are required
		 * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS), which we have no way to
		 * detect here.
		 */
1636 netif_warn(efx, probe, efx->net_dev,
1637 "FW reports additional match fields %u\n",
1638 efx->tc->caps->match_field_count);
1639 if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
1640 netif_err(efx, probe, efx->net_dev,
1641 "Too few action prios supported (have %u, need %u)\n",
1642 efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
1643 return -EIO;
1644 }
1645 rc = efx_tc_configure_default_rule_pf(efx);
1646 if (rc)
1647 return rc;
1648 rc = efx_tc_configure_default_rule_wire(efx);
1649 if (rc)
1650 return rc;
1651 rc = efx_tc_configure_rep_mport(efx);
1652 if (rc)
1653 return rc;
1654 rc = efx_tc_configure_fallback_acts_pf(efx);
1655 if (rc)
1656 return rc;
1657 rc = efx_tc_configure_fallback_acts_reps(efx);
1658 if (rc)
1659 return rc;
1660 rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
1661 if (rc)
1662 return rc;
1663 efx->tc->up = true;
1664 return 0;
1665}
1666
1667void efx_fini_tc(struct efx_nic *efx)
1668{
1669 /* We can get called even if efx_init_struct_tc() failed */
1670 if (!efx->tc)
1671 return;
1672 if (efx->tc->up)
1673 flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
1674 efx_tc_deconfigure_rep_mport(efx);
1675 efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
1676 efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
1677 efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf);
1678 efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps);
1679 efx->tc->up = false;
1680}
1681
1682/* At teardown time, all TC filter rules (and thus all resources they created)
1683 * should already have been removed. If we find any in our hashtables, make a
1684 * cursory attempt to clean up the software side.
1685 */
1686static void efx_tc_encap_match_free(void *ptr, void *__unused)
1687{
1688 struct efx_tc_encap_match *encap = ptr;
1689
1690 WARN_ON(refcount_read(&encap->ref));
1691 kfree(encap);
1692}
1693
1694static void efx_tc_flow_free(void *ptr, void *arg)
1695{
1696 struct efx_tc_flow_rule *rule = ptr;
1697 struct efx_nic *efx = arg;
1698
1699 netif_err(efx, drv, efx->net_dev,
1700 "tc rule %lx still present at teardown, removing\n",
1701 rule->cookie);
1702
1703 /* Also releases entries in subsidiary tables */
1704 efx_tc_delete_rule(efx, rule);
1705
1706 kfree(rule);
1707}
1708
1709int efx_init_struct_tc(struct efx_nic *efx)
1710{
1711 int rc;
1712
1713 if (efx->type->is_vf)
1714 return 0;
1715
1716 efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
1717 if (!efx->tc)
1718 return -ENOMEM;
1719 efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
1720 if (!efx->tc->caps) {
1721 rc = -ENOMEM;
1722 goto fail_alloc_caps;
1723 }
1724 INIT_LIST_HEAD(&efx->tc->block_list);
1725
1726 mutex_init(&efx->tc->mutex);
1727 init_waitqueue_head(&efx->tc->flush_wq);
1728 rc = efx_tc_init_encap_actions(efx);
1729 if (rc < 0)
1730 goto fail_encap_actions;
1731 rc = efx_tc_init_counters(efx);
1732 if (rc < 0)
1733 goto fail_counters;
1734 rc = rhashtable_init(&efx->tc->encap_match_ht, &efx_tc_encap_match_ht_params);
1735 if (rc < 0)
1736 goto fail_encap_match_ht;
1737 rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
1738 if (rc < 0)
1739 goto fail_match_action_ht;
1740 efx->tc->reps_filter_uc = -1;
1741 efx->tc->reps_filter_mc = -1;
1742 INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
1743 efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
1744 INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
1745 efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
1746 INIT_LIST_HEAD(&efx->tc->facts.pf.list);
1747 efx->tc->facts.pf.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
1748 INIT_LIST_HEAD(&efx->tc->facts.reps.list);
1749 efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
1750 efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type;
1751 return 0;
1752fail_match_action_ht:
1753 rhashtable_destroy(&efx->tc->encap_match_ht);
1754fail_encap_match_ht:
1755 efx_tc_destroy_counters(efx);
1756fail_counters:
1757 efx_tc_destroy_encap_actions(efx);
1758fail_encap_actions:
1759 mutex_destroy(&efx->tc->mutex);
1760 kfree(efx->tc->caps);
1761fail_alloc_caps:
1762 kfree(efx->tc);
1763 efx->tc = NULL;
1764 return rc;
1765}
1766
1767void efx_fini_struct_tc(struct efx_nic *efx)
1768{
1769 if (!efx->tc)
1770 return;
1771
1772 mutex_lock(&efx->tc->mutex);
1773 EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
1774 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
1775 EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
1776 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
1777 EFX_WARN_ON_PARANOID(efx->tc->facts.pf.fw_id !=
1778 MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
1779 EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id !=
1780 MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
1781 rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
1782 efx);
1783 rhashtable_free_and_destroy(&efx->tc->encap_match_ht,
1784 efx_tc_encap_match_free, NULL);
1785 efx_tc_fini_counters(efx);
1786 efx_tc_fini_encap_actions(efx);
1787 mutex_unlock(&efx->tc->mutex);
1788 mutex_destroy(&efx->tc->mutex);
1789 kfree(efx->tc->caps);
1790 kfree(efx->tc);
1791 efx->tc = NULL;
1792}