// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>
#include <linux/ppp_defs.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>
#include <net/tc_wrapper.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

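/* Derive a bit mask covering every defined CT state flag. Assuming the UAPI
 * enum ends with TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5 (so that
 * __TCA_FLOWER_KEY_CT_FLAGS_MAX == (1 << 5) + 1), the arithmetic below gives
 * MAX == 1 << 6 and MASK == 0x3f, i.e. all six flag bits set.
 */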
#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

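/* Flow keys are compared word-by-word as arrays of longs (see
 * fl_set_masked_key() and fl_mask_fits_tmplt() below), which is why the
 * struct is aligned to BITS_PER_LONG / 8 bytes and why mask ranges are
 * rounded to sizeof(long) boundaries in fl_mask_update_range().
 */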
struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports_range tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
	struct flow_dissector_key_num_of_vlans num_of_vlans;
	struct flow_dissector_key_pppoe pppoe;
	struct flow_dissector_key_l2tpv3 l2tpv3;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference
	 * counter can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

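/* Trim the mask to the smallest sizeof(long)-aligned window that contains
 * every non-zero byte, so lookups hash and compare only the bytes that can
 * actually differ. For example (illustrative offsets): if the only set mask
 * bytes sit at offsets 13 and 22 and sizeof(long) == 8, the range becomes
 * [8, 24).
 */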
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

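/* Compute mkey = key & mask over the mask's byte range, one long at a time.
 * The __aligned(BITS_PER_LONG / 8) annotation on struct fl_flow_key and the
 * sizeof(long) rounding in fl_mask_update_range() make this word-wise walk
 * safe.
 */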
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

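/* A mask fits a template when it does not match on any bit the template
 * leaves unmatched: for every word, (~template & mask) must be zero, i.e.
 * the mask's set bits are a subset of the template's.
 */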
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

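/* Look up a filter for the given packet key under one mask. Plain masks go
 * straight to the rhashtable; masks with TCA_FLOWER_MASK_FLAGS_RANGE first
 * need a linear scan so each filter's min/max port bounds can be checked
 * and folded into the masked key.
 */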
static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

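/* Classification walks the list of masks, dissects only the fields each
 * mask's dissector cares about, and stops at the first matching filter that
 * is not skip_sw. Every additional mask therefore costs one dissection and
 * one hash lookup per packet.
 */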
TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
				  const struct tcf_proto *tp,
				  struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto if the protocol
		 * is unknown, so set it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

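/* Program a filter into hardware. Action-translation failures are only
 * fatal for skip_sw filters; a failed hardware add always unwinds via
 * fl_hw_destroy_filter(). TCA_CLS_FLAGS_IN_HW is re-checked afterwards
 * because a driver may accept the callback yet not actually offload.
 */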
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
				      cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;

	f = idr_find(&head->handle_idr, handle);
	return f ? &f->exts : NULL;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
	[TCA_FLOWER_INDEV] = { .type = NLA_STRING,
			       .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE] =
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK] =
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
				       .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 },
};

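/* Illustrative userspace mapping (example only; device name and values are
 * made up): a command such as
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 *
 * arrives here as TCA_FLOWER_KEY_ETH_TYPE, TCA_FLOWER_KEY_IP_PROTO and
 * TCA_FLOWER_KEY_TCP_DST attributes, validated against fl_policy above.
 */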
static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
						 .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 },
};

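/* Copy a key attribute and its mask. When the mask attribute is absent (or
 * has no netlink counterpart, i.e. mask_type == TCA_FLOWER_UNSPEC), the
 * mask defaults to all-ones, turning the key into an exact match.
 */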
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}

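/* Parse one MPLS label stack entry (LSE). Userspace depth starts at 1 (see
 * the RFC 3031 note below), so depth N maps to ls[N - 1] in
 * struct flow_dissector_key_mpls.
 */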
static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	if (ethertype) {
		key_val->vlan_tpid = ethertype;
		key_mask->vlan_tpid = cpu_to_be16(~0);
	}
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}

static void fl_set_key_pppoe(struct nlattr **tb,
			     struct flow_dissector_key_pppoe *key_val,
			     struct flow_dissector_key_pppoe *key_mask,
			     struct fl_flow_key *key,
			     struct fl_flow_key *mask)
{
	/* key_val::type must be set to ETH_P_PPP_SES because ETH_P_PPP_SES
	 * was stored in basic.n_proto, which might get overwritten by
	 * ppp_proto or be set to 0. The role of key_val::type is similar
	 * to vlan_key::tpid.
	 */
	key_val->type = htons(ETH_P_PPP_SES);
	key_mask->type = cpu_to_be16(~0);

	if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
		key_val->session_id =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
		key_mask->session_id = cpu_to_be16(~0);
	}
	if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
		key_val->ppp_proto =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
		key_mask->ppp_proto = cpu_to_be16(~0);

		if (key_val->ppp_proto == htons(PPP_IP)) {
			key->basic.n_proto = htons(ETH_P_IP);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
			key->basic.n_proto = htons(ETH_P_IPV6);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_UC);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_MC);
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	} else {
		key->basic.n_proto = 0;
		mask->basic.n_proto = cpu_to_be16(0);
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

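/* Parse one GENEVE option for either the key or the mask. option_len is 0
 * on the key pass; the returned length is then fed back in as option_len on
 * the mask pass so both sides can be checked for equal size (see
 * fl_set_enc_opt()). depth == 0 means no mask attribute was supplied at
 * all, in which case the option is masked as an exact match.
 */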
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 len = key->enc_opts.len;
	int err;

	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}

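/* Walk the TCA_FLOWER_KEY_ENC_OPTS and TCA_FLOWER_KEY_ENC_OPTS_MASK nests in
 * lockstep: each key option is parsed, then the matching mask option, and
 * the accumulated key and mask lengths must stay equal. Mixing tunnel
 * option types within one filter is rejected via dst_opt_type.
 */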
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Duplicate type for gtp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_key, key,
						    key_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
						    msk_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}

		if (!msk_depth)
			continue;

		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
			return -EINVAL;
		}
		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
	}

	return 0;
}

static int fl_validate_ct_state(u16 state, struct nlattr *tb,
				struct netlink_ext_ack *extack)
{
	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "no trk, so no other flag can be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and est are mutually exclusive");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "when inv is set, only trk may be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and rpl are mutually exclusive");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		int err;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));

		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
		if (err)
			return err;

	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

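/* Decide whether the current ethertype attribute describes a VLAN tag.
 * Returns true when another VLAN header is expected, either because the
 * ethertype is a VLAN TPID or because the filter already matches on more
 * than vthresh VLANs; otherwise the ethertype is consumed as
 * basic.n_proto.
 */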
static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
			struct fl_flow_key *key, struct fl_flow_key *mask,
			int vthresh)
{
	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;

	if (!tb) {
		*ethertype = 0;
		return good_num_of_vlans;
	}

	*ethertype = nla_get_be16(tb);
	if (good_num_of_vlans || eth_type_vlan(*ethertype))
		return true;

	key->basic.n_proto = *ethertype;
	mask->basic.n_proto = cpu_to_be16(~0);
	return false;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->num_of_vlans,
		       TCA_FLOWER_KEY_NUM_OF_VLANS,
		       &mask->num_of_vlans,
		       TCA_FLOWER_UNSPEC,
		       sizeof(key->num_of_vlans));

	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
				TCA_FLOWER_KEY_VLAN_PRIO,
				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				&key->vlan, &mask->vlan);

		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
				&ethertype, key, mask, 1)) {
			fl_set_key_vlan(tb, ethertype,
					TCA_FLOWER_KEY_CVLAN_ID,
					TCA_FLOWER_KEY_CVLAN_PRIO,
					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					&key->cvlan, &mask->cvlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
				       &mask->basic.n_proto,
				       TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		}
	}

	if (key->basic.n_proto == htons(ETH_P_PPP_SES))
		fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	} else if (key->basic.ip_proto == IPPROTO_L2TP) {
		fl_set_key_val(tb, &key->l2tpv3.session_id,
			       TCA_FLOWER_KEY_L2TPV3_SID,
			       &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
			       sizeof(key->l2tpv3.session_id));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
1821 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1822 &mask->enc_ipv4.src,
1823 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1824 sizeof(key->enc_ipv4.src));
1825 fl_set_key_val(tb, &key->enc_ipv4.dst,
1826 TCA_FLOWER_KEY_ENC_IPV4_DST,
1827 &mask->enc_ipv4.dst,
1828 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1829 sizeof(key->enc_ipv4.dst));
1830 }
1831
1832 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1833 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1834 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1835 mask->enc_control.addr_type = ~0;
1836 fl_set_key_val(tb, &key->enc_ipv6.src,
1837 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1838 &mask->enc_ipv6.src,
1839 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1840 sizeof(key->enc_ipv6.src));
1841 fl_set_key_val(tb, &key->enc_ipv6.dst,
1842 TCA_FLOWER_KEY_ENC_IPV6_DST,
1843 &mask->enc_ipv6.dst,
1844 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1845 sizeof(key->enc_ipv6.dst));
1846 }
1847
1848 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1849 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1850 sizeof(key->enc_key_id.keyid));
1851
1852 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1853 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1854 sizeof(key->enc_tp.src));
1855
1856 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1857 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1858 sizeof(key->enc_tp.dst));
1859
1860 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1861
1862 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1863 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1864 sizeof(key->hash.hash));
1865
1866 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1867 ret = fl_set_enc_opt(tb, key, mask, extack);
1868 if (ret)
1869 return ret;
1870 }
1871
1872 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1873 if (ret)
1874 return ret;
1875
1876 if (tb[TCA_FLOWER_KEY_FLAGS])
1877 ret = fl_set_key_flags(tb, &key->control.flags,
1878 &mask->control.flags, extack);
1879
1880 return ret;
1881}
1882
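/* Each fl_set_key_val() call in fl_set_key() fills both the key and
 * the mask for one attribute. In essence the helper (defined earlier
 * in this file) behaves like the following sketch:
 *
 *	if (!tb[val_type])
 *		return;			(mask stays zero: wildcard)
 *	nla_memcpy(val, tb[val_type], len);
 *	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
 *		memset(mask, 0xff, len);	(exact match)
 *	else
 *		nla_memcpy(mask, tb[mask_type], len);
 */
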
1883static void fl_mask_copy(struct fl_flow_mask *dst,
1884 struct fl_flow_mask *src)
1885{
1886 const void *psrc = fl_key_get_start(&src->key, src);
1887 void *pdst = fl_key_get_start(&dst->key, src);
1888
1889 memcpy(pdst, psrc, fl_mask_range(src));
1890 dst->range = src->range;
1891}
1892
1893static const struct rhashtable_params fl_ht_params = {
1894 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1895 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1896 .automatic_shrinking = true,
1897};
1898
1899static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1900{
1901 mask->filter_ht_params = fl_ht_params;
1902 mask->filter_ht_params.key_len = fl_mask_range(mask);
1903 mask->filter_ht_params.key_offset += mask->range.start;
1904
1905 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1906}
1907
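/* The per-mask filter table hashes only the bytes of ->mkey that this
 * mask can make nonzero. With hypothetical numbers: if the mask is
 * nonzero only between offsets 16 and 40 of struct fl_flow_key, then
 *
 *	key_offset = offsetof(struct cls_fl_filter, mkey) + 16;
 *	key_len    = 40 - 16;		(fl_mask_range(mask))
 *
 * and rhashtable hashes and compares just that 24-byte window.
 */
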
1908#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1909#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1910
1911#define FL_KEY_IS_MASKED(mask, member) \
1912 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1913		   0, FL_KEY_MEMBER_SIZE(member))
1914
1915#define FL_KEY_SET(keys, cnt, id, member) \
1916 do { \
1917 keys[cnt].key_id = id; \
1918 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1919 cnt++; \
1920	} while (0)
1921
1922#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
1923 do { \
1924 if (FL_KEY_IS_MASKED(mask, member)) \
1925 FL_KEY_SET(keys, cnt, id, member); \
1926	} while (0)
1927
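/* memchr_inv(p, 0, n) returns the first byte in [p, p + n) that is not
 * zero, or NULL when the region is all-zero, so FL_KEY_IS_MASKED() is
 * true exactly when some bit of that member is set in the mask. A
 * quick illustration:
 *
 *	struct fl_flow_key m = {};
 *
 *	m.basic.ip_proto = 0xff;
 *	FL_KEY_IS_MASKED(&m, basic);	(non-NULL: member is masked)
 *	FL_KEY_IS_MASKED(&m, eth);	(NULL: member is not matched on)
 */
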
1928static void fl_init_dissector(struct flow_dissector *dissector,
1929 struct fl_flow_key *mask)
1930{
1931 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1932 size_t cnt = 0;
1933
1934 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1935 FLOW_DISSECTOR_KEY_META, meta);
1936 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1937 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1938 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1939 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1940 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1941 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1942 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1943 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1944 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1945 FLOW_DISSECTOR_KEY_PORTS, tp);
1946 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1947 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1948 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1949 FLOW_DISSECTOR_KEY_IP, ip);
1950 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1951 FLOW_DISSECTOR_KEY_TCP, tcp);
1952 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1953 FLOW_DISSECTOR_KEY_ICMP, icmp);
1954 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1955 FLOW_DISSECTOR_KEY_ARP, arp);
1956 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1957 FLOW_DISSECTOR_KEY_MPLS, mpls);
1958 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1959 FLOW_DISSECTOR_KEY_VLAN, vlan);
1960 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1961 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1962 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1963 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1964 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1965 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1966 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1967 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1968 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1969 FL_KEY_IS_MASKED(mask, enc_ipv6))
1970 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1971 enc_control);
1972 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1973 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1974 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1975 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1976 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1977 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1978 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1979 FLOW_DISSECTOR_KEY_CT, ct);
1980 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1981 FLOW_DISSECTOR_KEY_HASH, hash);
1982 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1983 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
1984 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1985 FLOW_DISSECTOR_KEY_PPPOE, pppoe);
1986 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1987 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
1988
1989 skb_flow_dissector_init(dissector, keys, cnt);
1990}
1991
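/* In fl_init_dissector() only members with a nonzero mask contribute a
 * flow_dissector key (CONTROL and BASIC are always present), so the
 * software fast path extracts just what the mask actually compares.
 * A filter matching solely on TCP ports, for instance, ends up with
 * the CONTROL, BASIC and PORTS keys and nothing else.
 */
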
1992static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1993 struct fl_flow_mask *mask)
1994{
1995 struct fl_flow_mask *newmask;
1996 int err;
1997
1998 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1999 if (!newmask)
2000 return ERR_PTR(-ENOMEM);
2001
2002 fl_mask_copy(newmask, mask);
2003
2004 if ((newmask->key.tp_range.tp_min.dst &&
2005 newmask->key.tp_range.tp_max.dst) ||
2006 (newmask->key.tp_range.tp_min.src &&
2007 newmask->key.tp_range.tp_max.src))
2008 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2009
2010 err = fl_init_mask_hashtable(newmask);
2011 if (err)
2012 goto errout_free;
2013
2014 fl_init_dissector(&newmask->dissector, &newmask->key);
2015
2016 INIT_LIST_HEAD_RCU(&newmask->filters);
2017
2018 refcount_set(&newmask->refcnt, 1);
2019 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2020 &newmask->ht_node, mask_ht_params);
2021 if (err)
2022 goto errout_destroy;
2023
2024 spin_lock(&head->masks_lock);
2025 list_add_tail_rcu(&newmask->list, &head->masks);
2026 spin_unlock(&head->masks_lock);
2027
2028 return newmask;
2029
2030errout_destroy:
2031 rhashtable_destroy(&newmask->ht);
2032errout_free:
2033 kfree(newmask);
2034
2035 return ERR_PTR(err);
2036}
2037
2038static int fl_check_assign_mask(struct cls_fl_head *head,
2039 struct cls_fl_filter *fnew,
2040 struct cls_fl_filter *fold,
2041 struct fl_flow_mask *mask)
2042{
2043 struct fl_flow_mask *newmask;
2044 int ret = 0;
2045
2046 rcu_read_lock();
2047
2048	/* Insert the mask as a temporary node to prevent concurrent creation
2049	 * of a mask with the same key. Any concurrent lookup with the same key
2050	 * will return -EAGAIN because the mask's refcnt is still zero.
2051	 */
2052 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2053 &mask->ht_node,
2054 mask_ht_params);
2055 if (!fnew->mask) {
2056 rcu_read_unlock();
2057
2058 if (fold) {
2059 ret = -EINVAL;
2060 goto errout_cleanup;
2061 }
2062
2063 newmask = fl_create_new_mask(head, mask);
2064 if (IS_ERR(newmask)) {
2065 ret = PTR_ERR(newmask);
2066 goto errout_cleanup;
2067 }
2068
2069 fnew->mask = newmask;
2070 return 0;
2071 } else if (IS_ERR(fnew->mask)) {
2072 ret = PTR_ERR(fnew->mask);
2073 } else if (fold && fold->mask != fnew->mask) {
2074 ret = -EINVAL;
2075 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2076 /* Mask was deleted concurrently, try again */
2077 ret = -EAGAIN;
2078 }
2079 rcu_read_unlock();
2080 return ret;
2081
2082errout_cleanup:
2083 rhashtable_remove_fast(&head->ht, &mask->ht_node,
2084 mask_ht_params);
2085 return ret;
2086}
2087
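/* To summarize the mask lifetime protocol above: the caller's mask is
 * first inserted with refcnt == 0 as a placeholder claim on the key,
 * and fl_create_new_mask() then swaps in the long-lived copy with
 * refcnt == 1. A concurrent writer racing with either step simply hits
 *
 *	if (!refcount_inc_not_zero(&fnew->mask->refcnt))
 *		ret = -EAGAIN;		(placeholder or dying mask)
 *
 * and -EAGAIN makes cls_api replay the request from scratch.
 */
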
2088static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2089 struct cls_fl_filter *f, struct fl_flow_mask *mask,
2090 unsigned long base, struct nlattr **tb,
2091 struct nlattr *est,
2092 struct fl_flow_tmplt *tmplt,
2093 u32 flags, u32 fl_flags,
2094 struct netlink_ext_ack *extack)
2095{
2096 int err;
2097
2098 err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2099 fl_flags, extack);
2100 if (err < 0)
2101 return err;
2102
2103 if (tb[TCA_FLOWER_CLASSID]) {
2104 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2105 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2106 rtnl_lock();
2107 tcf_bind_filter(tp, &f->res, base);
2108 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2109 rtnl_unlock();
2110 }
2111
2112 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2113 if (err)
2114 return err;
2115
2116 fl_mask_update_range(mask);
2117 fl_set_masked_key(&f->mkey, &f->key, mask);
2118
2119 if (!fl_mask_fits_tmplt(tmplt, mask)) {
2120 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2121 return -EINVAL;
2122 }
2123
2124 return 0;
2125}
2126
2127static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2128 struct cls_fl_filter *fold,
2129 bool *in_ht)
2130{
2131 struct fl_flow_mask *mask = fnew->mask;
2132 int err;
2133
2134 err = rhashtable_lookup_insert_fast(&mask->ht,
2135 &fnew->ht_node,
2136 mask->filter_ht_params);
2137 if (err) {
2138 *in_ht = false;
2139 /* It is okay if filter with same key exists when
2140 * overwriting.
2141 */
2142 return fold && err == -EEXIST ? 0 : err;
2143 }
2144
2145 *in_ht = true;
2146 return 0;
2147}
2148
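/* Uniqueness is checked per mask: two filters conflict only if their
 * masked keys collide within the same mask's table. For example,
 * "dst_ip 10.0.0.1/32" and "dst_ip 10.0.0.1/24" use different masks
 * and hence different tables, so they never conflict, while re-adding
 * an identical /32 rule hits -EEXIST above (tolerated only when
 * overwriting fold).
 */
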
2149static int fl_change(struct net *net, struct sk_buff *in_skb,
2150 struct tcf_proto *tp, unsigned long base,
2151 u32 handle, struct nlattr **tca,
2152 void **arg, u32 flags,
2153 struct netlink_ext_ack *extack)
2154{
2155 struct cls_fl_head *head = fl_head_dereference(tp);
2156 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2157 struct cls_fl_filter *fold = *arg;
2158 struct cls_fl_filter *fnew;
2159 struct fl_flow_mask *mask;
2160 struct nlattr **tb;
2161 bool in_ht;
2162 int err;
2163
2164 if (!tca[TCA_OPTIONS]) {
2165 err = -EINVAL;
2166 goto errout_fold;
2167 }
2168
2169 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2170 if (!mask) {
2171 err = -ENOBUFS;
2172 goto errout_fold;
2173 }
2174
2175 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2176 if (!tb) {
2177 err = -ENOBUFS;
2178 goto errout_mask_alloc;
2179 }
2180
2181 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2182 tca[TCA_OPTIONS], fl_policy, NULL);
2183 if (err < 0)
2184 goto errout_tb;
2185
2186 if (fold && handle && fold->handle != handle) {
2187 err = -EINVAL;
2188 goto errout_tb;
2189 }
2190
2191 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2192 if (!fnew) {
2193 err = -ENOBUFS;
2194 goto errout_tb;
2195 }
2196 INIT_LIST_HEAD(&fnew->hw_list);
2197 refcount_set(&fnew->refcnt, 1);
2198
2199 if (tb[TCA_FLOWER_FLAGS]) {
2200 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2201
2202 if (!tc_flags_valid(fnew->flags)) {
2203 kfree(fnew);
2204 err = -EINVAL;
2205 goto errout_tb;
2206 }
2207 }
2208
2209 if (!fold) {
2210 spin_lock(&tp->lock);
2211 if (!handle) {
2212 handle = 1;
2213 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2214 INT_MAX, GFP_ATOMIC);
2215 } else {
2216 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2217 handle, GFP_ATOMIC);
2218
2219			/* A filter with the specified handle was concurrently
2220			 * inserted after the initial check in cls_api. This is
2221			 * not necessarily an error if NLM_F_EXCL is not set in
2222			 * the message flags. Returning -EAGAIN makes cls_api
2223			 * try to update the concurrently inserted rule.
2224			 */
2225 if (err == -ENOSPC)
2226 err = -EAGAIN;
2227 }
2228 spin_unlock(&tp->lock);
2229
2230 if (err) {
2231 kfree(fnew);
2232 goto errout_tb;
2233 }
2234 }
2235 fnew->handle = handle;
2236
2237 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
2238 !tc_skip_hw(fnew->flags));
2239 if (err < 0)
2240 goto errout_idr;
2241
2242 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2243 tp->chain->tmplt_priv, flags, fnew->flags,
2244 extack);
2245 if (err)
2246 goto errout_idr;
2247
2248 err = fl_check_assign_mask(head, fnew, fold, mask);
2249 if (err)
2250 goto errout_idr;
2251
2252 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2253 if (err)
2254 goto errout_mask;
2255
2256 if (!tc_skip_hw(fnew->flags)) {
2257 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2258 if (err)
2259 goto errout_ht;
2260 }
2261
2262 if (!tc_in_hw(fnew->flags))
2263 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2264
2265 spin_lock(&tp->lock);
2266
2267	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2268	 * up the proto again or create a new one, if necessary.
2269	 */
2270 if (tp->deleting) {
2271 err = -EAGAIN;
2272 goto errout_hw;
2273 }
2274
2275 if (fold) {
2276 /* Fold filter was deleted concurrently. Retry lookup. */
2277 if (fold->deleted) {
2278 err = -EAGAIN;
2279 goto errout_hw;
2280 }
2281
2282 fnew->handle = handle;
2283
2284 if (!in_ht) {
2285 struct rhashtable_params params =
2286 fnew->mask->filter_ht_params;
2287
2288 err = rhashtable_insert_fast(&fnew->mask->ht,
2289 &fnew->ht_node,
2290 params);
2291 if (err)
2292 goto errout_hw;
2293 in_ht = true;
2294 }
2295
2296 refcount_inc(&fnew->refcnt);
2297 rhashtable_remove_fast(&fold->mask->ht,
2298 &fold->ht_node,
2299 fold->mask->filter_ht_params);
2300 idr_replace(&head->handle_idr, fnew, fnew->handle);
2301 list_replace_rcu(&fold->list, &fnew->list);
2302 fold->deleted = true;
2303
2304 spin_unlock(&tp->lock);
2305
2306 fl_mask_put(head, fold->mask);
2307 if (!tc_skip_hw(fold->flags))
2308 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2309 tcf_unbind_filter(tp, &fold->res);
2310 /* Caller holds reference to fold, so refcnt is always > 0
2311 * after this.
2312 */
2313 refcount_dec(&fold->refcnt);
2314 __fl_put(fold);
2315 } else {
2316 idr_replace(&head->handle_idr, fnew, fnew->handle);
2317
2318 refcount_inc(&fnew->refcnt);
2319 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2320 spin_unlock(&tp->lock);
2321 }
2322
2323 *arg = fnew;
2324
2325 kfree(tb);
2326 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2327 return 0;
2328
2329errout_ht:
2330 spin_lock(&tp->lock);
2331errout_hw:
2332 fnew->deleted = true;
2333 spin_unlock(&tp->lock);
2334 if (!tc_skip_hw(fnew->flags))
2335 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2336 if (in_ht)
2337 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2338 fnew->mask->filter_ht_params);
2339errout_mask:
2340 fl_mask_put(head, fnew->mask);
2341errout_idr:
2342 idr_remove(&head->handle_idr, fnew->handle);
2343 __fl_put(fnew);
2344errout_tb:
2345 kfree(tb);
2346errout_mask_alloc:
2347 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2348errout_fold:
2349 if (fold)
2350 __fl_put(fold);
2351 return err;
2352}
2353
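/* fl_change() runs mostly unlocked (see TCF_PROTO_OPS_DOIT_UNLOCKED at
 * the bottom of this file); the rough order of operations is:
 *
 *	parse attributes -> reserve the handle in the IDR (new filters)
 *	-> resolve and refcount the mask -> insert into the mask's
 *	hashtable -> offload to hardware -> take tp->lock and publish
 *	(idr_replace() plus RCU list update)
 *
 * Late steps can still observe a concurrent delete of tp or fold, in
 * which case -EAGAIN is returned and the errout_* labels unwind the
 * earlier steps.
 */
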
2354static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2355 bool rtnl_held, struct netlink_ext_ack *extack)
2356{
2357 struct cls_fl_head *head = fl_head_dereference(tp);
2358 struct cls_fl_filter *f = arg;
2359 bool last_on_mask;
2360 int err = 0;
2361
2362 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2363 *last = list_empty(&head->masks);
2364 __fl_put(f);
2365
2366 return err;
2367}
2368
2369static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2370 bool rtnl_held)
2371{
2372 struct cls_fl_head *head = fl_head_dereference(tp);
2373 unsigned long id = arg->cookie, tmp;
2374 struct cls_fl_filter *f;
2375
2376 arg->count = arg->skip;
2377
2378 rcu_read_lock();
2379 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2380 /* don't return filters that are being deleted */
2381 if (!refcount_inc_not_zero(&f->refcnt))
2382 continue;
2383 rcu_read_unlock();
2384
2385 if (arg->fn(tp, f, arg) < 0) {
2386 __fl_put(f);
2387 arg->stop = 1;
2388 rcu_read_lock();
2389 break;
2390 }
2391 __fl_put(f);
2392 arg->count++;
2393 rcu_read_lock();
2394 }
2395 rcu_read_unlock();
2396 arg->cookie = id;
2397}
2398
2399static struct cls_fl_filter *
2400fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2401{
2402 struct cls_fl_head *head = fl_head_dereference(tp);
2403
2404 spin_lock(&tp->lock);
2405 if (list_empty(&head->hw_filters)) {
2406 spin_unlock(&tp->lock);
2407 return NULL;
2408 }
2409
2410 if (!f)
2411 f = list_entry(&head->hw_filters, struct cls_fl_filter,
2412 hw_list);
2413 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2414 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2415 spin_unlock(&tp->lock);
2416 return f;
2417 }
2418 }
2419
2420 spin_unlock(&tp->lock);
2421 return NULL;
2422}
2423
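/* Note the cursor idiom in fl_get_next_hw_filter(): when no filter is
 * passed in, f is pointed at the list head itself,
 *
 *	f = list_entry(&head->hw_filters, struct cls_fl_filter, hw_list);
 *
 * which is not a valid filter and must never be dereferenced; it only
 * gives list_for_each_entry_continue() a position to advance from, so
 * iteration starts at the first real entry.
 */
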
2424static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2425 void *cb_priv, struct netlink_ext_ack *extack)
2426{
2427 struct tcf_block *block = tp->chain->block;
2428 struct flow_cls_offload cls_flower = {};
2429 struct cls_fl_filter *f = NULL;
2430 int err;
2431
2432 /* hw_filters list can only be changed by hw offload functions after
2433 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2434 * iterating it.
2435 */
2436 ASSERT_RTNL();
2437
2438 while ((f = fl_get_next_hw_filter(tp, f, add))) {
2439 cls_flower.rule =
2440 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2441 if (!cls_flower.rule) {
2442 __fl_put(f);
2443 return -ENOMEM;
2444 }
2445
2446 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2447 extack);
2448 cls_flower.command = add ?
2449 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2450 cls_flower.cookie = (unsigned long)f;
2451 cls_flower.rule->match.dissector = &f->mask->dissector;
2452 cls_flower.rule->match.mask = &f->mask->key;
2453 cls_flower.rule->match.key = &f->mkey;
2454
2455 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2456 cls_flower.common.extack);
2457 if (err) {
2458 kfree(cls_flower.rule);
2459 if (tc_skip_sw(f->flags)) {
2460 __fl_put(f);
2461 return err;
2462 }
2463 goto next_flow;
2464 }
2465
2466 cls_flower.classid = f->res.classid;
2467
2468 err = tc_setup_cb_reoffload(block, tp, add, cb,
2469 TC_SETUP_CLSFLOWER, &cls_flower,
2470 cb_priv, &f->flags,
2471 &f->in_hw_count);
2472 tc_cleanup_offload_action(&cls_flower.rule->action);
2473 kfree(cls_flower.rule);
2474
2475 if (err) {
2476 __fl_put(f);
2477 return err;
2478 }
2479next_flow:
2480 __fl_put(f);
2481 }
2482
2483 return 0;
2484}
2485
2486static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2487{
2488 struct flow_cls_offload *cls_flower = type_data;
2489 struct cls_fl_filter *f =
2490 (struct cls_fl_filter *) cls_flower->cookie;
2491 struct cls_fl_head *head = fl_head_dereference(tp);
2492
2493 spin_lock(&tp->lock);
2494 list_add(&f->hw_list, &head->hw_filters);
2495 spin_unlock(&tp->lock);
2496}
2497
2498static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2499{
2500 struct flow_cls_offload *cls_flower = type_data;
2501 struct cls_fl_filter *f =
2502 (struct cls_fl_filter *) cls_flower->cookie;
2503
2504 spin_lock(&tp->lock);
2505 if (!list_empty(&f->hw_list))
2506 list_del_init(&f->hw_list);
2507 spin_unlock(&tp->lock);
2508}
2509
2510static int fl_hw_create_tmplt(struct tcf_chain *chain,
2511 struct fl_flow_tmplt *tmplt)
2512{
2513 struct flow_cls_offload cls_flower = {};
2514 struct tcf_block *block = chain->block;
2515
2516 cls_flower.rule = flow_rule_alloc(0);
2517 if (!cls_flower.rule)
2518 return -ENOMEM;
2519
2520 cls_flower.common.chain_index = chain->index;
2521 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2522 cls_flower.cookie = (unsigned long) tmplt;
2523 cls_flower.rule->match.dissector = &tmplt->dissector;
2524 cls_flower.rule->match.mask = &tmplt->mask;
2525 cls_flower.rule->match.key = &tmplt->dummy_key;
2526
2527	/* We don't care if a driver (any of them) fails to handle this
2528	 * call; it serves merely as a hint to the driver.
2529	 */
2530 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2531 kfree(cls_flower.rule);
2532
2533 return 0;
2534}
2535
2536static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2537 struct fl_flow_tmplt *tmplt)
2538{
2539 struct flow_cls_offload cls_flower = {};
2540 struct tcf_block *block = chain->block;
2541
2542 cls_flower.common.chain_index = chain->index;
2543 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2544 cls_flower.cookie = (unsigned long) tmplt;
2545
2546 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2547}
2548
2549static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2550 struct nlattr **tca,
2551 struct netlink_ext_ack *extack)
2552{
2553 struct fl_flow_tmplt *tmplt;
2554 struct nlattr **tb;
2555 int err;
2556
2557 if (!tca[TCA_OPTIONS])
2558 return ERR_PTR(-EINVAL);
2559
2560 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2561 if (!tb)
2562 return ERR_PTR(-ENOBUFS);
2563 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2564 tca[TCA_OPTIONS], fl_policy, NULL);
2565 if (err)
2566 goto errout_tb;
2567
2568 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2569 if (!tmplt) {
2570 err = -ENOMEM;
2571 goto errout_tb;
2572 }
2573 tmplt->chain = chain;
2574 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2575 if (err)
2576 goto errout_tmplt;
2577
2578 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2579
2580 err = fl_hw_create_tmplt(chain, tmplt);
2581 if (err)
2582 goto errout_tmplt;
2583
2584 kfree(tb);
2585 return tmplt;
2586
2587errout_tmplt:
2588 kfree(tmplt);
2589errout_tb:
2590 kfree(tb);
2591 return ERR_PTR(err);
2592}
2593
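/* For illustration, a template is created from user space with
 * something like (hypothetical device name):
 *
 *	tc chain add dev eth0 ingress protocol ip \
 *		flower dst_ip 192.168.0.0/16
 *
 * after which every filter added to that chain must keep its mask
 * within the template's mask; fl_set_parms() enforces this through
 * fl_mask_fits_tmplt().
 */
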
2594static void fl_tmplt_destroy(void *tmplt_priv)
2595{
2596 struct fl_flow_tmplt *tmplt = tmplt_priv;
2597
2598 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2599 kfree(tmplt);
2600}
2601
2602static int fl_dump_key_val(struct sk_buff *skb,
2603 void *val, int val_type,
2604 void *mask, int mask_type, int len)
2605{
2606 int err;
2607
2608 if (!memchr_inv(mask, 0, len))
2609 return 0;
2610 err = nla_put(skb, val_type, len, val);
2611 if (err)
2612 return err;
2613 if (mask_type != TCA_FLOWER_UNSPEC) {
2614 err = nla_put(skb, mask_type, len, mask);
2615 if (err)
2616 return err;
2617 }
2618 return 0;
2619}
2620
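/* Dumping mirrors fl_set_key_val(): an all-zero mask means the field
 * never participated in matching and is omitted entirely, and fields
 * that only support exact matches (mask_type == TCA_FLOWER_UNSPEC)
 * emit just the value. A rule created with "flower ip_proto tcp", for
 * example, dumps TCA_FLOWER_KEY_IP_PROTO with no mask attribute.
 */
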
2621static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2622 struct fl_flow_key *mask)
2623{
2624 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2625 TCA_FLOWER_KEY_PORT_DST_MIN,
2626 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2627 sizeof(key->tp_range.tp_min.dst)) ||
2628 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2629 TCA_FLOWER_KEY_PORT_DST_MAX,
2630 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2631 sizeof(key->tp_range.tp_max.dst)) ||
2632 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2633 TCA_FLOWER_KEY_PORT_SRC_MIN,
2634 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2635 sizeof(key->tp_range.tp_min.src)) ||
2636 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2637 TCA_FLOWER_KEY_PORT_SRC_MAX,
2638 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2639 sizeof(key->tp_range.tp_max.src)))
2640 return -1;
2641
2642 return 0;
2643}
2644
2645static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2646 struct flow_dissector_key_mpls *mpls_key,
2647 struct flow_dissector_key_mpls *mpls_mask,
2648 u8 lse_index)
2649{
2650 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2651 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2652 int err;
2653
2654 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2655 lse_index + 1);
2656 if (err)
2657 return err;
2658
2659 if (lse_mask->mpls_ttl) {
2660 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2661 lse_key->mpls_ttl);
2662 if (err)
2663 return err;
2664 }
2665 if (lse_mask->mpls_bos) {
2666 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2667 lse_key->mpls_bos);
2668 if (err)
2669 return err;
2670 }
2671 if (lse_mask->mpls_tc) {
2672 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2673 lse_key->mpls_tc);
2674 if (err)
2675 return err;
2676 }
2677 if (lse_mask->mpls_label) {
2678 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2679 lse_key->mpls_label);
2680 if (err)
2681 return err;
2682 }
2683
2684 return 0;
2685}
2686
2687static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2688 struct flow_dissector_key_mpls *mpls_key,
2689 struct flow_dissector_key_mpls *mpls_mask)
2690{
2691 struct nlattr *opts;
2692 struct nlattr *lse;
2693 u8 lse_index;
2694 int err;
2695
2696 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2697 if (!opts)
2698 return -EMSGSIZE;
2699
2700 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2701 if (!(mpls_mask->used_lses & 1 << lse_index))
2702 continue;
2703
2704 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2705 if (!lse) {
2706 err = -EMSGSIZE;
2707 goto err_opts;
2708 }
2709
2710 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2711 lse_index);
2712 if (err)
2713 goto err_opts_lse;
2714 nla_nest_end(skb, lse);
2715 }
2716 nla_nest_end(skb, opts);
2717
2718 return 0;
2719
2720err_opts_lse:
2721 nla_nest_cancel(skb, lse);
2722err_opts:
2723 nla_nest_cancel(skb, opts);
2724
2725 return err;
2726}
2727
2728static int fl_dump_key_mpls(struct sk_buff *skb,
2729 struct flow_dissector_key_mpls *mpls_key,
2730 struct flow_dissector_key_mpls *mpls_mask)
2731{
2732 struct flow_dissector_mpls_lse *lse_mask;
2733 struct flow_dissector_mpls_lse *lse_key;
2734 int err;
2735
2736 if (!mpls_mask->used_lses)
2737 return 0;
2738
2739 lse_mask = &mpls_mask->ls[0];
2740 lse_key = &mpls_key->ls[0];
2741
2742 /* For backward compatibility, don't use the MPLS nested attributes if
2743 * the rule can be expressed using the old attributes.
2744 */
2745 if (mpls_mask->used_lses & ~1 ||
2746 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2747 !lse_mask->mpls_tc && !lse_mask->mpls_label))
2748 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2749
2750 if (lse_mask->mpls_ttl) {
2751 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2752 lse_key->mpls_ttl);
2753 if (err)
2754 return err;
2755 }
2756 if (lse_mask->mpls_tc) {
2757 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2758 lse_key->mpls_tc);
2759 if (err)
2760 return err;
2761 }
2762 if (lse_mask->mpls_label) {
2763 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2764 lse_key->mpls_label);
2765 if (err)
2766 return err;
2767 }
2768 if (lse_mask->mpls_bos) {
2769 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2770 lse_key->mpls_bos);
2771 if (err)
2772 return err;
2773 }
2774 return 0;
2775}
2776
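/* used_lses is a bitmap of matched label stack entries, so
 * "used_lses & ~1" is nonzero whenever anything beyond LSE 0 is
 * matched, which the legacy flat attributes cannot express. E.g.
 * used_lses == 0x5 (LSEs 0 and 2 matched) forces the nested
 * TCA_FLOWER_KEY_MPLS_OPTS format.
 */
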
2777static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2778 struct flow_dissector_key_ip *key,
2779 struct flow_dissector_key_ip *mask)
2780{
2781 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2782 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2783 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2784 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2785
2786 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2787 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2788 return -1;
2789
2790 return 0;
2791}
2792
2793static int fl_dump_key_vlan(struct sk_buff *skb,
2794 int vlan_id_key, int vlan_prio_key,
2795 struct flow_dissector_key_vlan *vlan_key,
2796 struct flow_dissector_key_vlan *vlan_mask)
2797{
2798 int err;
2799
2800 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2801 return 0;
2802 if (vlan_mask->vlan_id) {
2803 err = nla_put_u16(skb, vlan_id_key,
2804 vlan_key->vlan_id);
2805 if (err)
2806 return err;
2807 }
2808 if (vlan_mask->vlan_priority) {
2809 err = nla_put_u8(skb, vlan_prio_key,
2810 vlan_key->vlan_priority);
2811 if (err)
2812 return err;
2813 }
2814 return 0;
2815}
2816
2817static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2818 u32 *flower_key, u32 *flower_mask,
2819 u32 flower_flag_bit, u32 dissector_flag_bit)
2820{
2821 if (dissector_mask & dissector_flag_bit) {
2822 *flower_mask |= flower_flag_bit;
2823 if (dissector_key & dissector_flag_bit)
2824 *flower_key |= flower_flag_bit;
2825 }
2826}
2827
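/* fl_get_key_flag() copies one flag between bit spaces, as the calls
 * in fl_dump_key_flags() below illustrate: when the dissector mask has
 * the flag set, the flower bit is set in *flower_mask, and also in
 * *flower_key if the dissector key carries it. With hypothetical
 * values, dissector key/mask 0x1/0x1 sets the flower bit in both
 * words, 0x0/0x1 sets it in the mask only, and an unmasked flag leaves
 * both words untouched.
 */
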
2828static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2829{
2830 u32 key, mask;
2831 __be32 _key, _mask;
2832 int err;
2833
2834 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2835 return 0;
2836
2837 key = 0;
2838 mask = 0;
2839
2840 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2841 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2842 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2843 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2844 FLOW_DIS_FIRST_FRAG);
2845
2846 _key = cpu_to_be32(key);
2847 _mask = cpu_to_be32(mask);
2848
2849 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2850 if (err)
2851 return err;
2852
2853 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2854}
2855
2856static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2857 struct flow_dissector_key_enc_opts *enc_opts)
2858{
2859 struct geneve_opt *opt;
2860 struct nlattr *nest;
2861 int opt_off = 0;
2862
2863 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2864 if (!nest)
2865 goto nla_put_failure;
2866
2867 while (enc_opts->len > opt_off) {
2868 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2869
2870 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2871 opt->opt_class))
2872 goto nla_put_failure;
2873 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2874 opt->type))
2875 goto nla_put_failure;
2876 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2877 opt->length * 4, opt->opt_data))
2878 goto nla_put_failure;
2879
2880 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2881 }
2882 nla_nest_end(skb, nest);
2883 return 0;
2884
2885nla_put_failure:
2886 nla_nest_cancel(skb, nest);
2887 return -EMSGSIZE;
2888}
2889
2890static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2891 struct flow_dissector_key_enc_opts *enc_opts)
2892{
2893 struct vxlan_metadata *md;
2894 struct nlattr *nest;
2895
2896 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2897 if (!nest)
2898 goto nla_put_failure;
2899
2900 md = (struct vxlan_metadata *)&enc_opts->data[0];
2901 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2902 goto nla_put_failure;
2903
2904 nla_nest_end(skb, nest);
2905 return 0;
2906
2907nla_put_failure:
2908 nla_nest_cancel(skb, nest);
2909 return -EMSGSIZE;
2910}
2911
2912static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2913 struct flow_dissector_key_enc_opts *enc_opts)
2914{
2915 struct erspan_metadata *md;
2916 struct nlattr *nest;
2917
2918 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2919 if (!nest)
2920 goto nla_put_failure;
2921
2922 md = (struct erspan_metadata *)&enc_opts->data[0];
2923 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2924 goto nla_put_failure;
2925
2926 if (md->version == 1 &&
2927 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2928 goto nla_put_failure;
2929
2930 if (md->version == 2 &&
2931 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2932 md->u.md2.dir) ||
2933 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2934 get_hwid(&md->u.md2))))
2935 goto nla_put_failure;
2936
2937 nla_nest_end(skb, nest);
2938 return 0;
2939
2940nla_put_failure:
2941 nla_nest_cancel(skb, nest);
2942 return -EMSGSIZE;
2943}
2944
2945static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2946 struct flow_dissector_key_enc_opts *enc_opts)
2947
2948{
2949 struct gtp_pdu_session_info *session_info;
2950 struct nlattr *nest;
2951
2952 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2953 if (!nest)
2954 goto nla_put_failure;
2955
2956 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2957
2958 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2959 session_info->pdu_type))
2960 goto nla_put_failure;
2961
2962 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2963 goto nla_put_failure;
2964
2965 nla_nest_end(skb, nest);
2966 return 0;
2967
2968nla_put_failure:
2969 nla_nest_cancel(skb, nest);
2970 return -EMSGSIZE;
2971}
2972
2973static int fl_dump_key_ct(struct sk_buff *skb,
2974 struct flow_dissector_key_ct *key,
2975 struct flow_dissector_key_ct *mask)
2976{
2977 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2978 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2979 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2980 sizeof(key->ct_state)))
2981 goto nla_put_failure;
2982
2983 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2984 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2985 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2986 sizeof(key->ct_zone)))
2987 goto nla_put_failure;
2988
2989 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2990 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2991 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2992 sizeof(key->ct_mark)))
2993 goto nla_put_failure;
2994
2995 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2996 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2997 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2998 sizeof(key->ct_labels)))
2999 goto nla_put_failure;
3000
3001 return 0;
3002
3003nla_put_failure:
3004 return -EMSGSIZE;
3005}
3006
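/* IS_ENABLED() evaluates to a compile-time constant, so on kernels
 * built without the respective conntrack options the corresponding
 * fl_dump_key_val() calls above are optimized away and those
 * attributes are simply never emitted.
 */
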
3007static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
3008 struct flow_dissector_key_enc_opts *enc_opts)
3009{
3010 struct nlattr *nest;
3011 int err;
3012
3013 if (!enc_opts->len)
3014 return 0;
3015
3016 nest = nla_nest_start_noflag(skb, enc_opt_type);
3017 if (!nest)
3018 goto nla_put_failure;
3019
3020 switch (enc_opts->dst_opt_type) {
3021 case TUNNEL_GENEVE_OPT:
3022 err = fl_dump_key_geneve_opt(skb, enc_opts);
3023 if (err)
3024 goto nla_put_failure;
3025 break;
3026 case TUNNEL_VXLAN_OPT:
3027 err = fl_dump_key_vxlan_opt(skb, enc_opts);
3028 if (err)
3029 goto nla_put_failure;
3030 break;
3031 case TUNNEL_ERSPAN_OPT:
3032 err = fl_dump_key_erspan_opt(skb, enc_opts);
3033 if (err)
3034 goto nla_put_failure;
3035 break;
3036 case TUNNEL_GTP_OPT:
3037 err = fl_dump_key_gtp_opt(skb, enc_opts);
3038 if (err)
3039 goto nla_put_failure;
3040 break;
3041 default:
3042 goto nla_put_failure;
3043 }
3044 nla_nest_end(skb, nest);
3045 return 0;
3046
3047nla_put_failure:
3048 nla_nest_cancel(skb, nest);
3049 return -EMSGSIZE;
3050}
3051
3052static int fl_dump_key_enc_opt(struct sk_buff *skb,
3053 struct flow_dissector_key_enc_opts *key_opts,
3054 struct flow_dissector_key_enc_opts *msk_opts)
3055{
3056 int err;
3057
3058 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3059 if (err)
3060 return err;
3061
3062 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3063}
3064
3065static int fl_dump_key(struct sk_buff *skb, struct net *net,
3066 struct fl_flow_key *key, struct fl_flow_key *mask)
3067{
3068 if (mask->meta.ingress_ifindex) {
3069 struct net_device *dev;
3070
3071 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3072 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3073 goto nla_put_failure;
3074 }
3075
3076 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3077 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3078 sizeof(key->eth.dst)) ||
3079 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3080 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3081 sizeof(key->eth.src)) ||
3082 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3083 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3084 sizeof(key->basic.n_proto)))
3085 goto nla_put_failure;
3086
3087 if (mask->num_of_vlans.num_of_vlans) {
3088 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3089 goto nla_put_failure;
3090 }
3091
3092 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3093 goto nla_put_failure;
3094
3095 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3096 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3097 goto nla_put_failure;
3098
3099 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3100 TCA_FLOWER_KEY_CVLAN_PRIO,
3101 &key->cvlan, &mask->cvlan) ||
3102 (mask->cvlan.vlan_tpid &&
3103 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3104 key->cvlan.vlan_tpid)))
3105 goto nla_put_failure;
3106
3107 if (mask->basic.n_proto) {
3108 if (mask->cvlan.vlan_eth_type) {
3109 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3110 key->basic.n_proto))
3111 goto nla_put_failure;
3112 } else if (mask->vlan.vlan_eth_type) {
3113 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3114 key->vlan.vlan_eth_type))
3115 goto nla_put_failure;
3116 }
3117 }
3118
3119 if ((key->basic.n_proto == htons(ETH_P_IP) ||
3120 key->basic.n_proto == htons(ETH_P_IPV6)) &&
3121 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3122 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3123 sizeof(key->basic.ip_proto)) ||
3124 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3125 goto nla_put_failure;
3126
3127 if (mask->pppoe.session_id) {
3128 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3129 key->pppoe.session_id))
3130 goto nla_put_failure;
3131 }
3132 if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3133 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3134 key->pppoe.ppp_proto))
3135 goto nla_put_failure;
3136 }
3137
3138 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3139 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3140 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3141 sizeof(key->ipv4.src)) ||
3142 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3143 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3144 sizeof(key->ipv4.dst))))
3145 goto nla_put_failure;
3146 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3147 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3148 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3149 sizeof(key->ipv6.src)) ||
3150 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3151 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3152 sizeof(key->ipv6.dst))))
3153 goto nla_put_failure;
3154
3155 if (key->basic.ip_proto == IPPROTO_TCP &&
3156 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3157 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3158 sizeof(key->tp.src)) ||
3159 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3160 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3161 sizeof(key->tp.dst)) ||
3162 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3163 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3164 sizeof(key->tcp.flags))))
3165 goto nla_put_failure;
3166 else if (key->basic.ip_proto == IPPROTO_UDP &&
3167 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3168 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3169 sizeof(key->tp.src)) ||
3170 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3171 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3172 sizeof(key->tp.dst))))
3173 goto nla_put_failure;
3174 else if (key->basic.ip_proto == IPPROTO_SCTP &&
3175 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3176 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3177 sizeof(key->tp.src)) ||
3178 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3179 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3180 sizeof(key->tp.dst))))
3181 goto nla_put_failure;
3182 else if (key->basic.n_proto == htons(ETH_P_IP) &&
3183 key->basic.ip_proto == IPPROTO_ICMP &&
3184 (fl_dump_key_val(skb, &key->icmp.type,
3185 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3186 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3187 sizeof(key->icmp.type)) ||
3188 fl_dump_key_val(skb, &key->icmp.code,
3189 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3190 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3191 sizeof(key->icmp.code))))
3192 goto nla_put_failure;
3193 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3194 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3195 (fl_dump_key_val(skb, &key->icmp.type,
3196 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3197 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3198 sizeof(key->icmp.type)) ||
3199 fl_dump_key_val(skb, &key->icmp.code,
3200 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3201 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3202 sizeof(key->icmp.code))))
3203 goto nla_put_failure;
3204 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3205 key->basic.n_proto == htons(ETH_P_RARP)) &&
3206 (fl_dump_key_val(skb, &key->arp.sip,
3207 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3208 TCA_FLOWER_KEY_ARP_SIP_MASK,
3209 sizeof(key->arp.sip)) ||
3210 fl_dump_key_val(skb, &key->arp.tip,
3211 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3212 TCA_FLOWER_KEY_ARP_TIP_MASK,
3213 sizeof(key->arp.tip)) ||
3214 fl_dump_key_val(skb, &key->arp.op,
3215 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3216 TCA_FLOWER_KEY_ARP_OP_MASK,
3217 sizeof(key->arp.op)) ||
3218 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3219 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3220 sizeof(key->arp.sha)) ||
3221 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3222 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3223 sizeof(key->arp.tha))))
3224 goto nla_put_failure;
3225 else if (key->basic.ip_proto == IPPROTO_L2TP &&
3226 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3227 TCA_FLOWER_KEY_L2TPV3_SID,
3228 &mask->l2tpv3.session_id,
3229 TCA_FLOWER_UNSPEC,
3230 sizeof(key->l2tpv3.session_id)))
3231 goto nla_put_failure;
3232
3233 if ((key->basic.ip_proto == IPPROTO_TCP ||
3234 key->basic.ip_proto == IPPROTO_UDP ||
3235 key->basic.ip_proto == IPPROTO_SCTP) &&
3236 fl_dump_key_port_range(skb, key, mask))
3237 goto nla_put_failure;
3238
3239 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3240 (fl_dump_key_val(skb, &key->enc_ipv4.src,
3241 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3242 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3243 sizeof(key->enc_ipv4.src)) ||
3244 fl_dump_key_val(skb, &key->enc_ipv4.dst,
3245 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3246 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3247 sizeof(key->enc_ipv4.dst))))
3248 goto nla_put_failure;
3249 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3250 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3251 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3252 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3253 sizeof(key->enc_ipv6.src)) ||
3254 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3255 TCA_FLOWER_KEY_ENC_IPV6_DST,
3256 &mask->enc_ipv6.dst,
3257 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3258 sizeof(key->enc_ipv6.dst))))
3259 goto nla_put_failure;
3260
3261 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3262 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3263 sizeof(key->enc_key_id)) ||
3264 fl_dump_key_val(skb, &key->enc_tp.src,
3265 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3266 &mask->enc_tp.src,
3267 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3268 sizeof(key->enc_tp.src)) ||
3269 fl_dump_key_val(skb, &key->enc_tp.dst,
3270 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3271 &mask->enc_tp.dst,
3272 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3273 sizeof(key->enc_tp.dst)) ||
3274 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3275 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3276 goto nla_put_failure;
3277
3278 if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3279 goto nla_put_failure;
3280
3281 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3282 goto nla_put_failure;
3283
3284 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3285 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3286 sizeof(key->hash.hash)))
3287 goto nla_put_failure;
3288
3289 return 0;
3290
3291nla_put_failure:
3292 return -EMSGSIZE;
3293}
3294
3295static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3296 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3297{
3298 struct cls_fl_filter *f = fh;
3299 struct nlattr *nest;
3300 struct fl_flow_key *key, *mask;
3301 bool skip_hw;
3302
3303 if (!f)
3304 return skb->len;
3305
3306 t->tcm_handle = f->handle;
3307
3308 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3309 if (!nest)
3310 goto nla_put_failure;
3311
3312 spin_lock(&tp->lock);
3313
3314 if (f->res.classid &&
3315 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3316 goto nla_put_failure_locked;
3317
3318 key = &f->key;
3319 mask = &f->mask->key;
3320 skip_hw = tc_skip_hw(f->flags);
3321
3322 if (fl_dump_key(skb, net, key, mask))
3323 goto nla_put_failure_locked;
3324
3325 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3326 goto nla_put_failure_locked;
3327
3328 spin_unlock(&tp->lock);
3329
3330 if (!skip_hw)
3331 fl_hw_update_stats(tp, f, rtnl_held);
3332
3333 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3334 goto nla_put_failure;
3335
3336 if (tcf_exts_dump(skb, &f->exts))
3337 goto nla_put_failure;
3338
3339 nla_nest_end(skb, nest);
3340
3341 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3342 goto nla_put_failure;
3343
3344 return skb->len;
3345
3346nla_put_failure_locked:
3347 spin_unlock(&tp->lock);
3348nla_put_failure:
3349 nla_nest_cancel(skb, nest);
3350 return -1;
3351}
3352
3353static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3354 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3355{
3356 struct cls_fl_filter *f = fh;
3357 struct nlattr *nest;
3358 bool skip_hw;
3359
3360 if (!f)
3361 return skb->len;
3362
3363 t->tcm_handle = f->handle;
3364
3365 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3366 if (!nest)
3367 goto nla_put_failure;
3368
3369 spin_lock(&tp->lock);
3370
3371 skip_hw = tc_skip_hw(f->flags);
3372
3373 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3374 goto nla_put_failure_locked;
3375
3376 spin_unlock(&tp->lock);
3377
3378 if (!skip_hw)
3379 fl_hw_update_stats(tp, f, rtnl_held);
3380
3381 if (tcf_exts_terse_dump(skb, &f->exts))
3382 goto nla_put_failure;
3383
3384 nla_nest_end(skb, nest);
3385
3386 return skb->len;
3387
3388nla_put_failure_locked:
3389 spin_unlock(&tp->lock);
3390nla_put_failure:
3391 nla_nest_cancel(skb, nest);
3392 return -1;
3393}
3394
3395static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3396{
3397 struct fl_flow_tmplt *tmplt = tmplt_priv;
3398 struct fl_flow_key *key, *mask;
3399 struct nlattr *nest;
3400
3401 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3402 if (!nest)
3403 goto nla_put_failure;
3404
3405 key = &tmplt->dummy_key;
3406 mask = &tmplt->mask;
3407
3408 if (fl_dump_key(skb, net, key, mask))
3409 goto nla_put_failure;
3410
3411 nla_nest_end(skb, nest);
3412
3413 return skb->len;
3414
3415nla_put_failure:
3416 nla_nest_cancel(skb, nest);
3417 return -EMSGSIZE;
3418}
3419
3420static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3421 unsigned long base)
3422{
3423 struct cls_fl_filter *f = fh;
3424
3425 tc_cls_bind_class(classid, cl, q, &f->res, base);
3426}
3427
3428static bool fl_delete_empty(struct tcf_proto *tp)
3429{
3430 struct cls_fl_head *head = fl_head_dereference(tp);
3431
3432 spin_lock(&tp->lock);
3433 tp->deleting = idr_is_empty(&head->handle_idr);
3434 spin_unlock(&tp->lock);
3435
3436 return tp->deleting;
3437}
3438
3439static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3440 .kind = "flower",
3441 .classify = fl_classify,
3442 .init = fl_init,
3443 .destroy = fl_destroy,
3444 .get = fl_get,
3445 .put = fl_put,
3446 .change = fl_change,
3447 .delete = fl_delete,
3448 .delete_empty = fl_delete_empty,
3449 .walk = fl_walk,
3450 .reoffload = fl_reoffload,
3451 .hw_add = fl_hw_add,
3452 .hw_del = fl_hw_del,
3453 .dump = fl_dump,
3454 .terse_dump = fl_terse_dump,
3455 .bind_class = fl_bind_class,
3456 .tmplt_create = fl_tmplt_create,
3457 .tmplt_destroy = fl_tmplt_destroy,
3458 .tmplt_dump = fl_tmplt_dump,
3459 .get_exts = fl_get_exts,
3460 .owner = THIS_MODULE,
3461 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
3462};
3463
3464static int __init cls_fl_init(void)
3465{
3466 return register_tcf_proto_ops(&cls_fl_ops);
3467}
3468
3469static void __exit cls_fl_exit(void)
3470{
3471 unregister_tcf_proto_ops(&cls_fl_ops);
3472}
3473
3474module_init(cls_fl_init);
3475module_exit(cls_fl_exit);
3476
3477MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3478MODULE_DESCRIPTION("Flower classifier");
3479MODULE_LICENSE("GPL v2");