// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (or unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
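
/* Usage sketch (illustrative only; "cls_example" and its ops are
 * hypothetical, not part of this file): a classifier module pairs this
 * with unregister_tcf_proto_ops() below from its init/exit hooks:
 *
 *	static struct tcf_proto_ops cls_example_ops = {
 *		.kind	= "example",
 *		.owner	= THIS_MODULE,
 *		...
 *	};
 *
 *	static int __init cls_example_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit cls_example_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 */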

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
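
/* Deferral sketch (illustrative; the names below are hypothetical, but
 * the pattern mirrors how classifiers such as cls_flower use this
 * helper): embed a struct rcu_work in the filter and queue its free
 * routine so it only runs after an RCU grace period has elapsed:
 *
 *	static void example_destroy_work(struct work_struct *work)
 *	{
 *		struct example_filter *f = container_of(to_rcu_work(work),
 *							struct example_filter,
 *							rwork);
 *		kfree(f);
 *	}
 *
 *	...
 *	tcf_queue_work(&f->rwork, example_destroy_work);
 */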

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
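
/* For example: with no existing filter (tp == NULL) the first automatic
 * priority is TC_H_MAJ(0xC0000000) == 0xC0000000; if the filter at the
 * insertion point already has prio 0xC0000000, the next allocation
 * yields TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000.
 */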

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send a notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and the user ought
	 * not to know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
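
/* Sketch of the action-side pairing (illustrative): an action that jumps
 * to a chain pins that chain for the lifetime of the action and drops
 * the reference with tcf_chain_put_by_act() below:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);
 */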

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
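
/* Iteration idiom (as used by tcf_block_flush_all_chains() below): pass
 * NULL to start, and the previous chain to advance; each call releases
 * the reference taken on the chain it was given:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...
 *
 * A caller that breaks out of the loop early still holds a reference to
 * the current chain and must release it itself.
 */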

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
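
/* Iteration idiom (as used by tfilter_notify_chain() below), analogous
 * to the chain iteration above:
 *
 *	for (tp = tcf_get_next_proto(chain, NULL);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp))
 *		...
 *
 * Breaking out early leaves one tp reference held; see the error path
 * of tcf_block_playback_offloads() below for how such a reference is
 * dropped with tcf_proto_put().
 */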

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
1051 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
1059 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the cls API rules update path without the
		 * rtnl lock. The caller must release the block when it is
		 * finished using it. The 'if' branch of this conditional
		 * obtains its reference to the block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be manually
		 * deallocated. After the block reference counter reaches 0,
		 * it is no longer possible to increment it or add new chains
		 * to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
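
/* Usage sketch (illustrative; "q" stands for a qdisc's private data with
 * hypothetical block/filter_list members): a simple classful qdisc wires
 * up its filter list from its ->init() callback and tears it down again
 * from ->destroy():
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 */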

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);
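
/* Classification sketch (illustrative of the pattern classful qdiscs use
 * on their enqueue path; the filter_list member is hypothetical here):
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *	int err = tcf_classify(skb, fl, &res, false);
 *
 *	switch (err) {
 *	case TC_ACT_SHOT:
 *	case TC_ACT_STOLEN:
 *		...		drop or consume the skb
 *	default:
 *		...		map res.classid to a class or band
 *	}
 */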

int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp,
			 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext && ext->chain) {
		struct tcf_chain *fchain;

		fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
		if (!fchain)
			return TC_ACT_SHOT;

		/* Consume, so cloned/redirect skbs won't inherit ext */
		skb_ext_del(skb, TC_SKB_EXT);

		tp = rcu_dereference_bh(fchain->filter_chain);
		last_executed_chain = fchain->index;
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
		ext->mru = qdisc_skb_cb(skb)->mru;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, true);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take the rtnl mutex if rtnl_held was set to true on a previous
	 * iteration, the block is shared (no qdisc found), the qdisc is not
	 * unlocked, the classifier type is not specified, or the classifier
	 * is not unlocked.
	 */
2014 if (rtnl_held ||
2015 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2016 !tcf_proto_is_unlocked(name)) {
2017 rtnl_held = true;
2018 rtnl_lock();
2019 }
2020
2021 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2022 if (err)
2023 goto errout;
2024
2025 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2026 extack);
2027 if (IS_ERR(block)) {
2028 err = PTR_ERR(block);
2029 goto errout;
2030 }
2031 block->classid = parent;
2032
2033 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2034 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2035 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2036 err = -EINVAL;
2037 goto errout;
2038 }
2039 chain = tcf_chain_get(block, chain_index, true);
2040 if (!chain) {
2041 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2042 err = -ENOMEM;
2043 goto errout;
2044 }
2045
2046 mutex_lock(&chain->filter_chain_lock);
2047 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2048 prio, prio_allocate);
2049 if (IS_ERR(tp)) {
2050 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2051 err = PTR_ERR(tp);
2052 goto errout_locked;
2053 }
2054
2055 if (tp == NULL) {
2056 struct tcf_proto *tp_new = NULL;
2057
2058 if (chain->flushing) {
2059 err = -EAGAIN;
2060 goto errout_locked;
2061 }
2062
2063 /* Proto-tcf does not exist, create new one */
2064
2065 if (tca[TCA_KIND] == NULL || !protocol) {
2066 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2067 err = -EINVAL;
2068 goto errout_locked;
2069 }
2070
2071 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2072 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2073 err = -ENOENT;
2074 goto errout_locked;
2075 }
2076
2077 if (prio_allocate)
2078 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2079 &chain_info));
2080
2081 mutex_unlock(&chain->filter_chain_lock);
2082 tp_new = tcf_proto_create(name, protocol, prio, chain,
2083 rtnl_held, extack);
2084 if (IS_ERR(tp_new)) {
2085 err = PTR_ERR(tp_new);
2086 goto errout_tp;
2087 }
2088
2089 tp_created = 1;
2090 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2091 rtnl_held);
2092 if (IS_ERR(tp)) {
2093 err = PTR_ERR(tp);
2094 goto errout_tp;
2095 }
2096 } else {
2097 mutex_unlock(&chain->filter_chain_lock);
2098 }
2099
2100 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2101 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2102 err = -EINVAL;
2103 goto errout;
2104 }
2105
2106 fh = tp->ops->get(tp, t->tcm_handle);
2107
2108 if (!fh) {
2109 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2110 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2111 err = -ENOENT;
2112 goto errout;
2113 }
2114 } else if (n->nlmsg_flags & NLM_F_EXCL) {
2115 tfilter_put(tp, fh);
2116 NL_SET_ERR_MSG(extack, "Filter already exists");
2117 err = -EEXIST;
2118 goto errout;
2119 }
2120
2121 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2122 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2123 err = -EINVAL;
2124 goto errout;
2125 }
2126
2127 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2128 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2129 rtnl_held, extack);
2130 if (err == 0) {
2131 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2132 RTM_NEWTFILTER, false, rtnl_held);
2133 tfilter_put(tp, fh);
2134 /* q pointer is NULL for shared blocks */
2135 if (q)
2136 q->flags &= ~TCQ_F_CAN_BYPASS;
2137 }
2138
2139errout:
2140 if (err && tp_created)
2141 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2142errout_tp:
2143 if (chain) {
2144 if (tp && !IS_ERR(tp))
2145 tcf_proto_put(tp, rtnl_held, NULL);
2146 if (!tp_created)
2147 tcf_chain_put(chain);
2148 }
2149 tcf_block_release(q, block, rtnl_held);
2150
2151 if (rtnl_held)
2152 rtnl_unlock();
2153
2154 if (err == -EAGAIN) {
2155 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2156 * of target chain.
2157 */
2158 rtnl_held = true;
2159 /* Replay the request. */
2160 goto replay;
2161 }
2162 return err;
2163
2164errout_locked:
2165 mutex_unlock(&chain->filter_chain_lock);
2166 goto errout;
2167}
2168
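/* Handle an RTM_DELTFILTER request: flush a whole chain when priority is
 * zero, remove an entire tcf_proto when no handle is given, or delete a
 * single filter instance otherwise.
 */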
2169static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2170 struct netlink_ext_ack *extack)
2171{
2172 struct net *net = sock_net(skb->sk);
2173 struct nlattr *tca[TCA_MAX + 1];
2174 char name[IFNAMSIZ];
2175 struct tcmsg *t;
2176 u32 protocol;
2177 u32 prio;
2178 u32 parent;
2179 u32 chain_index;
2180 struct Qdisc *q = NULL;
2181 struct tcf_chain_info chain_info;
2182 struct tcf_chain *chain = NULL;
2183 struct tcf_block *block = NULL;
2184 struct tcf_proto *tp = NULL;
2185 unsigned long cl = 0;
2186 void *fh = NULL;
2187 int err;
2188 bool rtnl_held = false;
2189
2190 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2191 return -EPERM;
2192
2193 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2194 rtm_tca_policy, extack);
2195 if (err < 0)
2196 return err;
2197
2198 t = nlmsg_data(n);
2199 protocol = TC_H_MIN(t->tcm_info);
2200 prio = TC_H_MAJ(t->tcm_info);
2201 parent = t->tcm_parent;
2202
2203 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2204 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2205 return -ENOENT;
2206 }
2207
2208 /* Find head of filter chain. */
2209
2210 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2211 if (err)
2212 return err;
2213
2214 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2215 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2216 err = -EINVAL;
2217 goto errout;
2218 }
2219 /* Take the rtnl mutex if flushing the whole chain, the block is shared
2220 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2221 * not specified, or the classifier is not unlocked.
2222 */
2223 if (!prio ||
2224 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2225 !tcf_proto_is_unlocked(name)) {
2226 rtnl_held = true;
2227 rtnl_lock();
2228 }
2229
2230 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2231 if (err)
2232 goto errout;
2233
2234 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2235 extack);
2236 if (IS_ERR(block)) {
2237 err = PTR_ERR(block);
2238 goto errout;
2239 }
2240
2241 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2242 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2243 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2244 err = -EINVAL;
2245 goto errout;
2246 }
2247 chain = tcf_chain_get(block, chain_index, false);
2248 if (!chain) {
2249 /* User requested flush on non-existent chain. Nothing to do,
2250 * so just return success.
2251 */
2252 if (prio == 0) {
2253 err = 0;
2254 goto errout;
2255 }
2256 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2257 err = -ENOENT;
2258 goto errout;
2259 }
2260
2261 if (prio == 0) {
2262 tfilter_notify_chain(net, skb, block, q, parent, n,
2263 chain, RTM_DELTFILTER);
2264 tcf_chain_flush(chain, rtnl_held);
2265 err = 0;
2266 goto errout;
2267 }
2268
2269 mutex_lock(&chain->filter_chain_lock);
2270 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2271 prio, false);
2272 if (!tp || IS_ERR(tp)) {
2273 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2274 err = tp ? PTR_ERR(tp) : -ENOENT;
2275 goto errout_locked;
2276 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2277 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2278 err = -EINVAL;
2279 goto errout_locked;
2280 } else if (t->tcm_handle == 0) {
2281 tcf_proto_signal_destroying(chain, tp);
2282 tcf_chain_tp_remove(chain, &chain_info, tp);
2283 mutex_unlock(&chain->filter_chain_lock);
2284
2285 tcf_proto_put(tp, rtnl_held, NULL);
2286 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2287 RTM_DELTFILTER, false, rtnl_held);
2288 err = 0;
2289 goto errout;
2290 }
2291 mutex_unlock(&chain->filter_chain_lock);
2292
2293 fh = tp->ops->get(tp, t->tcm_handle);
2294
2295 if (!fh) {
2296 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2297 err = -ENOENT;
2298 } else {
2299 bool last;
2300
2301 err = tfilter_del_notify(net, skb, n, tp, block,
2302 q, parent, fh, false, &last,
2303 rtnl_held, extack);
2304
2305 if (err)
2306 goto errout;
2307 if (last)
2308 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2309 }
2310
2311errout:
2312 if (chain) {
2313 if (tp && !IS_ERR(tp))
2314 tcf_proto_put(tp, rtnl_held, NULL);
2315 tcf_chain_put(chain);
2316 }
2317 tcf_block_release(q, block, rtnl_held);
2318
2319 if (rtnl_held)
2320 rtnl_unlock();
2321
2322 return err;
2323
2324errout_locked:
2325 mutex_unlock(&chain->filter_chain_lock);
2326 goto errout;
2327}
2328
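/* Handle an RTM_GETTFILTER request: look up a single filter and unicast
 * an RTM_NEWTFILTER notification back to the requester. Unlike the
 * new/del handlers, no CAP_NET_ADMIN check is needed for this read-only
 * operation.
 */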
2329static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2330 struct netlink_ext_ack *extack)
2331{
2332 struct net *net = sock_net(skb->sk);
2333 struct nlattr *tca[TCA_MAX + 1];
2334 char name[IFNAMSIZ];
2335 struct tcmsg *t;
2336 u32 protocol;
2337 u32 prio;
2338 u32 parent;
2339 u32 chain_index;
2340 struct Qdisc *q = NULL;
2341 struct tcf_chain_info chain_info;
2342 struct tcf_chain *chain = NULL;
2343 struct tcf_block *block = NULL;
2344 struct tcf_proto *tp = NULL;
2345 unsigned long cl = 0;
2346 void *fh = NULL;
2347 int err;
2348 bool rtnl_held = false;
2349
2350 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2351 rtm_tca_policy, extack);
2352 if (err < 0)
2353 return err;
2354
2355 t = nlmsg_data(n);
2356 protocol = TC_H_MIN(t->tcm_info);
2357 prio = TC_H_MAJ(t->tcm_info);
2358 parent = t->tcm_parent;
2359
2360 if (prio == 0) {
2361 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2362 return -ENOENT;
2363 }
2364
2365 /* Find head of filter chain. */
2366
2367 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2368 if (err)
2369 return err;
2370
2371 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2372 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2373 err = -EINVAL;
2374 goto errout;
2375 }
2376 /* Take the rtnl mutex if the block is shared (no qdisc found), the
2377 * qdisc is not unlocked, the classifier type is not specified, or the
2378 * classifier is not unlocked.
2379 */
2380 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2381 !tcf_proto_is_unlocked(name)) {
2382 rtnl_held = true;
2383 rtnl_lock();
2384 }
2385
2386 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2387 if (err)
2388 goto errout;
2389
2390 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2391 extack);
2392 if (IS_ERR(block)) {
2393 err = PTR_ERR(block);
2394 goto errout;
2395 }
2396
2397 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2398 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2399 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2400 err = -EINVAL;
2401 goto errout;
2402 }
2403 chain = tcf_chain_get(block, chain_index, false);
2404 if (!chain) {
2405 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2406 err = -EINVAL;
2407 goto errout;
2408 }
2409
2410 mutex_lock(&chain->filter_chain_lock);
2411 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2412 prio, false);
2413 mutex_unlock(&chain->filter_chain_lock);
2414 if (!tp || IS_ERR(tp)) {
2415 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2416 err = tp ? PTR_ERR(tp) : -ENOENT;
2417 goto errout;
2418 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2419 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2420 err = -EINVAL;
2421 goto errout;
2422 }
2423
2424 fh = tp->ops->get(tp, t->tcm_handle);
2425
2426 if (!fh) {
2427 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2428 err = -ENOENT;
2429 } else {
2430 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2431 fh, RTM_NEWTFILTER, true, rtnl_held);
2432 if (err < 0)
2433 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2434 }
2435
2436 tfilter_put(tp, fh);
2437errout:
2438 if (chain) {
2439 if (tp && !IS_ERR(tp))
2440 tcf_proto_put(tp, rtnl_held, NULL);
2441 tcf_chain_put(chain);
2442 }
2443 tcf_block_release(q, block, rtnl_held);
2444
2445 if (rtnl_held)
2446 rtnl_unlock();
2447
2448 return err;
2449}
2450
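/* Dump helpers: tcf_dump_args carries netlink callback state through the
 * classifier's ->walk() op, and tcf_node_dump() emits one filter node per
 * invocation. cb->args[] holds the position used to resume an
 * interrupted dump.
 */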
2451struct tcf_dump_args {
2452 struct tcf_walker w;
2453 struct sk_buff *skb;
2454 struct netlink_callback *cb;
2455 struct tcf_block *block;
2456 struct Qdisc *q;
2457 u32 parent;
2458 bool terse_dump;
2459};
2460
2461static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2462{
2463 struct tcf_dump_args *a = (void *)arg;
2464 struct net *net = sock_net(a->skb->sk);
2465
2466 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2467 n, NETLINK_CB(a->cb->skb).portid,
2468 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2469 RTM_NEWTFILTER, a->terse_dump, true);
2470}
2471
2472static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2473 struct sk_buff *skb, struct netlink_callback *cb,
2474 long index_start, long *p_index, bool terse)
2475{
2476 struct net *net = sock_net(skb->sk);
2477 struct tcf_block *block = chain->block;
2478 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2479 struct tcf_proto *tp, *tp_prev;
2480 struct tcf_dump_args arg;
2481
2482 for (tp = __tcf_get_next_proto(chain, NULL);
2483 tp;
2484 tp_prev = tp,
2485 tp = __tcf_get_next_proto(chain, tp),
2486 tcf_proto_put(tp_prev, true, NULL),
2487 (*p_index)++) {
2488 if (*p_index < index_start)
2489 continue;
2490 if (TC_H_MAJ(tcm->tcm_info) &&
2491 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2492 continue;
2493 if (TC_H_MIN(tcm->tcm_info) &&
2494 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2495 continue;
2496 if (*p_index > index_start)
2497 memset(&cb->args[1], 0,
2498 sizeof(cb->args) - sizeof(cb->args[0]));
2499 if (cb->args[1] == 0) {
2500 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2501 NETLINK_CB(cb->skb).portid,
2502 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2503 RTM_NEWTFILTER, false, true) <= 0)
2504 goto errout;
2505 cb->args[1] = 1;
2506 }
2507 if (!tp->ops->walk)
2508 continue;
2509 arg.w.fn = tcf_node_dump;
2510 arg.skb = skb;
2511 arg.cb = cb;
2512 arg.block = block;
2513 arg.q = q;
2514 arg.parent = parent;
2515 arg.w.stop = 0;
2516 arg.w.skip = cb->args[1] - 1;
2517 arg.w.count = 0;
2518 arg.w.cookie = cb->args[2];
2519 arg.terse_dump = terse;
2520 tp->ops->walk(tp, &arg.w, true);
2521 cb->args[2] = arg.w.cookie;
2522 cb->args[1] = arg.w.count + 1;
2523 if (arg.w.stop)
2524 goto errout;
2525 }
2526 return true;
2527
2528errout:
2529 tcf_proto_put(tp, true, NULL);
2530 return false;
2531}
2532
2533static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2534 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2535};
2536
2537/* called with RTNL */
2538static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2539{
2540 struct tcf_chain *chain, *chain_prev;
2541 struct net *net = sock_net(skb->sk);
2542 struct nlattr *tca[TCA_MAX + 1];
2543 struct Qdisc *q = NULL;
2544 struct tcf_block *block;
2545 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2546 bool terse_dump = false;
2547 long index_start;
2548 long index;
2549 u32 parent;
2550 int err;
2551
2552 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2553 return skb->len;
2554
2555 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2556 tcf_tfilter_dump_policy, cb->extack);
2557 if (err)
2558 return err;
2559
2560 if (tca[TCA_DUMP_FLAGS]) {
2561 struct nla_bitfield32 flags =
2562 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2563
2564 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2565 }
2566
2567 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2568 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2569 if (!block)
2570 goto out;
2571 /* If we work with a block index, q is NULL and the parent value
2572 * will never be used in the following code; the check in
2573 * tcf_fill_node prevents it. However, the compiler cannot see
2574 * that far, so set parent to zero to silence the warning about
2575 * parent being used uninitialized.
2576 */
2577 parent = 0;
2578 } else {
2579 const struct Qdisc_class_ops *cops;
2580 struct net_device *dev;
2581 unsigned long cl = 0;
2582
2583 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2584 if (!dev)
2585 return skb->len;
2586
2587 parent = tcm->tcm_parent;
2588 if (!parent)
2589 q = dev->qdisc;
2590 else
2591 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2592 if (!q)
2593 goto out;
2594 cops = q->ops->cl_ops;
2595 if (!cops)
2596 goto out;
2597 if (!cops->tcf_block)
2598 goto out;
2599 if (TC_H_MIN(tcm->tcm_parent)) {
2600 cl = cops->find(q, tcm->tcm_parent);
2601 if (cl == 0)
2602 goto out;
2603 }
2604 block = cops->tcf_block(q, cl, NULL);
2605 if (!block)
2606 goto out;
2607 parent = block->classid;
2608 if (tcf_block_shared(block))
2609 q = NULL;
2610 }
2611
2612 index_start = cb->args[0];
2613 index = 0;
2614
2615 for (chain = __tcf_get_next_chain(block, NULL);
2616 chain;
2617 chain_prev = chain,
2618 chain = __tcf_get_next_chain(block, chain),
2619 tcf_chain_put(chain_prev)) {
2620 if (tca[TCA_CHAIN] &&
2621 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2622 continue;
2623 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2624 index_start, &index, terse_dump)) {
2625 tcf_chain_put(chain);
2626 err = -EMSGSIZE;
2627 break;
2628 }
2629 }
2630
2631 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2632 tcf_block_refcnt_put(block, true);
2633 cb->args[0] = index;
2634
2635out:
2636 /* If we made no progress, the error (EMSGSIZE) is real */
2637 if (skb->len == 0 && err)
2638 return err;
2639 return skb->len;
2640}
2641
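/* Fill a netlink message describing a chain, including the template kind
 * and template payload when template ops are set on the chain.
 */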
2642static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2643 void *tmplt_priv, u32 chain_index,
2644 struct net *net, struct sk_buff *skb,
2645 struct tcf_block *block,
2646 u32 portid, u32 seq, u16 flags, int event)
2647{
2648 unsigned char *b = skb_tail_pointer(skb);
2649 const struct tcf_proto_ops *ops;
2650 struct nlmsghdr *nlh;
2651 struct tcmsg *tcm;
2652 void *priv;
2653
2654 ops = tmplt_ops;
2655 priv = tmplt_priv;
2656
2657 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2658 if (!nlh)
2659 goto out_nlmsg_trim;
2660 tcm = nlmsg_data(nlh);
2661 tcm->tcm_family = AF_UNSPEC;
2662 tcm->tcm__pad1 = 0;
2663 tcm->tcm__pad2 = 0;
2664 tcm->tcm_handle = 0;
2665 if (block->q) {
2666 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2667 tcm->tcm_parent = block->q->handle;
2668 } else {
2669 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2670 tcm->tcm_block_index = block->index;
2671 }
2672
2673 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2674 goto nla_put_failure;
2675
2676 if (ops) {
2677 if (nla_put_string(skb, TCA_KIND, ops->kind))
2678 goto nla_put_failure;
2679 if (ops->tmplt_dump(skb, net, priv) < 0)
2680 goto nla_put_failure;
2681 }
2682
2683 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2684 return skb->len;
2685
2686out_nlmsg_trim:
2687nla_put_failure:
2688 nlmsg_trim(skb, b);
2689 return -EMSGSIZE;
2690}
2691
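/* Notify userspace about a chain event, either by unicast to the
 * requester or by multicast to the RTNLGRP_TC group.
 */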
2692static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2693 u32 seq, u16 flags, int event, bool unicast)
2694{
2695 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2696 struct tcf_block *block = chain->block;
2697 struct net *net = block->net;
2698 struct sk_buff *skb;
2699 int err = 0;
2700
2701 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2702 if (!skb)
2703 return -ENOBUFS;
2704
2705 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2706 chain->index, net, skb, block, portid,
2707 seq, flags, event) <= 0) {
2708 kfree_skb(skb);
2709 return -EINVAL;
2710 }
2711
2712 if (unicast)
2713 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2714 else
2715 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2716 flags & NLM_F_ECHO);
2717
2718 if (err > 0)
2719 err = 0;
2720 return err;
2721}
2722
2723static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2724 void *tmplt_priv, u32 chain_index,
2725 struct tcf_block *block, struct sk_buff *oskb,
2726 u32 seq, u16 flags, bool unicast)
2727{
2728 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2729 struct net *net = block->net;
2730 struct sk_buff *skb;
2731
2732 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2733 if (!skb)
2734 return -ENOBUFS;
2735
2736 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2737 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2738 kfree_skb(skb);
2739 return -EINVAL;
2740 }
2741
2742 if (unicast)
2743 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2744
2745 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2746}
2747
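/* Attach a classifier template to a chain. The template pins the
 * classifier module (the reference taken by tcf_proto_lookup_ops) until
 * the template is destroyed in tc_chain_tmplt_del().
 */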
2748static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2749 struct nlattr **tca,
2750 struct netlink_ext_ack *extack)
2751{
2752 const struct tcf_proto_ops *ops;
2753 char name[IFNAMSIZ];
2754 void *tmplt_priv;
2755
2756 /* If kind is not set, the user did not specify a template. */
2757 if (!tca[TCA_KIND])
2758 return 0;
2759
2760 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2761 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2762 return -EINVAL;
2763 }
2764
2765 ops = tcf_proto_lookup_ops(name, true, extack);
2766 if (IS_ERR(ops))
2767 return PTR_ERR(ops);
2768 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2769 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
/* Drop the module reference taken by tcf_proto_lookup_ops() */
module_put(ops->owner);
2770 return -EOPNOTSUPP;
2771 }
2772
2773 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2774 if (IS_ERR(tmplt_priv)) {
2775 module_put(ops->owner);
2776 return PTR_ERR(tmplt_priv);
2777 }
2778 chain->tmplt_ops = ops;
2779 chain->tmplt_priv = tmplt_priv;
2780 return 0;
2781}
2782
2783static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2784 void *tmplt_priv)
2785{
2786 /* If template ops were never set, there is no work to do for us. */
2787 if (!tmplt_ops)
2788 return;
2789
2790 tmplt_ops->tmplt_destroy(tmplt_priv);
2791 module_put(tmplt_ops->owner);
2792}
2793
2794/* Add/delete/get a chain */
2795
2796static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2797 struct netlink_ext_ack *extack)
2798{
2799 struct net *net = sock_net(skb->sk);
2800 struct nlattr *tca[TCA_MAX + 1];
2801 struct tcmsg *t;
2802 u32 parent;
2803 u32 chain_index;
2804 struct Qdisc *q = NULL;
2805 struct tcf_chain *chain = NULL;
2806 struct tcf_block *block;
2807 unsigned long cl;
2808 int err;
2809
2810 if (n->nlmsg_type != RTM_GETCHAIN &&
2811 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2812 return -EPERM;
2813
2814replay:
2815 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2816 rtm_tca_policy, extack);
2817 if (err < 0)
2818 return err;
2819
2820 t = nlmsg_data(n);
2821 parent = t->tcm_parent;
2822 cl = 0;
2823
2824 block = tcf_block_find(net, &q, &parent, &cl,
2825 t->tcm_ifindex, t->tcm_block_index, extack);
2826 if (IS_ERR(block))
2827 return PTR_ERR(block);
2828
2829 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2830 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2831 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2832 err = -EINVAL;
2833 goto errout_block;
2834 }
2835
2836 mutex_lock(&block->lock);
2837 chain = tcf_chain_lookup(block, chain_index);
2838 if (n->nlmsg_type == RTM_NEWCHAIN) {
2839 if (chain) {
2840 if (tcf_chain_held_by_acts_only(chain)) {
2841 /* The chain exists only because there is
2842 * some action referencing it.
2843 */
2844 tcf_chain_hold(chain);
2845 } else {
2846 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2847 err = -EEXIST;
2848 goto errout_block_locked;
2849 }
2850 } else {
2851 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2852 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2853 err = -ENOENT;
2854 goto errout_block_locked;
2855 }
2856 chain = tcf_chain_create(block, chain_index);
2857 if (!chain) {
2858 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2859 err = -ENOMEM;
2860 goto errout_block_locked;
2861 }
2862 }
2863 } else {
2864 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2865 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2866 err = -EINVAL;
2867 goto errout_block_locked;
2868 }
2869 tcf_chain_hold(chain);
2870 }
2871
2872 if (n->nlmsg_type == RTM_NEWCHAIN) {
2873 /* Modifying a chain requires holding the parent block lock. In
2874 * case the chain was successfully added, take a reference to it
2875 * so that an empty chain does not disappear at the end of this
2876 * function.
2877 */
2878 tcf_chain_hold(chain);
2879 chain->explicitly_created = true;
2880 }
2881 mutex_unlock(&block->lock);
2882
2883 switch (n->nlmsg_type) {
2884 case RTM_NEWCHAIN:
2885 err = tc_chain_tmplt_add(chain, net, tca, extack);
2886 if (err) {
2887 tcf_chain_put_explicitly_created(chain);
2888 goto errout;
2889 }
2890
2891 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2892 RTM_NEWCHAIN, false);
2893 break;
2894 case RTM_DELCHAIN:
2895 tfilter_notify_chain(net, skb, block, q, parent, n,
2896 chain, RTM_DELTFILTER);
2897 /* Flush the chain first as the user requested chain removal. */
2898 tcf_chain_flush(chain, true);
2899 /* In case the chain was successfully deleted, put the reference
2900 * to the chain that was taken during addition.
2901 */
2902 tcf_chain_put_explicitly_created(chain);
2903 break;
2904 case RTM_GETCHAIN:
2905 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2906 n->nlmsg_seq, n->nlmsg_type, true);
2907 if (err < 0)
2908 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2909 break;
2910 default:
2911 err = -EOPNOTSUPP;
2912 NL_SET_ERR_MSG(extack, "Unsupported message type");
2913 goto errout;
2914 }
2915
2916errout:
2917 tcf_chain_put(chain);
2918errout_block:
2919 tcf_block_release(q, block, true);
2920 if (err == -EAGAIN)
2921 /* Replay the request. */
2922 goto replay;
2923 return err;
2924
2925errout_block_locked:
2926 mutex_unlock(&block->lock);
2927 goto errout_block;
2928}
2929
2930/* called with RTNL */
2931static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2932{
2933 struct net *net = sock_net(skb->sk);
2934 struct nlattr *tca[TCA_MAX + 1];
2935 struct Qdisc *q = NULL;
2936 struct tcf_block *block;
2937 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2938 struct tcf_chain *chain;
2939 long index_start;
2940 long index;
2941 int err;
2942
2943 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2944 return skb->len;
2945
2946 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2947 rtm_tca_policy, cb->extack);
2948 if (err)
2949 return err;
2950
2951 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2952 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2953 if (!block)
2954 goto out;
2955 } else {
2956 const struct Qdisc_class_ops *cops;
2957 struct net_device *dev;
2958 unsigned long cl = 0;
2959
2960 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2961 if (!dev)
2962 return skb->len;
2963
2964 if (!tcm->tcm_parent)
2965 q = dev->qdisc;
2966 else
2967 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2968
2969 if (!q)
2970 goto out;
2971 cops = q->ops->cl_ops;
2972 if (!cops)
2973 goto out;
2974 if (!cops->tcf_block)
2975 goto out;
2976 if (TC_H_MIN(tcm->tcm_parent)) {
2977 cl = cops->find(q, tcm->tcm_parent);
2978 if (cl == 0)
2979 goto out;
2980 }
2981 block = cops->tcf_block(q, cl, NULL);
2982 if (!block)
2983 goto out;
2984 if (tcf_block_shared(block))
2985 q = NULL;
2986 }
2987
2988 index_start = cb->args[0];
2989 index = 0;
2990
2991 mutex_lock(&block->lock);
2992 list_for_each_entry(chain, &block->chain_list, list) {
2993 if ((tca[TCA_CHAIN] &&
2994 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2995 continue;
2996 if (index < index_start) {
2997 index++;
2998 continue;
2999 }
3000 if (tcf_chain_held_by_acts_only(chain))
3001 continue;
3002 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3003 chain->index, net, skb, block,
3004 NETLINK_CB(cb->skb).portid,
3005 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3006 RTM_NEWCHAIN);
3007 if (err <= 0)
3008 break;
3009 index++;
3010 }
3011 mutex_unlock(&block->lock);
3012
3013 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3014 tcf_block_refcnt_put(block, true);
3015 cb->args[0] = index;
3016
3017out:
3018 /* If we made no progress, the error (EMSGSIZE) is real */
3019 if (skb->len == 0 && err)
3020 return err;
3021 return skb->len;
3022}
3023
3024void tcf_exts_destroy(struct tcf_exts *exts)
3025{
3026#ifdef CONFIG_NET_CLS_ACT
3027 if (exts->actions) {
3028 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3029 kfree(exts->actions);
3030 }
3031 exts->nr_actions = 0;
3032#endif
3033}
3034EXPORT_SYMBOL(tcf_exts_destroy);
3035
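/* Parse and initialize the actions attached to a filter. The legacy
 * "police" attribute maps to a single TCA_OLD_COMPAT action, while the
 * action attribute may carry a whole list of actions.
 */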
3036int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3037 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3038 bool rtnl_held, struct netlink_ext_ack *extack)
3039{
3040#ifdef CONFIG_NET_CLS_ACT
3041 {
3042 struct tc_action *act;
3043 size_t attr_size = 0;
3044
3045 if (exts->police && tb[exts->police]) {
3046 act = tcf_action_init_1(net, tp, tb[exts->police],
3047 rate_tlv, "police", ovr,
3048 TCA_ACT_BIND, rtnl_held,
3049 extack);
3050 if (IS_ERR(act))
3051 return PTR_ERR(act);
3052
3053 act->type = exts->type = TCA_OLD_COMPAT;
3054 exts->actions[0] = act;
3055 exts->nr_actions = 1;
3056 } else if (exts->action && tb[exts->action]) {
3057 int err;
3058
3059 err = tcf_action_init(net, tp, tb[exts->action],
3060 rate_tlv, NULL, ovr, TCA_ACT_BIND,
3061 exts->actions, &attr_size,
3062 rtnl_held, extack);
3063 if (err < 0)
3064 return err;
3065 exts->nr_actions = err;
3066 }
3067 }
3068#else
3069 if ((exts->action && tb[exts->action]) ||
3070 (exts->police && tb[exts->police])) {
3071 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3072 return -EOPNOTSUPP;
3073 }
3074#endif
3075
3076 return 0;
3077}
3078EXPORT_SYMBOL(tcf_exts_validate);
3079
3080void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3081{
3082#ifdef CONFIG_NET_CLS_ACT
3083 struct tcf_exts old = *dst;
3084
3085 *dst = *src;
3086 tcf_exts_destroy(&old);
3087#endif
3088}
3089EXPORT_SYMBOL(tcf_exts_change);
3090
3091#ifdef CONFIG_NET_CLS_ACT
3092static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3093{
3094 if (exts->nr_actions == 0)
3095 return NULL;
3096 else
3097 return exts->actions[0];
3098}
3099#endif
3100
3101int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3102{
3103#ifdef CONFIG_NET_CLS_ACT
3104 struct nlattr *nest;
3105
3106 if (exts->action && tcf_exts_has_actions(exts)) {
3107 /*
3108 * Again for backward-compatible mode: we want to work with
3109 * both old and new modes of entering tc data, even if
3110 * iproute2 is newer. - jhs
3111 */
3112 if (exts->type != TCA_OLD_COMPAT) {
3113 nest = nla_nest_start_noflag(skb, exts->action);
3114 if (nest == NULL)
3115 goto nla_put_failure;
3116
3117 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3118 < 0)
3119 goto nla_put_failure;
3120 nla_nest_end(skb, nest);
3121 } else if (exts->police) {
3122 struct tc_action *act = tcf_exts_first_act(exts);
3123 nest = nla_nest_start_noflag(skb, exts->police);
3124 if (nest == NULL || !act)
3125 goto nla_put_failure;
3126 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3127 goto nla_put_failure;
3128 nla_nest_end(skb, nest);
3129 }
3130 }
3131 return 0;
3132
3133nla_put_failure:
3134 nla_nest_cancel(skb, nest);
3135 return -1;
3136#else
3137 return 0;
3138#endif
3139}
3140EXPORT_SYMBOL(tcf_exts_dump);
3141
3142int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3143{
3144#ifdef CONFIG_NET_CLS_ACT
3145 struct nlattr *nest;
3146
3147 if (!exts->action || !tcf_exts_has_actions(exts))
3148 return 0;
3149
3150 nest = nla_nest_start_noflag(skb, exts->action);
3151 if (!nest)
3152 goto nla_put_failure;
3153
3154 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3155 goto nla_put_failure;
3156 nla_nest_end(skb, nest);
3157 return 0;
3158
3159nla_put_failure:
3160 nla_nest_cancel(skb, nest);
3161 return -1;
3162#else
3163 return 0;
3164#endif
3165}
3166EXPORT_SYMBOL(tcf_exts_terse_dump);
3167
3168int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3169{
3170#ifdef CONFIG_NET_CLS_ACT
3171 struct tc_action *a = tcf_exts_first_act(exts);
3172 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3173 return -1;
3174#endif
3175 return 0;
3176}
3177EXPORT_SYMBOL(tcf_exts_dump_stats);
3178
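/* Hardware offload accounting. block->offloadcnt counts tcf_proto
 * instances that have at least one filter in hardware; the per-filter
 * TCA_CLS_FLAGS_IN_HW flag and in_hw_count are kept in sync with it
 * under tp->lock, with block->cb_lock held.
 */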
3179static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3180{
3181 if (*flags & TCA_CLS_FLAGS_IN_HW)
3182 return;
3183 *flags |= TCA_CLS_FLAGS_IN_HW;
3184 atomic_inc(&block->offloadcnt);
3185}
3186
3187static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3188{
3189 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3190 return;
3191 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3192 atomic_dec(&block->offloadcnt);
3193}
3194
3195static void tc_cls_offload_cnt_update(struct tcf_block *block,
3196 struct tcf_proto *tp, u32 *cnt,
3197 u32 *flags, u32 diff, bool add)
3198{
3199 lockdep_assert_held(&block->cb_lock);
3200
3201 spin_lock(&tp->lock);
3202 if (add) {
3203 if (!*cnt)
3204 tcf_block_offload_inc(block, flags);
3205 *cnt += diff;
3206 } else {
3207 *cnt -= diff;
3208 if (!*cnt)
3209 tcf_block_offload_dec(block, flags);
3210 }
3211 spin_unlock(&tp->lock);
3212}
3213
3214static void
3215tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3216 u32 *cnt, u32 *flags)
3217{
3218 lockdep_assert_held(&block->cb_lock);
3219
3220 spin_lock(&tp->lock);
3221 tcf_block_offload_dec(block, flags);
3222 *cnt = 0;
3223 spin_unlock(&tp->lock);
3224}
3225
3226static int
3227__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3228 void *type_data, bool err_stop)
3229{
3230 struct flow_block_cb *block_cb;
3231 int ok_count = 0;
3232 int err;
3233
3234 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3235 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3236 if (err) {
3237 if (err_stop)
3238 return err;
3239 } else {
3240 ok_count++;
3241 }
3242 }
3243 return ok_count;
3244}
3245
3246int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3247 void *type_data, bool err_stop, bool rtnl_held)
3248{
3249 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3250 int ok_count;
3251
3252retry:
3253 if (take_rtnl)
3254 rtnl_lock();
3255 down_read(&block->cb_lock);
3256 /* Need to obtain rtnl lock if block is bound to devs that require it.
3257 * In block bind code cb_lock is obtained while holding rtnl, so we must
3258 * obtain the locks in same order here.
3259 */
3260 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3261 up_read(&block->cb_lock);
3262 take_rtnl = true;
3263 goto retry;
3264 }
3265
3266 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3267
3268 up_read(&block->cb_lock);
3269 if (take_rtnl)
3270 rtnl_unlock();
3271 return ok_count;
3272}
3273EXPORT_SYMBOL(tc_setup_cb_call);
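/* A minimal sketch of how a classifier might drive the block callbacks,
 * here to query stats for an offloaded filter. The cls_foo names are
 * hypothetical, and a real classifier would pass its own tc_setup_type
 * and offload struct; see fl_hw_update_stats() in cls_flower for a real
 * caller.
 *
 *	static void foo_hw_update_stats(struct tcf_proto *tp,
 *					struct cls_foo_filter *f,
 *					bool rtnl_held)
 *	{
 *		struct flow_cls_offload cls_foo = {};
 *
 *		cls_foo.command = FLOW_CLS_STATS;
 *		cls_foo.cookie = (unsigned long)f;
 *		tc_setup_cb_call(tp->chain->block, TC_SETUP_CLSFLOWER,
 *				 &cls_foo, false, rtnl_held);
 *	}
 */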
3274
3275/* Non-destructive filter add. If a filter that wasn't already in hardware
3276 * is successfully offloaded, increment the block offloads counter. On
3277 * failure, the previously offloaded filter is considered intact and the
3278 * offloads counter is not decremented.
3279 */
3280
3281int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3282 enum tc_setup_type type, void *type_data, bool err_stop,
3283 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3284{
3285 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3286 int ok_count;
3287
3288retry:
3289 if (take_rtnl)
3290 rtnl_lock();
3291 down_read(&block->cb_lock);
3292 /* Need to obtain rtnl lock if block is bound to devs that require it.
3293 * In block bind code cb_lock is obtained while holding rtnl, so we must
3294 * obtain the locks in same order here.
3295 */
3296 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3297 up_read(&block->cb_lock);
3298 take_rtnl = true;
3299 goto retry;
3300 }
3301
3302 /* Make sure all netdevs sharing this block are offload-capable. */
3303 if (block->nooffloaddevcnt && err_stop) {
3304 ok_count = -EOPNOTSUPP;
3305 goto err_unlock;
3306 }
3307
3308 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3309 if (ok_count < 0)
3310 goto err_unlock;
3311
3312 if (tp->ops->hw_add)
3313 tp->ops->hw_add(tp, type_data);
3314 if (ok_count > 0)
3315 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3316 ok_count, true);
3317err_unlock:
3318 up_read(&block->cb_lock);
3319 if (take_rtnl)
3320 rtnl_unlock();
3321 return ok_count < 0 ? ok_count : 0;
3322}
3323EXPORT_SYMBOL(tc_setup_cb_add);
3324
3325/* Destructive filter replace. If a filter that wasn't already in hardware
3326 * is successfully offloaded, increment the block offload counter. On
3327 * failure, the previously offloaded filter is considered destroyed and the
3328 * offload counter is decremented.
3329 */
3330
3331int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3332 enum tc_setup_type type, void *type_data, bool err_stop,
3333 u32 *old_flags, unsigned int *old_in_hw_count,
3334 u32 *new_flags, unsigned int *new_in_hw_count,
3335 bool rtnl_held)
3336{
3337 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3338 int ok_count;
3339
3340retry:
3341 if (take_rtnl)
3342 rtnl_lock();
3343 down_read(&block->cb_lock);
3344 /* Need to obtain rtnl lock if block is bound to devs that require it.
3345 * In block bind code cb_lock is obtained while holding rtnl, so we must
3346 * obtain the locks in same order here.
3347 */
3348 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3349 up_read(&block->cb_lock);
3350 take_rtnl = true;
3351 goto retry;
3352 }
3353
3354 /* Make sure all netdevs sharing this block are offload-capable. */
3355 if (block->nooffloaddevcnt && err_stop) {
3356 ok_count = -EOPNOTSUPP;
3357 goto err_unlock;
3358 }
3359
3360 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3361 if (tp->ops->hw_del)
3362 tp->ops->hw_del(tp, type_data);
3363
3364 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3365 if (ok_count < 0)
3366 goto err_unlock;
3367
3368 if (tp->ops->hw_add)
3369 tp->ops->hw_add(tp, type_data);
3370 if (ok_count > 0)
3371 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3372 new_flags, ok_count, true);
3373err_unlock:
3374 up_read(&block->cb_lock);
3375 if (take_rtnl)
3376 rtnl_unlock();
3377 return ok_count < 0 ? ok_count : 0;
3378}
3379EXPORT_SYMBOL(tc_setup_cb_replace);
3380
3381/* Destroy the filter and decrement the block offload counter if the
3382 * filter was previously offloaded.
3383 */
3384
3385int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3386 enum tc_setup_type type, void *type_data, bool err_stop,
3387 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3388{
3389 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3390 int ok_count;
3391
3392retry:
3393 if (take_rtnl)
3394 rtnl_lock();
3395 down_read(&block->cb_lock);
3396 /* Need to obtain rtnl lock if block is bound to devs that require it.
3397 * In block bind code cb_lock is obtained while holding rtnl, so we must
3398 * obtain the locks in same order here.
3399 */
3400 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3401 up_read(&block->cb_lock);
3402 take_rtnl = true;
3403 goto retry;
3404 }
3405
3406 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3407
3408 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3409 if (tp->ops->hw_del)
3410 tp->ops->hw_del(tp, type_data);
3411
3412 up_read(&block->cb_lock);
3413 if (take_rtnl)
3414 rtnl_unlock();
3415 return ok_count < 0 ? ok_count : 0;
3416}
3417EXPORT_SYMBOL(tc_setup_cb_destroy);
3418
3419int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3420 bool add, flow_setup_cb_t *cb,
3421 enum tc_setup_type type, void *type_data,
3422 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3423{
3424 int err = cb(type, type_data, cb_priv);
3425
3426 if (err) {
3427 if (add && tc_skip_sw(*flags))
3428 return err;
3429 } else {
3430 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3431 add);
3432 }
3433
3434 return 0;
3435}
3436EXPORT_SYMBOL(tc_setup_cb_reoffload);
3437
3438static int tcf_act_get_cookie(struct flow_action_entry *entry,
3439 const struct tc_action *act)
3440{
3441 struct tc_cookie *cookie;
3442 int err = 0;
3443
3444 rcu_read_lock();
3445 cookie = rcu_dereference(act->act_cookie);
3446 if (cookie) {
3447 entry->cookie = flow_action_cookie_create(cookie->data,
3448 cookie->len,
3449 GFP_ATOMIC);
3450 if (!entry->cookie)
3451 err = -ENOMEM;
3452 }
3453 rcu_read_unlock();
3454 return err;
3455}
3456
3457static void tcf_act_put_cookie(struct flow_action_entry *entry)
3458{
3459 flow_action_cookie_destroy(entry->cookie);
3460}
3461
3462void tc_cleanup_flow_action(struct flow_action *flow_action)
3463{
3464 struct flow_action_entry *entry;
3465 int i;
3466
3467 flow_action_for_each(i, entry, flow_action) {
3468 tcf_act_put_cookie(entry);
3469 if (entry->destructor)
3470 entry->destructor(entry->destructor_priv);
3471 }
3472}
3473EXPORT_SYMBOL(tc_cleanup_flow_action);
3474
3475static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3476 const struct tc_action *act)
3477{
3478#ifdef CONFIG_NET_CLS_ACT
3479 entry->dev = act->ops->get_dev(act, &entry->destructor);
3480 if (!entry->dev)
3481 return;
3482 entry->destructor_priv = entry->dev;
3483#endif
3484}
3485
3486static void tcf_tunnel_encap_put_tunnel(void *priv)
3487{
3488 struct ip_tunnel_info *tunnel = priv;
3489
3490 kfree(tunnel);
3491}
3492
3493static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3494 const struct tc_action *act)
3495{
3496 entry->tunnel = tcf_tunnel_info_copy(act);
3497 if (!entry->tunnel)
3498 return -ENOMEM;
3499 entry->destructor = tcf_tunnel_encap_put_tunnel;
3500 entry->destructor_priv = entry->tunnel;
3501 return 0;
3502}
3503
3504static void tcf_sample_get_group(struct flow_action_entry *entry,
3505 const struct tc_action *act)
3506{
3507#ifdef CONFIG_NET_CLS_ACT
3508 entry->sample.psample_group =
3509 act->ops->get_psample_group(act, &entry->destructor);
3510 entry->destructor_priv = entry->sample.psample_group;
3511#endif
3512}
3513
3514static void tcf_gate_entry_destructor(void *priv)
3515{
3516 struct action_gate_entry *oe = priv;
3517
3518 kfree(oe);
3519}
3520
3521static int tcf_gate_get_entries(struct flow_action_entry *entry,
3522 const struct tc_action *act)
3523{
3524 entry->gate.entries = tcf_gate_get_list(act);
3525
3526 if (!entry->gate.entries)
3527 return -EINVAL;
3528
3529 entry->destructor = tcf_gate_entry_destructor;
3530 entry->destructor_priv = entry->gate.entries;
3531
3532 return 0;
3533}
3534
3535static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
3536{
3537 if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
3538 return FLOW_ACTION_HW_STATS_DONT_CARE;
3539 else if (!hw_stats)
3540 return FLOW_ACTION_HW_STATS_DISABLED;
3541
3542 return hw_stats;
3543}
3544
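/* Translate the tc actions attached to a filter (tcf_exts) into the
 * driver-facing flow_action representation. Each action is snapshotted
 * under its tcfa_lock. A pedit action expands into one entry per pedit
 * key, which is why callers must size the table with
 * tcf_exts_num_actions().
 */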
3545int tc_setup_flow_action(struct flow_action *flow_action,
3546 const struct tcf_exts *exts)
3547{
3548 struct tc_action *act;
3549 int i, j, k, err = 0;
3550
3551 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3552 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3553 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3554
3555 if (!exts)
3556 return 0;
3557
3558 j = 0;
3559 tcf_exts_for_each_action(i, act, exts) {
3560 struct flow_action_entry *entry;
3561
3562 entry = &flow_action->entries[j];
3563 spin_lock_bh(&act->tcfa_lock);
3564 err = tcf_act_get_cookie(entry, act);
3565 if (err)
3566 goto err_out_locked;
3567
3568 entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3569
3570 if (is_tcf_gact_ok(act)) {
3571 entry->id = FLOW_ACTION_ACCEPT;
3572 } else if (is_tcf_gact_shot(act)) {
3573 entry->id = FLOW_ACTION_DROP;
3574 } else if (is_tcf_gact_trap(act)) {
3575 entry->id = FLOW_ACTION_TRAP;
3576 } else if (is_tcf_gact_goto_chain(act)) {
3577 entry->id = FLOW_ACTION_GOTO;
3578 entry->chain_index = tcf_gact_goto_chain_index(act);
3579 } else if (is_tcf_mirred_egress_redirect(act)) {
3580 entry->id = FLOW_ACTION_REDIRECT;
3581 tcf_mirred_get_dev(entry, act);
3582 } else if (is_tcf_mirred_egress_mirror(act)) {
3583 entry->id = FLOW_ACTION_MIRRED;
3584 tcf_mirred_get_dev(entry, act);
3585 } else if (is_tcf_mirred_ingress_redirect(act)) {
3586 entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3587 tcf_mirred_get_dev(entry, act);
3588 } else if (is_tcf_mirred_ingress_mirror(act)) {
3589 entry->id = FLOW_ACTION_MIRRED_INGRESS;
3590 tcf_mirred_get_dev(entry, act);
3591 } else if (is_tcf_vlan(act)) {
3592 switch (tcf_vlan_action(act)) {
3593 case TCA_VLAN_ACT_PUSH:
3594 entry->id = FLOW_ACTION_VLAN_PUSH;
3595 entry->vlan.vid = tcf_vlan_push_vid(act);
3596 entry->vlan.proto = tcf_vlan_push_proto(act);
3597 entry->vlan.prio = tcf_vlan_push_prio(act);
3598 break;
3599 case TCA_VLAN_ACT_POP:
3600 entry->id = FLOW_ACTION_VLAN_POP;
3601 break;
3602 case TCA_VLAN_ACT_MODIFY:
3603 entry->id = FLOW_ACTION_VLAN_MANGLE;
3604 entry->vlan.vid = tcf_vlan_push_vid(act);
3605 entry->vlan.proto = tcf_vlan_push_proto(act);
3606 entry->vlan.prio = tcf_vlan_push_prio(act);
3607 break;
3608 default:
3609 err = -EOPNOTSUPP;
3610 goto err_out_locked;
3611 }
3612 } else if (is_tcf_tunnel_set(act)) {
3613 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3614 err = tcf_tunnel_encap_get_tunnel(entry, act);
3615 if (err)
3616 goto err_out_locked;
3617 } else if (is_tcf_tunnel_release(act)) {
3618 entry->id = FLOW_ACTION_TUNNEL_DECAP;
3619 } else if (is_tcf_pedit(act)) {
3620 for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3621 switch (tcf_pedit_cmd(act, k)) {
3622 case TCA_PEDIT_KEY_EX_CMD_SET:
3623 entry->id = FLOW_ACTION_MANGLE;
3624 break;
3625 case TCA_PEDIT_KEY_EX_CMD_ADD:
3626 entry->id = FLOW_ACTION_ADD;
3627 break;
3628 default:
3629 err = -EOPNOTSUPP;
3630 goto err_out_locked;
3631 }
3632 entry->mangle.htype = tcf_pedit_htype(act, k);
3633 entry->mangle.mask = tcf_pedit_mask(act, k);
3634 entry->mangle.val = tcf_pedit_val(act, k);
3635 entry->mangle.offset = tcf_pedit_offset(act, k);
3636 entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3637 entry = &flow_action->entries[++j];
3638 }
3639 } else if (is_tcf_csum(act)) {
3640 entry->id = FLOW_ACTION_CSUM;
3641 entry->csum_flags = tcf_csum_update_flags(act);
3642 } else if (is_tcf_skbedit_mark(act)) {
3643 entry->id = FLOW_ACTION_MARK;
3644 entry->mark = tcf_skbedit_mark(act);
3645 } else if (is_tcf_sample(act)) {
3646 entry->id = FLOW_ACTION_SAMPLE;
3647 entry->sample.trunc_size = tcf_sample_trunc_size(act);
3648 entry->sample.truncate = tcf_sample_truncate(act);
3649 entry->sample.rate = tcf_sample_rate(act);
3650 tcf_sample_get_group(entry, act);
3651 } else if (is_tcf_police(act)) {
3652 entry->id = FLOW_ACTION_POLICE;
3653 entry->police.burst = tcf_police_burst(act);
3654 entry->police.rate_bytes_ps =
3655 tcf_police_rate_bytes_ps(act);
3656 entry->police.mtu = tcf_police_tcfp_mtu(act);
3657 entry->police.index = act->tcfa_index;
3658 } else if (is_tcf_ct(act)) {
3659 entry->id = FLOW_ACTION_CT;
3660 entry->ct.action = tcf_ct_action(act);
3661 entry->ct.zone = tcf_ct_zone(act);
3662 entry->ct.flow_table = tcf_ct_ft(act);
3663 } else if (is_tcf_mpls(act)) {
3664 switch (tcf_mpls_action(act)) {
3665 case TCA_MPLS_ACT_PUSH:
3666 entry->id = FLOW_ACTION_MPLS_PUSH;
3667 entry->mpls_push.proto = tcf_mpls_proto(act);
3668 entry->mpls_push.label = tcf_mpls_label(act);
3669 entry->mpls_push.tc = tcf_mpls_tc(act);
3670 entry->mpls_push.bos = tcf_mpls_bos(act);
3671 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3672 break;
3673 case TCA_MPLS_ACT_POP:
3674 entry->id = FLOW_ACTION_MPLS_POP;
3675 entry->mpls_pop.proto = tcf_mpls_proto(act);
3676 break;
3677 case TCA_MPLS_ACT_MODIFY:
3678 entry->id = FLOW_ACTION_MPLS_MANGLE;
3679 entry->mpls_mangle.label = tcf_mpls_label(act);
3680 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3681 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3682 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3683 break;
3684 default:
err = -EOPNOTSUPP;
3685 goto err_out_locked;
3686 }
3687 } else if (is_tcf_skbedit_ptype(act)) {
3688 entry->id = FLOW_ACTION_PTYPE;
3689 entry->ptype = tcf_skbedit_ptype(act);
3690 } else if (is_tcf_skbedit_priority(act)) {
3691 entry->id = FLOW_ACTION_PRIORITY;
3692 entry->priority = tcf_skbedit_priority(act);
3693 } else if (is_tcf_gate(act)) {
3694 entry->id = FLOW_ACTION_GATE;
3695 entry->gate.index = tcf_gate_index(act);
3696 entry->gate.prio = tcf_gate_prio(act);
3697 entry->gate.basetime = tcf_gate_basetime(act);
3698 entry->gate.cycletime = tcf_gate_cycletime(act);
3699 entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
3700 entry->gate.num_entries = tcf_gate_num_entries(act);
3701 err = tcf_gate_get_entries(entry, act);
3702 if (err)
3703 goto err_out_locked;
3704 } else {
3705 err = -EOPNOTSUPP;
3706 goto err_out_locked;
3707 }
3708 spin_unlock_bh(&act->tcfa_lock);
3709
3710 if (!is_tcf_pedit(act))
3711 j++;
3712 }
3713
3714err_out:
3715 if (err)
3716 tc_cleanup_flow_action(flow_action);
3717
3718 return err;
3719err_out_locked:
3720 spin_unlock_bh(&act->tcfa_lock);
3721 goto err_out;
3722}
3723EXPORT_SYMBOL(tc_setup_flow_action);
3724
3725unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3726{
3727 unsigned int num_acts = 0;
3728 struct tc_action *act;
3729 int i;
3730
3731 tcf_exts_for_each_action(i, act, exts) {
3732 if (is_tcf_pedit(act))
3733 num_acts += tcf_pedit_nkeys(act);
3734 else
3735 num_acts++;
3736 }
3737 return num_acts;
3738}
3739EXPORT_SYMBOL(tcf_exts_num_actions);
3740
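/* Qevents let a qdisc bind a filter block to an event it reports, such
 * as a packet being early-dropped, so that filters can classify and act
 * on the skb at the point where the event fired.
 */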
3741#ifdef CONFIG_NET_CLS_ACT
3742static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3743 u32 *p_block_index,
3744 struct netlink_ext_ack *extack)
3745{
3746 *p_block_index = nla_get_u32(block_index_attr);
3747 if (!*p_block_index) {
3748 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3749 return -EINVAL;
3750 }
3751
3752 return 0;
3753}
3754
3755int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3756 enum flow_block_binder_type binder_type,
3757 struct nlattr *block_index_attr,
3758 struct netlink_ext_ack *extack)
3759{
3760 u32 block_index;
3761 int err;
3762
3763 if (!block_index_attr)
3764 return 0;
3765
3766 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3767 if (err)
3768 return err;
3769
3770 if (!block_index)
3771 return 0;
3772
3773 qe->info.binder_type = binder_type;
3774 qe->info.chain_head_change = tcf_chain_head_change_dflt;
3775 qe->info.chain_head_change_priv = &qe->filter_chain;
3776 qe->info.block_index = block_index;
3777
3778 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3779}
3780EXPORT_SYMBOL(tcf_qevent_init);
3781
3782void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3783{
3784 if (qe->info.block_index)
3785 tcf_block_put_ext(qe->block, sch, &qe->info);
3786}
3787EXPORT_SYMBOL(tcf_qevent_destroy);
3788
3789int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3790 struct netlink_ext_ack *extack)
3791{
3792 u32 block_index;
3793 int err;
3794
3795 if (!block_index_attr)
3796 return 0;
3797
3798 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3799 if (err)
3800 return err;
3801
3802 /* Bounce newly-configured block or change in block. */
3803 if (block_index != qe->info.block_index) {
3804 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3805 return -EINVAL;
3806 }
3807
3808 return 0;
3809}
3810EXPORT_SYMBOL(tcf_qevent_validate_change);
3811
3812struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3813 struct sk_buff **to_free, int *ret)
3814{
3815 struct tcf_result cl_res;
3816 struct tcf_proto *fl;
3817
3818 if (!qe->info.block_index)
3819 return skb;
3820
3821 fl = rcu_dereference_bh(qe->filter_chain);
3822
3823 switch (tcf_classify(skb, fl, &cl_res, false)) {
3824 case TC_ACT_SHOT:
3825 qdisc_qstats_drop(sch);
3826 __qdisc_drop(skb, to_free);
3827 *ret = __NET_XMIT_BYPASS;
3828 return NULL;
3829 case TC_ACT_STOLEN:
3830 case TC_ACT_QUEUED:
3831 case TC_ACT_TRAP:
3832 __qdisc_drop(skb, to_free);
3833 *ret = __NET_XMIT_STOLEN;
3834 return NULL;
3835 case TC_ACT_REDIRECT:
3836 skb_do_redirect(skb);
3837 *ret = __NET_XMIT_STOLEN;
3838 return NULL;
3839 }
3840
3841 return skb;
3842}
3843EXPORT_SYMBOL(tcf_qevent_handle);
3844
3845int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3846{
3847 if (!qe->info.block_index)
3848 return 0;
3849 return nla_put_u32(skb, attr_name, qe->info.block_index);
3850}
3851EXPORT_SYMBOL(tcf_qevent_dump);
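/* A minimal sketch of how a qdisc might wire up a qevent. The "foo"
 * names and the TCA_FOO_QE_BLOCK attribute are hypothetical; the RED
 * qdisc's early_drop qevent follows this pattern:
 *
 *	init:    tcf_qevent_init(&q->qe, sch,
 *				 FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *				 tb[TCA_FOO_QE_BLOCK], extack);
 *	change:  tcf_qevent_validate_change(&q->qe, tb[TCA_FOO_QE_BLOCK],
 *					    extack);
 *	enqueue: skb = tcf_qevent_handle(&q->qe, sch, skb, to_free, &ret);
 *	         if (!skb)
 *	                 return NET_XMIT_CN | ret;
 *	dump:    tcf_qevent_dump(skb, TCA_FOO_QE_BLOCK, &q->qe);
 *	destroy: tcf_qevent_destroy(&q->qe, sch);
 */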
3852#endif
3853
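/* Per-netns state: an IDR that tracks shared blocks by block index,
 * protected by idr_lock.
 */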
3854static __net_init int tcf_net_init(struct net *net)
3855{
3856 struct tcf_net *tn = net_generic(net, tcf_net_id);
3857
3858 spin_lock_init(&tn->idr_lock);
3859 idr_init(&tn->idr);
3860 return 0;
3861}
3862
3863static void __net_exit tcf_net_exit(struct net *net)
3864{
3865 struct tcf_net *tn = net_generic(net, tcf_net_id);
3866
3867 idr_destroy(&tn->idr);
3868}
3869
3870static struct pernet_operations tcf_net_ops = {
3871 .init = tcf_net_init,
3872 .exit = tcf_net_exit,
3873 .id = &tcf_net_id,
3874 .size = sizeof(struct tcf_net),
3875};
3876
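/* Module init: create the ordered workqueue used for deferred filter
 * destruction, register the per-netns state and hook up the rtnetlink
 * handlers. The tfilter doit handlers run without rtnl
 * (RTNL_FLAG_DOIT_UNLOCKED); the chain handlers still take rtnl.
 */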
3877static int __init tc_filter_init(void)
3878{
3879 int err;
3880
3881 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3882 if (!tc_filter_wq)
3883 return -ENOMEM;
3884
3885 err = register_pernet_subsys(&tcf_net_ops);
3886 if (err)
3887 goto err_register_pernet_subsys;
3888
3889 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3890 RTNL_FLAG_DOIT_UNLOCKED);
3891 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3892 RTNL_FLAG_DOIT_UNLOCKED);
3893 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3894 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3895 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3896 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3897 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3898 tc_dump_chain, 0);
3899
3900 return 0;
3901
3902err_register_pernet_subsys:
3903 destroy_workqueue(tc_filter_wq);
3904 return err;
3905}
3906
3907subsys_initcall(tc_filter_init);