// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * The filters are packed to hash tables of key nodes
 * with a set of 32bit key/mask pairs at every node.
 * Nodes reference next level hash tables etc.
 *
 * This scheme is the best universal classifier I managed to
 * invent; it is not super-fast, but it is not slow (provided you
 * program it correctly), and general enough. And its relative
 * speed grows as the number of rules becomes larger.
 *
 * It seems that it represents the best middle point between
 * speed and manageability both by human and by machine.
 *
 * It is especially useful for link sharing combined with QoS;
 * pure RSVP doesn't need such a general approach and can use
 * much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
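
/* Illustrative userspace usage only (device names, classids and table
 * handles below are hypothetical): a plain terminal match and a hashed
 * two-level setup might be configured along these lines:
 *
 *	tc filter add dev eth0 parent 1: prio 1 u32 \
 *		match ip dst 192.168.0.0/24 flowid 1:10
 *
 *	tc filter add dev eth0 parent 1: prio 2 handle 1: u32 divisor 256
 *	tc filter add dev eth0 parent 1: prio 2 u32 ht 800:: \
 *		match ip protocol 6 0xff \
 *		hashkey mask 0x000000ff at 16 link 1:
 */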

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>
#include <net/tc_wrapper.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
	int			ifindex;
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	bool			is_root;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	long			knodes;
};

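/* Fold the 32-bit word at the hash offset into a bucket index: mask it
 * with the selector's hmask and shift the result down by fshift (the
 * position of the lowest set mask bit, precomputed in u32_change()).
 */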
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

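/* The classifier proper: starting from the root hash table, match the
 * skb against each knode's key/mask pairs. A matching knode either
 * terminates with a result (TC_U32_TERMINAL) or links to a lower hash
 * table via ht_down, in which case the current position is pushed on a
 * small stack (bounded by TC_U32_MAXDEPTH) so the walk can resume at
 * the upper level if the lower one does not produce a match.
 */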
TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb,
				   const struct tcf_proto *tp,
				   struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

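/* A u32 handle packs three fields: bits 31..20 hold the hash table id
 * (htid), bits 19..12 the bucket within the table and bits 11..0 the
 * node id (cf. the TC_U32_HTID/TC_U32_HASH/TC_U32_NODE macros). The
 * IDR below hands out ids 1..0x7FE; OR-ing in 0x800 before shifting
 * keeps generated htids distinct from the root table's 0x800.
 */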
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static struct hlist_head *tc_u_hash(void *key)
{
	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
	struct tc_u_common *tc;

	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
		if (tc->ptr == key)
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	void *key = tc_u_common_ptr(tp);
	struct tc_u_common *tp_c = tc_u_common_find(key);

	root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	root_ht->is_root = true;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = key;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);

	root_ht->refcnt++;
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static void __u32_destroy_key(struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
	kfree(n);
}

static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
	tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	__u32_destroy_key(n);
}

/* u32_delete_key_work() should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_work() variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, false);
	rtnl_unlock();
}

/* u32_delete_key_freepf_work() is the deferred-work variant that
 * frees the entire structure including the statistics percpu
 * variables. Only use this if the key is not a copy returned by
 * u32_init_knode(). See u32_delete_key_work() for the variant that
 * should be used with keys returned from u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, true);
	rtnl_unlock();
}

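/* Unlink a single knode from its bucket chain and queue it for freeing
 * once concurrent readers are done; its percpu counters go with it.
 */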
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);
				tp_c->knodes--;

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
			    &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.res = &n->res;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
			      &n->flags, &n->in_hw_count, true);
	if (err) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tp_c->knodes--;
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (ht->is_root) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
	return ret;
}

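/* Pick a node id for a new knode inside 'ht': try the upper half of
 * the 12-bit node-id space first (0x800..0xFFF), then fall back to any
 * free id above the htid, and as a last resort reuse the maximum id.
 */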
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, u32 flags, u32 fl_flags,
			 struct netlink_ext_ack *extack)
{
	int err, ifindex = -1;

	err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_INDEV]) {
		ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ifindex < 0)
			return -EINVAL;
	}

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

	if (ifindex >= 0)
		n->ifindex = ifindex;

	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

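/* RCU-friendly update path: rather than modifying a live knode,
 * allocate a fresh copy (sharing the percpu counters with the original
 * so readers keep updating them), splice the copy in with
 * u32_replace_knode() and free the original after a grace period.
 */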
static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

	if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

	return new;
}

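/* Create or update a filter. Three cases are handled below: replacing
 * an existing knode via an RCU copy, creating a new hash table when
 * TCA_U32_DIVISOR is given, and inserting a brand new knode into the
 * hash table selected by TCA_U32_HASH (the root table by default).
 */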
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, u32 flags,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, userflags = 0;
	size_t sel_size;
	int err;

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(userflags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ userflags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(net, tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base, new, tb,
				    tca[TCA_RATE], flags, new->flags,
				    extack);

		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (!is_power_of_2(divisor)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
			return -EINVAL;
		}
		if (divisor-- > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = userflags;

		err = u32_replace_hw_hnode(tp, ht, userflags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
			       __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	unsafe_memcpy(&n->sel, s, sel_size,
		      /* A composite flex-array structure destination,
		       * which was correctly sized with struct_size(),
		       * bounds-checked against nla_len(), and allocated
		       * above. */);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = userflags;

	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE],
			    flags, n->flags, extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		tp_c->knodes++;
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		     bool rtnl_held)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		if (!tc_cls_stats_dump(tp, arg, ht))
			return;

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (!tc_cls_stats_dump(tp, arg, n))
					return;
			}
		}
	}
}

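/* Reoffload support: when an offload callback is added to or removed
 * from the block, replay every hash table and key node of this
 * classifier to that one callback so its hardware state matches the
 * software tables.
 */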
static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.res = &n->res;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
				     &cls_u32, cb_priv, &n->flags,
				     &n->in_hw_count);
}

static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			   unsigned long base)
{
	struct tc_u_knode *n = fh;

	tc_cls_bind_class(classid, cl, q, &n->res, base);
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.reoffload	= u32_reoffload,
	.dump		= u32_dump,
	.bind_class	= u32_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
	pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");