1/*
2 * x_tables core - Backend for {ip,ip6,arp}_tables
3 *
4 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
5 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
6 *
7 * Based on existing ip_tables code which is
8 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
9 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 */
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/socket.h>
20#include <linux/net.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/string.h>
24#include <linux/vmalloc.h>
25#include <linux/mutex.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/audit.h>
29#include <linux/user_namespace.h>
30#include <net/net_namespace.h>
31
32#include <linux/netfilter/x_tables.h>
33#include <linux/netfilter_arp.h>
34#include <linux/netfilter_ipv4/ip_tables.h>
35#include <linux/netfilter_ipv6/ip6_tables.h>
36#include <linux/netfilter_arp/arp_tables.h>
37
38MODULE_LICENSE("GPL");
39MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
40MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
41
42#define XT_PCPU_BLOCK_SIZE 4096
43#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
44
45struct compat_delta {
46 unsigned int offset; /* offset in kernel */
47 int delta; /* delta in 32bit user land */
48};
49
50struct xt_af {
51 struct mutex mutex;
52 struct list_head match;
53 struct list_head target;
54#ifdef CONFIG_COMPAT
55 struct mutex compat_mutex;
56 struct compat_delta *compat_tab;
57 unsigned int number; /* number of slots in compat_tab[] */
58 unsigned int cur; /* number of used slots in compat_tab[] */
59#endif
60};
61
62static struct xt_af *xt;
63
64static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
65 [NFPROTO_UNSPEC] = "x",
66 [NFPROTO_IPV4] = "ip",
67 [NFPROTO_ARP] = "arp",
68 [NFPROTO_BRIDGE] = "eb",
69 [NFPROTO_IPV6] = "ip6",
70};
71
72/* Registration hooks for targets. */
73int xt_register_target(struct xt_target *target)
74{
75 u_int8_t af = target->family;
76
77 mutex_lock(&xt[af].mutex);
78 list_add(&target->list, &xt[af].target);
79 mutex_unlock(&xt[af].mutex);
80 return 0;
81}
82EXPORT_SYMBOL(xt_register_target);
83
84void
85xt_unregister_target(struct xt_target *target)
86{
87 u_int8_t af = target->family;
88
89 mutex_lock(&xt[af].mutex);
90 list_del(&target->list);
91 mutex_unlock(&xt[af].mutex);
92}
93EXPORT_SYMBOL(xt_unregister_target);
94
95int
96xt_register_targets(struct xt_target *target, unsigned int n)
97{
98 unsigned int i;
99 int err = 0;
100
101 for (i = 0; i < n; i++) {
102 err = xt_register_target(&target[i]);
103 if (err)
104 goto err;
105 }
106 return err;
107
108err:
109 if (i > 0)
110 xt_unregister_targets(target, i);
111 return err;
112}
113EXPORT_SYMBOL(xt_register_targets);
114
115void
116xt_unregister_targets(struct xt_target *target, unsigned int n)
117{
118 while (n-- > 0)
119 xt_unregister_target(&target[n]);
120}
121EXPORT_SYMBOL(xt_unregister_targets);
122
123int xt_register_match(struct xt_match *match)
124{
125 u_int8_t af = match->family;
126
127 mutex_lock(&xt[af].mutex);
128 list_add(&match->list, &xt[af].match);
129 mutex_unlock(&xt[af].mutex);
130 return 0;
131}
132EXPORT_SYMBOL(xt_register_match);
133
134void
135xt_unregister_match(struct xt_match *match)
136{
137 u_int8_t af = match->family;
138
139 mutex_lock(&xt[af].mutex);
140 list_del(&match->list);
141 mutex_unlock(&xt[af].mutex);
142}
143EXPORT_SYMBOL(xt_unregister_match);
144
145int
146xt_register_matches(struct xt_match *match, unsigned int n)
147{
148 unsigned int i;
149 int err = 0;
150
151 for (i = 0; i < n; i++) {
152 err = xt_register_match(&match[i]);
153 if (err)
154 goto err;
155 }
156 return err;
157
158err:
159 if (i > 0)
160 xt_unregister_matches(match, i);
161 return err;
162}
163EXPORT_SYMBOL(xt_register_matches);
164
165void
166xt_unregister_matches(struct xt_match *match, unsigned int n)
167{
168 while (n-- > 0)
169 xt_unregister_match(&match[n]);
170}
171EXPORT_SYMBOL(xt_unregister_matches);
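
/*
 * Example (illustrative sketch, not taken from an in-tree module): an
 * extension typically registers one xt_match per supported family from
 * its module init hook and unregisters the whole array on exit. The
 * "foo" name and xt_foo_info structure below are hypothetical.
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{
 *			.name      = "foo",
 *			.revision  = 0,
 *			.family    = NFPROTO_IPV4,
 *			.match     = foo_mt,
 *			.matchsize = sizeof(struct xt_foo_info),
 *			.me        = THIS_MODULE,
 *		},
 *		... a second entry with .family = NFPROTO_IPV6 usually follows ...
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 */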
172
173
/*
 * These are weird, but module loading must not be done while the mutex
 * is held (the loaded extension will register itself, taking the
 * mutex), and we have to have a single function to use.
 */
179
180/* Find match, grabs ref. Returns ERR_PTR() on error. */
181struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
182{
183 struct xt_match *m;
184 int err = -ENOENT;
185
186 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
187 return ERR_PTR(-EINVAL);
188
189 mutex_lock(&xt[af].mutex);
190 list_for_each_entry(m, &xt[af].match, list) {
191 if (strcmp(m->name, name) == 0) {
192 if (m->revision == revision) {
193 if (try_module_get(m->me)) {
194 mutex_unlock(&xt[af].mutex);
195 return m;
196 }
197 } else
198 err = -EPROTOTYPE; /* Found something. */
199 }
200 }
201 mutex_unlock(&xt[af].mutex);
202
203 if (af != NFPROTO_UNSPEC)
204 /* Try searching again in the family-independent list */
205 return xt_find_match(NFPROTO_UNSPEC, name, revision);
206
207 return ERR_PTR(err);
208}
209EXPORT_SYMBOL(xt_find_match);
210
211struct xt_match *
212xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
213{
214 struct xt_match *match;
215
216 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
217 return ERR_PTR(-EINVAL);
218
219 match = xt_find_match(nfproto, name, revision);
220 if (IS_ERR(match)) {
221 request_module("%st_%s", xt_prefix[nfproto], name);
222 match = xt_find_match(nfproto, name, revision);
223 }
224
225 return match;
226}
227EXPORT_SYMBOL_GPL(xt_request_find_match);
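
/*
 * Example (sketch): table translation code resolves a match by name and
 * revision like this; xt_request_find_match() autoloads the
 * "<prefix>t_<name>" module on a miss. The module reference it takes
 * must be dropped again with module_put(match->me) on later error paths.
 *
 *	struct xt_match *match;
 *
 *	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
 *				      m->u.user.revision);
 *	if (IS_ERR(match))
 *		return PTR_ERR(match);
 *	m->u.kernel.match = match;
 */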
228
229/* Find target, grabs ref. Returns ERR_PTR() on error. */
230static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
231{
232 struct xt_target *t;
233 int err = -ENOENT;
234
235 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
236 return ERR_PTR(-EINVAL);
237
238 mutex_lock(&xt[af].mutex);
239 list_for_each_entry(t, &xt[af].target, list) {
240 if (strcmp(t->name, name) == 0) {
241 if (t->revision == revision) {
242 if (try_module_get(t->me)) {
243 mutex_unlock(&xt[af].mutex);
244 return t;
245 }
246 } else
247 err = -EPROTOTYPE; /* Found something. */
248 }
249 }
250 mutex_unlock(&xt[af].mutex);
251
252 if (af != NFPROTO_UNSPEC)
253 /* Try searching again in the family-independent list */
254 return xt_find_target(NFPROTO_UNSPEC, name, revision);
255
256 return ERR_PTR(err);
257}
258
259struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
260{
261 struct xt_target *target;
262
263 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
264 return ERR_PTR(-EINVAL);
265
266 target = xt_find_target(af, name, revision);
267 if (IS_ERR(target)) {
268 request_module("%st_%s", xt_prefix[af], name);
269 target = xt_find_target(af, name, revision);
270 }
271
272 return target;
273}
274EXPORT_SYMBOL_GPL(xt_request_find_target);
275
276
277static int xt_obj_to_user(u16 __user *psize, u16 size,
278 void __user *pname, const char *name,
279 u8 __user *prev, u8 rev)
280{
281 if (put_user(size, psize))
282 return -EFAULT;
283 if (copy_to_user(pname, name, strlen(name) + 1))
284 return -EFAULT;
285 if (put_user(rev, prev))
286 return -EFAULT;
287
288 return 0;
289}
290
291#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \
292 xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \
293 U->u.user.name, K->u.kernel.TYPE->name, \
294 &U->u.user.revision, K->u.kernel.TYPE->revision)
295
296int xt_data_to_user(void __user *dst, const void *src,
297 int usersize, int size, int aligned_size)
298{
299 usersize = usersize ? : size;
300 if (copy_to_user(dst, src, usersize))
301 return -EFAULT;
302 if (usersize != aligned_size &&
303 clear_user(dst + usersize, aligned_size - usersize))
304 return -EFAULT;
305
306 return 0;
307}
308EXPORT_SYMBOL_GPL(xt_data_to_user);
309
310#define XT_DATA_TO_USER(U, K, TYPE) \
311 xt_data_to_user(U->data, K->data, \
312 K->u.kernel.TYPE->usersize, \
313 K->u.kernel.TYPE->TYPE##size, \
314 XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
315
316int xt_match_to_user(const struct xt_entry_match *m,
317 struct xt_entry_match __user *u)
318{
319 return XT_OBJ_TO_USER(u, m, match, 0) ||
320 XT_DATA_TO_USER(u, m, match);
321}
322EXPORT_SYMBOL_GPL(xt_match_to_user);
323
324int xt_target_to_user(const struct xt_entry_target *t,
325 struct xt_entry_target __user *u)
326{
327 return XT_OBJ_TO_USER(u, t, target, 0) ||
328 XT_DATA_TO_USER(u, t, target);
329}
330EXPORT_SYMBOL_GPL(xt_target_to_user);
331
332static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
333{
334 const struct xt_match *m;
335 int have_rev = 0;
336
337 list_for_each_entry(m, &xt[af].match, list) {
338 if (strcmp(m->name, name) == 0) {
339 if (m->revision > *bestp)
340 *bestp = m->revision;
341 if (m->revision == revision)
342 have_rev = 1;
343 }
344 }
345
346 if (af != NFPROTO_UNSPEC && !have_rev)
347 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
348
349 return have_rev;
350}
351
352static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
353{
354 const struct xt_target *t;
355 int have_rev = 0;
356
357 list_for_each_entry(t, &xt[af].target, list) {
358 if (strcmp(t->name, name) == 0) {
359 if (t->revision > *bestp)
360 *bestp = t->revision;
361 if (t->revision == revision)
362 have_rev = 1;
363 }
364 }
365
366 if (af != NFPROTO_UNSPEC && !have_rev)
367 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
368
369 return have_rev;
370}
371
372/* Returns true or false (if no such extension at all) */
373int xt_find_revision(u8 af, const char *name, u8 revision, int target,
374 int *err)
375{
376 int have_rev, best = -1;
377
378 mutex_lock(&xt[af].mutex);
379 if (target == 1)
380 have_rev = target_revfn(af, name, revision, &best);
381 else
382 have_rev = match_revfn(af, name, revision, &best);
383 mutex_unlock(&xt[af].mutex);
384
385 /* Nothing at all? Return 0 to try loading module. */
386 if (best == -1) {
387 *err = -ENOENT;
388 return 0;
389 }
390
391 *err = best;
392 if (!have_rev)
393 *err = -EPROTONOSUPPORT;
394 return 1;
395}
396EXPORT_SYMBOL_GPL(xt_find_revision);
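
/*
 * Example (sketch of the *_GET_REVISION_MATCH/TARGET getsockopt flow,
 * with "rev" and "target" supplied by the handler): callers probe with
 * xt_find_revision() and, if nothing at all was found, try loading the
 * extension module before probing again.
 *
 *	int ret;
 *
 *	try_then_request_module(xt_find_revision(NFPROTO_IPV4, rev.name,
 *						 rev.revision, target, &ret),
 *				"ipt_%s", rev.name);
 *
 * On success, ret holds the highest supported revision; if only other
 * revisions exist it is -EPROTONOSUPPORT, and -ENOENT means no such
 * extension is registered.
 */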
397
398static char *
399textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
400{
401 static const char *const inetbr_names[] = {
402 "PREROUTING", "INPUT", "FORWARD",
403 "OUTPUT", "POSTROUTING", "BROUTING",
404 };
405 static const char *const arp_names[] = {
406 "INPUT", "FORWARD", "OUTPUT",
407 };
408 const char *const *names;
409 unsigned int i, max;
410 char *p = buf;
411 bool np = false;
412 int res;
413
414 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
415 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
416 ARRAY_SIZE(inetbr_names);
417 *p = '\0';
418 for (i = 0; i < max; ++i) {
419 if (!(mask & (1 << i)))
420 continue;
421 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
422 if (res > 0) {
423 size -= res;
424 p += res;
425 }
426 np = true;
427 }
428
429 return buf;
430}
431
/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * Some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose: it checks that the name is NUL terminated and isn't a
 * 'special' name, like "..".
 *
 * Return: a negative errno on error, or 0 if the name is usable.
 */
445int xt_check_proc_name(const char *name, unsigned int size)
446{
447 if (name[0] == '\0')
448 return -EINVAL;
449
450 if (strnlen(name, size) == size)
451 return -ENAMETOOLONG;
452
453 if (strcmp(name, ".") == 0 ||
454 strcmp(name, "..") == 0 ||
455 strchr(name, '/'))
456 return -EINVAL;
457
458 return 0;
459}
460EXPORT_SYMBOL(xt_check_proc_name);
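
/*
 * Example (sketch): extensions that create /proc entries (xt_recent,
 * xt_hashlimit and friends) are expected to validate the user-supplied
 * name from their checkentry hook before using it:
 *
 *	ret = xt_check_proc_name(info->name, sizeof(info->name));
 *	if (ret)
 *		return ret;
 *	... only then build the /proc file name from info->name ...
 */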
461
462int xt_check_match(struct xt_mtchk_param *par,
463 unsigned int size, u16 proto, bool inv_proto)
464{
465 int ret;
466
467 if (XT_ALIGN(par->match->matchsize) != size &&
468 par->match->matchsize != -1) {
469 /*
470 * ebt_among is exempt from centralized matchsize checking
471 * because it uses a dynamic-size data set.
472 */
473 pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
474 xt_prefix[par->family], par->match->name,
475 par->match->revision,
476 XT_ALIGN(par->match->matchsize), size);
477 return -EINVAL;
478 }
479 if (par->match->table != NULL &&
480 strcmp(par->match->table, par->table) != 0) {
481 pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
482 xt_prefix[par->family], par->match->name,
483 par->match->table, par->table);
484 return -EINVAL;
485 }
486 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
487 char used[64], allow[64];
488
489 pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
490 xt_prefix[par->family], par->match->name,
491 textify_hooks(used, sizeof(used),
492 par->hook_mask, par->family),
493 textify_hooks(allow, sizeof(allow),
494 par->match->hooks,
495 par->family));
496 return -EINVAL;
497 }
498 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
499 pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
500 xt_prefix[par->family], par->match->name,
501 par->match->proto);
502 return -EINVAL;
503 }
504 if (par->match->checkentry != NULL) {
505 ret = par->match->checkentry(par);
506 if (ret < 0)
507 return ret;
508 else if (ret > 0)
509 /* Flag up potential errors. */
510 return -EIO;
511 }
512 return 0;
513}
514EXPORT_SYMBOL_GPL(xt_check_match);
515
/**
 * xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rule's target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
527static int xt_check_entry_match(const char *match, const char *target,
528 const size_t alignment)
529{
530 const struct xt_entry_match *pos;
531 int length = target - match;
532
533 if (length == 0) /* no matches */
534 return 0;
535
536 pos = (struct xt_entry_match *)match;
537 do {
538 if ((unsigned long)pos % alignment)
539 return -EINVAL;
540
541 if (length < (int)sizeof(struct xt_entry_match))
542 return -EINVAL;
543
544 if (pos->u.match_size < sizeof(struct xt_entry_match))
545 return -EINVAL;
546
547 if (pos->u.match_size > length)
548 return -EINVAL;
549
550 length -= pos->u.match_size;
551 pos = ((void *)((char *)(pos) + (pos)->u.match_size));
552 } while (length > 0);
553
554 return 0;
555}
556
/**
 * xt_check_table_hooks - check hook entry points are sane
 *
 * @info: xt_table_info to check
 * @valid_hooks: hook entry points that we can enter from
 *
 * Validates that the hook entry and underflow points are set up.
 *
 * Return: 0 on success, negative errno on failure.
 */
566int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
567{
568 const char *err = "unsorted underflow";
569 unsigned int i, max_uflow, max_entry;
570 bool check_hooks = false;
571
572 BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
573
574 max_entry = 0;
575 max_uflow = 0;
576
577 for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
578 if (!(valid_hooks & (1 << i)))
579 continue;
580
581 if (info->hook_entry[i] == 0xFFFFFFFF)
582 return -EINVAL;
583 if (info->underflow[i] == 0xFFFFFFFF)
584 return -EINVAL;
585
586 if (check_hooks) {
587 if (max_uflow > info->underflow[i])
588 goto error;
589
590 if (max_uflow == info->underflow[i]) {
591 err = "duplicate underflow";
592 goto error;
593 }
594 if (max_entry > info->hook_entry[i]) {
595 err = "unsorted entry";
596 goto error;
597 }
598 if (max_entry == info->hook_entry[i]) {
599 err = "duplicate entry";
600 goto error;
601 }
602 }
603 max_entry = info->hook_entry[i];
604 max_uflow = info->underflow[i];
605 check_hooks = true;
606 }
607
608 return 0;
609error:
610 pr_err_ratelimited("%s at hook %d\n", err, i);
611 return -EINVAL;
612}
613EXPORT_SYMBOL(xt_check_table_hooks);
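
/*
 * Example (sketch with made-up offsets): for a table that is valid in
 * LOCAL_IN and FORWARD, the following layout passes because every
 * selected slot is initialised and both sequences strictly increase in
 * hook order:
 *
 *	info->hook_entry[NF_INET_LOCAL_IN] = 0;
 *	info->underflow[NF_INET_LOCAL_IN]  = 152;
 *	info->hook_entry[NF_INET_FORWARD]  = 304;
 *	info->underflow[NF_INET_FORWARD]   = 456;
 *
 *	ret = xt_check_table_hooks(info, (1 << NF_INET_LOCAL_IN) |
 *					 (1 << NF_INET_FORWARD));
 *
 * Leaving a selected slot at 0xFFFFFFFF, or letting a later hook start
 * at or before an earlier one, yields -EINVAL.
 */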
614
615static bool verdict_ok(int verdict)
616{
617 if (verdict > 0)
618 return true;
619
620 if (verdict < 0) {
621 int v = -verdict - 1;
622
623 if (verdict == XT_RETURN)
624 return true;
625
626 switch (v) {
627 case NF_ACCEPT: return true;
628 case NF_DROP: return true;
629 case NF_QUEUE: return true;
630 default:
631 break;
632 }
633
634 return false;
635 }
636
637 return false;
638}
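
/*
 * Worked example of the verdict encoding accepted above: positive
 * values are jump offsets into the rule blob, absolute verdicts are
 * stored as -verdict - 1, and XT_RETURN pops back to the calling chain.
 *
 *	verdict_ok(160)             -> true   (jump to offset 160)
 *	verdict_ok(XT_RETURN)       -> true   (return to calling chain)
 *	verdict_ok(-NF_ACCEPT - 1)  -> true   (-1, absolute ACCEPT)
 *	verdict_ok(-NF_DROP - 1)    -> true   (-2, absolute DROP)
 *	verdict_ok(0)               -> false  (not a valid encoding)
 *	verdict_ok(-NF_STOP - 1)    -> false  (NF_STOP not allowed here)
 */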
639
640static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
641 const char *msg, unsigned int msglen)
642{
643 return usersize == kernsize && strnlen(msg, msglen) < msglen;
644}
645
646#ifdef CONFIG_COMPAT
647int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
648{
649 struct xt_af *xp = &xt[af];
650
651 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
652
653 if (WARN_ON(!xp->compat_tab))
654 return -ENOMEM;
655
656 if (xp->cur >= xp->number)
657 return -EINVAL;
658
659 if (xp->cur)
660 delta += xp->compat_tab[xp->cur - 1].delta;
661 xp->compat_tab[xp->cur].offset = offset;
662 xp->compat_tab[xp->cur].delta = delta;
663 xp->cur++;
664 return 0;
665}
666EXPORT_SYMBOL_GPL(xt_compat_add_offset);
667
668void xt_compat_flush_offsets(u_int8_t af)
669{
670 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
671
672 if (xt[af].compat_tab) {
673 vfree(xt[af].compat_tab);
674 xt[af].compat_tab = NULL;
675 xt[af].number = 0;
676 xt[af].cur = 0;
677 }
678}
679EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
680
681int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
682{
683 struct compat_delta *tmp = xt[af].compat_tab;
684 int mid, left = 0, right = xt[af].cur - 1;
685
686 while (left <= right) {
687 mid = (left + right) >> 1;
688 if (offset > tmp[mid].offset)
689 left = mid + 1;
690 else if (offset < tmp[mid].offset)
691 right = mid - 1;
692 else
693 return mid ? tmp[mid - 1].delta : 0;
694 }
695 return left ? tmp[left - 1].delta : 0;
696}
697EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
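
/*
 * Example (sketch): assume three rules were recorded via
 * xt_compat_add_offset() with a per-rule delta of 4, giving the
 * cumulative table
 *
 *	{ .offset = 112, .delta = 4 }
 *	{ .offset = 264, .delta = 8 }
 *	{ .offset = 416, .delta = 12 }
 *
 * Then xt_compat_calc_jump(af, 264) returns 4 (the delta accumulated
 * before that rule) and xt_compat_calc_jump(af, 500) returns 12. The
 * ip(6)_tables compat code uses these deltas to translate jump targets
 * between the native and 32bit rule layouts.
 */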
698
699int xt_compat_init_offsets(u8 af, unsigned int number)
700{
701 size_t mem;
702
703 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
704
705 if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
706 return -EINVAL;
707
708 if (WARN_ON(xt[af].compat_tab))
709 return -EINVAL;
710
711 mem = sizeof(struct compat_delta) * number;
712 if (mem > XT_MAX_TABLE_SIZE)
713 return -ENOMEM;
714
715 xt[af].compat_tab = vmalloc(mem);
716 if (!xt[af].compat_tab)
717 return -ENOMEM;
718
719 xt[af].number = number;
720 xt[af].cur = 0;
721
722 return 0;
723}
724EXPORT_SYMBOL(xt_compat_init_offsets);
725
726int xt_compat_match_offset(const struct xt_match *match)
727{
728 u_int16_t csize = match->compatsize ? : match->matchsize;
729 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
730}
731EXPORT_SYMBOL_GPL(xt_compat_match_offset);
732
733void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
734 unsigned int *size)
735{
736 const struct xt_match *match = m->u.kernel.match;
737 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
738 int pad, off = xt_compat_match_offset(match);
739 u_int16_t msize = cm->u.user.match_size;
740 char name[sizeof(m->u.user.name)];
741
742 m = *dstptr;
743 memcpy(m, cm, sizeof(*cm));
744 if (match->compat_from_user)
745 match->compat_from_user(m->data, cm->data);
746 else
747 memcpy(m->data, cm->data, msize - sizeof(*cm));
748 pad = XT_ALIGN(match->matchsize) - match->matchsize;
749 if (pad > 0)
750 memset(m->data + match->matchsize, 0, pad);
751
752 msize += off;
753 m->u.user.match_size = msize;
754 strlcpy(name, match->name, sizeof(name));
755 module_put(match->me);
756 strncpy(m->u.user.name, name, sizeof(m->u.user.name));
757
758 *size += off;
759 *dstptr += msize;
760}
761EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
762
763#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \
764 xt_data_to_user(U->data, K->data, \
765 K->u.kernel.TYPE->usersize, \
766 C_SIZE, \
767 COMPAT_XT_ALIGN(C_SIZE))
768
769int xt_compat_match_to_user(const struct xt_entry_match *m,
770 void __user **dstptr, unsigned int *size)
771{
772 const struct xt_match *match = m->u.kernel.match;
773 struct compat_xt_entry_match __user *cm = *dstptr;
774 int off = xt_compat_match_offset(match);
775 u_int16_t msize = m->u.user.match_size - off;
776
777 if (XT_OBJ_TO_USER(cm, m, match, msize))
778 return -EFAULT;
779
780 if (match->compat_to_user) {
781 if (match->compat_to_user((void __user *)cm->data, m->data))
782 return -EFAULT;
783 } else {
784 if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
785 return -EFAULT;
786 }
787
788 *size -= off;
789 *dstptr += msize;
790 return 0;
791}
792EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
793
794/* non-compat version may have padding after verdict */
795struct compat_xt_standard_target {
796 struct compat_xt_entry_target t;
797 compat_uint_t verdict;
798};
799
800struct compat_xt_error_target {
801 struct compat_xt_entry_target t;
802 char errorname[XT_FUNCTION_MAXNAMELEN];
803};
804
805int xt_compat_check_entry_offsets(const void *base, const char *elems,
806 unsigned int target_offset,
807 unsigned int next_offset)
808{
809 long size_of_base_struct = elems - (const char *)base;
810 const struct compat_xt_entry_target *t;
811 const char *e = base;
812
813 if (target_offset < size_of_base_struct)
814 return -EINVAL;
815
816 if (target_offset + sizeof(*t) > next_offset)
817 return -EINVAL;
818
819 t = (void *)(e + target_offset);
820 if (t->u.target_size < sizeof(*t))
821 return -EINVAL;
822
823 if (target_offset + t->u.target_size > next_offset)
824 return -EINVAL;
825
826 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
827 const struct compat_xt_standard_target *st = (const void *)t;
828
829 if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
830 return -EINVAL;
831
832 if (!verdict_ok(st->verdict))
833 return -EINVAL;
834 } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
835 const struct compat_xt_error_target *et = (const void *)t;
836
837 if (!error_tg_ok(t->u.target_size, sizeof(*et),
838 et->errorname, sizeof(et->errorname)))
839 return -EINVAL;
840 }
841
842 /* compat_xt_entry match has less strict alignment requirements,
843 * otherwise they are identical. In case of padding differences
844 * we need to add compat version of xt_check_entry_match.
845 */
846 BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
847
848 return xt_check_entry_match(elems, base + target_offset,
849 __alignof__(struct compat_xt_entry_match));
850}
851EXPORT_SYMBOL(xt_compat_check_entry_offsets);
852#endif /* CONFIG_COMPAT */
853
854/**
855 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
856 *
857 * @base: pointer to arp/ip/ip6t_entry
858 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
859 * @target_offset: the arp/ip/ip6_t->target_offset
860 * @next_offset: the arp/ip/ip6_t->next_offset
861 *
862 * validates that target_offset and next_offset are sane and that all
863 * match sizes (if any) align with the target offset.
864 *
865 * This function does not validate the targets or matches themselves, it
866 * only tests that all the offsets and sizes are correct, that all
867 * match structures are aligned, and that the last structure ends where
868 * the target structure begins.
869 *
870 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
871 *
872 * The arp/ip/ip6t_entry structure @base must have passed following tests:
873 * - it must point to a valid memory location
874 * - base to base + next_offset must be accessible, i.e. not exceed allocated
875 * length.
876 *
877 * A well-formed entry looks like this:
878 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
886 *
887 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
888 * This is where matches (if any) and the target reside.
889 * target_offset: beginning of target.
890 * next_offset: start of the next rule; also: size of this rule.
891 * Since targets have a minimum size, target_offset + minlen <= next_offset.
892 *
893 * Every match stores its size, sum of sizes must not exceed target_offset.
894 *
895 * Return: 0 on success, negative errno on failure.
896 */
897int xt_check_entry_offsets(const void *base,
898 const char *elems,
899 unsigned int target_offset,
900 unsigned int next_offset)
901{
902 long size_of_base_struct = elems - (const char *)base;
903 const struct xt_entry_target *t;
904 const char *e = base;
905
906 /* target start is within the ip/ip6/arpt_entry struct */
907 if (target_offset < size_of_base_struct)
908 return -EINVAL;
909
910 if (target_offset + sizeof(*t) > next_offset)
911 return -EINVAL;
912
913 t = (void *)(e + target_offset);
914 if (t->u.target_size < sizeof(*t))
915 return -EINVAL;
916
917 if (target_offset + t->u.target_size > next_offset)
918 return -EINVAL;
919
920 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
921 const struct xt_standard_target *st = (const void *)t;
922
923 if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
924 return -EINVAL;
925
926 if (!verdict_ok(st->verdict))
927 return -EINVAL;
928 } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
929 const struct xt_error_target *et = (const void *)t;
930
931 if (!error_tg_ok(t->u.target_size, sizeof(*et),
932 et->errorname, sizeof(et->errorname)))
933 return -EINVAL;
934 }
935
936 return xt_check_entry_match(elems, base + target_offset,
937 __alignof__(struct xt_entry_match));
938}
939EXPORT_SYMBOL(xt_check_entry_offsets);
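
/*
 * Worked example (illustrative numbers for a 64bit build): an IPv4 rule
 * with a single udp match has sizeof(struct ipt_entry) == 112, the udp
 * match consumes 48 bytes (xt_entry_match header plus XT_ALIGN'ed
 * payload) and a standard target another 40, so a well-formed entry has
 * target_offset == 160 and next_offset == 200. Matches that do not add
 * up to target_offset, or a target_size running past next_offset, are
 * rejected above.
 */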
940
941/**
942 * xt_alloc_entry_offsets - allocate array to store rule head offsets
943 *
944 * @size: number of entries
945 *
946 * Return: NULL or kmalloc'd or vmalloc'd array
947 */
948unsigned int *xt_alloc_entry_offsets(unsigned int size)
949{
950 if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
951 return NULL;
952
953 return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
954
955}
956EXPORT_SYMBOL(xt_alloc_entry_offsets);
957
958/**
959 * xt_find_jump_offset - check if target is a valid jump offset
960 *
961 * @offsets: array containing all valid rule start offsets of a rule blob
962 * @target: the jump target to search for
963 * @size: entries in @offset
964 */
965bool xt_find_jump_offset(const unsigned int *offsets,
966 unsigned int target, unsigned int size)
967{
968 int m, low = 0, hi = size;
969
970 while (hi > low) {
971 m = (low + hi) / 2u;
972
973 if (offsets[m] > target)
974 hi = m;
975 else if (offsets[m] < target)
976 low = m + 1;
977 else
978 return true;
979 }
980
981 return false;
982}
983EXPORT_SYMBOL(xt_find_jump_offset);
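
/*
 * Example (sketch): translate_table() collects every rule head offset
 * into an array obtained from xt_alloc_entry_offsets() and later uses
 * this binary search to verify user-supplied jump verdicts:
 *
 *	offsets[0] = 0;
 *	offsets[1] = 200;
 *	offsets[2] = 352;
 *
 *	xt_find_jump_offset(offsets, 200, 3)  -> true, a rule starts there
 *	xt_find_jump_offset(offsets, 210, 3)  -> false, middle of a rule
 */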
984
985int xt_check_target(struct xt_tgchk_param *par,
986 unsigned int size, u16 proto, bool inv_proto)
987{
988 int ret;
989
990 if (XT_ALIGN(par->target->targetsize) != size) {
991 pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
992 xt_prefix[par->family], par->target->name,
993 par->target->revision,
994 XT_ALIGN(par->target->targetsize), size);
995 return -EINVAL;
996 }
997 if (par->target->table != NULL &&
998 strcmp(par->target->table, par->table) != 0) {
999 pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
1000 xt_prefix[par->family], par->target->name,
1001 par->target->table, par->table);
1002 return -EINVAL;
1003 }
1004 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
1005 char used[64], allow[64];
1006
1007 pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
1008 xt_prefix[par->family], par->target->name,
1009 textify_hooks(used, sizeof(used),
1010 par->hook_mask, par->family),
1011 textify_hooks(allow, sizeof(allow),
1012 par->target->hooks,
1013 par->family));
1014 return -EINVAL;
1015 }
1016 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
1017 pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
1018 xt_prefix[par->family], par->target->name,
1019 par->target->proto);
1020 return -EINVAL;
1021 }
1022 if (par->target->checkentry != NULL) {
1023 ret = par->target->checkentry(par);
1024 if (ret < 0)
1025 return ret;
1026 else if (ret > 0)
1027 /* Flag up potential errors. */
1028 return -EIO;
1029 }
1030 return 0;
1031}
1032EXPORT_SYMBOL_GPL(xt_check_target);
1033
1034/**
1035 * xt_copy_counters_from_user - copy counters and metadata from userspace
1036 *
1037 * @user: src pointer to userspace memory
1038 * @len: alleged size of userspace memory
1039 * @info: where to store the xt_counters_info metadata
1040 * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
1041 *
1042 * Copies counter meta data from @user and stores it in @info.
1043 *
1044 * vmallocs memory to hold the counters, then copies the counter data
1045 * from @user to the new memory and returns a pointer to it.
1046 *
1047 * If @compat is true, @info gets converted automatically to the 64bit
1048 * representation.
1049 *
1050 * The metadata associated with the counters is stored in @info.
1051 *
1052 * Return: returns pointer that caller has to test via IS_ERR().
1053 * If IS_ERR is false, caller has to vfree the pointer.
1054 */
1055void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
1056 struct xt_counters_info *info, bool compat)
1057{
1058 void *mem;
1059 u64 size;
1060
1061#ifdef CONFIG_COMPAT
1062 if (compat) {
1063 /* structures only differ in size due to alignment */
1064 struct compat_xt_counters_info compat_tmp;
1065
1066 if (len <= sizeof(compat_tmp))
1067 return ERR_PTR(-EINVAL);
1068
1069 len -= sizeof(compat_tmp);
1070 if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
1071 return ERR_PTR(-EFAULT);
1072
1073 memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
1074 info->num_counters = compat_tmp.num_counters;
1075 user += sizeof(compat_tmp);
1076 } else
1077#endif
1078 {
1079 if (len <= sizeof(*info))
1080 return ERR_PTR(-EINVAL);
1081
1082 len -= sizeof(*info);
1083 if (copy_from_user(info, user, sizeof(*info)) != 0)
1084 return ERR_PTR(-EFAULT);
1085
1086 user += sizeof(*info);
1087 }
1088 info->name[sizeof(info->name) - 1] = '\0';
1089
1090 size = sizeof(struct xt_counters);
1091 size *= info->num_counters;
1092
1093 if (size != (u64)len)
1094 return ERR_PTR(-EINVAL);
1095
1096 mem = vmalloc(len);
1097 if (!mem)
1098 return ERR_PTR(-ENOMEM);
1099
1100 if (copy_from_user(mem, user, len) == 0)
1101 return mem;
1102
1103 vfree(mem);
1104 return ERR_PTR(-EFAULT);
1105}
1106EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
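
/*
 * Example (sketch, loosely following the do_add_counters() helpers in
 * ip(6)_tables):
 *
 *	struct xt_counters_info tmp;
 *	struct xt_counters *paddc;
 *
 *	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *
 *	... add paddc[0..tmp.num_counters-1] into the table named tmp.name ...
 *
 *	vfree(paddc);
 */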
1107
1108#ifdef CONFIG_COMPAT
1109int xt_compat_target_offset(const struct xt_target *target)
1110{
1111 u_int16_t csize = target->compatsize ? : target->targetsize;
1112 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1113}
1114EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1115
1116void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
1117 unsigned int *size)
1118{
1119 const struct xt_target *target = t->u.kernel.target;
1120 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
1121 int pad, off = xt_compat_target_offset(target);
1122 u_int16_t tsize = ct->u.user.target_size;
1123 char name[sizeof(t->u.user.name)];
1124
1125 t = *dstptr;
1126 memcpy(t, ct, sizeof(*ct));
1127 if (target->compat_from_user)
1128 target->compat_from_user(t->data, ct->data);
1129 else
1130 memcpy(t->data, ct->data, tsize - sizeof(*ct));
1131 pad = XT_ALIGN(target->targetsize) - target->targetsize;
1132 if (pad > 0)
1133 memset(t->data + target->targetsize, 0, pad);
1134
1135 tsize += off;
1136 t->u.user.target_size = tsize;
1137 strlcpy(name, target->name, sizeof(name));
1138 module_put(target->me);
1139 strncpy(t->u.user.name, name, sizeof(t->u.user.name));
1140
1141 *size += off;
1142 *dstptr += tsize;
1143}
1144EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1145
1146int xt_compat_target_to_user(const struct xt_entry_target *t,
1147 void __user **dstptr, unsigned int *size)
1148{
1149 const struct xt_target *target = t->u.kernel.target;
1150 struct compat_xt_entry_target __user *ct = *dstptr;
1151 int off = xt_compat_target_offset(target);
1152 u_int16_t tsize = t->u.user.target_size - off;
1153
1154 if (XT_OBJ_TO_USER(ct, t, target, tsize))
1155 return -EFAULT;
1156
1157 if (target->compat_to_user) {
1158 if (target->compat_to_user((void __user *)ct->data, t->data))
1159 return -EFAULT;
1160 } else {
1161 if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
1162 return -EFAULT;
1163 }
1164
1165 *size -= off;
1166 *dstptr += tsize;
1167 return 0;
1168}
1169EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
1170#endif
1171
1172struct xt_table_info *xt_alloc_table_info(unsigned int size)
1173{
1174 struct xt_table_info *info = NULL;
1175 size_t sz = sizeof(*info) + size;
1176
1177 if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1178 return NULL;
1179
1180 info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
1181 if (!info)
1182 return NULL;
1183
1184 memset(info, 0, sizeof(*info));
1185 info->size = size;
1186 return info;
1187}
1188EXPORT_SYMBOL(xt_alloc_table_info);
1189
1190void xt_free_table_info(struct xt_table_info *info)
1191{
1192 int cpu;
1193
1194 if (info->jumpstack != NULL) {
1195 for_each_possible_cpu(cpu)
1196 kvfree(info->jumpstack[cpu]);
1197 kvfree(info->jumpstack);
1198 }
1199
1200 kvfree(info);
1201}
1202EXPORT_SYMBOL(xt_free_table_info);
1203
1204/* Find table by name, grabs mutex & ref. Returns ERR_PTR on error. */
1205struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
1206 const char *name)
1207{
1208 struct xt_table *t, *found = NULL;
1209
1210 mutex_lock(&xt[af].mutex);
1211 list_for_each_entry(t, &net->xt.tables[af], list)
1212 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
1213 return t;
1214
1215 if (net == &init_net)
1216 goto out;
1217
1218 /* Table doesn't exist in this netns, re-try init */
1219 list_for_each_entry(t, &init_net.xt.tables[af], list) {
1220 int err;
1221
1222 if (strcmp(t->name, name))
1223 continue;
1224 if (!try_module_get(t->me))
1225 goto out;
1226 mutex_unlock(&xt[af].mutex);
1227 err = t->table_init(net);
1228 if (err < 0) {
1229 module_put(t->me);
1230 return ERR_PTR(err);
1231 }
1232
1233 found = t;
1234
1235 mutex_lock(&xt[af].mutex);
1236 break;
1237 }
1238
1239 if (!found)
1240 goto out;
1241
1242 /* and once again: */
1243 list_for_each_entry(t, &net->xt.tables[af], list)
1244 if (strcmp(t->name, name) == 0)
1245 return t;
1246
1247 module_put(found->me);
1248 out:
1249 mutex_unlock(&xt[af].mutex);
1250 return ERR_PTR(-ENOENT);
1251}
1252EXPORT_SYMBOL_GPL(xt_find_table_lock);
1253
1254struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1255 const char *name)
1256{
1257 struct xt_table *t = xt_find_table_lock(net, af, name);
1258
1259#ifdef CONFIG_MODULES
1260 if (IS_ERR(t)) {
1261 int err = request_module("%stable_%s", xt_prefix[af], name);
1262 if (err < 0)
1263 return ERR_PTR(err);
1264 t = xt_find_table_lock(net, af, name);
1265 }
1266#endif
1267
1268 return t;
1269}
1270EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1271
1272void xt_table_unlock(struct xt_table *table)
1273{
1274 mutex_unlock(&xt[table->af].mutex);
1275}
1276EXPORT_SYMBOL_GPL(xt_table_unlock);
1277
1278#ifdef CONFIG_COMPAT
1279void xt_compat_lock(u_int8_t af)
1280{
1281 mutex_lock(&xt[af].compat_mutex);
1282}
1283EXPORT_SYMBOL_GPL(xt_compat_lock);
1284
1285void xt_compat_unlock(u_int8_t af)
1286{
1287 mutex_unlock(&xt[af].compat_mutex);
1288}
1289EXPORT_SYMBOL_GPL(xt_compat_unlock);
1290#endif
1291
1292DEFINE_PER_CPU(seqcount_t, xt_recseq);
1293EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
1294
1295struct static_key xt_tee_enabled __read_mostly;
1296EXPORT_SYMBOL_GPL(xt_tee_enabled);
1297
1298static int xt_jumpstack_alloc(struct xt_table_info *i)
1299{
1300 unsigned int size;
1301 int cpu;
1302
1303 size = sizeof(void **) * nr_cpu_ids;
1304 if (size > PAGE_SIZE)
1305 i->jumpstack = kvzalloc(size, GFP_KERNEL);
1306 else
1307 i->jumpstack = kzalloc(size, GFP_KERNEL);
1308 if (i->jumpstack == NULL)
1309 return -ENOMEM;
1310
1311 /* ruleset without jumps -- no stack needed */
1312 if (i->stacksize == 0)
1313 return 0;
1314
1315 /* Jumpstack needs to be able to record two full callchains, one
1316 * from the first rule set traversal, plus one table reentrancy
1317 * via -j TEE without clobbering the callchain that brought us to
1318 * TEE target.
1319 *
1320 * This is done by allocating two jumpstacks per cpu, on reentry
1321 * the upper half of the stack is used.
1322 *
1323 * see the jumpstack setup in ipt_do_table() for more details.
1324 */
1325 size = sizeof(void *) * i->stacksize * 2u;
1326 for_each_possible_cpu(cpu) {
1327 i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1328 cpu_to_node(cpu));
1329 if (i->jumpstack[cpu] == NULL)
1330 /*
1331 * Freeing will be done later on by the callers. The
1332 * chain is: xt_replace_table -> __do_replace ->
1333 * do_replace -> xt_free_table_info.
1334 */
1335 return -ENOMEM;
1336 }
1337
1338 return 0;
1339}
1340
1341struct xt_counters *xt_counters_alloc(unsigned int counters)
1342{
1343 struct xt_counters *mem;
1344
1345 if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1346 return NULL;
1347
1348 counters *= sizeof(*mem);
1349 if (counters > XT_MAX_TABLE_SIZE)
1350 return NULL;
1351
1352 return vzalloc(counters);
1353}
1354EXPORT_SYMBOL(xt_counters_alloc);
1355
1356struct xt_table_info *
1357xt_replace_table(struct xt_table *table,
1358 unsigned int num_counters,
1359 struct xt_table_info *newinfo,
1360 int *error)
1361{
1362 struct xt_table_info *private;
1363 unsigned int cpu;
1364 int ret;
1365
1366 ret = xt_jumpstack_alloc(newinfo);
1367 if (ret < 0) {
1368 *error = ret;
1369 return NULL;
1370 }
1371
1372 /* Do the substitution. */
1373 local_bh_disable();
1374 private = table->private;
1375
1376 /* Check inside lock: is the old number correct? */
1377 if (num_counters != private->number) {
1378 pr_debug("num_counters != table->private->number (%u/%u)\n",
1379 num_counters, private->number);
1380 local_bh_enable();
1381 *error = -EAGAIN;
1382 return NULL;
1383 }
1384
1385 newinfo->initial_entries = private->initial_entries;
1386 /*
1387 * Ensure contents of newinfo are visible before assigning to
1388 * private.
1389 */
1390 smp_wmb();
1391 table->private = newinfo;
1392
1393 /* make sure all cpus see new ->private value */
1394 smp_wmb();
1395
1396 /*
1397 * Even though table entries have now been swapped, other CPU's
1398 * may still be using the old entries...
1399 */
1400 local_bh_enable();
1401
1402 /* ... so wait for even xt_recseq on all cpus */
1403 for_each_possible_cpu(cpu) {
1404 seqcount_t *s = &per_cpu(xt_recseq, cpu);
1405 u32 seq = raw_read_seqcount(s);
1406
1407 if (seq & 1) {
1408 do {
1409 cond_resched();
1410 cpu_relax();
1411 } while (seq == raw_read_seqcount(s));
1412 }
1413 }
1414
1415#ifdef CONFIG_AUDIT
1416 if (audit_enabled) {
1417 audit_log(audit_context(), GFP_KERNEL,
1418 AUDIT_NETFILTER_CFG,
1419 "table=%s family=%u entries=%u",
1420 table->name, table->af, private->number);
1421 }
1422#endif
1423
1424 return private;
1425}
1426EXPORT_SYMBOL_GPL(xt_replace_table);
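
/*
 * Example (sketch, loosely following __do_replace() in ip(6)_tables):
 * the caller hands in the new blob plus the number of counters it
 * allocated for the old ruleset, and on success gets the previous
 * xt_table_info back so it can extract the old counters and free it.
 *
 *	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto put_module;
 *
 *	get_old_counters(oldinfo, counters);
 *	... copy counters to userspace ...
 *	xt_free_table_info(oldinfo);
 */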
1427
1428struct xt_table *xt_register_table(struct net *net,
1429 const struct xt_table *input_table,
1430 struct xt_table_info *bootstrap,
1431 struct xt_table_info *newinfo)
1432{
1433 int ret;
1434 struct xt_table_info *private;
1435 struct xt_table *t, *table;
1436
1437 /* Don't add one object to multiple lists. */
1438 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1439 if (!table) {
1440 ret = -ENOMEM;
1441 goto out;
1442 }
1443
1444 mutex_lock(&xt[table->af].mutex);
1445 /* Don't autoload: we'd eat our tail... */
1446 list_for_each_entry(t, &net->xt.tables[table->af], list) {
1447 if (strcmp(t->name, table->name) == 0) {
1448 ret = -EEXIST;
1449 goto unlock;
1450 }
1451 }
1452
1453 /* Simplifies replace_table code. */
1454 table->private = bootstrap;
1455
1456 if (!xt_replace_table(table, 0, newinfo, &ret))
1457 goto unlock;
1458
1459 private = table->private;
1460 pr_debug("table->private->number = %u\n", private->number);
1461
1462 /* save number of initial entries */
1463 private->initial_entries = private->number;
1464
1465 list_add(&table->list, &net->xt.tables[table->af]);
1466 mutex_unlock(&xt[table->af].mutex);
1467 return table;
1468
1469unlock:
1470 mutex_unlock(&xt[table->af].mutex);
1471 kfree(table);
1472out:
1473 return ERR_PTR(ret);
1474}
1475EXPORT_SYMBOL_GPL(xt_register_table);
1476
1477void *xt_unregister_table(struct xt_table *table)
1478{
1479 struct xt_table_info *private;
1480
1481 mutex_lock(&xt[table->af].mutex);
1482 private = table->private;
1483 list_del(&table->list);
1484 mutex_unlock(&xt[table->af].mutex);
1485 kfree(table);
1486
1487 return private;
1488}
1489EXPORT_SYMBOL_GPL(xt_unregister_table);
1490
1491#ifdef CONFIG_PROC_FS
1492static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1493{
1494 struct net *net = seq_file_net(seq);
1495 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1496
1497 mutex_lock(&xt[af].mutex);
1498 return seq_list_start(&net->xt.tables[af], *pos);
1499}
1500
1501static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1502{
1503 struct net *net = seq_file_net(seq);
1504 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1505
1506 return seq_list_next(v, &net->xt.tables[af], pos);
1507}
1508
1509static void xt_table_seq_stop(struct seq_file *seq, void *v)
1510{
1511 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1512
1513 mutex_unlock(&xt[af].mutex);
1514}
1515
1516static int xt_table_seq_show(struct seq_file *seq, void *v)
1517{
1518 struct xt_table *table = list_entry(v, struct xt_table, list);
1519
1520 if (*table->name)
1521 seq_printf(seq, "%s\n", table->name);
1522 return 0;
1523}
1524
1525static const struct seq_operations xt_table_seq_ops = {
1526 .start = xt_table_seq_start,
1527 .next = xt_table_seq_next,
1528 .stop = xt_table_seq_stop,
1529 .show = xt_table_seq_show,
1530};
1531
/*
 * Traversal state for ip{,6}_{tables,matches}, used to help cross
 * the per-AF mutexes.
 */
1536struct nf_mttg_trav {
1537 struct list_head *head, *curr;
1538 uint8_t class;
1539};
1540
1541enum {
1542 MTTG_TRAV_INIT,
1543 MTTG_TRAV_NFP_UNSPEC,
1544 MTTG_TRAV_NFP_SPEC,
1545 MTTG_TRAV_DONE,
1546};
1547
1548static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1549 bool is_target)
1550{
1551 static const uint8_t next_class[] = {
1552 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1553 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
1554 };
1555 uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1556 struct nf_mttg_trav *trav = seq->private;
1557
1558 switch (trav->class) {
1559 case MTTG_TRAV_INIT:
1560 trav->class = MTTG_TRAV_NFP_UNSPEC;
1561 mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1562 trav->head = trav->curr = is_target ?
1563 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1564 break;
1565 case MTTG_TRAV_NFP_UNSPEC:
1566 trav->curr = trav->curr->next;
1567 if (trav->curr != trav->head)
1568 break;
1569 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1570 mutex_lock(&xt[nfproto].mutex);
1571 trav->head = trav->curr = is_target ?
1572 &xt[nfproto].target : &xt[nfproto].match;
1573 trav->class = next_class[trav->class];
1574 break;
1575 case MTTG_TRAV_NFP_SPEC:
1576 trav->curr = trav->curr->next;
1577 if (trav->curr != trav->head)
1578 break;
1579 /* fall through */
1580 default:
1581 return NULL;
1582 }
1583
1584 if (ppos != NULL)
1585 ++*ppos;
1586 return trav;
1587}
1588
1589static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1590 bool is_target)
1591{
1592 struct nf_mttg_trav *trav = seq->private;
1593 unsigned int j;
1594
1595 trav->class = MTTG_TRAV_INIT;
1596 for (j = 0; j < *pos; ++j)
1597 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1598 return NULL;
1599 return trav;
1600}
1601
1602static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1603{
1604 uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1605 struct nf_mttg_trav *trav = seq->private;
1606
1607 switch (trav->class) {
1608 case MTTG_TRAV_NFP_UNSPEC:
1609 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1610 break;
1611 case MTTG_TRAV_NFP_SPEC:
1612 mutex_unlock(&xt[nfproto].mutex);
1613 break;
1614 }
1615}
1616
1617static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1618{
1619 return xt_mttg_seq_start(seq, pos, false);
1620}
1621
1622static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1623{
1624 return xt_mttg_seq_next(seq, v, ppos, false);
1625}
1626
1627static int xt_match_seq_show(struct seq_file *seq, void *v)
1628{
1629 const struct nf_mttg_trav *trav = seq->private;
1630 const struct xt_match *match;
1631
1632 switch (trav->class) {
1633 case MTTG_TRAV_NFP_UNSPEC:
1634 case MTTG_TRAV_NFP_SPEC:
1635 if (trav->curr == trav->head)
1636 return 0;
1637 match = list_entry(trav->curr, struct xt_match, list);
1638 if (*match->name)
1639 seq_printf(seq, "%s\n", match->name);
1640 }
1641 return 0;
1642}
1643
1644static const struct seq_operations xt_match_seq_ops = {
1645 .start = xt_match_seq_start,
1646 .next = xt_match_seq_next,
1647 .stop = xt_mttg_seq_stop,
1648 .show = xt_match_seq_show,
1649};
1650
1651static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1652{
1653 return xt_mttg_seq_start(seq, pos, true);
1654}
1655
1656static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1657{
1658 return xt_mttg_seq_next(seq, v, ppos, true);
1659}
1660
1661static int xt_target_seq_show(struct seq_file *seq, void *v)
1662{
1663 const struct nf_mttg_trav *trav = seq->private;
1664 const struct xt_target *target;
1665
1666 switch (trav->class) {
1667 case MTTG_TRAV_NFP_UNSPEC:
1668 case MTTG_TRAV_NFP_SPEC:
1669 if (trav->curr == trav->head)
1670 return 0;
1671 target = list_entry(trav->curr, struct xt_target, list);
1672 if (*target->name)
1673 seq_printf(seq, "%s\n", target->name);
1674 }
1675 return 0;
1676}
1677
1678static const struct seq_operations xt_target_seq_ops = {
1679 .start = xt_target_seq_start,
1680 .next = xt_target_seq_next,
1681 .stop = xt_mttg_seq_stop,
1682 .show = xt_target_seq_show,
1683};
1684
1685#define FORMAT_TABLES "_tables_names"
1686#define FORMAT_MATCHES "_tables_matches"
1687#define FORMAT_TARGETS "_tables_targets"
1688
1689#endif /* CONFIG_PROC_FS */
1690
1691/**
1692 * xt_hook_ops_alloc - set up hooks for a new table
1693 * @table: table with metadata needed to set up hooks
1694 * @fn: Hook function
1695 *
1696 * This function will create the nf_hook_ops that the x_table needs
1697 * to hand to xt_hook_link_net().
1698 */
1699struct nf_hook_ops *
1700xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1701{
1702 unsigned int hook_mask = table->valid_hooks;
1703 uint8_t i, num_hooks = hweight32(hook_mask);
1704 uint8_t hooknum;
1705 struct nf_hook_ops *ops;
1706
1707 if (!num_hooks)
1708 return ERR_PTR(-EINVAL);
1709
1710 ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1711 if (ops == NULL)
1712 return ERR_PTR(-ENOMEM);
1713
1714 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1715 hook_mask >>= 1, ++hooknum) {
1716 if (!(hook_mask & 1))
1717 continue;
1718 ops[i].hook = fn;
1719 ops[i].pf = table->af;
1720 ops[i].hooknum = hooknum;
1721 ops[i].priority = table->priority;
1722 ++i;
1723 }
1724
1725 return ops;
1726}
1727EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
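
/*
 * Example (sketch, modeled on the per-family table glue such as
 * iptable_filter): the hook ops allocated here are typically handed to
 * nf_register_net_hooks() once the table itself has been registered.
 *
 *	ops = xt_hook_ops_alloc(&packet_filter, iptable_filter_hook);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	...
 *	err = nf_register_net_hooks(net, ops,
 *				    hweight32(packet_filter.valid_hooks));
 */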
1728
1729int xt_proto_init(struct net *net, u_int8_t af)
1730{
1731#ifdef CONFIG_PROC_FS
1732 char buf[XT_FUNCTION_MAXNAMELEN];
1733 struct proc_dir_entry *proc;
1734 kuid_t root_uid;
1735 kgid_t root_gid;
1736#endif
1737
1738 if (af >= ARRAY_SIZE(xt_prefix))
1739 return -EINVAL;
1740
1741
1742#ifdef CONFIG_PROC_FS
1743 root_uid = make_kuid(net->user_ns, 0);
1744 root_gid = make_kgid(net->user_ns, 0);
1745
1746 strlcpy(buf, xt_prefix[af], sizeof(buf));
1747 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1748 proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
1749 sizeof(struct seq_net_private),
1750 (void *)(unsigned long)af);
1751 if (!proc)
1752 goto out;
1753 if (uid_valid(root_uid) && gid_valid(root_gid))
1754 proc_set_user(proc, root_uid, root_gid);
1755
1756 strlcpy(buf, xt_prefix[af], sizeof(buf));
1757 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1758 proc = proc_create_seq_private(buf, 0440, net->proc_net,
1759 &xt_match_seq_ops, sizeof(struct nf_mttg_trav),
1760 (void *)(unsigned long)af);
1761 if (!proc)
1762 goto out_remove_tables;
1763 if (uid_valid(root_uid) && gid_valid(root_gid))
1764 proc_set_user(proc, root_uid, root_gid);
1765
1766 strlcpy(buf, xt_prefix[af], sizeof(buf));
1767 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1768 proc = proc_create_seq_private(buf, 0440, net->proc_net,
1769 &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
1770 (void *)(unsigned long)af);
1771 if (!proc)
1772 goto out_remove_matches;
1773 if (uid_valid(root_uid) && gid_valid(root_gid))
1774 proc_set_user(proc, root_uid, root_gid);
1775#endif
1776
1777 return 0;
1778
1779#ifdef CONFIG_PROC_FS
1780out_remove_matches:
1781 strlcpy(buf, xt_prefix[af], sizeof(buf));
1782 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1783 remove_proc_entry(buf, net->proc_net);
1784
1785out_remove_tables:
1786 strlcpy(buf, xt_prefix[af], sizeof(buf));
1787 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1788 remove_proc_entry(buf, net->proc_net);
1789out:
1790 return -1;
1791#endif
1792}
1793EXPORT_SYMBOL_GPL(xt_proto_init);
1794
1795void xt_proto_fini(struct net *net, u_int8_t af)
1796{
1797#ifdef CONFIG_PROC_FS
1798 char buf[XT_FUNCTION_MAXNAMELEN];
1799
1800 strlcpy(buf, xt_prefix[af], sizeof(buf));
1801 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1802 remove_proc_entry(buf, net->proc_net);
1803
1804 strlcpy(buf, xt_prefix[af], sizeof(buf));
1805 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1806 remove_proc_entry(buf, net->proc_net);
1807
1808 strlcpy(buf, xt_prefix[af], sizeof(buf));
1809 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1810 remove_proc_entry(buf, net->proc_net);
1811#endif /*CONFIG_PROC_FS*/
1812}
1813EXPORT_SYMBOL_GPL(xt_proto_fini);
1814
1815/**
1816 * xt_percpu_counter_alloc - allocate x_tables rule counter
1817 *
1818 * @state: pointer to xt_percpu allocation state
1819 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
1820 *
1821 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
1822 * contain the address of the real (percpu) counter.
1823 *
1824 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
1825 * to fetch the real percpu counter.
1826 *
1827 * To speed up allocation and improve data locality, a 4kb block is
1828 * allocated. Freeing any counter may free an entire block, so all
1829 * counters allocated using the same state must be freed at the same
1830 * time.
1831 *
1832 * xt_percpu_counter_alloc_state contains the base address of the
1833 * allocated page and the current sub-offset.
1834 *
 * Return: false on error, true otherwise.
1836 */
1837bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
1838 struct xt_counters *counter)
1839{
1840 BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
1841
1842 if (nr_cpu_ids <= 1)
1843 return true;
1844
1845 if (!state->mem) {
1846 state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
1847 XT_PCPU_BLOCK_SIZE);
1848 if (!state->mem)
1849 return false;
1850 }
1851 counter->pcnt = (__force unsigned long)(state->mem + state->off);
1852 state->off += sizeof(*counter);
1853 if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
1854 state->mem = NULL;
1855 state->off = 0;
1856 }
1857 return true;
1858}
1859EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1860
1861void xt_percpu_counter_free(struct xt_counters *counters)
1862{
1863 unsigned long pcnt = counters->pcnt;
1864
1865 if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1866 free_percpu((void __percpu *)pcnt);
1867}
1868EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
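
/*
 * Example (sketch): translate_table() style code keeps one allocation
 * state on the stack while walking all entries, so consecutive rules
 * share the same 4KB percpu block:
 *
 *	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
 *
 *	xt_entry_foreach(iter, entry0, newinfo->size) {
 *		if (!xt_percpu_counter_alloc(&alloc_state, &iter->counters))
 *			return -ENOMEM;
 *		...
 *	}
 *
 * The matching cleanup path calls xt_percpu_counter_free() on each
 * entry's counters; a percpu block is only released when the counter at
 * its start is freed.
 */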
1869
1870static int __net_init xt_net_init(struct net *net)
1871{
1872 int i;
1873
1874 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1875 INIT_LIST_HEAD(&net->xt.tables[i]);
1876 return 0;
1877}
1878
1879static void __net_exit xt_net_exit(struct net *net)
1880{
1881 int i;
1882
1883 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1884 WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
1885}
1886
1887static struct pernet_operations xt_net_ops = {
1888 .init = xt_net_init,
1889 .exit = xt_net_exit,
1890};
1891
1892static int __init xt_init(void)
1893{
1894 unsigned int i;
1895 int rv;
1896
1897 for_each_possible_cpu(i) {
1898 seqcount_init(&per_cpu(xt_recseq, i));
1899 }
1900
1901 xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1902 if (!xt)
1903 return -ENOMEM;
1904
1905 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1906 mutex_init(&xt[i].mutex);
1907#ifdef CONFIG_COMPAT
1908 mutex_init(&xt[i].compat_mutex);
1909 xt[i].compat_tab = NULL;
1910#endif
1911 INIT_LIST_HEAD(&xt[i].target);
1912 INIT_LIST_HEAD(&xt[i].match);
1913 }
1914 rv = register_pernet_subsys(&xt_net_ops);
1915 if (rv < 0)
1916 kfree(xt);
1917 return rv;
1918}
1919
1920static void __exit xt_fini(void)
1921{
1922 unregister_pernet_subsys(&xt_net_ops);
1923 kfree(xt);
1924}
1925
1926module_init(xt_init);
1927module_exit(xt_fini);
1928