Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.17-rc6 2417 lines 63 kB view raw
1/* 2 * ebtables 3 * 4 * Author: 5 * Bart De Schuymer <bdschuym@pandora.be> 6 * 7 * ebtables.c,v 2.0, July, 2002 8 * 9 * This code is stongly inspired on the iptables code which is 10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 11 * 12 * This program is free software; you can redistribute it and/or 13 * modify it under the terms of the GNU General Public License 14 * as published by the Free Software Foundation; either version 15 * 2 of the License, or (at your option) any later version. 16 */ 17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18#include <linux/kmod.h> 19#include <linux/module.h> 20#include <linux/vmalloc.h> 21#include <linux/netfilter/x_tables.h> 22#include <linux/netfilter_bridge/ebtables.h> 23#include <linux/spinlock.h> 24#include <linux/mutex.h> 25#include <linux/slab.h> 26#include <asm/uaccess.h> 27#include <linux/smp.h> 28#include <linux/cpumask.h> 29#include <net/sock.h> 30/* needed for logical [in,out]-dev filtering */ 31#include "../br_private.h" 32 33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ 34 "report to author: "format, ## args) 35/* #define BUGPRINT(format, args...) 
*/ 36 37/* 38 * Each cpu has its own set of counters, so there is no need for write_lock in 39 * the softirq 40 * For reading or updating the counters, the user context needs to 41 * get a write_lock 42 */ 43 44/* The size of each set of counters is altered to get cache alignment */ 45#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) 46#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter))) 47#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \ 48 COUNTER_OFFSET(n) * cpu)) 49 50 51 52static DEFINE_MUTEX(ebt_mutex); 53 54#ifdef CONFIG_COMPAT 55static void ebt_standard_compat_from_user(void *dst, const void *src) 56{ 57 int v = *(compat_int_t *)src; 58 59 if (v >= 0) 60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v); 61 memcpy(dst, &v, sizeof(v)); 62} 63 64static int ebt_standard_compat_to_user(void __user *dst, const void *src) 65{ 66 compat_int_t cv = *(int *)src; 67 68 if (cv >= 0) 69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv); 70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 71} 72#endif 73 74 75static struct xt_target ebt_standard_target = { 76 .name = "standard", 77 .revision = 0, 78 .family = NFPROTO_BRIDGE, 79 .targetsize = sizeof(int), 80#ifdef CONFIG_COMPAT 81 .compatsize = sizeof(compat_int_t), 82 .compat_from_user = ebt_standard_compat_from_user, 83 .compat_to_user = ebt_standard_compat_to_user, 84#endif 85}; 86 87static inline int 88ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb, 89 struct xt_action_param *par) 90{ 91 par->target = w->u.watcher; 92 par->targinfo = w->data; 93 w->u.watcher->target(skb, par); 94 /* watchers don't give a verdict */ 95 return 0; 96} 97 98static inline int 99ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb, 100 struct xt_action_param *par) 101{ 102 par->match = m->u.match; 103 par->matchinfo = m->data; 104 return m->u.match->match(skb, par) ? 
EBT_MATCH : EBT_NOMATCH; 105} 106 107static inline int 108ebt_dev_check(const char *entry, const struct net_device *device) 109{ 110 int i = 0; 111 const char *devname; 112 113 if (*entry == '\0') 114 return 0; 115 if (!device) 116 return 1; 117 devname = device->name; 118 /* 1 is the wildcard token */ 119 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i]) 120 i++; 121 return devname[i] != entry[i] && entry[i] != 1; 122} 123 124#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg)) 125/* process standard matches */ 126static inline int 127ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb, 128 const struct net_device *in, const struct net_device *out) 129{ 130 const struct ethhdr *h = eth_hdr(skb); 131 const struct net_bridge_port *p; 132 __be16 ethproto; 133 int verdict, i; 134 135 if (vlan_tx_tag_present(skb)) 136 ethproto = htons(ETH_P_8021Q); 137 else 138 ethproto = h->h_proto; 139 140 if (e->bitmask & EBT_802_3) { 141 if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO)) 142 return 1; 143 } else if (!(e->bitmask & EBT_NOPROTO) && 144 FWINV2(e->ethproto != ethproto, EBT_IPROTO)) 145 return 1; 146 147 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN)) 148 return 1; 149 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT)) 150 return 1; 151 /* rcu_read_lock()ed by nf_hook_slow */ 152 if (in && (p = br_port_get_rcu(in)) != NULL && 153 FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN)) 154 return 1; 155 if (out && (p = br_port_get_rcu(out)) != NULL && 156 FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT)) 157 return 1; 158 159 if (e->bitmask & EBT_SOURCEMAC) { 160 verdict = 0; 161 for (i = 0; i < 6; i++) 162 verdict |= (h->h_source[i] ^ e->sourcemac[i]) & 163 e->sourcemsk[i]; 164 if (FWINV2(verdict != 0, EBT_ISOURCE) ) 165 return 1; 166 } 167 if (e->bitmask & EBT_DESTMAC) { 168 verdict = 0; 169 for (i = 0; i < 6; i++) 170 verdict |= (h->h_dest[i] ^ e->destmac[i]) & 171 e->destmsk[i]; 172 if 
(FWINV2(verdict != 0, EBT_IDEST) ) 173 return 1; 174 } 175 return 0; 176} 177 178static inline __pure 179struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry) 180{ 181 return (void *)entry + entry->next_offset; 182} 183 184/* Do some firewalling */ 185unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb, 186 const struct net_device *in, const struct net_device *out, 187 struct ebt_table *table) 188{ 189 int i, nentries; 190 struct ebt_entry *point; 191 struct ebt_counter *counter_base, *cb_base; 192 const struct ebt_entry_target *t; 193 int verdict, sp = 0; 194 struct ebt_chainstack *cs; 195 struct ebt_entries *chaininfo; 196 const char *base; 197 const struct ebt_table_info *private; 198 struct xt_action_param acpar; 199 200 acpar.family = NFPROTO_BRIDGE; 201 acpar.in = in; 202 acpar.out = out; 203 acpar.hotdrop = false; 204 acpar.hooknum = hook; 205 206 read_lock_bh(&table->lock); 207 private = table->private; 208 cb_base = COUNTER_BASE(private->counters, private->nentries, 209 smp_processor_id()); 210 if (private->chainstack) 211 cs = private->chainstack[smp_processor_id()]; 212 else 213 cs = NULL; 214 chaininfo = private->hook_entry[hook]; 215 nentries = private->hook_entry[hook]->nentries; 216 point = (struct ebt_entry *)(private->hook_entry[hook]->data); 217 counter_base = cb_base + private->hook_entry[hook]->counter_offset; 218 /* base for chain jumps */ 219 base = private->entries; 220 i = 0; 221 while (i < nentries) { 222 if (ebt_basic_match(point, skb, in, out)) 223 goto letscontinue; 224 225 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0) 226 goto letscontinue; 227 if (acpar.hotdrop) { 228 read_unlock_bh(&table->lock); 229 return NF_DROP; 230 } 231 232 /* increase counter */ 233 (*(counter_base + i)).pcnt++; 234 (*(counter_base + i)).bcnt += skb->len; 235 236 /* these should only watch: not modify, nor tell us 237 what to do with the packet */ 238 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar); 239 240 t = 
(struct ebt_entry_target *) 241 (((char *)point) + point->target_offset); 242 /* standard target */ 243 if (!t->u.target->target) 244 verdict = ((struct ebt_standard_target *)t)->verdict; 245 else { 246 acpar.target = t->u.target; 247 acpar.targinfo = t->data; 248 verdict = t->u.target->target(skb, &acpar); 249 } 250 if (verdict == EBT_ACCEPT) { 251 read_unlock_bh(&table->lock); 252 return NF_ACCEPT; 253 } 254 if (verdict == EBT_DROP) { 255 read_unlock_bh(&table->lock); 256 return NF_DROP; 257 } 258 if (verdict == EBT_RETURN) { 259letsreturn: 260#ifdef CONFIG_NETFILTER_DEBUG 261 if (sp == 0) { 262 BUGPRINT("RETURN on base chain"); 263 /* act like this is EBT_CONTINUE */ 264 goto letscontinue; 265 } 266#endif 267 sp--; 268 /* put all the local variables right */ 269 i = cs[sp].n; 270 chaininfo = cs[sp].chaininfo; 271 nentries = chaininfo->nentries; 272 point = cs[sp].e; 273 counter_base = cb_base + 274 chaininfo->counter_offset; 275 continue; 276 } 277 if (verdict == EBT_CONTINUE) 278 goto letscontinue; 279#ifdef CONFIG_NETFILTER_DEBUG 280 if (verdict < 0) { 281 BUGPRINT("bogus standard verdict\n"); 282 read_unlock_bh(&table->lock); 283 return NF_DROP; 284 } 285#endif 286 /* jump to a udc */ 287 cs[sp].n = i + 1; 288 cs[sp].chaininfo = chaininfo; 289 cs[sp].e = ebt_next_entry(point); 290 i = 0; 291 chaininfo = (struct ebt_entries *) (base + verdict); 292#ifdef CONFIG_NETFILTER_DEBUG 293 if (chaininfo->distinguisher) { 294 BUGPRINT("jump to non-chain\n"); 295 read_unlock_bh(&table->lock); 296 return NF_DROP; 297 } 298#endif 299 nentries = chaininfo->nentries; 300 point = (struct ebt_entry *)chaininfo->data; 301 counter_base = cb_base + chaininfo->counter_offset; 302 sp++; 303 continue; 304letscontinue: 305 point = ebt_next_entry(point); 306 i++; 307 } 308 309 /* I actually like this :) */ 310 if (chaininfo->policy == EBT_RETURN) 311 goto letsreturn; 312 if (chaininfo->policy == EBT_ACCEPT) { 313 read_unlock_bh(&table->lock); 314 return NF_ACCEPT; 315 } 316 
read_unlock_bh(&table->lock); 317 return NF_DROP; 318} 319 320/* If it succeeds, returns element and locks mutex */ 321static inline void * 322find_inlist_lock_noload(struct list_head *head, const char *name, int *error, 323 struct mutex *mutex) 324{ 325 struct { 326 struct list_head list; 327 char name[EBT_FUNCTION_MAXNAMELEN]; 328 } *e; 329 330 mutex_lock(mutex); 331 list_for_each_entry(e, head, list) { 332 if (strcmp(e->name, name) == 0) 333 return e; 334 } 335 *error = -ENOENT; 336 mutex_unlock(mutex); 337 return NULL; 338} 339 340static void * 341find_inlist_lock(struct list_head *head, const char *name, const char *prefix, 342 int *error, struct mutex *mutex) 343{ 344 return try_then_request_module( 345 find_inlist_lock_noload(head, name, error, mutex), 346 "%s%s", prefix, name); 347} 348 349static inline struct ebt_table * 350find_table_lock(struct net *net, const char *name, int *error, 351 struct mutex *mutex) 352{ 353 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name, 354 "ebtable_", error, mutex); 355} 356 357static inline int 358ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par, 359 unsigned int *cnt) 360{ 361 const struct ebt_entry *e = par->entryinfo; 362 struct xt_match *match; 363 size_t left = ((char *)e + e->watchers_offset) - (char *)m; 364 int ret; 365 366 if (left < sizeof(struct ebt_entry_match) || 367 left - sizeof(struct ebt_entry_match) < m->match_size) 368 return -EINVAL; 369 370 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0); 371 if (IS_ERR(match)) 372 return PTR_ERR(match); 373 m->u.match = match; 374 375 par->match = match; 376 par->matchinfo = m->data; 377 ret = xt_check_match(par, m->match_size, 378 e->ethproto, e->invflags & EBT_IPROTO); 379 if (ret < 0) { 380 module_put(match->me); 381 return ret; 382 } 383 384 (*cnt)++; 385 return 0; 386} 387 388static inline int 389ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, 390 unsigned int *cnt) 391{ 392 const struct 
ebt_entry *e = par->entryinfo; 393 struct xt_target *watcher; 394 size_t left = ((char *)e + e->target_offset) - (char *)w; 395 int ret; 396 397 if (left < sizeof(struct ebt_entry_watcher) || 398 left - sizeof(struct ebt_entry_watcher) < w->watcher_size) 399 return -EINVAL; 400 401 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); 402 if (IS_ERR(watcher)) 403 return PTR_ERR(watcher); 404 w->u.watcher = watcher; 405 406 par->target = watcher; 407 par->targinfo = w->data; 408 ret = xt_check_target(par, w->watcher_size, 409 e->ethproto, e->invflags & EBT_IPROTO); 410 if (ret < 0) { 411 module_put(watcher->me); 412 return ret; 413 } 414 415 (*cnt)++; 416 return 0; 417} 418 419static int ebt_verify_pointers(const struct ebt_replace *repl, 420 struct ebt_table_info *newinfo) 421{ 422 unsigned int limit = repl->entries_size; 423 unsigned int valid_hooks = repl->valid_hooks; 424 unsigned int offset = 0; 425 int i; 426 427 for (i = 0; i < NF_BR_NUMHOOKS; i++) 428 newinfo->hook_entry[i] = NULL; 429 430 newinfo->entries_size = repl->entries_size; 431 newinfo->nentries = repl->nentries; 432 433 while (offset < limit) { 434 size_t left = limit - offset; 435 struct ebt_entry *e = (void *)newinfo->entries + offset; 436 437 if (left < sizeof(unsigned int)) 438 break; 439 440 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 441 if ((valid_hooks & (1 << i)) == 0) 442 continue; 443 if ((char __user *)repl->hook_entry[i] == 444 repl->entries + offset) 445 break; 446 } 447 448 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) { 449 if (e->bitmask != 0) { 450 /* we make userspace set this right, 451 so there is no misunderstanding */ 452 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set " 453 "in distinguisher\n"); 454 return -EINVAL; 455 } 456 if (i != NF_BR_NUMHOOKS) 457 newinfo->hook_entry[i] = (struct ebt_entries *)e; 458 if (left < sizeof(struct ebt_entries)) 459 break; 460 offset += sizeof(struct ebt_entries); 461 } else { 462 if (left < sizeof(struct ebt_entry)) 
463 break; 464 if (left < e->next_offset) 465 break; 466 if (e->next_offset < sizeof(struct ebt_entry)) 467 return -EINVAL; 468 offset += e->next_offset; 469 } 470 } 471 if (offset != limit) { 472 BUGPRINT("entries_size too small\n"); 473 return -EINVAL; 474 } 475 476 /* check if all valid hooks have a chain */ 477 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 478 if (!newinfo->hook_entry[i] && 479 (valid_hooks & (1 << i))) { 480 BUGPRINT("Valid hook without chain\n"); 481 return -EINVAL; 482 } 483 } 484 return 0; 485} 486 487/* 488 * this one is very careful, as it is the first function 489 * to parse the userspace data 490 */ 491static inline int 492ebt_check_entry_size_and_hooks(const struct ebt_entry *e, 493 const struct ebt_table_info *newinfo, 494 unsigned int *n, unsigned int *cnt, 495 unsigned int *totalcnt, unsigned int *udc_cnt) 496{ 497 int i; 498 499 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 500 if ((void *)e == (void *)newinfo->hook_entry[i]) 501 break; 502 } 503 /* beginning of a new chain 504 if i == NF_BR_NUMHOOKS it must be a user defined chain */ 505 if (i != NF_BR_NUMHOOKS || !e->bitmask) { 506 /* this checks if the previous chain has as many entries 507 as it said it has */ 508 if (*n != *cnt) { 509 BUGPRINT("nentries does not equal the nr of entries " 510 "in the chain\n"); 511 return -EINVAL; 512 } 513 if (((struct ebt_entries *)e)->policy != EBT_DROP && 514 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) { 515 /* only RETURN from udc */ 516 if (i != NF_BR_NUMHOOKS || 517 ((struct ebt_entries *)e)->policy != EBT_RETURN) { 518 BUGPRINT("bad policy\n"); 519 return -EINVAL; 520 } 521 } 522 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */ 523 (*udc_cnt)++; 524 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) { 525 BUGPRINT("counter_offset != totalcnt"); 526 return -EINVAL; 527 } 528 *n = ((struct ebt_entries *)e)->nentries; 529 *cnt = 0; 530 return 0; 531 } 532 /* a plain old entry, heh */ 533 if (sizeof(struct ebt_entry) > 
e->watchers_offset || 534 e->watchers_offset > e->target_offset || 535 e->target_offset >= e->next_offset) { 536 BUGPRINT("entry offsets not in right order\n"); 537 return -EINVAL; 538 } 539 /* this is not checked anywhere else */ 540 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) { 541 BUGPRINT("target size too small\n"); 542 return -EINVAL; 543 } 544 (*cnt)++; 545 (*totalcnt)++; 546 return 0; 547} 548 549struct ebt_cl_stack 550{ 551 struct ebt_chainstack cs; 552 int from; 553 unsigned int hookmask; 554}; 555 556/* 557 * we need these positions to check that the jumps to a different part of the 558 * entries is a jump to the beginning of a new chain. 559 */ 560static inline int 561ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, 562 unsigned int *n, struct ebt_cl_stack *udc) 563{ 564 int i; 565 566 /* we're only interested in chain starts */ 567 if (e->bitmask) 568 return 0; 569 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 570 if (newinfo->hook_entry[i] == (struct ebt_entries *)e) 571 break; 572 } 573 /* only care about udc */ 574 if (i != NF_BR_NUMHOOKS) 575 return 0; 576 577 udc[*n].cs.chaininfo = (struct ebt_entries *)e; 578 /* these initialisations are depended on later in check_chainloops() */ 579 udc[*n].cs.n = 0; 580 udc[*n].hookmask = 0; 581 582 (*n)++; 583 return 0; 584} 585 586static inline int 587ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i) 588{ 589 struct xt_mtdtor_param par; 590 591 if (i && (*i)-- == 0) 592 return 1; 593 594 par.net = net; 595 par.match = m->u.match; 596 par.matchinfo = m->data; 597 par.family = NFPROTO_BRIDGE; 598 if (par.match->destroy != NULL) 599 par.match->destroy(&par); 600 module_put(par.match->me); 601 return 0; 602} 603 604static inline int 605ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i) 606{ 607 struct xt_tgdtor_param par; 608 609 if (i && (*i)-- == 0) 610 return 1; 611 612 par.net = net; 613 par.target = 
w->u.watcher; 614 par.targinfo = w->data; 615 par.family = NFPROTO_BRIDGE; 616 if (par.target->destroy != NULL) 617 par.target->destroy(&par); 618 module_put(par.target->me); 619 return 0; 620} 621 622static inline int 623ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt) 624{ 625 struct xt_tgdtor_param par; 626 struct ebt_entry_target *t; 627 628 if (e->bitmask == 0) 629 return 0; 630 /* we're done */ 631 if (cnt && (*cnt)-- == 0) 632 return 1; 633 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL); 634 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL); 635 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 636 637 par.net = net; 638 par.target = t->u.target; 639 par.targinfo = t->data; 640 par.family = NFPROTO_BRIDGE; 641 if (par.target->destroy != NULL) 642 par.target->destroy(&par); 643 module_put(par.target->me); 644 return 0; 645} 646 647static inline int 648ebt_check_entry(struct ebt_entry *e, struct net *net, 649 const struct ebt_table_info *newinfo, 650 const char *name, unsigned int *cnt, 651 struct ebt_cl_stack *cl_s, unsigned int udc_cnt) 652{ 653 struct ebt_entry_target *t; 654 struct xt_target *target; 655 unsigned int i, j, hook = 0, hookmask = 0; 656 size_t gap; 657 int ret; 658 struct xt_mtchk_param mtpar; 659 struct xt_tgchk_param tgpar; 660 661 /* don't mess with the struct ebt_entries */ 662 if (e->bitmask == 0) 663 return 0; 664 665 if (e->bitmask & ~EBT_F_MASK) { 666 BUGPRINT("Unknown flag for bitmask\n"); 667 return -EINVAL; 668 } 669 if (e->invflags & ~EBT_INV_MASK) { 670 BUGPRINT("Unknown flag for inv bitmask\n"); 671 return -EINVAL; 672 } 673 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) { 674 BUGPRINT("NOPROTO & 802_3 not allowed\n"); 675 return -EINVAL; 676 } 677 /* what hook do we belong to? 
*/ 678 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 679 if (!newinfo->hook_entry[i]) 680 continue; 681 if ((char *)newinfo->hook_entry[i] < (char *)e) 682 hook = i; 683 else 684 break; 685 } 686 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on 687 a base chain */ 688 if (i < NF_BR_NUMHOOKS) 689 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); 690 else { 691 for (i = 0; i < udc_cnt; i++) 692 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e) 693 break; 694 if (i == 0) 695 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); 696 else 697 hookmask = cl_s[i - 1].hookmask; 698 } 699 i = 0; 700 701 mtpar.net = tgpar.net = net; 702 mtpar.table = tgpar.table = name; 703 mtpar.entryinfo = tgpar.entryinfo = e; 704 mtpar.hook_mask = tgpar.hook_mask = hookmask; 705 mtpar.family = tgpar.family = NFPROTO_BRIDGE; 706 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i); 707 if (ret != 0) 708 goto cleanup_matches; 709 j = 0; 710 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j); 711 if (ret != 0) 712 goto cleanup_watchers; 713 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 714 gap = e->next_offset - e->target_offset; 715 716 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0); 717 if (IS_ERR(target)) { 718 ret = PTR_ERR(target); 719 goto cleanup_watchers; 720 } 721 722 t->u.target = target; 723 if (t->u.target == &ebt_standard_target) { 724 if (gap < sizeof(struct ebt_standard_target)) { 725 BUGPRINT("Standard target size too big\n"); 726 ret = -EFAULT; 727 goto cleanup_watchers; 728 } 729 if (((struct ebt_standard_target *)t)->verdict < 730 -NUM_STANDARD_TARGETS) { 731 BUGPRINT("Invalid standard target\n"); 732 ret = -EFAULT; 733 goto cleanup_watchers; 734 } 735 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) { 736 module_put(t->u.target->me); 737 ret = -EFAULT; 738 goto cleanup_watchers; 739 } 740 741 tgpar.target = target; 742 tgpar.targinfo = t->data; 743 ret = xt_check_target(&tgpar, t->target_size, 744 
e->ethproto, e->invflags & EBT_IPROTO); 745 if (ret < 0) { 746 module_put(target->me); 747 goto cleanup_watchers; 748 } 749 (*cnt)++; 750 return 0; 751cleanup_watchers: 752 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j); 753cleanup_matches: 754 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i); 755 return ret; 756} 757 758/* 759 * checks for loops and sets the hook mask for udc 760 * the hook mask for udc tells us from which base chains the udc can be 761 * accessed. This mask is a parameter to the check() functions of the extensions 762 */ 763static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s, 764 unsigned int udc_cnt, unsigned int hooknr, char *base) 765{ 766 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; 767 const struct ebt_entry *e = (struct ebt_entry *)chain->data; 768 const struct ebt_entry_target *t; 769 770 while (pos < nentries || chain_nr != -1) { 771 /* end of udc, go back one 'recursion' step */ 772 if (pos == nentries) { 773 /* put back values of the time when this chain was called */ 774 e = cl_s[chain_nr].cs.e; 775 if (cl_s[chain_nr].from != -1) 776 nentries = 777 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries; 778 else 779 nentries = chain->nentries; 780 pos = cl_s[chain_nr].cs.n; 781 /* make sure we won't see a loop that isn't one */ 782 cl_s[chain_nr].cs.n = 0; 783 chain_nr = cl_s[chain_nr].from; 784 if (pos == nentries) 785 continue; 786 } 787 t = (struct ebt_entry_target *) 788 (((char *)e) + e->target_offset); 789 if (strcmp(t->u.name, EBT_STANDARD_TARGET)) 790 goto letscontinue; 791 if (e->target_offset + sizeof(struct ebt_standard_target) > 792 e->next_offset) { 793 BUGPRINT("Standard target size too big\n"); 794 return -1; 795 } 796 verdict = ((struct ebt_standard_target *)t)->verdict; 797 if (verdict >= 0) { /* jump to another chain */ 798 struct ebt_entries *hlp2 = 799 (struct ebt_entries *)(base + verdict); 800 for (i = 0; i < udc_cnt; i++) 801 if (hlp2 == 
cl_s[i].cs.chaininfo) 802 break; 803 /* bad destination or loop */ 804 if (i == udc_cnt) { 805 BUGPRINT("bad destination\n"); 806 return -1; 807 } 808 if (cl_s[i].cs.n) { 809 BUGPRINT("loop\n"); 810 return -1; 811 } 812 if (cl_s[i].hookmask & (1 << hooknr)) 813 goto letscontinue; 814 /* this can't be 0, so the loop test is correct */ 815 cl_s[i].cs.n = pos + 1; 816 pos = 0; 817 cl_s[i].cs.e = ebt_next_entry(e); 818 e = (struct ebt_entry *)(hlp2->data); 819 nentries = hlp2->nentries; 820 cl_s[i].from = chain_nr; 821 chain_nr = i; 822 /* this udc is accessible from the base chain for hooknr */ 823 cl_s[i].hookmask |= (1 << hooknr); 824 continue; 825 } 826letscontinue: 827 e = ebt_next_entry(e); 828 pos++; 829 } 830 return 0; 831} 832 833/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ 834static int translate_table(struct net *net, const char *name, 835 struct ebt_table_info *newinfo) 836{ 837 unsigned int i, j, k, udc_cnt; 838 int ret; 839 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */ 840 841 i = 0; 842 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i]) 843 i++; 844 if (i == NF_BR_NUMHOOKS) { 845 BUGPRINT("No valid hooks specified\n"); 846 return -EINVAL; 847 } 848 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) { 849 BUGPRINT("Chains don't start at beginning\n"); 850 return -EINVAL; 851 } 852 /* make sure chains are ordered after each other in same order 853 as their corresponding hooks */ 854 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) { 855 if (!newinfo->hook_entry[j]) 856 continue; 857 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) { 858 BUGPRINT("Hook order must be followed\n"); 859 return -EINVAL; 860 } 861 i = j; 862 } 863 864 /* do some early checkings and initialize some things */ 865 i = 0; /* holds the expected nr. of entries for the chain */ 866 j = 0; /* holds the up to now counted entries for the chain */ 867 k = 0; /* holds the total nr. 
of entries, should equal 868 newinfo->nentries afterwards */ 869 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */ 870 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 871 ebt_check_entry_size_and_hooks, newinfo, 872 &i, &j, &k, &udc_cnt); 873 874 if (ret != 0) 875 return ret; 876 877 if (i != j) { 878 BUGPRINT("nentries does not equal the nr of entries in the " 879 "(last) chain\n"); 880 return -EINVAL; 881 } 882 if (k != newinfo->nentries) { 883 BUGPRINT("Total nentries is wrong\n"); 884 return -EINVAL; 885 } 886 887 /* get the location of the udc, put them in an array 888 while we're at it, allocate the chainstack */ 889 if (udc_cnt) { 890 /* this will get free'd in do_replace()/ebt_register_table() 891 if an error occurs */ 892 newinfo->chainstack = 893 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack))); 894 if (!newinfo->chainstack) 895 return -ENOMEM; 896 for_each_possible_cpu(i) { 897 newinfo->chainstack[i] = 898 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0]))); 899 if (!newinfo->chainstack[i]) { 900 while (i) 901 vfree(newinfo->chainstack[--i]); 902 vfree(newinfo->chainstack); 903 newinfo->chainstack = NULL; 904 return -ENOMEM; 905 } 906 } 907 908 cl_s = vmalloc(udc_cnt * sizeof(*cl_s)); 909 if (!cl_s) 910 return -ENOMEM; 911 i = 0; /* the i'th udc */ 912 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 913 ebt_get_udc_positions, newinfo, &i, cl_s); 914 /* sanity check */ 915 if (i != udc_cnt) { 916 BUGPRINT("i != udc_cnt\n"); 917 vfree(cl_s); 918 return -EFAULT; 919 } 920 } 921 922 /* Check for loops */ 923 for (i = 0; i < NF_BR_NUMHOOKS; i++) 924 if (newinfo->hook_entry[i]) 925 if (check_chainloops(newinfo->hook_entry[i], 926 cl_s, udc_cnt, i, newinfo->entries)) { 927 vfree(cl_s); 928 return -EINVAL; 929 } 930 931 /* we now know the following (along with E=mc²): 932 - the nr of entries in each chain is right 933 - the size of the allocated space is right 934 - all valid hooks have a corresponding chain 935 - 
there are no loops 936 - wrong data can still be on the level of a single entry 937 - could be there are jumps to places that are not the 938 beginning of a chain. This can only occur in chains that 939 are not accessible from any base chains, so we don't care. */ 940 941 /* used to know what we need to clean up if something goes wrong */ 942 i = 0; 943 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 944 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt); 945 if (ret != 0) { 946 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 947 ebt_cleanup_entry, net, &i); 948 } 949 vfree(cl_s); 950 return ret; 951} 952 953/* called under write_lock */ 954static void get_counters(const struct ebt_counter *oldcounters, 955 struct ebt_counter *counters, unsigned int nentries) 956{ 957 int i, cpu; 958 struct ebt_counter *counter_base; 959 960 /* counters of cpu 0 */ 961 memcpy(counters, oldcounters, 962 sizeof(struct ebt_counter) * nentries); 963 964 /* add other counters to those of cpu 0 */ 965 for_each_possible_cpu(cpu) { 966 if (cpu == 0) 967 continue; 968 counter_base = COUNTER_BASE(oldcounters, nentries, cpu); 969 for (i = 0; i < nentries; i++) { 970 counters[i].pcnt += counter_base[i].pcnt; 971 counters[i].bcnt += counter_base[i].bcnt; 972 } 973 } 974} 975 976static int do_replace_finish(struct net *net, struct ebt_replace *repl, 977 struct ebt_table_info *newinfo) 978{ 979 int ret, i; 980 struct ebt_counter *counterstmp = NULL; 981 /* used to be able to unlock earlier */ 982 struct ebt_table_info *table; 983 struct ebt_table *t; 984 985 /* the user wants counters back 986 the check on the size is done later, when we have the lock */ 987 if (repl->num_counters) { 988 unsigned long size = repl->num_counters * sizeof(*counterstmp); 989 counterstmp = vmalloc(size); 990 if (!counterstmp) 991 return -ENOMEM; 992 } 993 994 newinfo->chainstack = NULL; 995 ret = ebt_verify_pointers(repl, newinfo); 996 if (ret != 0) 997 goto free_counterstmp; 998 999 
	/* NOTE(review): this chunk begins mid-function; do_replace_finish()
	 * starts above this excerpt. From here on it validates the new table,
	 * swaps it in under t->lock and releases the old one. */
	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	   allocation. Only reason why this is done is because this way the lock
	   is held only once, while this doesn't bring the kernel into a
	   dangerous state. */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
	}

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
	   ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}

/* replace the table: EBT_SO_SET_ENTRIES setsockopt handler.
 * Copies the ebt_replace header and the entry blob from userspace,
 * sanity-checks sizes (with explicit multiplication-overflow guards),
 * then hands ownership of newinfo to do_replace_finish() on success. */
static int do_replace(struct net *net, const void __user *user,
   unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* userspace-supplied name may be unterminated */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}

/* Register a built-in table (e.g. "filter", "nat", "broute") for one
 * netns. Duplicates the template, copies and translates its initial
 * entries, and links the result into net->xt.tables[NFPROTO_BRIDGE].
 * Returns the new table or an ERR_PTR(). */
struct ebt_table *
ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return ERR_PTR(-EINVAL);
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			/* rebase hook pointers from the template blob onto
			 * our private copy */
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	mutex_lock(&ebt_mutex);
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);
	return table;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ERR_PTR(ret);
}

/* Unlink a table from its netns list and free everything it owns
 * (entries, per-cpu chainstacks, private info, the table itself). */
void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
	int i;

	if (!table) {
		BUGPRINT("Request to unregister NULL table!!!\n");
		return;
	}
	mutex_lock(&ebt_mutex);
	list_del(&table->list);
	mutex_unlock(&ebt_mutex);
	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
			  ebt_cleanup_entry, net, NULL);
	/* drop the reference taken at register time for non-empty tables */
	if (table->private->nentries)
		module_put(table->me);
	vfree(table->private->entries);
	if (table->private->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->private->chainstack[i]);
		vfree(table->private->chainstack);
	}
	vfree(table->private);
	kfree(table);
}

/* userspace just supplied us with counters */
static int do_update_counters(struct net *net, const char *name,
				struct ebt_counter __user *counters,
				unsigned int num_counters,
				const void __user *user, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(num_counters * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	if (num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr of counters\n");
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++) {
		t->private->counters[i].pcnt += tmp[i].pcnt;
		t->private->counters[i].bcnt += tmp[i].bcnt;
	}

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}

/* EBT_SO_SET_COUNTERS setsockopt handler: validate the length against
 * the userspace-declared counter count, then delegate.
 * NOTE(review): hlp.num_counters * sizeof(struct ebt_counter) comes
 * straight from userspace; on overflow the len check here could be
 * fooled — later kernels add an explicit bound. Verify against a
 * current tree before relying on this path. */
static int update_counters(struct net *net, const void __user *user,
				unsigned int len)
{
	struct ebt_replace hlp;

	if (copy_from_user(&hlp, user, sizeof(hlp)))
		return -EFAULT;

	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
		return -EINVAL;

	return do_update_counters(net, hlp.name, hlp.counters,
				  hlp.num_counters, user, len);
}

/* Write the match's name back into the userspace blob at the position
 * corresponding to this kernel-side match (names were replaced by
 * kernel pointers during translation). */
static inline int ebt_make_matchname(const struct ebt_entry_match *m,
				     const char *base, char __user *ubase)
{
	char __user *hlp = ubase + ((char *)m - base);
	char name[EBT_FUNCTION_MAXNAMELEN] = {};

	/* ebtables expects 32 bytes long names but xt_match names are 29 bytes
	   long. Copy 29 bytes and fill remaining bytes with zeroes. */
	strlcpy(name, m->u.match->name, sizeof(name));
	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
		return -EFAULT;
	return 0;
}

/* same as ebt_make_matchname(), for watchers */
static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
				       const char *base, char __user *ubase)
{
	char __user *hlp = ubase + ((char *)w - base);
	char name[EBT_FUNCTION_MAXNAMELEN] = {};

	strlcpy(name, w->u.watcher->name, sizeof(name));
	if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
		return -EFAULT;
	return 0;
}

/* Restore match/watcher/target names in the userspace copy of one
 * entry; called via EBT_ENTRY_ITERATE after the raw blob was copied. */
static inline int
ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
{
	int ret;
	char __user *hlp;
	const struct ebt_entry_target *t;
	char name[EBT_FUNCTION_MAXNAMELEN] = {};

	/* skip the ebt_entries chain headers (bitmask == 0) */
	if (e->bitmask == 0)
		return 0;

	hlp = ubase + (((char *)e + e->target_offset) - base);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
	if (ret != 0)
		return ret;
	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
	if (ret != 0)
		return ret;
	strlcpy(name, t->u.target->name, sizeof(name));
	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
		return -EFAULT;
	return 0;
}

/* Snapshot the per-cpu counters under t->lock and copy the summed
 * result to userspace. num_counters of 0 means "don't want them". */
static int copy_counters_to_user(struct ebt_table *t,
				 const struct ebt_counter *oldcounters,
				 void __user *user, unsigned int num_counters,
				 unsigned int nentries)
{
	struct ebt_counter *counterstmp;
	int ret = 0;

	/* userspace might not need the counters */
	if (num_counters == 0)
		return 0;

	if (num_counters != nentries) {
		BUGPRINT("Num_counters wrong\n");
		return -EINVAL;
	}

	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
	if (!counterstmp)
		return -ENOMEM;

	write_lock_bh(&t->lock);
	get_counters(oldcounters, counterstmp, nentries);
	write_unlock_bh(&t->lock);

	if (copy_to_user(user, counterstmp,
	   nentries * sizeof(struct ebt_counter)))
		ret = -EFAULT;
	vfree(counterstmp);
	return ret;
}

/* called with ebt_mutex locked */
/* EBT_SO_GET_(INIT_)ENTRIES: copy the current (or initial) ruleset,
 * plus counters if requested, back to userspace, then fix up the
 * match/watcher/target names in the copied blob. */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live ruleset */
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* pristine template the table was registered with */
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}

/* setsockopt() dispatcher; requires CAP_NET_ADMIN in the socket's
 * user namespace. */
static int do_ebt_set_ctl(struct sock *sk,
	int cmd, void __user *user, unsigned int len)
{
	int ret;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EBT_SO_SET_ENTRIES:
		ret = do_replace(net, user, len);
		break;
	case EBT_SO_SET_COUNTERS:
		ret = update_counters(net, user, len);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

/* getsockopt() dispatcher. find_table_lock() returns with ebt_mutex
 * held on success; every branch below must drop it exactly once. */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch (cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)) {
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0) {
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};

/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;
	compat_uint_t data[0];
};

/* account for possible padding between match_size and ->data */
static int ebt_compat_entry_padsize(void)
{
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}

/* size delta between the 32-bit user representation of a match's data
 * and the native kernel one */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
{
	/*
	 * ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
}

/* shrink one kernel match back to its 32-bit layout and write it at
 * *dstptr, advancing *dstptr and decrementing *size accordingly */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}

/* same idea as compat_match_to_user(), for targets (and, via the
 * wrapper below, watchers — they share the layout) */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
/* watchers share struct layout with targets; reuse the target path */
static int compat_watcher_to_user(struct ebt_entry_watcher *w,
				  void __user **dstptr,
				  unsigned int *size)
{
	return compat_target_to_user((struct ebt_entry_target *)w,
				     dstptr, size);
}

/* Convert one entry (or ebt_entries chain header) to its 32-bit user
 * representation at *dstptr, recomputing the watcher/target/next
 * offsets to account for the shrunken match/watcher/target blobs. */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				     unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	if (e->bitmask == 0) {
		/* chain header: same layout in both ABIs, copy verbatim */
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	/* (origsize - *size) is how much the converted data shrank */
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}

/* accumulate the 32/64-bit size delta of one match into *off */
static int compat_calc_match(struct ebt_entry_match *m, int *off)
{
	*off += ebt_compat_match_offset(m->u.match, m->match_size);
	*off += ebt_compat_entry_padsize();
	return 0;
}

/* accumulate the 32/64-bit size delta of one watcher into *off */
static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
{
	*off += xt_compat_target_offset(w->u.watcher);
	*off += ebt_compat_entry_padsize();
	return 0;
}

/* Compute how much smaller one entry becomes in the compat layout,
 * record the per-entry delta with xt_compat_add_offset(), shrink
 * newinfo->entries_size, and pull affected hook offsets back. */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): '(base - hookptr)' looks inverted vs.
		 * 'hookptr - base' but matches the upstream source —
		 * confirm against a current tree before "fixing". */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
				 newinfo->hook_entry[i] + off,
				 newinfo->hook_entry[i]);
		}
	}

	return 0;
}


/* fill newinfo with the compat-sized view of an existing table */
static int compat_table_info(const struct ebt_table_info *info,
			     struct compat_ebt_replace *newinfo)
{
	unsigned int size = info->entries_size;
	const void *entries = info->entries;

	newinfo->entries_size = size;

	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
				 entries, newinfo);
}

/* compat twin of copy_everything_to_user(); called with ebt_mutex held */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live ruleset */
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* pristine template the table was registered with */
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}

/* two-pass translation state: pass 1 (buf_kern_start == NULL) only
 * measures; pass 2 actually writes into the kernel buffer */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};

/* advance the kernel write offset; -EINVAL signals u32 wraparound */
static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
{
	state->buf_kern_offset += sz;
	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
}

/* copy sz bytes into the kernel buffer (or just account for them on
 * the measuring pass) and advance both offsets */
static int ebt_buf_add(struct ebt_entries_buf_state *state,
		       void *data, unsigned int sz)
{
	if (state->buf_kern_start == NULL)
		goto count_only;

	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);

	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);

 count_only:
	state->buf_user_offset += sz;
	return ebt_buf_count(state, sz);
}

/* emit sz zero bytes of kernel-only alignment padding */
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
{
	char *b = state->buf_kern_start;

	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);

	if (b != NULL && sz > 0)
		memset(b + state->buf_kern_offset, 0, sz);
	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
	return ebt_buf_count(state, sz);
}

enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};

/* Translate one 32-bit match/watcher/target blob into the native
 * layout (looking up the extension by name), returning the native
 * size (off + match_size) or a negative errno.
 * NOTE(review): mwt->match_size originates in userspace and is only
 * range-checked by the caller and BUG_ON()s here; later kernels add
 * explicit offset validation (CVE-2018-1068) — confirm before reuse. */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* ebt_among: variable-sized match, see ebt_compat_match_offset() */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}

/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* on the writing pass, remember where this match's header
		 * lands so its match_size can be patched after translation */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		if (match_kern)
			match_kern->match_size = ret;

		/* a target is always the last element in an entry */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}

/* called for all ebt_entry structures. */
/* Translate one compat entry: copy the fixed part, translate its
 * matches/watchers/target sections, and rewrite the four offset
 * fields to account for growth.
 * NOTE(review): offsets[] comes from userspace; only monotonicity is
 * checked here — later kernels validate each offset against the entry
 * bounds (CVE-2018-1068). Confirm against a current tree. */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	if (!entry->bitmask) {
		/* chain header: identical layout, copy as-is */
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt section type here */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	if (state->buf_kern_start == NULL) {
		unsigned int offset = buf_start - (char *) base;

		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
		if (ret < 0)
			return ret;
	}

	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}

/*
 * repl->entries_size is the size of the ebt_entry blob in userspace.
 * It might need more memory when copied to a 64 bit kernel in case
 * userspace is 32-bit. So, first task: find out how much memory is needed.
 *
 * Called before validation is performed.
 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
			       struct ebt_entries_buf_state *state)
{
	unsigned int size_remaining = size_user;
	int ret;

	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
					&size_remaining, state);
	if (ret < 0)
		return ret;

	WARN_ON(size_remaining);
	return state->buf_kern_offset;
}


/* parse a 32-bit ebt_replace header into the native struct,
 * converting all embedded user pointers via compat_ptr() */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					     void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow checks, mirroring do_replace() */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}

/* compat EBT_SO_SET_ENTRIES: measure (pass 1), allocate, translate
 * (pass 2), adjust the hook pointers by the recorded jump deltas, and
 * finish via do_replace_finish(). */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	xt_compat_lock(NFPROTO_BRIDGE);

	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
	/* pass 1: state.buf_kern_start == NULL, only measures */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	/* pass 2: actually writes into newinfo->entries */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}

/* compat EBT_SO_SET_COUNTERS: the counter layout is identical in both
 * ABIs, only the surrounding header differs */
static int compat_update_counters(struct net *net, void __user *user,
				  unsigned int len)
{
	struct compat_ebt_replace hlp;

	if (copy_from_user(&hlp, user, sizeof(hlp)))
		return -EFAULT;

	/* try real handler in case userland supplied needed padding */
	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
		return update_counters(net, user, len);

	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
					hlp.num_counters, user, len);
}

/* compat setsockopt() dispatcher; requires CAP_NET_ADMIN */
static int compat_do_ebt_set_ctl(struct sock *sk,
		int cmd, void __user *user, unsigned int len)
{
	int ret;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EBT_SO_SET_ENTRIES:
		ret = compat_do_replace(net, user, len);
		break;
	case EBT_SO_SET_COUNTERS:
		ret = compat_update_counters(net, user, len);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

/* compat getsockopt() dispatcher (function continues past this
 * excerpt; the source is truncated below) */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
				 void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
		return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
2354 */ 2355 if (copy_everything_to_user(t, user, len, cmd) == 0) 2356 ret = 0; 2357 else 2358 ret = compat_copy_everything_to_user(t, user, len, cmd); 2359 break; 2360 default: 2361 ret = -EINVAL; 2362 } 2363 out: 2364 xt_compat_flush_offsets(NFPROTO_BRIDGE); 2365 xt_compat_unlock(NFPROTO_BRIDGE); 2366 mutex_unlock(&ebt_mutex); 2367 return ret; 2368} 2369#endif 2370 2371static struct nf_sockopt_ops ebt_sockopts = { 2372 .pf = PF_INET, 2373 .set_optmin = EBT_BASE_CTL, 2374 .set_optmax = EBT_SO_SET_MAX + 1, 2375 .set = do_ebt_set_ctl, 2376#ifdef CONFIG_COMPAT 2377 .compat_set = compat_do_ebt_set_ctl, 2378#endif 2379 .get_optmin = EBT_BASE_CTL, 2380 .get_optmax = EBT_SO_GET_MAX + 1, 2381 .get = do_ebt_get_ctl, 2382#ifdef CONFIG_COMPAT 2383 .compat_get = compat_do_ebt_get_ctl, 2384#endif 2385 .owner = THIS_MODULE, 2386}; 2387 2388static int __init ebtables_init(void) 2389{ 2390 int ret; 2391 2392 ret = xt_register_target(&ebt_standard_target); 2393 if (ret < 0) 2394 return ret; 2395 ret = nf_register_sockopt(&ebt_sockopts); 2396 if (ret < 0) { 2397 xt_unregister_target(&ebt_standard_target); 2398 return ret; 2399 } 2400 2401 printk(KERN_INFO "Ebtables v2.0 registered\n"); 2402 return 0; 2403} 2404 2405static void __exit ebtables_fini(void) 2406{ 2407 nf_unregister_sockopt(&ebt_sockopts); 2408 xt_unregister_target(&ebt_standard_target); 2409 printk(KERN_INFO "Ebtables v2.0 unregistered\n"); 2410} 2411 2412EXPORT_SYMBOL(ebt_register_table); 2413EXPORT_SYMBOL(ebt_unregister_table); 2414EXPORT_SYMBOL(ebt_do_table); 2415module_init(ebtables_init); 2416module_exit(ebtables_fini); 2417MODULE_LICENSE("GPL");