Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

The following patchset contains another batch with Netfilter/IPVS updates
for net-next, they are:

1) Add abstracted ICMP codes to the nf_tables reject expression. We
introduce four reasons to reject using ICMP that overlap in IPv4
and IPv6 from the semantic point of view. This should simplify the
maintenance of dual stack rule-sets through the inet table.

2) Move nf_send_reset() functions from header files to per-family
nf_reject modules, suggested by Patrick McHardy.

3) We have to use IS_ENABLED(CONFIG_BRIDGE_NETFILTER) everywhere in the
code now that br_netfilter can be modularized. Convert remaining spots
in the network stack code.

4) Use rcu_barrier() in the nf_tables module removal path to ensure that
we don't leave objects that are still pending to be released via
call_rcu (that may likely result in a crash).

5) Remove incomplete arch 32/64 compat from nft_compat. The original (bad)
idea was to probe the word size based on the xtables match/target info
size, but this assumption is wrong when you have to dump the information
back to userspace.

6) Allow to filter from prerouting and postrouting in the nf_tables bridge.
In order to emulate the ebtables NAT chains (which are actually simple
filter chains with no special semantics), we have to support filtering from
these hooks too.

7) Add explicit module dependency between xt_physdev and br_netfilter.
This provides a way to detect if the user needs br_netfilter from
the configuration path. This should reduce the breakage of the
br_netfilter modularization.

8) Cleanup coding style in ip_vs.h, from Simon Horman.

9) Fix crash in the recently added nf_tables masq expression. We have
to register/unregister the notifiers to clean up the conntrack table
entries from the module init/exit path, not from the rule addition /
deletion path. From Arturo Borrero.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+698 -439
+75 -139
include/net/ip_vs.h
··· 1 - /* 2 - * IP Virtual Server 3 - * data structure and functionality definitions 1 + /* IP Virtual Server 2 + * data structure and functionality definitions 4 3 */ 5 4 6 5 #ifndef _NET_IP_VS_H ··· 11 12 12 13 #include <linux/list.h> /* for struct list_head */ 13 14 #include <linux/spinlock.h> /* for struct rwlock_t */ 14 - #include <linux/atomic.h> /* for struct atomic_t */ 15 + #include <linux/atomic.h> /* for struct atomic_t */ 15 16 #include <linux/compiler.h> 16 17 #include <linux/timer.h> 17 18 #include <linux/bug.h> ··· 29 30 #endif 30 31 #include <net/net_namespace.h> /* Netw namespace */ 31 32 32 - /* 33 - * Generic access of ipvs struct 34 - */ 33 + /* Generic access of ipvs struct */ 35 34 static inline struct netns_ipvs *net_ipvs(struct net* net) 36 35 { 37 36 return net->ipvs; 38 37 } 39 - /* 40 - * Get net ptr from skb in traffic cases 38 + 39 + /* Get net ptr from skb in traffic cases 41 40 * use skb_sknet when call is from userland (ioctl or netlink) 42 41 */ 43 42 static inline struct net *skb_net(const struct sk_buff *skb) ··· 87 90 return &init_net; 88 91 #endif 89 92 } 90 - /* 91 - * This one needed for single_open_net since net is stored directly in 93 + 94 + /* This one needed for single_open_net since net is stored directly in 92 95 * private not as a struct i.e. seq_file_net can't be used. 93 96 */ 94 97 static inline struct net *seq_file_single_net(struct seq_file *seq) ··· 105 108 106 109 struct ip_vs_iphdr { 107 110 __u32 len; /* IPv4 simply where L4 starts 108 - IPv6 where L4 Transport Header starts */ 111 + * IPv6 where L4 Transport Header starts */ 109 112 __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/ 110 113 __s16 protocol; 111 114 __s32 flags; ··· 301 304 #define LeaveFunction(level) do {} while (0) 302 305 #endif 303 306 304 - 305 - /* 306 - * The port number of FTP service (in network order). 307 - */ 307 + /* The port number of FTP service (in network order). 
*/ 308 308 #define FTPPORT cpu_to_be16(21) 309 309 #define FTPDATA cpu_to_be16(20) 310 310 311 - /* 312 - * TCP State Values 313 - */ 311 + /* TCP State Values */ 314 312 enum { 315 313 IP_VS_TCP_S_NONE = 0, 316 314 IP_VS_TCP_S_ESTABLISHED, ··· 321 329 IP_VS_TCP_S_LAST 322 330 }; 323 331 324 - /* 325 - * UDP State Values 326 - */ 332 + /* UDP State Values */ 327 333 enum { 328 334 IP_VS_UDP_S_NORMAL, 329 335 IP_VS_UDP_S_LAST, 330 336 }; 331 337 332 - /* 333 - * ICMP State Values 334 - */ 338 + /* ICMP State Values */ 335 339 enum { 336 340 IP_VS_ICMP_S_NORMAL, 337 341 IP_VS_ICMP_S_LAST, 338 342 }; 339 343 340 - /* 341 - * SCTP State Values 342 - */ 344 + /* SCTP State Values */ 343 345 enum ip_vs_sctp_states { 344 346 IP_VS_SCTP_S_NONE, 345 347 IP_VS_SCTP_S_INIT1, ··· 352 366 IP_VS_SCTP_S_LAST 353 367 }; 354 368 355 - /* 356 - * Delta sequence info structure 357 - * Each ip_vs_conn has 2 (output AND input seq. changes). 358 - * Only used in the VS/NAT. 369 + /* Delta sequence info structure 370 + * Each ip_vs_conn has 2 (output AND input seq. changes). 371 + * Only used in the VS/NAT. 
359 372 */ 360 373 struct ip_vs_seq { 361 374 __u32 init_seq; /* Add delta from this seq */ 362 375 __u32 delta; /* Delta in sequence numbers */ 363 376 __u32 previous_delta; /* Delta in sequence numbers 364 - before last resized pkt */ 377 + * before last resized pkt */ 365 378 }; 366 379 367 - /* 368 - * counters per cpu 369 - */ 380 + /* counters per cpu */ 370 381 struct ip_vs_counters { 371 382 __u32 conns; /* connections scheduled */ 372 383 __u32 inpkts; /* incoming packets */ ··· 371 388 __u64 inbytes; /* incoming bytes */ 372 389 __u64 outbytes; /* outgoing bytes */ 373 390 }; 374 - /* 375 - * Stats per cpu 376 - */ 391 + /* Stats per cpu */ 377 392 struct ip_vs_cpu_stats { 378 393 struct ip_vs_counters ustats; 379 394 struct u64_stats_sync syncp; 380 395 }; 381 396 382 - /* 383 - * IPVS statistics objects 384 - */ 397 + /* IPVS statistics objects */ 385 398 struct ip_vs_estimator { 386 399 struct list_head list; 387 400 ··· 470 491 void (*timeout_change)(struct ip_vs_proto_data *pd, int flags); 471 492 }; 472 493 473 - /* 474 - * protocol data per netns 475 - */ 494 + /* protocol data per netns */ 476 495 struct ip_vs_proto_data { 477 496 struct ip_vs_proto_data *next; 478 497 struct ip_vs_protocol *pp; ··· 497 520 __u8 pe_data_len; 498 521 }; 499 522 500 - /* 501 - * IP_VS structure allocated for each dynamically scheduled connection 502 - */ 523 + /* IP_VS structure allocated for each dynamically scheduled connection */ 503 524 struct ip_vs_conn { 504 525 struct hlist_node c_list; /* hashed list heads */ 505 526 /* Protocol, addresses and port numbers */ ··· 536 561 struct ip_vs_dest *dest; /* real server */ 537 562 atomic_t in_pkts; /* incoming packet counter */ 538 563 539 - /* packet transmitter for different forwarding methods. If it 540 - mangles the packet, it must return NF_DROP or better NF_STOLEN, 541 - otherwise this must be changed to a sk_buff **. 542 - NF_ACCEPT can be returned when destination is local. 
564 + /* Packet transmitter for different forwarding methods. If it 565 + * mangles the packet, it must return NF_DROP or better NF_STOLEN, 566 + * otherwise this must be changed to a sk_buff **. 567 + * NF_ACCEPT can be returned when destination is local. 543 568 */ 544 569 int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, 545 570 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 546 571 547 572 /* Note: we can group the following members into a structure, 548 - in order to save more space, and the following members are 549 - only used in VS/NAT anyway */ 573 + * in order to save more space, and the following members are 574 + * only used in VS/NAT anyway 575 + */ 550 576 struct ip_vs_app *app; /* bound ip_vs_app object */ 551 577 void *app_data; /* Application private data */ 552 578 struct ip_vs_seq in_seq; /* incoming seq. struct */ ··· 560 584 struct rcu_head rcu_head; 561 585 }; 562 586 563 - /* 564 - * To save some memory in conn table when name space is disabled. 565 - */ 587 + /* To save some memory in conn table when name space is disabled. */ 566 588 static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) 567 589 { 568 590 #ifdef CONFIG_NET_NS ··· 569 595 return &init_net; 570 596 #endif 571 597 } 598 + 572 599 static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net) 573 600 { 574 601 #ifdef CONFIG_NET_NS ··· 587 612 #endif 588 613 } 589 614 590 - /* 591 - * Extended internal versions of struct ip_vs_service_user and 592 - * ip_vs_dest_user for IPv6 support. 615 + /* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user 616 + * for IPv6 support. 593 617 * 594 - * We need these to conveniently pass around service and destination 595 - * options, but unfortunately, we also need to keep the old definitions to 596 - * maintain userspace backwards compatibility for the setsockopt interface. 
618 + * We need these to conveniently pass around service and destination 619 + * options, but unfortunately, we also need to keep the old definitions to 620 + * maintain userspace backwards compatibility for the setsockopt interface. 597 621 */ 598 622 struct ip_vs_service_user_kern { 599 623 /* virtual service addresses */ ··· 630 656 631 657 632 658 /* 633 - * The information about the virtual service offered to the net 634 - * and the forwarding entries 659 + * The information about the virtual service offered to the net and the 660 + * forwarding entries. 635 661 */ 636 662 struct ip_vs_service { 637 663 struct hlist_node s_list; /* for normal service table */ ··· 671 697 struct rcu_head rcu_head; 672 698 }; 673 699 674 - /* 675 - * The real server destination forwarding entry 676 - * with ip address, port number, and so on. 700 + /* The real server destination forwarding entry with ip address, port number, 701 + * and so on. 677 702 */ 678 703 struct ip_vs_dest { 679 704 struct list_head n_list; /* for the dests in the service */ ··· 711 738 unsigned int in_rs_table:1; /* we are in rs_table */ 712 739 }; 713 740 714 - 715 - /* 716 - * The scheduler object 717 - */ 741 + /* The scheduler object */ 718 742 struct ip_vs_scheduler { 719 743 struct list_head n_list; /* d-linked list head */ 720 744 char *name; /* scheduler name */ ··· 751 781 int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); 752 782 }; 753 783 754 - /* 755 - * The application module object (a.k.a. app incarnation) 756 - */ 784 + /* The application module object (a.k.a. app incarnation) */ 757 785 struct ip_vs_app { 758 786 struct list_head a_list; /* member in app list */ 759 787 int type; /* IP_VS_APP_TYPE_xxx */ ··· 767 799 atomic_t usecnt; /* usage counter */ 768 800 struct rcu_head rcu_head; 769 801 770 - /* 771 - * output hook: Process packet in inout direction, diff set for TCP. 802 + /* output hook: Process packet in inout direction, diff set for TCP. 
772 803 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, 773 804 * 2=Mangled but checksum was not updated 774 805 */ 775 806 int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, 776 807 struct sk_buff *, int *diff); 777 808 778 - /* 779 - * input hook: Process packet in outin direction, diff set for TCP. 809 + /* input hook: Process packet in outin direction, diff set for TCP. 780 810 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, 781 811 * 2=Mangled but checksum was not updated 782 812 */ ··· 833 867 struct netns_ipvs { 834 868 int gen; /* Generation */ 835 869 int enable; /* enable like nf_hooks do */ 836 - /* 837 - * Hash table: for real service lookups 838 - */ 870 + /* Hash table: for real service lookups */ 839 871 #define IP_VS_RTAB_BITS 4 840 872 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) 841 873 #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) ··· 867 903 struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; 868 904 #endif 869 905 /* ip_vs_conn */ 870 - atomic_t conn_count; /* connection counter */ 906 + atomic_t conn_count; /* connection counter */ 871 907 872 908 /* ip_vs_ctl */ 873 909 struct ip_vs_stats tot_stats; /* Statistics & est. */ ··· 954 990 char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; 955 991 /* net name space ptr */ 956 992 struct net *net; /* Needed by timer routines */ 957 - /* Number of heterogeneous destinations, needed because 958 - * heterogeneous are not supported when synchronization is 959 - * enabled */ 993 + /* Number of heterogeneous destinations, needed becaus heterogeneous 994 + * are not supported when synchronization is enabled. 
995 + */ 960 996 unsigned int mixed_address_family_dests; 961 997 }; 962 998 ··· 1111 1147 1112 1148 #endif 1113 1149 1114 - /* 1115 - * IPVS core functions 1116 - * (from ip_vs_core.c) 1150 + /* IPVS core functions 1151 + * (from ip_vs_core.c) 1117 1152 */ 1118 1153 const char *ip_vs_proto_name(unsigned int proto); 1119 1154 void ip_vs_init_hash_table(struct list_head *table, int rows); ··· 1120 1157 1121 1158 #define IP_VS_APP_TYPE_FTP 1 1122 1159 1123 - /* 1124 - * ip_vs_conn handling functions 1125 - * (from ip_vs_conn.c) 1160 + /* ip_vs_conn handling functions 1161 + * (from ip_vs_conn.c) 1126 1162 */ 1127 - 1128 1163 enum { 1129 1164 IP_VS_DIR_INPUT = 0, 1130 1165 IP_VS_DIR_OUTPUT, ··· 1253 1292 atomic_inc(&ctl_cp->n_control); 1254 1293 } 1255 1294 1256 - /* 1257 - * IPVS netns init & cleanup functions 1258 - */ 1295 + /* IPVS netns init & cleanup functions */ 1259 1296 int ip_vs_estimator_net_init(struct net *net); 1260 1297 int ip_vs_control_net_init(struct net *net); 1261 1298 int ip_vs_protocol_net_init(struct net *net); ··· 1268 1309 void ip_vs_sync_net_cleanup(struct net *net); 1269 1310 void ip_vs_service_net_cleanup(struct net *net); 1270 1311 1271 - /* 1272 - * IPVS application functions 1273 - * (from ip_vs_app.c) 1312 + /* IPVS application functions 1313 + * (from ip_vs_app.c) 1274 1314 */ 1275 1315 #define IP_VS_APP_MAX_PORTS 8 1276 1316 struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app); ··· 1289 1331 struct ip_vs_pe *ip_vs_pe_getbyname(const char *name); 1290 1332 struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); 1291 1333 1292 - /* 1293 - * Use a #define to avoid all of module.h just for these trivial ops 1294 - */ 1334 + /* Use a #define to avoid all of module.h just for these trivial ops */ 1295 1335 #define ip_vs_pe_get(pe) \ 1296 1336 if (pe && pe->module) \ 1297 1337 __module_get(pe->module); ··· 1298 1342 if (pe && pe->module) \ 1299 1343 module_put(pe->module); 1300 1344 1301 - /* 1302 - * IPVS 
protocol functions (from ip_vs_proto.c) 1303 - */ 1345 + /* IPVS protocol functions (from ip_vs_proto.c) */ 1304 1346 int ip_vs_protocol_init(void); 1305 1347 void ip_vs_protocol_cleanup(void); 1306 1348 void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags); ··· 1316 1362 extern struct ip_vs_protocol ip_vs_protocol_ah; 1317 1363 extern struct ip_vs_protocol ip_vs_protocol_sctp; 1318 1364 1319 - /* 1320 - * Registering/unregistering scheduler functions 1321 - * (from ip_vs_sched.c) 1365 + /* Registering/unregistering scheduler functions 1366 + * (from ip_vs_sched.c) 1322 1367 */ 1323 1368 int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); 1324 1369 int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); ··· 1336 1383 1337 1384 void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg); 1338 1385 1339 - 1340 - /* 1341 - * IPVS control data and functions (from ip_vs_ctl.c) 1342 - */ 1386 + /* IPVS control data and functions (from ip_vs_ctl.c) */ 1343 1387 extern struct ip_vs_stats ip_vs_stats; 1344 1388 extern int sysctl_ip_vs_sync_ver; 1345 1389 ··· 1377 1427 kfree(dest); 1378 1428 } 1379 1429 1380 - /* 1381 - * IPVS sync daemon data and function prototypes 1382 - * (from ip_vs_sync.c) 1430 + /* IPVS sync daemon data and function prototypes 1431 + * (from ip_vs_sync.c) 1383 1432 */ 1384 1433 int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); 1385 1434 int stop_sync_thread(struct net *net, int state); 1386 1435 void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); 1387 1436 1388 - /* 1389 - * IPVS rate estimator prototypes (from ip_vs_est.c) 1390 - */ 1437 + /* IPVS rate estimator prototypes (from ip_vs_est.c) */ 1391 1438 void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); 1392 1439 void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); 1393 1440 void ip_vs_zero_estimator(struct ip_vs_stats *stats); 1394 1441 void 
ip_vs_read_estimator(struct ip_vs_stats_user *dst, 1395 1442 struct ip_vs_stats *stats); 1396 1443 1397 - /* 1398 - * Various IPVS packet transmitters (from ip_vs_xmit.c) 1399 - */ 1444 + /* Various IPVS packet transmitters (from ip_vs_xmit.c) */ 1400 1445 int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1401 1446 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 1402 1447 int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ··· 1422 1477 #endif 1423 1478 1424 1479 #ifdef CONFIG_SYSCTL 1425 - /* 1426 - * This is a simple mechanism to ignore packets when 1427 - * we are loaded. Just set ip_vs_drop_rate to 'n' and 1428 - * we start to drop 1/rate of the packets 1480 + /* This is a simple mechanism to ignore packets when 1481 + * we are loaded. Just set ip_vs_drop_rate to 'n' and 1482 + * we start to drop 1/rate of the packets 1429 1483 */ 1430 - 1431 1484 static inline int ip_vs_todrop(struct netns_ipvs *ipvs) 1432 1485 { 1433 1486 if (!ipvs->drop_rate) ··· 1439 1496 static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } 1440 1497 #endif 1441 1498 1442 - /* 1443 - * ip_vs_fwd_tag returns the forwarding tag of the connection 1444 - */ 1499 + /* ip_vs_fwd_tag returns the forwarding tag of the connection */ 1445 1500 #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) 1446 1501 1447 1502 static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) ··· 1498 1557 return csum_partial(diff, sizeof(diff), oldsum); 1499 1558 } 1500 1559 1501 - /* 1502 - * Forget current conntrack (unconfirmed) and attach notrack entry 1503 - */ 1560 + /* Forget current conntrack (unconfirmed) and attach notrack entry */ 1504 1561 static inline void ip_vs_notrack(struct sk_buff *skb) 1505 1562 { 1506 1563 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) ··· 1515 1576 } 1516 1577 1517 1578 #ifdef CONFIG_IP_VS_NFCT 1518 - /* 1519 - * Netfilter connection tracking 1520 - * (from ip_vs_nfct.c) 1579 + /* Netfilter 
connection tracking 1580 + * (from ip_vs_nfct.c) 1521 1581 */ 1522 1582 static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) 1523 1583 { ··· 1555 1617 static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) 1556 1618 { 1557 1619 } 1558 - /* CONFIG_IP_VS_NFCT */ 1559 - #endif 1620 + #endif /* CONFIG_IP_VS_NFCT */ 1560 1621 1561 1622 static inline int 1562 1623 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 1563 1624 { 1564 - /* 1565 - * We think the overhead of processing active connections is 256 1625 + /* We think the overhead of processing active connections is 256 1566 1626 * times higher than that of inactive connections in average. (This 1567 1627 * 256 times might not be accurate, we will change it later) We 1568 1628 * use the following formula to estimate the overhead now:
+6
include/net/netfilter/br_netfilter.h
··· 1 + #ifndef _BR_NETFILTER_H_ 2 + #define _BR_NETFILTER_H_ 3 + 4 + void br_netfilter_enable(void); 5 + 6 + #endif /* _BR_NETFILTER_H_ */
+2 -117
include/net/netfilter/ipv4/nf_reject.h
··· 1 1 #ifndef _IPV4_NF_REJECT_H 2 2 #define _IPV4_NF_REJECT_H 3 3 4 - #include <net/ip.h> 5 - #include <net/tcp.h> 6 - #include <net/route.h> 7 - #include <net/dst.h> 4 + #include <net/icmp.h> 8 5 9 6 static inline void nf_send_unreach(struct sk_buff *skb_in, int code) 10 7 { 11 8 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); 12 9 } 13 10 14 - /* Send RST reply */ 15 - static void nf_send_reset(struct sk_buff *oldskb, int hook) 16 - { 17 - struct sk_buff *nskb; 18 - const struct iphdr *oiph; 19 - struct iphdr *niph; 20 - const struct tcphdr *oth; 21 - struct tcphdr _otcph, *tcph; 22 - 23 - /* IP header checks: fragment. */ 24 - if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) 25 - return; 26 - 27 - oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), 28 - sizeof(_otcph), &_otcph); 29 - if (oth == NULL) 30 - return; 31 - 32 - /* No RST for RST. */ 33 - if (oth->rst) 34 - return; 35 - 36 - if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 37 - return; 38 - 39 - /* Check checksum */ 40 - if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) 41 - return; 42 - oiph = ip_hdr(oldskb); 43 - 44 - nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + 45 - LL_MAX_HEADER, GFP_ATOMIC); 46 - if (!nskb) 47 - return; 48 - 49 - skb_reserve(nskb, LL_MAX_HEADER); 50 - 51 - skb_reset_network_header(nskb); 52 - niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); 53 - niph->version = 4; 54 - niph->ihl = sizeof(struct iphdr) / 4; 55 - niph->tos = 0; 56 - niph->id = 0; 57 - niph->frag_off = htons(IP_DF); 58 - niph->protocol = IPPROTO_TCP; 59 - niph->check = 0; 60 - niph->saddr = oiph->daddr; 61 - niph->daddr = oiph->saddr; 62 - 63 - skb_reset_transport_header(nskb); 64 - tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 65 - memset(tcph, 0, sizeof(*tcph)); 66 - tcph->source = oth->dest; 67 - tcph->dest = oth->source; 68 - tcph->doff = sizeof(struct tcphdr) / 4; 69 - 70 - if (oth->ack) 71 - tcph->seq = oth->ack_seq; 72 - else 
{ 73 - tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + 74 - oldskb->len - ip_hdrlen(oldskb) - 75 - (oth->doff << 2)); 76 - tcph->ack = 1; 77 - } 78 - 79 - tcph->rst = 1; 80 - tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr, 81 - niph->daddr, 0); 82 - nskb->ip_summed = CHECKSUM_PARTIAL; 83 - nskb->csum_start = (unsigned char *)tcph - nskb->head; 84 - nskb->csum_offset = offsetof(struct tcphdr, check); 85 - 86 - /* ip_route_me_harder expects skb->dst to be set */ 87 - skb_dst_set_noref(nskb, skb_dst(oldskb)); 88 - 89 - nskb->protocol = htons(ETH_P_IP); 90 - if (ip_route_me_harder(nskb, RTN_UNSPEC)) 91 - goto free_nskb; 92 - 93 - niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); 94 - 95 - /* "Never happens" */ 96 - if (nskb->len > dst_mtu(skb_dst(nskb))) 97 - goto free_nskb; 98 - 99 - nf_ct_attach(nskb, oldskb); 100 - 101 - #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 102 - /* If we use ip_local_out for bridged traffic, the MAC source on 103 - * the RST will be ours, instead of the destination's. This confuses 104 - * some routers/firewalls, and they drop the packet. So we need to 105 - * build the eth header using the original destination's MAC as the 106 - * source, and send the RST packet directly. 107 - */ 108 - if (oldskb->nf_bridge) { 109 - struct ethhdr *oeth = eth_hdr(oldskb); 110 - nskb->dev = oldskb->nf_bridge->physindev; 111 - niph->tot_len = htons(nskb->len); 112 - ip_send_check(niph); 113 - if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), 114 - oeth->h_source, oeth->h_dest, nskb->len) < 0) 115 - goto free_nskb; 116 - dev_queue_xmit(nskb); 117 - } else 118 - #endif 119 - ip_local_out(nskb); 120 - 121 - return; 122 - 123 - free_nskb: 124 - kfree_skb(nskb); 125 - } 126 - 11 + void nf_send_reset(struct sk_buff *oldskb, int hook); 127 12 128 13 #endif /* _IPV4_NF_REJECT_H */
+2 -7
include/net/netfilter/nft_reject.h
··· 14 14 15 15 int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr); 16 16 17 - void nft_reject_ipv4_eval(const struct nft_expr *expr, 18 - struct nft_data data[NFT_REG_MAX + 1], 19 - const struct nft_pktinfo *pkt); 20 - 21 - void nft_reject_ipv6_eval(const struct nft_expr *expr, 22 - struct nft_data data[NFT_REG_MAX + 1], 23 - const struct nft_pktinfo *pkt); 17 + int nft_reject_icmp_code(u8 code); 18 + int nft_reject_icmpv6_code(u8 code); 24 19 25 20 #endif
+21
include/uapi/linux/netfilter/nf_tables.h
··· 749 749 * 750 750 * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable 751 751 * @NFT_REJECT_TCP_RST: reject using TCP RST 752 + * @NFT_REJECT_ICMPX_UNREACH: abstracted ICMP unreachable for bridge and inet 752 753 */ 753 754 enum nft_reject_types { 754 755 NFT_REJECT_ICMP_UNREACH, 755 756 NFT_REJECT_TCP_RST, 757 + NFT_REJECT_ICMPX_UNREACH, 756 758 }; 759 + 760 + /** 761 + * enum nft_reject_code - Generic reject codes for IPv4/IPv6 762 + * 763 + * @NFT_REJECT_ICMPX_NO_ROUTE: no route to host / network unreachable 764 + * @NFT_REJECT_ICMPX_PORT_UNREACH: port unreachable 765 + * @NFT_REJECT_ICMPX_HOST_UNREACH: host unreachable 766 + * @NFT_REJECT_ICMPX_ADMIN_PROHIBITED: administratively prohibited 767 + * 768 + * These codes are mapped to real ICMP and ICMPv6 codes. 769 + */ 770 + enum nft_reject_inet_code { 771 + NFT_REJECT_ICMPX_NO_ROUTE = 0, 772 + NFT_REJECT_ICMPX_PORT_UNREACH, 773 + NFT_REJECT_ICMPX_HOST_UNREACH, 774 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED, 775 + __NFT_REJECT_ICMPX_MAX 776 + }; 777 + #define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX + 1) 757 778 758 779 /** 759 780 * enum nft_reject_attributes - nf_tables reject expression netlink attributes
+5
net/bridge/br_netfilter.c
··· 856 856 return NF_ACCEPT; 857 857 } 858 858 859 + void br_netfilter_enable(void) 860 + { 861 + } 862 + EXPORT_SYMBOL_GPL(br_netfilter_enable); 863 + 859 864 /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because 860 865 * br_dev_queue_push_xmit is called afterwards */ 861 866 static struct nf_hook_ops br_nf_ops[] __read_mostly = {
+2
net/bridge/netfilter/nf_tables_bridge.c
··· 34 34 .owner = THIS_MODULE, 35 35 .nops = 1, 36 36 .hooks = { 37 + [NF_BR_PRE_ROUTING] = nft_do_chain_bridge, 37 38 [NF_BR_LOCAL_IN] = nft_do_chain_bridge, 38 39 [NF_BR_FORWARD] = nft_do_chain_bridge, 39 40 [NF_BR_LOCAL_OUT] = nft_do_chain_bridge, 41 + [NF_BR_POST_ROUTING] = nft_do_chain_bridge, 40 42 }, 41 43 }; 42 44
+90 -5
net/bridge/netfilter/nft_reject_bridge.c
··· 14 14 #include <linux/netfilter/nf_tables.h> 15 15 #include <net/netfilter/nf_tables.h> 16 16 #include <net/netfilter/nft_reject.h> 17 + #include <net/netfilter/ipv4/nf_reject.h> 18 + #include <net/netfilter/ipv6/nf_reject.h> 17 19 18 20 static void nft_reject_bridge_eval(const struct nft_expr *expr, 19 21 struct nft_data data[NFT_REG_MAX + 1], 20 22 const struct nft_pktinfo *pkt) 21 23 { 24 + struct nft_reject *priv = nft_expr_priv(expr); 25 + struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); 26 + 22 27 switch (eth_hdr(pkt->skb)->h_proto) { 23 28 case htons(ETH_P_IP): 24 - return nft_reject_ipv4_eval(expr, data, pkt); 29 + switch (priv->type) { 30 + case NFT_REJECT_ICMP_UNREACH: 31 + nf_send_unreach(pkt->skb, priv->icmp_code); 32 + break; 33 + case NFT_REJECT_TCP_RST: 34 + nf_send_reset(pkt->skb, pkt->ops->hooknum); 35 + break; 36 + case NFT_REJECT_ICMPX_UNREACH: 37 + nf_send_unreach(pkt->skb, 38 + nft_reject_icmp_code(priv->icmp_code)); 39 + break; 40 + } 41 + break; 25 42 case htons(ETH_P_IPV6): 26 - return nft_reject_ipv6_eval(expr, data, pkt); 43 + switch (priv->type) { 44 + case NFT_REJECT_ICMP_UNREACH: 45 + nf_send_unreach6(net, pkt->skb, priv->icmp_code, 46 + pkt->ops->hooknum); 47 + break; 48 + case NFT_REJECT_TCP_RST: 49 + nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); 50 + break; 51 + case NFT_REJECT_ICMPX_UNREACH: 52 + nf_send_unreach6(net, pkt->skb, 53 + nft_reject_icmpv6_code(priv->icmp_code), 54 + pkt->ops->hooknum); 55 + break; 56 + } 57 + break; 27 58 default: 28 59 /* No explicit way to reject this protocol, drop it. 
*/ 29 - data[NFT_REG_VERDICT].verdict = NF_DROP; 30 60 break; 31 61 } 62 + data[NFT_REG_VERDICT].verdict = NF_DROP; 63 + } 64 + 65 + static int nft_reject_bridge_init(const struct nft_ctx *ctx, 66 + const struct nft_expr *expr, 67 + const struct nlattr * const tb[]) 68 + { 69 + struct nft_reject *priv = nft_expr_priv(expr); 70 + int icmp_code; 71 + 72 + if (tb[NFTA_REJECT_TYPE] == NULL) 73 + return -EINVAL; 74 + 75 + priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); 76 + switch (priv->type) { 77 + case NFT_REJECT_ICMP_UNREACH: 78 + case NFT_REJECT_ICMPX_UNREACH: 79 + if (tb[NFTA_REJECT_ICMP_CODE] == NULL) 80 + return -EINVAL; 81 + 82 + icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]); 83 + if (priv->type == NFT_REJECT_ICMPX_UNREACH && 84 + icmp_code > NFT_REJECT_ICMPX_MAX) 85 + return -EINVAL; 86 + 87 + priv->icmp_code = icmp_code; 88 + break; 89 + case NFT_REJECT_TCP_RST: 90 + break; 91 + default: 92 + return -EINVAL; 93 + } 94 + return 0; 95 + } 96 + 97 + static int nft_reject_bridge_dump(struct sk_buff *skb, 98 + const struct nft_expr *expr) 99 + { 100 + const struct nft_reject *priv = nft_expr_priv(expr); 101 + 102 + if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) 103 + goto nla_put_failure; 104 + 105 + switch (priv->type) { 106 + case NFT_REJECT_ICMP_UNREACH: 107 + case NFT_REJECT_ICMPX_UNREACH: 108 + if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) 109 + goto nla_put_failure; 110 + break; 111 + } 112 + 113 + return 0; 114 + 115 + nla_put_failure: 116 + return -1; 32 117 } 33 118 34 119 static struct nft_expr_type nft_reject_bridge_type; ··· 121 36 .type = &nft_reject_bridge_type, 122 37 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)), 123 38 .eval = nft_reject_bridge_eval, 124 - .init = nft_reject_init, 125 - .dump = nft_reject_dump, 39 + .init = nft_reject_bridge_init, 40 + .dump = nft_reject_bridge_dump, 126 41 }; 127 42 128 43 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
+1 -1
net/core/skbuff.c
··· 572 572 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 573 573 nf_conntrack_put(skb->nfct); 574 574 #endif 575 - #ifdef CONFIG_BRIDGE_NETFILTER 575 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 576 576 nf_bridge_put(skb->nf_bridge); 577 577 #endif 578 578 /* XXX: IS this still necessary? - JHS */
+1 -1
net/ipv4/ip_output.c
··· 516 516 517 517 hlen = iph->ihl * 4; 518 518 mtu = mtu - hlen; /* Size of data space */ 519 - #ifdef CONFIG_BRIDGE_NETFILTER 519 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 520 520 if (skb->nf_bridge) 521 521 mtu -= nf_bridge_mtu_reduction(skb); 522 522 #endif
+6
net/ipv4/netfilter/Kconfig
··· 61 61 fields such as the source, destination, type of service and 62 62 the packet mark. 63 63 64 + config NF_REJECT_IPV4 65 + tristate "IPv4 packet rejection" 66 + default m if NETFILTER_ADVANCED=n 67 + 64 68 config NFT_REJECT_IPV4 65 69 depends on NF_TABLES_IPV4 70 + select NF_REJECT_IPV4 66 71 default NFT_REJECT 67 72 tristate 68 73 ··· 213 208 config IP_NF_TARGET_REJECT 214 209 tristate "REJECT target support" 215 210 depends on IP_NF_FILTER 211 + select NF_REJECT_IPV4 216 212 default m if NETFILTER_ADVANCED=n 217 213 help 218 214 The REJECT target allows a filtering rule to specify that an ICMP
+3
net/ipv4/netfilter/Makefile
··· 23 23 obj-$(CONFIG_NF_LOG_ARP) += nf_log_arp.o 24 24 obj-$(CONFIG_NF_LOG_IPV4) += nf_log_ipv4.o 25 25 26 + # reject 27 + obj-$(CONFIG_NF_REJECT_IPV4) += nf_reject_ipv4.o 28 + 26 29 # NAT helpers (nf_conntrack) 27 30 obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o 28 31 obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
+1 -1
net/ipv4/netfilter/ipt_REJECT.c
··· 20 20 #include <linux/netfilter/x_tables.h> 21 21 #include <linux/netfilter_ipv4/ip_tables.h> 22 22 #include <linux/netfilter_ipv4/ipt_REJECT.h> 23 - #ifdef CONFIG_BRIDGE_NETFILTER 23 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 24 24 #include <linux/netfilter_bridge.h> 25 25 #endif 26 26
+1 -1
net/ipv4/netfilter/nf_defrag_ipv4.c
··· 50 50 zone = nf_ct_zone((struct nf_conn *)skb->nfct); 51 51 #endif 52 52 53 - #ifdef CONFIG_BRIDGE_NETFILTER 53 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 54 54 if (skb->nf_bridge && 55 55 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) 56 56 return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+127
net/ipv4/netfilter/nf_reject_ipv4.c
··· 1 + /* (C) 1999-2001 Paul `Rusty' Russell 2 + * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #include <net/ip.h> 10 + #include <net/tcp.h> 11 + #include <net/route.h> 12 + #include <net/dst.h> 13 + #include <linux/netfilter_ipv4.h> 14 + 15 + /* Send RST reply */ 16 + void nf_send_reset(struct sk_buff *oldskb, int hook) 17 + { 18 + struct sk_buff *nskb; 19 + const struct iphdr *oiph; 20 + struct iphdr *niph; 21 + const struct tcphdr *oth; 22 + struct tcphdr _otcph, *tcph; 23 + 24 + /* IP header checks: fragment. */ 25 + if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) 26 + return; 27 + 28 + oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), 29 + sizeof(_otcph), &_otcph); 30 + if (oth == NULL) 31 + return; 32 + 33 + /* No RST for RST. */ 34 + if (oth->rst) 35 + return; 36 + 37 + if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 38 + return; 39 + 40 + /* Check checksum */ 41 + if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) 42 + return; 43 + oiph = ip_hdr(oldskb); 44 + 45 + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + 46 + LL_MAX_HEADER, GFP_ATOMIC); 47 + if (!nskb) 48 + return; 49 + 50 + skb_reserve(nskb, LL_MAX_HEADER); 51 + 52 + skb_reset_network_header(nskb); 53 + niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); 54 + niph->version = 4; 55 + niph->ihl = sizeof(struct iphdr) / 4; 56 + niph->tos = 0; 57 + niph->id = 0; 58 + niph->frag_off = htons(IP_DF); 59 + niph->protocol = IPPROTO_TCP; 60 + niph->check = 0; 61 + niph->saddr = oiph->daddr; 62 + niph->daddr = oiph->saddr; 63 + 64 + skb_reset_transport_header(nskb); 65 + tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 66 + memset(tcph, 0, sizeof(*tcph)); 67 + tcph->source = oth->dest; 68 + tcph->dest = 
oth->source; 69 + tcph->doff = sizeof(struct tcphdr) / 4; 70 + 71 + if (oth->ack) 72 + tcph->seq = oth->ack_seq; 73 + else { 74 + tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + 75 + oldskb->len - ip_hdrlen(oldskb) - 76 + (oth->doff << 2)); 77 + tcph->ack = 1; 78 + } 79 + 80 + tcph->rst = 1; 81 + tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr, 82 + niph->daddr, 0); 83 + nskb->ip_summed = CHECKSUM_PARTIAL; 84 + nskb->csum_start = (unsigned char *)tcph - nskb->head; 85 + nskb->csum_offset = offsetof(struct tcphdr, check); 86 + 87 + /* ip_route_me_harder expects skb->dst to be set */ 88 + skb_dst_set_noref(nskb, skb_dst(oldskb)); 89 + 90 + nskb->protocol = htons(ETH_P_IP); 91 + if (ip_route_me_harder(nskb, RTN_UNSPEC)) 92 + goto free_nskb; 93 + 94 + niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); 95 + 96 + /* "Never happens" */ 97 + if (nskb->len > dst_mtu(skb_dst(nskb))) 98 + goto free_nskb; 99 + 100 + nf_ct_attach(nskb, oldskb); 101 + 102 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 103 + /* If we use ip_local_out for bridged traffic, the MAC source on 104 + * the RST will be ours, instead of the destination's. This confuses 105 + * some routers/firewalls, and they drop the packet. So we need to 106 + * build the eth header using the original destination's MAC as the 107 + * source, and send the RST packet directly. 108 + */ 109 + if (oldskb->nf_bridge) { 110 + struct ethhdr *oeth = eth_hdr(oldskb); 111 + nskb->dev = oldskb->nf_bridge->physindev; 112 + niph->tot_len = htons(nskb->len); 113 + ip_send_check(niph); 114 + if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), 115 + oeth->h_source, oeth->h_dest, nskb->len) < 0) 116 + goto free_nskb; 117 + dev_queue_xmit(nskb); 118 + } else 119 + #endif 120 + ip_local_out(nskb); 121 + 122 + return; 123 + 124 + free_nskb: 125 + kfree_skb(nskb); 126 + } 127 + EXPORT_SYMBOL_GPL(nf_send_reset);
+11 -23
net/ipv4/netfilter/nft_masq_ipv4.c
··· 32 32 data[NFT_REG_VERDICT].verdict = verdict; 33 33 } 34 34 35 - static int nft_masq_ipv4_init(const struct nft_ctx *ctx, 36 - const struct nft_expr *expr, 37 - const struct nlattr * const tb[]) 38 - { 39 - int err; 40 - 41 - err = nft_masq_init(ctx, expr, tb); 42 - if (err < 0) 43 - return err; 44 - 45 - nf_nat_masquerade_ipv4_register_notifier(); 46 - return 0; 47 - } 48 - 49 - static void nft_masq_ipv4_destroy(const struct nft_ctx *ctx, 50 - const struct nft_expr *expr) 51 - { 52 - nf_nat_masquerade_ipv4_unregister_notifier(); 53 - } 54 - 55 35 static struct nft_expr_type nft_masq_ipv4_type; 56 36 static const struct nft_expr_ops nft_masq_ipv4_ops = { 57 37 .type = &nft_masq_ipv4_type, 58 38 .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), 59 39 .eval = nft_masq_ipv4_eval, 60 - .init = nft_masq_ipv4_init, 61 - .destroy = nft_masq_ipv4_destroy, 40 + .init = nft_masq_init, 62 41 .dump = nft_masq_dump, 63 42 }; 64 43 ··· 52 73 53 74 static int __init nft_masq_ipv4_module_init(void) 54 75 { 55 - return nft_register_expr(&nft_masq_ipv4_type); 76 + int ret; 77 + 78 + ret = nft_register_expr(&nft_masq_ipv4_type); 79 + if (ret < 0) 80 + return ret; 81 + 82 + nf_nat_masquerade_ipv4_register_notifier(); 83 + 84 + return ret; 56 85 } 57 86 58 87 static void __exit nft_masq_ipv4_module_exit(void) 59 88 { 60 89 nft_unregister_expr(&nft_masq_ipv4_type); 90 + nf_nat_masquerade_ipv4_unregister_notifier(); 61 91 } 62 92 63 93 module_init(nft_masq_ipv4_module_init);
-1
net/ipv4/netfilter/nft_reject_ipv4.c
··· 16 16 #include <linux/netfilter.h> 17 17 #include <linux/netfilter/nf_tables.h> 18 18 #include <net/netfilter/nf_tables.h> 19 - #include <net/icmp.h> 20 19 #include <net/netfilter/ipv4/nf_reject.h> 21 20 #include <net/netfilter/nft_reject.h> 22 21
+6
net/ipv6/netfilter/Kconfig
··· 40 40 fields such as the source, destination, flowlabel, hop-limit and 41 41 the packet mark. 42 42 43 + config NF_REJECT_IPV6 44 + tristate "IPv6 packet rejection" 45 + default m if NETFILTER_ADVANCED=n 46 + 43 47 config NFT_REJECT_IPV6 44 48 depends on NF_TABLES_IPV6 49 + select NF_REJECT_IPV6 45 50 default NFT_REJECT 46 51 tristate 47 52 ··· 213 208 config IP6_NF_TARGET_REJECT 214 209 tristate "REJECT target support" 215 210 depends on IP6_NF_FILTER 211 + select NF_REJECT_IPV6 216 212 default m if NETFILTER_ADVANCED=n 217 213 help 218 214 The REJECT target allows a filtering rule to specify that an ICMPv6
+3
net/ipv6/netfilter/Makefile
··· 27 27 # logging 28 28 obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o 29 29 30 + # reject 31 + obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o 32 + 30 33 # nf_tables 31 34 obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o 32 35 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
+1 -1
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
··· 40 40 zone = nf_ct_zone((struct nf_conn *)skb->nfct); 41 41 #endif 42 42 43 - #ifdef CONFIG_BRIDGE_NETFILTER 43 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 44 44 if (skb->nf_bridge && 45 45 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) 46 46 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+163
net/ipv6/netfilter/nf_reject_ipv6.c
··· 1 + /* (C) 1999-2001 Paul `Rusty' Russell 2 + * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + #include <net/ipv6.h> 9 + #include <net/ip6_route.h> 10 + #include <net/ip6_fib.h> 11 + #include <net/ip6_checksum.h> 12 + #include <linux/netfilter_ipv6.h> 13 + 14 + void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) 15 + { 16 + struct sk_buff *nskb; 17 + struct tcphdr otcph, *tcph; 18 + unsigned int otcplen, hh_len; 19 + int tcphoff, needs_ack; 20 + const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); 21 + struct ipv6hdr *ip6h; 22 + #define DEFAULT_TOS_VALUE 0x0U 23 + const __u8 tclass = DEFAULT_TOS_VALUE; 24 + struct dst_entry *dst = NULL; 25 + u8 proto; 26 + __be16 frag_off; 27 + struct flowi6 fl6; 28 + 29 + if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || 30 + (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { 31 + pr_debug("addr is not unicast.\n"); 32 + return; 33 + } 34 + 35 + proto = oip6h->nexthdr; 36 + tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); 37 + 38 + if ((tcphoff < 0) || (tcphoff > oldskb->len)) { 39 + pr_debug("Cannot get TCP header.\n"); 40 + return; 41 + } 42 + 43 + otcplen = oldskb->len - tcphoff; 44 + 45 + /* IP header checks: fragment, too short. */ 46 + if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { 47 + pr_debug("proto(%d) != IPPROTO_TCP, " 48 + "or too short. otcplen = %d\n", 49 + proto, otcplen); 50 + return; 51 + } 52 + 53 + if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) 54 + BUG(); 55 + 56 + /* No RST for RST. */ 57 + if (otcph.rst) { 58 + pr_debug("RST is set\n"); 59 + return; 60 + } 61 + 62 + /* Check checksum. 
*/ 63 + if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { 64 + pr_debug("TCP checksum is invalid\n"); 65 + return; 66 + } 67 + 68 + memset(&fl6, 0, sizeof(fl6)); 69 + fl6.flowi6_proto = IPPROTO_TCP; 70 + fl6.saddr = oip6h->daddr; 71 + fl6.daddr = oip6h->saddr; 72 + fl6.fl6_sport = otcph.dest; 73 + fl6.fl6_dport = otcph.source; 74 + security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 75 + dst = ip6_route_output(net, NULL, &fl6); 76 + if (dst == NULL || dst->error) { 77 + dst_release(dst); 78 + return; 79 + } 80 + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); 81 + if (IS_ERR(dst)) 82 + return; 83 + 84 + hh_len = (dst->dev->hard_header_len + 15)&~15; 85 + nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) 86 + + sizeof(struct tcphdr) + dst->trailer_len, 87 + GFP_ATOMIC); 88 + 89 + if (!nskb) { 90 + net_dbg_ratelimited("cannot alloc skb\n"); 91 + dst_release(dst); 92 + return; 93 + } 94 + 95 + skb_dst_set(nskb, dst); 96 + 97 + skb_reserve(nskb, hh_len + dst->header_len); 98 + 99 + skb_put(nskb, sizeof(struct ipv6hdr)); 100 + skb_reset_network_header(nskb); 101 + ip6h = ipv6_hdr(nskb); 102 + ip6_flow_hdr(ip6h, tclass, 0); 103 + ip6h->hop_limit = ip6_dst_hoplimit(dst); 104 + ip6h->nexthdr = IPPROTO_TCP; 105 + ip6h->saddr = oip6h->daddr; 106 + ip6h->daddr = oip6h->saddr; 107 + 108 + skb_reset_transport_header(nskb); 109 + tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 110 + /* Truncate to length (no data) */ 111 + tcph->doff = sizeof(struct tcphdr)/4; 112 + tcph->source = otcph.dest; 113 + tcph->dest = otcph.source; 114 + 115 + if (otcph.ack) { 116 + needs_ack = 0; 117 + tcph->seq = otcph.ack_seq; 118 + tcph->ack_seq = 0; 119 + } else { 120 + needs_ack = 1; 121 + tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin 122 + + otcplen - (otcph.doff<<2)); 123 + tcph->seq = 0; 124 + } 125 + 126 + /* Reset flags */ 127 + ((u_int8_t *)tcph)[13] = 0; 128 + tcph->rst = 1; 129 + tcph->ack = needs_ack; 130 + 
tcph->window = 0; 131 + tcph->urg_ptr = 0; 132 + tcph->check = 0; 133 + 134 + /* Adjust TCP checksum */ 135 + tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, 136 + &ipv6_hdr(nskb)->daddr, 137 + sizeof(struct tcphdr), IPPROTO_TCP, 138 + csum_partial(tcph, 139 + sizeof(struct tcphdr), 0)); 140 + 141 + nf_ct_attach(nskb, oldskb); 142 + 143 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 144 + /* If we use ip6_local_out for bridged traffic, the MAC source on 145 + * the RST will be ours, instead of the destination's. This confuses 146 + * some routers/firewalls, and they drop the packet. So we need to 147 + * build the eth header using the original destination's MAC as the 148 + * source, and send the RST packet directly. 149 + */ 150 + if (oldskb->nf_bridge) { 151 + struct ethhdr *oeth = eth_hdr(oldskb); 152 + nskb->dev = oldskb->nf_bridge->physindev; 153 + nskb->protocol = htons(ETH_P_IPV6); 154 + ip6h->payload_len = htons(sizeof(struct tcphdr)); 155 + if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), 156 + oeth->h_source, oeth->h_dest, nskb->len) < 0) 157 + return; 158 + dev_queue_xmit(nskb); 159 + } else 160 + #endif 161 + ip6_local_out(nskb); 162 + } 163 + EXPORT_SYMBOL_GPL(nf_send_reset6);
+11 -23
net/ipv6/netfilter/nft_masq_ipv6.c
··· 32 32 data[NFT_REG_VERDICT].verdict = verdict; 33 33 } 34 34 35 - static int nft_masq_ipv6_init(const struct nft_ctx *ctx, 36 - const struct nft_expr *expr, 37 - const struct nlattr * const tb[]) 38 - { 39 - int err; 40 - 41 - err = nft_masq_init(ctx, expr, tb); 42 - if (err < 0) 43 - return err; 44 - 45 - nf_nat_masquerade_ipv6_register_notifier(); 46 - return 0; 47 - } 48 - 49 - static void nft_masq_ipv6_destroy(const struct nft_ctx *ctx, 50 - const struct nft_expr *expr) 51 - { 52 - nf_nat_masquerade_ipv6_unregister_notifier(); 53 - } 54 - 55 35 static struct nft_expr_type nft_masq_ipv6_type; 56 36 static const struct nft_expr_ops nft_masq_ipv6_ops = { 57 37 .type = &nft_masq_ipv6_type, 58 38 .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), 59 39 .eval = nft_masq_ipv6_eval, 60 - .init = nft_masq_ipv6_init, 61 - .destroy = nft_masq_ipv6_destroy, 40 + .init = nft_masq_init, 62 41 .dump = nft_masq_dump, 63 42 }; 64 43 ··· 52 73 53 74 static int __init nft_masq_ipv6_module_init(void) 54 75 { 55 - return nft_register_expr(&nft_masq_ipv6_type); 76 + int ret; 77 + 78 + ret = nft_register_expr(&nft_masq_ipv6_type); 79 + if (ret < 0) 80 + return ret; 81 + 82 + nf_nat_masquerade_ipv6_register_notifier(); 83 + 84 + return ret; 56 85 } 57 86 58 87 static void __exit nft_masq_ipv6_module_exit(void) 59 88 { 60 89 nft_unregister_expr(&nft_masq_ipv6_type); 90 + nf_nat_masquerade_ipv6_unregister_notifier(); 61 91 } 62 92 63 93 module_init(nft_masq_ipv6_module_init);
+2 -2
net/netfilter/ipset/ip_set_hash_netiface.c
··· 237 237 #define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC) 238 238 239 239 if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { 240 - #ifdef CONFIG_BRIDGE_NETFILTER 240 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 241 241 const struct nf_bridge_info *nf_bridge = skb->nf_bridge; 242 242 243 243 if (!nf_bridge) ··· 474 474 ip6_netmask(&e.ip, e.cidr); 475 475 476 476 if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { 477 - #ifdef CONFIG_BRIDGE_NETFILTER 477 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 478 478 const struct nf_bridge_info *nf_bridge = skb->nf_bridge; 479 479 480 480 if (!nf_bridge)
+1 -1
net/netfilter/nf_log_common.c
··· 158 158 '0' + loginfo->u.log.level, prefix, 159 159 in ? in->name : "", 160 160 out ? out->name : ""); 161 - #ifdef CONFIG_BRIDGE_NETFILTER 161 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 162 162 if (skb->nf_bridge) { 163 163 const struct net_device *physindev; 164 164 const struct net_device *physoutdev;
+2 -2
net/netfilter/nf_queue.c
··· 52 52 dev_put(entry->indev); 53 53 if (entry->outdev) 54 54 dev_put(entry->outdev); 55 - #ifdef CONFIG_BRIDGE_NETFILTER 55 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 56 56 if (entry->skb->nf_bridge) { 57 57 struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge; 58 58 ··· 77 77 dev_hold(entry->indev); 78 78 if (entry->outdev) 79 79 dev_hold(entry->outdev); 80 - #ifdef CONFIG_BRIDGE_NETFILTER 80 + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 81 81 if (entry->skb->nf_bridge) { 82 82 struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge; 83 83 struct net_device *physdev;
+1
net/netfilter/nf_tables_api.c
··· 4163 4163 { 4164 4164 unregister_pernet_subsys(&nf_tables_net_ops); 4165 4165 nfnetlink_subsys_unregister(&nf_tables_subsys); 4166 + rcu_barrier(); 4166 4167 nf_tables_core_module_exit(); 4167 4168 kfree(info); 4168 4169 }
+14 -100
net/netfilter/nft_compat.c
··· 101 101 102 102 static void target_compat_from_user(struct xt_target *t, void *in, void *out) 103 103 { 104 - #ifdef CONFIG_COMPAT 105 - if (t->compat_from_user) { 106 - int pad; 104 + int pad; 107 105 108 - t->compat_from_user(out, in); 109 - pad = XT_ALIGN(t->targetsize) - t->targetsize; 110 - if (pad > 0) 111 - memset(out + t->targetsize, 0, pad); 112 - } else 113 - #endif 114 - memcpy(out, in, XT_ALIGN(t->targetsize)); 115 - } 116 - 117 - static inline int nft_compat_target_offset(struct xt_target *target) 118 - { 119 - #ifdef CONFIG_COMPAT 120 - return xt_compat_target_offset(target); 121 - #else 122 - return 0; 123 - #endif 106 + memcpy(out, in, t->targetsize); 107 + pad = XT_ALIGN(t->targetsize) - t->targetsize; 108 + if (pad > 0) 109 + memset(out + t->targetsize, 0, pad); 124 110 } 125 111 126 112 static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] = { ··· 194 208 module_put(target->me); 195 209 } 196 210 197 - static int 198 - target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in) 199 - { 200 - int ret; 201 - 202 - #ifdef CONFIG_COMPAT 203 - if (t->compat_to_user) { 204 - mm_segment_t old_fs; 205 - void *out; 206 - 207 - out = kmalloc(XT_ALIGN(t->targetsize), GFP_ATOMIC); 208 - if (out == NULL) 209 - return -ENOMEM; 210 - 211 - /* We want to reuse existing compat_to_user */ 212 - old_fs = get_fs(); 213 - set_fs(KERNEL_DS); 214 - t->compat_to_user(out, in); 215 - set_fs(old_fs); 216 - ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out); 217 - kfree(out); 218 - } else 219 - #endif 220 - ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), in); 221 - 222 - return ret; 223 - } 224 - 225 211 static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr) 226 212 { 227 213 const struct xt_target *target = expr->ops->data; ··· 201 243 202 244 if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) || 203 245 nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) || 
204 - target_dump_info(skb, target, info)) 246 + nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(target->targetsize), info)) 205 247 goto nla_put_failure; 206 248 207 249 return 0; ··· 299 341 300 342 static void match_compat_from_user(struct xt_match *m, void *in, void *out) 301 343 { 302 - #ifdef CONFIG_COMPAT 303 - if (m->compat_from_user) { 304 - int pad; 344 + int pad; 305 345 306 - m->compat_from_user(out, in); 307 - pad = XT_ALIGN(m->matchsize) - m->matchsize; 308 - if (pad > 0) 309 - memset(out + m->matchsize, 0, pad); 310 - } else 311 - #endif 312 - memcpy(out, in, XT_ALIGN(m->matchsize)); 346 + memcpy(out, in, m->matchsize); 347 + pad = XT_ALIGN(m->matchsize) - m->matchsize; 348 + if (pad > 0) 349 + memset(out + m->matchsize, 0, pad); 313 350 } 314 351 315 352 static int ··· 357 404 module_put(match->me); 358 405 } 359 406 360 - static int 361 - match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in) 362 - { 363 - int ret; 364 - 365 - #ifdef CONFIG_COMPAT 366 - if (m->compat_to_user) { 367 - mm_segment_t old_fs; 368 - void *out; 369 - 370 - out = kmalloc(XT_ALIGN(m->matchsize), GFP_ATOMIC); 371 - if (out == NULL) 372 - return -ENOMEM; 373 - 374 - /* We want to reuse existing compat_to_user */ 375 - old_fs = get_fs(); 376 - set_fs(KERNEL_DS); 377 - m->compat_to_user(out, in); 378 - set_fs(old_fs); 379 - ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out); 380 - kfree(out); 381 - } else 382 - #endif 383 - ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), in); 384 - 385 - return ret; 386 - } 387 - 388 - static inline int nft_compat_match_offset(struct xt_match *match) 389 - { 390 - #ifdef CONFIG_COMPAT 391 - return xt_compat_match_offset(match); 392 - #else 393 - return 0; 394 - #endif 395 - } 396 - 397 407 static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr) 398 408 { 399 409 void *info = nft_expr_priv(expr); ··· 364 448 365 449 if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) || 366 450 
nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) || 367 - match_dump_info(skb, match, info)) 451 + nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(match->matchsize), info)) 368 452 goto nla_put_failure; 369 453 370 454 return 0; ··· 559 643 return ERR_PTR(-ENOMEM); 560 644 561 645 nft_match->ops.type = &nft_match_type; 562 - nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize) + 563 - nft_compat_match_offset(match)); 646 + nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize)); 564 647 nft_match->ops.eval = nft_match_eval; 565 648 nft_match->ops.init = nft_match_init; 566 649 nft_match->ops.destroy = nft_match_destroy; ··· 629 714 return ERR_PTR(-ENOMEM); 630 715 631 716 nft_target->ops.type = &nft_target_type; 632 - nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize) + 633 - nft_compat_target_offset(target)); 717 + nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); 634 718 nft_target->ops.eval = nft_target_eval; 635 719 nft_target->ops.init = nft_target_init; 636 720 nft_target->ops.destroy = nft_target_destroy;
+37
net/netfilter/nft_reject.c
··· 17 17 #include <linux/netfilter/nf_tables.h> 18 18 #include <net/netfilter/nf_tables.h> 19 19 #include <net/netfilter/nft_reject.h> 20 + #include <linux/icmp.h> 21 + #include <linux/icmpv6.h> 20 22 21 23 const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = { 22 24 [NFTA_REJECT_TYPE] = { .type = NLA_U32 }, ··· 71 69 return -1; 72 70 } 73 71 EXPORT_SYMBOL_GPL(nft_reject_dump); 72 + 73 + static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX] = { 74 + [NFT_REJECT_ICMPX_NO_ROUTE] = ICMP_NET_UNREACH, 75 + [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMP_PORT_UNREACH, 76 + [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMP_HOST_UNREACH, 77 + [NFT_REJECT_ICMPX_ADMIN_PROHIBITED] = ICMP_PKT_FILTERED, 78 + }; 79 + 80 + int nft_reject_icmp_code(u8 code) 81 + { 82 + if (code > NFT_REJECT_ICMPX_MAX) 83 + return -EINVAL; 84 + 85 + return icmp_code_v4[code]; 86 + } 87 + 88 + EXPORT_SYMBOL_GPL(nft_reject_icmp_code); 89 + 90 + 91 + static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX] = { 92 + [NFT_REJECT_ICMPX_NO_ROUTE] = ICMPV6_NOROUTE, 93 + [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMPV6_PORT_UNREACH, 94 + [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMPV6_ADDR_UNREACH, 95 + [NFT_REJECT_ICMPX_ADMIN_PROHIBITED] = ICMPV6_ADM_PROHIBITED, 96 + }; 97 + 98 + int nft_reject_icmpv6_code(u8 code) 99 + { 100 + if (code > NFT_REJECT_ICMPX_MAX) 101 + return -EINVAL; 102 + 103 + return icmp_code_v6[code]; 104 + } 105 + 106 + EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code); 74 107 75 108 MODULE_LICENSE("GPL"); 76 109 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+90 -4
net/netfilter/nft_reject_inet.c
··· 14 14 #include <linux/netfilter/nf_tables.h> 15 15 #include <net/netfilter/nf_tables.h> 16 16 #include <net/netfilter/nft_reject.h> 17 + #include <net/netfilter/ipv4/nf_reject.h> 18 + #include <net/netfilter/ipv6/nf_reject.h> 17 19 18 20 static void nft_reject_inet_eval(const struct nft_expr *expr, 19 21 struct nft_data data[NFT_REG_MAX + 1], 20 22 const struct nft_pktinfo *pkt) 21 23 { 24 + struct nft_reject *priv = nft_expr_priv(expr); 25 + struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); 26 + 22 27 switch (pkt->ops->pf) { 23 28 case NFPROTO_IPV4: 24 - return nft_reject_ipv4_eval(expr, data, pkt); 29 + switch (priv->type) { 30 + case NFT_REJECT_ICMP_UNREACH: 31 + nf_send_unreach(pkt->skb, priv->icmp_code); 32 + break; 33 + case NFT_REJECT_TCP_RST: 34 + nf_send_reset(pkt->skb, pkt->ops->hooknum); 35 + break; 36 + case NFT_REJECT_ICMPX_UNREACH: 37 + nf_send_unreach(pkt->skb, 38 + nft_reject_icmp_code(priv->icmp_code)); 39 + break; 40 + } 41 + break; 25 42 case NFPROTO_IPV6: 26 - return nft_reject_ipv6_eval(expr, data, pkt); 43 + switch (priv->type) { 44 + case NFT_REJECT_ICMP_UNREACH: 45 + nf_send_unreach6(net, pkt->skb, priv->icmp_code, 46 + pkt->ops->hooknum); 47 + break; 48 + case NFT_REJECT_TCP_RST: 49 + nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); 50 + break; 51 + case NFT_REJECT_ICMPX_UNREACH: 52 + nf_send_unreach6(net, pkt->skb, 53 + nft_reject_icmpv6_code(priv->icmp_code), 54 + pkt->ops->hooknum); 55 + break; 56 + } 57 + break; 27 58 } 59 + data[NFT_REG_VERDICT].verdict = NF_DROP; 60 + } 61 + 62 + static int nft_reject_inet_init(const struct nft_ctx *ctx, 63 + const struct nft_expr *expr, 64 + const struct nlattr * const tb[]) 65 + { 66 + struct nft_reject *priv = nft_expr_priv(expr); 67 + int icmp_code; 68 + 69 + if (tb[NFTA_REJECT_TYPE] == NULL) 70 + return -EINVAL; 71 + 72 + priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); 73 + switch (priv->type) { 74 + case NFT_REJECT_ICMP_UNREACH: 75 + case NFT_REJECT_ICMPX_UNREACH: 
76 + if (tb[NFTA_REJECT_ICMP_CODE] == NULL) 77 + return -EINVAL; 78 + 79 + icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]); 80 + if (priv->type == NFT_REJECT_ICMPX_UNREACH && 81 + icmp_code > NFT_REJECT_ICMPX_MAX) 82 + return -EINVAL; 83 + 84 + priv->icmp_code = icmp_code; 85 + break; 86 + case NFT_REJECT_TCP_RST: 87 + break; 88 + default: 89 + return -EINVAL; 90 + } 91 + return 0; 92 + } 93 + 94 + static int nft_reject_inet_dump(struct sk_buff *skb, 95 + const struct nft_expr *expr) 96 + { 97 + const struct nft_reject *priv = nft_expr_priv(expr); 98 + 99 + if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) 100 + goto nla_put_failure; 101 + 102 + switch (priv->type) { 103 + case NFT_REJECT_ICMP_UNREACH: 104 + case NFT_REJECT_ICMPX_UNREACH: 105 + if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) 106 + goto nla_put_failure; 107 + break; 108 + } 109 + 110 + return 0; 111 + 112 + nla_put_failure: 113 + return -1; 28 114 } 29 115 30 116 static struct nft_expr_type nft_reject_inet_type; ··· 118 32 .type = &nft_reject_inet_type, 119 33 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)), 120 34 .eval = nft_reject_inet_eval, 121 - .init = nft_reject_init, 122 - .dump = nft_reject_dump, 35 + .init = nft_reject_inet_init, 36 + .dump = nft_reject_inet_dump, 123 37 }; 124 38 125 39 static struct nft_expr_type nft_reject_inet_type __read_mostly = {
+3
net/netfilter/xt_physdev.c
··· 13 13 #include <linux/netfilter_bridge.h> 14 14 #include <linux/netfilter/xt_physdev.h> 15 15 #include <linux/netfilter/x_tables.h> 16 + #include <net/netfilter/br_netfilter.h> 16 17 17 18 MODULE_LICENSE("GPL"); 18 19 MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>"); ··· 87 86 static int physdev_mt_check(const struct xt_mtchk_param *par) 88 87 { 89 88 const struct xt_physdev_info *info = par->matchinfo; 89 + 90 + br_netfilter_enable(); 90 91 91 92 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || 92 93 info->bitmask & ~XT_PHYSDEV_OP_MASK)