Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'nf-next' of git://1984.lsi.us.es/net-next

+1168 -378
+3
include/linux/netfilter/Kbuild
··· 5 5 header-y += nf_conntrack_sctp.h 6 6 header-y += nf_conntrack_tcp.h 7 7 header-y += nf_conntrack_tuple_common.h 8 + header-y += nf_nat.h 8 9 header-y += nfnetlink.h 10 + header-y += nfnetlink_acct.h 9 11 header-y += nfnetlink_compat.h 10 12 header-y += nfnetlink_conntrack.h 11 13 header-y += nfnetlink_log.h ··· 23 21 header-y += xt_IDLETIMER.h 24 22 header-y += xt_LED.h 25 23 header-y += xt_MARK.h 24 + header-y += xt_nfacct.h 26 25 header-y += xt_NFLOG.h 27 26 header-y += xt_NFQUEUE.h 28 27 header-y += xt_RATEEST.h
+4
include/linux/netfilter/nf_conntrack_common.h
··· 83 83 /* Conntrack is a fake untracked entry */ 84 84 IPS_UNTRACKED_BIT = 12, 85 85 IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), 86 + 87 + /* Conntrack has a userspace helper. */ 88 + IPS_USERSPACE_HELPER_BIT = 13, 89 + IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT), 86 90 }; 87 91 88 92 /* Connection tracking event types */
+27
include/linux/netfilter/nf_conntrack_tuple_common.h
··· 7 7 IP_CT_DIR_MAX 8 8 }; 9 9 10 + /* The protocol-specific manipulable parts of the tuple: always in 11 + * network order 12 + */ 13 + union nf_conntrack_man_proto { 14 + /* Add other protocols here. */ 15 + __be16 all; 16 + 17 + struct { 18 + __be16 port; 19 + } tcp; 20 + struct { 21 + __be16 port; 22 + } udp; 23 + struct { 24 + __be16 id; 25 + } icmp; 26 + struct { 27 + __be16 port; 28 + } dccp; 29 + struct { 30 + __be16 port; 31 + } sctp; 32 + struct { 33 + __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ 34 + } gre; 35 + }; 36 + 10 37 #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) 11 38 12 39 #endif /* _NF_CONNTRACK_TUPLE_COMMON_H */
+25
include/linux/netfilter/nf_nat.h
··· 1 + #ifndef _NETFILTER_NF_NAT_H 2 + #define _NETFILTER_NF_NAT_H 3 + 4 + #include <linux/netfilter.h> 5 + #include <linux/netfilter/nf_conntrack_tuple_common.h> 6 + 7 + #define NF_NAT_RANGE_MAP_IPS 1 8 + #define NF_NAT_RANGE_PROTO_SPECIFIED 2 9 + #define NF_NAT_RANGE_PROTO_RANDOM 4 10 + #define NF_NAT_RANGE_PERSISTENT 8 11 + 12 + struct nf_nat_ipv4_range { 13 + unsigned int flags; 14 + __be32 min_ip; 15 + __be32 max_ip; 16 + union nf_conntrack_man_proto min; 17 + union nf_conntrack_man_proto max; 18 + }; 19 + 20 + struct nf_nat_ipv4_multi_range_compat { 21 + unsigned int rangesize; 22 + struct nf_nat_ipv4_range range[1]; 23 + }; 24 + 25 + #endif /* _NETFILTER_NF_NAT_H */
+2 -1
include/linux/netfilter/nfnetlink.h
··· 48 48 #define NFNL_SUBSYS_ULOG 4 49 49 #define NFNL_SUBSYS_OSF 5 50 50 #define NFNL_SUBSYS_IPSET 6 51 - #define NFNL_SUBSYS_COUNT 7 51 + #define NFNL_SUBSYS_ACCT 7 52 + #define NFNL_SUBSYS_COUNT 8 52 53 53 54 #ifdef __KERNEL__ 54 55
+2 -1
include/linux/netfilter/xt_CT.h
··· 3 3 4 4 #include <linux/types.h> 5 5 6 - #define XT_CT_NOTRACK 0x1 6 + #define XT_CT_NOTRACK 0x1 7 + #define XT_CT_USERSPACE_HELPER 0x2 7 8 8 9 struct xt_ct_target_info { 9 10 __u16 flags;
+13
include/linux/netfilter/xt_nfacct.h
··· 1 + #ifndef _XT_NFACCT_MATCH_H 2 + #define _XT_NFACCT_MATCH_H 3 + 4 + #include <linux/netfilter/nfnetlink_acct.h> 5 + 6 + struct nf_acct; 7 + 8 + struct xt_nfacct_match_info { 9 + char name[NFACCT_NAME_MAX]; 10 + struct nf_acct *nfacct; 11 + }; 12 + 13 + #endif /* _XT_NFACCT_MATCH_H */
+23
include/linux/netfilter/xt_rpfilter.h
··· 1 + #ifndef _XT_RPATH_H 2 + #define _XT_RPATH_H 3 + 4 + #include <linux/types.h> 5 + 6 + enum { 7 + XT_RPFILTER_LOOSE = 1 << 0, 8 + XT_RPFILTER_VALID_MARK = 1 << 1, 9 + XT_RPFILTER_ACCEPT_LOCAL = 1 << 2, 10 + XT_RPFILTER_INVERT = 1 << 3, 11 + #ifdef __KERNEL__ 12 + XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE | 13 + XT_RPFILTER_VALID_MARK | 14 + XT_RPFILTER_ACCEPT_LOCAL | 15 + XT_RPFILTER_INVERT, 16 + #endif 17 + }; 18 + 19 + struct xt_rpfilter_info { 20 + __u8 flags; 21 + }; 22 + 23 + #endif
-1
include/linux/netfilter_ipv4/Kbuild
··· 12 12 header-y += ipt_ecn.h 13 13 header-y += ipt_realm.h 14 14 header-y += ipt_ttl.h 15 - header-y += nf_nat.h
-58
include/linux/netfilter_ipv4/nf_nat.h
··· 1 - #ifndef _LINUX_NF_NAT_H 2 - #define _LINUX_NF_NAT_H 3 - 4 - #include <linux/types.h> 5 - 6 - #define IP_NAT_RANGE_MAP_IPS 1 7 - #define IP_NAT_RANGE_PROTO_SPECIFIED 2 8 - #define IP_NAT_RANGE_PROTO_RANDOM 4 9 - #define IP_NAT_RANGE_PERSISTENT 8 10 - 11 - /* The protocol-specific manipulable parts of the tuple. */ 12 - union nf_conntrack_man_proto { 13 - /* Add other protocols here. */ 14 - __be16 all; 15 - 16 - struct { 17 - __be16 port; 18 - } tcp; 19 - struct { 20 - __be16 port; 21 - } udp; 22 - struct { 23 - __be16 id; 24 - } icmp; 25 - struct { 26 - __be16 port; 27 - } dccp; 28 - struct { 29 - __be16 port; 30 - } sctp; 31 - struct { 32 - __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ 33 - } gre; 34 - }; 35 - 36 - /* Single range specification. */ 37 - struct nf_nat_range { 38 - /* Set to OR of flags above. */ 39 - unsigned int flags; 40 - 41 - /* Inclusive: network order. */ 42 - __be32 min_ip, max_ip; 43 - 44 - /* Inclusive: network order */ 45 - union nf_conntrack_man_proto min, max; 46 - }; 47 - 48 - /* For backwards compat: don't use in modern code. */ 49 - struct nf_nat_multi_range_compat { 50 - unsigned int rangesize; /* Must be 1. */ 51 - 52 - /* hangs off end. */ 53 - struct nf_nat_range range[1]; 54 - }; 55 - 56 - #define nf_nat_multi_range nf_nat_multi_range_compat 57 - 58 - #endif
+2
include/net/ip6_route.h
··· 70 70 extern struct dst_entry * ip6_route_output(struct net *net, 71 71 const struct sock *sk, 72 72 struct flowi6 *fl6); 73 + extern struct dst_entry * ip6_route_lookup(struct net *net, 74 + struct flowi6 *fl6, int flags); 73 75 74 76 extern int ip6_route_init(void); 75 77 extern void ip6_route_cleanup(void);
+2 -2
include/net/netfilter/nf_conntrack_acct.h
··· 15 15 #include <net/netfilter/nf_conntrack_extend.h> 16 16 17 17 struct nf_conn_counter { 18 - u_int64_t packets; 19 - u_int64_t bytes; 18 + atomic64_t packets; 19 + atomic64_t bytes; 20 20 }; 21 21 22 22 static inline
-1
include/net/netfilter/nf_conntrack_expect.h
··· 91 91 92 92 void nf_ct_remove_expectations(struct nf_conn *ct); 93 93 void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); 94 - void nf_ct_remove_userspace_expectations(void); 95 94 96 95 /* Allocate space for an expectation: this is mandatory before calling 97 96 nf_ct_expect_related. You will have to call put afterwards. */
-1
include/net/netfilter/nf_conntrack_tuple.h
··· 12 12 13 13 #include <linux/netfilter/x_tables.h> 14 14 #include <linux/netfilter/nf_conntrack_tuple_common.h> 15 - #include <linux/netfilter_ipv4/nf_nat.h> 16 15 #include <linux/list_nulls.h> 17 16 18 17 /* A `tuple' is a structure containing the information to uniquely
+4 -6
include/net/netfilter/nf_nat.h
··· 1 1 #ifndef _NF_NAT_H 2 2 #define _NF_NAT_H 3 3 #include <linux/netfilter_ipv4.h> 4 - #include <linux/netfilter_ipv4/nf_nat.h> 4 + #include <linux/netfilter/nf_nat.h> 5 5 #include <net/netfilter/nf_conntrack_tuple.h> 6 6 7 - #define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16 8 - 9 7 enum nf_nat_manip_type { 10 - IP_NAT_MANIP_SRC, 11 - IP_NAT_MANIP_DST 8 + NF_NAT_MANIP_SRC, 9 + NF_NAT_MANIP_DST 12 10 }; 13 11 14 12 /* SRC manip occurs POST_ROUTING or LOCAL_IN */ ··· 50 52 51 53 /* Set up the info structure to map into this range. */ 52 54 extern unsigned int nf_nat_setup_info(struct nf_conn *ct, 53 - const struct nf_nat_range *range, 55 + const struct nf_nat_ipv4_range *range, 54 56 enum nf_nat_manip_type maniptype); 55 57 56 58 /* Is this tuple already taken? (not by us)*/
+1 -1
include/net/netfilter/nf_nat_core.h
··· 20 20 static inline int nf_nat_initialized(struct nf_conn *ct, 21 21 enum nf_nat_manip_type manip) 22 22 { 23 - if (manip == IP_NAT_MANIP_SRC) 23 + if (manip == NF_NAT_MANIP_SRC) 24 24 return ct->status & IPS_SRC_NAT_DONE; 25 25 else 26 26 return ct->status & IPS_DST_NAT_DONE;
+5 -12
include/net/netfilter/nf_nat_protocol.h
··· 4 4 #include <net/netfilter/nf_nat.h> 5 5 #include <linux/netfilter/nfnetlink_conntrack.h> 6 6 7 - struct nf_nat_range; 7 + struct nf_nat_ipv4_range; 8 8 9 9 struct nf_nat_protocol { 10 10 /* Protocol number. */ 11 11 unsigned int protonum; 12 - 13 - struct module *me; 14 12 15 13 /* Translate a packet to the target according to manip type. 16 14 Return true if succeeded. */ ··· 28 30 possible. Per-protocol part of tuple is initialized to the 29 31 incoming packet. */ 30 32 void (*unique_tuple)(struct nf_conntrack_tuple *tuple, 31 - const struct nf_nat_range *range, 33 + const struct nf_nat_ipv4_range *range, 32 34 enum nf_nat_manip_type maniptype, 33 35 const struct nf_conn *ct); 34 36 35 - int (*range_to_nlattr)(struct sk_buff *skb, 36 - const struct nf_nat_range *range); 37 - 38 37 int (*nlattr_to_range)(struct nlattr *tb[], 39 - struct nf_nat_range *range); 38 + struct nf_nat_ipv4_range *range); 40 39 }; 41 40 42 41 /* Protocol registration. */ ··· 56 61 const union nf_conntrack_man_proto *max); 57 62 58 63 extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, 59 - const struct nf_nat_range *range, 64 + const struct nf_nat_ipv4_range *range, 60 65 enum nf_nat_manip_type maniptype, 61 66 const struct nf_conn *ct, 62 67 u_int16_t *rover); 63 68 64 - extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb, 65 - const struct nf_nat_range *range); 66 69 extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[], 67 - struct nf_nat_range *range); 70 + struct nf_nat_ipv4_range *range); 68 71 69 72 #endif /*_NF_NAT_PROTO_H*/
+1
net/ipv4/fib_rules.c
··· 67 67 68 68 return err; 69 69 } 70 + EXPORT_SYMBOL_GPL(fib_lookup); 70 71 71 72 static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, 72 73 int flags, struct fib_lookup_arg *arg)
+1
net/ipv4/fib_trie.c
··· 1607 1607 rcu_read_unlock(); 1608 1608 return ret; 1609 1609 } 1610 + EXPORT_SYMBOL_GPL(fib_table_lookup); 1610 1611 1611 1612 /* 1612 1613 * Remove the leaf and return parent.
+10
net/ipv4/netfilter/Kconfig
··· 82 82 83 83 To compile it as a module, choose M here. If unsure, say N. 84 84 85 + config IP_NF_MATCH_RPFILTER 86 + tristate '"rpfilter" reverse path filter match support' 87 + depends on NETFILTER_ADVANCED 88 + ---help--- 89 + This option allows you to match packets whose replies would 90 + go out via the interface the packet came in. 91 + 92 + To compile it as a module, choose M here. If unsure, say N. 93 + The module will be called ipt_rpfilter. 94 + 85 95 config IP_NF_MATCH_TTL 86 96 tristate '"ttl" match support' 87 97 depends on NETFILTER_ADVANCED
+1
net/ipv4/netfilter/Makefile
··· 50 50 # matches 51 51 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o 52 52 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o 53 + obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o 53 54 54 55 # targets 55 56 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
+8 -8
net/ipv4/netfilter/ipt_MASQUERADE.c
··· 30 30 /* FIXME: Multiple targets. --RR */ 31 31 static int masquerade_tg_check(const struct xt_tgchk_param *par) 32 32 { 33 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 33 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 34 34 35 - if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { 35 + if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { 36 36 pr_debug("bad MAP_IPS.\n"); 37 37 return -EINVAL; 38 38 } ··· 49 49 struct nf_conn *ct; 50 50 struct nf_conn_nat *nat; 51 51 enum ip_conntrack_info ctinfo; 52 - struct nf_nat_range newrange; 53 - const struct nf_nat_multi_range_compat *mr; 52 + struct nf_nat_ipv4_range newrange; 53 + const struct nf_nat_ipv4_multi_range_compat *mr; 54 54 const struct rtable *rt; 55 55 __be32 newsrc; 56 56 ··· 79 79 nat->masq_index = par->out->ifindex; 80 80 81 81 /* Transfer from original range. */ 82 - newrange = ((struct nf_nat_range) 83 - { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, 82 + newrange = ((struct nf_nat_ipv4_range) 83 + { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS, 84 84 newsrc, newsrc, 85 85 mr->range[0].min, mr->range[0].max }); 86 86 87 87 /* Hand modified range to generic setup. */ 88 - return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC); 88 + return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); 89 89 } 90 90 91 91 static int ··· 139 139 .name = "MASQUERADE", 140 140 .family = NFPROTO_IPV4, 141 141 .target = masquerade_tg, 142 - .targetsize = sizeof(struct nf_nat_multi_range_compat), 142 + .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), 143 143 .table = "nat", 144 144 .hooks = 1 << NF_INET_POST_ROUTING, 145 145 .checkentry = masquerade_tg_check,
+7 -7
net/ipv4/netfilter/ipt_NETMAP.c
··· 24 24 25 25 static int netmap_tg_check(const struct xt_tgchk_param *par) 26 26 { 27 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 27 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 28 28 29 - if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { 29 + if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) { 30 30 pr_debug("bad MAP_IPS.\n"); 31 31 return -EINVAL; 32 32 } ··· 43 43 struct nf_conn *ct; 44 44 enum ip_conntrack_info ctinfo; 45 45 __be32 new_ip, netmask; 46 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 47 - struct nf_nat_range newrange; 46 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 47 + struct nf_nat_ipv4_range newrange; 48 48 49 49 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || 50 50 par->hooknum == NF_INET_POST_ROUTING || ··· 61 61 new_ip = ip_hdr(skb)->saddr & ~netmask; 62 62 new_ip |= mr->range[0].min_ip & netmask; 63 63 64 - newrange = ((struct nf_nat_range) 65 - { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, 64 + newrange = ((struct nf_nat_ipv4_range) 65 + { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS, 66 66 new_ip, new_ip, 67 67 mr->range[0].min, mr->range[0].max }); 68 68 ··· 74 74 .name = "NETMAP", 75 75 .family = NFPROTO_IPV4, 76 76 .target = netmap_tg, 77 - .targetsize = sizeof(struct nf_nat_multi_range_compat), 77 + .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), 78 78 .table = "nat", 79 79 .hooks = (1 << NF_INET_PRE_ROUTING) | 80 80 (1 << NF_INET_POST_ROUTING) |
+8 -8
net/ipv4/netfilter/ipt_REDIRECT.c
··· 28 28 /* FIXME: Take multiple ranges --RR */ 29 29 static int redirect_tg_check(const struct xt_tgchk_param *par) 30 30 { 31 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 31 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 32 32 33 - if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { 33 + if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { 34 34 pr_debug("bad MAP_IPS.\n"); 35 35 return -EINVAL; 36 36 } ··· 47 47 struct nf_conn *ct; 48 48 enum ip_conntrack_info ctinfo; 49 49 __be32 newdst; 50 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 51 - struct nf_nat_range newrange; 50 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 51 + struct nf_nat_ipv4_range newrange; 52 52 53 53 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || 54 54 par->hooknum == NF_INET_LOCAL_OUT); ··· 76 76 } 77 77 78 78 /* Transfer from original range. */ 79 - newrange = ((struct nf_nat_range) 80 - { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, 79 + newrange = ((struct nf_nat_ipv4_range) 80 + { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS, 81 81 newdst, newdst, 82 82 mr->range[0].min, mr->range[0].max }); 83 83 84 84 /* Hand modified range to generic setup. */ 85 - return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_DST); 85 + return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); 86 86 } 87 87 88 88 static struct xt_target redirect_tg_reg __read_mostly = { 89 89 .name = "REDIRECT", 90 90 .family = NFPROTO_IPV4, 91 91 .target = redirect_tg, 92 - .targetsize = sizeof(struct nf_nat_multi_range_compat), 92 + .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), 93 93 .table = "nat", 94 94 .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), 95 95 .checkentry = redirect_tg_check,
+141
net/ipv4/netfilter/ipt_rpfilter.c
··· 1 + /* 2 + * Copyright (c) 2011 Florian Westphal <fw@strlen.de> 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * based on fib_frontend.c; Author: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 9 + */ 10 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 + #include <linux/module.h> 12 + #include <linux/skbuff.h> 13 + #include <linux/netdevice.h> 14 + #include <linux/ip.h> 15 + #include <net/ip.h> 16 + #include <net/ip_fib.h> 17 + #include <net/route.h> 18 + 19 + #include <linux/netfilter/xt_rpfilter.h> 20 + #include <linux/netfilter/x_tables.h> 21 + 22 + MODULE_LICENSE("GPL"); 23 + MODULE_AUTHOR("Florian Westphal <fw@strlen.de>"); 24 + MODULE_DESCRIPTION("iptables: ipv4 reverse path filter match"); 25 + 26 + /* don't try to find route from mcast/bcast/zeronet */ 27 + static __be32 rpfilter_get_saddr(__be32 addr) 28 + { 29 + if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) || 30 + ipv4_is_zeronet(addr)) 31 + return 0; 32 + return addr; 33 + } 34 + 35 + static bool rpfilter_lookup_reverse(struct flowi4 *fl4, 36 + const struct net_device *dev, u8 flags) 37 + { 38 + struct fib_result res; 39 + bool dev_match; 40 + struct net *net = dev_net(dev); 41 + int ret __maybe_unused; 42 + 43 + if (fib_lookup(net, fl4, &res)) 44 + return false; 45 + 46 + if (res.type != RTN_UNICAST) { 47 + if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL)) 48 + return false; 49 + } 50 + dev_match = false; 51 + #ifdef CONFIG_IP_ROUTE_MULTIPATH 52 + for (ret = 0; ret < res.fi->fib_nhs; ret++) { 53 + struct fib_nh *nh = &res.fi->fib_nh[ret]; 54 + 55 + if (nh->nh_dev == dev) { 56 + dev_match = true; 57 + break; 58 + } 59 + } 60 + #else 61 + if (FIB_RES_DEV(res) == dev) 62 + dev_match = true; 63 + #endif 64 + if (dev_match || flags & XT_RPFILTER_LOOSE) 65 + return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST; 66 + return 
dev_match; 67 + } 68 + 69 + static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) 70 + { 71 + const struct xt_rpfilter_info *info; 72 + const struct iphdr *iph; 73 + struct flowi4 flow; 74 + bool invert; 75 + 76 + info = par->matchinfo; 77 + invert = info->flags & XT_RPFILTER_INVERT; 78 + 79 + if (par->in->flags & IFF_LOOPBACK) 80 + return true ^ invert; 81 + 82 + iph = ip_hdr(skb); 83 + if (ipv4_is_multicast(iph->daddr)) { 84 + if (ipv4_is_zeronet(iph->saddr)) 85 + return ipv4_is_local_multicast(iph->daddr) ^ invert; 86 + flow.flowi4_iif = 0; 87 + } else { 88 + flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex; 89 + } 90 + 91 + flow.daddr = iph->saddr; 92 + flow.saddr = rpfilter_get_saddr(iph->daddr); 93 + flow.flowi4_oif = 0; 94 + flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; 95 + flow.flowi4_tos = RT_TOS(iph->tos); 96 + flow.flowi4_scope = RT_SCOPE_UNIVERSE; 97 + 98 + return rpfilter_lookup_reverse(&flow, par->in, info->flags) ^ invert; 99 + } 100 + 101 + static int rpfilter_check(const struct xt_mtchk_param *par) 102 + { 103 + const struct xt_rpfilter_info *info = par->matchinfo; 104 + unsigned int options = ~XT_RPFILTER_OPTION_MASK; 105 + if (info->flags & options) { 106 + pr_info("unknown options encountered"); 107 + return -EINVAL; 108 + } 109 + 110 + if (strcmp(par->table, "mangle") != 0 && 111 + strcmp(par->table, "raw") != 0) { 112 + pr_info("match only valid in the \'raw\' " 113 + "or \'mangle\' tables, not \'%s\'.\n", par->table); 114 + return -EINVAL; 115 + } 116 + 117 + return 0; 118 + } 119 + 120 + static struct xt_match rpfilter_mt_reg __read_mostly = { 121 + .name = "rpfilter", 122 + .family = NFPROTO_IPV4, 123 + .checkentry = rpfilter_check, 124 + .match = rpfilter_mt, 125 + .matchsize = sizeof(struct xt_rpfilter_info), 126 + .hooks = (1 << NF_INET_PRE_ROUTING), 127 + .me = THIS_MODULE 128 + }; 129 + 130 + static int __init rpfilter_mt_init(void) 131 + { 132 + return 
xt_register_match(&rpfilter_mt_reg); 133 + } 134 + 135 + static void __exit rpfilter_mt_exit(void) 136 + { 137 + xt_unregister_match(&rpfilter_mt_reg); 138 + } 139 + 140 + module_init(rpfilter_mt_init); 141 + module_exit(rpfilter_mt_exit);
+33 -63
net/ipv4/netfilter/nf_nat_core.c
··· 30 30 #include <net/netfilter/nf_nat_helper.h> 31 31 #include <net/netfilter/nf_conntrack_helper.h> 32 32 #include <net/netfilter/nf_conntrack_l3proto.h> 33 - #include <net/netfilter/nf_conntrack_l4proto.h> 34 33 #include <net/netfilter/nf_conntrack_zones.h> 35 34 36 35 static DEFINE_SPINLOCK(nf_nat_lock); ··· 56 57 /* Original src, to ensure we map it consistently if poss. */ 57 58 hash = jhash_3words((__force u32)tuple->src.u3.ip, 58 59 (__force u32)tuple->src.u.all ^ zone, 59 - tuple->dst.protonum, 0); 60 + tuple->dst.protonum, nf_conntrack_hash_rnd); 60 61 return ((u64)hash * net->ipv4.nat_htable_size) >> 32; 61 62 } 62 63 ··· 81 82 * that meet the constraints of range. */ 82 83 static int 83 84 in_range(const struct nf_conntrack_tuple *tuple, 84 - const struct nf_nat_range *range) 85 + const struct nf_nat_ipv4_range *range) 85 86 { 86 87 const struct nf_nat_protocol *proto; 87 88 int ret = 0; 88 89 89 90 /* If we are supposed to map IPs, then we must be in the 90 91 range specified, otherwise let this drag us onto a new src IP. 
*/ 91 - if (range->flags & IP_NAT_RANGE_MAP_IPS) { 92 + if (range->flags & NF_NAT_RANGE_MAP_IPS) { 92 93 if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) || 93 94 ntohl(tuple->src.u3.ip) > ntohl(range->max_ip)) 94 95 return 0; ··· 96 97 97 98 rcu_read_lock(); 98 99 proto = __nf_nat_proto_find(tuple->dst.protonum); 99 - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || 100 - proto->in_range(tuple, IP_NAT_MANIP_SRC, 100 + if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) || 101 + proto->in_range(tuple, NF_NAT_MANIP_SRC, 101 102 &range->min, &range->max)) 102 103 ret = 1; 103 104 rcu_read_unlock(); ··· 122 123 find_appropriate_src(struct net *net, u16 zone, 123 124 const struct nf_conntrack_tuple *tuple, 124 125 struct nf_conntrack_tuple *result, 125 - const struct nf_nat_range *range) 126 + const struct nf_nat_ipv4_range *range) 126 127 { 127 128 unsigned int h = hash_by_src(net, zone, tuple); 128 129 const struct nf_conn_nat *nat; ··· 156 157 */ 157 158 static void 158 159 find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, 159 - const struct nf_nat_range *range, 160 + const struct nf_nat_ipv4_range *range, 160 161 const struct nf_conn *ct, 161 162 enum nf_nat_manip_type maniptype) 162 163 { ··· 165 166 u_int32_t minip, maxip, j; 166 167 167 168 /* No IP mapping? Do nothing. */ 168 - if (!(range->flags & IP_NAT_RANGE_MAP_IPS)) 169 + if (!(range->flags & NF_NAT_RANGE_MAP_IPS)) 169 170 return; 170 171 171 - if (maniptype == IP_NAT_MANIP_SRC) 172 + if (maniptype == NF_NAT_MANIP_SRC) 172 173 var_ipp = &tuple->src.u3.ip; 173 174 else 174 175 var_ipp = &tuple->dst.u3.ip; ··· 188 189 minip = ntohl(range->min_ip); 189 190 maxip = ntohl(range->max_ip); 190 191 j = jhash_2words((__force u32)tuple->src.u3.ip, 191 - range->flags & IP_NAT_RANGE_PERSISTENT ? 192 + range->flags & NF_NAT_RANGE_PERSISTENT ? 
192 193 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0); 193 194 j = ((u64)j * (maxip - minip + 1)) >> 32; 194 195 *var_ipp = htonl(minip + j); ··· 203 204 static void 204 205 get_unique_tuple(struct nf_conntrack_tuple *tuple, 205 206 const struct nf_conntrack_tuple *orig_tuple, 206 - const struct nf_nat_range *range, 207 + const struct nf_nat_ipv4_range *range, 207 208 struct nf_conn *ct, 208 209 enum nf_nat_manip_type maniptype) 209 210 { ··· 218 219 This is only required for source (ie. NAT/masq) mappings. 219 220 So far, we don't do local source mappings, so multiple 220 221 manips not an issue. */ 221 - if (maniptype == IP_NAT_MANIP_SRC && 222 - !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { 222 + if (maniptype == NF_NAT_MANIP_SRC && 223 + !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) { 223 224 /* try the original tuple first */ 224 225 if (in_range(orig_tuple, range)) { 225 226 if (!nf_nat_used_tuple(orig_tuple, ct)) { ··· 246 247 proto = __nf_nat_proto_find(orig_tuple->dst.protonum); 247 248 248 249 /* Only bother mapping if it's not already in range and unique */ 249 - if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { 250 - if (range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) { 250 + if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) { 251 + if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { 251 252 if (proto->in_range(tuple, maniptype, &range->min, 252 253 &range->max) && 253 254 (range->min.all == range->max.all || ··· 266 267 267 268 unsigned int 268 269 nf_nat_setup_info(struct nf_conn *ct, 269 - const struct nf_nat_range *range, 270 + const struct nf_nat_ipv4_range *range, 270 271 enum nf_nat_manip_type maniptype) 271 272 { 272 273 struct net *net = nf_ct_net(ct); ··· 283 284 } 284 285 } 285 286 286 - NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC || 287 - maniptype == IP_NAT_MANIP_DST); 287 + NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC || 288 + maniptype == NF_NAT_MANIP_DST); 288 289 BUG_ON(nf_nat_initialized(ct, maniptype)); 289 290 290 291 /* What we've got will 
look like inverse of reply. Normally ··· 305 306 nf_conntrack_alter_reply(ct, &reply); 306 307 307 308 /* Non-atomic: we own this at the moment. */ 308 - if (maniptype == IP_NAT_MANIP_SRC) 309 + if (maniptype == NF_NAT_MANIP_SRC) 309 310 ct->status |= IPS_SRC_NAT; 310 311 else 311 312 ct->status |= IPS_DST_NAT; 312 313 } 313 314 314 - if (maniptype == IP_NAT_MANIP_SRC) { 315 + if (maniptype == NF_NAT_MANIP_SRC) { 315 316 unsigned int srchash; 316 317 317 318 srchash = hash_by_src(net, nf_ct_zone(ct), ··· 326 327 } 327 328 328 329 /* It's done. */ 329 - if (maniptype == IP_NAT_MANIP_DST) 330 + if (maniptype == NF_NAT_MANIP_DST) 330 331 ct->status |= IPS_DST_NAT_DONE; 331 332 else 332 333 ct->status |= IPS_SRC_NAT_DONE; ··· 360 361 361 362 iph = (void *)skb->data + iphdroff; 362 363 363 - if (maniptype == IP_NAT_MANIP_SRC) { 364 + if (maniptype == NF_NAT_MANIP_SRC) { 364 365 csum_replace4(&iph->check, iph->saddr, target->src.u3.ip); 365 366 iph->saddr = target->src.u3.ip; 366 367 } else { ··· 380 381 unsigned long statusbit; 381 382 enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum); 382 383 383 - if (mtype == IP_NAT_MANIP_SRC) 384 + if (mtype == NF_NAT_MANIP_SRC) 384 385 statusbit = IPS_SRC_NAT; 385 386 else 386 387 statusbit = IPS_DST_NAT; ··· 413 414 struct icmphdr icmp; 414 415 struct iphdr ip; 415 416 } *inside; 416 - const struct nf_conntrack_l4proto *l4proto; 417 - struct nf_conntrack_tuple inner, target; 417 + struct nf_conntrack_tuple target; 418 418 int hdrlen = ip_hdrlen(skb); 419 419 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 420 420 unsigned long statusbit; ··· 445 447 return 0; 446 448 } 447 449 448 - if (manip == IP_NAT_MANIP_SRC) 450 + if (manip == NF_NAT_MANIP_SRC) 449 451 statusbit = IPS_SRC_NAT; 450 452 else 451 453 statusbit = IPS_DST_NAT; ··· 460 462 pr_debug("icmp_reply_translation: translating error %p manip %u " 461 463 "dir %s\n", skb, manip, 462 464 dir == IP_CT_DIR_ORIGINAL ? 
"ORIG" : "REPLY"); 463 - 464 - /* rcu_read_lock()ed by nf_hook_slow */ 465 - l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); 466 - 467 - if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr), 468 - (hdrlen + 469 - sizeof(struct icmphdr) + inside->ip.ihl * 4), 470 - (u_int16_t)AF_INET, inside->ip.protocol, 471 - &inner, l3proto, l4proto)) 472 - return 0; 473 465 474 466 /* Change inner back to look like incoming packet. We do the 475 467 opposite manip on this hook to normal, because it might not ··· 563 575 #include <linux/netfilter/nfnetlink.h> 564 576 #include <linux/netfilter/nfnetlink_conntrack.h> 565 577 566 - static const struct nf_nat_protocol * 567 - nf_nat_proto_find_get(u_int8_t protonum) 568 - { 569 - const struct nf_nat_protocol *p; 570 - 571 - rcu_read_lock(); 572 - p = __nf_nat_proto_find(protonum); 573 - if (!try_module_get(p->me)) 574 - p = &nf_nat_unknown_protocol; 575 - rcu_read_unlock(); 576 - 577 - return p; 578 - } 579 - 580 - static void 581 - nf_nat_proto_put(const struct nf_nat_protocol *p) 582 - { 583 - module_put(p->me); 584 - } 585 - 586 578 static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { 587 579 [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, 588 580 [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, ··· 570 602 571 603 static int nfnetlink_parse_nat_proto(struct nlattr *attr, 572 604 const struct nf_conn *ct, 573 - struct nf_nat_range *range) 605 + struct nf_nat_ipv4_range *range) 574 606 { 575 607 struct nlattr *tb[CTA_PROTONAT_MAX+1]; 576 608 const struct nf_nat_protocol *npt; ··· 580 612 if (err < 0) 581 613 return err; 582 614 583 - npt = nf_nat_proto_find_get(nf_ct_protonum(ct)); 615 + rcu_read_lock(); 616 + npt = __nf_nat_proto_find(nf_ct_protonum(ct)); 584 617 if (npt->nlattr_to_range) 585 618 err = npt->nlattr_to_range(tb, range); 586 - nf_nat_proto_put(npt); 619 + rcu_read_unlock(); 587 620 return err; 588 621 } 589 622 590 623 static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] 
= { 591 624 [CTA_NAT_MINIP] = { .type = NLA_U32 }, 592 625 [CTA_NAT_MAXIP] = { .type = NLA_U32 }, 626 + [CTA_NAT_PROTO] = { .type = NLA_NESTED }, 593 627 }; 594 628 595 629 static int 596 630 nfnetlink_parse_nat(const struct nlattr *nat, 597 - const struct nf_conn *ct, struct nf_nat_range *range) 631 + const struct nf_conn *ct, struct nf_nat_ipv4_range *range) 598 632 { 599 633 struct nlattr *tb[CTA_NAT_MAX+1]; 600 634 int err; ··· 616 646 range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]); 617 647 618 648 if (range->min_ip) 619 - range->flags |= IP_NAT_RANGE_MAP_IPS; 649 + range->flags |= NF_NAT_RANGE_MAP_IPS; 620 650 621 651 if (!tb[CTA_NAT_PROTO]) 622 652 return 0; ··· 633 663 enum nf_nat_manip_type manip, 634 664 const struct nlattr *attr) 635 665 { 636 - struct nf_nat_range range; 666 + struct nf_nat_ipv4_range range; 637 667 638 668 if (nfnetlink_parse_nat(attr, ct, &range) < 0) 639 669 return -EINVAL;
+10 -10
net/ipv4/netfilter/nf_nat_h323.c
··· 398 398 static void ip_nat_q931_expect(struct nf_conn *new, 399 399 struct nf_conntrack_expect *this) 400 400 { 401 - struct nf_nat_range range; 401 + struct nf_nat_ipv4_range range; 402 402 403 403 if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */ 404 404 nf_nat_follow_master(new, this); ··· 409 409 BUG_ON(new->status & IPS_NAT_DONE_MASK); 410 410 411 411 /* Change src to where master sends to */ 412 - range.flags = IP_NAT_RANGE_MAP_IPS; 412 + range.flags = NF_NAT_RANGE_MAP_IPS; 413 413 range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; 414 - nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC); 414 + nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC); 415 415 416 416 /* For DST manip, map port here to where it's expected. */ 417 - range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); 417 + range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); 418 418 range.min = range.max = this->saved_proto; 419 419 range.min_ip = range.max_ip = 420 420 new->master->tuplehash[!this->dir].tuple.src.u3.ip; 421 - nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST); 421 + nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST); 422 422 } 423 423 424 424 /****************************************************************************/ ··· 496 496 static void ip_nat_callforwarding_expect(struct nf_conn *new, 497 497 struct nf_conntrack_expect *this) 498 498 { 499 - struct nf_nat_range range; 499 + struct nf_nat_ipv4_range range; 500 500 501 501 /* This must be a fresh one. */ 502 502 BUG_ON(new->status & IPS_NAT_DONE_MASK); 503 503 504 504 /* Change src to where master sends to */ 505 - range.flags = IP_NAT_RANGE_MAP_IPS; 505 + range.flags = NF_NAT_RANGE_MAP_IPS; 506 506 range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; 507 - nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC); 507 + nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC); 508 508 509 509 /* For DST manip, map port here to where it's expected. */ 510 - range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); 510 + range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); 511 511 range.min = range.max = this->saved_proto; 512 512 range.min_ip = range.max_ip = this->saved_ip; 513 - nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST); 513 + nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST); 514 514 } 515 515 516 516 /****************************************************************************/
+5 -11
net/ipv4/netfilter/nf_nat_helper.c
··· 253 253 struct udphdr *udph; 254 254 int datalen, oldlen; 255 255 256 - /* UDP helpers might accidentally mangle the wrong packet */ 257 - iph = ip_hdr(skb); 258 - if (skb->len < iph->ihl*4 + sizeof(*udph) + 259 - match_offset + match_len) 260 - return 0; 261 - 262 256 if (!skb_make_writable(skb, skb->len)) 263 257 return 0; 264 258 ··· 424 430 void nf_nat_follow_master(struct nf_conn *ct, 425 431 struct nf_conntrack_expect *exp) 426 432 { 427 - struct nf_nat_range range; 433 + struct nf_nat_ipv4_range range; 428 434 429 435 /* This must be a fresh one. */ 430 436 BUG_ON(ct->status & IPS_NAT_DONE_MASK); 431 437 432 438 /* Change src to where master sends to */ 433 - range.flags = IP_NAT_RANGE_MAP_IPS; 439 + range.flags = NF_NAT_RANGE_MAP_IPS; 434 440 range.min_ip = range.max_ip 435 441 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; 436 - nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC); 442 + nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); 437 443 438 444 /* For DST manip, map port here to where it's expected. */ 439 - range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); 445 + range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); 440 446 range.min = range.max = exp->saved_proto; 441 447 range.min_ip = range.max_ip 442 448 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; 443 - nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); 449 + nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); 444 450 } 445 451 EXPORT_SYMBOL(nf_nat_follow_master);
+7 -7
net/ipv4/netfilter/nf_nat_pptp.c
··· 47 47 struct nf_conntrack_tuple t; 48 48 const struct nf_ct_pptp_master *ct_pptp_info; 49 49 const struct nf_nat_pptp *nat_pptp_info; 50 - struct nf_nat_range range; 50 + struct nf_nat_ipv4_range range; 51 51 52 52 ct_pptp_info = &nfct_help(master)->help.ct_pptp_info; 53 53 nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; ··· 88 88 BUG_ON(ct->status & IPS_NAT_DONE_MASK); 89 89 90 90 /* Change src to where master sends to */ 91 - range.flags = IP_NAT_RANGE_MAP_IPS; 91 + range.flags = NF_NAT_RANGE_MAP_IPS; 92 92 range.min_ip = range.max_ip 93 93 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; 94 94 if (exp->dir == IP_CT_DIR_ORIGINAL) { 95 - range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; 95 + range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 96 96 range.min = range.max = exp->saved_proto; 97 97 } 98 - nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC); 98 + nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); 99 99 100 100 /* For DST manip, map port here to where it's expected. */ 101 - range.flags = IP_NAT_RANGE_MAP_IPS; 101 + range.flags = NF_NAT_RANGE_MAP_IPS; 102 102 range.min_ip = range.max_ip 103 103 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; 104 104 if (exp->dir == IP_CT_DIR_REPLY) { 105 - range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; 105 + range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 106 106 range.min = range.max = exp->saved_proto; 107 107 } 108 - nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); 108 + nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); 109 109 } 110 110 111 111 /* outbound packets == from PNS to PAC */
+12 -24
net/ipv4/netfilter/nf_nat_proto_common.c
··· 26 26 { 27 27 __be16 port; 28 28 29 - if (maniptype == IP_NAT_MANIP_SRC) 29 + if (maniptype == NF_NAT_MANIP_SRC) 30 30 port = tuple->src.u.all; 31 31 else 32 32 port = tuple->dst.u.all; ··· 37 37 EXPORT_SYMBOL_GPL(nf_nat_proto_in_range); 38 38 39 39 void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, 40 - const struct nf_nat_range *range, 40 + const struct nf_nat_ipv4_range *range, 41 41 enum nf_nat_manip_type maniptype, 42 42 const struct nf_conn *ct, 43 43 u_int16_t *rover) ··· 46 46 __be16 *portptr; 47 47 u_int16_t off; 48 48 49 - if (maniptype == IP_NAT_MANIP_SRC) 49 + if (maniptype == NF_NAT_MANIP_SRC) 50 50 portptr = &tuple->src.u.all; 51 51 else 52 52 portptr = &tuple->dst.u.all; 53 53 54 54 /* If no range specified... */ 55 - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { 55 + if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { 56 56 /* If it's dst rewrite, can't change port */ 57 - if (maniptype == IP_NAT_MANIP_DST) 57 + if (maniptype == NF_NAT_MANIP_DST) 58 58 return; 59 59 60 60 if (ntohs(*portptr) < 1024) { ··· 75 75 range_size = ntohs(range->max.all) - min + 1; 76 76 } 77 77 78 - if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) 78 + if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) 79 79 off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip, 80 - maniptype == IP_NAT_MANIP_SRC 80 + maniptype == NF_NAT_MANIP_SRC 81 81 ? tuple->dst.u.all 82 82 : tuple->src.u.all); 83 83 else ··· 87 87 *portptr = htons(min + off % range_size); 88 88 if (++i != range_size && nf_nat_used_tuple(tuple, ct)) 89 89 continue; 90 - if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) 90 + if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) 91 91 *rover = off; 92 92 return; 93 93 } ··· 96 96 EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple); 97 97 98 98 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 99 - int nf_nat_proto_range_to_nlattr(struct sk_buff *skb, 100 - const struct nf_nat_range *range) 101 - { 102 - NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.all); 103 - NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.all); 104 - return 0; 105 - 106 - nla_put_failure: 107 - return -1; 108 - } 109 - EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range); 110 - 111 99 int nf_nat_proto_nlattr_to_range(struct nlattr *tb[], 112 100 struct nf_nat_ipv4_range *range) 113 101 { 114 102 if (tb[CTA_PROTONAT_PORT_MIN]) { 115 103 range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]); 116 104 range->max.all = range->min.tcp.port; 117 - range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED; 105 + range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 118 106 } 119 107 if (tb[CTA_PROTONAT_PORT_MAX]) { 120 108 range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]); 121 - range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED; 109 + range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 122 110 } 123 111 return 0; 124 112 } 125 - EXPORT_SYMBOL_GPL(nf_nat_proto_range_to_nlattr); 113 + EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range); 126 114 #endif
+2 -4
net/ipv4/netfilter/nf_nat_proto_dccp.c
··· 24 24 25 25 static void 26 26 dccp_unique_tuple(struct nf_conntrack_tuple *tuple, 27 - const struct nf_nat_range *range, 27 + const struct nf_nat_ipv4_range *range, 28 28 enum nf_nat_manip_type maniptype, 29 29 const struct nf_conn *ct) 30 30 { ··· 54 54 iph = (struct iphdr *)(skb->data + iphdroff); 55 55 hdr = (struct dccp_hdr *)(skb->data + hdroff); 56 56 57 - if (maniptype == IP_NAT_MANIP_SRC) { 57 + if (maniptype == NF_NAT_MANIP_SRC) { 58 58 oldip = iph->saddr; 59 59 newip = tuple->src.u3.ip; 60 60 newport = tuple->src.u.dccp.port; ··· 80 80 81 81 static const struct nf_nat_protocol nf_nat_protocol_dccp = { 82 82 .protonum = IPPROTO_DCCP, 83 - .me = THIS_MODULE, 84 83 .manip_pkt = dccp_manip_pkt, 85 84 .in_range = nf_nat_proto_in_range, 86 85 .unique_tuple = dccp_unique_tuple, 87 86 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 88 - .range_to_nlattr = nf_nat_proto_range_to_nlattr, 89 87 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 90 88 #endif 91 89 };
+4 -6
net/ipv4/netfilter/nf_nat_proto_gre.c
··· 39 39 /* generate unique tuple ... */ 40 40 static void 41 41 gre_unique_tuple(struct nf_conntrack_tuple *tuple, 42 - const struct nf_nat_range *range, 42 + const struct nf_nat_ipv4_range *range, 43 43 enum nf_nat_manip_type maniptype, 44 44 const struct nf_conn *ct) 45 45 { ··· 52 52 if (!ct->master) 53 53 return; 54 54 55 - if (maniptype == IP_NAT_MANIP_SRC) 55 + if (maniptype == NF_NAT_MANIP_SRC) 56 56 keyptr = &tuple->src.u.gre.key; 57 57 else 58 58 keyptr = &tuple->dst.u.gre.key; 59 59 60 - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { 60 + if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { 61 61 pr_debug("%p: NATing GRE PPTP\n", ct); 62 62 min = 1; 63 63 range_size = 0xffff; ··· 99 99 100 100 /* we only have destination manip of a packet, since 'source key' 101 101 * is not present in the packet itself */ 102 - if (maniptype != IP_NAT_MANIP_DST) 102 + if (maniptype != NF_NAT_MANIP_DST) 103 103 return true; 104 104 switch (greh->version) { 105 105 case GRE_VERSION_1701: ··· 119 119 120 120 static const struct nf_nat_protocol gre = { 121 121 .protonum = IPPROTO_GRE, 122 - .me = THIS_MODULE, 123 122 .manip_pkt = gre_manip_pkt, 124 123 .in_range = nf_nat_proto_in_range, 125 124 .unique_tuple = gre_unique_tuple, 126 125 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 127 - .range_to_nlattr = nf_nat_proto_range_to_nlattr, 128 126 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 129 127 #endif 130 128 };
+2 -4
net/ipv4/netfilter/nf_nat_proto_icmp.c
··· 30 30 31 31 static void 32 32 icmp_unique_tuple(struct nf_conntrack_tuple *tuple, 33 - const struct nf_nat_range *range, 33 + const struct nf_nat_ipv4_range *range, 34 34 enum nf_nat_manip_type maniptype, 35 35 const struct nf_conn *ct) 36 36 { ··· 40 40 41 41 range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1; 42 42 /* If no range specified... */ 43 - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) 43 + if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) 44 44 range_size = 0xFFFF; 45 45 46 46 for (i = 0; ; ++id) { ··· 74 74 75 75 const struct nf_nat_protocol nf_nat_protocol_icmp = { 76 76 .protonum = IPPROTO_ICMP, 77 - .me = THIS_MODULE, 78 77 .manip_pkt = icmp_manip_pkt, 79 78 .in_range = icmp_in_range, 80 79 .unique_tuple = icmp_unique_tuple, 81 80 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 82 - .range_to_nlattr = nf_nat_proto_range_to_nlattr, 83 81 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 84 82 #endif 85 83 };
+2 -4
net/ipv4/netfilter/nf_nat_proto_sctp.c
··· 19 19 20 20 static void 21 21 sctp_unique_tuple(struct nf_conntrack_tuple *tuple, 22 - const struct nf_nat_range *range, 22 + const struct nf_nat_ipv4_range *range, 23 23 enum nf_nat_manip_type maniptype, 24 24 const struct nf_conn *ct) 25 25 { ··· 46 46 iph = (struct iphdr *)(skb->data + iphdroff); 47 47 hdr = (struct sctphdr *)(skb->data + hdroff); 48 48 49 - if (maniptype == IP_NAT_MANIP_SRC) { 49 + if (maniptype == NF_NAT_MANIP_SRC) { 50 50 /* Get rid of src ip and src pt */ 51 51 oldip = iph->saddr; 52 52 newip = tuple->src.u3.ip; ··· 70 70 71 71 static const struct nf_nat_protocol nf_nat_protocol_sctp = { 72 72 .protonum = IPPROTO_SCTP, 73 - .me = THIS_MODULE, 74 73 .manip_pkt = sctp_manip_pkt, 75 74 .in_range = nf_nat_proto_in_range, 76 75 .unique_tuple = sctp_unique_tuple, 77 76 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 78 - .range_to_nlattr = nf_nat_proto_range_to_nlattr, 79 77 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 80 78 #endif 81 79 };
+2 -4
net/ipv4/netfilter/nf_nat_proto_tcp.c
··· 23 23 24 24 static void 25 25 tcp_unique_tuple(struct nf_conntrack_tuple *tuple, 26 - const struct nf_nat_range *range, 26 + const struct nf_nat_ipv4_range *range, 27 27 enum nf_nat_manip_type maniptype, 28 28 const struct nf_conn *ct) 29 29 { ··· 55 55 iph = (struct iphdr *)(skb->data + iphdroff); 56 56 hdr = (struct tcphdr *)(skb->data + hdroff); 57 57 58 - if (maniptype == IP_NAT_MANIP_SRC) { 58 + if (maniptype == NF_NAT_MANIP_SRC) { 59 59 /* Get rid of src ip and src pt */ 60 60 oldip = iph->saddr; 61 61 newip = tuple->src.u3.ip; ··· 82 82 83 83 const struct nf_nat_protocol nf_nat_protocol_tcp = { 84 84 .protonum = IPPROTO_TCP, 85 - .me = THIS_MODULE, 86 85 .manip_pkt = tcp_manip_pkt, 87 86 .in_range = nf_nat_proto_in_range, 88 87 .unique_tuple = tcp_unique_tuple, 89 88 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 90 - .range_to_nlattr = nf_nat_proto_range_to_nlattr, 91 89 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 92 90 #endif 93 91 };
+2 -4
net/ipv4/netfilter/nf_nat_proto_udp.c
··· 22 22 23 23 static void 24 24 udp_unique_tuple(struct nf_conntrack_tuple *tuple, 25 - const struct nf_nat_range *range, 25 + const struct nf_nat_ipv4_range *range, 26 26 enum nf_nat_manip_type maniptype, 27 27 const struct nf_conn *ct) 28 28 { ··· 47 47 iph = (struct iphdr *)(skb->data + iphdroff); 48 48 hdr = (struct udphdr *)(skb->data + hdroff); 49 49 50 - if (maniptype == IP_NAT_MANIP_SRC) { 50 + if (maniptype == NF_NAT_MANIP_SRC) { 51 51 /* Get rid of src ip and src pt */ 52 52 oldip = iph->saddr; 53 53 newip = tuple->src.u3.ip; ··· 73 73 74 74 const struct nf_nat_protocol nf_nat_protocol_udp = { 75 75 .protonum = IPPROTO_UDP, 76 - .me = THIS_MODULE, 77 76 .manip_pkt = udp_manip_pkt, 78 77 .in_range = nf_nat_proto_in_range, 79 78 .unique_tuple = udp_unique_tuple, 80 79 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 81 - .range_to_nlattr = nf_nat_proto_range_to_nlattr, 82 80 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 83 81 #endif 84 82 };
+2 -4
net/ipv4/netfilter/nf_nat_proto_udplite.c
··· 21 21 22 22 static void 23 23 udplite_unique_tuple(struct nf_conntrack_tuple *tuple, 24 - const struct nf_nat_range *range, 24 + const struct nf_nat_ipv4_range *range, 25 25 enum nf_nat_manip_type maniptype, 26 26 const struct nf_conn *ct) 27 27 { ··· 47 47 iph = (struct iphdr *)(skb->data + iphdroff); 48 48 hdr = (struct udphdr *)(skb->data + hdroff); 49 49 50 - if (maniptype == IP_NAT_MANIP_SRC) { 50 + if (maniptype == NF_NAT_MANIP_SRC) { 51 51 /* Get rid of src ip and src pt */ 52 52 oldip = iph->saddr; 53 53 newip = tuple->src.u3.ip; ··· 72 72 73 73 static const struct nf_nat_protocol nf_nat_protocol_udplite = { 74 74 .protonum = IPPROTO_UDPLITE, 75 - .me = THIS_MODULE, 76 75 .manip_pkt = udplite_manip_pkt, 77 76 .in_range = nf_nat_proto_in_range, 78 77 .unique_tuple = udplite_unique_tuple, 79 78 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 80 - .range_to_nlattr = nf_nat_proto_range_to_nlattr, 81 79 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 82 80 #endif 83 81 };
+1 -2
net/ipv4/netfilter/nf_nat_proto_unknown.c
··· 27 27 } 28 28 29 29 static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple, 30 - const struct nf_nat_range *range, 30 + const struct nf_nat_ipv4_range *range, 31 31 enum nf_nat_manip_type maniptype, 32 32 const struct nf_conn *ct) 33 33 { ··· 46 46 } 47 47 48 48 const struct nf_nat_protocol nf_nat_unknown_protocol = { 49 - /* .me isn't set: getting a ref to this cannot fail. */ 50 49 .manip_pkt = unknown_manip_pkt, 51 50 .in_range = unknown_in_range, 52 51 .unique_tuple = unknown_unique_tuple,
+11 -11
net/ipv4/netfilter/nf_nat_rule.c
··· 44 44 { 45 45 struct nf_conn *ct; 46 46 enum ip_conntrack_info ctinfo; 47 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 47 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 48 48 49 49 NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING || 50 50 par->hooknum == NF_INET_LOCAL_IN); ··· 56 56 ctinfo == IP_CT_RELATED_REPLY)); 57 57 NF_CT_ASSERT(par->out != NULL); 58 58 59 - return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC); 59 + return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC); 60 60 } 61 61 62 62 static unsigned int ··· 64 64 { 65 65 struct nf_conn *ct; 66 66 enum ip_conntrack_info ctinfo; 67 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 67 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 68 68 69 69 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || 70 70 par->hooknum == NF_INET_LOCAL_OUT); ··· 74 74 /* Connection must be valid and new. */ 75 75 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); 76 76 77 - return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST); 77 + return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST); 78 78 } 79 79 80 80 static int ipt_snat_checkentry(const struct xt_tgchk_param *par) 81 81 { 82 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 82 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 83 83 84 84 /* Must be a valid range */ 85 85 if (mr->rangesize != 1) { ··· 91 91 92 92 static int ipt_dnat_checkentry(const struct xt_tgchk_param *par) 93 93 { 94 - const struct nf_nat_multi_range_compat *mr = par->targinfo; 94 + const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 95 95 96 96 /* Must be a valid range */ 97 97 if (mr->rangesize != 1) { ··· 105 105 alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) 106 106 { 107 107 /* Force range to this IP; let proto decide mapping for 108 - per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). 108 + per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED). 109 109 */ 110 - struct nf_nat_range range; 110 + struct nf_nat_ipv4_range range; 111 111 112 112 range.flags = 0; 113 113 pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, 114 - HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC ? 114 + HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? 115 115 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip : 116 116 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip); 117 117 ··· 140 140 static struct xt_target ipt_snat_reg __read_mostly = { 141 141 .name = "SNAT", 142 142 .target = ipt_snat_target, 143 - .targetsize = sizeof(struct nf_nat_multi_range_compat), 143 + .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), 144 144 .table = "nat", 145 145 .hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), 146 146 .checkentry = ipt_snat_checkentry, ··· 150 150 static struct xt_target ipt_dnat_reg __read_mostly = { 151 151 .name = "DNAT", 152 152 .target = ipt_dnat_target, 153 - .targetsize = sizeof(struct nf_nat_multi_range_compat), 153 + .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), 154 154 .table = "nat", 155 155 .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), 156 156 .checkentry = ipt_dnat_checkentry,
+5 -5
net/ipv4/netfilter/nf_nat_sip.c
··· 249 249 static void ip_nat_sip_expected(struct nf_conn *ct, 250 250 struct nf_conntrack_expect *exp) 251 251 { 252 - struct nf_nat_range range; 252 + struct nf_nat_ipv4_range range; 253 253 254 254 /* This must be a fresh one. */ 255 255 BUG_ON(ct->status & IPS_NAT_DONE_MASK); 256 256 257 257 /* For DST manip, map port here to where it's expected. */ 258 - range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); 258 + range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); 259 259 range.min = range.max = exp->saved_proto; 260 260 range.min_ip = range.max_ip = exp->saved_ip; 261 - nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); 261 + nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); 262 262 263 263 /* Change src to where master sends to, but only if the connection 264 264 * actually came from the same source. */ 265 265 if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 266 266 ct->master->tuplehash[exp->dir].tuple.src.u3.ip) { 267 - range.flags = IP_NAT_RANGE_MAP_IPS; 267 + range.flags = NF_NAT_RANGE_MAP_IPS; 268 268 range.min_ip = range.max_ip 269 269 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; 270 - nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC); 270 + nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); 271 271 } 272 272 } 273 273
+1 -1
net/ipv4/netfilter/nf_nat_standalone.c
··· 137 137 return ret; 138 138 } else 139 139 pr_debug("Already setup manip %s for ct %p\n", 140 - maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST", 140 + maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", 141 141 ct); 142 142 break; 143 143
+10
net/ipv6/netfilter/Kconfig
··· 125 125 126 126 To compile it as a module, choose M here. If unsure, say N. 127 127 128 + config IP6_NF_MATCH_RPFILTER 129 + tristate '"rpfilter" reverse path filter match support' 130 + depends on NETFILTER_ADVANCED 131 + ---help--- 132 + This option allows you to match packets whose replies would 133 + go out via the interface the packet came in. 134 + 135 + To compile it as a module, choose M here. If unsure, say N. 136 + The module will be called ip6t_rpfilter. 137 + 128 138 config IP6_NF_MATCH_RT 129 139 tristate '"rt" Routing header match support' 130 140 depends on NETFILTER_ADVANCED
+1
net/ipv6/netfilter/Makefile
··· 27 27 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o 28 28 obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o 29 29 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o 30 + obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o 30 31 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o 31 32 32 33 # targets
+133
net/ipv6/netfilter/ip6t_rpfilter.c
··· 1 + /* 2 + * Copyright (c) 2011 Florian Westphal <fw@strlen.de> 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 + #include <linux/module.h> 10 + #include <linux/skbuff.h> 11 + #include <linux/netdevice.h> 12 + #include <linux/route.h> 13 + #include <net/ip6_fib.h> 14 + #include <net/ip6_route.h> 15 + 16 + #include <linux/netfilter/xt_rpfilter.h> 17 + #include <linux/netfilter/x_tables.h> 18 + 19 + MODULE_LICENSE("GPL"); 20 + MODULE_AUTHOR("Florian Westphal <fw@strlen.de>"); 21 + MODULE_DESCRIPTION("Xtables: IPv6 reverse path filter match"); 22 + 23 + static bool rpfilter_addr_unicast(const struct in6_addr *addr) 24 + { 25 + int addr_type = ipv6_addr_type(addr); 26 + return addr_type & IPV6_ADDR_UNICAST; 27 + } 28 + 29 + static bool rpfilter_lookup_reverse6(const struct sk_buff *skb, 30 + const struct net_device *dev, u8 flags) 31 + { 32 + struct rt6_info *rt; 33 + struct ipv6hdr *iph = ipv6_hdr(skb); 34 + bool ret = false; 35 + struct flowi6 fl6 = { 36 + .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK, 37 + .flowi6_proto = iph->nexthdr, 38 + .daddr = iph->saddr, 39 + }; 40 + int lookup_flags; 41 + 42 + if (rpfilter_addr_unicast(&iph->daddr)) { 43 + memcpy(&fl6.saddr, &iph->daddr, sizeof(struct in6_addr)); 44 + lookup_flags = RT6_LOOKUP_F_HAS_SADDR; 45 + } else { 46 + lookup_flags = 0; 47 + } 48 + 49 + fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; 50 + if ((flags & XT_RPFILTER_LOOSE) == 0) { 51 + fl6.flowi6_oif = dev->ifindex; 52 + lookup_flags |= RT6_LOOKUP_F_IFACE; 53 + } 54 + 55 + rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags); 56 + if (rt->dst.error) 57 + goto out; 58 + 59 + if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST)) 60 + goto out; 61 + 62 + if (rt->rt6i_flags & RTF_LOCAL) { 63 + ret = flags & XT_RPFILTER_ACCEPT_LOCAL; 64 + goto out; 65 + } 66 + 67 + if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) 68 + ret = true; 69 + out: 70 + dst_release(&rt->dst); 71 + return ret; 72 + } 73 + 74 + static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) 75 + { 76 + const struct xt_rpfilter_info *info = par->matchinfo; 77 + int saddrtype; 78 + struct ipv6hdr *iph; 79 + bool invert = info->flags & XT_RPFILTER_INVERT; 80 + 81 + if (par->in->flags & IFF_LOOPBACK) 82 + return true ^ invert; 83 + 84 + iph = ipv6_hdr(skb); 85 + saddrtype = ipv6_addr_type(&iph->saddr); 86 + if (unlikely(saddrtype == IPV6_ADDR_ANY)) 87 + return true ^ invert; /* not routable: forward path will drop it */ 88 + 89 + return rpfilter_lookup_reverse6(skb, par->in, info->flags) ^ invert; 90 + } 91 + 92 + static int rpfilter_check(const struct xt_mtchk_param *par) 93 + { 94 + const struct xt_rpfilter_info *info = par->matchinfo; 95 + unsigned int options = ~XT_RPFILTER_OPTION_MASK; 96 + 97 + if (info->flags & options) { 98 + pr_info("unknown options encountered"); 99 + return -EINVAL; 100 + } 101 + 102 + if (strcmp(par->table, "mangle") != 0 && 103 + strcmp(par->table, "raw") != 0) { 104 + pr_info("match only valid in the \'raw\' " 105 + "or \'mangle\' tables, not \'%s\'.\n", par->table); 106 + return -EINVAL; 107 + } 108 + 109 + return 0; 110 + } 111 + 112 + static struct xt_match rpfilter_mt_reg __read_mostly = { 113 + .name = "rpfilter", 114 + .family = NFPROTO_IPV6, 115 + .checkentry = rpfilter_check, 116 + .match = rpfilter_mt, 117 + .matchsize = sizeof(struct xt_rpfilter_info), 118 + .hooks = (1 << NF_INET_PRE_ROUTING), 119 + .me = THIS_MODULE 120 + }; 121 + 122 + static int __init rpfilter_mt_init(void) 123 + { 124 + return xt_register_match(&rpfilter_mt_reg); 125 + } 126 + 127 + static void __exit rpfilter_mt_exit(void) 128 + { 129 + xt_unregister_match(&rpfilter_mt_reg); 130 + } 131 + 132 + module_init(rpfilter_mt_init); 133 + module_exit(rpfilter_mt_exit);
+7
net/ipv6/route.c
··· 658 658 659 659 } 660 660 661 + struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6, 662 + int flags) 663 + { 664 + return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup); 665 + } 666 + EXPORT_SYMBOL_GPL(ip6_route_lookup); 667 + 661 668 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, 662 669 const struct in6_addr *saddr, int oif, int strict) 663 670 {
+18
net/netfilter/Kconfig
··· 4 4 config NETFILTER_NETLINK 5 5 tristate 6 6 7 + config NETFILTER_NETLINK_ACCT 8 + tristate "Netfilter NFACCT over NFNETLINK interface" 9 + depends on NETFILTER_ADVANCED 10 + select NETFILTER_NETLINK 11 + help 12 + If this option is enabled, the kernel will include support 13 + for extended accounting via NFNETLINK. 14 + 7 15 config NETFILTER_NETLINK_QUEUE 8 16 tristate "Netfilter NFQUEUE over NFNETLINK interface" 9 17 depends on NETFILTER_ADVANCED ··· 884 876 Multiport matching allows you to match TCP or UDP packets based on 885 877 a series of source or destination ports: normally a rule can only 886 878 match a single range of ports. 879 + 880 + To compile it as a module, choose M here. If unsure, say N. 881 + 882 + config NETFILTER_XT_MATCH_NFACCT 883 + tristate '"nfacct" match support' 884 + default m if NETFILTER_ADVANCED=n 885 + select NETFILTER_NETLINK_ACCT 886 + help 887 + This option allows you to use the extended accounting through 888 + nfnetlink_acct. 887 889 888 890 To compile it as a module, choose M here. If unsure, say N. 889 891
+2
net/netfilter/Makefile
··· 7 7 obj-$(CONFIG_NETFILTER) = netfilter.o 8 8 9 9 obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o 10 + obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o 10 11 obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o 11 12 obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o 12 13 ··· 91 90 obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o 92 91 obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o 93 92 obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o 93 + obj-$(CONFIG_NETFILTER_XT_MATCH_NFACCT) += xt_nfacct.o 94 94 obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o 95 95 obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o 96 96 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
+15
net/netfilter/ipvs/Kconfig
··· 232 232 If you want to compile it in kernel, say Y. To compile it as a 233 233 module, choose M here. If unsure, say N. 234 234 235 + comment 'IPVS SH scheduler' 236 + 237 + config IP_VS_SH_TAB_BITS 238 + int "IPVS source hashing table size (the Nth power of 2)" 239 + range 4 20 240 + default 8 241 + ---help--- 242 + The source hashing scheduler maps source IPs to destinations 243 + stored in a hash table. This table is tiled by each destination 244 + until all slots in the table are filled. When using weights to 245 + allow destinations to receive more connections, the table is 246 + tiled an amount proportional to the weights specified. The table 247 + needs to be large enough to effectively fit all the destinations 248 + multiplied by their respective weights. 249 + 235 250 comment 'IPVS application helper' 236 251 237 252 config IP_VS_FTP
+17 -1
net/netfilter/ipvs/ip_vs_sh.c
··· 30 30 * server is dead or overloaded, the load balancer can bypass the cache 31 31 * server and send requests to the original server directly. 32 32 * 33 + * The weight destination attribute can be used to control the 34 + * distribution of connections to the destinations in servernode. The 35 + * greater the weight, the more connections the destination 36 + * will receive. 37 + * 33 38 */ 34 39 35 40 #define KMSG_COMPONENT "IPVS" ··· 104 99 struct ip_vs_sh_bucket *b; 105 100 struct list_head *p; 106 101 struct ip_vs_dest *dest; 102 + int d_count; 107 103 108 104 b = tbl; 109 105 p = &svc->destinations; 106 + d_count = 0; 110 107 for (i=0; i<IP_VS_SH_TAB_SIZE; i++) { 111 108 if (list_empty(p)) { 112 109 b->dest = NULL; ··· 120 113 atomic_inc(&dest->refcnt); 121 114 b->dest = dest; 122 115 123 - p = p->next; 116 + IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n", 117 + i, IP_VS_DBG_ADDR(svc->af, &dest->addr), 118 + atomic_read(&dest->weight)); 119 + 120 + /* Don't move to next dest until filling weight */ 121 + if (++d_count >= atomic_read(&dest->weight)) { 122 + p = p->next; 123 + d_count = 0; 124 + } 125 + 124 126 } 125 127 b++; 126 128 }
+2 -2
net/netfilter/nf_conntrack_acct.c
··· 46 46 return 0; 47 47 48 48 return seq_printf(s, "packets=%llu bytes=%llu ", 49 - (unsigned long long)acct[dir].packets, 50 - (unsigned long long)acct[dir].bytes); 49 + (unsigned long long)atomic64_read(&acct[dir].packets), 50 + (unsigned long long)atomic64_read(&acct[dir].bytes)); 51 51 }; 52 52 EXPORT_SYMBOL_GPL(seq_print_acct); 53 53
+6 -9
net/netfilter/nf_conntrack_core.c
··· 67 67 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); 68 68 69 69 unsigned int nf_conntrack_hash_rnd __read_mostly; 70 + EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd); 70 71 71 72 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone) 72 73 { ··· 1045 1044 1046 1045 acct = nf_conn_acct_find(ct); 1047 1046 if (acct) { 1048 - spin_lock_bh(&ct->lock); 1049 - acct[CTINFO2DIR(ctinfo)].packets++; 1050 - acct[CTINFO2DIR(ctinfo)].bytes += skb->len; 1051 - spin_unlock_bh(&ct->lock); 1047 + atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); 1048 + atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes); 1052 1049 } 1053 1050 } 1054 1051 } ··· 1062 1063 1063 1064 acct = nf_conn_acct_find(ct); 1064 1065 if (acct) { 1065 - spin_lock_bh(&ct->lock); 1066 - acct[CTINFO2DIR(ctinfo)].packets++; 1067 - acct[CTINFO2DIR(ctinfo)].bytes += 1068 - skb->len - skb_network_offset(skb); 1069 - spin_unlock_bh(&ct->lock); 1066 + atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); 1067 + atomic64_add(skb->len - skb_network_offset(skb), 1068 + &acct[CTINFO2DIR(ctinfo)].bytes); 1070 1069 } 1071 1070 } 1072 1071
+21 -42
net/netfilter/nf_conntrack_expect.c
··· 38 38 39 39 static struct kmem_cache *nf_ct_expect_cachep __read_mostly; 40 40 41 - static HLIST_HEAD(nf_ct_userspace_expect_list); 42 - 43 41 /* nf_conntrack_expect helper functions */ 44 42 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, 45 43 u32 pid, int report) ··· 45 47 struct nf_conn_help *master_help = nfct_help(exp->master); 46 48 struct net *net = nf_ct_exp_net(exp); 47 49 50 + NF_CT_ASSERT(master_help); 48 51 NF_CT_ASSERT(!timer_pending(&exp->timeout)); 49 52 50 53 hlist_del_rcu(&exp->hnode); 51 54 net->ct.expect_count--; 52 55 53 56 hlist_del(&exp->lnode); 54 - if (!(exp->flags & NF_CT_EXPECT_USERSPACE)) 55 - master_help->expecting[exp->class]--; 57 + master_help->expecting[exp->class]--; 56 58 57 59 nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report); 58 60 nf_ct_expect_put(exp); ··· 312 314 } 313 315 EXPORT_SYMBOL_GPL(nf_ct_expect_put); 314 316 315 - static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) 317 + static int nf_ct_expect_insert(struct nf_conntrack_expect *exp) 316 318 { 317 319 struct nf_conn_help *master_help = nfct_help(exp->master); 320 + struct nf_conntrack_helper *helper; 318 321 struct net *net = nf_ct_exp_net(exp); 319 - const struct nf_conntrack_expect_policy *p; 320 322 unsigned int h = nf_ct_expect_dst_hash(&exp->tuple); 321 323 322 324 /* two references : one for hash insert, one for the timer */ 323 325 atomic_add(2, &exp->use); 324 326 325 - if (master_help) { 326 - hlist_add_head(&exp->lnode, &master_help->expectations); 327 - master_help->expecting[exp->class]++; 328 - } else if (exp->flags & NF_CT_EXPECT_USERSPACE) 329 - hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list); 327 + hlist_add_head(&exp->lnode, &master_help->expectations); 328 + master_help->expecting[exp->class]++; 330 329 331 330 hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]); 332 331 net->ct.expect_count++; 333 332 334 333 setup_timer(&exp->timeout, nf_ct_expectation_timed_out, 335 334 (unsigned long)exp); 336 - if (master_help) { 337 - p = &rcu_dereference_protected( 338 - master_help->helper, 339 - lockdep_is_held(&nf_conntrack_lock) 340 - )->expect_policy[exp->class]; 341 - exp->timeout.expires = jiffies + p->timeout * HZ; 335 + helper = rcu_dereference_protected(master_help->helper, 336 + lockdep_is_held(&nf_conntrack_lock)); 337 + if (helper) { 338 + exp->timeout.expires = jiffies + 339 + helper->expect_policy[exp->class].timeout * HZ; 342 340 } 343 341 add_timer(&exp->timeout); 344 342 345 343 NF_CT_STAT_INC(net, expect_create); 344 + return 0; 346 345 } 347 346 348 347 /* Race with expectations being used means we could have none to find; OK. */ ··· 384 389 struct nf_conntrack_expect *i; 385 390 struct nf_conn *master = expect->master; 386 391 struct nf_conn_help *master_help = nfct_help(master); 392 + struct nf_conntrack_helper *helper; 387 393 struct net *net = nf_ct_exp_net(expect); 388 394 struct hlist_node *n; 389 395 unsigned int h; 390 396 int ret = 1; 391 397 392 - /* Don't allow expectations created from kernel-space with no helper */ 393 - if (!(expect->flags & NF_CT_EXPECT_USERSPACE) && 394 - (!master_help || (master_help && !master_help->helper))) { 398 + if (!master_help) { 395 399 ret = -ESHUTDOWN; 396 400 goto out; 397 401 } ··· 408 414 } 409 415 } 410 416 /* Will be over limit? */ 411 - if (master_help) { 412 - p = &rcu_dereference_protected( 413 - master_help->helper, 414 - lockdep_is_held(&nf_conntrack_lock) 415 - )->expect_policy[expect->class]; 417 + helper = rcu_dereference_protected(master_help->helper, 418 + lockdep_is_held(&nf_conntrack_lock)); 419 + if (helper) { 420 + p = &helper->expect_policy[expect->class]; 416 421 if (p->max_expected && 417 422 master_help->expecting[expect->class] >= p->max_expected) { 418 423 evict_oldest_expect(master, expect); ··· 443 450 if (ret <= 0) 444 451 goto out; 445 452 446 - ret = 0; 447 - nf_ct_expect_insert(expect); 453 + ret = nf_ct_expect_insert(expect); 454 + if (ret < 0) 455 + goto out; 448 456 spin_unlock_bh(&nf_conntrack_lock); 449 457 nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report); 450 458 return ret; ··· 454 460 return ret; 455 461 } 456 462 EXPORT_SYMBOL_GPL(nf_ct_expect_related_report); 457 - 458 - void nf_ct_remove_userspace_expectations(void) 459 - { 460 - struct nf_conntrack_expect *exp; 461 - struct hlist_node *n, *next; 462 - 463 - hlist_for_each_entry_safe(exp, n, next, 464 - &nf_ct_userspace_expect_list, lnode) { 465 - if (del_timer(&exp->timeout)) { 466 - nf_ct_unlink_expect(exp); 467 - nf_ct_expect_put(exp); 468 - } 469 - } 470 - } 471 - EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations); 472 463 473 464 #ifdef CONFIG_PROC_FS 474 465 struct ct_expect_iter_state {
+12
net/netfilter/nf_conntrack_helper.c
··· 121 121 int ret = 0; 122 122 123 123 if (tmpl != NULL) { 124 + /* we've got a userspace helper. */ 125 + if (tmpl->status & IPS_USERSPACE_HELPER) { 126 + help = nf_ct_helper_ext_add(ct, flags); 127 + if (help == NULL) { 128 + ret = -ENOMEM; 129 + goto out; 130 + } 131 + rcu_assign_pointer(help->helper, NULL); 132 + __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status); 133 + ret = 0; 134 + goto out; 135 + } 124 136 help = nfct_help(tmpl); 125 137 if (help != NULL) 126 138 helper = help->helper;
+48 -34
net/netfilter/nf_conntrack_netlink.c
··· 203 203 } 204 204 205 205 static int 206 - ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, 207 - enum ip_conntrack_dir dir) 206 + dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes, 207 + enum ip_conntrack_dir dir) 208 208 { 209 209 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; 210 210 struct nlattr *nest_count; 211 - const struct nf_conn_counter *acct; 212 - 213 - acct = nf_conn_acct_find(ct); 214 - if (!acct) 215 - return 0; 216 211 217 212 nest_count = nla_nest_start(skb, type | NLA_F_NESTED); 218 213 if (!nest_count) 219 214 goto nla_put_failure; 220 215 221 - NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, 222 - cpu_to_be64(acct[dir].packets)); 223 - NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, 224 - cpu_to_be64(acct[dir].bytes)); 216 + NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)); 217 + NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)); 225 218 226 219 nla_nest_end(skb, nest_count); 227 220 ··· 222 229 223 230 nla_put_failure: 224 231 return -1; 232 + } 233 + 234 + static int 235 + ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, 236 + enum ip_conntrack_dir dir, int type) 237 + { 238 + struct nf_conn_counter *acct; 239 + u64 pkts, bytes; 240 + 241 + acct = nf_conn_acct_find(ct); 242 + if (!acct) 243 + return 0; 244 + 245 + if (type == IPCTNL_MSG_CT_GET_CTRZERO) { 246 + pkts = atomic64_xchg(&acct[dir].packets, 0); 247 + bytes = atomic64_xchg(&acct[dir].bytes, 0); 248 + } else { 249 + pkts = atomic64_read(&acct[dir].packets); 250 + bytes = atomic64_read(&acct[dir].bytes); 251 + } 252 + return dump_counters(skb, pkts, bytes, dir); 225 253 } 226 254 227 255 static int ··· 407 393 } 408 394 409 395 static int 410 - ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, 411 - int event, struct nf_conn *ct) 396 + ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, 397 + struct nf_conn *ct) 412 398 { 413 399 struct nlmsghdr *nlh; 414 400 struct nfgenmsg *nfmsg; 415 
401 struct nlattr *nest_parms; 416 - unsigned int flags = pid ? NLM_F_MULTI : 0; 402 + unsigned int flags = pid ? NLM_F_MULTI : 0, event; 417 403 418 - event |= NFNL_SUBSYS_CTNETLINK << 8; 404 + event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW); 419 405 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 420 406 if (nlh == NULL) 421 407 goto nlmsg_failure; ··· 444 430 445 431 if (ctnetlink_dump_status(skb, ct) < 0 || 446 432 ctnetlink_dump_timeout(skb, ct) < 0 || 447 - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 448 - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 || 433 + ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 || 434 + ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 || 449 435 ctnetlink_dump_timestamp(skb, ct) < 0 || 450 436 ctnetlink_dump_protoinfo(skb, ct) < 0 || 451 437 ctnetlink_dump_helpinfo(skb, ct) < 0 || ··· 626 612 goto nla_put_failure; 627 613 628 614 if (events & (1 << IPCT_DESTROY)) { 629 - if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 630 - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 || 615 + if (ctnetlink_dump_counters(skb, ct, 616 + IP_CT_DIR_ORIGINAL, type) < 0 || 617 + ctnetlink_dump_counters(skb, ct, 618 + IP_CT_DIR_REPLY, type) < 0 || 631 619 ctnetlink_dump_timestamp(skb, ct) < 0) 632 620 goto nla_put_failure; 633 621 } else { ··· 725 709 } 726 710 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 727 711 cb->nlh->nlmsg_seq, 728 - IPCTNL_MSG_CT_NEW, ct) < 0) { 712 + NFNL_MSG_TYPE( 713 + cb->nlh->nlmsg_type), 714 + ct) < 0) { 729 715 nf_conntrack_get(&ct->ct_general); 730 716 cb->args[1] = (unsigned long)ct; 731 717 goto out; 732 - } 733 - 734 - if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == 735 - IPCTNL_MSG_CT_GET_CTRZERO) { 736 - struct nf_conn_counter *acct; 737 - 738 - acct = nf_conn_acct_find(ct); 739 - if (acct) 740 - memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); 741 718 } 742 719 } 743 720 if (cb->args[1]) { ··· 1010 
1001 1011 1002 rcu_read_lock(); 1012 1003 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 1013 - IPCTNL_MSG_CT_NEW, ct); 1004 + NFNL_MSG_TYPE(nlh->nlmsg_type), ct); 1014 1005 rcu_read_unlock(); 1015 1006 nf_ct_put(ct); 1016 1007 if (err <= 0) ··· 1096 1087 1097 1088 if (cda[CTA_NAT_DST]) { 1098 1089 ret = ctnetlink_parse_nat_setup(ct, 1099 - IP_NAT_MANIP_DST, 1090 + NF_NAT_MANIP_DST, 1100 1091 cda[CTA_NAT_DST]); 1101 1092 if (ret < 0) 1102 1093 return ret; 1103 1094 } 1104 1095 if (cda[CTA_NAT_SRC]) { 1105 1096 ret = ctnetlink_parse_nat_setup(ct, 1106 - IP_NAT_MANIP_SRC, 1097 + NF_NAT_MANIP_SRC, 1107 1098 cda[CTA_NAT_SRC]); 1108 1099 if (ret < 0) 1109 1100 return ret; ··· 1856 1847 if (err < 0) 1857 1848 return err; 1858 1849 1859 - if (cda[CTA_EXPECT_MASTER]) 1850 + if (cda[CTA_EXPECT_TUPLE]) 1851 + err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); 1852 + else if (cda[CTA_EXPECT_MASTER]) 1860 1853 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3); 1861 1854 else 1862 1855 return -EINVAL; ··· 2034 2023 } 2035 2024 help = nfct_help(ct); 2036 2025 if (!help) { 2026 + err = -EOPNOTSUPP; 2027 + goto out; 2028 + } 2029 + if (test_bit(IPS_USERSPACE_HELPER_BIT, &ct->status)) { 2037 2030 if (!cda[CTA_EXPECT_TIMEOUT]) { 2038 2031 err = -EINVAL; 2039 2032 goto out; ··· 2262 2247 { 2263 2248 pr_info("ctnetlink: unregistering from nfnetlink.\n"); 2264 2249 2265 - nf_ct_remove_userspace_expectations(); 2266 2250 unregister_pernet_subsys(&ctnetlink_net_ops); 2267 2251 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 2268 2252 nfnetlink_subsys_unregister(&ctnl_subsys);
+5 -3
net/netfilter/xt_CT.c
··· 62 62 int ret = 0; 63 63 u8 proto; 64 64 65 - if (info->flags & ~XT_CT_NOTRACK) 66 - return -EINVAL; 65 + if (info->flags & ~(XT_CT_NOTRACK | XT_CT_USERSPACE_HELPER)) 66 + return -EOPNOTSUPP; 67 67 68 68 if (info->flags & XT_CT_NOTRACK) { 69 69 ct = nf_ct_untracked_get(); ··· 92 92 GFP_KERNEL)) 93 93 goto err3; 94 94 95 - if (info->helper[0]) { 95 + if (info->flags & XT_CT_USERSPACE_HELPER) { 96 + __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status); 97 + } else if (info->helper[0]) { 96 98 ret = -ENOENT; 97 99 proto = xt_ct_find_proto(par); 98 100 if (!proto) {
+16 -16
net/netfilter/xt_connbytes.c
··· 40 40 case XT_CONNBYTES_PKTS: 41 41 switch (sinfo->direction) { 42 42 case XT_CONNBYTES_DIR_ORIGINAL: 43 - what = counters[IP_CT_DIR_ORIGINAL].packets; 43 + what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); 44 44 break; 45 45 case XT_CONNBYTES_DIR_REPLY: 46 - what = counters[IP_CT_DIR_REPLY].packets; 46 + what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets); 47 47 break; 48 48 case XT_CONNBYTES_DIR_BOTH: 49 - what = counters[IP_CT_DIR_ORIGINAL].packets; 50 - what += counters[IP_CT_DIR_REPLY].packets; 49 + what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); 50 + what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets); 51 51 break; 52 52 } 53 53 break; 54 54 case XT_CONNBYTES_BYTES: 55 55 switch (sinfo->direction) { 56 56 case XT_CONNBYTES_DIR_ORIGINAL: 57 - what = counters[IP_CT_DIR_ORIGINAL].bytes; 57 + what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); 58 58 break; 59 59 case XT_CONNBYTES_DIR_REPLY: 60 - what = counters[IP_CT_DIR_REPLY].bytes; 60 + what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); 61 61 break; 62 62 case XT_CONNBYTES_DIR_BOTH: 63 - what = counters[IP_CT_DIR_ORIGINAL].bytes; 64 - what += counters[IP_CT_DIR_REPLY].bytes; 63 + what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); 64 + what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); 65 65 break; 66 66 } 67 67 break; 68 68 case XT_CONNBYTES_AVGPKT: 69 69 switch (sinfo->direction) { 70 70 case XT_CONNBYTES_DIR_ORIGINAL: 71 - bytes = counters[IP_CT_DIR_ORIGINAL].bytes; 72 - pkts = counters[IP_CT_DIR_ORIGINAL].packets; 71 + bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); 72 + pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); 73 73 break; 74 74 case XT_CONNBYTES_DIR_REPLY: 75 - bytes = counters[IP_CT_DIR_REPLY].bytes; 76 - pkts = counters[IP_CT_DIR_REPLY].packets; 75 + bytes = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); 76 + pkts = atomic64_read(&counters[IP_CT_DIR_REPLY].packets); 77 77 break; 78 78 case 
XT_CONNBYTES_DIR_BOTH: 79 - bytes = counters[IP_CT_DIR_ORIGINAL].bytes + 80 - counters[IP_CT_DIR_REPLY].bytes; 81 - pkts = counters[IP_CT_DIR_ORIGINAL].packets + 82 - counters[IP_CT_DIR_REPLY].packets; 79 + bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes) + 80 + atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); 81 + pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets) + 82 + atomic64_read(&counters[IP_CT_DIR_REPLY].packets); 83 83 break; 84 84 } 85 85 if (pkts != 0)
+76
net/netfilter/xt_nfacct.c
··· 1 + /* 2 + * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org> 3 + * (C) 2011 Intra2net AG <http://www.intra2net.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 (or any 7 + * later at your option) as published by the Free Software Foundation. 8 + */ 9 + #include <linux/module.h> 10 + #include <linux/skbuff.h> 11 + 12 + #include <linux/netfilter/x_tables.h> 13 + #include <linux/netfilter/nfnetlink_acct.h> 14 + #include <linux/netfilter/xt_nfacct.h> 15 + 16 + MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); 17 + MODULE_DESCRIPTION("Xtables: match for the extended accounting infrastructure"); 18 + MODULE_LICENSE("GPL"); 19 + MODULE_ALIAS("ipt_nfacct"); 20 + MODULE_ALIAS("ip6t_nfacct"); 21 + 22 + static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par) 23 + { 24 + const struct xt_nfacct_match_info *info = par->targinfo; 25 + 26 + nfnl_acct_update(skb, info->nfacct); 27 + 28 + return true; 29 + } 30 + 31 + static int 32 + nfacct_mt_checkentry(const struct xt_mtchk_param *par) 33 + { 34 + struct xt_nfacct_match_info *info = par->matchinfo; 35 + struct nf_acct *nfacct; 36 + 37 + nfacct = nfnl_acct_find_get(info->name); 38 + if (nfacct == NULL) { 39 + pr_info("xt_nfacct: accounting object with name `%s' " 40 + "does not exists\n", info->name); 41 + return -ENOENT; 42 + } 43 + info->nfacct = nfacct; 44 + return 0; 45 + } 46 + 47 + static void 48 + nfacct_mt_destroy(const struct xt_mtdtor_param *par) 49 + { 50 + const struct xt_nfacct_match_info *info = par->matchinfo; 51 + 52 + nfnl_acct_put(info->nfacct); 53 + } 54 + 55 + static struct xt_match nfacct_mt_reg __read_mostly = { 56 + .name = "nfacct", 57 + .family = NFPROTO_UNSPEC, 58 + .checkentry = nfacct_mt_checkentry, 59 + .match = nfacct_mt, 60 + .destroy = nfacct_mt_destroy, 61 + .matchsize = sizeof(struct xt_nfacct_match_info), 62 + .me = THIS_MODULE, 63 + }; 64 + 65 + static int 
__init nfacct_mt_init(void) 66 + { 67 + return xt_register_match(&nfacct_mt_reg); 68 + } 69 + 70 + static void __exit nfacct_mt_exit(void) 71 + { 72 + xt_unregister_match(&nfacct_mt_reg); 73 + } 74 + 75 + module_init(nfacct_mt_init); 76 + module_exit(nfacct_mt_exit);