Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: add NAT support for shifted portmap ranges

This is a patch proposal to support shifted ranges in portmaps (e.g. TCP/UDP
incoming ports 5000-5100 on the WAN redirected to LAN 192.168.1.5:2000-2100).

Currently DNAT only works for a single port or for identical port ranges (e.g.
ports 5000-5100 on the WAN interface redirected to a LAN host while the original
destination port is not altered). When different port ranges are configured,
either 'random' mode should be used, or else all incoming connections are
mapped onto the first port in the redirect range (in the described example,
WAN:5000-5100 will all be mapped to 192.168.1.5:2000).

This patch introduces a new mode, indicated by the flag NF_NAT_RANGE_PROTO_OFFSET,
which uses a base port value to calculate an offset from the destination port
present in the incoming stream. That offset is then applied as an index within the
redirect port range (index modulo range width, to handle range overflow).

In the described example the base port would be 5000. An incoming stream with
destination port 5004 would result in an offset value of 4, which means that the
NAT'ed stream will use destination port 2004.

Other possibilities include deterministic mapping of larger or multiple ranges
onto a smaller range: WAN:5000-5999 -> LAN:5000-5099 (with base port 5000 and a
100-port redirect range, each WAN port maps to LAN port 5000 + (port - 5000) mod
100; e.g. WAN port 5123 maps to LAN port 5023).

This patch does not change any current behavior. It only adds the new NAT proto
range functionality, which must be selected via the specific flag when it is
intended to be used.

A patch for iptables (libipt_DNAT.c + libip6t_DNAT.c) will also be proposed
which makes this functionality immediately available.

Signed-off-by: Thierry Du Tre <thierry@dtsystems.be>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

Authored by Thierry Du Tre and committed by Pablo Neira Ayuso
(commit 2eb0f624, parent 71cc0873)

+145 -71
+1 -1
include/net/netfilter/ipv4/nf_nat_masquerade.h
··· 6 6 7 7 unsigned int 8 8 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, 9 - const struct nf_nat_range *range, 9 + const struct nf_nat_range2 *range, 10 10 const struct net_device *out); 11 11 12 12 void nf_nat_masquerade_ipv4_register_notifier(void);
+1 -1
include/net/netfilter/ipv6/nf_nat_masquerade.h
··· 3 3 #define _NF_NAT_MASQUERADE_IPV6_H_ 4 4 5 5 unsigned int 6 - nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 6 + nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, 7 7 const struct net_device *out); 8 8 void nf_nat_masquerade_ipv6_register_notifier(void); 9 9 void nf_nat_masquerade_ipv6_unregister_notifier(void);
+1 -1
include/net/netfilter/nf_nat.h
··· 39 39 40 40 /* Set up the info structure to map into this range. */ 41 41 unsigned int nf_nat_setup_info(struct nf_conn *ct, 42 - const struct nf_nat_range *range, 42 + const struct nf_nat_range2 *range, 43 43 enum nf_nat_manip_type maniptype); 44 44 45 45 extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
+2 -2
include/net/netfilter/nf_nat_l3proto.h
··· 7 7 u8 l3proto; 8 8 9 9 bool (*in_range)(const struct nf_conntrack_tuple *t, 10 - const struct nf_nat_range *range); 10 + const struct nf_nat_range2 *range); 11 11 12 12 u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16); 13 13 ··· 33 33 struct flowi *fl); 34 34 35 35 int (*nlattr_to_range)(struct nlattr *tb[], 36 - struct nf_nat_range *range); 36 + struct nf_nat_range2 *range); 37 37 }; 38 38 39 39 int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
+4 -4
include/net/netfilter/nf_nat_l4proto.h
··· 34 34 */ 35 35 void (*unique_tuple)(const struct nf_nat_l3proto *l3proto, 36 36 struct nf_conntrack_tuple *tuple, 37 - const struct nf_nat_range *range, 37 + const struct nf_nat_range2 *range, 38 38 enum nf_nat_manip_type maniptype, 39 39 const struct nf_conn *ct); 40 40 41 41 int (*nlattr_to_range)(struct nlattr *tb[], 42 - struct nf_nat_range *range); 42 + struct nf_nat_range2 *range); 43 43 }; 44 44 45 45 /* Protocol registration. */ ··· 72 72 73 73 void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, 74 74 struct nf_conntrack_tuple *tuple, 75 - const struct nf_nat_range *range, 75 + const struct nf_nat_range2 *range, 76 76 enum nf_nat_manip_type maniptype, 77 77 const struct nf_conn *ct, u16 *rover); 78 78 79 79 int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], 80 - struct nf_nat_range *range); 80 + struct nf_nat_range2 *range); 81 81 82 82 #endif /*_NF_NAT_L4PROTO_H*/
+1 -1
include/net/netfilter/nf_nat_redirect.h
··· 7 7 const struct nf_nat_ipv4_multi_range_compat *mr, 8 8 unsigned int hooknum); 9 9 unsigned int 10 - nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 10 + nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, 11 11 unsigned int hooknum); 12 12 13 13 #endif /* _NF_NAT_REDIRECT_H_ */
+11 -1
include/uapi/linux/netfilter/nf_nat.h
··· 10 10 #define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) 11 11 #define NF_NAT_RANGE_PERSISTENT (1 << 3) 12 12 #define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) 13 + #define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) 13 14 14 15 #define NF_NAT_RANGE_PROTO_RANDOM_ALL \ 15 16 (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) ··· 18 17 #define NF_NAT_RANGE_MASK \ 19 18 (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ 20 19 NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ 21 - NF_NAT_RANGE_PROTO_RANDOM_FULLY) 20 + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET) 22 21 23 22 struct nf_nat_ipv4_range { 24 23 unsigned int flags; ··· 39 38 union nf_inet_addr max_addr; 40 39 union nf_conntrack_man_proto min_proto; 41 40 union nf_conntrack_man_proto max_proto; 41 + }; 42 + 43 + struct nf_nat_range2 { 44 + unsigned int flags; 45 + union nf_inet_addr min_addr; 46 + union nf_inet_addr max_addr; 47 + union nf_conntrack_man_proto min_proto; 48 + union nf_conntrack_man_proto max_proto; 49 + union nf_conntrack_man_proto base_proto; 42 50 }; 43 51 44 52 #endif /* _NETFILTER_NF_NAT_H */
+1 -1
net/ipv4/netfilter/ipt_MASQUERADE.c
··· 47 47 static unsigned int 48 48 masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) 49 49 { 50 - struct nf_nat_range range; 50 + struct nf_nat_range2 range; 51 51 const struct nf_nat_ipv4_multi_range_compat *mr; 52 52 53 53 mr = par->targinfo;
+2 -2
net/ipv4/netfilter/nf_nat_h323.c
··· 395 395 static void ip_nat_q931_expect(struct nf_conn *new, 396 396 struct nf_conntrack_expect *this) 397 397 { 398 - struct nf_nat_range range; 398 + struct nf_nat_range2 range; 399 399 400 400 if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */ 401 401 nf_nat_follow_master(new, this); ··· 497 497 static void ip_nat_callforwarding_expect(struct nf_conn *new, 498 498 struct nf_conntrack_expect *this) 499 499 { 500 - struct nf_nat_range range; 500 + struct nf_nat_range2 range; 501 501 502 502 /* This must be a fresh one. */ 503 503 BUG_ON(new->status & IPS_NAT_DONE_MASK);
+2 -2
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
··· 63 63 #endif /* CONFIG_XFRM */ 64 64 65 65 static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t, 66 - const struct nf_nat_range *range) 66 + const struct nf_nat_range2 *range) 67 67 { 68 68 return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) && 69 69 ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip); ··· 143 143 144 144 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 145 145 static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[], 146 - struct nf_nat_range *range) 146 + struct nf_nat_range2 *range) 147 147 { 148 148 if (tb[CTA_NAT_V4_MINIP]) { 149 149 range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
+2 -2
net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
··· 24 24 25 25 unsigned int 26 26 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, 27 - const struct nf_nat_range *range, 27 + const struct nf_nat_range2 *range, 28 28 const struct net_device *out) 29 29 { 30 30 struct nf_conn *ct; 31 31 struct nf_conn_nat *nat; 32 32 enum ip_conntrack_info ctinfo; 33 - struct nf_nat_range newrange; 33 + struct nf_nat_range2 newrange; 34 34 const struct rtable *rt; 35 35 __be32 newsrc, nh; 36 36
+1 -1
net/ipv4/netfilter/nf_nat_pptp.c
··· 48 48 struct nf_conntrack_tuple t = {}; 49 49 const struct nf_ct_pptp_master *ct_pptp_info; 50 50 const struct nf_nat_pptp *nat_pptp_info; 51 - struct nf_nat_range range; 51 + struct nf_nat_range2 range; 52 52 struct nf_conn_nat *nat; 53 53 54 54 nat = nf_ct_nat_ext_add(ct);
+1 -1
net/ipv4/netfilter/nf_nat_proto_gre.c
··· 41 41 static void 42 42 gre_unique_tuple(const struct nf_nat_l3proto *l3proto, 43 43 struct nf_conntrack_tuple *tuple, 44 - const struct nf_nat_range *range, 44 + const struct nf_nat_range2 *range, 45 45 enum nf_nat_manip_type maniptype, 46 46 const struct nf_conn *ct) 47 47 {
+1 -1
net/ipv4/netfilter/nf_nat_proto_icmp.c
··· 30 30 static void 31 31 icmp_unique_tuple(const struct nf_nat_l3proto *l3proto, 32 32 struct nf_conntrack_tuple *tuple, 33 - const struct nf_nat_range *range, 33 + const struct nf_nat_range2 *range, 34 34 enum nf_nat_manip_type maniptype, 35 35 const struct nf_conn *ct) 36 36 {
+1 -1
net/ipv4/netfilter/nft_masq_ipv4.c
··· 21 21 const struct nft_pktinfo *pkt) 22 22 { 23 23 struct nft_masq *priv = nft_expr_priv(expr); 24 - struct nf_nat_range range; 24 + struct nf_nat_range2 range; 25 25 26 26 memset(&range, 0, sizeof(range)); 27 27 range.flags = priv->flags;
+1 -1
net/ipv6/netfilter/ip6t_MASQUERADE.c
··· 29 29 30 30 static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par) 31 31 { 32 - const struct nf_nat_range *range = par->targinfo; 32 + const struct nf_nat_range2 *range = par->targinfo; 33 33 34 34 if (range->flags & NF_NAT_RANGE_MAP_IPS) 35 35 return -EINVAL;
+2 -2
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
··· 62 62 #endif 63 63 64 64 static bool nf_nat_ipv6_in_range(const struct nf_conntrack_tuple *t, 65 - const struct nf_nat_range *range) 65 + const struct nf_nat_range2 *range) 66 66 { 67 67 return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 && 68 68 ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0; ··· 151 151 152 152 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 153 153 static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[], 154 - struct nf_nat_range *range) 154 + struct nf_nat_range2 *range) 155 155 { 156 156 if (tb[CTA_NAT_V6_MINIP]) { 157 157 nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
+2 -2
net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
··· 26 26 static atomic_t v6_worker_count; 27 27 28 28 unsigned int 29 - nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 29 + nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, 30 30 const struct net_device *out) 31 31 { 32 32 enum ip_conntrack_info ctinfo; 33 33 struct nf_conn_nat *nat; 34 34 struct in6_addr src; 35 35 struct nf_conn *ct; 36 - struct nf_nat_range newrange; 36 + struct nf_nat_range2 newrange; 37 37 38 38 ct = nf_ct_get(skb, &ctinfo); 39 39 WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+1 -1
net/ipv6/netfilter/nf_nat_proto_icmpv6.c
··· 32 32 static void 33 33 icmpv6_unique_tuple(const struct nf_nat_l3proto *l3proto, 34 34 struct nf_conntrack_tuple *tuple, 35 - const struct nf_nat_range *range, 35 + const struct nf_nat_range2 *range, 36 36 enum nf_nat_manip_type maniptype, 37 37 const struct nf_conn *ct) 38 38 {
+1 -1
net/ipv6/netfilter/nft_masq_ipv6.c
··· 22 22 const struct nft_pktinfo *pkt) 23 23 { 24 24 struct nft_masq *priv = nft_expr_priv(expr); 25 - struct nf_nat_range range; 25 + struct nf_nat_range2 range; 26 26 27 27 memset(&range, 0, sizeof(range)); 28 28 range.flags = priv->flags;
+1 -1
net/ipv6/netfilter/nft_redir_ipv6.c
··· 22 22 const struct nft_pktinfo *pkt) 23 23 { 24 24 struct nft_redir *priv = nft_expr_priv(expr); 25 - struct nf_nat_range range; 25 + struct nf_nat_range2 range; 26 26 27 27 memset(&range, 0, sizeof(range)); 28 28 if (priv->sreg_proto_min) {
+14 -13
net/netfilter/nf_nat_core.c
··· 157 157 static int in_range(const struct nf_nat_l3proto *l3proto, 158 158 const struct nf_nat_l4proto *l4proto, 159 159 const struct nf_conntrack_tuple *tuple, 160 - const struct nf_nat_range *range) 160 + const struct nf_nat_range2 *range) 161 161 { 162 162 /* If we are supposed to map IPs, then we must be in the 163 163 * range specified, otherwise let this drag us onto a new src IP. ··· 194 194 const struct nf_nat_l4proto *l4proto, 195 195 const struct nf_conntrack_tuple *tuple, 196 196 struct nf_conntrack_tuple *result, 197 - const struct nf_nat_range *range) 197 + const struct nf_nat_range2 *range) 198 198 { 199 199 unsigned int h = hash_by_src(net, tuple); 200 200 const struct nf_conn *ct; ··· 224 224 static void 225 225 find_best_ips_proto(const struct nf_conntrack_zone *zone, 226 226 struct nf_conntrack_tuple *tuple, 227 - const struct nf_nat_range *range, 227 + const struct nf_nat_range2 *range, 228 228 const struct nf_conn *ct, 229 229 enum nf_nat_manip_type maniptype) 230 230 { ··· 298 298 static void 299 299 get_unique_tuple(struct nf_conntrack_tuple *tuple, 300 300 const struct nf_conntrack_tuple *orig_tuple, 301 - const struct nf_nat_range *range, 301 + const struct nf_nat_range2 *range, 302 302 struct nf_conn *ct, 303 303 enum nf_nat_manip_type maniptype) 304 304 { ··· 349 349 /* Only bother mapping if it's not already in range and unique */ 350 350 if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) { 351 351 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { 352 - if (l4proto->in_range(tuple, maniptype, 353 - &range->min_proto, 354 - &range->max_proto) && 352 + if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) && 353 + l4proto->in_range(tuple, maniptype, 354 + &range->min_proto, 355 + &range->max_proto) && 355 356 (range->min_proto.all == range->max_proto.all || 356 357 !nf_nat_used_tuple(tuple, ct))) 357 358 goto out; ··· 361 360 } 362 361 } 363 362 364 - /* Last change: get protocol to try to obtain unique tuple. 
*/ 363 + /* Last chance: get protocol to try to obtain unique tuple. */ 365 364 l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct); 366 365 out: 367 366 rcu_read_unlock(); ··· 382 381 383 382 unsigned int 384 383 nf_nat_setup_info(struct nf_conn *ct, 385 - const struct nf_nat_range *range, 384 + const struct nf_nat_range2 *range, 386 385 enum nf_nat_manip_type maniptype) 387 386 { 388 387 struct net *net = nf_ct_net(ct); ··· 460 459 (manip == NF_NAT_MANIP_SRC ? 461 460 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 : 462 461 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3); 463 - struct nf_nat_range range = { 462 + struct nf_nat_range2 range = { 464 463 .flags = NF_NAT_RANGE_MAP_IPS, 465 464 .min_addr = ip, 466 465 .max_addr = ip, ··· 703 702 704 703 static int nfnetlink_parse_nat_proto(struct nlattr *attr, 705 704 const struct nf_conn *ct, 706 - struct nf_nat_range *range) 705 + struct nf_nat_range2 *range) 707 706 { 708 707 struct nlattr *tb[CTA_PROTONAT_MAX+1]; 709 708 const struct nf_nat_l4proto *l4proto; ··· 731 730 732 731 static int 733 732 nfnetlink_parse_nat(const struct nlattr *nat, 734 - const struct nf_conn *ct, struct nf_nat_range *range, 733 + const struct nf_conn *ct, struct nf_nat_range2 *range, 735 734 const struct nf_nat_l3proto *l3proto) 736 735 { 737 736 struct nlattr *tb[CTA_NAT_MAX+1]; ··· 759 758 enum nf_nat_manip_type manip, 760 759 const struct nlattr *attr) 761 760 { 762 - struct nf_nat_range range; 761 + struct nf_nat_range2 range; 763 762 const struct nf_nat_l3proto *l3proto; 764 763 int err; 765 764
+1 -1
net/netfilter/nf_nat_helper.c
··· 191 191 void nf_nat_follow_master(struct nf_conn *ct, 192 192 struct nf_conntrack_expect *exp) 193 193 { 194 - struct nf_nat_range range; 194 + struct nf_nat_range2 range; 195 195 196 196 /* This must be a fresh one. */ 197 197 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+6 -3
net/netfilter/nf_nat_proto_common.c
··· 36 36 37 37 void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, 38 38 struct nf_conntrack_tuple *tuple, 39 - const struct nf_nat_range *range, 39 + const struct nf_nat_range2 *range, 40 40 enum nf_nat_manip_type maniptype, 41 41 const struct nf_conn *ct, 42 42 u16 *rover) ··· 83 83 : tuple->src.u.all); 84 84 } else if (range->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY) { 85 85 off = prandom_u32(); 86 + } else if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) { 87 + off = (ntohs(*portptr) - ntohs(range->base_proto.all)); 86 88 } else { 87 89 off = *rover; 88 90 } ··· 93 91 *portptr = htons(min + off % range_size); 94 92 if (++i != range_size && nf_nat_used_tuple(tuple, ct)) 95 93 continue; 96 - if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) 94 + if (!(range->flags & (NF_NAT_RANGE_PROTO_RANDOM_ALL| 95 + NF_NAT_RANGE_PROTO_OFFSET))) 97 96 *rover = off; 98 97 return; 99 98 } ··· 103 100 104 101 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 105 102 int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], 106 - struct nf_nat_range *range) 103 + struct nf_nat_range2 *range) 107 104 { 108 105 if (tb[CTA_PROTONAT_PORT_MIN]) { 109 106 range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
+1 -1
net/netfilter/nf_nat_proto_dccp.c
··· 23 23 static void 24 24 dccp_unique_tuple(const struct nf_nat_l3proto *l3proto, 25 25 struct nf_conntrack_tuple *tuple, 26 - const struct nf_nat_range *range, 26 + const struct nf_nat_range2 *range, 27 27 enum nf_nat_manip_type maniptype, 28 28 const struct nf_conn *ct) 29 29 {
+1 -1
net/netfilter/nf_nat_proto_sctp.c
··· 17 17 static void 18 18 sctp_unique_tuple(const struct nf_nat_l3proto *l3proto, 19 19 struct nf_conntrack_tuple *tuple, 20 - const struct nf_nat_range *range, 20 + const struct nf_nat_range2 *range, 21 21 enum nf_nat_manip_type maniptype, 22 22 const struct nf_conn *ct) 23 23 {
+1 -1
net/netfilter/nf_nat_proto_tcp.c
··· 23 23 static void 24 24 tcp_unique_tuple(const struct nf_nat_l3proto *l3proto, 25 25 struct nf_conntrack_tuple *tuple, 26 - const struct nf_nat_range *range, 26 + const struct nf_nat_range2 *range, 27 27 enum nf_nat_manip_type maniptype, 28 28 const struct nf_conn *ct) 29 29 {
+2 -2
net/netfilter/nf_nat_proto_udp.c
··· 22 22 static void 23 23 udp_unique_tuple(const struct nf_nat_l3proto *l3proto, 24 24 struct nf_conntrack_tuple *tuple, 25 - const struct nf_nat_range *range, 25 + const struct nf_nat_range2 *range, 26 26 enum nf_nat_manip_type maniptype, 27 27 const struct nf_conn *ct) 28 28 { ··· 100 100 static void 101 101 udplite_unique_tuple(const struct nf_nat_l3proto *l3proto, 102 102 struct nf_conntrack_tuple *tuple, 103 - const struct nf_nat_range *range, 103 + const struct nf_nat_range2 *range, 104 104 enum nf_nat_manip_type maniptype, 105 105 const struct nf_conn *ct) 106 106 {
+1 -1
net/netfilter/nf_nat_proto_unknown.c
··· 27 27 28 28 static void unknown_unique_tuple(const struct nf_nat_l3proto *l3proto, 29 29 struct nf_conntrack_tuple *tuple, 30 - const struct nf_nat_range *range, 30 + const struct nf_nat_range2 *range, 31 31 enum nf_nat_manip_type maniptype, 32 32 const struct nf_conn *ct) 33 33 {
+3 -3
net/netfilter/nf_nat_redirect.c
··· 36 36 struct nf_conn *ct; 37 37 enum ip_conntrack_info ctinfo; 38 38 __be32 newdst; 39 - struct nf_nat_range newrange; 39 + struct nf_nat_range2 newrange; 40 40 41 41 WARN_ON(hooknum != NF_INET_PRE_ROUTING && 42 42 hooknum != NF_INET_LOCAL_OUT); ··· 82 82 static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT; 83 83 84 84 unsigned int 85 - nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 85 + nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, 86 86 unsigned int hooknum) 87 87 { 88 - struct nf_nat_range newrange; 88 + struct nf_nat_range2 newrange; 89 89 struct in6_addr newdst; 90 90 enum ip_conntrack_info ctinfo; 91 91 struct nf_conn *ct;
+1 -1
net/netfilter/nf_nat_sip.c
··· 316 316 static void nf_nat_sip_expected(struct nf_conn *ct, 317 317 struct nf_conntrack_expect *exp) 318 318 { 319 - struct nf_nat_range range; 319 + struct nf_nat_range2 range; 320 320 321 321 /* This must be a fresh one. */ 322 322 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+1 -1
net/netfilter/nft_nat.c
··· 43 43 const struct nft_nat *priv = nft_expr_priv(expr); 44 44 enum ip_conntrack_info ctinfo; 45 45 struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo); 46 - struct nf_nat_range range; 46 + struct nf_nat_range2 range; 47 47 48 48 memset(&range, 0, sizeof(range)); 49 49 if (priv->sreg_addr_min) {
+4 -4
net/netfilter/xt_NETMAP.c
··· 21 21 static unsigned int 22 22 netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par) 23 23 { 24 - const struct nf_nat_range *range = par->targinfo; 25 - struct nf_nat_range newrange; 24 + const struct nf_nat_range2 *range = par->targinfo; 25 + struct nf_nat_range2 newrange; 26 26 struct nf_conn *ct; 27 27 enum ip_conntrack_info ctinfo; 28 28 union nf_inet_addr new_addr, netmask; ··· 56 56 57 57 static int netmap_tg6_checkentry(const struct xt_tgchk_param *par) 58 58 { 59 - const struct nf_nat_range *range = par->targinfo; 59 + const struct nf_nat_range2 *range = par->targinfo; 60 60 61 61 if (!(range->flags & NF_NAT_RANGE_MAP_IPS)) 62 62 return -EINVAL; ··· 75 75 enum ip_conntrack_info ctinfo; 76 76 __be32 new_ip, netmask; 77 77 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 78 - struct nf_nat_range newrange; 78 + struct nf_nat_range2 newrange; 79 79 80 80 WARN_ON(xt_hooknum(par) != NF_INET_PRE_ROUTING && 81 81 xt_hooknum(par) != NF_INET_POST_ROUTING &&
+1 -1
net/netfilter/xt_REDIRECT.c
··· 36 36 37 37 static int redirect_tg6_checkentry(const struct xt_tgchk_param *par) 38 38 { 39 - const struct nf_nat_range *range = par->targinfo; 39 + const struct nf_nat_range2 *range = par->targinfo; 40 40 41 41 if (range->flags & NF_NAT_RANGE_MAP_IPS) 42 42 return -EINVAL;
+66 -6
net/netfilter/xt_nat.c
··· 37 37 nf_ct_netns_put(par->net, par->family); 38 38 } 39 39 40 - static void xt_nat_convert_range(struct nf_nat_range *dst, 40 + static void xt_nat_convert_range(struct nf_nat_range2 *dst, 41 41 const struct nf_nat_ipv4_range *src) 42 42 { 43 43 memset(&dst->min_addr, 0, sizeof(dst->min_addr)); 44 44 memset(&dst->max_addr, 0, sizeof(dst->max_addr)); 45 + memset(&dst->base_proto, 0, sizeof(dst->base_proto)); 45 46 46 47 dst->flags = src->flags; 47 48 dst->min_addr.ip = src->min_ip; ··· 55 54 xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par) 56 55 { 57 56 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 58 - struct nf_nat_range range; 57 + struct nf_nat_range2 range; 59 58 enum ip_conntrack_info ctinfo; 60 59 struct nf_conn *ct; 61 60 ··· 72 71 xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par) 73 72 { 74 73 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 75 - struct nf_nat_range range; 74 + struct nf_nat_range2 range; 76 75 enum ip_conntrack_info ctinfo; 77 76 struct nf_conn *ct; 78 77 ··· 87 86 static unsigned int 88 87 xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par) 89 88 { 90 - const struct nf_nat_range *range = par->targinfo; 89 + const struct nf_nat_range *range_v1 = par->targinfo; 90 + struct nf_nat_range2 range; 91 + enum ip_conntrack_info ctinfo; 92 + struct nf_conn *ct; 93 + 94 + ct = nf_ct_get(skb, &ctinfo); 95 + WARN_ON(!(ct != NULL && 96 + (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 97 + ctinfo == IP_CT_RELATED_REPLY))); 98 + 99 + memcpy(&range, range_v1, sizeof(*range_v1)); 100 + memset(&range.base_proto, 0, sizeof(range.base_proto)); 101 + 102 + return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); 103 + } 104 + 105 + static unsigned int 106 + xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par) 107 + { 108 + const struct nf_nat_range *range_v1 = par->targinfo; 109 + struct nf_nat_range2 range; 110 + enum 
ip_conntrack_info ctinfo; 111 + struct nf_conn *ct; 112 + 113 + ct = nf_ct_get(skb, &ctinfo); 114 + WARN_ON(!(ct != NULL && 115 + (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED))); 116 + 117 + memcpy(&range, range_v1, sizeof(*range_v1)); 118 + memset(&range.base_proto, 0, sizeof(range.base_proto)); 119 + 120 + return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); 121 + } 122 + 123 + static unsigned int 124 + xt_snat_target_v2(struct sk_buff *skb, const struct xt_action_param *par) 125 + { 126 + const struct nf_nat_range2 *range = par->targinfo; 91 127 enum ip_conntrack_info ctinfo; 92 128 struct nf_conn *ct; 93 129 ··· 137 99 } 138 100 139 101 static unsigned int 140 - xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par) 102 + xt_dnat_target_v2(struct sk_buff *skb, const struct xt_action_param *par) 141 103 { 142 - const struct nf_nat_range *range = par->targinfo; 104 + const struct nf_nat_range2 *range = par->targinfo; 143 105 enum ip_conntrack_info ctinfo; 144 106 struct nf_conn *ct; 145 107 ··· 196 158 .destroy = xt_nat_destroy, 197 159 .target = xt_dnat_target_v1, 198 160 .targetsize = sizeof(struct nf_nat_range), 161 + .table = "nat", 162 + .hooks = (1 << NF_INET_PRE_ROUTING) | 163 + (1 << NF_INET_LOCAL_OUT), 164 + .me = THIS_MODULE, 165 + }, 166 + { 167 + .name = "SNAT", 168 + .revision = 2, 169 + .checkentry = xt_nat_checkentry, 170 + .destroy = xt_nat_destroy, 171 + .target = xt_snat_target_v2, 172 + .targetsize = sizeof(struct nf_nat_range2), 173 + .table = "nat", 174 + .hooks = (1 << NF_INET_POST_ROUTING) | 175 + (1 << NF_INET_LOCAL_IN), 176 + .me = THIS_MODULE, 177 + }, 178 + { 179 + .name = "DNAT", 180 + .revision = 2, 181 + .target = xt_dnat_target_v2, 182 + .targetsize = sizeof(struct nf_nat_range2), 199 183 .table = "nat", 200 184 .hooks = (1 << NF_INET_PRE_ROUTING) | 201 185 (1 << NF_INET_LOCAL_OUT),
+2 -2
net/openvswitch/conntrack.c
··· 72 72 struct md_mark mark; 73 73 struct md_labels labels; 74 74 #ifdef CONFIG_NF_NAT_NEEDED 75 - struct nf_nat_range range; /* Only present for SRC NAT and DST NAT. */ 75 + struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */ 76 76 #endif 77 77 }; 78 78 ··· 710 710 */ 711 711 static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, 712 712 enum ip_conntrack_info ctinfo, 713 - const struct nf_nat_range *range, 713 + const struct nf_nat_range2 *range, 714 714 enum nf_nat_manip_type maniptype) 715 715 { 716 716 int hooknum, nh_off, err = NF_ACCEPT;