Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: use actual socket sk rather than skb sk when routing harder

If netfilter changes the packet mark when mangling, the packet is
rerouted using the route_me_harder set of functions. Prior to this
commit, there's one big difference between route_me_harder and the
ordinary initial routing functions, described in the comment above
__ip_queue_xmit():

/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,

That function goes on to correctly make use of sk->sk_bound_dev_if,
rather than skb->sk->sk_bound_dev_if. And indeed the comment is true: a
tunnel will receive a packet in ndo_start_xmit with an initial skb->sk.
It will make some transformations to that packet, and then it will send
the encapsulated packet out of a *new* socket. That new socket will
basically always have a different sk_bound_dev_if (otherwise there'd be
a routing loop). So for the purposes of routing the encapsulated packet,
the routing information as it pertains to the socket should come from
that socket's sk, rather than the packet's original skb->sk. For that
reason __ip_queue_xmit() and related functions all do the right thing.

One might argue that all tunnels should just call skb_orphan(skb) before
transmitting the encapsulated packet into the new socket. But tunnels do
*not* do this -- and this is wisely avoided in skb_scrub_packet() too --
because features like TSQ rely on skb->destructor() being called when
that buffer space is truly available again. Calling skb_orphan(skb) too
early would result in buffers filling up unnecessarily and accounting
info being all wrong. Instead, additional routing must take into account
the new sk, just as __ip_queue_xmit() notes.

So, this commit addresses the problem by fishing the correct sk out of
state->sk -- it's already set properly in the call to nf_hook() in
__ip_local_out(), which receives the sk as part of its normal
functionality. So we make sure to plumb state->sk through the various
route_me_harder functions, and then make correct use of it following the
example of __ip_queue_xmit().

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

authored by

Jason A. Donenfeld and committed by
Pablo Neira Ayuso
46d6c5ae af8afcf1

+26 -24
+1 -1
include/linux/netfilter_ipv4.h
··· 16 16 u_int32_t mark; 17 17 }; 18 18 19 - int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); 19 + int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type); 20 20 21 21 struct nf_queue_entry; 22 22
+5 -5
include/linux/netfilter_ipv6.h
··· 42 42 #if IS_MODULE(CONFIG_IPV6) 43 43 int (*chk_addr)(struct net *net, const struct in6_addr *addr, 44 44 const struct net_device *dev, int strict); 45 - int (*route_me_harder)(struct net *net, struct sk_buff *skb); 45 + int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); 46 46 int (*dev_get_saddr)(struct net *net, const struct net_device *dev, 47 47 const struct in6_addr *daddr, unsigned int srcprefs, 48 48 struct in6_addr *saddr); ··· 143 143 #endif 144 144 } 145 145 146 - int ip6_route_me_harder(struct net *net, struct sk_buff *skb); 146 + int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); 147 147 148 - static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) 148 + static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) 149 149 { 150 150 #if IS_MODULE(CONFIG_IPV6) 151 151 const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); ··· 153 153 if (!v6_ops) 154 154 return -EHOSTUNREACH; 155 155 156 - return v6_ops->route_me_harder(net, skb); 156 + return v6_ops->route_me_harder(net, sk, skb); 157 157 #elif IS_BUILTIN(CONFIG_IPV6) 158 - return ip6_route_me_harder(net, skb); 158 + return ip6_route_me_harder(net, sk, skb); 159 159 #else 160 160 return -EHOSTUNREACH; 161 161 #endif
+5 -3
net/ipv4/netfilter.c
··· 17 17 #include <net/netfilter/nf_queue.h> 18 18 19 19 /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ 20 - int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type) 20 + int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type) 21 21 { 22 22 const struct iphdr *iph = ip_hdr(skb); 23 23 struct rtable *rt; 24 24 struct flowi4 fl4 = {}; 25 25 __be32 saddr = iph->saddr; 26 - const struct sock *sk = skb_to_full_sk(skb); 27 - __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; 26 + __u8 flags; 28 27 struct net_device *dev = skb_dst(skb)->dev; 29 28 unsigned int hh_len; 29 + 30 + sk = sk_to_full_sk(sk); 31 + flags = sk ? inet_sk_flowi_flags(sk) : 0; 30 32 31 33 if (addr_type == RTN_UNSPEC) 32 34 addr_type = inet_addr_type_dev_table(net, dev, saddr);
+1 -1
net/ipv4/netfilter/iptable_mangle.c
··· 62 62 iph->daddr != daddr || 63 63 skb->mark != mark || 64 64 iph->tos != tos) { 65 - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); 65 + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); 66 66 if (err < 0) 67 67 ret = NF_DROP_ERR(err); 68 68 }
+1 -1
net/ipv4/netfilter/nf_reject_ipv4.c
··· 145 145 ip4_dst_hoplimit(skb_dst(nskb))); 146 146 nf_reject_ip_tcphdr_put(nskb, oldskb, oth); 147 147 148 - if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) 148 + if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) 149 149 goto free_nskb; 150 150 151 151 niph = ip_hdr(nskb);
+3 -3
net/ipv6/netfilter.c
··· 20 20 #include <net/netfilter/ipv6/nf_defrag_ipv6.h> 21 21 #include "../bridge/br_private.h" 22 22 23 - int ip6_route_me_harder(struct net *net, struct sk_buff *skb) 23 + int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb) 24 24 { 25 25 const struct ipv6hdr *iph = ipv6_hdr(skb); 26 - struct sock *sk = sk_to_full_sk(skb->sk); 26 + struct sock *sk = sk_to_full_sk(sk_partial); 27 27 unsigned int hh_len; 28 28 struct dst_entry *dst; 29 29 int strict = (ipv6_addr_type(&iph->daddr) & ··· 84 84 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || 85 85 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || 86 86 skb->mark != rt_info->mark) 87 - return ip6_route_me_harder(entry->state.net, skb); 87 + return ip6_route_me_harder(entry->state.net, entry->state.sk, skb); 88 88 } 89 89 return 0; 90 90 }
+1 -1
net/ipv6/netfilter/ip6table_mangle.c
··· 57 57 skb->mark != mark || 58 58 ipv6_hdr(skb)->hop_limit != hop_limit || 59 59 flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) { 60 - err = ip6_route_me_harder(state->net, skb); 60 + err = ip6_route_me_harder(state->net, state->sk, skb); 61 61 if (err < 0) 62 62 ret = NF_DROP_ERR(err); 63 63 }
+2 -2
net/netfilter/ipvs/ip_vs_core.c
··· 742 742 struct dst_entry *dst = skb_dst(skb); 743 743 744 744 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) && 745 - ip6_route_me_harder(ipvs->net, skb) != 0) 745 + ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0) 746 746 return 1; 747 747 } else 748 748 #endif 749 749 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && 750 - ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0) 750 + ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0) 751 751 return 1; 752 752 753 753 return 0;
+2 -2
net/netfilter/nf_nat_proto.c
··· 715 715 716 716 if (ct->tuplehash[dir].tuple.dst.u3.ip != 717 717 ct->tuplehash[!dir].tuple.src.u3.ip) { 718 - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); 718 + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); 719 719 if (err < 0) 720 720 ret = NF_DROP_ERR(err); 721 721 } ··· 953 953 954 954 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, 955 955 &ct->tuplehash[!dir].tuple.src.u3)) { 956 - err = nf_ip6_route_me_harder(state->net, skb); 956 + err = nf_ip6_route_me_harder(state->net, state->sk, skb); 957 957 if (err < 0) 958 958 ret = NF_DROP_ERR(err); 959 959 }
+1 -1
net/netfilter/nf_synproxy_core.c
··· 446 446 447 447 skb_dst_set_noref(nskb, skb_dst(skb)); 448 448 nskb->protocol = htons(ETH_P_IP); 449 - if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) 449 + if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) 450 450 goto free_nskb; 451 451 452 452 if (nfct) {
+2 -2
net/netfilter/nft_chain_route.c
··· 42 42 iph->daddr != daddr || 43 43 skb->mark != mark || 44 44 iph->tos != tos) { 45 - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); 45 + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); 46 46 if (err < 0) 47 47 ret = NF_DROP_ERR(err); 48 48 } ··· 92 92 skb->mark != mark || 93 93 ipv6_hdr(skb)->hop_limit != hop_limit || 94 94 flowlabel != *((u32 *)ipv6_hdr(skb)))) { 95 - err = nf_ip6_route_me_harder(state->net, skb); 95 + err = nf_ip6_route_me_harder(state->net, state->sk, skb); 96 96 if (err < 0) 97 97 ret = NF_DROP_ERR(err); 98 98 }
+2 -2
net/netfilter/utils.c
··· 191 191 skb->mark == rt_info->mark && 192 192 iph->daddr == rt_info->daddr && 193 193 iph->saddr == rt_info->saddr)) 194 - return ip_route_me_harder(entry->state.net, skb, 195 - RTN_UNSPEC); 194 + return ip_route_me_harder(entry->state.net, entry->state.sk, 195 + skb, RTN_UNSPEC); 196 196 } 197 197 #endif 198 198 return 0;