Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

udp_tunnel: Pass UDP socket down through udp_tunnel{, 6}_xmit_skb().

That way we can make sure the output path of ipv4/ipv6 operate on
the UDP socket rather than whatever random thing happens to be in
skb->sk.

Based upon a patch by Jiri Pirko.

Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>

+48 -28
+8 -6
drivers/net/vxlan.c
··· 1672 1672 } 1673 1673 1674 1674 #if IS_ENABLED(CONFIG_IPV6) 1675 - static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, 1675 + static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, 1676 + struct sk_buff *skb, 1676 1677 struct net_device *dev, struct in6_addr *saddr, 1677 1678 struct in6_addr *daddr, __u8 prio, __u8 ttl, 1678 1679 __be16 src_port, __be16 dst_port, ··· 1749 1748 1750 1749 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 1751 1750 1752 - udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio, 1751 + udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio, 1753 1752 ttl, src_port, dst_port, 1754 1753 !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX)); 1755 1754 return 0; ··· 1759 1758 } 1760 1759 #endif 1761 1760 1762 - int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb, 1761 + int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, 1763 1762 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 1764 1763 __be16 src_port, __be16 dst_port, 1765 1764 struct vxlan_metadata *md, bool xnet, u32 vxflags) ··· 1828 1827 1829 1828 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 1830 1829 1831 - return udp_tunnel_xmit_skb(rt, skb, src, dst, tos, 1830 + return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, 1832 1831 ttl, df, src_port, dst_port, xnet, 1833 1832 !(vxflags & VXLAN_F_UDP_CSUM)); 1834 1833 } ··· 1883 1882 struct vxlan_rdst *rdst, bool did_rsc) 1884 1883 { 1885 1884 struct vxlan_dev *vxlan = netdev_priv(dev); 1885 + struct sock *sk = vxlan->vn_sock->sock->sk; 1886 1886 struct rtable *rt = NULL; 1887 1887 const struct iphdr *old_iph; 1888 1888 struct flowi4 fl4; ··· 1963 1961 md.vni = htonl(vni << 8); 1964 1962 md.gbp = skb->mark; 1965 1963 1966 - err = vxlan_xmit_skb(rt, skb, fl4.saddr, 1964 + err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr, 1967 1965 dst->sin.sin_addr.s_addr, tos, ttl, df, 1968 1966 src_port, dst_port, &md, 1969 1967 !net_eq(vxlan->net, dev_net(vxlan->dev)), ··· 2023 2021 md.vni = 
htonl(vni << 8); 2024 2022 md.gbp = skb->mark; 2025 2023 2026 - err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr, 2024 + err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, 2027 2025 0, ttl, src_port, dst_port, &md, 2028 2026 !net_eq(vxlan->net, dev_net(vxlan->dev)), 2029 2027 vxlan->flags);
+3 -2
include/net/ip6_tunnel.h
··· 73 73 struct net *ip6_tnl_get_link_net(const struct net_device *dev); 74 74 int ip6_tnl_get_iflink(const struct net_device *dev); 75 75 76 - static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 76 + static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, 77 + struct net_device *dev) 77 78 { 78 79 struct net_device_stats *stats = &dev->stats; 79 80 int pkt_len, err; 80 81 81 82 pkt_len = skb->len; 82 - err = ip6_local_out(skb); 83 + err = ip6_local_out_sk(sk, skb); 83 84 84 85 if (net_xmit_eval(err) == 0) { 85 86 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+1
include/net/ipv6.h
··· 827 827 int ip6_mc_input(struct sk_buff *skb); 828 828 829 829 int __ip6_local_out(struct sk_buff *skb); 830 + int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb); 830 831 int ip6_local_out(struct sk_buff *skb); 831 832 832 833 /*
+3 -2
include/net/udp_tunnel.h
··· 77 77 struct udp_tunnel_sock_cfg *sock_cfg); 78 78 79 79 /* Transmit the skb using UDP encapsulation. */ 80 - int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb, 80 + int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, 81 81 __be32 src, __be32 dst, __u8 tos, __u8 ttl, 82 82 __be16 df, __be16 src_port, __be16 dst_port, 83 83 bool xnet, bool nocheck); 84 84 85 85 #if IS_ENABLED(CONFIG_IPV6) 86 - int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, 86 + int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, 87 + struct sk_buff *skb, 87 88 struct net_device *dev, struct in6_addr *saddr, 88 89 struct in6_addr *daddr, 89 90 __u8 prio, __u8 ttl, __be16 src_port,
+1 -1
include/net/vxlan.h
··· 145 145 146 146 void vxlan_sock_release(struct vxlan_sock *vs); 147 147 148 - int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb, 148 + int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, 149 149 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 150 150 __be16 src_port, __be16 dst_port, struct vxlan_metadata *md, 151 151 bool xnet, u32 vxflags);
+1 -1
net/ipv4/geneve.c
··· 136 136 137 137 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 138 138 139 - return udp_tunnel_xmit_skb(rt, skb, src, dst, 139 + return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst, 140 140 tos, ttl, df, src_port, dst_port, xnet, 141 141 !csum); 142 142 }
+1 -1
net/ipv4/ip_tunnel.c
··· 782 782 return; 783 783 } 784 784 785 - err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol, 785 + err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, 786 786 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); 787 787 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 788 788
+2 -2
net/ipv4/udp_tunnel.c
··· 75 75 } 76 76 EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); 77 77 78 - int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb, 78 + int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, 79 79 __be32 src, __be32 dst, __u8 tos, __u8 ttl, 80 80 __be16 df, __be16 src_port, __be16 dst_port, 81 81 bool xnet, bool nocheck) ··· 92 92 93 93 udp_set_csum(nocheck, skb, src, dst, skb->len); 94 94 95 - return iptunnel_xmit(skb->sk, rt, skb, src, dst, IPPROTO_UDP, 95 + return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, 96 96 tos, ttl, df, xnet); 97 97 } 98 98 EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
+1 -1
net/ipv6/ip6_gre.c
··· 760 760 761 761 skb_set_inner_protocol(skb, protocol); 762 762 763 - ip6tunnel_xmit(skb, dev); 763 + ip6tunnel_xmit(NULL, skb, dev); 764 764 if (ndst) 765 765 ip6_tnl_dst_store(tunnel, ndst); 766 766 return 0;
+1 -1
net/ipv6/ip6_tunnel.c
··· 1100 1100 ipv6h->nexthdr = proto; 1101 1101 ipv6h->saddr = fl6->saddr; 1102 1102 ipv6h->daddr = fl6->daddr; 1103 - ip6tunnel_xmit(skb, dev); 1103 + ip6tunnel_xmit(NULL, skb, dev); 1104 1104 if (ndst) 1105 1105 ip6_tnl_dst_store(t, ndst); 1106 1106 return 0;
+3 -2
net/ipv6/ip6_udp_tunnel.c
··· 62 62 } 63 63 EXPORT_SYMBOL_GPL(udp_sock_create6); 64 64 65 - int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, 65 + int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, 66 + struct sk_buff *skb, 66 67 struct net_device *dev, struct in6_addr *saddr, 67 68 struct in6_addr *daddr, 68 69 __u8 prio, __u8 ttl, __be16 src_port, ··· 98 97 ip6h->daddr = *daddr; 99 98 ip6h->saddr = *saddr; 100 99 101 - ip6tunnel_xmit(skb, dev); 100 + ip6tunnel_xmit(sk, skb, dev); 102 101 return 0; 103 102 } 104 103 EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb);
+16 -5
net/ipv6/output_core.c
··· 136 136 EXPORT_SYMBOL(ip6_dst_hoplimit); 137 137 #endif 138 138 139 - int __ip6_local_out(struct sk_buff *skb) 139 + static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) 140 140 { 141 141 int len; 142 142 ··· 146 146 ipv6_hdr(skb)->payload_len = htons(len); 147 147 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 148 148 149 - return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb->sk, skb, 149 + return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, 150 150 NULL, skb_dst(skb)->dev, dst_output_sk); 151 + } 152 + 153 + int __ip6_local_out(struct sk_buff *skb) 154 + { 155 + return __ip6_local_out_sk(skb->sk, skb); 151 156 } 152 157 EXPORT_SYMBOL_GPL(__ip6_local_out); 153 158 154 - int ip6_local_out(struct sk_buff *skb) 159 + int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) 155 160 { 156 161 int err; 157 162 158 - err = __ip6_local_out(skb); 163 + err = __ip6_local_out_sk(sk, skb); 159 164 if (likely(err == 1)) 160 - err = dst_output(skb); 165 + err = dst_output_sk(sk, skb); 161 166 162 167 return err; 168 + } 169 + EXPORT_SYMBOL_GPL(ip6_local_out_sk); 170 + 171 + int ip6_local_out(struct sk_buff *skb) 172 + { 173 + return ip6_local_out_sk(skb->sk, skb); 163 174 } 164 175 EXPORT_SYMBOL_GPL(ip6_local_out);
+3 -2
net/openvswitch/vport-vxlan.c
··· 222 222 { 223 223 struct net *net = ovs_dp_get_net(vport->dp); 224 224 struct vxlan_port *vxlan_port = vxlan_vport(vport); 225 - __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; 225 + struct sock *sk = vxlan_port->vs->sock->sk; 226 + __be16 dst_port = inet_sk(sk)->inet_sport; 226 227 const struct ovs_key_ipv4_tunnel *tun_key; 227 228 struct vxlan_metadata md = {0}; 228 229 struct rtable *rt; ··· 256 255 vxflags = vxlan_port->exts | 257 256 (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0); 258 257 259 - err = vxlan_xmit_skb(rt, skb, fl.saddr, tun_key->ipv4_dst, 258 + err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst, 260 259 tun_key->ipv4_tos, tun_key->ipv4_ttl, df, 261 260 src_port, dst_port, 262 261 &md, false, vxflags);
+4 -2
net/tipc/udp_media.c
··· 176 176 goto tx_error; 177 177 } 178 178 ttl = ip4_dst_hoplimit(&rt->dst); 179 - err = udp_tunnel_xmit_skb(rt, clone, src->ipv4.s_addr, 179 + err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone, 180 + src->ipv4.s_addr, 180 181 dst->ipv4.s_addr, 0, ttl, 0, 181 182 src->udp_port, dst->udp_port, 182 183 false, true); ··· 198 197 if (err) 199 198 goto tx_error; 200 199 ttl = ip6_dst_hoplimit(ndst); 201 - err = udp_tunnel6_xmit_skb(ndst, clone, ndst->dev, &src->ipv6, 200 + err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone, 201 + ndst->dev, &src->ipv6, 202 202 &dst->ipv6, 0, ttl, src->udp_port, 203 203 dst->udp_port, false); 204 204 #endif