Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'udp-tunnel-route-lookups'

Beniamino Galvani says:

====================
net: consolidate IPv4 route lookup for UDP tunnels

At the moment, different UDP tunnels rely on different functions for
IPv4 route lookup, and those functions all implement the same logic.
Only bareudp uses the generic ip_route_output_tunnel(), while geneve
and vxlan each duplicate it with slight variations.

This series first extends the generic lookup function so that it is
suitable for all UDP tunnel implementations. Then, bareudp, geneve and
vxlan are adapted to use it.

This results in code with less duplication and hopefully better
maintainability.

After this series is merged, IPv6 will be converted in a similar way.

Changelog:
v2
- fix compilation with IPv6 disabled
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+148 -201
+7 -4
drivers/net/bareudp.c
··· 306 306 if (!sock) 307 307 return -ESHUTDOWN; 308 308 309 - rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info, 310 - IPPROTO_UDP, use_cache); 309 + rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key, 310 + 0, 0, key->tos, 311 + use_cache ? 312 + (struct dst_cache *)&info->dst_cache : NULL); 311 313 312 314 if (IS_ERR(rt)) 313 315 return PTR_ERR(rt); ··· 485 483 struct rtable *rt; 486 484 __be32 saddr; 487 485 488 - rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, 489 - info, IPPROTO_UDP, use_cache); 486 + rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, 487 + &info->key, 0, 0, info->key.tos, 488 + use_cache ? &info->dst_cache : NULL); 490 489 if (IS_ERR(rt)) 491 490 return PTR_ERR(rt); 492 491
+43 -70
drivers/net/geneve.c
··· 784 784 return err; 785 785 } 786 786 787 - static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, 788 - struct net_device *dev, 789 - struct geneve_sock *gs4, 790 - struct flowi4 *fl4, 791 - const struct ip_tunnel_info *info, 792 - __be16 dport, __be16 sport, 793 - __u8 *full_tos) 787 + static u8 geneve_get_dsfield(struct sk_buff *skb, struct net_device *dev, 788 + const struct ip_tunnel_info *info, 789 + bool *use_cache) 794 790 { 795 - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 796 791 struct geneve_dev *geneve = netdev_priv(dev); 797 - struct dst_cache *dst_cache; 798 - struct rtable *rt = NULL; 799 - __u8 tos; 792 + u8 dsfield; 800 793 801 - if (!gs4) 802 - return ERR_PTR(-EIO); 794 + dsfield = info->key.tos; 795 + if (dsfield == 1 && !geneve->cfg.collect_md) { 796 + dsfield = ip_tunnel_get_dsfield(ip_hdr(skb), skb); 797 + *use_cache = false; 798 + } 803 799 804 - memset(fl4, 0, sizeof(*fl4)); 805 - fl4->flowi4_mark = skb->mark; 806 - fl4->flowi4_proto = IPPROTO_UDP; 807 - fl4->daddr = info->key.u.ipv4.dst; 808 - fl4->saddr = info->key.u.ipv4.src; 809 - fl4->fl4_dport = dport; 810 - fl4->fl4_sport = sport; 811 - fl4->flowi4_flags = info->key.flow_flags; 812 - 813 - tos = info->key.tos; 814 - if ((tos == 1) && !geneve->cfg.collect_md) { 815 - tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb); 816 - use_cache = false; 817 - } 818 - fl4->flowi4_tos = RT_TOS(tos); 819 - if (full_tos) 820 - *full_tos = tos; 821 - 822 - dst_cache = (struct dst_cache *)&info->dst_cache; 823 - if (use_cache) { 824 - rt = dst_cache_get_ip4(dst_cache, &fl4->saddr); 825 - if (rt) 826 - return rt; 827 - } 828 - rt = ip_route_output_key(geneve->net, fl4); 829 - if (IS_ERR(rt)) { 830 - netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr); 831 - return ERR_PTR(-ENETUNREACH); 832 - } 833 - if (rt->dst.dev == dev) { /* is this necessary? 
*/ 834 - netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr); 835 - ip_rt_put(rt); 836 - return ERR_PTR(-ELOOP); 837 - } 838 - if (use_cache) 839 - dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr); 840 - return rt; 800 + return dsfield; 841 801 } 842 802 843 803 #if IS_ENABLED(CONFIG_IPV6) ··· 825 865 fl6->fl6_dport = dport; 826 866 fl6->fl6_sport = sport; 827 867 828 - prio = info->key.tos; 829 - if ((prio == 1) && !geneve->cfg.collect_md) { 830 - prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); 831 - use_cache = false; 832 - } 833 - 868 + prio = geneve_get_dsfield(skb, dev, info, &use_cache); 834 869 fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label); 835 870 dst_cache = (struct dst_cache *)&info->dst_cache; 836 871 if (use_cache) { ··· 859 904 struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); 860 905 const struct ip_tunnel_key *key = &info->key; 861 906 struct rtable *rt; 862 - struct flowi4 fl4; 863 - __u8 full_tos; 907 + bool use_cache; 864 908 __u8 tos, ttl; 865 909 __be16 df = 0; 910 + __be32 saddr; 866 911 __be16 sport; 867 912 int err; 868 913 869 914 if (!pskb_inet_may_pull(skb)) 870 915 return -EINVAL; 871 916 917 + if (!gs4) 918 + return -EIO; 919 + 920 + use_cache = ip_tunnel_dst_cache_usable(skb, info); 921 + tos = geneve_get_dsfield(skb, dev, info, &use_cache); 872 922 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 873 - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, 874 - geneve->cfg.info.key.tp_dst, sport, &full_tos); 923 + 924 + rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, 925 + &info->key, 926 + sport, geneve->cfg.info.key.tp_dst, tos, 927 + use_cache ? 
928 + (struct dst_cache *)&info->dst_cache : NULL); 875 929 if (IS_ERR(rt)) 876 930 return PTR_ERR(rt); 877 931 ··· 903 939 return -ENOMEM; 904 940 } 905 941 906 - unclone->key.u.ipv4.dst = fl4.saddr; 907 - unclone->key.u.ipv4.src = fl4.daddr; 942 + unclone->key.u.ipv4.dst = saddr; 943 + unclone->key.u.ipv4.src = info->key.u.ipv4.dst; 908 944 } 909 945 910 946 if (!pskb_may_pull(skb, ETH_HLEN)) { ··· 918 954 return -EMSGSIZE; 919 955 } 920 956 957 + tos = ip_tunnel_ecn_encap(tos, ip_hdr(skb), skb); 921 958 if (geneve->cfg.collect_md) { 922 - tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 923 959 ttl = key->ttl; 924 960 925 961 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 926 962 } else { 927 - tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb); 928 963 if (geneve->cfg.ttl_inherit) 929 964 ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); 930 965 else ··· 951 988 if (unlikely(err)) 952 989 return err; 953 990 954 - udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr, 991 + udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst, 955 992 tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, 956 993 !net_eq(geneve->net, dev_net(geneve->dev)), 957 994 !(info->key.tun_flags & TUNNEL_CSUM)); ··· 1100 1137 1101 1138 if (ip_tunnel_info_af(info) == AF_INET) { 1102 1139 struct rtable *rt; 1103 - struct flowi4 fl4; 1104 - 1105 1140 struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); 1141 + bool use_cache; 1142 + __be32 saddr; 1143 + u8 tos; 1144 + 1145 + if (!gs4) 1146 + return -EIO; 1147 + 1148 + use_cache = ip_tunnel_dst_cache_usable(skb, info); 1149 + tos = geneve_get_dsfield(skb, dev, info, &use_cache); 1106 1150 sport = udp_flow_src_port(geneve->net, skb, 1107 1151 1, USHRT_MAX, true); 1108 1152 1109 - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, 1110 - geneve->cfg.info.key.tp_dst, sport, NULL); 1153 + rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, 1154 + &info->key, 1155 + sport, 
geneve->cfg.info.key.tp_dst, 1156 + tos, 1157 + use_cache ? &info->dst_cache : NULL); 1111 1158 if (IS_ERR(rt)) 1112 1159 return PTR_ERR(rt); 1113 1160 1114 1161 ip_rt_put(rt); 1115 - info->key.u.ipv4.src = fl4.saddr; 1162 + info->key.u.ipv4.src = saddr; 1116 1163 #if IS_ENABLED(CONFIG_IPV6) 1117 1164 } else if (ip_tunnel_info_af(info) == AF_INET6) { 1118 1165 struct dst_entry *dst;
+41 -73
drivers/net/vxlan/vxlan_core.c
··· 2215 2215 return 0; 2216 2216 } 2217 2217 2218 - static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, 2219 - struct vxlan_sock *sock4, 2220 - struct sk_buff *skb, int oif, u8 tos, 2221 - __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport, 2222 - __u8 flow_flags, struct dst_cache *dst_cache, 2223 - const struct ip_tunnel_info *info) 2224 - { 2225 - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 2226 - struct rtable *rt = NULL; 2227 - struct flowi4 fl4; 2228 - 2229 - if (!sock4) 2230 - return ERR_PTR(-EIO); 2231 - 2232 - if (tos && !info) 2233 - use_cache = false; 2234 - if (use_cache) { 2235 - rt = dst_cache_get_ip4(dst_cache, saddr); 2236 - if (rt) 2237 - return rt; 2238 - } 2239 - 2240 - memset(&fl4, 0, sizeof(fl4)); 2241 - fl4.flowi4_oif = oif; 2242 - fl4.flowi4_tos = RT_TOS(tos); 2243 - fl4.flowi4_mark = skb->mark; 2244 - fl4.flowi4_proto = IPPROTO_UDP; 2245 - fl4.daddr = daddr; 2246 - fl4.saddr = *saddr; 2247 - fl4.fl4_dport = dport; 2248 - fl4.fl4_sport = sport; 2249 - fl4.flowi4_flags = flow_flags; 2250 - 2251 - rt = ip_route_output_key(vxlan->net, &fl4); 2252 - if (!IS_ERR(rt)) { 2253 - if (rt->dst.dev == dev) { 2254 - netdev_dbg(dev, "circular route to %pI4\n", &daddr); 2255 - ip_rt_put(rt); 2256 - return ERR_PTR(-ELOOP); 2257 - } 2258 - 2259 - *saddr = fl4.saddr; 2260 - if (use_cache) 2261 - dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); 2262 - } else { 2263 - netdev_dbg(dev, "no route to %pI4\n", &daddr); 2264 - return ERR_PTR(-ENETUNREACH); 2265 - } 2266 - return rt; 2267 - } 2268 - 2269 2218 #if IS_ENABLED(CONFIG_IPV6) 2270 2219 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, 2271 2220 struct net_device *dev, ··· 2367 2418 { 2368 2419 struct dst_cache *dst_cache; 2369 2420 struct ip_tunnel_info *info; 2421 + struct ip_tunnel_key *pkey; 2422 + struct ip_tunnel_key key; 2370 2423 struct vxlan_dev *vxlan = netdev_priv(dev); 2371 2424 const struct iphdr *old_iph = ip_hdr(skb); 2372 
2425 union vxlan_addr *dst; 2373 - union vxlan_addr remote_ip, local_ip; 2426 + union vxlan_addr remote_ip; 2374 2427 struct vxlan_metadata _md; 2375 2428 struct vxlan_metadata *md = &_md; 2376 2429 unsigned int pkt_len = skb->len; 2377 2430 __be16 src_port = 0, dst_port; 2378 2431 struct dst_entry *ndst = NULL; 2379 - __u8 tos, ttl, flow_flags = 0; 2432 + __u8 tos, ttl; 2380 2433 int ifindex; 2381 2434 int err; 2382 2435 u32 flags = vxlan->cfg.flags; 2436 + bool use_cache; 2383 2437 bool udp_sum = false; 2384 2438 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); 2385 2439 __be32 vni = 0; 2386 2440 #if IS_ENABLED(CONFIG_IPV6) 2441 + union vxlan_addr local_ip; 2387 2442 __be32 label; 2388 2443 #endif 2389 2444 2390 2445 info = skb_tunnel_info(skb); 2446 + use_cache = ip_tunnel_dst_cache_usable(skb, info); 2391 2447 2392 2448 if (rdst) { 2393 2449 dst = &rdst->remote_ip; 2450 + memset(&key, 0, sizeof(key)); 2451 + pkey = &key; 2452 + 2394 2453 if (vxlan_addr_any(dst)) { 2395 2454 if (did_rsc) { 2396 2455 /* short-circuited back to local bridge */ ··· 2412 2455 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; 2413 2456 vni = (rdst->remote_vni) ? 
: default_vni; 2414 2457 ifindex = rdst->remote_ifindex; 2415 - local_ip = vxlan->cfg.saddr; 2458 + 2459 + if (dst->sa.sa_family == AF_INET) { 2460 + key.u.ipv4.src = vxlan->cfg.saddr.sin.sin_addr.s_addr; 2461 + key.u.ipv4.dst = rdst->remote_ip.sin.sin_addr.s_addr; 2462 + } else { 2463 + key.u.ipv6.src = vxlan->cfg.saddr.sin6.sin6_addr; 2464 + key.u.ipv6.dst = rdst->remote_ip.sin6.sin6_addr; 2465 + } 2466 + 2416 2467 dst_cache = &rdst->dst_cache; 2417 2468 md->gbp = skb->mark; 2418 2469 if (flags & VXLAN_F_TTL_INHERIT) { ··· 2434 2469 tos = vxlan->cfg.tos; 2435 2470 if (tos == 1) 2436 2471 tos = ip_tunnel_get_dsfield(old_iph, skb); 2472 + if (tos && !info) 2473 + use_cache = false; 2437 2474 2438 2475 if (dst->sa.sa_family == AF_INET) 2439 2476 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); 2440 2477 else 2441 2478 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2442 2479 #if IS_ENABLED(CONFIG_IPV6) 2480 + local_ip = vxlan->cfg.saddr; 2443 2481 label = vxlan->cfg.label; 2444 2482 #endif 2445 2483 } else { ··· 2454 2486 remote_ip.sa.sa_family = ip_tunnel_info_af(info); 2455 2487 if (remote_ip.sa.sa_family == AF_INET) { 2456 2488 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; 2457 - local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; 2458 2489 } else { 2459 2490 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; 2491 + #if IS_ENABLED(CONFIG_IPV6) 2460 2492 local_ip.sin6.sin6_addr = info->key.u.ipv6.src; 2493 + #endif 2461 2494 } 2462 2495 dst = &remote_ip; 2496 + pkey = &info->key; 2463 2497 dst_port = info->key.tp_dst ? 
: vxlan->cfg.dst_port; 2464 - flow_flags = info->key.flow_flags; 2465 2498 vni = tunnel_id_to_key32(info->key.tun_id); 2466 2499 ifindex = 0; 2467 2500 dst_cache = &info->dst_cache; ··· 2486 2517 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 2487 2518 struct rtable *rt; 2488 2519 __be16 df = 0; 2520 + __be32 saddr; 2489 2521 2490 2522 if (!ifindex) 2491 2523 ifindex = sock4->sock->sk->sk_bound_dev_if; 2492 2524 2493 - rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos, 2494 - dst->sin.sin_addr.s_addr, 2495 - &local_ip.sin.sin_addr.s_addr, 2496 - dst_port, src_port, flow_flags, 2497 - dst_cache, info); 2525 + rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, ifindex, 2526 + &saddr, pkey, src_port, dst_port, 2527 + tos, use_cache ? dst_cache : NULL); 2498 2528 if (IS_ERR(rt)) { 2499 2529 err = PTR_ERR(rt); 2500 2530 goto tx_error; ··· 2529 2561 } else if (err) { 2530 2562 if (info) { 2531 2563 struct ip_tunnel_info *unclone; 2532 - struct in_addr src, dst; 2533 2564 2534 2565 unclone = skb_tunnel_info_unclone(skb); 2535 2566 if (unlikely(!unclone)) 2536 2567 goto tx_error; 2537 2568 2538 - src = remote_ip.sin.sin_addr; 2539 - dst = local_ip.sin.sin_addr; 2540 - unclone->key.u.ipv4.src = src.s_addr; 2541 - unclone->key.u.ipv4.dst = dst.s_addr; 2569 + unclone->key.u.ipv4.src = pkey->u.ipv4.dst; 2570 + unclone->key.u.ipv4.dst = saddr; 2542 2571 } 2543 2572 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); 2544 2573 dst_release(ndst); ··· 2549 2584 if (err < 0) 2550 2585 goto tx_error; 2551 2586 2552 - udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr, 2553 - dst->sin.sin_addr.s_addr, tos, ttl, df, 2587 + udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, saddr, 2588 + pkey->u.ipv4.dst, tos, ttl, df, 2554 2589 src_port, dst_port, xnet, !udp_sum); 2555 2590 #if IS_ENABLED(CONFIG_IPV6) 2556 2591 } else { ··· 3251 3286 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 3252 3287 struct rtable *rt; 3253 3288 3254 - rt = 
vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, 3255 - info->key.u.ipv4.dst, 3256 - &info->key.u.ipv4.src, dport, sport, 3257 - info->key.flow_flags, &info->dst_cache, 3258 - info); 3289 + if (!sock4) 3290 + return -EIO; 3291 + 3292 + rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, 0, 3293 + &info->key.u.ipv4.src, 3294 + &info->key, 3295 + sport, dport, info->key.tos, 3296 + &info->dst_cache); 3259 3297 if (IS_ERR(rt)) 3260 3298 return PTR_ERR(rt); 3261 3299 ip_rt_put(rt);
-6
include/net/route.h
··· 136 136 137 137 struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, 138 138 const struct sock *sk); 139 - struct rtable *ip_route_output_tunnel(struct sk_buff *skb, 140 - struct net_device *dev, 141 - struct net *net, __be32 *saddr, 142 - const struct ip_tunnel_info *info, 143 - u8 protocol, bool use_cache); 144 - 145 139 struct dst_entry *ipv4_blackhole_route(struct net *net, 146 140 struct dst_entry *dst_orig); 147 141
+8
include/net/udp_tunnel.h
··· 162 162 163 163 void udp_tunnel_sock_release(struct socket *sock); 164 164 165 + struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb, 166 + struct net_device *dev, 167 + struct net *net, int oif, 168 + __be32 *saddr, 169 + const struct ip_tunnel_key *key, 170 + __be16 sport, __be16 dport, u8 tos, 171 + struct dst_cache *dst_cache); 172 + 165 173 struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, 166 174 __be16 flags, __be64 tunnel_id, 167 175 int md_size);
-48
net/ipv4/route.c
··· 2885 2885 } 2886 2886 EXPORT_SYMBOL_GPL(ip_route_output_flow); 2887 2887 2888 - struct rtable *ip_route_output_tunnel(struct sk_buff *skb, 2889 - struct net_device *dev, 2890 - struct net *net, __be32 *saddr, 2891 - const struct ip_tunnel_info *info, 2892 - u8 protocol, bool use_cache) 2893 - { 2894 - #ifdef CONFIG_DST_CACHE 2895 - struct dst_cache *dst_cache; 2896 - #endif 2897 - struct rtable *rt = NULL; 2898 - struct flowi4 fl4; 2899 - __u8 tos; 2900 - 2901 - #ifdef CONFIG_DST_CACHE 2902 - dst_cache = (struct dst_cache *)&info->dst_cache; 2903 - if (use_cache) { 2904 - rt = dst_cache_get_ip4(dst_cache, saddr); 2905 - if (rt) 2906 - return rt; 2907 - } 2908 - #endif 2909 - memset(&fl4, 0, sizeof(fl4)); 2910 - fl4.flowi4_mark = skb->mark; 2911 - fl4.flowi4_proto = protocol; 2912 - fl4.daddr = info->key.u.ipv4.dst; 2913 - fl4.saddr = info->key.u.ipv4.src; 2914 - tos = info->key.tos; 2915 - fl4.flowi4_tos = RT_TOS(tos); 2916 - 2917 - rt = ip_route_output_key(net, &fl4); 2918 - if (IS_ERR(rt)) { 2919 - netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr); 2920 - return ERR_PTR(-ENETUNREACH); 2921 - } 2922 - if (rt->dst.dev == dev) { /* is this necessary? */ 2923 - netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr); 2924 - ip_rt_put(rt); 2925 - return ERR_PTR(-ELOOP); 2926 - } 2927 - #ifdef CONFIG_DST_CACHE 2928 - if (use_cache) 2929 - dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); 2930 - #endif 2931 - *saddr = fl4.saddr; 2932 - return rt; 2933 - } 2934 - EXPORT_SYMBOL_GPL(ip_route_output_tunnel); 2935 - 2936 2888 /* called with rcu_read_lock held */ 2937 2889 static int rt_fill_info(struct net *net, __be32 dst, __be32 src, 2938 2890 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
+49
net/ipv4/udp_tunnel_core.c
··· 204 204 } 205 205 EXPORT_SYMBOL_GPL(udp_tun_rx_dst); 206 206 207 + struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb, 208 + struct net_device *dev, 209 + struct net *net, int oif, 210 + __be32 *saddr, 211 + const struct ip_tunnel_key *key, 212 + __be16 sport, __be16 dport, u8 tos, 213 + struct dst_cache *dst_cache) 214 + { 215 + struct rtable *rt = NULL; 216 + struct flowi4 fl4; 217 + 218 + #ifdef CONFIG_DST_CACHE 219 + if (dst_cache) { 220 + rt = dst_cache_get_ip4(dst_cache, saddr); 221 + if (rt) 222 + return rt; 223 + } 224 + #endif 225 + 226 + memset(&fl4, 0, sizeof(fl4)); 227 + fl4.flowi4_mark = skb->mark; 228 + fl4.flowi4_proto = IPPROTO_UDP; 229 + fl4.flowi4_oif = oif; 230 + fl4.daddr = key->u.ipv4.dst; 231 + fl4.saddr = key->u.ipv4.src; 232 + fl4.fl4_dport = dport; 233 + fl4.fl4_sport = sport; 234 + fl4.flowi4_tos = RT_TOS(tos); 235 + fl4.flowi4_flags = key->flow_flags; 236 + 237 + rt = ip_route_output_key(net, &fl4); 238 + if (IS_ERR(rt)) { 239 + netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr); 240 + return ERR_PTR(-ENETUNREACH); 241 + } 242 + if (rt->dst.dev == dev) { /* is this necessary? */ 243 + netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr); 244 + ip_rt_put(rt); 245 + return ERR_PTR(-ELOOP); 246 + } 247 + #ifdef CONFIG_DST_CACHE 248 + if (dst_cache) 249 + dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); 250 + #endif 251 + *saddr = fl4.saddr; 252 + return rt; 253 + } 254 + EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup); 255 + 207 256 MODULE_LICENSE("GPL");