Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.7-rc7 3409 lines 87 kB view raw
1/* 2 * VXLAN: Virtual eXtensible Local Area Network 3 * 4 * Copyright (c) 2012-2013 Vyatta Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13#include <linux/kernel.h> 14#include <linux/types.h> 15#include <linux/module.h> 16#include <linux/errno.h> 17#include <linux/slab.h> 18#include <linux/skbuff.h> 19#include <linux/rculist.h> 20#include <linux/netdevice.h> 21#include <linux/in.h> 22#include <linux/ip.h> 23#include <linux/udp.h> 24#include <linux/igmp.h> 25#include <linux/etherdevice.h> 26#include <linux/if_ether.h> 27#include <linux/if_vlan.h> 28#include <linux/hash.h> 29#include <linux/ethtool.h> 30#include <net/arp.h> 31#include <net/ndisc.h> 32#include <net/ip.h> 33#include <net/ip_tunnels.h> 34#include <net/icmp.h> 35#include <net/udp.h> 36#include <net/udp_tunnel.h> 37#include <net/rtnetlink.h> 38#include <net/route.h> 39#include <net/dsfield.h> 40#include <net/inet_ecn.h> 41#include <net/net_namespace.h> 42#include <net/netns/generic.h> 43#include <net/vxlan.h> 44#include <net/protocol.h> 45 46#if IS_ENABLED(CONFIG_IPV6) 47#include <net/ipv6.h> 48#include <net/addrconf.h> 49#include <net/ip6_tunnel.h> 50#include <net/ip6_checksum.h> 51#endif 52#include <net/dst_metadata.h> 53 54#define VXLAN_VERSION "0.1" 55 56#define PORT_HASH_BITS 8 57#define PORT_HASH_SIZE (1<<PORT_HASH_BITS) 58#define FDB_AGE_DEFAULT 300 /* 5 min */ 59#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ 60 61/* UDP port for VXLAN traffic. 62 * The IANA assigned port is 4789, but the Linux default is 8472 63 * for compatibility with early adopters. 64 */ 65static unsigned short vxlan_port __read_mostly = 8472; 66module_param_named(udp_port, vxlan_port, ushort, 0444); 67MODULE_PARM_DESC(udp_port, "Destination UDP port"); 68 69static bool log_ecn_error = true; 70module_param(log_ecn_error, bool, 0644); 71MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 72 73static int vxlan_net_id; 74static struct rtnl_link_ops vxlan_link_ops; 75 76static const u8 all_zeros_mac[ETH_ALEN + 2]; 77 78static int vxlan_sock_add(struct vxlan_dev *vxlan); 79 80/* per-network namespace private data for this module */ 81struct vxlan_net { 82 struct list_head vxlan_list; 83 struct hlist_head sock_list[PORT_HASH_SIZE]; 84 spinlock_t sock_lock; 85}; 86 87/* Forwarding table entry */ 88struct vxlan_fdb { 89 struct hlist_node hlist; /* linked list of entries */ 90 struct rcu_head rcu; 91 unsigned long updated; /* jiffies */ 92 unsigned long used; 93 struct list_head remotes; 94 u8 eth_addr[ETH_ALEN]; 95 u16 state; /* see ndm_state */ 96 u8 flags; /* see ndm_flags */ 97}; 98 99/* salt for hash table */ 100static u32 vxlan_salt __read_mostly; 101 102static inline bool vxlan_collect_metadata(struct vxlan_sock *vs) 103{ 104 return vs->flags & VXLAN_F_COLLECT_METADATA || 105 ip_tunnel_collect_metadata(); 106} 107 108#if IS_ENABLED(CONFIG_IPV6) 109static inline 110bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) 111{ 112 if (a->sa.sa_family != b->sa.sa_family) 113 return false; 114 if (a->sa.sa_family == AF_INET6) 115 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr); 116 else 117 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; 118} 119 120static inline bool vxlan_addr_any(const union vxlan_addr *ipa) 121{ 122 if (ipa->sa.sa_family == AF_INET6) 123 return ipv6_addr_any(&ipa->sin6.sin6_addr); 124 else 125 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); 126} 127 128static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) 129{ 130 if (ipa->sa.sa_family == AF_INET6) 131 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); 132 else 133 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); 134} 135 136static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) 137{ 138 if (nla_len(nla) >= sizeof(struct in6_addr)) { 139 ip->sin6.sin6_addr = nla_get_in6_addr(nla); 140 ip->sa.sa_family = AF_INET6; 141 return 0; 142 } else if (nla_len(nla) >= sizeof(__be32)) { 143 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla); 144 ip->sa.sa_family = AF_INET; 145 return 0; 146 } else { 147 return -EAFNOSUPPORT; 148 } 149} 150 151static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, 152 const union vxlan_addr *ip) 153{ 154 if (ip->sa.sa_family == AF_INET6) 155 return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr); 156 else 157 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr); 158} 159 160#else /* !CONFIG_IPV6 */ 161 162static inline 163bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) 164{ 165 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; 166} 167 168static inline bool vxlan_addr_any(const union vxlan_addr *ipa) 169{ 170 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); 171} 172 173static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) 174{ 175 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); 176} 177 178static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) 179{ 180 if (nla_len(nla) >= sizeof(struct in6_addr)) { 181 return -EAFNOSUPPORT; 182 } else if (nla_len(nla) >= sizeof(__be32)) { 183 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla); 184 ip->sa.sa_family = AF_INET; 185 return 0; 186 } else { 187 return -EAFNOSUPPORT; 188 } 189} 190 191static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, 192 const union vxlan_addr *ip) 193{ 194 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr); 195} 196#endif 197 198/* Virtual Network hash table head */ 199static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni) 200{ 201 return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)]; 202} 203 204/* Socket hash table head */ 205static inline struct hlist_head *vs_head(struct net *net, __be16 port) 206{ 207 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 208 209 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)]; 210} 211 212/* First remote destination for a forwarding entry. 213 * Guaranteed to be non-NULL because remotes are never deleted. 214 */ 215static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb) 216{ 217 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list); 218} 219 220static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) 221{ 222 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); 223} 224 225/* Find VXLAN socket based on network namespace, address family and UDP port 226 * and enabled unshareable flags. 227 */ 228static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, 229 __be16 port, u32 flags) 230{ 231 struct vxlan_sock *vs; 232 233 flags &= VXLAN_F_RCV_FLAGS; 234 235 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { 236 if (inet_sk(vs->sock->sk)->inet_sport == port && 237 vxlan_get_sk_family(vs) == family && 238 vs->flags == flags) 239 return vs; 240 } 241 return NULL; 242} 243 244static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni) 245{ 246 struct vxlan_dev *vxlan; 247 248 /* For flow based devices, map all packets to VNI 0 */ 249 if (vs->flags & VXLAN_F_COLLECT_METADATA) 250 vni = 0; 251 252 hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) { 253 if (vxlan->default_dst.remote_vni == vni) 254 return vxlan; 255 } 256 257 return NULL; 258} 259 260/* Look up VNI in a per net namespace table */ 261static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni, 262 sa_family_t family, __be16 port, 263 u32 flags) 264{ 265 struct vxlan_sock *vs; 266 267 vs = vxlan_find_sock(net, family, port, flags); 268 if (!vs) 269 return NULL; 270 271 return vxlan_vs_find_vni(vs, vni); 272} 273 274/* Fill in neighbour message in skbuff. */ 275static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, 276 const struct vxlan_fdb *fdb, 277 u32 portid, u32 seq, int type, unsigned int flags, 278 const struct vxlan_rdst *rdst) 279{ 280 unsigned long now = jiffies; 281 struct nda_cacheinfo ci; 282 struct nlmsghdr *nlh; 283 struct ndmsg *ndm; 284 bool send_ip, send_eth; 285 286 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); 287 if (nlh == NULL) 288 return -EMSGSIZE; 289 290 ndm = nlmsg_data(nlh); 291 memset(ndm, 0, sizeof(*ndm)); 292 293 send_eth = send_ip = true; 294 295 if (type == RTM_GETNEIGH) { 296 ndm->ndm_family = AF_INET; 297 send_ip = !vxlan_addr_any(&rdst->remote_ip); 298 send_eth = !is_zero_ether_addr(fdb->eth_addr); 299 } else 300 ndm->ndm_family = AF_BRIDGE; 301 ndm->ndm_state = fdb->state; 302 ndm->ndm_ifindex = vxlan->dev->ifindex; 303 ndm->ndm_flags = fdb->flags; 304 ndm->ndm_type = RTN_UNICAST; 305 306 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && 307 nla_put_s32(skb, NDA_LINK_NETNSID, 308 peernet2id_alloc(dev_net(vxlan->dev), vxlan->net))) 309 goto nla_put_failure; 310 311 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 312 goto nla_put_failure; 313 314 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip)) 315 goto nla_put_failure; 316 317 if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port && 318 nla_put_be16(skb, NDA_PORT, rdst->remote_port)) 319 goto nla_put_failure; 320 if (rdst->remote_vni != vxlan->default_dst.remote_vni && 321 nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) 322 goto nla_put_failure; 323 if (rdst->remote_ifindex && 324 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) 325 goto nla_put_failure; 326 327 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 328 ci.ndm_confirmed = 0; 329 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); 330 ci.ndm_refcnt = 0; 331 332 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 333 goto nla_put_failure; 334 335 nlmsg_end(skb, nlh); 336 return 0; 337 338nla_put_failure: 339 nlmsg_cancel(skb, nlh); 340 return -EMSGSIZE; 341} 342 343static inline size_t vxlan_nlmsg_size(void) 344{ 345 return NLMSG_ALIGN(sizeof(struct ndmsg)) 346 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 347 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */ 348 + nla_total_size(sizeof(__be16)) /* NDA_PORT */ 349 + nla_total_size(sizeof(__be32)) /* NDA_VNI */ 350 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */ 351 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */ 352 + nla_total_size(sizeof(struct nda_cacheinfo)); 353} 354 355static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, 356 struct vxlan_rdst *rd, int type) 357{ 358 struct net *net = dev_net(vxlan->dev); 359 struct sk_buff *skb; 360 int err = -ENOBUFS; 361 362 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC); 363 if (skb == NULL) 364 goto errout; 365 366 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); 367 if (err < 0) { 368 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 369 WARN_ON(err == -EMSGSIZE); 370 kfree_skb(skb); 371 goto errout; 372 } 373 374 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 375 return; 376errout: 377 if (err < 0) 378 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 379} 380 381static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) 382{ 383 struct vxlan_dev *vxlan = netdev_priv(dev); 384 struct vxlan_fdb f = { 385 .state = NUD_STALE, 386 }; 387 struct vxlan_rdst remote = { 388 .remote_ip = *ipa, /* goes to NDA_DST */ 389 .remote_vni = cpu_to_be32(VXLAN_N_VID), 390 }; 391 392 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); 393} 394 395static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) 396{ 397 struct vxlan_fdb f = { 398 .state = NUD_STALE, 399 }; 400 struct vxlan_rdst remote = { }; 401 402 memcpy(f.eth_addr, eth_addr, ETH_ALEN); 403 404 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); 405} 406 407/* Hash Ethernet address */ 408static u32 eth_hash(const unsigned char *addr) 409{ 410 u64 value = get_unaligned((u64 *)addr); 411 412 /* only want 6 bytes */ 413#ifdef __BIG_ENDIAN 414 value >>= 16; 415#else 416 value <<= 16; 417#endif 418 return hash_64(value, FDB_HASH_BITS); 419} 420 421/* Hash chain to use given mac address */ 422static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, 423 const u8 *mac) 424{ 425 return &vxlan->fdb_head[eth_hash(mac)]; 426} 427 428/* Look up Ethernet address in forwarding table */ 429static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, 430 const u8 *mac) 431{ 432 struct hlist_head *head = vxlan_fdb_head(vxlan, mac); 433 struct vxlan_fdb *f; 434 435 hlist_for_each_entry_rcu(f, head, hlist) { 436 if (ether_addr_equal(mac, f->eth_addr)) 437 return f; 438 } 439 440 return NULL; 441} 442 443static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, 444 const u8 *mac) 445{ 446 struct vxlan_fdb *f; 447 448 f = __vxlan_find_mac(vxlan, mac); 449 if (f) 450 f->used = jiffies; 451 452 return f; 453} 454 455/* caller should hold vxlan->hash_lock */ 456static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, 457 union vxlan_addr *ip, __be16 port, 458 __be32 vni, __u32 ifindex) 459{ 460 struct vxlan_rdst *rd; 461 462 list_for_each_entry(rd, &f->remotes, list) { 463 if (vxlan_addr_equal(&rd->remote_ip, ip) && 464 rd->remote_port == port && 465 rd->remote_vni == vni && 466 rd->remote_ifindex == ifindex) 467 return rd; 468 } 469 470 return NULL; 471} 472 473/* Replace destination of unicast mac */ 474static int vxlan_fdb_replace(struct vxlan_fdb *f, 475 union vxlan_addr *ip, __be16 port, __be32 vni, 476 __u32 ifindex) 477{ 478 struct vxlan_rdst *rd; 479 480 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 481 if (rd) 482 return 0; 483 484 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list); 485 if (!rd) 486 return 0; 487 488 dst_cache_reset(&rd->dst_cache); 489 rd->remote_ip = *ip; 490 rd->remote_port = port; 491 rd->remote_vni = vni; 492 rd->remote_ifindex = ifindex; 493 return 1; 494} 495 496/* Add/update destinations for multicast */ 497static int vxlan_fdb_append(struct vxlan_fdb *f, 498 union vxlan_addr *ip, __be16 port, __be32 vni, 499 __u32 ifindex, struct vxlan_rdst **rdp) 500{ 501 struct vxlan_rdst *rd; 502 503 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 504 if (rd) 505 return 0; 506 507 rd = kmalloc(sizeof(*rd), GFP_ATOMIC); 508 if (rd == NULL) 509 return -ENOBUFS; 510 511 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { 512 kfree(rd); 513 return -ENOBUFS; 514 } 515 516 rd->remote_ip = *ip; 517 rd->remote_port = port; 518 rd->remote_vni = vni; 519 rd->remote_ifindex = ifindex; 520 521 list_add_tail_rcu(&rd->list, &f->remotes); 522 523 *rdp = rd; 524 return 1; 525} 526 527static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, 528 unsigned int off, 529 struct vxlanhdr *vh, size_t hdrlen, 530 __be32 vni_field, 531 struct gro_remcsum *grc, 532 bool nopartial) 533{ 534 size_t start, offset; 535 536 if (skb->remcsum_offload) 537 return vh; 538 539 if (!NAPI_GRO_CB(skb)->csum_valid) 540 return NULL; 541 542 start = vxlan_rco_start(vni_field); 543 offset = start + vxlan_rco_offset(vni_field); 544 545 vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen, 546 start, offset, grc, nopartial); 547 548 skb->remcsum_offload = 1; 549 550 return vh; 551} 552 553static struct sk_buff **vxlan_gro_receive(struct sock *sk, 554 struct sk_buff **head, 555 struct sk_buff *skb) 556{ 557 struct sk_buff *p, **pp = NULL; 558 struct vxlanhdr *vh, *vh2; 559 unsigned int hlen, off_vx; 560 int flush = 1; 561 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk); 562 __be32 flags; 563 struct gro_remcsum grc; 564 565 skb_gro_remcsum_init(&grc); 566 567 off_vx = skb_gro_offset(skb); 568 hlen = off_vx + sizeof(*vh); 569 vh = skb_gro_header_fast(skb, off_vx); 570 if (skb_gro_header_hard(skb, hlen)) { 571 vh = skb_gro_header_slow(skb, hlen, off_vx); 572 if (unlikely(!vh)) 573 goto out; 574 } 575 576 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); 577 578 flags = vh->vx_flags; 579 580 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { 581 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr), 582 vh->vx_vni, &grc, 583 !!(vs->flags & 584 VXLAN_F_REMCSUM_NOPARTIAL)); 585 586 if (!vh) 587 goto out; 588 } 589 590 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ 591 592 for (p = *head; p; p = p->next) { 593 if (!NAPI_GRO_CB(p)->same_flow) 594 continue; 595 596 vh2 = (struct vxlanhdr *)(p->data + off_vx); 597 if (vh->vx_flags != vh2->vx_flags || 598 vh->vx_vni != vh2->vx_vni) { 599 NAPI_GRO_CB(p)->same_flow = 0; 600 continue; 601 } 602 } 603 604 pp = eth_gro_receive(head, skb); 605 flush = 0; 606 607out: 608 skb_gro_remcsum_cleanup(skb, &grc); 609 NAPI_GRO_CB(skb)->flush |= flush; 610 611 return pp; 612} 613 614static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) 615{ 616 /* Sets 'skb->inner_mac_header' since we are always called with 617 * 'skb->encapsulation' set. 618 */ 619 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 620} 621 622/* Notify netdevs that UDP port started listening */ 623static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) 624{ 625 struct net_device *dev; 626 struct sock *sk = vs->sock->sk; 627 struct net *net = sock_net(sk); 628 sa_family_t sa_family = vxlan_get_sk_family(vs); 629 __be16 port = inet_sk(sk)->inet_sport; 630 631 rcu_read_lock(); 632 for_each_netdev_rcu(net, dev) { 633 if (dev->netdev_ops->ndo_add_vxlan_port) 634 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, 635 port); 636 } 637 rcu_read_unlock(); 638} 639 640/* Notify netdevs that UDP port is no more listening */ 641static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) 642{ 643 struct net_device *dev; 644 struct sock *sk = vs->sock->sk; 645 struct net *net = sock_net(sk); 646 sa_family_t sa_family = vxlan_get_sk_family(vs); 647 __be16 port = inet_sk(sk)->inet_sport; 648 649 rcu_read_lock(); 650 for_each_netdev_rcu(net, dev) { 651 if (dev->netdev_ops->ndo_del_vxlan_port) 652 dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family, 653 port); 654 } 655 rcu_read_unlock(); 656} 657 658/* Add new entry to forwarding table -- assumes lock held */ 659static int vxlan_fdb_create(struct vxlan_dev *vxlan, 660 const u8 *mac, union vxlan_addr *ip, 661 __u16 state, __u16 flags, 662 __be16 port, __be32 vni, __u32 ifindex, 663 __u8 ndm_flags) 664{ 665 struct vxlan_rdst *rd = NULL; 666 struct vxlan_fdb *f; 667 int notify = 0; 668 669 f = __vxlan_find_mac(vxlan, mac); 670 if (f) { 671 if (flags & NLM_F_EXCL) { 672 netdev_dbg(vxlan->dev, 673 "lost race to create %pM\n", mac); 674 return -EEXIST; 675 } 676 if (f->state != state) { 677 f->state = state; 678 f->updated = jiffies; 679 notify = 1; 680 } 681 if (f->flags != ndm_flags) { 682 f->flags = ndm_flags; 683 f->updated = jiffies; 684 notify = 1; 685 } 686 if ((flags & NLM_F_REPLACE)) { 687 /* Only change unicasts */ 688 if (!(is_multicast_ether_addr(f->eth_addr) || 689 is_zero_ether_addr(f->eth_addr))) { 690 notify |= vxlan_fdb_replace(f, ip, port, vni, 691 ifindex); 692 } else 693 return -EOPNOTSUPP; 694 } 695 if ((flags & NLM_F_APPEND) && 696 (is_multicast_ether_addr(f->eth_addr) || 697 is_zero_ether_addr(f->eth_addr))) { 698 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex, 699 &rd); 700 701 if (rc < 0) 702 return rc; 703 notify |= rc; 704 } 705 } else { 706 if (!(flags & NLM_F_CREATE)) 707 return -ENOENT; 708 709 if (vxlan->cfg.addrmax && 710 vxlan->addrcnt >= vxlan->cfg.addrmax) 711 return -ENOSPC; 712 713 /* Disallow replace to add a multicast entry */ 714 if ((flags & NLM_F_REPLACE) && 715 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) 716 return -EOPNOTSUPP; 717 718 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 719 f = kmalloc(sizeof(*f), GFP_ATOMIC); 720 if (!f) 721 return -ENOMEM; 722 723 notify = 1; 724 f->state = state; 725 f->flags = ndm_flags; 726 f->updated = f->used = jiffies; 727 INIT_LIST_HEAD(&f->remotes); 728 memcpy(f->eth_addr, mac, ETH_ALEN); 729 730 vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 731 732 ++vxlan->addrcnt; 733 hlist_add_head_rcu(&f->hlist, 734 vxlan_fdb_head(vxlan, mac)); 735 } 736 737 if (notify) { 738 if (rd == NULL) 739 rd = first_remote_rtnl(f); 740 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH); 741 } 742 743 return 0; 744} 745 746static void vxlan_fdb_free(struct rcu_head *head) 747{ 748 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); 749 struct vxlan_rdst *rd, *nd; 750 751 list_for_each_entry_safe(rd, nd, &f->remotes, list) { 752 dst_cache_destroy(&rd->dst_cache); 753 kfree(rd); 754 } 755 kfree(f); 756} 757 758static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) 759{ 760 netdev_dbg(vxlan->dev, 761 "delete %pM\n", f->eth_addr); 762 763 --vxlan->addrcnt; 764 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); 765 766 hlist_del_rcu(&f->hlist); 767 call_rcu(&f->rcu, vxlan_fdb_free); 768} 769 770static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, 771 union vxlan_addr *ip, __be16 *port, __be32 *vni, 772 u32 *ifindex) 773{ 774 struct net *net = dev_net(vxlan->dev); 775 int err; 776 777 if (tb[NDA_DST]) { 778 err = vxlan_nla_get_addr(ip, tb[NDA_DST]); 779 if (err) 780 return err; 781 } else { 782 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; 783 if (remote->sa.sa_family == AF_INET) { 784 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY); 785 ip->sa.sa_family = AF_INET; 786#if IS_ENABLED(CONFIG_IPV6) 787 } else { 788 ip->sin6.sin6_addr = in6addr_any; 789 ip->sa.sa_family = AF_INET6; 790#endif 791 } 792 } 793 794 if (tb[NDA_PORT]) { 795 if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) 796 return -EINVAL; 797 *port = nla_get_be16(tb[NDA_PORT]); 798 } else { 799 *port = vxlan->cfg.dst_port; 800 } 801 802 if (tb[NDA_VNI]) { 803 if (nla_len(tb[NDA_VNI]) != sizeof(u32)) 804 return -EINVAL; 805 *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); 806 } else { 807 *vni = vxlan->default_dst.remote_vni; 808 } 809 810 if (tb[NDA_IFINDEX]) { 811 struct net_device *tdev; 812 813 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) 814 return -EINVAL; 815 *ifindex = nla_get_u32(tb[NDA_IFINDEX]); 816 tdev = __dev_get_by_index(net, *ifindex); 817 if (!tdev) 818 return -EADDRNOTAVAIL; 819 } else { 820 *ifindex = 0; 821 } 822 823 return 0; 824} 825 826/* Add static entry (via netlink) */ 827static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 828 struct net_device *dev, 829 const unsigned char *addr, u16 vid, u16 flags) 830{ 831 struct vxlan_dev *vxlan = netdev_priv(dev); 832 /* struct net *net = dev_net(vxlan->dev); */ 833 union vxlan_addr ip; 834 __be16 port; 835 __be32 vni; 836 u32 ifindex; 837 int err; 838 839 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { 840 pr_info("RTM_NEWNEIGH with invalid state %#x\n", 841 ndm->ndm_state); 842 return -EINVAL; 843 } 844 845 if (tb[NDA_DST] == NULL) 846 return -EINVAL; 847 848 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); 849 if (err) 850 return err; 851 852 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) 853 return -EAFNOSUPPORT; 854 855 spin_lock_bh(&vxlan->hash_lock); 856 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, 857 port, vni, ifindex, ndm->ndm_flags); 858 spin_unlock_bh(&vxlan->hash_lock); 859 860 return err; 861} 862 863/* Delete entry (via netlink) */ 864static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], 865 struct net_device *dev, 866 const unsigned char *addr, u16 vid) 867{ 868 struct vxlan_dev *vxlan = netdev_priv(dev); 869 struct vxlan_fdb *f; 870 struct vxlan_rdst *rd = NULL; 871 union vxlan_addr ip; 872 __be16 port; 873 __be32 vni; 874 u32 ifindex; 875 int err; 876 877 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); 878 if (err) 879 return err; 880 881 err = -ENOENT; 882 883 spin_lock_bh(&vxlan->hash_lock); 884 f = vxlan_find_mac(vxlan, addr); 885 if (!f) 886 goto out; 887 888 if (!vxlan_addr_any(&ip)) { 889 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex); 890 if (!rd) 891 goto out; 892 } 893 894 err = 0; 895 896 /* remove a destination if it's not the only one on the list, 897 * otherwise destroy the fdb entry 898 */ 899 if (rd && !list_is_singular(&f->remotes)) { 900 list_del_rcu(&rd->list); 901 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); 902 kfree_rcu(rd, rcu); 903 goto out; 904 } 905 906 vxlan_fdb_destroy(vxlan, f); 907 908out: 909 spin_unlock_bh(&vxlan->hash_lock); 910 911 return err; 912} 913 914/* Dump forwarding table */ 915static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 916 struct net_device *dev, 917 struct net_device *filter_dev, int idx) 918{ 919 struct vxlan_dev *vxlan = netdev_priv(dev); 920 unsigned int h; 921 922 for (h = 0; h < FDB_HASH_SIZE; ++h) { 923 struct vxlan_fdb *f; 924 int err; 925 926 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { 927 struct vxlan_rdst *rd; 928 929 list_for_each_entry_rcu(rd, &f->remotes, list) { 930 if (idx < cb->args[0]) 931 goto skip; 932 933 err = vxlan_fdb_info(skb, vxlan, f, 934 NETLINK_CB(cb->skb).portid, 935 cb->nlh->nlmsg_seq, 936 RTM_NEWNEIGH, 937 NLM_F_MULTI, rd); 938 if (err < 0) { 939 cb->args[1] = err; 940 goto out; 941 } 942skip: 943 ++idx; 944 } 945 } 946 } 947out: 948 return idx; 949} 950 951/* Watch incoming packets to learn mapping between Ethernet address 952 * and Tunnel endpoint. 953 * Return true if packet is bogus and should be dropped. 954 */ 955static bool vxlan_snoop(struct net_device *dev, 956 union vxlan_addr *src_ip, const u8 *src_mac) 957{ 958 struct vxlan_dev *vxlan = netdev_priv(dev); 959 struct vxlan_fdb *f; 960 961 f = vxlan_find_mac(vxlan, src_mac); 962 if (likely(f)) { 963 struct vxlan_rdst *rdst = first_remote_rcu(f); 964 965 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip))) 966 return false; 967 968 /* Don't migrate static entries, drop packets */ 969 if (f->state & NUD_NOARP) 970 return true; 971 972 if (net_ratelimit()) 973 netdev_info(dev, 974 "%pM migrated from %pIS to %pIS\n", 975 src_mac, &rdst->remote_ip.sa, &src_ip->sa); 976 977 rdst->remote_ip = *src_ip; 978 f->updated = jiffies; 979 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH); 980 } else { 981 /* learned new entry */ 982 spin_lock(&vxlan->hash_lock); 983 984 /* close off race between vxlan_flush and incoming packets */ 985 if (netif_running(dev)) 986 vxlan_fdb_create(vxlan, src_mac, src_ip, 987 NUD_REACHABLE, 988 NLM_F_EXCL|NLM_F_CREATE, 989 vxlan->cfg.dst_port, 990 vxlan->default_dst.remote_vni, 991 0, NTF_SELF); 992 spin_unlock(&vxlan->hash_lock); 993 } 994 995 return false; 996} 997 998/* See if multicast group is already in use by other ID */ 999static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) 1000{ 1001 struct vxlan_dev *vxlan; 1002 unsigned short family = dev->default_dst.remote_ip.sa.sa_family; 1003 1004 /* The vxlan_sock is only used by dev, leaving group has 1005 * no effect on other vxlan devices. 1006 */ 1007 if (family == AF_INET && dev->vn4_sock && 1008 atomic_read(&dev->vn4_sock->refcnt) == 1) 1009 return false; 1010#if IS_ENABLED(CONFIG_IPV6) 1011 if (family == AF_INET6 && dev->vn6_sock && 1012 atomic_read(&dev->vn6_sock->refcnt) == 1) 1013 return false; 1014#endif 1015 1016 list_for_each_entry(vxlan, &vn->vxlan_list, next) { 1017 if (!netif_running(vxlan->dev) || vxlan == dev) 1018 continue; 1019 1020 if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock) 1021 continue; 1022#if IS_ENABLED(CONFIG_IPV6) 1023 if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock) 1024 continue; 1025#endif 1026 1027 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip, 1028 &dev->default_dst.remote_ip)) 1029 continue; 1030 1031 if (vxlan->default_dst.remote_ifindex != 1032 dev->default_dst.remote_ifindex) 1033 continue; 1034 1035 return true; 1036 } 1037 1038 return false; 1039} 1040 1041static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) 1042{ 1043 struct vxlan_net *vn; 1044 1045 if (!vs) 1046 return false; 1047 if (!atomic_dec_and_test(&vs->refcnt)) 1048 return false; 1049 1050 vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); 1051 spin_lock(&vn->sock_lock); 1052 hlist_del_rcu(&vs->hlist); 1053 vxlan_notify_del_rx_port(vs); 1054 spin_unlock(&vn->sock_lock); 1055 1056 return true; 1057} 1058 1059static void vxlan_sock_release(struct vxlan_dev *vxlan) 1060{ 1061 bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock); 1062#if IS_ENABLED(CONFIG_IPV6) 1063 bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock); 1064#endif 1065 1066 synchronize_net(); 1067 1068 if (ipv4) { 1069 udp_tunnel_sock_release(vxlan->vn4_sock->sock); 1070 kfree(vxlan->vn4_sock); 1071 } 1072 1073#if IS_ENABLED(CONFIG_IPV6) 1074 if (ipv6) { 1075 udp_tunnel_sock_release(vxlan->vn6_sock->sock); 1076 kfree(vxlan->vn6_sock); 1077 } 1078#endif 1079} 1080 1081/* Update multicast group membership when first VNI on 1082 * multicast address is brought up 1083 */ 1084static int vxlan_igmp_join(struct vxlan_dev *vxlan) 1085{ 1086 struct sock *sk; 1087 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; 1088 int ifindex = vxlan->default_dst.remote_ifindex; 1089 int ret = -EINVAL; 1090 1091 if (ip->sa.sa_family == AF_INET) { 1092 struct ip_mreqn mreq = { 1093 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, 1094 .imr_ifindex = ifindex, 1095 }; 1096 1097 sk = vxlan->vn4_sock->sock->sk; 1098 lock_sock(sk); 1099 ret = ip_mc_join_group(sk, &mreq); 1100 release_sock(sk); 1101#if IS_ENABLED(CONFIG_IPV6) 1102 } else { 1103 sk = vxlan->vn6_sock->sock->sk; 1104 lock_sock(sk); 1105 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, 1106 &ip->sin6.sin6_addr); 1107 release_sock(sk); 1108#endif 1109 } 1110 1111 return ret; 1112} 1113 1114/* Inverse of vxlan_igmp_join when last VNI is brought down */ 1115static int vxlan_igmp_leave(struct vxlan_dev *vxlan) 1116{ 1117 struct sock *sk; 1118 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; 1119 int ifindex = vxlan->default_dst.remote_ifindex; 1120 int ret = -EINVAL; 1121 1122 if (ip->sa.sa_family == AF_INET) { 1123 struct ip_mreqn mreq = { 1124 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, 1125 .imr_ifindex = ifindex, 1126 }; 1127 1128 sk = vxlan->vn4_sock->sock->sk; 1129 lock_sock(sk); 1130 ret = ip_mc_leave_group(sk, &mreq); 1131 release_sock(sk); 1132#if IS_ENABLED(CONFIG_IPV6) 1133 } else { 1134 sk = vxlan->vn6_sock->sock->sk; 1135 lock_sock(sk); 1136 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, 1137 &ip->sin6.sin6_addr); 1138 release_sock(sk); 1139#endif 1140 } 1141 1142 return ret; 1143} 1144 1145static bool vxlan_remcsum(struct vxlanhdr *unparsed, 1146 struct sk_buff *skb, u32 vxflags) 1147{ 1148 size_t start, offset; 1149 1150 if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) 1151 goto out; 1152 1153 start = vxlan_rco_start(unparsed->vx_vni); 1154 offset = start + vxlan_rco_offset(unparsed->vx_vni); 1155 1156 if (!pskb_may_pull(skb, offset + sizeof(u16))) 1157 return false; 1158 1159 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, 1160 !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); 1161out: 1162 unparsed->vx_flags &= ~VXLAN_HF_RCO; 1163 unparsed->vx_vni &= VXLAN_VNI_MASK; 1164 return true; 1165} 1166 1167static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, 1168 struct sk_buff *skb, u32 vxflags, 1169 struct vxlan_metadata *md) 1170{ 1171 struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed; 1172 struct metadata_dst *tun_dst; 1173 1174 if (!(unparsed->vx_flags & VXLAN_HF_GBP)) 1175 goto out; 1176 1177 md->gbp = ntohs(gbp->policy_id); 1178 1179 tun_dst = (struct metadata_dst *)skb_dst(skb); 1180 if (tun_dst) { 1181 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; 1182 tun_dst->u.tun_info.options_len = sizeof(*md); 1183 } 1184 if (gbp->dont_learn) 1185 md->gbp |= VXLAN_GBP_DONT_LEARN; 1186 1187 if (gbp->policy_applied) 1188 md->gbp |= VXLAN_GBP_POLICY_APPLIED; 1189 1190 /* In flow-based mode, GBP is carried in dst_metadata */ 1191 if (!(vxflags & VXLAN_F_COLLECT_METADATA)) 1192 skb->mark = md->gbp; 1193out: 1194 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; 1195} 1196 1197static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, 1198 __be16 *protocol, 1199 struct sk_buff *skb, u32 vxflags) 1200{ 1201 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed; 1202 1203 /* Need to have Next Protocol set for interfaces in GPE mode. */ 1204 if (!gpe->np_applied) 1205 return false; 1206 /* "The initial version is 0. If a receiver does not support the 1207 * version indicated it MUST drop the packet. 1208 */ 1209 if (gpe->version != 0) 1210 return false; 1211 /* "When the O bit is set to 1, the packet is an OAM packet and OAM 1212 * processing MUST occur." However, we don't implement OAM 1213 * processing, thus drop the packet. 1214 */ 1215 if (gpe->oam_flag) 1216 return false; 1217 1218 switch (gpe->next_protocol) { 1219 case VXLAN_GPE_NP_IPV4: 1220 *protocol = htons(ETH_P_IP); 1221 break; 1222 case VXLAN_GPE_NP_IPV6: 1223 *protocol = htons(ETH_P_IPV6); 1224 break; 1225 case VXLAN_GPE_NP_ETHERNET: 1226 *protocol = htons(ETH_P_TEB); 1227 break; 1228 default: 1229 return false; 1230 } 1231 1232 unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS; 1233 return true; 1234} 1235 1236static bool vxlan_set_mac(struct vxlan_dev *vxlan, 1237 struct vxlan_sock *vs, 1238 struct sk_buff *skb) 1239{ 1240 union vxlan_addr saddr; 1241 1242 skb_reset_mac_header(skb); 1243 skb->protocol = eth_type_trans(skb, vxlan->dev); 1244 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1245 1246 /* Ignore packet loops (and multicast echo) */ 1247 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1248 return false; 1249 1250 /* Get address from the outer IP header */ 1251 if (vxlan_get_sk_family(vs) == AF_INET) { 1252 saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; 1253 saddr.sa.sa_family = AF_INET; 1254#if IS_ENABLED(CONFIG_IPV6) 1255 } else { 1256 saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr; 1257 saddr.sa.sa_family = AF_INET6; 1258#endif 1259 } 1260 1261 if ((vxlan->flags & VXLAN_F_LEARN) && 1262 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) 1263 return false; 1264 1265 return true; 1266} 1267 1268static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, 1269 struct sk_buff *skb) 1270{ 1271 int err = 0; 1272 1273 if (vxlan_get_sk_family(vs) == AF_INET) 1274 err = IP_ECN_decapsulate(oiph, skb); 1275#if IS_ENABLED(CONFIG_IPV6) 1276 else 1277 err = IP6_ECN_decapsulate(oiph, skb); 1278#endif 1279 1280 if (unlikely(err) && log_ecn_error) { 1281 if (vxlan_get_sk_family(vs) == AF_INET) 1282 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", 1283 &((struct iphdr *)oiph)->saddr, 1284 ((struct iphdr *)oiph)->tos); 1285 else 1286 net_info_ratelimited("non-ECT from %pI6\n", 1287 &((struct ipv6hdr *)oiph)->saddr); 1288 } 1289 return err <= 1; 1290} 1291 1292/* Callback from net/ipv4/udp.c to receive packets */ 1293static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) 1294{ 1295 struct pcpu_sw_netstats *stats; 1296 struct vxlan_dev *vxlan; 1297 struct vxlan_sock *vs; 1298 struct vxlanhdr unparsed; 1299 struct vxlan_metadata _md; 1300 struct vxlan_metadata *md = &_md; 1301 __be16 protocol = htons(ETH_P_TEB); 1302 bool raw_proto = false; 1303 void *oiph; 1304 1305 /* Need UDP and VXLAN header to be present */ 1306 if (!pskb_may_pull(skb, VXLAN_HLEN)) 1307 goto drop; 1308 1309 unparsed = *vxlan_hdr(skb); 1310 /* VNI flag always required to be set */ 1311 if (!(unparsed.vx_flags & VXLAN_HF_VNI)) { 1312 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", 1313 ntohl(vxlan_hdr(skb)->vx_flags), 1314 ntohl(vxlan_hdr(skb)->vx_vni)); 1315 /* Return non vxlan pkt */ 1316 goto drop; 1317 } 1318 unparsed.vx_flags &= ~VXLAN_HF_VNI; 1319 unparsed.vx_vni &= ~VXLAN_VNI_MASK; 1320 1321 vs = rcu_dereference_sk_user_data(sk); 1322 if (!vs) 1323 goto drop; 1324 1325 vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni)); 1326 if (!vxlan) 1327 goto drop; 1328 1329 /* For backwards compatibility, only allow reserved fields to be 1330 * used by VXLAN extensions if explicitly requested. 1331 */ 1332 if (vs->flags & VXLAN_F_GPE) { 1333 if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags)) 1334 goto drop; 1335 raw_proto = true; 1336 } 1337 1338 if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto, 1339 !net_eq(vxlan->net, dev_net(vxlan->dev)))) 1340 goto drop; 1341 1342 if (vxlan_collect_metadata(vs)) { 1343 __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); 1344 struct metadata_dst *tun_dst; 1345 1346 tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, 1347 vxlan_vni_to_tun_id(vni), sizeof(*md)); 1348 1349 if (!tun_dst) 1350 goto drop; 1351 1352 md = ip_tunnel_info_opts(&tun_dst->u.tun_info); 1353 1354 skb_dst_set(skb, (struct dst_entry *)tun_dst); 1355 } else { 1356 memset(md, 0, sizeof(*md)); 1357 } 1358 1359 if (vs->flags & VXLAN_F_REMCSUM_RX) 1360 if (!vxlan_remcsum(&unparsed, skb, vs->flags)) 1361 goto drop; 1362 if (vs->flags & VXLAN_F_GBP) 1363 vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md); 1364 /* Note that GBP and GPE can never be active together. This is 1365 * ensured in vxlan_dev_configure. 1366 */ 1367 1368 if (unparsed.vx_flags || unparsed.vx_vni) { 1369 /* If there are any unprocessed flags remaining treat 1370 * this as a malformed packet. This behavior diverges from 1371 * VXLAN RFC (RFC7348) which stipulates that bits in reserved 1372 * in reserved fields are to be ignored. The approach here 1373 * maintains compatibility with previous stack code, and also 1374 * is more robust and provides a little more security in 1375 * adding extensions to VXLAN. 1376 */ 1377 goto drop; 1378 } 1379 1380 if (!raw_proto) { 1381 if (!vxlan_set_mac(vxlan, vs, skb)) 1382 goto drop; 1383 } else { 1384 skb_reset_mac_header(skb); 1385 skb->dev = vxlan->dev; 1386 skb->pkt_type = PACKET_HOST; 1387 } 1388 1389 oiph = skb_network_header(skb); 1390 skb_reset_network_header(skb); 1391 1392 if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { 1393 ++vxlan->dev->stats.rx_frame_errors; 1394 ++vxlan->dev->stats.rx_errors; 1395 goto drop; 1396 } 1397 1398 stats = this_cpu_ptr(vxlan->dev->tstats); 1399 u64_stats_update_begin(&stats->syncp); 1400 stats->rx_packets++; 1401 stats->rx_bytes += skb->len; 1402 u64_stats_update_end(&stats->syncp); 1403 1404 gro_cells_receive(&vxlan->gro_cells, skb); 1405 return 0; 1406 1407drop: 1408 /* Consume bad packet */ 1409 kfree_skb(skb); 1410 return 0; 1411} 1412 1413static int arp_reduce(struct net_device *dev, struct sk_buff *skb) 1414{ 1415 struct vxlan_dev *vxlan = netdev_priv(dev); 1416 struct arphdr *parp; 1417 u8 *arpptr, *sha; 1418 __be32 sip, tip; 1419 struct neighbour *n; 1420 1421 if (dev->flags & IFF_NOARP) 1422 goto out; 1423 1424 if (!pskb_may_pull(skb, arp_hdr_len(dev))) { 1425 dev->stats.tx_dropped++; 1426 goto out; 1427 } 1428 parp = arp_hdr(skb); 1429 1430 if ((parp->ar_hrd != htons(ARPHRD_ETHER) && 1431 parp->ar_hrd != htons(ARPHRD_IEEE802)) || 1432 parp->ar_pro != htons(ETH_P_IP) || 1433 parp->ar_op != htons(ARPOP_REQUEST) || 1434 parp->ar_hln != dev->addr_len || 1435 parp->ar_pln != 4) 1436 goto out; 1437 arpptr = (u8 *)parp + sizeof(struct arphdr); 1438 sha = arpptr; 1439 arpptr += dev->addr_len; /* sha */ 1440 memcpy(&sip, arpptr, sizeof(sip)); 1441 arpptr += sizeof(sip); 1442 arpptr += dev->addr_len; /* tha */ 1443 memcpy(&tip, arpptr, sizeof(tip)); 1444 1445 if (ipv4_is_loopback(tip) || 1446 ipv4_is_multicast(tip)) 1447 goto out; 1448 1449 n = neigh_lookup(&arp_tbl, &tip, dev); 1450 1451 if (n) { 1452 struct vxlan_fdb *f; 1453 struct sk_buff *reply; 1454 1455 if (!(n->nud_state & NUD_CONNECTED)) { 1456 neigh_release(n); 1457 goto out; 1458 } 1459 1460 f = vxlan_find_mac(vxlan, n->ha); 1461 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 1462 /* bridge-local neighbor */ 1463 neigh_release(n); 1464 goto out; 1465 } 1466 1467 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 1468 n->ha, sha); 1469 1470 neigh_release(n); 1471 1472 if (reply == NULL) 1473 goto out; 1474 1475 skb_reset_mac_header(reply); 1476 __skb_pull(reply, skb_network_offset(reply)); 1477 reply->ip_summed = CHECKSUM_UNNECESSARY; 1478 reply->pkt_type = PACKET_HOST; 1479 1480 if (netif_rx_ni(reply) == NET_RX_DROP) 1481 dev->stats.rx_dropped++; 1482 } else if (vxlan->flags & VXLAN_F_L3MISS) { 1483 union vxlan_addr ipa = { 1484 .sin.sin_addr.s_addr = tip, 1485 .sin.sin_family = AF_INET, 1486 }; 1487 1488 vxlan_ip_miss(dev, &ipa); 1489 } 1490out: 1491 consume_skb(skb); 1492 return NETDEV_TX_OK; 1493} 1494 1495#if IS_ENABLED(CONFIG_IPV6) 1496static struct sk_buff *vxlan_na_create(struct sk_buff *request, 1497 struct neighbour *n, bool isrouter) 1498{ 1499 struct net_device *dev = request->dev; 1500 struct sk_buff *reply; 1501 struct nd_msg *ns, *na; 1502 struct ipv6hdr *pip6; 1503 u8 *daddr; 1504 int na_olen = 8; /* opt hdr + ETH_ALEN for target */ 1505 int ns_olen; 1506 int i, len; 1507 1508 if (dev == NULL) 1509 return NULL; 1510 1511 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + 1512 sizeof(*na) + na_olen + dev->needed_tailroom; 1513 reply = alloc_skb(len, GFP_ATOMIC); 1514 if (reply == NULL) 1515 return NULL; 1516 1517 reply->protocol = htons(ETH_P_IPV6); 1518 reply->dev = dev; 1519 skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); 1520 skb_push(reply, sizeof(struct ethhdr)); 1521 skb_reset_mac_header(reply); 1522 1523 ns = (struct nd_msg *)skb_transport_header(request); 1524 1525 daddr = eth_hdr(request)->h_source; 1526 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); 1527 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { 1528 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { 1529 daddr = ns->opt + i + sizeof(struct nd_opt_hdr); 1530 break; 1531 } 1532 } 1533 1534 /* Ethernet header */ 1535 ether_addr_copy(eth_hdr(reply)->h_dest, daddr); 1536 ether_addr_copy(eth_hdr(reply)->h_source, n->ha); 1537 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); 1538 reply->protocol = htons(ETH_P_IPV6); 1539 1540 skb_pull(reply, sizeof(struct ethhdr)); 1541 skb_reset_network_header(reply); 1542 skb_put(reply, sizeof(struct ipv6hdr)); 1543 1544 /* IPv6 header */ 1545 1546 pip6 = ipv6_hdr(reply); 1547 memset(pip6, 0, sizeof(struct ipv6hdr)); 1548 pip6->version = 6; 1549 pip6->priority = ipv6_hdr(request)->priority; 1550 pip6->nexthdr = IPPROTO_ICMPV6; 1551 pip6->hop_limit = 255; 1552 pip6->daddr = ipv6_hdr(request)->saddr; 1553 pip6->saddr = *(struct in6_addr *)n->primary_key; 1554 1555 skb_pull(reply, sizeof(struct ipv6hdr)); 1556 skb_reset_transport_header(reply); 1557 1558 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); 1559 1560 /* Neighbor Advertisement */ 1561 memset(na, 0, sizeof(*na)+na_olen); 1562 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; 1563 na->icmph.icmp6_router = isrouter; 1564 na->icmph.icmp6_override = 1; 1565 na->icmph.icmp6_solicited = 1; 1566 na->target = ns->target; 1567 ether_addr_copy(&na->opt[2], n->ha); 1568 na->opt[0] = ND_OPT_TARGET_LL_ADDR; 1569 na->opt[1] = na_olen >> 3; 1570 1571 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, 1572 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, 1573 csum_partial(na, sizeof(*na)+na_olen, 0)); 1574 1575 pip6->payload_len = htons(sizeof(*na)+na_olen); 1576 1577 skb_push(reply, sizeof(struct ipv6hdr)); 1578 1579 reply->ip_summed = CHECKSUM_UNNECESSARY; 1580 1581 return reply; 1582} 1583 1584static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) 1585{ 1586 struct vxlan_dev *vxlan = netdev_priv(dev); 1587 struct nd_msg *msg; 1588 const struct ipv6hdr *iphdr; 1589 const struct in6_addr *saddr, *daddr; 1590 struct neighbour *n; 1591 struct inet6_dev *in6_dev; 1592 1593 in6_dev = __in6_dev_get(dev); 1594 if (!in6_dev) 1595 goto out; 1596 1597 iphdr = ipv6_hdr(skb); 1598 saddr = &iphdr->saddr; 1599 daddr = &iphdr->daddr; 1600 1601 msg = (struct nd_msg *)skb_transport_header(skb); 1602 if (msg->icmph.icmp6_code != 0 || 1603 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) 1604 goto out; 1605 1606 if (ipv6_addr_loopback(daddr) || 1607 ipv6_addr_is_multicast(&msg->target)) 1608 goto out; 1609 1610 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); 1611 1612 if (n) { 1613 struct vxlan_fdb *f; 1614 struct sk_buff *reply; 1615 1616 if (!(n->nud_state & NUD_CONNECTED)) { 1617 neigh_release(n); 1618 goto out; 1619 } 1620 1621 f = vxlan_find_mac(vxlan, n->ha); 1622 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 1623 /* bridge-local neighbor */ 1624 neigh_release(n); 1625 goto out; 1626 } 1627 1628 reply = vxlan_na_create(skb, n, 1629 !!(f ? f->flags & NTF_ROUTER : 0)); 1630 1631 neigh_release(n); 1632 1633 if (reply == NULL) 1634 goto out; 1635 1636 if (netif_rx_ni(reply) == NET_RX_DROP) 1637 dev->stats.rx_dropped++; 1638 1639 } else if (vxlan->flags & VXLAN_F_L3MISS) { 1640 union vxlan_addr ipa = { 1641 .sin6.sin6_addr = msg->target, 1642 .sin6.sin6_family = AF_INET6, 1643 }; 1644 1645 vxlan_ip_miss(dev, &ipa); 1646 } 1647 1648out: 1649 consume_skb(skb); 1650 return NETDEV_TX_OK; 1651} 1652#endif 1653 1654static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) 1655{ 1656 struct vxlan_dev *vxlan = netdev_priv(dev); 1657 struct neighbour *n; 1658 1659 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) 1660 return false; 1661 1662 n = NULL; 1663 switch (ntohs(eth_hdr(skb)->h_proto)) { 1664 case ETH_P_IP: 1665 { 1666 struct iphdr *pip; 1667 1668 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 1669 return false; 1670 pip = ip_hdr(skb); 1671 n = neigh_lookup(&arp_tbl, &pip->daddr, dev); 1672 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { 1673 union vxlan_addr ipa = { 1674 .sin.sin_addr.s_addr = pip->daddr, 1675 .sin.sin_family = AF_INET, 1676 }; 1677 1678 vxlan_ip_miss(dev, &ipa); 1679 return false; 1680 } 1681 1682 break; 1683 } 1684#if IS_ENABLED(CONFIG_IPV6) 1685 case ETH_P_IPV6: 1686 { 1687 struct ipv6hdr *pip6; 1688 1689 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 1690 return false; 1691 pip6 = ipv6_hdr(skb); 1692 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev); 1693 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { 1694 union vxlan_addr ipa = { 1695 .sin6.sin6_addr = pip6->daddr, 1696 .sin6.sin6_family = AF_INET6, 1697 }; 1698 1699 vxlan_ip_miss(dev, &ipa); 1700 return false; 1701 } 1702 1703 break; 1704 } 1705#endif 1706 default: 1707 return false; 1708 } 1709 1710 if (n) { 1711 bool diff; 1712 1713 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha); 1714 if (diff) { 1715 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, 1716 dev->addr_len); 1717 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len); 1718 } 1719 neigh_release(n); 1720 return diff; 1721 } 1722 1723 return false; 1724} 1725 1726static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, 1727 struct vxlan_metadata *md) 1728{ 1729 struct vxlanhdr_gbp *gbp; 1730 1731 if (!md->gbp) 1732 return; 1733 1734 gbp = (struct vxlanhdr_gbp *)vxh; 1735 vxh->vx_flags |= VXLAN_HF_GBP; 1736 1737 if (md->gbp & VXLAN_GBP_DONT_LEARN) 1738 gbp->dont_learn = 1; 1739 1740 if (md->gbp & VXLAN_GBP_POLICY_APPLIED) 1741 gbp->policy_applied = 1; 1742 1743 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); 1744} 1745 1746static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags, 1747 __be16 protocol) 1748{ 1749 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh; 1750 1751 gpe->np_applied = 1; 1752 1753 switch (protocol) { 1754 case htons(ETH_P_IP): 1755 gpe->next_protocol = VXLAN_GPE_NP_IPV4; 1756 return 0; 1757 case htons(ETH_P_IPV6): 1758 gpe->next_protocol = VXLAN_GPE_NP_IPV6; 1759 return 0; 1760 case htons(ETH_P_TEB): 1761 gpe->next_protocol = VXLAN_GPE_NP_ETHERNET; 1762 return 0; 1763 } 1764 return -EPFNOSUPPORT; 1765} 1766 1767static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, 1768 int iphdr_len, __be32 vni, 1769 struct vxlan_metadata *md, u32 vxflags, 1770 bool udp_sum) 1771{ 1772 struct vxlanhdr *vxh; 1773 int min_headroom; 1774 int err; 1775 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; 1776 __be16 inner_protocol = htons(ETH_P_TEB); 1777 1778 if ((vxflags & VXLAN_F_REMCSUM_TX) && 1779 skb->ip_summed == CHECKSUM_PARTIAL) { 1780 int csum_start = skb_checksum_start_offset(skb); 1781 1782 if (csum_start <= VXLAN_MAX_REMCSUM_START && 1783 !(csum_start & VXLAN_RCO_SHIFT_MASK) && 1784 (skb->csum_offset == offsetof(struct udphdr, check) || 1785 skb->csum_offset == offsetof(struct tcphdr, check))) 1786 type |= SKB_GSO_TUNNEL_REMCSUM; 1787 } 1788 1789 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len 1790 + VXLAN_HLEN + iphdr_len 1791 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); 1792 1793 /* Need space for new headers (invalidates iph ptr) */ 1794 err = skb_cow_head(skb, min_headroom); 1795 if (unlikely(err)) 1796 goto out_free; 1797 1798 skb = vlan_hwaccel_push_inside(skb); 1799 if (WARN_ON(!skb)) 1800 return -ENOMEM; 1801 1802 err = iptunnel_handle_offloads(skb, type); 1803 if (err) 1804 goto out_free; 1805 1806 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1807 vxh->vx_flags = VXLAN_HF_VNI; 1808 vxh->vx_vni = vxlan_vni_field(vni); 1809 1810 if (type & SKB_GSO_TUNNEL_REMCSUM) { 1811 unsigned int start; 1812 1813 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr); 1814 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset); 1815 vxh->vx_flags |= VXLAN_HF_RCO; 1816 1817 if (!skb_is_gso(skb)) { 1818 skb->ip_summed = CHECKSUM_NONE; 1819 skb->encapsulation = 0; 1820 } 1821 } 1822 1823 if (vxflags & VXLAN_F_GBP) 1824 vxlan_build_gbp_hdr(vxh, vxflags, md); 1825 if (vxflags & VXLAN_F_GPE) { 1826 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol); 1827 if (err < 0) 1828 goto out_free; 1829 inner_protocol = skb->protocol; 1830 } 1831 1832 skb_set_inner_protocol(skb, inner_protocol); 1833 return 0; 1834 1835out_free: 1836 kfree_skb(skb); 1837 return err; 1838} 1839 1840static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, 1841 struct sk_buff *skb, int oif, u8 tos, 1842 __be32 daddr, __be32 *saddr, 1843 struct dst_cache *dst_cache, 1844 const struct ip_tunnel_info *info) 1845{ 1846 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1847 struct rtable *rt = NULL; 1848 struct flowi4 fl4; 1849 1850 if (tos && !info) 1851 use_cache = false; 1852 if (use_cache) { 1853 rt = dst_cache_get_ip4(dst_cache, saddr); 1854 if (rt) 1855 return rt; 1856 } 1857 1858 memset(&fl4, 0, sizeof(fl4)); 1859 fl4.flowi4_oif = oif; 1860 fl4.flowi4_tos = RT_TOS(tos); 1861 fl4.flowi4_mark = skb->mark; 1862 fl4.flowi4_proto = IPPROTO_UDP; 1863 fl4.daddr = daddr; 1864 fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; 1865 1866 rt = ip_route_output_key(vxlan->net, &fl4); 1867 if (!IS_ERR(rt)) { 1868 *saddr = fl4.saddr; 1869 if (use_cache) 1870 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); 1871 } 1872 return rt; 1873} 1874 1875#if IS_ENABLED(CONFIG_IPV6) 1876static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, 1877 struct sk_buff *skb, int oif, u8 tos, 1878 __be32 label, 1879 const struct in6_addr *daddr, 1880 struct in6_addr *saddr, 1881 struct dst_cache *dst_cache, 1882 const struct ip_tunnel_info *info) 1883{ 1884 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1885 struct dst_entry *ndst; 1886 struct flowi6 fl6; 1887 int err; 1888 1889 if (tos && !info) 1890 use_cache = false; 1891 if (use_cache) { 1892 ndst = dst_cache_get_ip6(dst_cache, saddr); 1893 if (ndst) 1894 return ndst; 1895 } 1896 1897 memset(&fl6, 0, sizeof(fl6)); 1898 fl6.flowi6_oif = oif; 1899 fl6.daddr = *daddr; 1900 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 1901 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); 1902 fl6.flowi6_mark = skb->mark; 1903 fl6.flowi6_proto = IPPROTO_UDP; 1904 1905 err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 1906 vxlan->vn6_sock->sock->sk, 1907 &ndst, &fl6); 1908 if (err < 0) 1909 return ERR_PTR(err); 1910 1911 *saddr = fl6.saddr; 1912 if (use_cache) 1913 dst_cache_set_ip6(dst_cache, ndst, saddr); 1914 return ndst; 1915} 1916#endif 1917 1918/* Bypass encapsulation if the destination is local */ 1919static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1920 struct vxlan_dev *dst_vxlan) 1921{ 1922 struct pcpu_sw_netstats *tx_stats, *rx_stats; 1923 union vxlan_addr loopback; 1924 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 1925 struct net_device *dev = skb->dev; 1926 int len = skb->len; 1927 1928 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 1929 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); 1930 skb->pkt_type = PACKET_HOST; 1931 skb->encapsulation = 0; 1932 skb->dev = dst_vxlan->dev; 1933 __skb_pull(skb, skb_network_offset(skb)); 1934 1935 if (remote_ip->sa.sa_family == AF_INET) { 1936 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 1937 loopback.sa.sa_family = AF_INET; 1938#if IS_ENABLED(CONFIG_IPV6) 1939 } else { 1940 loopback.sin6.sin6_addr = in6addr_loopback; 1941 loopback.sa.sa_family = AF_INET6; 1942#endif 1943 } 1944 1945 if (dst_vxlan->flags & VXLAN_F_LEARN) 1946 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source); 1947 1948 u64_stats_update_begin(&tx_stats->syncp); 1949 tx_stats->tx_packets++; 1950 tx_stats->tx_bytes += len; 1951 u64_stats_update_end(&tx_stats->syncp); 1952 1953 if (netif_rx(skb) == NET_RX_SUCCESS) { 1954 u64_stats_update_begin(&rx_stats->syncp); 1955 rx_stats->rx_packets++; 1956 rx_stats->rx_bytes += len; 1957 u64_stats_update_end(&rx_stats->syncp); 1958 } else { 1959 dev->stats.rx_dropped++; 1960 } 1961} 1962 1963static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, 1964 struct vxlan_rdst *rdst, bool did_rsc) 1965{ 1966 struct dst_cache *dst_cache; 1967 struct ip_tunnel_info *info; 1968 struct vxlan_dev *vxlan = netdev_priv(dev); 1969 struct sock *sk; 1970 struct rtable *rt = NULL; 1971 const struct iphdr *old_iph; 1972 union vxlan_addr *dst; 1973 union vxlan_addr remote_ip; 1974 struct vxlan_metadata _md; 1975 struct vxlan_metadata *md = &_md; 1976 __be16 src_port = 0, dst_port; 1977 __be32 vni, label; 1978 __be16 df = 0; 1979 __u8 tos, ttl; 1980 int err; 1981 u32 flags = vxlan->flags; 1982 bool udp_sum = false; 1983 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); 1984 1985 info = skb_tunnel_info(skb); 1986 1987 if (rdst) { 1988 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; 1989 vni = rdst->remote_vni; 1990 dst = &rdst->remote_ip; 1991 dst_cache = &rdst->dst_cache; 1992 } else { 1993 if (!info) { 1994 WARN_ONCE(1, "%s: Missing encapsulation instructions\n", 1995 dev->name); 1996 goto drop; 1997 } 1998 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; 1999 vni = vxlan_tun_id_to_vni(info->key.tun_id); 2000 remote_ip.sa.sa_family = ip_tunnel_info_af(info); 2001 if (remote_ip.sa.sa_family == AF_INET) 2002 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; 2003 else 2004 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; 2005 dst = &remote_ip; 2006 dst_cache = &info->dst_cache; 2007 } 2008 2009 if (vxlan_addr_any(dst)) { 2010 if (did_rsc) { 2011 /* short-circuited back to local bridge */ 2012 vxlan_encap_bypass(skb, vxlan, vxlan); 2013 return; 2014 } 2015 goto drop; 2016 } 2017 2018 old_iph = ip_hdr(skb); 2019 2020 ttl = vxlan->cfg.ttl; 2021 if (!ttl && vxlan_addr_multicast(dst)) 2022 ttl = 1; 2023 2024 tos = vxlan->cfg.tos; 2025 if (tos == 1) 2026 tos = ip_tunnel_get_dsfield(old_iph, skb); 2027 2028 label = vxlan->cfg.label; 2029 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2030 vxlan->cfg.port_max, true); 2031 2032 if (info) { 2033 ttl = info->key.ttl; 2034 tos = info->key.tos; 2035 label = info->key.label; 2036 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); 2037 2038 if (info->options_len) 2039 md = ip_tunnel_info_opts(info); 2040 } else { 2041 md->gbp = skb->mark; 2042 } 2043 2044 if (dst->sa.sa_family == AF_INET) { 2045 __be32 saddr; 2046 2047 if (!vxlan->vn4_sock) 2048 goto drop; 2049 sk = vxlan->vn4_sock->sock->sk; 2050 2051 rt = vxlan_get_route(vxlan, skb, 2052 rdst ? rdst->remote_ifindex : 0, tos, 2053 dst->sin.sin_addr.s_addr, &saddr, 2054 dst_cache, info); 2055 if (IS_ERR(rt)) { 2056 netdev_dbg(dev, "no route to %pI4\n", 2057 &dst->sin.sin_addr.s_addr); 2058 dev->stats.tx_carrier_errors++; 2059 goto tx_error; 2060 } 2061 2062 if (rt->dst.dev == dev) { 2063 netdev_dbg(dev, "circular route to %pI4\n", 2064 &dst->sin.sin_addr.s_addr); 2065 dev->stats.collisions++; 2066 goto rt_tx_error; 2067 } 2068 2069 /* Bypass encapsulation if the destination is local */ 2070 if (rt->rt_flags & RTCF_LOCAL && 2071 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2072 struct vxlan_dev *dst_vxlan; 2073 2074 ip_rt_put(rt); 2075 dst_vxlan = vxlan_find_vni(vxlan->net, vni, 2076 dst->sa.sa_family, dst_port, 2077 vxlan->flags); 2078 if (!dst_vxlan) 2079 goto tx_error; 2080 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 2081 return; 2082 } 2083 2084 if (!info) 2085 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); 2086 else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) 2087 df = htons(IP_DF); 2088 2089 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2090 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2091 err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr), 2092 vni, md, flags, udp_sum); 2093 if (err < 0) 2094 goto xmit_tx_error; 2095 2096 udp_tunnel_xmit_skb(rt, sk, skb, saddr, 2097 dst->sin.sin_addr.s_addr, tos, ttl, df, 2098 src_port, dst_port, xnet, !udp_sum); 2099#if IS_ENABLED(CONFIG_IPV6) 2100 } else { 2101 struct dst_entry *ndst; 2102 struct in6_addr saddr; 2103 u32 rt6i_flags; 2104 2105 if (!vxlan->vn6_sock) 2106 goto drop; 2107 sk = vxlan->vn6_sock->sock->sk; 2108 2109 ndst = vxlan6_get_route(vxlan, skb, 2110 rdst ? rdst->remote_ifindex : 0, tos, 2111 label, &dst->sin6.sin6_addr, &saddr, 2112 dst_cache, info); 2113 if (IS_ERR(ndst)) { 2114 netdev_dbg(dev, "no route to %pI6\n", 2115 &dst->sin6.sin6_addr); 2116 dev->stats.tx_carrier_errors++; 2117 goto tx_error; 2118 } 2119 2120 if (ndst->dev == dev) { 2121 netdev_dbg(dev, "circular route to %pI6\n", 2122 &dst->sin6.sin6_addr); 2123 dst_release(ndst); 2124 dev->stats.collisions++; 2125 goto tx_error; 2126 } 2127 2128 /* Bypass encapsulation if the destination is local */ 2129 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; 2130 if (rt6i_flags & RTF_LOCAL && 2131 !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2132 struct vxlan_dev *dst_vxlan; 2133 2134 dst_release(ndst); 2135 dst_vxlan = vxlan_find_vni(vxlan->net, vni, 2136 dst->sa.sa_family, dst_port, 2137 vxlan->flags); 2138 if (!dst_vxlan) 2139 goto tx_error; 2140 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 2141 return; 2142 } 2143 2144 if (!info) 2145 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2146 2147 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2148 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2149 skb_scrub_packet(skb, xnet); 2150 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), 2151 vni, md, flags, udp_sum); 2152 if (err < 0) { 2153 dst_release(ndst); 2154 return; 2155 } 2156 udp_tunnel6_xmit_skb(ndst, sk, skb, dev, 2157 &saddr, &dst->sin6.sin6_addr, tos, ttl, 2158 label, src_port, dst_port, !udp_sum); 2159#endif 2160 } 2161 2162 return; 2163 2164drop: 2165 dev->stats.tx_dropped++; 2166 goto tx_free; 2167 2168xmit_tx_error: 2169 /* skb is already freed. */ 2170 skb = NULL; 2171rt_tx_error: 2172 ip_rt_put(rt); 2173tx_error: 2174 dev->stats.tx_errors++; 2175tx_free: 2176 dev_kfree_skb(skb); 2177} 2178 2179/* Transmit local packets over Vxlan 2180 * 2181 * Outer IP header inherits ECN and DF from inner header. 2182 * Outer UDP destination is the VXLAN assigned port. 2183 * source port is based on hash of flow 2184 */ 2185static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) 2186{ 2187 struct vxlan_dev *vxlan = netdev_priv(dev); 2188 const struct ip_tunnel_info *info; 2189 struct ethhdr *eth; 2190 bool did_rsc = false; 2191 struct vxlan_rdst *rdst, *fdst = NULL; 2192 struct vxlan_fdb *f; 2193 2194 info = skb_tunnel_info(skb); 2195 2196 skb_reset_mac_header(skb); 2197 2198 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { 2199 if (info && info->mode & IP_TUNNEL_INFO_TX) 2200 vxlan_xmit_one(skb, dev, NULL, false); 2201 else 2202 kfree_skb(skb); 2203 return NETDEV_TX_OK; 2204 } 2205 2206 if (vxlan->flags & VXLAN_F_PROXY) { 2207 eth = eth_hdr(skb); 2208 if (ntohs(eth->h_proto) == ETH_P_ARP) 2209 return arp_reduce(dev, skb); 2210#if IS_ENABLED(CONFIG_IPV6) 2211 else if (ntohs(eth->h_proto) == ETH_P_IPV6 && 2212 pskb_may_pull(skb, sizeof(struct ipv6hdr) 2213 + sizeof(struct nd_msg)) && 2214 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { 2215 struct nd_msg *msg; 2216 2217 msg = (struct nd_msg *)skb_transport_header(skb); 2218 if (msg->icmph.icmp6_code == 0 && 2219 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) 2220 return neigh_reduce(dev, skb); 2221 } 2222#endif 2223 } 2224 2225 eth = eth_hdr(skb); 2226 f = vxlan_find_mac(vxlan, eth->h_dest); 2227 did_rsc = false; 2228 2229 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) && 2230 (ntohs(eth->h_proto) == ETH_P_IP || 2231 ntohs(eth->h_proto) == ETH_P_IPV6)) { 2232 did_rsc = route_shortcircuit(dev, skb); 2233 if (did_rsc) 2234 f = vxlan_find_mac(vxlan, eth->h_dest); 2235 } 2236 2237 if (f == NULL) { 2238 f = vxlan_find_mac(vxlan, all_zeros_mac); 2239 if (f == NULL) { 2240 if ((vxlan->flags & VXLAN_F_L2MISS) && 2241 !is_multicast_ether_addr(eth->h_dest)) 2242 vxlan_fdb_miss(vxlan, eth->h_dest); 2243 2244 dev->stats.tx_dropped++; 2245 kfree_skb(skb); 2246 return NETDEV_TX_OK; 2247 } 2248 } 2249 2250 list_for_each_entry_rcu(rdst, &f->remotes, list) { 2251 struct sk_buff *skb1; 2252 2253 if (!fdst) { 2254 fdst = rdst; 2255 continue; 2256 } 2257 skb1 = skb_clone(skb, GFP_ATOMIC); 2258 if (skb1) 2259 vxlan_xmit_one(skb1, dev, rdst, did_rsc); 2260 } 2261 2262 if (fdst) 2263 vxlan_xmit_one(skb, dev, fdst, did_rsc); 2264 else 2265 kfree_skb(skb); 2266 return NETDEV_TX_OK; 2267} 2268 2269/* Walk the forwarding table and purge stale entries */ 2270static void vxlan_cleanup(unsigned long arg) 2271{ 2272 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg; 2273 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL; 2274 unsigned int h; 2275 2276 if (!netif_running(vxlan->dev)) 2277 return; 2278 2279 for (h = 0; h < FDB_HASH_SIZE; ++h) { 2280 struct hlist_node *p, *n; 2281 2282 spin_lock_bh(&vxlan->hash_lock); 2283 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2284 struct vxlan_fdb *f 2285 = container_of(p, struct vxlan_fdb, hlist); 2286 unsigned long timeout; 2287 2288 if (f->state & NUD_PERMANENT) 2289 continue; 2290 2291 timeout = f->used + vxlan->cfg.age_interval * HZ; 2292 if (time_before_eq(timeout, jiffies)) { 2293 netdev_dbg(vxlan->dev, 2294 "garbage collect %pM\n", 2295 f->eth_addr); 2296 f->state = NUD_STALE; 2297 vxlan_fdb_destroy(vxlan, f); 2298 } else if (time_before(timeout, next_timer)) 2299 next_timer = timeout; 2300 } 2301 spin_unlock_bh(&vxlan->hash_lock); 2302 } 2303 2304 mod_timer(&vxlan->age_timer, next_timer); 2305} 2306 2307static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) 2308{ 2309 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2310 __be32 vni = vxlan->default_dst.remote_vni; 2311 2312 spin_lock(&vn->sock_lock); 2313 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); 2314 spin_unlock(&vn->sock_lock); 2315} 2316 2317/* Setup stats when device is created */ 2318static int vxlan_init(struct net_device *dev) 2319{ 2320 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2321 if (!dev->tstats) 2322 return -ENOMEM; 2323 2324 return 0; 2325} 2326 2327static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan) 2328{ 2329 struct vxlan_fdb *f; 2330 2331 spin_lock_bh(&vxlan->hash_lock); 2332 f = __vxlan_find_mac(vxlan, all_zeros_mac); 2333 if (f) 2334 vxlan_fdb_destroy(vxlan, f); 2335 spin_unlock_bh(&vxlan->hash_lock); 2336} 2337 2338static void vxlan_uninit(struct net_device *dev) 2339{ 2340 struct vxlan_dev *vxlan = netdev_priv(dev); 2341 2342 vxlan_fdb_delete_default(vxlan); 2343 2344 free_percpu(dev->tstats); 2345} 2346 2347/* Start ageing timer and join group when device is brought up */ 2348static int vxlan_open(struct net_device *dev) 2349{ 2350 struct vxlan_dev *vxlan = netdev_priv(dev); 2351 int ret; 2352 2353 ret = vxlan_sock_add(vxlan); 2354 if (ret < 0) 2355 return ret; 2356 2357 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { 2358 ret = vxlan_igmp_join(vxlan); 2359 if (ret == -EADDRINUSE) 2360 ret = 0; 2361 if (ret) { 2362 vxlan_sock_release(vxlan); 2363 return ret; 2364 } 2365 } 2366 2367 if (vxlan->cfg.age_interval) 2368 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); 2369 2370 return ret; 2371} 2372 2373/* Purge the forwarding table */ 2374static void vxlan_flush(struct vxlan_dev *vxlan) 2375{ 2376 unsigned int h; 2377 2378 spin_lock_bh(&vxlan->hash_lock); 2379 for (h = 0; h < FDB_HASH_SIZE; ++h) { 2380 struct hlist_node *p, *n; 2381 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2382 struct vxlan_fdb *f 2383 = container_of(p, struct vxlan_fdb, hlist); 2384 /* the all_zeros_mac entry is deleted at vxlan_uninit */ 2385 if (!is_zero_ether_addr(f->eth_addr)) 2386 vxlan_fdb_destroy(vxlan, f); 2387 } 2388 } 2389 spin_unlock_bh(&vxlan->hash_lock); 2390} 2391 2392/* Cleanup timer and forwarding table on shutdown */ 2393static int vxlan_stop(struct net_device *dev) 2394{ 2395 struct vxlan_dev *vxlan = netdev_priv(dev); 2396 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2397 int ret = 0; 2398 2399 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && 2400 !vxlan_group_used(vn, vxlan)) 2401 ret = vxlan_igmp_leave(vxlan); 2402 2403 del_timer_sync(&vxlan->age_timer); 2404 2405 vxlan_flush(vxlan); 2406 vxlan_sock_release(vxlan); 2407 2408 return ret; 2409} 2410 2411/* Stub, nothing needs to be done. */ 2412static void vxlan_set_multicast_list(struct net_device *dev) 2413{ 2414} 2415 2416static int __vxlan_change_mtu(struct net_device *dev, 2417 struct net_device *lowerdev, 2418 struct vxlan_rdst *dst, int new_mtu, bool strict) 2419{ 2420 int max_mtu = IP_MAX_MTU; 2421 2422 if (lowerdev) 2423 max_mtu = lowerdev->mtu; 2424 2425 if (dst->remote_ip.sa.sa_family == AF_INET6) 2426 max_mtu -= VXLAN6_HEADROOM; 2427 else 2428 max_mtu -= VXLAN_HEADROOM; 2429 2430 if (new_mtu < 68) 2431 return -EINVAL; 2432 2433 if (new_mtu > max_mtu) { 2434 if (strict) 2435 return -EINVAL; 2436 2437 new_mtu = max_mtu; 2438 } 2439 2440 dev->mtu = new_mtu; 2441 return 0; 2442} 2443 2444static int vxlan_change_mtu(struct net_device *dev, int new_mtu) 2445{ 2446 struct vxlan_dev *vxlan = netdev_priv(dev); 2447 struct vxlan_rdst *dst = &vxlan->default_dst; 2448 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, 2449 dst->remote_ifindex); 2450 return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true); 2451} 2452 2453static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 2454{ 2455 struct vxlan_dev *vxlan = netdev_priv(dev); 2456 struct ip_tunnel_info *info = skb_tunnel_info(skb); 2457 __be16 sport, dport; 2458 2459 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2460 vxlan->cfg.port_max, true); 2461 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; 2462 2463 if (ip_tunnel_info_af(info) == AF_INET) { 2464 struct rtable *rt; 2465 2466 if (!vxlan->vn4_sock) 2467 return -EINVAL; 2468 rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, 2469 info->key.u.ipv4.dst, 2470 &info->key.u.ipv4.src, NULL, info); 2471 if (IS_ERR(rt)) 2472 return PTR_ERR(rt); 2473 ip_rt_put(rt); 2474 } else { 2475#if IS_ENABLED(CONFIG_IPV6) 2476 struct dst_entry *ndst; 2477 2478 if (!vxlan->vn6_sock) 2479 return -EINVAL; 2480 ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, 2481 info->key.label, &info->key.u.ipv6.dst, 2482 &info->key.u.ipv6.src, NULL, info); 2483 if (IS_ERR(ndst)) 2484 return PTR_ERR(ndst); 2485 dst_release(ndst); 2486#else /* !CONFIG_IPV6 */ 2487 return -EPFNOSUPPORT; 2488#endif 2489 } 2490 info->key.tp_src = sport; 2491 info->key.tp_dst = dport; 2492 return 0; 2493} 2494 2495static const struct net_device_ops vxlan_netdev_ether_ops = { 2496 .ndo_init = vxlan_init, 2497 .ndo_uninit = vxlan_uninit, 2498 .ndo_open = vxlan_open, 2499 .ndo_stop = vxlan_stop, 2500 .ndo_start_xmit = vxlan_xmit, 2501 .ndo_get_stats64 = ip_tunnel_get_stats64, 2502 .ndo_set_rx_mode = vxlan_set_multicast_list, 2503 .ndo_change_mtu = vxlan_change_mtu, 2504 .ndo_validate_addr = eth_validate_addr, 2505 .ndo_set_mac_address = eth_mac_addr, 2506 .ndo_fdb_add = vxlan_fdb_add, 2507 .ndo_fdb_del = vxlan_fdb_delete, 2508 .ndo_fdb_dump = vxlan_fdb_dump, 2509 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 2510}; 2511 2512static const struct net_device_ops vxlan_netdev_raw_ops = { 2513 .ndo_init = vxlan_init, 2514 .ndo_uninit = vxlan_uninit, 2515 .ndo_open = vxlan_open, 2516 .ndo_stop = vxlan_stop, 2517 .ndo_start_xmit = vxlan_xmit, 2518 .ndo_get_stats64 = ip_tunnel_get_stats64, 2519 .ndo_change_mtu = vxlan_change_mtu, 2520 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 2521}; 2522 2523/* Info for udev, that this is a virtual tunnel endpoint */ 2524static struct device_type vxlan_type = { 2525 .name = "vxlan", 2526}; 2527 2528/* Calls the ndo_add_vxlan_port of the caller in order to 2529 * supply the listening VXLAN udp ports. Callers are expected 2530 * to implement the ndo_add_vxlan_port. 2531 */ 2532static void vxlan_push_rx_ports(struct net_device *dev) 2533{ 2534 struct vxlan_sock *vs; 2535 struct net *net = dev_net(dev); 2536 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2537 sa_family_t sa_family; 2538 __be16 port; 2539 unsigned int i; 2540 2541 if (!dev->netdev_ops->ndo_add_vxlan_port) 2542 return; 2543 2544 spin_lock(&vn->sock_lock); 2545 for (i = 0; i < PORT_HASH_SIZE; ++i) { 2546 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { 2547 port = inet_sk(vs->sock->sk)->inet_sport; 2548 sa_family = vxlan_get_sk_family(vs); 2549 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, 2550 port); 2551 } 2552 } 2553 spin_unlock(&vn->sock_lock); 2554} 2555 2556/* Initialize the device structure. */ 2557static void vxlan_setup(struct net_device *dev) 2558{ 2559 struct vxlan_dev *vxlan = netdev_priv(dev); 2560 unsigned int h; 2561 2562 eth_hw_addr_random(dev); 2563 ether_setup(dev); 2564 2565 dev->destructor = free_netdev; 2566 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 2567 2568 dev->features |= NETIF_F_LLTX; 2569 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2570 dev->features |= NETIF_F_RXCSUM; 2571 dev->features |= NETIF_F_GSO_SOFTWARE; 2572 2573 dev->vlan_features = dev->features; 2574 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2575 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 2576 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2577 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2578 netif_keep_dst(dev); 2579 dev->priv_flags |= IFF_NO_QUEUE; 2580 2581 INIT_LIST_HEAD(&vxlan->next); 2582 spin_lock_init(&vxlan->hash_lock); 2583 2584 init_timer_deferrable(&vxlan->age_timer); 2585 vxlan->age_timer.function = vxlan_cleanup; 2586 vxlan->age_timer.data = (unsigned long) vxlan; 2587 2588 vxlan->cfg.dst_port = htons(vxlan_port); 2589 2590 vxlan->dev = dev; 2591 2592 gro_cells_init(&vxlan->gro_cells, dev); 2593 2594 for (h = 0; h < FDB_HASH_SIZE; ++h) 2595 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); 2596} 2597 2598static void vxlan_ether_setup(struct net_device *dev) 2599{ 2600 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 2601 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 2602 dev->netdev_ops = &vxlan_netdev_ether_ops; 2603} 2604 2605static void vxlan_raw_setup(struct net_device *dev) 2606{ 2607 dev->header_ops = NULL; 2608 dev->type = ARPHRD_NONE; 2609 dev->hard_header_len = 0; 2610 dev->addr_len = 0; 2611 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 2612 dev->netdev_ops = &vxlan_netdev_raw_ops; 2613} 2614 2615static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { 2616 [IFLA_VXLAN_ID] = { .type = NLA_U32 }, 2617 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 2618 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, 2619 [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, 2620 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 2621 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, 2622 [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, 2623 [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, 2624 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 }, 2625 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, 2626 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, 2627 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, 2628 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, 2629 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 }, 2630 [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, 2631 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, 2632 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, 2633 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 }, 2634 [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, 2635 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, 2636 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, 2637 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, 2638 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 }, 2639 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 }, 2640 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, 2641 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, 2642 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, 2643}; 2644 2645static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 2646{ 2647 if (tb[IFLA_ADDRESS]) { 2648 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { 2649 pr_debug("invalid link address (not ethernet)\n"); 2650 return -EINVAL; 2651 } 2652 2653 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { 2654 pr_debug("invalid all zero ethernet address\n"); 2655 return -EADDRNOTAVAIL; 2656 } 2657 } 2658 2659 if (!data) 2660 return -EINVAL; 2661 2662 if (data[IFLA_VXLAN_ID]) { 2663 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); 2664 if (id >= VXLAN_VID_MASK) 2665 return -ERANGE; 2666 } 2667 2668 if (data[IFLA_VXLAN_PORT_RANGE]) { 2669 const struct ifla_vxlan_port_range *p 2670 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 2671 2672 if (ntohs(p->high) < ntohs(p->low)) { 2673 pr_debug("port range %u .. %u not valid\n", 2674 ntohs(p->low), ntohs(p->high)); 2675 return -EINVAL; 2676 } 2677 } 2678 2679 return 0; 2680} 2681 2682static void vxlan_get_drvinfo(struct net_device *netdev, 2683 struct ethtool_drvinfo *drvinfo) 2684{ 2685 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version)); 2686 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver)); 2687} 2688 2689static const struct ethtool_ops vxlan_ethtool_ops = { 2690 .get_drvinfo = vxlan_get_drvinfo, 2691 .get_link = ethtool_op_get_link, 2692}; 2693 2694static struct socket *vxlan_create_sock(struct net *net, bool ipv6, 2695 __be16 port, u32 flags) 2696{ 2697 struct socket *sock; 2698 struct udp_port_cfg udp_conf; 2699 int err; 2700 2701 memset(&udp_conf, 0, sizeof(udp_conf)); 2702 2703 if (ipv6) { 2704 udp_conf.family = AF_INET6; 2705 udp_conf.use_udp6_rx_checksums = 2706 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); 2707 udp_conf.ipv6_v6only = 1; 2708 } else { 2709 udp_conf.family = AF_INET; 2710 } 2711 2712 udp_conf.local_udp_port = port; 2713 2714 /* Open UDP socket */ 2715 err = udp_sock_create(net, &udp_conf, &sock); 2716 if (err < 0) 2717 return ERR_PTR(err); 2718 2719 return sock; 2720} 2721 2722/* Create new listen socket if needed */ 2723static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, 2724 __be16 port, u32 flags) 2725{ 2726 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2727 struct vxlan_sock *vs; 2728 struct socket *sock; 2729 unsigned int h; 2730 struct udp_tunnel_sock_cfg tunnel_cfg; 2731 2732 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 2733 if (!vs) 2734 return ERR_PTR(-ENOMEM); 2735 2736 for (h = 0; h < VNI_HASH_SIZE; ++h) 2737 INIT_HLIST_HEAD(&vs->vni_list[h]); 2738 2739 sock = vxlan_create_sock(net, ipv6, port, flags); 2740 if (IS_ERR(sock)) { 2741 pr_info("Cannot bind port %d, err=%ld\n", ntohs(port), 2742 PTR_ERR(sock)); 2743 kfree(vs); 2744 return ERR_CAST(sock); 2745 } 2746 2747 vs->sock = sock; 2748 atomic_set(&vs->refcnt, 1); 2749 vs->flags = (flags & VXLAN_F_RCV_FLAGS); 2750 2751 spin_lock(&vn->sock_lock); 2752 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2753 vxlan_notify_add_rx_port(vs); 2754 spin_unlock(&vn->sock_lock); 2755 2756 /* Mark socket as an encapsulation socket. */ 2757 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); 2758 tunnel_cfg.sk_user_data = vs; 2759 tunnel_cfg.encap_type = 1; 2760 tunnel_cfg.encap_rcv = vxlan_rcv; 2761 tunnel_cfg.encap_destroy = NULL; 2762 tunnel_cfg.gro_receive = vxlan_gro_receive; 2763 tunnel_cfg.gro_complete = vxlan_gro_complete; 2764 2765 setup_udp_tunnel_sock(net, sock, &tunnel_cfg); 2766 2767 return vs; 2768} 2769 2770static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) 2771{ 2772 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2773 struct vxlan_sock *vs = NULL; 2774 2775 if (!vxlan->cfg.no_share) { 2776 spin_lock(&vn->sock_lock); 2777 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, 2778 vxlan->cfg.dst_port, vxlan->flags); 2779 if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) { 2780 spin_unlock(&vn->sock_lock); 2781 return -EBUSY; 2782 } 2783 spin_unlock(&vn->sock_lock); 2784 } 2785 if (!vs) 2786 vs = vxlan_socket_create(vxlan->net, ipv6, 2787 vxlan->cfg.dst_port, vxlan->flags); 2788 if (IS_ERR(vs)) 2789 return PTR_ERR(vs); 2790#if IS_ENABLED(CONFIG_IPV6) 2791 if (ipv6) 2792 vxlan->vn6_sock = vs; 2793 else 2794#endif 2795 vxlan->vn4_sock = vs; 2796 vxlan_vs_add_dev(vs, vxlan); 2797 return 0; 2798} 2799 2800static int vxlan_sock_add(struct vxlan_dev *vxlan) 2801{ 2802 bool ipv6 = vxlan->flags & VXLAN_F_IPV6; 2803 bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; 2804 int ret = 0; 2805 2806 vxlan->vn4_sock = NULL; 2807#if IS_ENABLED(CONFIG_IPV6) 2808 vxlan->vn6_sock = NULL; 2809 if (ipv6 || metadata) 2810 ret = __vxlan_sock_add(vxlan, true); 2811#endif 2812 if (!ret && (!ipv6 || metadata)) 2813 ret = __vxlan_sock_add(vxlan, false); 2814 if (ret < 0) 2815 vxlan_sock_release(vxlan); 2816 return ret; 2817} 2818 2819static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, 2820 struct vxlan_config *conf) 2821{ 2822 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); 2823 struct vxlan_dev *vxlan = netdev_priv(dev), *tmp; 2824 struct vxlan_rdst *dst = &vxlan->default_dst; 2825 unsigned short needed_headroom = ETH_HLEN; 2826 int err; 2827 bool use_ipv6 = false; 2828 __be16 default_port = vxlan->cfg.dst_port; 2829 struct net_device *lowerdev = NULL; 2830 2831 if (conf->flags & VXLAN_F_GPE) { 2832 if (conf->flags & ~VXLAN_F_ALLOWED_GPE) 2833 return -EINVAL; 2834 /* For now, allow GPE only together with COLLECT_METADATA. 2835 * This can be relaxed later; in such case, the other side 2836 * of the PtP link will have to be provided. 2837 */ 2838 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) 2839 return -EINVAL; 2840 2841 vxlan_raw_setup(dev); 2842 } else { 2843 vxlan_ether_setup(dev); 2844 } 2845 2846 vxlan->net = src_net; 2847 2848 dst->remote_vni = conf->vni; 2849 2850 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip)); 2851 2852 /* Unless IPv6 is explicitly requested, assume IPv4 */ 2853 if (!dst->remote_ip.sa.sa_family) 2854 dst->remote_ip.sa.sa_family = AF_INET; 2855 2856 if (dst->remote_ip.sa.sa_family == AF_INET6 || 2857 vxlan->cfg.saddr.sa.sa_family == AF_INET6) { 2858 if (!IS_ENABLED(CONFIG_IPV6)) 2859 return -EPFNOSUPPORT; 2860 use_ipv6 = true; 2861 vxlan->flags |= VXLAN_F_IPV6; 2862 } 2863 2864 if (conf->label && !use_ipv6) { 2865 pr_info("label only supported in use with IPv6\n"); 2866 return -EINVAL; 2867 } 2868 2869 if (conf->remote_ifindex) { 2870 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); 2871 dst->remote_ifindex = conf->remote_ifindex; 2872 2873 if (!lowerdev) { 2874 pr_info("ifindex %d does not exist\n", dst->remote_ifindex); 2875 return -ENODEV; 2876 } 2877 2878#if IS_ENABLED(CONFIG_IPV6) 2879 if (use_ipv6) { 2880 struct inet6_dev *idev = __in6_dev_get(lowerdev); 2881 if (idev && idev->cnf.disable_ipv6) { 2882 pr_info("IPv6 is disabled via sysctl\n"); 2883 return -EPERM; 2884 } 2885 } 2886#endif 2887 2888 if (!conf->mtu) 2889 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2890 2891 needed_headroom = lowerdev->hard_header_len; 2892 } 2893 2894 if (conf->mtu) { 2895 err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); 2896 if (err) 2897 return err; 2898 } 2899 2900 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) 2901 needed_headroom += VXLAN6_HEADROOM; 2902 else 2903 needed_headroom += VXLAN_HEADROOM; 2904 dev->needed_headroom = needed_headroom; 2905 2906 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2907 if (!vxlan->cfg.dst_port) { 2908 if (conf->flags & VXLAN_F_GPE) 2909 vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */ 2910 else 2911 vxlan->cfg.dst_port = default_port; 2912 } 2913 vxlan->flags |= conf->flags; 2914 2915 if (!vxlan->cfg.age_interval) 2916 vxlan->cfg.age_interval = FDB_AGE_DEFAULT; 2917 2918 list_for_each_entry(tmp, &vn->vxlan_list, next) { 2919 if (tmp->cfg.vni == conf->vni && 2920 (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 || 2921 tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && 2922 tmp->cfg.dst_port == vxlan->cfg.dst_port && 2923 (tmp->flags & VXLAN_F_RCV_FLAGS) == 2924 (vxlan->flags & VXLAN_F_RCV_FLAGS)) 2925 return -EEXIST; 2926 } 2927 2928 dev->ethtool_ops = &vxlan_ethtool_ops; 2929 2930 /* create an fdb entry for a valid default destination */ 2931 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { 2932 err = vxlan_fdb_create(vxlan, all_zeros_mac, 2933 &vxlan->default_dst.remote_ip, 2934 NUD_REACHABLE|NUD_PERMANENT, 2935 NLM_F_EXCL|NLM_F_CREATE, 2936 vxlan->cfg.dst_port, 2937 vxlan->default_dst.remote_vni, 2938 vxlan->default_dst.remote_ifindex, 2939 NTF_SELF); 2940 if (err) 2941 return err; 2942 } 2943 2944 err = register_netdevice(dev); 2945 if (err) { 2946 vxlan_fdb_delete_default(vxlan); 2947 return err; 2948 } 2949 2950 list_add(&vxlan->next, &vn->vxlan_list); 2951 2952 return 0; 2953} 2954 2955static int vxlan_newlink(struct net *src_net, struct net_device *dev, 2956 struct nlattr *tb[], struct nlattr *data[]) 2957{ 2958 struct vxlan_config conf; 2959 int err; 2960 2961 memset(&conf, 0, sizeof(conf)); 2962 2963 if (data[IFLA_VXLAN_ID]) 2964 conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); 2965 2966 if (data[IFLA_VXLAN_GROUP]) { 2967 conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); 2968 } else if (data[IFLA_VXLAN_GROUP6]) { 2969 if (!IS_ENABLED(CONFIG_IPV6)) 2970 return -EPFNOSUPPORT; 2971 2972 conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); 2973 conf.remote_ip.sa.sa_family = AF_INET6; 2974 } 2975 2976 if (data[IFLA_VXLAN_LOCAL]) { 2977 conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); 2978 conf.saddr.sa.sa_family = AF_INET; 2979 } else if (data[IFLA_VXLAN_LOCAL6]) { 2980 if (!IS_ENABLED(CONFIG_IPV6)) 2981 return -EPFNOSUPPORT; 2982 2983 /* TODO: respect scope id */ 2984 conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); 2985 conf.saddr.sa.sa_family = AF_INET6; 2986 } 2987 2988 if (data[IFLA_VXLAN_LINK]) 2989 conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); 2990 2991 if (data[IFLA_VXLAN_TOS]) 2992 conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 2993 2994 if (data[IFLA_VXLAN_TTL]) 2995 conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); 2996 2997 if (data[IFLA_VXLAN_LABEL]) 2998 conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & 2999 IPV6_FLOWLABEL_MASK; 3000 3001 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) 3002 conf.flags |= VXLAN_F_LEARN; 3003 3004 if (data[IFLA_VXLAN_AGEING]) 3005 conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); 3006 3007 if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY])) 3008 conf.flags |= VXLAN_F_PROXY; 3009 3010 if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC])) 3011 conf.flags |= VXLAN_F_RSC; 3012 3013 if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS])) 3014 conf.flags |= VXLAN_F_L2MISS; 3015 3016 if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS])) 3017 conf.flags |= VXLAN_F_L3MISS; 3018 3019 if (data[IFLA_VXLAN_LIMIT]) 3020 conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); 3021 3022 if (data[IFLA_VXLAN_COLLECT_METADATA] && 3023 nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) 3024 conf.flags |= VXLAN_F_COLLECT_METADATA; 3025 3026 if (data[IFLA_VXLAN_PORT_RANGE]) { 3027 const struct ifla_vxlan_port_range *p 3028 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 3029 conf.port_min = ntohs(p->low); 3030 conf.port_max = ntohs(p->high); 3031 } 3032 3033 if (data[IFLA_VXLAN_PORT]) 3034 conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 3035 3036 if (data[IFLA_VXLAN_UDP_CSUM] && 3037 !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) 3038 conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX; 3039 3040 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] && 3041 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) 3042 conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; 3043 3044 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] && 3045 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 3046 conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 3047 3048 if (data[IFLA_VXLAN_REMCSUM_TX] && 3049 nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) 3050 conf.flags |= VXLAN_F_REMCSUM_TX; 3051 3052 if (data[IFLA_VXLAN_REMCSUM_RX] && 3053 nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) 3054 conf.flags |= VXLAN_F_REMCSUM_RX; 3055 3056 if (data[IFLA_VXLAN_GBP]) 3057 conf.flags |= VXLAN_F_GBP; 3058 3059 if (data[IFLA_VXLAN_GPE]) 3060 conf.flags |= VXLAN_F_GPE; 3061 3062 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) 3063 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; 3064 3065 if (tb[IFLA_MTU]) 3066 conf.mtu = nla_get_u32(tb[IFLA_MTU]); 3067 3068 err = vxlan_dev_configure(src_net, dev, &conf); 3069 switch (err) { 3070 case -ENODEV: 3071 pr_info("ifindex %d does not exist\n", conf.remote_ifindex); 3072 break; 3073 3074 case -EPERM: 3075 pr_info("IPv6 is disabled via sysctl\n"); 3076 break; 3077 3078 case -EEXIST: 3079 pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni)); 3080 break; 3081 3082 case -EINVAL: 3083 pr_info("unsupported combination of extensions\n"); 3084 break; 3085 } 3086 3087 return err; 3088} 3089 3090static void vxlan_dellink(struct net_device *dev, struct list_head *head) 3091{ 3092 struct vxlan_dev *vxlan = netdev_priv(dev); 3093 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 3094 3095 spin_lock(&vn->sock_lock); 3096 if (!hlist_unhashed(&vxlan->hlist)) 3097 hlist_del_rcu(&vxlan->hlist); 3098 spin_unlock(&vn->sock_lock); 3099 3100 gro_cells_destroy(&vxlan->gro_cells); 3101 list_del(&vxlan->next); 3102 unregister_netdevice_queue(dev, head); 3103} 3104 3105static size_t vxlan_get_size(const struct net_device *dev) 3106{ 3107 3108 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ 3109 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */ 3110 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ 3111 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ 3112 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ 3113 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ 3114 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ 3115 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 3116 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */ 3117 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ 3118 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ 3119 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ 3120 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */ 3121 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 3122 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 3123 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 3124 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */ 3125 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */ 3126 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */ 3127 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */ 3128 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */ 3129 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */ 3130 0; 3131} 3132 3133static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) 3134{ 3135 const struct vxlan_dev *vxlan = netdev_priv(dev); 3136 const struct vxlan_rdst *dst = &vxlan->default_dst; 3137 struct ifla_vxlan_port_range ports = { 3138 .low = htons(vxlan->cfg.port_min), 3139 .high = htons(vxlan->cfg.port_max), 3140 }; 3141 3142 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni))) 3143 goto nla_put_failure; 3144 3145 if (!vxlan_addr_any(&dst->remote_ip)) { 3146 if (dst->remote_ip.sa.sa_family == AF_INET) { 3147 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP, 3148 dst->remote_ip.sin.sin_addr.s_addr)) 3149 goto nla_put_failure; 3150#if IS_ENABLED(CONFIG_IPV6) 3151 } else { 3152 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6, 3153 &dst->remote_ip.sin6.sin6_addr)) 3154 goto nla_put_failure; 3155#endif 3156 } 3157 } 3158 3159 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) 3160 goto nla_put_failure; 3161 3162 if (!vxlan_addr_any(&vxlan->cfg.saddr)) { 3163 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { 3164 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL, 3165 vxlan->cfg.saddr.sin.sin_addr.s_addr)) 3166 goto nla_put_failure; 3167#if IS_ENABLED(CONFIG_IPV6) 3168 } else { 3169 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6, 3170 &vxlan->cfg.saddr.sin6.sin6_addr)) 3171 goto nla_put_failure; 3172#endif 3173 } 3174 } 3175 3176 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || 3177 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || 3178 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || 3179 nla_put_u8(skb, IFLA_VXLAN_LEARNING, 3180 !!(vxlan->flags & VXLAN_F_LEARN)) || 3181 nla_put_u8(skb, IFLA_VXLAN_PROXY, 3182 !!(vxlan->flags & VXLAN_F_PROXY)) || 3183 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) || 3184 nla_put_u8(skb, IFLA_VXLAN_L2MISS, 3185 !!(vxlan->flags & VXLAN_F_L2MISS)) || 3186 nla_put_u8(skb, IFLA_VXLAN_L3MISS, 3187 !!(vxlan->flags & VXLAN_F_L3MISS)) || 3188 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA, 3189 !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) || 3190 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || 3191 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || 3192 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || 3193 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM, 3194 !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || 3195 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, 3196 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || 3197 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, 3198 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) || 3199 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX, 3200 !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) || 3201 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX, 3202 !!(vxlan->flags & VXLAN_F_REMCSUM_RX))) 3203 goto nla_put_failure; 3204 3205 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) 3206 goto nla_put_failure; 3207 3208 if (vxlan->flags & VXLAN_F_GBP && 3209 nla_put_flag(skb, IFLA_VXLAN_GBP)) 3210 goto nla_put_failure; 3211 3212 if (vxlan->flags & VXLAN_F_GPE && 3213 nla_put_flag(skb, IFLA_VXLAN_GPE)) 3214 goto nla_put_failure; 3215 3216 if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL && 3217 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL)) 3218 goto nla_put_failure; 3219 3220 return 0; 3221 3222nla_put_failure: 3223 return -EMSGSIZE; 3224} 3225 3226static struct net *vxlan_get_link_net(const struct net_device *dev) 3227{ 3228 struct vxlan_dev *vxlan = netdev_priv(dev); 3229 3230 return vxlan->net; 3231} 3232 3233static struct rtnl_link_ops vxlan_link_ops __read_mostly = { 3234 .kind = "vxlan", 3235 .maxtype = IFLA_VXLAN_MAX, 3236 .policy = vxlan_policy, 3237 .priv_size = sizeof(struct vxlan_dev), 3238 .setup = vxlan_setup, 3239 .validate = vxlan_validate, 3240 .newlink = vxlan_newlink, 3241 .dellink = vxlan_dellink, 3242 .get_size = vxlan_get_size, 3243 .fill_info = vxlan_fill_info, 3244 .get_link_net = vxlan_get_link_net, 3245}; 3246 3247struct net_device *vxlan_dev_create(struct net *net, const char *name, 3248 u8 name_assign_type, 3249 struct vxlan_config *conf) 3250{ 3251 struct nlattr *tb[IFLA_MAX + 1]; 3252 struct net_device *dev; 3253 int err; 3254 3255 memset(&tb, 0, sizeof(tb)); 3256 3257 dev = rtnl_create_link(net, name, name_assign_type, 3258 &vxlan_link_ops, tb); 3259 if (IS_ERR(dev)) 3260 return dev; 3261 3262 err = vxlan_dev_configure(net, dev, conf); 3263 if (err < 0) { 3264 free_netdev(dev); 3265 return ERR_PTR(err); 3266 } 3267 3268 err = rtnl_configure_link(dev, NULL); 3269 if (err < 0) { 3270 LIST_HEAD(list_kill); 3271 3272 vxlan_dellink(dev, &list_kill); 3273 unregister_netdevice_many(&list_kill); 3274 return ERR_PTR(err); 3275 } 3276 3277 return dev; 3278} 3279EXPORT_SYMBOL_GPL(vxlan_dev_create); 3280 3281static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, 3282 struct net_device *dev) 3283{ 3284 struct vxlan_dev *vxlan, *next; 3285 LIST_HEAD(list_kill); 3286 3287 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { 3288 struct vxlan_rdst *dst = &vxlan->default_dst; 3289 3290 /* In case we created vxlan device with carrier 3291 * and we loose the carrier due to module unload 3292 * we also need to remove vxlan device. In other 3293 * cases, it's not necessary and remote_ifindex 3294 * is 0 here, so no matches. 3295 */ 3296 if (dst->remote_ifindex == dev->ifindex) 3297 vxlan_dellink(vxlan->dev, &list_kill); 3298 } 3299 3300 unregister_netdevice_many(&list_kill); 3301} 3302 3303static int vxlan_netdevice_event(struct notifier_block *unused, 3304 unsigned long event, void *ptr) 3305{ 3306 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3307 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 3308 3309 if (event == NETDEV_UNREGISTER) 3310 vxlan_handle_lowerdev_unregister(vn, dev); 3311 else if (event == NETDEV_OFFLOAD_PUSH_VXLAN) 3312 vxlan_push_rx_ports(dev); 3313 3314 return NOTIFY_DONE; 3315} 3316 3317static struct notifier_block vxlan_notifier_block __read_mostly = { 3318 .notifier_call = vxlan_netdevice_event, 3319}; 3320 3321static __net_init int vxlan_init_net(struct net *net) 3322{ 3323 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3324 unsigned int h; 3325 3326 INIT_LIST_HEAD(&vn->vxlan_list); 3327 spin_lock_init(&vn->sock_lock); 3328 3329 for (h = 0; h < PORT_HASH_SIZE; ++h) 3330 INIT_HLIST_HEAD(&vn->sock_list[h]); 3331 3332 return 0; 3333} 3334 3335static void __net_exit vxlan_exit_net(struct net *net) 3336{ 3337 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3338 struct vxlan_dev *vxlan, *next; 3339 struct net_device *dev, *aux; 3340 LIST_HEAD(list); 3341 3342 rtnl_lock(); 3343 for_each_netdev_safe(net, dev, aux) 3344 if (dev->rtnl_link_ops == &vxlan_link_ops) 3345 unregister_netdevice_queue(dev, &list); 3346 3347 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { 3348 /* If vxlan->dev is in the same netns, it has already been added 3349 * to the list by the previous loop. 3350 */ 3351 if (!net_eq(dev_net(vxlan->dev), net)) { 3352 gro_cells_destroy(&vxlan->gro_cells); 3353 unregister_netdevice_queue(vxlan->dev, &list); 3354 } 3355 } 3356 3357 unregister_netdevice_many(&list); 3358 rtnl_unlock(); 3359} 3360 3361static struct pernet_operations vxlan_net_ops = { 3362 .init = vxlan_init_net, 3363 .exit = vxlan_exit_net, 3364 .id = &vxlan_net_id, 3365 .size = sizeof(struct vxlan_net), 3366}; 3367 3368static int __init vxlan_init_module(void) 3369{ 3370 int rc; 3371 3372 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt)); 3373 3374 rc = register_pernet_subsys(&vxlan_net_ops); 3375 if (rc) 3376 goto out1; 3377 3378 rc = register_netdevice_notifier(&vxlan_notifier_block); 3379 if (rc) 3380 goto out2; 3381 3382 rc = rtnl_link_register(&vxlan_link_ops); 3383 if (rc) 3384 goto out3; 3385 3386 return 0; 3387out3: 3388 unregister_netdevice_notifier(&vxlan_notifier_block); 3389out2: 3390 unregister_pernet_subsys(&vxlan_net_ops); 3391out1: 3392 return rc; 3393} 3394late_initcall(vxlan_init_module); 3395 3396static void __exit vxlan_cleanup_module(void) 3397{ 3398 rtnl_link_unregister(&vxlan_link_ops); 3399 unregister_netdevice_notifier(&vxlan_notifier_block); 3400 unregister_pernet_subsys(&vxlan_net_ops); 3401 /* rcu_barrier() is called by netns */ 3402} 3403module_exit(vxlan_cleanup_module); 3404 3405MODULE_LICENSE("GPL"); 3406MODULE_VERSION(VXLAN_VERSION); 3407MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>"); 3408MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic"); 3409MODULE_ALIAS_RTNL_LINK("vxlan");