Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/vxlan.c at v3.15-rc1 (2957 lines, 75 kB)
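For context, a VXLAN device backed by this driver is normally created from userspace with iproute2. A minimal sketch is shown below; the device name, VNI, multicast group, lower device and port are illustrative values, not taken from this file:

    ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 dstport 4789

The source listing follows.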
/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};

/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;

static const u8 all_zeros_mac[ETH_ALEN];

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

union vxlan_addr {
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	struct sockaddr sa;
};

struct vxlan_rdst {
	union vxlan_addr	 remote_ip;
	__be16			 remote_port;
	u32			 remote_vni;
	u32			 remote_ifindex;
	struct list_head	 list;
	struct rcu_head		 rcu;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
	u8		  eth_addr[ETH_ALEN];
};

/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct list_head  next;		/* vxlan's per namespace list */
	struct vxlan_sock *vn_sock;	/* listening socket */
	struct net_device *dev;
	struct vxlan_rdst default_dst;	/* default destination */
	union vxlan_addr  saddr;	/* source address */
	__be16		  dst_port;
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	struct work_struct sock_work;
	struct work_struct igmp_join;
	struct work_struct igmp_leave;

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};

#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10
#define VXLAN_F_IPV6	0x20 /* internal flag */

/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;

static void vxlan_sock_work(struct work_struct *work);

#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_be32(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
	else
		return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_be32(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}

/* Find VXLAN socket based on network namespace and UDP port */
static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
{
	struct vxlan_sock *vs;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, port);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, id);
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
			     first_remote_rtnl(fdb));
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	INIT_LIST_HEAD(&f.remotes);
	list_add_rcu(&remote.list, &f.remotes);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};

	INIT_LIST_HEAD(&f.remotes);
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	return 1;
}

static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	struct ethhdr *eh, *eh2;
	unsigned int hlen, off_vx, off_eth;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}
	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	off_eth = skb_gro_offset(skb);
	hlen = off_eth + sizeof(*eh);
	eh = skb_gro_header_fast(skb, off_eth);
	if (skb_gro_header_hard(skb, hlen)) {
		eh = skb_gro_header_slow(skb, hlen, off_eth);
		if (unlikely(!eh))
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		eh2 = (struct ethhdr *)(p->data + off_eth);
		if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = eh->h_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype == NULL) {
		flush = 1;
		goto out_unlock;
	}

	skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct ethhdr *eh;
	struct packet_offload *ptype;
	__be16 type;
	int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
	int err = -ENOSYS;

	eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
	type = eh->h_proto;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype != NULL)
		err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);

	rcu_read_unlock();
	return err;
}

/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&vs->udp_offloads);
		if (err)
			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_add_vxlan_port)
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();
}

/* Notify netdevs that UDP port is no longer listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_del_vxlan_port)
			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();

	if (sa_family == AF_INET)
		udp_del_offload(&vs->udp_offloads);
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				int rc = vxlan_fdb_replace(f, ip, port, vni,
							   ifindex);

				if (rc < 0)
					return rc;
				notify |= rc;
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}

static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			if (idx < cb->args[0])
				goto skip;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
			}
skip:
			++idx;
		}
	}
out:
	return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip, &src_ip);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (atomic_read(&dev->vn_sock->refcnt) == 1)
		return false;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (vxlan->vn_sock != dev->vn_sock)
			continue;

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}

static void vxlan_sock_hold(struct vxlan_sock *vs)
{
	atomic_inc(&vs->refcnt);
}

void vxlan_sock_release(struct vxlan_sock *vs)
{
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	rcu_assign_sk_user_data(vs->sock->sk, NULL);
	vxlan_notify_del_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}
EXPORT_SYMBOL_GPL(vxlan_sock_release);

/* Callback to update multicast group membership when first VNI on
 * multicast address is brought up
 * Done as workqueue because ip_mc_join_group acquires RTNL.
 */
static void vxlan_igmp_join(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ip_mc_join_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
					     &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	vxlan_sock_release(vs);
	dev_put(vxlan->dev);
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static void vxlan_igmp_leave(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ip_mc_leave_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
					     &ip->sin6.sin6_addr);
#endif
	}

	release_sock(sk);

	vxlan_sock_release(vs);
	dev_put(vxlan->dev);
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	/* Return packets with reserved bits set */
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	/* If the NIC driver gave us an encapsulated packet
	 * with the encapsulation mark, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
	    !skb->encapsulation)
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	vs->rcv(vs, skb, vxh->vx_vni);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Return non vxlan pkt */
	return 1;
}

static void vxlan_rcv(struct vxlan_sock *vs,
		      struct sk_buff *skb, __be32 vx_vni)
{
	struct iphdr *oip = NULL;
	struct ipv6hdr *oip6 = NULL;
	struct vxlan_dev *vxlan;
	struct pcpu_sw_netstats *stats;
	union vxlan_addr saddr;
	__u32 vni;
	int err = 0;
	union vxlan_addr *remote_ip;

	vni = ntohl(vx_vni) >> 8;
	/* Is this VNI defined? */
	vxlan = vxlan_vs_find_vni(vs, vni);
	if (!vxlan)
		goto drop;

	remote_ip = &vxlan->default_dst.remote_ip;
	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		goto drop;

	/* Re-examine inner Ethernet packet */
	if (remote_ip->sa.sa_family == AF_INET) {
		oip = ip_hdr(skb);
		saddr.sin.sin_addr.s_addr = oip->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		oip6 = ipv6_hdr(skb);
		saddr.sin6.sin6_addr = oip6->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		goto drop;

	skb_reset_network_header(skb);

	if (oip6)
		err = IP6_ECN_decapsulate(oip6, skb);
	if (oip)
		err = IP_ECN_decapsulate(oip, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (oip6)
				net_info_ratelimited("non-ECT from %pI6\n",
						     &oip6->saddr);
			if (oip)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &oip->saddr, oip->tos);
		}
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sa.sa_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)

static struct sk_buff *vxlan_na_create(struct sk_buff *request,
				       struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL)
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_set_mac_header(reply, 0);

	ns = (struct nd_msg *)skb_transport_header(request);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_set_network_header(reply, 0);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_set_transport_header(reply, 0);

	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

	/* Neighbor Advertisement */
	memset(na, 0, sizeof(*na)+na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}

static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *saddr, *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	if (!pskb_may_pull(skb, skb->len))
		goto out;

	iphdr = ipv6_hdr(skb);
	saddr = &iphdr->saddr;
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)skb_transport_header(skb);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sa.sa_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sa.sa_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sa.sa_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
				dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}

/* Compute source port for outgoing packet
 * first choice to use L4 flow hash since it will spread
 * better and maybe available from hardware
 * secondary choice is to use jhash on the Ethernet header
 */
__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
{
	unsigned int range = (port_max - port_min) + 1;
	u32 hash;

	hash = skb_get_hash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return htons((((u64) hash * range) >> 32) + port_min);
}
EXPORT_SYMBOL_GPL(vxlan_src_port);

static int handle_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;

		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int vxlan6_xmit_skb(struct vxlan_sock *vs,
			   struct dst_entry *dst, struct sk_buff *skb,
			   struct net_device *dev, struct in6_addr *saddr,
			   struct in6_addr *daddr, __u8 prio, __u8 ttl,
			   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct ipv6hdr *ip6h;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	skb_scrub_packet(skb, false);

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + sizeof(struct ipv6hdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	if (vlan_tx_tag_present(skb)) {
		if (WARN_ON(!__vlan_put_tag(skb,
					    skb->vlan_proto,
					    vlan_tx_tag_get(skb))))
			return -ENOMEM;

		skb->vlan_tci = 0;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_set(skb, dst);

	if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) {
		__wsum csum = skb_checksum(skb, 0, skb->len, 0);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
					    IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr,
					     skb->len, IPPROTO_UDP, 0);
	}

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6h->version = 6;
	ip6h->priority = prio;
	ip6h->flow_lbl[0] = 0;
	ip6h->flow_lbl[1] = 0;
	ip6h->flow_lbl[2] = 0;
	ip6h->payload_len = htons(skb->len);
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	err = handle_offloads(skb);
	if (err)
		return err;

	ip6tunnel_xmit(skb, dev);
	return 0;
}
#endif

int vxlan_xmit_skb(struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	if (vlan_tx_tag_present(skb)) {
		if (WARN_ON(!__vlan_put_tag(skb,
					    skb->vlan_proto,
					    vlan_tx_tag_get(skb))))
			return -ENOMEM;

		skb->vlan_tci = 0;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	err = handle_offloads(skb);
	if (err)
		return err;

	return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
			     false);
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);

/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family =  AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family =  AF_INET6;
#endif
	}

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += skb->len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		skb->dev->stats.rx_dropped++;
	}
}

static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt = NULL;
	const struct iphdr *old_iph;
	struct flowi4 fl4;
	union vxlan_addr *dst;
	__be16 src_port = 0, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
	vni = rdst->remote_vni;
	dst = &rdst->remote_ip;

	if (vxlan_addr_any(dst)) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && vxlan_addr_multicast(dst))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);

	if (dst->sa.sa_family == AF_INET) {
		memset(&fl4, 0, sizeof(fl4));
		fl4.flowi4_oif = rdst->remote_ifindex;
		fl4.flowi4_tos = RT_TOS(tos);
		fl4.daddr = dst->sin.sin_addr.s_addr;
		fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;

		rt = ip_route_output_key(dev_net(dev), &fl4);
		if (IS_ERR(rt)) {
			netdev_dbg(dev, "no route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.collisions++;
			goto rt_tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (rt->rt_flags & RTCF_LOCAL &&
		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			ip_rt_put(rt);
			dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

		err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
				     fl4.saddr, dst->sin.sin_addr.s_addr,
				     tos, ttl, df, src_port, dst_port,
				     htonl(vni << 8));

		if (err < 0)
			goto rt_tx_error;
		iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct sock *sk = vxlan->vn_sock->sock->sk;
		struct dst_entry *ndst;
		struct flowi6 fl6;
		u32 flags;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = rdst->remote_ifindex;
		fl6.daddr = dst->sin6.sin6_addr;
		fl6.saddr = vxlan->saddr.sin6.sin6_addr;
		fl6.flowi6_proto = IPPROTO_UDP;

		if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
			netdev_dbg(dev, "no route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (ndst->dev == dev) {
			netdev_dbg(dev, "circular route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dst_release(ndst);
			dev->stats.collisions++;
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		flags = ((struct rt6_info *)ndst)->rt6i_flags;
		if (flags & RTF_LOCAL &&
		    !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			dst_release(ndst);
			dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		ttl = ttl ? : ip6_dst_hoplimit(ndst);

		err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
				      dev, &fl6.saddr, &fl6.daddr, 0, ttl,
				      src_port, dst_port, htonl(vni << 8));
#endif
	}

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}

/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY)) {
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
#endif
	}

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	__u32 vni = vxlan->default_dst.remote_vni;

	vxlan->vn_sock = vs;
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_sock *vs;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	spin_lock(&vn->sock_lock);
	vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
	if (vs) {
		/* If we have a socket with same port already, reuse it */
		atomic_inc(&vs->refcnt);
		vxlan_vs_add_dev(vs, vxlan);
	} else {
		/* otherwise make new socket outside of RTNL */
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->sock_work);
	}
	spin_unlock(&vn->sock_lock);

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	vxlan_fdb_delete_default(vxlan);

	if (vs)
		vxlan_sock_release(vs);
	free_percpu(dev->tstats);
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	/* socket hasn't been created */
	if (!vs)
		return -ENOTCONN;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_join);
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan)) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_leave);
	}

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}

/* Stub, nothing needs to be done. */
*/ 2190static void vxlan_set_multicast_list(struct net_device *dev) 2191{ 2192} 2193 2194static int vxlan_change_mtu(struct net_device *dev, int new_mtu) 2195{ 2196 struct vxlan_dev *vxlan = netdev_priv(dev); 2197 struct vxlan_rdst *dst = &vxlan->default_dst; 2198 struct net_device *lowerdev; 2199 int max_mtu; 2200 2201 lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex); 2202 if (lowerdev == NULL) 2203 return eth_change_mtu(dev, new_mtu); 2204 2205 if (dst->remote_ip.sa.sa_family == AF_INET6) 2206 max_mtu = lowerdev->mtu - VXLAN6_HEADROOM; 2207 else 2208 max_mtu = lowerdev->mtu - VXLAN_HEADROOM; 2209 2210 if (new_mtu < 68 || new_mtu > max_mtu) 2211 return -EINVAL; 2212 2213 dev->mtu = new_mtu; 2214 return 0; 2215} 2216 2217static const struct net_device_ops vxlan_netdev_ops = { 2218 .ndo_init = vxlan_init, 2219 .ndo_uninit = vxlan_uninit, 2220 .ndo_open = vxlan_open, 2221 .ndo_stop = vxlan_stop, 2222 .ndo_start_xmit = vxlan_xmit, 2223 .ndo_get_stats64 = ip_tunnel_get_stats64, 2224 .ndo_set_rx_mode = vxlan_set_multicast_list, 2225 .ndo_change_mtu = vxlan_change_mtu, 2226 .ndo_validate_addr = eth_validate_addr, 2227 .ndo_set_mac_address = eth_mac_addr, 2228 .ndo_fdb_add = vxlan_fdb_add, 2229 .ndo_fdb_del = vxlan_fdb_delete, 2230 .ndo_fdb_dump = vxlan_fdb_dump, 2231}; 2232 2233/* Info for udev, that this is a virtual tunnel endpoint */ 2234static struct device_type vxlan_type = { 2235 .name = "vxlan", 2236}; 2237 2238/* Calls the ndo_add_vxlan_port of the caller in order to 2239 * supply the listening VXLAN udp ports. Callers are expected 2240 * to implement the ndo_add_vxlan_port. 2241 */ 2242void vxlan_get_rx_port(struct net_device *dev) 2243{ 2244 struct vxlan_sock *vs; 2245 struct net *net = dev_net(dev); 2246 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2247 sa_family_t sa_family; 2248 __be16 port; 2249 unsigned int i; 2250 2251 spin_lock(&vn->sock_lock); 2252 for (i = 0; i < PORT_HASH_SIZE; ++i) { 2253 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { 2254 port = inet_sk(vs->sock->sk)->inet_sport; 2255 sa_family = vs->sock->sk->sk_family; 2256 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, 2257 port); 2258 } 2259 } 2260 spin_unlock(&vn->sock_lock); 2261} 2262EXPORT_SYMBOL_GPL(vxlan_get_rx_port); 2263 2264/* Initialize the device structure. 
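 * Called once per new vxlan netdev via rtnl_link_ops->setup: it assigns a
 * random MAC address, installs vxlan_netdev_ops, advertises the software
 * GSO/checksum features, seeds the UDP source-port range from the local
 * port range and the destination port from the module parameter, and
 * initializes the timers, work items and FDB hash table.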
*/ 2265static void vxlan_setup(struct net_device *dev) 2266{ 2267 struct vxlan_dev *vxlan = netdev_priv(dev); 2268 unsigned int h; 2269 int low, high; 2270 2271 eth_hw_addr_random(dev); 2272 ether_setup(dev); 2273 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6) 2274 dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM; 2275 else 2276 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; 2277 2278 dev->netdev_ops = &vxlan_netdev_ops; 2279 dev->destructor = free_netdev; 2280 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 2281 2282 dev->tx_queue_len = 0; 2283 dev->features |= NETIF_F_LLTX; 2284 dev->features |= NETIF_F_NETNS_LOCAL; 2285 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2286 dev->features |= NETIF_F_RXCSUM; 2287 dev->features |= NETIF_F_GSO_SOFTWARE; 2288 2289 dev->vlan_features = dev->features; 2290 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2291 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 2292 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2293 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2294 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 2295 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 2296 2297 INIT_LIST_HEAD(&vxlan->next); 2298 spin_lock_init(&vxlan->hash_lock); 2299 INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join); 2300 INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave); 2301 INIT_WORK(&vxlan->sock_work, vxlan_sock_work); 2302 2303 init_timer_deferrable(&vxlan->age_timer); 2304 vxlan->age_timer.function = vxlan_cleanup; 2305 vxlan->age_timer.data = (unsigned long) vxlan; 2306 2307 inet_get_local_port_range(dev_net(dev), &low, &high); 2308 vxlan->port_min = low; 2309 vxlan->port_max = high; 2310 vxlan->dst_port = htons(vxlan_port); 2311 2312 vxlan->dev = dev; 2313 2314 for (h = 0; h < FDB_HASH_SIZE; ++h) 2315 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); 2316} 2317 2318static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { 2319 [IFLA_VXLAN_ID] = { .type = NLA_U32 }, 2320 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 2321 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, 2322 [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, 2323 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 2324 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, 2325 [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, 2326 [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, 2327 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, 2328 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, 2329 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, 2330 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, 2331 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 }, 2332 [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, 2333 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, 2334 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, 2335 [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, 2336}; 2337 2338static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 2339{ 2340 if (tb[IFLA_ADDRESS]) { 2341 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { 2342 pr_debug("invalid link address (not ethernet)\n"); 2343 return -EINVAL; 2344 } 2345 2346 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { 2347 pr_debug("invalid all zero ethernet address\n"); 2348 return -EADDRNOTAVAIL; 2349 } 2350 } 2351 2352 if (!data) 2353 return -EINVAL; 2354 2355 if (data[IFLA_VXLAN_ID]) { 2356 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); 2357 if (id >= VXLAN_VID_MASK) 2358 return -ERANGE; 2359 } 2360 2361 if (data[IFLA_VXLAN_PORT_RANGE]) { 2362 const struct ifla_vxlan_port_range *p 2363 = 
nla_data(data[IFLA_VXLAN_PORT_RANGE]); 2364 2365 if (ntohs(p->high) < ntohs(p->low)) { 2366 pr_debug("port range %u .. %u not valid\n", 2367 ntohs(p->low), ntohs(p->high)); 2368 return -EINVAL; 2369 } 2370 } 2371 2372 return 0; 2373} 2374 2375static void vxlan_get_drvinfo(struct net_device *netdev, 2376 struct ethtool_drvinfo *drvinfo) 2377{ 2378 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version)); 2379 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver)); 2380} 2381 2382static const struct ethtool_ops vxlan_ethtool_ops = { 2383 .get_drvinfo = vxlan_get_drvinfo, 2384 .get_link = ethtool_op_get_link, 2385}; 2386 2387static void vxlan_del_work(struct work_struct *work) 2388{ 2389 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work); 2390 2391 sk_release_kernel(vs->sock->sk); 2392 kfree_rcu(vs, rcu); 2393} 2394 2395#if IS_ENABLED(CONFIG_IPV6) 2396/* Create UDP socket for encapsulation receive. AF_INET6 socket 2397 * could be used for both IPv4 and IPv6 communications, but 2398 * users may set bindv6only=1. 2399 */ 2400static struct socket *create_v6_sock(struct net *net, __be16 port) 2401{ 2402 struct sock *sk; 2403 struct socket *sock; 2404 struct sockaddr_in6 vxlan_addr = { 2405 .sin6_family = AF_INET6, 2406 .sin6_port = port, 2407 }; 2408 int rc, val = 1; 2409 2410 rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock); 2411 if (rc < 0) { 2412 pr_debug("UDPv6 socket create failed\n"); 2413 return ERR_PTR(rc); 2414 } 2415 2416 /* Put in proper namespace */ 2417 sk = sock->sk; 2418 sk_change_net(sk, net); 2419 2420 kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY, 2421 (char *)&val, sizeof(val)); 2422 rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr, 2423 sizeof(struct sockaddr_in6)); 2424 if (rc < 0) { 2425 pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n", 2426 &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc); 2427 sk_release_kernel(sk); 2428 return ERR_PTR(rc); 2429 } 2430 /* At this point, IPv6 module should have been loaded in 2431 * sock_create_kern(). 2432 */ 2433 BUG_ON(!ipv6_stub); 2434 2435 /* Disable multicast loopback */ 2436 inet_sk(sk)->mc_loop = 0; 2437 return sock; 2438} 2439 2440#else 2441 2442static struct socket *create_v6_sock(struct net *net, __be16 port) 2443{ 2444 return ERR_PTR(-EPFNOSUPPORT); 2445} 2446#endif 2447 2448static struct socket *create_v4_sock(struct net *net, __be16 port) 2449{ 2450 struct sock *sk; 2451 struct socket *sock; 2452 struct sockaddr_in vxlan_addr = { 2453 .sin_family = AF_INET, 2454 .sin_addr.s_addr = htonl(INADDR_ANY), 2455 .sin_port = port, 2456 }; 2457 int rc; 2458 2459 /* Create UDP socket for encapsulation receive. 
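 * The socket is created with sock_create_kern(), moved into the right
 * namespace with sk_change_net(), bound to INADDR_ANY on the requested
 * port, and has multicast loopback disabled.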
*/ 2460 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); 2461 if (rc < 0) { 2462 pr_debug("UDP socket create failed\n"); 2463 return ERR_PTR(rc); 2464 } 2465 2466 /* Put in proper namespace */ 2467 sk = sock->sk; 2468 sk_change_net(sk, net); 2469 2470 rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr, 2471 sizeof(vxlan_addr)); 2472 if (rc < 0) { 2473 pr_debug("bind for UDP socket %pI4:%u (%d)\n", 2474 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc); 2475 sk_release_kernel(sk); 2476 return ERR_PTR(rc); 2477 } 2478 2479 /* Disable multicast loopback */ 2480 inet_sk(sk)->mc_loop = 0; 2481 return sock; 2482} 2483 2484/* Create new listen socket if needed */ 2485static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, 2486 vxlan_rcv_t *rcv, void *data, bool ipv6) 2487{ 2488 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2489 struct vxlan_sock *vs; 2490 struct socket *sock; 2491 struct sock *sk; 2492 unsigned int h; 2493 2494 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 2495 if (!vs) 2496 return ERR_PTR(-ENOMEM); 2497 2498 for (h = 0; h < VNI_HASH_SIZE; ++h) 2499 INIT_HLIST_HEAD(&vs->vni_list[h]); 2500 2501 INIT_WORK(&vs->del_work, vxlan_del_work); 2502 2503 if (ipv6) 2504 sock = create_v6_sock(net, port); 2505 else 2506 sock = create_v4_sock(net, port); 2507 if (IS_ERR(sock)) { 2508 kfree(vs); 2509 return ERR_CAST(sock); 2510 } 2511 2512 vs->sock = sock; 2513 sk = sock->sk; 2514 atomic_set(&vs->refcnt, 1); 2515 vs->rcv = rcv; 2516 vs->data = data; 2517 rcu_assign_sk_user_data(vs->sock->sk, vs); 2518 2519 /* Initialize the vxlan udp offloads structure */ 2520 vs->udp_offloads.port = port; 2521 vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive; 2522 vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete; 2523 2524 spin_lock(&vn->sock_lock); 2525 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2526 vxlan_notify_add_rx_port(vs); 2527 spin_unlock(&vn->sock_lock); 2528 2529 /* Mark socket as an encapsulation socket. */ 2530 udp_sk(sk)->encap_type = 1; 2531 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; 2532#if IS_ENABLED(CONFIG_IPV6) 2533 if (ipv6) 2534 ipv6_stub->udpv6_encap_enable(); 2535 else 2536#endif 2537 udp_encap_enable(); 2538 2539 return vs; 2540} 2541 2542struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, 2543 vxlan_rcv_t *rcv, void *data, 2544 bool no_share, bool ipv6) 2545{ 2546 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2547 struct vxlan_sock *vs; 2548 2549 vs = vxlan_socket_create(net, port, rcv, data, ipv6); 2550 if (!IS_ERR(vs)) 2551 return vs; 2552 2553 if (no_share) /* Return error if sharing is not allowed. 
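 * (vs still holds the ERR_PTR from vxlan_socket_create() at this
 *  point, so the caller sees the original failure)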
*/ 2554 return vs; 2555 2556 spin_lock(&vn->sock_lock); 2557 vs = vxlan_find_sock(net, port); 2558 if (vs) { 2559 if (vs->rcv == rcv) 2560 atomic_inc(&vs->refcnt); 2561 else 2562 vs = ERR_PTR(-EBUSY); 2563 } 2564 spin_unlock(&vn->sock_lock); 2565 2566 if (!vs) 2567 vs = ERR_PTR(-EINVAL); 2568 2569 return vs; 2570} 2571EXPORT_SYMBOL_GPL(vxlan_sock_add); 2572 2573/* Scheduled at device creation to bind to a socket */ 2574static void vxlan_sock_work(struct work_struct *work) 2575{ 2576 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work); 2577 struct net *net = dev_net(vxlan->dev); 2578 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2579 __be16 port = vxlan->dst_port; 2580 struct vxlan_sock *nvs; 2581 2582 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6); 2583 spin_lock(&vn->sock_lock); 2584 if (!IS_ERR(nvs)) 2585 vxlan_vs_add_dev(nvs, vxlan); 2586 spin_unlock(&vn->sock_lock); 2587 2588 dev_put(vxlan->dev); 2589} 2590 2591static int vxlan_newlink(struct net *net, struct net_device *dev, 2592 struct nlattr *tb[], struct nlattr *data[]) 2593{ 2594 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2595 struct vxlan_dev *vxlan = netdev_priv(dev); 2596 struct vxlan_rdst *dst = &vxlan->default_dst; 2597 __u32 vni; 2598 int err; 2599 bool use_ipv6 = false; 2600 2601 if (!data[IFLA_VXLAN_ID]) 2602 return -EINVAL; 2603 2604 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2605 dst->remote_vni = vni; 2606 2607 /* Unless IPv6 is explicitly requested, assume IPv4 */ 2608 dst->remote_ip.sa.sa_family = AF_INET; 2609 if (data[IFLA_VXLAN_GROUP]) { 2610 dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]); 2611 } else if (data[IFLA_VXLAN_GROUP6]) { 2612 if (!IS_ENABLED(CONFIG_IPV6)) 2613 return -EPFNOSUPPORT; 2614 2615 nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6], 2616 sizeof(struct in6_addr)); 2617 dst->remote_ip.sa.sa_family = AF_INET6; 2618 use_ipv6 = true; 2619 } 2620 2621 if (data[IFLA_VXLAN_LOCAL]) { 2622 vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); 2623 vxlan->saddr.sa.sa_family = AF_INET; 2624 } else if (data[IFLA_VXLAN_LOCAL6]) { 2625 if (!IS_ENABLED(CONFIG_IPV6)) 2626 return -EPFNOSUPPORT; 2627 2628 /* TODO: respect scope id */ 2629 nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6], 2630 sizeof(struct in6_addr)); 2631 vxlan->saddr.sa.sa_family = AF_INET6; 2632 use_ipv6 = true; 2633 } 2634 2635 if (data[IFLA_VXLAN_LINK] && 2636 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { 2637 struct net_device *lowerdev 2638 = __dev_get_by_index(net, dst->remote_ifindex); 2639 2640 if (!lowerdev) { 2641 pr_info("ifindex %d does not exist\n", dst->remote_ifindex); 2642 return -ENODEV; 2643 } 2644 2645#if IS_ENABLED(CONFIG_IPV6) 2646 if (use_ipv6) { 2647 struct inet6_dev *idev = __in6_dev_get(lowerdev); 2648 if (idev && idev->cnf.disable_ipv6) { 2649 pr_info("IPv6 is disabled via sysctl\n"); 2650 return -EPERM; 2651 } 2652 vxlan->flags |= VXLAN_F_IPV6; 2653 } 2654#endif 2655 2656 if (!tb[IFLA_MTU]) 2657 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2658 2659 /* update header length based on lower device */ 2660 dev->hard_header_len = lowerdev->hard_header_len + 2661 (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); 2662 } else if (use_ipv6) 2663 vxlan->flags |= VXLAN_F_IPV6; 2664 2665 if (data[IFLA_VXLAN_TOS]) 2666 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 2667 2668 if (data[IFLA_VXLAN_TTL]) 2669 vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); 2670 2671 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) 2672 vxlan->flags |= VXLAN_F_LEARN; 2673 2674 if (data[IFLA_VXLAN_AGEING]) 2675 vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); 2676 else 2677 vxlan->age_interval = FDB_AGE_DEFAULT; 2678 2679 if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY])) 2680 vxlan->flags |= VXLAN_F_PROXY; 2681 2682 if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC])) 2683 vxlan->flags |= VXLAN_F_RSC; 2684 2685 if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS])) 2686 vxlan->flags |= VXLAN_F_L2MISS; 2687 2688 if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS])) 2689 vxlan->flags |= VXLAN_F_L3MISS; 2690 2691 if (data[IFLA_VXLAN_LIMIT]) 2692 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); 2693 2694 if (data[IFLA_VXLAN_PORT_RANGE]) { 2695 const struct ifla_vxlan_port_range *p 2696 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 2697 vxlan->port_min = ntohs(p->low); 2698 vxlan->port_max = ntohs(p->high); 2699 } 2700 2701 if (data[IFLA_VXLAN_PORT]) 2702 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 2703 2704 if (vxlan_find_vni(net, vni, vxlan->dst_port)) { 2705 pr_info("duplicate VNI %u\n", vni); 2706 return -EEXIST; 2707 } 2708 2709 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 2710 2711 /* create an fdb entry for a valid default destination */ 2712 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { 2713 err = vxlan_fdb_create(vxlan, all_zeros_mac, 2714 &vxlan->default_dst.remote_ip, 2715 NUD_REACHABLE|NUD_PERMANENT, 2716 NLM_F_EXCL|NLM_F_CREATE, 2717 vxlan->dst_port, 2718 vxlan->default_dst.remote_vni, 2719 vxlan->default_dst.remote_ifindex, 2720 NTF_SELF); 2721 if (err) 2722 return err; 2723 } 2724 2725 err = register_netdevice(dev); 2726 if (err) { 2727 vxlan_fdb_delete_default(vxlan); 2728 return err; 2729 } 2730 2731 list_add(&vxlan->next, &vn->vxlan_list); 2732 2733 return 0; 2734} 2735 2736static void vxlan_dellink(struct net_device *dev, struct list_head *head) 2737{ 2738 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 2739 struct vxlan_dev *vxlan = netdev_priv(dev); 2740 2741 spin_lock(&vn->sock_lock); 2742 if (!hlist_unhashed(&vxlan->hlist)) 2743 hlist_del_rcu(&vxlan->hlist); 2744 spin_unlock(&vn->sock_lock); 2745 2746 list_del(&vxlan->next); 2747 unregister_netdevice_queue(dev, head); 2748} 2749 2750static size_t vxlan_get_size(const struct net_device *dev) 2751{ 2752 2753 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ 2754 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */ 2755 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ 2756 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ 2757 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ 2758 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ 2759 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 2760 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */ 2761 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ 2762 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ 2763 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ 2764 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 2765 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 2766 
nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 2767 nla_total_size(sizeof(__be16))+ /* IFLA_VXLAN_PORT */ 2768 0; 2769} 2770 2771static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) 2772{ 2773 const struct vxlan_dev *vxlan = netdev_priv(dev); 2774 const struct vxlan_rdst *dst = &vxlan->default_dst; 2775 struct ifla_vxlan_port_range ports = { 2776 .low = htons(vxlan->port_min), 2777 .high = htons(vxlan->port_max), 2778 }; 2779 2780 if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) 2781 goto nla_put_failure; 2782 2783 if (!vxlan_addr_any(&dst->remote_ip)) { 2784 if (dst->remote_ip.sa.sa_family == AF_INET) { 2785 if (nla_put_be32(skb, IFLA_VXLAN_GROUP, 2786 dst->remote_ip.sin.sin_addr.s_addr)) 2787 goto nla_put_failure; 2788#if IS_ENABLED(CONFIG_IPV6) 2789 } else { 2790 if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr), 2791 &dst->remote_ip.sin6.sin6_addr)) 2792 goto nla_put_failure; 2793#endif 2794 } 2795 } 2796 2797 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) 2798 goto nla_put_failure; 2799 2800 if (!vxlan_addr_any(&vxlan->saddr)) { 2801 if (vxlan->saddr.sa.sa_family == AF_INET) { 2802 if (nla_put_be32(skb, IFLA_VXLAN_LOCAL, 2803 vxlan->saddr.sin.sin_addr.s_addr)) 2804 goto nla_put_failure; 2805#if IS_ENABLED(CONFIG_IPV6) 2806 } else { 2807 if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr), 2808 &vxlan->saddr.sin6.sin6_addr)) 2809 goto nla_put_failure; 2810#endif 2811 } 2812 } 2813 2814 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || 2815 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) || 2816 nla_put_u8(skb, IFLA_VXLAN_LEARNING, 2817 !!(vxlan->flags & VXLAN_F_LEARN)) || 2818 nla_put_u8(skb, IFLA_VXLAN_PROXY, 2819 !!(vxlan->flags & VXLAN_F_PROXY)) || 2820 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) || 2821 nla_put_u8(skb, IFLA_VXLAN_L2MISS, 2822 !!(vxlan->flags & VXLAN_F_L2MISS)) || 2823 nla_put_u8(skb, IFLA_VXLAN_L3MISS, 2824 !!(vxlan->flags & VXLAN_F_L3MISS)) || 2825 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) || 2826 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) || 2827 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port)) 2828 goto nla_put_failure; 2829 2830 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) 2831 goto nla_put_failure; 2832 2833 return 0; 2834 2835nla_put_failure: 2836 return -EMSGSIZE; 2837} 2838 2839static struct rtnl_link_ops vxlan_link_ops __read_mostly = { 2840 .kind = "vxlan", 2841 .maxtype = IFLA_VXLAN_MAX, 2842 .policy = vxlan_policy, 2843 .priv_size = sizeof(struct vxlan_dev), 2844 .setup = vxlan_setup, 2845 .validate = vxlan_validate, 2846 .newlink = vxlan_newlink, 2847 .dellink = vxlan_dellink, 2848 .get_size = vxlan_get_size, 2849 .fill_info = vxlan_fill_info, 2850}; 2851 2852static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, 2853 struct net_device *dev) 2854{ 2855 struct vxlan_dev *vxlan, *next; 2856 LIST_HEAD(list_kill); 2857 2858 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { 2859 struct vxlan_rdst *dst = &vxlan->default_dst; 2860 2861 /* In case we created a vxlan device with carrier 2862 * and we lose the carrier due to module unload, 2863 * we also need to remove the vxlan device. In other 2864 * cases, it's not necessary and remote_ifindex 2865 * is 0 here, so no matches.
2866 */ 2867 if (dst->remote_ifindex == dev->ifindex) 2868 vxlan_dellink(vxlan->dev, &list_kill); 2869 } 2870 2871 unregister_netdevice_many(&list_kill); 2872} 2873 2874static int vxlan_lowerdev_event(struct notifier_block *unused, 2875 unsigned long event, void *ptr) 2876{ 2877 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2878 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 2879 2880 if (event == NETDEV_UNREGISTER) 2881 vxlan_handle_lowerdev_unregister(vn, dev); 2882 2883 return NOTIFY_DONE; 2884} 2885 2886static struct notifier_block vxlan_notifier_block __read_mostly = { 2887 .notifier_call = vxlan_lowerdev_event, 2888}; 2889 2890static __net_init int vxlan_init_net(struct net *net) 2891{ 2892 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2893 unsigned int h; 2894 2895 INIT_LIST_HEAD(&vn->vxlan_list); 2896 spin_lock_init(&vn->sock_lock); 2897 2898 for (h = 0; h < PORT_HASH_SIZE; ++h) 2899 INIT_HLIST_HEAD(&vn->sock_list[h]); 2900 2901 return 0; 2902} 2903 2904static struct pernet_operations vxlan_net_ops = { 2905 .init = vxlan_init_net, 2906 .id = &vxlan_net_id, 2907 .size = sizeof(struct vxlan_net), 2908}; 2909 2910static int __init vxlan_init_module(void) 2911{ 2912 int rc; 2913 2914 vxlan_wq = alloc_workqueue("vxlan", 0, 0); 2915 if (!vxlan_wq) 2916 return -ENOMEM; 2917 2918 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt)); 2919 2920 rc = register_pernet_subsys(&vxlan_net_ops); 2921 if (rc) 2922 goto out1; 2923 2924 rc = register_netdevice_notifier(&vxlan_notifier_block); 2925 if (rc) 2926 goto out2; 2927 2928 rc = rtnl_link_register(&vxlan_link_ops); 2929 if (rc) 2930 goto out3; 2931 2932 return 0; 2933out3: 2934 unregister_netdevice_notifier(&vxlan_notifier_block); 2935out2: 2936 unregister_pernet_subsys(&vxlan_net_ops); 2937out1: 2938 destroy_workqueue(vxlan_wq); 2939 return rc; 2940} 2941late_initcall(vxlan_init_module); 2942 2943static void __exit vxlan_cleanup_module(void) 2944{ 2945 rtnl_link_unregister(&vxlan_link_ops); 2946 unregister_netdevice_notifier(&vxlan_notifier_block); 2947 destroy_workqueue(vxlan_wq); 2948 unregister_pernet_subsys(&vxlan_net_ops); 2949 /* rcu_barrier() is called by netns */ 2950} 2951module_exit(vxlan_cleanup_module); 2952 2953MODULE_LICENSE("GPL"); 2954MODULE_VERSION(VXLAN_VERSION); 2955MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>"); 2956MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic"); 2957MODULE_ALIAS_RTNL_LINK("vxlan");
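Illustrative appendix (not part of vxlan.c): the comment above vxlan_xmit()
says the outer UDP source port is based on a hash of the inner flow, bounded
by the per-device port_min/port_max range that vxlan_setup() seeds from the
local port range. The standalone userspace C sketch below is only a hedged
illustration of that idea; the function name and sample hash values are
invented for the example and are not taken from this driver. It maps a
32-bit flow hash into the configured range with a multiply-and-shift, so
every packet of one flow keeps the same outer source port while distinct
flows spread across ports (and therefore across ECMP/RSS paths).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: map a 32-bit flow hash into [port_min, port_max].
 * The 64-bit multiply followed by the 32-bit shift spreads hashes evenly
 * over the range without modulo bias, and a given hash always maps to the
 * same port, so one flow stays on one outer UDP 5-tuple.
 */
static uint16_t example_vxlan_src_port(uint16_t port_min, uint16_t port_max,
				       uint32_t flow_hash)
{
	uint32_t range = (uint32_t)(port_max - port_min) + 1;

	return (uint16_t)((((uint64_t)flow_hash * range) >> 32) + port_min);
}

int main(void)
{
	/* Hypothetical flow hashes; a real stack would derive them from the
	 * inner packet headers.
	 */
	const uint32_t hashes[] = { 0x00000000u, 0x12345678u, 0xdeadbeefu,
				    0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
		printf("flow hash 0x%08" PRIx32 " -> outer source port %u\n",
		       hashes[i],
		       (unsigned int)example_vxlan_src_port(32768, 61000,
							    hashes[i]));
	return 0;
}

On the MTU side, assuming the usual <net/vxlan.h> definitions, the headroom
subtracted in vxlan_change_mtu() works out to 50 bytes over IPv4 (14-byte
inner Ethernet + 20 IPv4 + 8 UDP + 8 VXLAN) and 70 bytes over IPv6, so a
1500-byte lower device leaves room for a 1450- or 1430-byte vxlan MTU
respectively.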