Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.12-rc6 3629 lines 93 kB view raw
1/* 2 * VXLAN: Virtual eXtensible Local Area Network 3 * 4 * Copyright (c) 2012-2013 Vyatta Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13#include <linux/kernel.h> 14#include <linux/module.h> 15#include <linux/errno.h> 16#include <linux/slab.h> 17#include <linux/udp.h> 18#include <linux/igmp.h> 19#include <linux/if_ether.h> 20#include <linux/ethtool.h> 21#include <net/arp.h> 22#include <net/ndisc.h> 23#include <net/ip.h> 24#include <net/icmp.h> 25#include <net/rtnetlink.h> 26#include <net/inet_ecn.h> 27#include <net/net_namespace.h> 28#include <net/netns/generic.h> 29#include <net/vxlan.h> 30 31#if IS_ENABLED(CONFIG_IPV6) 32#include <net/ip6_tunnel.h> 33#include <net/ip6_checksum.h> 34#endif 35 36#define VXLAN_VERSION "0.1" 37 38#define PORT_HASH_BITS 8 39#define PORT_HASH_SIZE (1<<PORT_HASH_BITS) 40#define FDB_AGE_DEFAULT 300 /* 5 min */ 41#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ 42 43/* UDP port for VXLAN traffic. 44 * The IANA assigned port is 4789, but the Linux default is 8472 45 * for compatibility with early adopters. 46 */ 47static unsigned short vxlan_port __read_mostly = 8472; 48module_param_named(udp_port, vxlan_port, ushort, 0444); 49MODULE_PARM_DESC(udp_port, "Destination UDP port"); 50 51static bool log_ecn_error = true; 52module_param(log_ecn_error, bool, 0644); 53MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 54 55static unsigned int vxlan_net_id; 56static struct rtnl_link_ops vxlan_link_ops; 57 58static const u8 all_zeros_mac[ETH_ALEN + 2]; 59 60static int vxlan_sock_add(struct vxlan_dev *vxlan); 61 62static void vxlan_vs_del_dev(struct vxlan_dev *vxlan); 63 64/* per-network namespace private data for this module */ 65struct vxlan_net { 66 struct list_head vxlan_list; 67 struct hlist_head sock_list[PORT_HASH_SIZE]; 68 spinlock_t sock_lock; 69}; 70 71/* Forwarding table entry */ 72struct vxlan_fdb { 73 struct hlist_node hlist; /* linked list of entries */ 74 struct rcu_head rcu; 75 unsigned long updated; /* jiffies */ 76 unsigned long used; 77 struct list_head remotes; 78 u8 eth_addr[ETH_ALEN]; 79 u16 state; /* see ndm_state */ 80 __be32 vni; 81 u8 flags; /* see ndm_flags */ 82}; 83 84/* salt for hash table */ 85static u32 vxlan_salt __read_mostly; 86 87static inline bool vxlan_collect_metadata(struct vxlan_sock *vs) 88{ 89 return vs->flags & VXLAN_F_COLLECT_METADATA || 90 ip_tunnel_collect_metadata(); 91} 92 93#if IS_ENABLED(CONFIG_IPV6) 94static inline 95bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) 96{ 97 if (a->sa.sa_family != b->sa.sa_family) 98 return false; 99 if (a->sa.sa_family == AF_INET6) 100 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr); 101 else 102 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; 103} 104 105static inline bool vxlan_addr_any(const union vxlan_addr *ipa) 106{ 107 if (ipa->sa.sa_family == AF_INET6) 108 return ipv6_addr_any(&ipa->sin6.sin6_addr); 109 else 110 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); 111} 112 113static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) 114{ 115 if (ipa->sa.sa_family == AF_INET6) 116 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); 117 else 118 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); 119} 120 121static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) 
122{ 123 if (nla_len(nla) >= sizeof(struct in6_addr)) { 124 ip->sin6.sin6_addr = nla_get_in6_addr(nla); 125 ip->sa.sa_family = AF_INET6; 126 return 0; 127 } else if (nla_len(nla) >= sizeof(__be32)) { 128 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla); 129 ip->sa.sa_family = AF_INET; 130 return 0; 131 } else { 132 return -EAFNOSUPPORT; 133 } 134} 135 136static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, 137 const union vxlan_addr *ip) 138{ 139 if (ip->sa.sa_family == AF_INET6) 140 return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr); 141 else 142 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr); 143} 144 145#else /* !CONFIG_IPV6 */ 146 147static inline 148bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) 149{ 150 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; 151} 152 153static inline bool vxlan_addr_any(const union vxlan_addr *ipa) 154{ 155 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); 156} 157 158static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) 159{ 160 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); 161} 162 163static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) 164{ 165 if (nla_len(nla) >= sizeof(struct in6_addr)) { 166 return -EAFNOSUPPORT; 167 } else if (nla_len(nla) >= sizeof(__be32)) { 168 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla); 169 ip->sa.sa_family = AF_INET; 170 return 0; 171 } else { 172 return -EAFNOSUPPORT; 173 } 174} 175 176static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, 177 const union vxlan_addr *ip) 178{ 179 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr); 180} 181#endif 182 183/* Virtual Network hash table head */ 184static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni) 185{ 186 return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)]; 187} 188 189/* Socket hash table head */ 190static inline struct hlist_head *vs_head(struct net *net, __be16 port) 191{ 192 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 193 194 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)]; 195} 196 197/* First remote destination for a forwarding entry. 198 * Guaranteed to be non-NULL because remotes are never deleted. 199 */ 200static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb) 201{ 202 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list); 203} 204 205static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) 206{ 207 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); 208} 209 210/* Find VXLAN socket based on network namespace, address family and UDP port 211 * and enabled unshareable flags. 
212 */ 213static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, 214 __be16 port, u32 flags) 215{ 216 struct vxlan_sock *vs; 217 218 flags &= VXLAN_F_RCV_FLAGS; 219 220 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { 221 if (inet_sk(vs->sock->sk)->inet_sport == port && 222 vxlan_get_sk_family(vs) == family && 223 vs->flags == flags) 224 return vs; 225 } 226 return NULL; 227} 228 229static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni) 230{ 231 struct vxlan_dev *vxlan; 232 233 /* For flow based devices, map all packets to VNI 0 */ 234 if (vs->flags & VXLAN_F_COLLECT_METADATA) 235 vni = 0; 236 237 hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) { 238 if (vxlan->default_dst.remote_vni == vni) 239 return vxlan; 240 } 241 242 return NULL; 243} 244 245/* Look up VNI in a per net namespace table */ 246static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni, 247 sa_family_t family, __be16 port, 248 u32 flags) 249{ 250 struct vxlan_sock *vs; 251 252 vs = vxlan_find_sock(net, family, port, flags); 253 if (!vs) 254 return NULL; 255 256 return vxlan_vs_find_vni(vs, vni); 257} 258 259/* Fill in neighbour message in skbuff. */ 260static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, 261 const struct vxlan_fdb *fdb, 262 u32 portid, u32 seq, int type, unsigned int flags, 263 const struct vxlan_rdst *rdst) 264{ 265 unsigned long now = jiffies; 266 struct nda_cacheinfo ci; 267 struct nlmsghdr *nlh; 268 struct ndmsg *ndm; 269 bool send_ip, send_eth; 270 271 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); 272 if (nlh == NULL) 273 return -EMSGSIZE; 274 275 ndm = nlmsg_data(nlh); 276 memset(ndm, 0, sizeof(*ndm)); 277 278 send_eth = send_ip = true; 279 280 if (type == RTM_GETNEIGH) { 281 send_ip = !vxlan_addr_any(&rdst->remote_ip); 282 send_eth = !is_zero_ether_addr(fdb->eth_addr); 283 ndm->ndm_family = send_ip ? 
rdst->remote_ip.sa.sa_family : AF_INET; 284 } else 285 ndm->ndm_family = AF_BRIDGE; 286 ndm->ndm_state = fdb->state; 287 ndm->ndm_ifindex = vxlan->dev->ifindex; 288 ndm->ndm_flags = fdb->flags; 289 ndm->ndm_type = RTN_UNICAST; 290 291 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && 292 nla_put_s32(skb, NDA_LINK_NETNSID, 293 peernet2id(dev_net(vxlan->dev), vxlan->net))) 294 goto nla_put_failure; 295 296 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 297 goto nla_put_failure; 298 299 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip)) 300 goto nla_put_failure; 301 302 if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port && 303 nla_put_be16(skb, NDA_PORT, rdst->remote_port)) 304 goto nla_put_failure; 305 if (rdst->remote_vni != vxlan->default_dst.remote_vni && 306 nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) 307 goto nla_put_failure; 308 if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && 309 nla_put_u32(skb, NDA_SRC_VNI, 310 be32_to_cpu(fdb->vni))) 311 goto nla_put_failure; 312 if (rdst->remote_ifindex && 313 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) 314 goto nla_put_failure; 315 316 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 317 ci.ndm_confirmed = 0; 318 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); 319 ci.ndm_refcnt = 0; 320 321 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 322 goto nla_put_failure; 323 324 nlmsg_end(skb, nlh); 325 return 0; 326 327nla_put_failure: 328 nlmsg_cancel(skb, nlh); 329 return -EMSGSIZE; 330} 331 332static inline size_t vxlan_nlmsg_size(void) 333{ 334 return NLMSG_ALIGN(sizeof(struct ndmsg)) 335 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 336 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */ 337 + nla_total_size(sizeof(__be16)) /* NDA_PORT */ 338 + nla_total_size(sizeof(__be32)) /* NDA_VNI */ 339 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */ 340 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */ 341 + nla_total_size(sizeof(struct nda_cacheinfo)); 342} 343 344static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, 345 struct vxlan_rdst *rd, int type) 346{ 347 struct net *net = dev_net(vxlan->dev); 348 struct sk_buff *skb; 349 int err = -ENOBUFS; 350 351 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC); 352 if (skb == NULL) 353 goto errout; 354 355 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); 356 if (err < 0) { 357 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 358 WARN_ON(err == -EMSGSIZE); 359 kfree_skb(skb); 360 goto errout; 361 } 362 363 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 364 return; 365errout: 366 if (err < 0) 367 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 368} 369 370static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) 371{ 372 struct vxlan_dev *vxlan = netdev_priv(dev); 373 struct vxlan_fdb f = { 374 .state = NUD_STALE, 375 }; 376 struct vxlan_rdst remote = { 377 .remote_ip = *ipa, /* goes to NDA_DST */ 378 .remote_vni = cpu_to_be32(VXLAN_N_VID), 379 }; 380 381 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); 382} 383 384static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) 385{ 386 struct vxlan_fdb f = { 387 .state = NUD_STALE, 388 }; 389 struct vxlan_rdst remote = { }; 390 391 memcpy(f.eth_addr, eth_addr, ETH_ALEN); 392 393 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); 394} 395 396/* Hash Ethernet address */ 397static u32 eth_hash(const unsigned char *addr) 398{ 399 u64 value = get_unaligned((u64 *)addr); 400 401 /* only want 
6 bytes */ 402#ifdef __BIG_ENDIAN 403 value >>= 16; 404#else 405 value <<= 16; 406#endif 407 return hash_64(value, FDB_HASH_BITS); 408} 409 410static u32 eth_vni_hash(const unsigned char *addr, __be32 vni) 411{ 412 /* use 1 byte of OUI and 3 bytes of NIC */ 413 u32 key = get_unaligned((u32 *)(addr + 2)); 414 415 return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1); 416} 417 418/* Hash chain to use given mac address */ 419static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, 420 const u8 *mac, __be32 vni) 421{ 422 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) 423 return &vxlan->fdb_head[eth_vni_hash(mac, vni)]; 424 else 425 return &vxlan->fdb_head[eth_hash(mac)]; 426} 427 428/* Look up Ethernet address in forwarding table */ 429static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, 430 const u8 *mac, __be32 vni) 431{ 432 struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); 433 struct vxlan_fdb *f; 434 435 hlist_for_each_entry_rcu(f, head, hlist) { 436 if (ether_addr_equal(mac, f->eth_addr)) { 437 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { 438 if (vni == f->vni) 439 return f; 440 } else { 441 return f; 442 } 443 } 444 } 445 446 return NULL; 447} 448 449static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, 450 const u8 *mac, __be32 vni) 451{ 452 struct vxlan_fdb *f; 453 454 f = __vxlan_find_mac(vxlan, mac, vni); 455 if (f) 456 f->used = jiffies; 457 458 return f; 459} 460 461/* caller should hold vxlan->hash_lock */ 462static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, 463 union vxlan_addr *ip, __be16 port, 464 __be32 vni, __u32 ifindex) 465{ 466 struct vxlan_rdst *rd; 467 468 list_for_each_entry(rd, &f->remotes, list) { 469 if (vxlan_addr_equal(&rd->remote_ip, ip) && 470 rd->remote_port == port && 471 rd->remote_vni == vni && 472 rd->remote_ifindex == ifindex) 473 return rd; 474 } 475 476 return NULL; 477} 478 479/* Replace destination of unicast mac */ 480static int vxlan_fdb_replace(struct vxlan_fdb *f, 481 union vxlan_addr *ip, __be16 port, __be32 vni, 482 __u32 ifindex) 483{ 484 struct vxlan_rdst *rd; 485 486 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 487 if (rd) 488 return 0; 489 490 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list); 491 if (!rd) 492 return 0; 493 494 dst_cache_reset(&rd->dst_cache); 495 rd->remote_ip = *ip; 496 rd->remote_port = port; 497 rd->remote_vni = vni; 498 rd->remote_ifindex = ifindex; 499 return 1; 500} 501 502/* Add/update destinations for multicast */ 503static int vxlan_fdb_append(struct vxlan_fdb *f, 504 union vxlan_addr *ip, __be16 port, __be32 vni, 505 __u32 ifindex, struct vxlan_rdst **rdp) 506{ 507 struct vxlan_rdst *rd; 508 509 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 510 if (rd) 511 return 0; 512 513 rd = kmalloc(sizeof(*rd), GFP_ATOMIC); 514 if (rd == NULL) 515 return -ENOBUFS; 516 517 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { 518 kfree(rd); 519 return -ENOBUFS; 520 } 521 522 rd->remote_ip = *ip; 523 rd->remote_port = port; 524 rd->remote_vni = vni; 525 rd->remote_ifindex = ifindex; 526 527 list_add_tail_rcu(&rd->list, &f->remotes); 528 529 *rdp = rd; 530 return 1; 531} 532 533static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, 534 unsigned int off, 535 struct vxlanhdr *vh, size_t hdrlen, 536 __be32 vni_field, 537 struct gro_remcsum *grc, 538 bool nopartial) 539{ 540 size_t start, offset; 541 542 if (skb->remcsum_offload) 543 return vh; 544 545 if (!NAPI_GRO_CB(skb)->csum_valid) 546 return NULL; 547 548 start = 
vxlan_rco_start(vni_field); 549 offset = start + vxlan_rco_offset(vni_field); 550 551 vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen, 552 start, offset, grc, nopartial); 553 554 skb->remcsum_offload = 1; 555 556 return vh; 557} 558 559static struct sk_buff **vxlan_gro_receive(struct sock *sk, 560 struct sk_buff **head, 561 struct sk_buff *skb) 562{ 563 struct sk_buff *p, **pp = NULL; 564 struct vxlanhdr *vh, *vh2; 565 unsigned int hlen, off_vx; 566 int flush = 1; 567 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk); 568 __be32 flags; 569 struct gro_remcsum grc; 570 571 skb_gro_remcsum_init(&grc); 572 573 off_vx = skb_gro_offset(skb); 574 hlen = off_vx + sizeof(*vh); 575 vh = skb_gro_header_fast(skb, off_vx); 576 if (skb_gro_header_hard(skb, hlen)) { 577 vh = skb_gro_header_slow(skb, hlen, off_vx); 578 if (unlikely(!vh)) 579 goto out; 580 } 581 582 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); 583 584 flags = vh->vx_flags; 585 586 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { 587 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr), 588 vh->vx_vni, &grc, 589 !!(vs->flags & 590 VXLAN_F_REMCSUM_NOPARTIAL)); 591 592 if (!vh) 593 goto out; 594 } 595 596 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ 597 598 for (p = *head; p; p = p->next) { 599 if (!NAPI_GRO_CB(p)->same_flow) 600 continue; 601 602 vh2 = (struct vxlanhdr *)(p->data + off_vx); 603 if (vh->vx_flags != vh2->vx_flags || 604 vh->vx_vni != vh2->vx_vni) { 605 NAPI_GRO_CB(p)->same_flow = 0; 606 continue; 607 } 608 } 609 610 pp = call_gro_receive(eth_gro_receive, head, skb); 611 flush = 0; 612 613out: 614 skb_gro_remcsum_cleanup(skb, &grc); 615 NAPI_GRO_CB(skb)->flush |= flush; 616 617 return pp; 618} 619 620static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) 621{ 622 /* Sets 'skb->inner_mac_header' since we are always called with 623 * 'skb->encapsulation' set. 
624 */ 625 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 626} 627 628/* Add new entry to forwarding table -- assumes lock held */ 629static int vxlan_fdb_create(struct vxlan_dev *vxlan, 630 const u8 *mac, union vxlan_addr *ip, 631 __u16 state, __u16 flags, 632 __be16 port, __be32 src_vni, __be32 vni, 633 __u32 ifindex, __u8 ndm_flags) 634{ 635 struct vxlan_rdst *rd = NULL; 636 struct vxlan_fdb *f; 637 int notify = 0; 638 int rc; 639 640 f = __vxlan_find_mac(vxlan, mac, src_vni); 641 if (f) { 642 if (flags & NLM_F_EXCL) { 643 netdev_dbg(vxlan->dev, 644 "lost race to create %pM\n", mac); 645 return -EEXIST; 646 } 647 if (f->state != state) { 648 f->state = state; 649 f->updated = jiffies; 650 notify = 1; 651 } 652 if (f->flags != ndm_flags) { 653 f->flags = ndm_flags; 654 f->updated = jiffies; 655 notify = 1; 656 } 657 if ((flags & NLM_F_REPLACE)) { 658 /* Only change unicasts */ 659 if (!(is_multicast_ether_addr(f->eth_addr) || 660 is_zero_ether_addr(f->eth_addr))) { 661 notify |= vxlan_fdb_replace(f, ip, port, vni, 662 ifindex); 663 } else 664 return -EOPNOTSUPP; 665 } 666 if ((flags & NLM_F_APPEND) && 667 (is_multicast_ether_addr(f->eth_addr) || 668 is_zero_ether_addr(f->eth_addr))) { 669 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 670 671 if (rc < 0) 672 return rc; 673 notify |= rc; 674 } 675 } else { 676 if (!(flags & NLM_F_CREATE)) 677 return -ENOENT; 678 679 if (vxlan->cfg.addrmax && 680 vxlan->addrcnt >= vxlan->cfg.addrmax) 681 return -ENOSPC; 682 683 /* Disallow replace to add a multicast entry */ 684 if ((flags & NLM_F_REPLACE) && 685 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) 686 return -EOPNOTSUPP; 687 688 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 689 f = kmalloc(sizeof(*f), GFP_ATOMIC); 690 if (!f) 691 return -ENOMEM; 692 693 notify = 1; 694 f->state = state; 695 f->flags = ndm_flags; 696 f->updated = f->used = jiffies; 697 f->vni = src_vni; 698 INIT_LIST_HEAD(&f->remotes); 699 memcpy(f->eth_addr, mac, ETH_ALEN); 700 701 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 702 if (rc < 0) { 703 kfree(f); 704 return rc; 705 } 706 707 ++vxlan->addrcnt; 708 hlist_add_head_rcu(&f->hlist, 709 vxlan_fdb_head(vxlan, mac, src_vni)); 710 } 711 712 if (notify) { 713 if (rd == NULL) 714 rd = first_remote_rtnl(f); 715 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH); 716 } 717 718 return 0; 719} 720 721static void vxlan_fdb_free(struct rcu_head *head) 722{ 723 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); 724 struct vxlan_rdst *rd, *nd; 725 726 list_for_each_entry_safe(rd, nd, &f->remotes, list) { 727 dst_cache_destroy(&rd->dst_cache); 728 kfree(rd); 729 } 730 kfree(f); 731} 732 733static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) 734{ 735 netdev_dbg(vxlan->dev, 736 "delete %pM\n", f->eth_addr); 737 738 --vxlan->addrcnt; 739 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); 740 741 hlist_del_rcu(&f->hlist); 742 call_rcu(&f->rcu, vxlan_fdb_free); 743} 744 745static void vxlan_dst_free(struct rcu_head *head) 746{ 747 struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); 748 749 dst_cache_destroy(&rd->dst_cache); 750 kfree(rd); 751} 752 753static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, 754 struct vxlan_rdst *rd) 755{ 756 list_del_rcu(&rd->list); 757 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); 758 call_rcu(&rd->rcu, vxlan_dst_free); 759} 760 761static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, 762 union 
vxlan_addr *ip, __be16 *port, __be32 *src_vni, 763 __be32 *vni, u32 *ifindex) 764{ 765 struct net *net = dev_net(vxlan->dev); 766 int err; 767 768 if (tb[NDA_DST]) { 769 err = vxlan_nla_get_addr(ip, tb[NDA_DST]); 770 if (err) 771 return err; 772 } else { 773 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; 774 if (remote->sa.sa_family == AF_INET) { 775 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY); 776 ip->sa.sa_family = AF_INET; 777#if IS_ENABLED(CONFIG_IPV6) 778 } else { 779 ip->sin6.sin6_addr = in6addr_any; 780 ip->sa.sa_family = AF_INET6; 781#endif 782 } 783 } 784 785 if (tb[NDA_PORT]) { 786 if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) 787 return -EINVAL; 788 *port = nla_get_be16(tb[NDA_PORT]); 789 } else { 790 *port = vxlan->cfg.dst_port; 791 } 792 793 if (tb[NDA_VNI]) { 794 if (nla_len(tb[NDA_VNI]) != sizeof(u32)) 795 return -EINVAL; 796 *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); 797 } else { 798 *vni = vxlan->default_dst.remote_vni; 799 } 800 801 if (tb[NDA_SRC_VNI]) { 802 if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32)) 803 return -EINVAL; 804 *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI])); 805 } else { 806 *src_vni = vxlan->default_dst.remote_vni; 807 } 808 809 if (tb[NDA_IFINDEX]) { 810 struct net_device *tdev; 811 812 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) 813 return -EINVAL; 814 *ifindex = nla_get_u32(tb[NDA_IFINDEX]); 815 tdev = __dev_get_by_index(net, *ifindex); 816 if (!tdev) 817 return -EADDRNOTAVAIL; 818 } else { 819 *ifindex = 0; 820 } 821 822 return 0; 823} 824 825/* Add static entry (via netlink) */ 826static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 827 struct net_device *dev, 828 const unsigned char *addr, u16 vid, u16 flags) 829{ 830 struct vxlan_dev *vxlan = netdev_priv(dev); 831 /* struct net *net = dev_net(vxlan->dev); */ 832 union vxlan_addr ip; 833 __be16 port; 834 __be32 src_vni, vni; 835 u32 ifindex; 836 int err; 837 838 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { 839 pr_info("RTM_NEWNEIGH with invalid state %#x\n", 840 ndm->ndm_state); 841 return -EINVAL; 842 } 843 844 if (tb[NDA_DST] == NULL) 845 return -EINVAL; 846 847 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex); 848 if (err) 849 return err; 850 851 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) 852 return -EAFNOSUPPORT; 853 854 spin_lock_bh(&vxlan->hash_lock); 855 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, 856 port, src_vni, vni, ifindex, ndm->ndm_flags); 857 spin_unlock_bh(&vxlan->hash_lock); 858 859 return err; 860} 861 862static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, 863 const unsigned char *addr, union vxlan_addr ip, 864 __be16 port, __be32 src_vni, u32 vni, u32 ifindex, 865 u16 vid) 866{ 867 struct vxlan_fdb *f; 868 struct vxlan_rdst *rd = NULL; 869 int err = -ENOENT; 870 871 f = vxlan_find_mac(vxlan, addr, src_vni); 872 if (!f) 873 return err; 874 875 if (!vxlan_addr_any(&ip)) { 876 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex); 877 if (!rd) 878 goto out; 879 } 880 881 /* remove a destination if it's not the only one on the list, 882 * otherwise destroy the fdb entry 883 */ 884 if (rd && !list_is_singular(&f->remotes)) { 885 vxlan_fdb_dst_destroy(vxlan, f, rd); 886 goto out; 887 } 888 889 vxlan_fdb_destroy(vxlan, f); 890 891out: 892 return 0; 893} 894 895/* Delete entry (via netlink) */ 896static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], 897 struct net_device *dev, 898 const unsigned char *addr, u16 vid) 899{ 900 struct vxlan_dev *vxlan = 
netdev_priv(dev); 901 union vxlan_addr ip; 902 __be32 src_vni, vni; 903 __be16 port; 904 u32 ifindex; 905 int err; 906 907 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex); 908 if (err) 909 return err; 910 911 spin_lock_bh(&vxlan->hash_lock); 912 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, 913 vid); 914 spin_unlock_bh(&vxlan->hash_lock); 915 916 return err; 917} 918 919/* Dump forwarding table */ 920static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 921 struct net_device *dev, 922 struct net_device *filter_dev, int *idx) 923{ 924 struct vxlan_dev *vxlan = netdev_priv(dev); 925 unsigned int h; 926 int err = 0; 927 928 for (h = 0; h < FDB_HASH_SIZE; ++h) { 929 struct vxlan_fdb *f; 930 931 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { 932 struct vxlan_rdst *rd; 933 934 list_for_each_entry_rcu(rd, &f->remotes, list) { 935 if (*idx < cb->args[2]) 936 goto skip; 937 938 err = vxlan_fdb_info(skb, vxlan, f, 939 NETLINK_CB(cb->skb).portid, 940 cb->nlh->nlmsg_seq, 941 RTM_NEWNEIGH, 942 NLM_F_MULTI, rd); 943 if (err < 0) 944 goto out; 945skip: 946 *idx += 1; 947 } 948 } 949 } 950out: 951 return err; 952} 953 954/* Watch incoming packets to learn mapping between Ethernet address 955 * and Tunnel endpoint. 956 * Return true if packet is bogus and should be dropped. 957 */ 958static bool vxlan_snoop(struct net_device *dev, 959 union vxlan_addr *src_ip, const u8 *src_mac, 960 __be32 vni) 961{ 962 struct vxlan_dev *vxlan = netdev_priv(dev); 963 struct vxlan_fdb *f; 964 965 f = vxlan_find_mac(vxlan, src_mac, vni); 966 if (likely(f)) { 967 struct vxlan_rdst *rdst = first_remote_rcu(f); 968 969 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip))) 970 return false; 971 972 /* Don't migrate static entries, drop packets */ 973 if (f->state & NUD_NOARP) 974 return true; 975 976 if (net_ratelimit()) 977 netdev_info(dev, 978 "%pM migrated from %pIS to %pIS\n", 979 src_mac, &rdst->remote_ip.sa, &src_ip->sa); 980 981 rdst->remote_ip = *src_ip; 982 f->updated = jiffies; 983 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH); 984 } else { 985 /* learned new entry */ 986 spin_lock(&vxlan->hash_lock); 987 988 /* close off race between vxlan_flush and incoming packets */ 989 if (netif_running(dev)) 990 vxlan_fdb_create(vxlan, src_mac, src_ip, 991 NUD_REACHABLE, 992 NLM_F_EXCL|NLM_F_CREATE, 993 vxlan->cfg.dst_port, 994 vni, 995 vxlan->default_dst.remote_vni, 996 0, NTF_SELF); 997 spin_unlock(&vxlan->hash_lock); 998 } 999 1000 return false; 1001} 1002 1003/* See if multicast group is already in use by other ID */ 1004static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) 1005{ 1006 struct vxlan_dev *vxlan; 1007 struct vxlan_sock *sock4; 1008#if IS_ENABLED(CONFIG_IPV6) 1009 struct vxlan_sock *sock6; 1010#endif 1011 unsigned short family = dev->default_dst.remote_ip.sa.sa_family; 1012 1013 sock4 = rtnl_dereference(dev->vn4_sock); 1014 1015 /* The vxlan_sock is only used by dev, leaving group has 1016 * no effect on other vxlan devices. 
1017 */ 1018 if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1) 1019 return false; 1020#if IS_ENABLED(CONFIG_IPV6) 1021 sock6 = rtnl_dereference(dev->vn6_sock); 1022 if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1) 1023 return false; 1024#endif 1025 1026 list_for_each_entry(vxlan, &vn->vxlan_list, next) { 1027 if (!netif_running(vxlan->dev) || vxlan == dev) 1028 continue; 1029 1030 if (family == AF_INET && 1031 rtnl_dereference(vxlan->vn4_sock) != sock4) 1032 continue; 1033#if IS_ENABLED(CONFIG_IPV6) 1034 if (family == AF_INET6 && 1035 rtnl_dereference(vxlan->vn6_sock) != sock6) 1036 continue; 1037#endif 1038 1039 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip, 1040 &dev->default_dst.remote_ip)) 1041 continue; 1042 1043 if (vxlan->default_dst.remote_ifindex != 1044 dev->default_dst.remote_ifindex) 1045 continue; 1046 1047 return true; 1048 } 1049 1050 return false; 1051} 1052 1053static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) 1054{ 1055 struct vxlan_net *vn; 1056 1057 if (!vs) 1058 return false; 1059 if (!atomic_dec_and_test(&vs->refcnt)) 1060 return false; 1061 1062 vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); 1063 spin_lock(&vn->sock_lock); 1064 hlist_del_rcu(&vs->hlist); 1065 udp_tunnel_notify_del_rx_port(vs->sock, 1066 (vs->flags & VXLAN_F_GPE) ? 1067 UDP_TUNNEL_TYPE_VXLAN_GPE : 1068 UDP_TUNNEL_TYPE_VXLAN); 1069 spin_unlock(&vn->sock_lock); 1070 1071 return true; 1072} 1073 1074static void vxlan_sock_release(struct vxlan_dev *vxlan) 1075{ 1076 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); 1077#if IS_ENABLED(CONFIG_IPV6) 1078 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); 1079 1080 rcu_assign_pointer(vxlan->vn6_sock, NULL); 1081#endif 1082 1083 rcu_assign_pointer(vxlan->vn4_sock, NULL); 1084 synchronize_net(); 1085 1086 vxlan_vs_del_dev(vxlan); 1087 1088 if (__vxlan_sock_release_prep(sock4)) { 1089 udp_tunnel_sock_release(sock4->sock); 1090 kfree(sock4); 1091 } 1092 1093#if IS_ENABLED(CONFIG_IPV6) 1094 if (__vxlan_sock_release_prep(sock6)) { 1095 udp_tunnel_sock_release(sock6->sock); 1096 kfree(sock6); 1097 } 1098#endif 1099} 1100 1101/* Update multicast group membership when first VNI on 1102 * multicast address is brought up 1103 */ 1104static int vxlan_igmp_join(struct vxlan_dev *vxlan) 1105{ 1106 struct sock *sk; 1107 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; 1108 int ifindex = vxlan->default_dst.remote_ifindex; 1109 int ret = -EINVAL; 1110 1111 if (ip->sa.sa_family == AF_INET) { 1112 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); 1113 struct ip_mreqn mreq = { 1114 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, 1115 .imr_ifindex = ifindex, 1116 }; 1117 1118 sk = sock4->sock->sk; 1119 lock_sock(sk); 1120 ret = ip_mc_join_group(sk, &mreq); 1121 release_sock(sk); 1122#if IS_ENABLED(CONFIG_IPV6) 1123 } else { 1124 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); 1125 1126 sk = sock6->sock->sk; 1127 lock_sock(sk); 1128 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, 1129 &ip->sin6.sin6_addr); 1130 release_sock(sk); 1131#endif 1132 } 1133 1134 return ret; 1135} 1136 1137/* Inverse of vxlan_igmp_join when last VNI is brought down */ 1138static int vxlan_igmp_leave(struct vxlan_dev *vxlan) 1139{ 1140 struct sock *sk; 1141 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; 1142 int ifindex = vxlan->default_dst.remote_ifindex; 1143 int ret = -EINVAL; 1144 1145 if (ip->sa.sa_family == AF_INET) { 1146 struct vxlan_sock *sock4 = 
rtnl_dereference(vxlan->vn4_sock); 1147 struct ip_mreqn mreq = { 1148 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, 1149 .imr_ifindex = ifindex, 1150 }; 1151 1152 sk = sock4->sock->sk; 1153 lock_sock(sk); 1154 ret = ip_mc_leave_group(sk, &mreq); 1155 release_sock(sk); 1156#if IS_ENABLED(CONFIG_IPV6) 1157 } else { 1158 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); 1159 1160 sk = sock6->sock->sk; 1161 lock_sock(sk); 1162 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, 1163 &ip->sin6.sin6_addr); 1164 release_sock(sk); 1165#endif 1166 } 1167 1168 return ret; 1169} 1170 1171static bool vxlan_remcsum(struct vxlanhdr *unparsed, 1172 struct sk_buff *skb, u32 vxflags) 1173{ 1174 size_t start, offset; 1175 1176 if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) 1177 goto out; 1178 1179 start = vxlan_rco_start(unparsed->vx_vni); 1180 offset = start + vxlan_rco_offset(unparsed->vx_vni); 1181 1182 if (!pskb_may_pull(skb, offset + sizeof(u16))) 1183 return false; 1184 1185 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, 1186 !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); 1187out: 1188 unparsed->vx_flags &= ~VXLAN_HF_RCO; 1189 unparsed->vx_vni &= VXLAN_VNI_MASK; 1190 return true; 1191} 1192 1193static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, 1194 struct sk_buff *skb, u32 vxflags, 1195 struct vxlan_metadata *md) 1196{ 1197 struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed; 1198 struct metadata_dst *tun_dst; 1199 1200 if (!(unparsed->vx_flags & VXLAN_HF_GBP)) 1201 goto out; 1202 1203 md->gbp = ntohs(gbp->policy_id); 1204 1205 tun_dst = (struct metadata_dst *)skb_dst(skb); 1206 if (tun_dst) { 1207 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; 1208 tun_dst->u.tun_info.options_len = sizeof(*md); 1209 } 1210 if (gbp->dont_learn) 1211 md->gbp |= VXLAN_GBP_DONT_LEARN; 1212 1213 if (gbp->policy_applied) 1214 md->gbp |= VXLAN_GBP_POLICY_APPLIED; 1215 1216 /* In flow-based mode, GBP is carried in dst_metadata */ 1217 if (!(vxflags & VXLAN_F_COLLECT_METADATA)) 1218 skb->mark = md->gbp; 1219out: 1220 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; 1221} 1222 1223static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, 1224 __be16 *protocol, 1225 struct sk_buff *skb, u32 vxflags) 1226{ 1227 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed; 1228 1229 /* Need to have Next Protocol set for interfaces in GPE mode. */ 1230 if (!gpe->np_applied) 1231 return false; 1232 /* "The initial version is 0. If a receiver does not support the 1233 * version indicated it MUST drop the packet. 1234 */ 1235 if (gpe->version != 0) 1236 return false; 1237 /* "When the O bit is set to 1, the packet is an OAM packet and OAM 1238 * processing MUST occur." However, we don't implement OAM 1239 * processing, thus drop the packet. 
1240 */ 1241 if (gpe->oam_flag) 1242 return false; 1243 1244 switch (gpe->next_protocol) { 1245 case VXLAN_GPE_NP_IPV4: 1246 *protocol = htons(ETH_P_IP); 1247 break; 1248 case VXLAN_GPE_NP_IPV6: 1249 *protocol = htons(ETH_P_IPV6); 1250 break; 1251 case VXLAN_GPE_NP_ETHERNET: 1252 *protocol = htons(ETH_P_TEB); 1253 break; 1254 default: 1255 return false; 1256 } 1257 1258 unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS; 1259 return true; 1260} 1261 1262static bool vxlan_set_mac(struct vxlan_dev *vxlan, 1263 struct vxlan_sock *vs, 1264 struct sk_buff *skb, __be32 vni) 1265{ 1266 union vxlan_addr saddr; 1267 1268 skb_reset_mac_header(skb); 1269 skb->protocol = eth_type_trans(skb, vxlan->dev); 1270 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1271 1272 /* Ignore packet loops (and multicast echo) */ 1273 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1274 return false; 1275 1276 /* Get address from the outer IP header */ 1277 if (vxlan_get_sk_family(vs) == AF_INET) { 1278 saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; 1279 saddr.sa.sa_family = AF_INET; 1280#if IS_ENABLED(CONFIG_IPV6) 1281 } else { 1282 saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr; 1283 saddr.sa.sa_family = AF_INET6; 1284#endif 1285 } 1286 1287 if ((vxlan->flags & VXLAN_F_LEARN) && 1288 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, vni)) 1289 return false; 1290 1291 return true; 1292} 1293 1294static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, 1295 struct sk_buff *skb) 1296{ 1297 int err = 0; 1298 1299 if (vxlan_get_sk_family(vs) == AF_INET) 1300 err = IP_ECN_decapsulate(oiph, skb); 1301#if IS_ENABLED(CONFIG_IPV6) 1302 else 1303 err = IP6_ECN_decapsulate(oiph, skb); 1304#endif 1305 1306 if (unlikely(err) && log_ecn_error) { 1307 if (vxlan_get_sk_family(vs) == AF_INET) 1308 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", 1309 &((struct iphdr *)oiph)->saddr, 1310 ((struct iphdr *)oiph)->tos); 1311 else 1312 net_info_ratelimited("non-ECT from %pI6\n", 1313 &((struct ipv6hdr *)oiph)->saddr); 1314 } 1315 return err <= 1; 1316} 1317 1318/* Callback from net/ipv4/udp.c to receive packets */ 1319static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) 1320{ 1321 struct pcpu_sw_netstats *stats; 1322 struct vxlan_dev *vxlan; 1323 struct vxlan_sock *vs; 1324 struct vxlanhdr unparsed; 1325 struct vxlan_metadata _md; 1326 struct vxlan_metadata *md = &_md; 1327 __be16 protocol = htons(ETH_P_TEB); 1328 bool raw_proto = false; 1329 void *oiph; 1330 __be32 vni = 0; 1331 1332 /* Need UDP and VXLAN header to be present */ 1333 if (!pskb_may_pull(skb, VXLAN_HLEN)) 1334 goto drop; 1335 1336 unparsed = *vxlan_hdr(skb); 1337 /* VNI flag always required to be set */ 1338 if (!(unparsed.vx_flags & VXLAN_HF_VNI)) { 1339 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", 1340 ntohl(vxlan_hdr(skb)->vx_flags), 1341 ntohl(vxlan_hdr(skb)->vx_vni)); 1342 /* Return non vxlan pkt */ 1343 goto drop; 1344 } 1345 unparsed.vx_flags &= ~VXLAN_HF_VNI; 1346 unparsed.vx_vni &= ~VXLAN_VNI_MASK; 1347 1348 vs = rcu_dereference_sk_user_data(sk); 1349 if (!vs) 1350 goto drop; 1351 1352 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); 1353 1354 vxlan = vxlan_vs_find_vni(vs, vni); 1355 if (!vxlan) 1356 goto drop; 1357 1358 /* For backwards compatibility, only allow reserved fields to be 1359 * used by VXLAN extensions if explicitly requested. 
1360 */ 1361 if (vs->flags & VXLAN_F_GPE) { 1362 if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags)) 1363 goto drop; 1364 raw_proto = true; 1365 } 1366 1367 if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto, 1368 !net_eq(vxlan->net, dev_net(vxlan->dev)))) 1369 goto drop; 1370 1371 if (vxlan_collect_metadata(vs)) { 1372 struct metadata_dst *tun_dst; 1373 1374 tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, 1375 key32_to_tunnel_id(vni), sizeof(*md)); 1376 1377 if (!tun_dst) 1378 goto drop; 1379 1380 md = ip_tunnel_info_opts(&tun_dst->u.tun_info); 1381 1382 skb_dst_set(skb, (struct dst_entry *)tun_dst); 1383 } else { 1384 memset(md, 0, sizeof(*md)); 1385 } 1386 1387 if (vs->flags & VXLAN_F_REMCSUM_RX) 1388 if (!vxlan_remcsum(&unparsed, skb, vs->flags)) 1389 goto drop; 1390 if (vs->flags & VXLAN_F_GBP) 1391 vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md); 1392 /* Note that GBP and GPE can never be active together. This is 1393 * ensured in vxlan_dev_configure. 1394 */ 1395 1396 if (unparsed.vx_flags || unparsed.vx_vni) { 1397 /* If there are any unprocessed flags remaining treat 1398 * this as a malformed packet. This behavior diverges from 1399 * VXLAN RFC (RFC7348) which stipulates that bits in reserved 1400 * in reserved fields are to be ignored. The approach here 1401 * maintains compatibility with previous stack code, and also 1402 * is more robust and provides a little more security in 1403 * adding extensions to VXLAN. 1404 */ 1405 goto drop; 1406 } 1407 1408 if (!raw_proto) { 1409 if (!vxlan_set_mac(vxlan, vs, skb, vni)) 1410 goto drop; 1411 } else { 1412 skb_reset_mac_header(skb); 1413 skb->dev = vxlan->dev; 1414 skb->pkt_type = PACKET_HOST; 1415 } 1416 1417 oiph = skb_network_header(skb); 1418 skb_reset_network_header(skb); 1419 1420 if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { 1421 ++vxlan->dev->stats.rx_frame_errors; 1422 ++vxlan->dev->stats.rx_errors; 1423 goto drop; 1424 } 1425 1426 stats = this_cpu_ptr(vxlan->dev->tstats); 1427 u64_stats_update_begin(&stats->syncp); 1428 stats->rx_packets++; 1429 stats->rx_bytes += skb->len; 1430 u64_stats_update_end(&stats->syncp); 1431 1432 gro_cells_receive(&vxlan->gro_cells, skb); 1433 return 0; 1434 1435drop: 1436 /* Consume bad packet */ 1437 kfree_skb(skb); 1438 return 0; 1439} 1440 1441static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) 1442{ 1443 struct vxlan_dev *vxlan = netdev_priv(dev); 1444 struct arphdr *parp; 1445 u8 *arpptr, *sha; 1446 __be32 sip, tip; 1447 struct neighbour *n; 1448 1449 if (dev->flags & IFF_NOARP) 1450 goto out; 1451 1452 if (!pskb_may_pull(skb, arp_hdr_len(dev))) { 1453 dev->stats.tx_dropped++; 1454 goto out; 1455 } 1456 parp = arp_hdr(skb); 1457 1458 if ((parp->ar_hrd != htons(ARPHRD_ETHER) && 1459 parp->ar_hrd != htons(ARPHRD_IEEE802)) || 1460 parp->ar_pro != htons(ETH_P_IP) || 1461 parp->ar_op != htons(ARPOP_REQUEST) || 1462 parp->ar_hln != dev->addr_len || 1463 parp->ar_pln != 4) 1464 goto out; 1465 arpptr = (u8 *)parp + sizeof(struct arphdr); 1466 sha = arpptr; 1467 arpptr += dev->addr_len; /* sha */ 1468 memcpy(&sip, arpptr, sizeof(sip)); 1469 arpptr += sizeof(sip); 1470 arpptr += dev->addr_len; /* tha */ 1471 memcpy(&tip, arpptr, sizeof(tip)); 1472 1473 if (ipv4_is_loopback(tip) || 1474 ipv4_is_multicast(tip)) 1475 goto out; 1476 1477 n = neigh_lookup(&arp_tbl, &tip, dev); 1478 1479 if (n) { 1480 struct vxlan_fdb *f; 1481 struct sk_buff *reply; 1482 1483 if (!(n->nud_state & NUD_CONNECTED)) { 1484 neigh_release(n); 1485 goto out; 
1486 } 1487 1488 f = vxlan_find_mac(vxlan, n->ha, vni); 1489 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 1490 /* bridge-local neighbor */ 1491 neigh_release(n); 1492 goto out; 1493 } 1494 1495 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 1496 n->ha, sha); 1497 1498 neigh_release(n); 1499 1500 if (reply == NULL) 1501 goto out; 1502 1503 skb_reset_mac_header(reply); 1504 __skb_pull(reply, skb_network_offset(reply)); 1505 reply->ip_summed = CHECKSUM_UNNECESSARY; 1506 reply->pkt_type = PACKET_HOST; 1507 1508 if (netif_rx_ni(reply) == NET_RX_DROP) 1509 dev->stats.rx_dropped++; 1510 } else if (vxlan->flags & VXLAN_F_L3MISS) { 1511 union vxlan_addr ipa = { 1512 .sin.sin_addr.s_addr = tip, 1513 .sin.sin_family = AF_INET, 1514 }; 1515 1516 vxlan_ip_miss(dev, &ipa); 1517 } 1518out: 1519 consume_skb(skb); 1520 return NETDEV_TX_OK; 1521} 1522 1523#if IS_ENABLED(CONFIG_IPV6) 1524static struct sk_buff *vxlan_na_create(struct sk_buff *request, 1525 struct neighbour *n, bool isrouter) 1526{ 1527 struct net_device *dev = request->dev; 1528 struct sk_buff *reply; 1529 struct nd_msg *ns, *na; 1530 struct ipv6hdr *pip6; 1531 u8 *daddr; 1532 int na_olen = 8; /* opt hdr + ETH_ALEN for target */ 1533 int ns_olen; 1534 int i, len; 1535 1536 if (dev == NULL || !pskb_may_pull(request, request->len)) 1537 return NULL; 1538 1539 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + 1540 sizeof(*na) + na_olen + dev->needed_tailroom; 1541 reply = alloc_skb(len, GFP_ATOMIC); 1542 if (reply == NULL) 1543 return NULL; 1544 1545 reply->protocol = htons(ETH_P_IPV6); 1546 reply->dev = dev; 1547 skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); 1548 skb_push(reply, sizeof(struct ethhdr)); 1549 skb_reset_mac_header(reply); 1550 1551 ns = (struct nd_msg *)(ipv6_hdr(request) + 1); 1552 1553 daddr = eth_hdr(request)->h_source; 1554 ns_olen = request->len - skb_network_offset(request) - 1555 sizeof(struct ipv6hdr) - sizeof(*ns); 1556 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { 1557 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { 1558 daddr = ns->opt + i + sizeof(struct nd_opt_hdr); 1559 break; 1560 } 1561 } 1562 1563 /* Ethernet header */ 1564 ether_addr_copy(eth_hdr(reply)->h_dest, daddr); 1565 ether_addr_copy(eth_hdr(reply)->h_source, n->ha); 1566 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); 1567 reply->protocol = htons(ETH_P_IPV6); 1568 1569 skb_pull(reply, sizeof(struct ethhdr)); 1570 skb_reset_network_header(reply); 1571 skb_put(reply, sizeof(struct ipv6hdr)); 1572 1573 /* IPv6 header */ 1574 1575 pip6 = ipv6_hdr(reply); 1576 memset(pip6, 0, sizeof(struct ipv6hdr)); 1577 pip6->version = 6; 1578 pip6->priority = ipv6_hdr(request)->priority; 1579 pip6->nexthdr = IPPROTO_ICMPV6; 1580 pip6->hop_limit = 255; 1581 pip6->daddr = ipv6_hdr(request)->saddr; 1582 pip6->saddr = *(struct in6_addr *)n->primary_key; 1583 1584 skb_pull(reply, sizeof(struct ipv6hdr)); 1585 skb_reset_transport_header(reply); 1586 1587 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); 1588 1589 /* Neighbor Advertisement */ 1590 memset(na, 0, sizeof(*na)+na_olen); 1591 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; 1592 na->icmph.icmp6_router = isrouter; 1593 na->icmph.icmp6_override = 1; 1594 na->icmph.icmp6_solicited = 1; 1595 na->target = ns->target; 1596 ether_addr_copy(&na->opt[2], n->ha); 1597 na->opt[0] = ND_OPT_TARGET_LL_ADDR; 1598 na->opt[1] = na_olen >> 3; 1599 1600 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, 1601 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, 1602 
csum_partial(na, sizeof(*na)+na_olen, 0)); 1603 1604 pip6->payload_len = htons(sizeof(*na)+na_olen); 1605 1606 skb_push(reply, sizeof(struct ipv6hdr)); 1607 1608 reply->ip_summed = CHECKSUM_UNNECESSARY; 1609 1610 return reply; 1611} 1612 1613static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) 1614{ 1615 struct vxlan_dev *vxlan = netdev_priv(dev); 1616 struct nd_msg *msg; 1617 const struct ipv6hdr *iphdr; 1618 const struct in6_addr *daddr; 1619 struct neighbour *n; 1620 struct inet6_dev *in6_dev; 1621 1622 in6_dev = __in6_dev_get(dev); 1623 if (!in6_dev) 1624 goto out; 1625 1626 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) 1627 goto out; 1628 1629 iphdr = ipv6_hdr(skb); 1630 daddr = &iphdr->daddr; 1631 1632 msg = (struct nd_msg *)(iphdr + 1); 1633 if (msg->icmph.icmp6_code != 0 || 1634 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) 1635 goto out; 1636 1637 if (ipv6_addr_loopback(daddr) || 1638 ipv6_addr_is_multicast(&msg->target)) 1639 goto out; 1640 1641 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); 1642 1643 if (n) { 1644 struct vxlan_fdb *f; 1645 struct sk_buff *reply; 1646 1647 if (!(n->nud_state & NUD_CONNECTED)) { 1648 neigh_release(n); 1649 goto out; 1650 } 1651 1652 f = vxlan_find_mac(vxlan, n->ha, vni); 1653 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 1654 /* bridge-local neighbor */ 1655 neigh_release(n); 1656 goto out; 1657 } 1658 1659 reply = vxlan_na_create(skb, n, 1660 !!(f ? f->flags & NTF_ROUTER : 0)); 1661 1662 neigh_release(n); 1663 1664 if (reply == NULL) 1665 goto out; 1666 1667 if (netif_rx_ni(reply) == NET_RX_DROP) 1668 dev->stats.rx_dropped++; 1669 1670 } else if (vxlan->flags & VXLAN_F_L3MISS) { 1671 union vxlan_addr ipa = { 1672 .sin6.sin6_addr = msg->target, 1673 .sin6.sin6_family = AF_INET6, 1674 }; 1675 1676 vxlan_ip_miss(dev, &ipa); 1677 } 1678 1679out: 1680 consume_skb(skb); 1681 return NETDEV_TX_OK; 1682} 1683#endif 1684 1685static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) 1686{ 1687 struct vxlan_dev *vxlan = netdev_priv(dev); 1688 struct neighbour *n; 1689 1690 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) 1691 return false; 1692 1693 n = NULL; 1694 switch (ntohs(eth_hdr(skb)->h_proto)) { 1695 case ETH_P_IP: 1696 { 1697 struct iphdr *pip; 1698 1699 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 1700 return false; 1701 pip = ip_hdr(skb); 1702 n = neigh_lookup(&arp_tbl, &pip->daddr, dev); 1703 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { 1704 union vxlan_addr ipa = { 1705 .sin.sin_addr.s_addr = pip->daddr, 1706 .sin.sin_family = AF_INET, 1707 }; 1708 1709 vxlan_ip_miss(dev, &ipa); 1710 return false; 1711 } 1712 1713 break; 1714 } 1715#if IS_ENABLED(CONFIG_IPV6) 1716 case ETH_P_IPV6: 1717 { 1718 struct ipv6hdr *pip6; 1719 1720 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 1721 return false; 1722 pip6 = ipv6_hdr(skb); 1723 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev); 1724 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { 1725 union vxlan_addr ipa = { 1726 .sin6.sin6_addr = pip6->daddr, 1727 .sin6.sin6_family = AF_INET6, 1728 }; 1729 1730 vxlan_ip_miss(dev, &ipa); 1731 return false; 1732 } 1733 1734 break; 1735 } 1736#endif 1737 default: 1738 return false; 1739 } 1740 1741 if (n) { 1742 bool diff; 1743 1744 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha); 1745 if (diff) { 1746 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, 1747 dev->addr_len); 1748 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len); 1749 } 1750 
neigh_release(n); 1751 return diff; 1752 } 1753 1754 return false; 1755} 1756 1757static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, 1758 struct vxlan_metadata *md) 1759{ 1760 struct vxlanhdr_gbp *gbp; 1761 1762 if (!md->gbp) 1763 return; 1764 1765 gbp = (struct vxlanhdr_gbp *)vxh; 1766 vxh->vx_flags |= VXLAN_HF_GBP; 1767 1768 if (md->gbp & VXLAN_GBP_DONT_LEARN) 1769 gbp->dont_learn = 1; 1770 1771 if (md->gbp & VXLAN_GBP_POLICY_APPLIED) 1772 gbp->policy_applied = 1; 1773 1774 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); 1775} 1776 1777static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags, 1778 __be16 protocol) 1779{ 1780 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh; 1781 1782 gpe->np_applied = 1; 1783 1784 switch (protocol) { 1785 case htons(ETH_P_IP): 1786 gpe->next_protocol = VXLAN_GPE_NP_IPV4; 1787 return 0; 1788 case htons(ETH_P_IPV6): 1789 gpe->next_protocol = VXLAN_GPE_NP_IPV6; 1790 return 0; 1791 case htons(ETH_P_TEB): 1792 gpe->next_protocol = VXLAN_GPE_NP_ETHERNET; 1793 return 0; 1794 } 1795 return -EPFNOSUPPORT; 1796} 1797 1798static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, 1799 int iphdr_len, __be32 vni, 1800 struct vxlan_metadata *md, u32 vxflags, 1801 bool udp_sum) 1802{ 1803 struct vxlanhdr *vxh; 1804 int min_headroom; 1805 int err; 1806 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; 1807 __be16 inner_protocol = htons(ETH_P_TEB); 1808 1809 if ((vxflags & VXLAN_F_REMCSUM_TX) && 1810 skb->ip_summed == CHECKSUM_PARTIAL) { 1811 int csum_start = skb_checksum_start_offset(skb); 1812 1813 if (csum_start <= VXLAN_MAX_REMCSUM_START && 1814 !(csum_start & VXLAN_RCO_SHIFT_MASK) && 1815 (skb->csum_offset == offsetof(struct udphdr, check) || 1816 skb->csum_offset == offsetof(struct tcphdr, check))) 1817 type |= SKB_GSO_TUNNEL_REMCSUM; 1818 } 1819 1820 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len 1821 + VXLAN_HLEN + iphdr_len; 1822 1823 /* Need space for new headers (invalidates iph ptr) */ 1824 err = skb_cow_head(skb, min_headroom); 1825 if (unlikely(err)) 1826 return err; 1827 1828 err = iptunnel_handle_offloads(skb, type); 1829 if (err) 1830 return err; 1831 1832 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1833 vxh->vx_flags = VXLAN_HF_VNI; 1834 vxh->vx_vni = vxlan_vni_field(vni); 1835 1836 if (type & SKB_GSO_TUNNEL_REMCSUM) { 1837 unsigned int start; 1838 1839 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr); 1840 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset); 1841 vxh->vx_flags |= VXLAN_HF_RCO; 1842 1843 if (!skb_is_gso(skb)) { 1844 skb->ip_summed = CHECKSUM_NONE; 1845 skb->encapsulation = 0; 1846 } 1847 } 1848 1849 if (vxflags & VXLAN_F_GBP) 1850 vxlan_build_gbp_hdr(vxh, vxflags, md); 1851 if (vxflags & VXLAN_F_GPE) { 1852 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol); 1853 if (err < 0) 1854 return err; 1855 inner_protocol = skb->protocol; 1856 } 1857 1858 skb_set_inner_protocol(skb, inner_protocol); 1859 return 0; 1860} 1861 1862static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, 1863 struct vxlan_sock *sock4, 1864 struct sk_buff *skb, int oif, u8 tos, 1865 __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport, 1866 struct dst_cache *dst_cache, 1867 const struct ip_tunnel_info *info) 1868{ 1869 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1870 struct rtable *rt = NULL; 1871 struct flowi4 fl4; 1872 1873 if (!sock4) 1874 return ERR_PTR(-EIO); 1875 1876 if (tos && !info) 1877 use_cache = 
false; 1878 if (use_cache) { 1879 rt = dst_cache_get_ip4(dst_cache, saddr); 1880 if (rt) 1881 return rt; 1882 } 1883 1884 memset(&fl4, 0, sizeof(fl4)); 1885 fl4.flowi4_oif = oif; 1886 fl4.flowi4_tos = RT_TOS(tos); 1887 fl4.flowi4_mark = skb->mark; 1888 fl4.flowi4_proto = IPPROTO_UDP; 1889 fl4.daddr = daddr; 1890 fl4.saddr = *saddr; 1891 fl4.fl4_dport = dport; 1892 fl4.fl4_sport = sport; 1893 1894 rt = ip_route_output_key(vxlan->net, &fl4); 1895 if (likely(!IS_ERR(rt))) { 1896 if (rt->dst.dev == dev) { 1897 netdev_dbg(dev, "circular route to %pI4\n", &daddr); 1898 ip_rt_put(rt); 1899 return ERR_PTR(-ELOOP); 1900 } 1901 1902 *saddr = fl4.saddr; 1903 if (use_cache) 1904 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); 1905 } else { 1906 netdev_dbg(dev, "no route to %pI4\n", &daddr); 1907 return ERR_PTR(-ENETUNREACH); 1908 } 1909 return rt; 1910} 1911 1912#if IS_ENABLED(CONFIG_IPV6) 1913static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, 1914 struct net_device *dev, 1915 struct vxlan_sock *sock6, 1916 struct sk_buff *skb, int oif, u8 tos, 1917 __be32 label, 1918 const struct in6_addr *daddr, 1919 struct in6_addr *saddr, 1920 __be16 dport, __be16 sport, 1921 struct dst_cache *dst_cache, 1922 const struct ip_tunnel_info *info) 1923{ 1924 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1925 struct dst_entry *ndst; 1926 struct flowi6 fl6; 1927 int err; 1928 1929 if (!sock6) 1930 return ERR_PTR(-EIO); 1931 1932 if (tos && !info) 1933 use_cache = false; 1934 if (use_cache) { 1935 ndst = dst_cache_get_ip6(dst_cache, saddr); 1936 if (ndst) 1937 return ndst; 1938 } 1939 1940 memset(&fl6, 0, sizeof(fl6)); 1941 fl6.flowi6_oif = oif; 1942 fl6.daddr = *daddr; 1943 fl6.saddr = *saddr; 1944 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); 1945 fl6.flowi6_mark = skb->mark; 1946 fl6.flowi6_proto = IPPROTO_UDP; 1947 fl6.fl6_dport = dport; 1948 fl6.fl6_sport = sport; 1949 1950 err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 1951 sock6->sock->sk, 1952 &ndst, &fl6); 1953 if (unlikely(err < 0)) { 1954 netdev_dbg(dev, "no route to %pI6\n", daddr); 1955 return ERR_PTR(-ENETUNREACH); 1956 } 1957 1958 if (unlikely(ndst->dev == dev)) { 1959 netdev_dbg(dev, "circular route to %pI6\n", daddr); 1960 dst_release(ndst); 1961 return ERR_PTR(-ELOOP); 1962 } 1963 1964 *saddr = fl6.saddr; 1965 if (use_cache) 1966 dst_cache_set_ip6(dst_cache, ndst, saddr); 1967 return ndst; 1968} 1969#endif 1970 1971/* Bypass encapsulation if the destination is local */ 1972static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1973 struct vxlan_dev *dst_vxlan, __be32 vni) 1974{ 1975 struct pcpu_sw_netstats *tx_stats, *rx_stats; 1976 union vxlan_addr loopback; 1977 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 1978 struct net_device *dev = skb->dev; 1979 int len = skb->len; 1980 1981 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 1982 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); 1983 skb->pkt_type = PACKET_HOST; 1984 skb->encapsulation = 0; 1985 skb->dev = dst_vxlan->dev; 1986 __skb_pull(skb, skb_network_offset(skb)); 1987 1988 if (remote_ip->sa.sa_family == AF_INET) { 1989 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 1990 loopback.sa.sa_family = AF_INET; 1991#if IS_ENABLED(CONFIG_IPV6) 1992 } else { 1993 loopback.sin6.sin6_addr = in6addr_loopback; 1994 loopback.sa.sa_family = AF_INET6; 1995#endif 1996 } 1997 1998 if (dst_vxlan->flags & VXLAN_F_LEARN) 1999 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni); 2000 2001 
u64_stats_update_begin(&tx_stats->syncp); 2002 tx_stats->tx_packets++; 2003 tx_stats->tx_bytes += len; 2004 u64_stats_update_end(&tx_stats->syncp); 2005 2006 if (netif_rx(skb) == NET_RX_SUCCESS) { 2007 u64_stats_update_begin(&rx_stats->syncp); 2008 rx_stats->rx_packets++; 2009 rx_stats->rx_bytes += len; 2010 u64_stats_update_end(&rx_stats->syncp); 2011 } else { 2012 dev->stats.rx_dropped++; 2013 } 2014} 2015 2016static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, 2017 struct vxlan_dev *vxlan, union vxlan_addr *daddr, 2018 __be16 dst_port, __be32 vni, struct dst_entry *dst, 2019 u32 rt_flags) 2020{ 2021#if IS_ENABLED(CONFIG_IPV6) 2022 /* IPv6 rt-flags are checked against RTF_LOCAL, but the value of 2023 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple 2024 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry. 2025 */ 2026 BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL); 2027#endif 2028 /* Bypass encapsulation if the destination is local */ 2029 if (rt_flags & RTCF_LOCAL && 2030 !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2031 struct vxlan_dev *dst_vxlan; 2032 2033 dst_release(dst); 2034 dst_vxlan = vxlan_find_vni(vxlan->net, vni, 2035 daddr->sa.sa_family, dst_port, 2036 vxlan->flags); 2037 if (!dst_vxlan) { 2038 dev->stats.tx_errors++; 2039 kfree_skb(skb); 2040 2041 return -ENOENT; 2042 } 2043 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni); 2044 return 1; 2045 } 2046 2047 return 0; 2048} 2049 2050static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, 2051 __be32 default_vni, struct vxlan_rdst *rdst, 2052 bool did_rsc) 2053{ 2054 struct dst_cache *dst_cache; 2055 struct ip_tunnel_info *info; 2056 struct vxlan_dev *vxlan = netdev_priv(dev); 2057 const struct iphdr *old_iph = ip_hdr(skb); 2058 union vxlan_addr *dst; 2059 union vxlan_addr remote_ip, local_ip; 2060 struct vxlan_metadata _md; 2061 struct vxlan_metadata *md = &_md; 2062 __be16 src_port = 0, dst_port; 2063 struct dst_entry *ndst = NULL; 2064 __be32 vni, label; 2065 __u8 tos, ttl; 2066 int err; 2067 u32 flags = vxlan->flags; 2068 bool udp_sum = false; 2069 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); 2070 2071 info = skb_tunnel_info(skb); 2072 2073 if (rdst) { 2074 dst = &rdst->remote_ip; 2075 if (vxlan_addr_any(dst)) { 2076 if (did_rsc) { 2077 /* short-circuited back to local bridge */ 2078 vxlan_encap_bypass(skb, vxlan, vxlan, default_vni); 2079 return; 2080 } 2081 goto drop; 2082 } 2083 2084 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; 2085 vni = (rdst->remote_vni) ? 
: default_vni; 2086 local_ip = vxlan->cfg.saddr; 2087 dst_cache = &rdst->dst_cache; 2088 md->gbp = skb->mark; 2089 ttl = vxlan->cfg.ttl; 2090 if (!ttl && vxlan_addr_multicast(dst)) 2091 ttl = 1; 2092 2093 tos = vxlan->cfg.tos; 2094 if (tos == 1) 2095 tos = ip_tunnel_get_dsfield(old_iph, skb); 2096 2097 if (dst->sa.sa_family == AF_INET) 2098 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); 2099 else 2100 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2101 label = vxlan->cfg.label; 2102 } else { 2103 if (!info) { 2104 WARN_ONCE(1, "%s: Missing encapsulation instructions\n", 2105 dev->name); 2106 goto drop; 2107 } 2108 remote_ip.sa.sa_family = ip_tunnel_info_af(info); 2109 if (remote_ip.sa.sa_family == AF_INET) { 2110 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; 2111 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; 2112 } else { 2113 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; 2114 local_ip.sin6.sin6_addr = info->key.u.ipv6.src; 2115 } 2116 dst = &remote_ip; 2117 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; 2118 vni = tunnel_id_to_key32(info->key.tun_id); 2119 dst_cache = &info->dst_cache; 2120 if (info->options_len) 2121 md = ip_tunnel_info_opts(info); 2122 ttl = info->key.ttl; 2123 tos = info->key.tos; 2124 label = info->key.label; 2125 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); 2126 } 2127 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2128 vxlan->cfg.port_max, true); 2129 2130 rcu_read_lock(); 2131 if (dst->sa.sa_family == AF_INET) { 2132 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 2133 struct rtable *rt; 2134 __be16 df = 0; 2135 2136 rt = vxlan_get_route(vxlan, dev, sock4, skb, 2137 rdst ? rdst->remote_ifindex : 0, tos, 2138 dst->sin.sin_addr.s_addr, 2139 &local_ip.sin.sin_addr.s_addr, 2140 dst_port, src_port, 2141 dst_cache, info); 2142 if (IS_ERR(rt)) { 2143 err = PTR_ERR(rt); 2144 goto tx_error; 2145 } 2146 2147 /* Bypass encapsulation if the destination is local */ 2148 if (!info) { 2149 err = encap_bypass_if_local(skb, dev, vxlan, dst, 2150 dst_port, vni, &rt->dst, 2151 rt->rt_flags); 2152 if (err) 2153 goto out_unlock; 2154 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { 2155 df = htons(IP_DF); 2156 } 2157 2158 ndst = &rt->dst; 2159 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2160 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2161 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), 2162 vni, md, flags, udp_sum); 2163 if (err < 0) 2164 goto tx_error; 2165 2166 udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr, 2167 dst->sin.sin_addr.s_addr, tos, ttl, df, 2168 src_port, dst_port, xnet, !udp_sum); 2169#if IS_ENABLED(CONFIG_IPV6) 2170 } else { 2171 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); 2172 2173 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 2174 rdst ? rdst->remote_ifindex : 0, tos, 2175 label, &dst->sin6.sin6_addr, 2176 &local_ip.sin6.sin6_addr, 2177 dst_port, src_port, 2178 dst_cache, info); 2179 if (IS_ERR(ndst)) { 2180 err = PTR_ERR(ndst); 2181 ndst = NULL; 2182 goto tx_error; 2183 } 2184 2185 if (!info) { 2186 u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; 2187 2188 err = encap_bypass_if_local(skb, dev, vxlan, dst, 2189 dst_port, vni, ndst, 2190 rt6i_flags); 2191 if (err) 2192 goto out_unlock; 2193 } 2194 2195 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2196 ttl = ttl ? 
: ip6_dst_hoplimit(ndst); 2197 skb_scrub_packet(skb, xnet); 2198 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), 2199 vni, md, flags, udp_sum); 2200 if (err < 0) 2201 goto tx_error; 2202 2203 udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, 2204 &local_ip.sin6.sin6_addr, 2205 &dst->sin6.sin6_addr, tos, ttl, 2206 label, src_port, dst_port, !udp_sum); 2207#endif 2208 } 2209out_unlock: 2210 rcu_read_unlock(); 2211 return; 2212 2213drop: 2214 dev->stats.tx_dropped++; 2215 dev_kfree_skb(skb); 2216 return; 2217 2218tx_error: 2219 rcu_read_unlock(); 2220 if (err == -ELOOP) 2221 dev->stats.collisions++; 2222 else if (err == -ENETUNREACH) 2223 dev->stats.tx_carrier_errors++; 2224 dst_release(ndst); 2225 dev->stats.tx_errors++; 2226 kfree_skb(skb); 2227} 2228 2229/* Transmit local packets over Vxlan 2230 * 2231 * Outer IP header inherits ECN and DF from inner header. 2232 * Outer UDP destination is the VXLAN assigned port. 2233 * source port is based on hash of flow 2234 */ 2235static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) 2236{ 2237 struct vxlan_dev *vxlan = netdev_priv(dev); 2238 const struct ip_tunnel_info *info; 2239 struct ethhdr *eth; 2240 bool did_rsc = false; 2241 struct vxlan_rdst *rdst, *fdst = NULL; 2242 struct vxlan_fdb *f; 2243 __be32 vni = 0; 2244 2245 info = skb_tunnel_info(skb); 2246 2247 skb_reset_mac_header(skb); 2248 2249 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { 2250 if (info && info->mode & IP_TUNNEL_INFO_BRIDGE && 2251 info->mode & IP_TUNNEL_INFO_TX) { 2252 vni = tunnel_id_to_key32(info->key.tun_id); 2253 } else { 2254 if (info && info->mode & IP_TUNNEL_INFO_TX) 2255 vxlan_xmit_one(skb, dev, vni, NULL, false); 2256 else 2257 kfree_skb(skb); 2258 return NETDEV_TX_OK; 2259 } 2260 } 2261 2262 if (vxlan->flags & VXLAN_F_PROXY) { 2263 eth = eth_hdr(skb); 2264 if (ntohs(eth->h_proto) == ETH_P_ARP) 2265 return arp_reduce(dev, skb, vni); 2266#if IS_ENABLED(CONFIG_IPV6) 2267 else if (ntohs(eth->h_proto) == ETH_P_IPV6) { 2268 struct ipv6hdr *hdr, _hdr; 2269 if ((hdr = skb_header_pointer(skb, 2270 skb_network_offset(skb), 2271 sizeof(_hdr), &_hdr)) && 2272 hdr->nexthdr == IPPROTO_ICMPV6) 2273 return neigh_reduce(dev, skb, vni); 2274 } 2275#endif 2276 } 2277 2278 eth = eth_hdr(skb); 2279 f = vxlan_find_mac(vxlan, eth->h_dest, vni); 2280 did_rsc = false; 2281 2282 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) && 2283 (ntohs(eth->h_proto) == ETH_P_IP || 2284 ntohs(eth->h_proto) == ETH_P_IPV6)) { 2285 did_rsc = route_shortcircuit(dev, skb); 2286 if (did_rsc) 2287 f = vxlan_find_mac(vxlan, eth->h_dest, vni); 2288 } 2289 2290 if (f == NULL) { 2291 f = vxlan_find_mac(vxlan, all_zeros_mac, vni); 2292 if (f == NULL) { 2293 if ((vxlan->flags & VXLAN_F_L2MISS) && 2294 !is_multicast_ether_addr(eth->h_dest)) 2295 vxlan_fdb_miss(vxlan, eth->h_dest); 2296 2297 dev->stats.tx_dropped++; 2298 kfree_skb(skb); 2299 return NETDEV_TX_OK; 2300 } 2301 } 2302 2303 list_for_each_entry_rcu(rdst, &f->remotes, list) { 2304 struct sk_buff *skb1; 2305 2306 if (!fdst) { 2307 fdst = rdst; 2308 continue; 2309 } 2310 skb1 = skb_clone(skb, GFP_ATOMIC); 2311 if (skb1) 2312 vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc); 2313 } 2314 2315 if (fdst) 2316 vxlan_xmit_one(skb, dev, vni, fdst, did_rsc); 2317 else 2318 kfree_skb(skb); 2319 return NETDEV_TX_OK; 2320} 2321 2322/* Walk the forwarding table and purge stale entries */ 2323static void vxlan_cleanup(unsigned long arg) 2324{ 2325 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg; 2326 unsigned long 
next_timer = jiffies + FDB_AGE_INTERVAL; 2327 unsigned int h; 2328 2329 if (!netif_running(vxlan->dev)) 2330 return; 2331 2332 for (h = 0; h < FDB_HASH_SIZE; ++h) { 2333 struct hlist_node *p, *n; 2334 2335 spin_lock_bh(&vxlan->hash_lock); 2336 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2337 struct vxlan_fdb *f 2338 = container_of(p, struct vxlan_fdb, hlist); 2339 unsigned long timeout; 2340 2341 if (f->state & (NUD_PERMANENT | NUD_NOARP)) 2342 continue; 2343 2344 if (f->flags & NTF_EXT_LEARNED) 2345 continue; 2346 2347 timeout = f->used + vxlan->cfg.age_interval * HZ; 2348 if (time_before_eq(timeout, jiffies)) { 2349 netdev_dbg(vxlan->dev, 2350 "garbage collect %pM\n", 2351 f->eth_addr); 2352 f->state = NUD_STALE; 2353 vxlan_fdb_destroy(vxlan, f); 2354 } else if (time_before(timeout, next_timer)) 2355 next_timer = timeout; 2356 } 2357 spin_unlock_bh(&vxlan->hash_lock); 2358 } 2359 2360 mod_timer(&vxlan->age_timer, next_timer); 2361} 2362 2363static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) 2364{ 2365 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2366 2367 spin_lock(&vn->sock_lock); 2368 hlist_del_init_rcu(&vxlan->hlist); 2369 spin_unlock(&vn->sock_lock); 2370} 2371 2372static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) 2373{ 2374 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2375 __be32 vni = vxlan->default_dst.remote_vni; 2376 2377 spin_lock(&vn->sock_lock); 2378 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); 2379 spin_unlock(&vn->sock_lock); 2380} 2381 2382/* Setup stats when device is created */ 2383static int vxlan_init(struct net_device *dev) 2384{ 2385 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2386 if (!dev->tstats) 2387 return -ENOMEM; 2388 2389 return 0; 2390} 2391 2392static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) 2393{ 2394 struct vxlan_fdb *f; 2395 2396 spin_lock_bh(&vxlan->hash_lock); 2397 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); 2398 if (f) 2399 vxlan_fdb_destroy(vxlan, f); 2400 spin_unlock_bh(&vxlan->hash_lock); 2401} 2402 2403static void vxlan_uninit(struct net_device *dev) 2404{ 2405 struct vxlan_dev *vxlan = netdev_priv(dev); 2406 2407 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); 2408 2409 free_percpu(dev->tstats); 2410} 2411 2412/* Start ageing timer and join group when device is brought up */ 2413static int vxlan_open(struct net_device *dev) 2414{ 2415 struct vxlan_dev *vxlan = netdev_priv(dev); 2416 int ret; 2417 2418 ret = vxlan_sock_add(vxlan); 2419 if (ret < 0) 2420 return ret; 2421 2422 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { 2423 ret = vxlan_igmp_join(vxlan); 2424 if (ret == -EADDRINUSE) 2425 ret = 0; 2426 if (ret) { 2427 vxlan_sock_release(vxlan); 2428 return ret; 2429 } 2430 } 2431 2432 if (vxlan->cfg.age_interval) 2433 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); 2434 2435 return ret; 2436} 2437 2438/* Purge the forwarding table */ 2439static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) 2440{ 2441 unsigned int h; 2442 2443 spin_lock_bh(&vxlan->hash_lock); 2444 for (h = 0; h < FDB_HASH_SIZE; ++h) { 2445 struct hlist_node *p, *n; 2446 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2447 struct vxlan_fdb *f 2448 = container_of(p, struct vxlan_fdb, hlist); 2449 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) 2450 continue; 2451 /* the all_zeros_mac entry is deleted at vxlan_uninit */ 2452 if (!is_zero_ether_addr(f->eth_addr)) 2453 vxlan_fdb_destroy(vxlan, f); 2454 } 2455 } 2456 
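	/* All buckets have been walked under hash_lock; only the
	 * entries skipped above remain in the table.
	 */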
spin_unlock_bh(&vxlan->hash_lock); 2457} 2458 2459/* Cleanup timer and forwarding table on shutdown */ 2460static int vxlan_stop(struct net_device *dev) 2461{ 2462 struct vxlan_dev *vxlan = netdev_priv(dev); 2463 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2464 int ret = 0; 2465 2466 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && 2467 !vxlan_group_used(vn, vxlan)) 2468 ret = vxlan_igmp_leave(vxlan); 2469 2470 del_timer_sync(&vxlan->age_timer); 2471 2472 vxlan_flush(vxlan, false); 2473 vxlan_sock_release(vxlan); 2474 2475 return ret; 2476} 2477 2478/* Stub, nothing needs to be done. */ 2479static void vxlan_set_multicast_list(struct net_device *dev) 2480{ 2481} 2482 2483static int vxlan_change_mtu(struct net_device *dev, int new_mtu) 2484{ 2485 struct vxlan_dev *vxlan = netdev_priv(dev); 2486 struct vxlan_rdst *dst = &vxlan->default_dst; 2487 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, 2488 dst->remote_ifindex); 2489 bool use_ipv6 = false; 2490 2491 if (dst->remote_ip.sa.sa_family == AF_INET6) 2492 use_ipv6 = true; 2493 2494 /* This check is different than dev->max_mtu, because it looks at 2495 * the lowerdev->mtu, rather than the static dev->max_mtu 2496 */ 2497 if (lowerdev) { 2498 int max_mtu = lowerdev->mtu - 2499 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2500 if (new_mtu > max_mtu) 2501 return -EINVAL; 2502 } 2503 2504 dev->mtu = new_mtu; 2505 return 0; 2506} 2507 2508static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 2509{ 2510 struct vxlan_dev *vxlan = netdev_priv(dev); 2511 struct ip_tunnel_info *info = skb_tunnel_info(skb); 2512 __be16 sport, dport; 2513 2514 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2515 vxlan->cfg.port_max, true); 2516 dport = info->key.tp_dst ? 
: vxlan->cfg.dst_port; 2517 2518 if (ip_tunnel_info_af(info) == AF_INET) { 2519 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 2520 struct rtable *rt; 2521 2522 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, 2523 info->key.u.ipv4.dst, 2524 &info->key.u.ipv4.src, dport, sport, 2525 &info->dst_cache, info); 2526 if (IS_ERR(rt)) 2527 return PTR_ERR(rt); 2528 ip_rt_put(rt); 2529 } else { 2530#if IS_ENABLED(CONFIG_IPV6) 2531 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); 2532 struct dst_entry *ndst; 2533 2534 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, 2535 info->key.label, &info->key.u.ipv6.dst, 2536 &info->key.u.ipv6.src, dport, sport, 2537 &info->dst_cache, info); 2538 if (IS_ERR(ndst)) 2539 return PTR_ERR(ndst); 2540 dst_release(ndst); 2541#else /* !CONFIG_IPV6 */ 2542 return -EPFNOSUPPORT; 2543#endif 2544 } 2545 info->key.tp_src = sport; 2546 info->key.tp_dst = dport; 2547 return 0; 2548} 2549 2550static const struct net_device_ops vxlan_netdev_ether_ops = { 2551 .ndo_init = vxlan_init, 2552 .ndo_uninit = vxlan_uninit, 2553 .ndo_open = vxlan_open, 2554 .ndo_stop = vxlan_stop, 2555 .ndo_start_xmit = vxlan_xmit, 2556 .ndo_get_stats64 = ip_tunnel_get_stats64, 2557 .ndo_set_rx_mode = vxlan_set_multicast_list, 2558 .ndo_change_mtu = vxlan_change_mtu, 2559 .ndo_validate_addr = eth_validate_addr, 2560 .ndo_set_mac_address = eth_mac_addr, 2561 .ndo_fdb_add = vxlan_fdb_add, 2562 .ndo_fdb_del = vxlan_fdb_delete, 2563 .ndo_fdb_dump = vxlan_fdb_dump, 2564 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 2565}; 2566 2567static const struct net_device_ops vxlan_netdev_raw_ops = { 2568 .ndo_init = vxlan_init, 2569 .ndo_uninit = vxlan_uninit, 2570 .ndo_open = vxlan_open, 2571 .ndo_stop = vxlan_stop, 2572 .ndo_start_xmit = vxlan_xmit, 2573 .ndo_get_stats64 = ip_tunnel_get_stats64, 2574 .ndo_change_mtu = vxlan_change_mtu, 2575 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 2576}; 2577 2578/* Info for udev, that this is a virtual tunnel endpoint */ 2579static struct device_type vxlan_type = { 2580 .name = "vxlan", 2581}; 2582 2583/* Calls the ndo_udp_tunnel_add of the caller in order to 2584 * supply the listening VXLAN udp ports. Callers are expected 2585 * to implement the ndo_udp_tunnel_add. 2586 */ 2587static void vxlan_push_rx_ports(struct net_device *dev) 2588{ 2589 struct vxlan_sock *vs; 2590 struct net *net = dev_net(dev); 2591 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2592 unsigned int i; 2593 2594 spin_lock(&vn->sock_lock); 2595 for (i = 0; i < PORT_HASH_SIZE; ++i) { 2596 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) 2597 udp_tunnel_push_rx_port(dev, vs->sock, 2598 (vs->flags & VXLAN_F_GPE) ? 2599 UDP_TUNNEL_TYPE_VXLAN_GPE : 2600 UDP_TUNNEL_TYPE_VXLAN); 2601 } 2602 spin_unlock(&vn->sock_lock); 2603} 2604 2605/* Initialize the device structure. 
*/ 2606static void vxlan_setup(struct net_device *dev) 2607{ 2608 struct vxlan_dev *vxlan = netdev_priv(dev); 2609 unsigned int h; 2610 2611 eth_hw_addr_random(dev); 2612 ether_setup(dev); 2613 2614 dev->needs_free_netdev = true; 2615 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 2616 2617 dev->features |= NETIF_F_LLTX; 2618 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2619 dev->features |= NETIF_F_RXCSUM; 2620 dev->features |= NETIF_F_GSO_SOFTWARE; 2621 2622 dev->vlan_features = dev->features; 2623 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 2624 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2625 netif_keep_dst(dev); 2626 dev->priv_flags |= IFF_NO_QUEUE; 2627 2628 INIT_LIST_HEAD(&vxlan->next); 2629 spin_lock_init(&vxlan->hash_lock); 2630 2631 init_timer_deferrable(&vxlan->age_timer); 2632 vxlan->age_timer.function = vxlan_cleanup; 2633 vxlan->age_timer.data = (unsigned long) vxlan; 2634 2635 vxlan->cfg.dst_port = htons(vxlan_port); 2636 2637 vxlan->dev = dev; 2638 2639 gro_cells_init(&vxlan->gro_cells, dev); 2640 2641 for (h = 0; h < FDB_HASH_SIZE; ++h) 2642 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); 2643} 2644 2645static void vxlan_ether_setup(struct net_device *dev) 2646{ 2647 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 2648 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 2649 dev->netdev_ops = &vxlan_netdev_ether_ops; 2650} 2651 2652static void vxlan_raw_setup(struct net_device *dev) 2653{ 2654 dev->header_ops = NULL; 2655 dev->type = ARPHRD_NONE; 2656 dev->hard_header_len = 0; 2657 dev->addr_len = 0; 2658 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 2659 dev->netdev_ops = &vxlan_netdev_raw_ops; 2660} 2661 2662static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { 2663 [IFLA_VXLAN_ID] = { .type = NLA_U32 }, 2664 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 2665 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, 2666 [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, 2667 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 2668 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, 2669 [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, 2670 [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, 2671 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 }, 2672 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, 2673 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, 2674 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, 2675 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, 2676 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 }, 2677 [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, 2678 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, 2679 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, 2680 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 }, 2681 [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, 2682 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, 2683 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, 2684 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, 2685 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 }, 2686 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 }, 2687 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, 2688 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, 2689 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, 2690}; 2691 2692static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 2693{ 2694 if (tb[IFLA_ADDRESS]) { 2695 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { 2696 pr_debug("invalid link address (not ethernet)\n"); 2697 return -EINVAL; 2698 } 2699 2700 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { 2701 pr_debug("invalid all zero ethernet address\n"); 2702 return -EADDRNOTAVAIL; 2703 
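			/* Note: is_valid_ether_addr() also rejects
			 * multicast addresses, not only the all-zeros
			 * address mentioned in the message above.
			 */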
} 2704 } 2705 2706 if (!data) 2707 return -EINVAL; 2708 2709 if (data[IFLA_VXLAN_ID]) { 2710 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); 2711 if (id >= VXLAN_N_VID) 2712 return -ERANGE; 2713 } 2714 2715 if (data[IFLA_VXLAN_PORT_RANGE]) { 2716 const struct ifla_vxlan_port_range *p 2717 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 2718 2719 if (ntohs(p->high) < ntohs(p->low)) { 2720 pr_debug("port range %u .. %u not valid\n", 2721 ntohs(p->low), ntohs(p->high)); 2722 return -EINVAL; 2723 } 2724 } 2725 2726 return 0; 2727} 2728 2729static void vxlan_get_drvinfo(struct net_device *netdev, 2730 struct ethtool_drvinfo *drvinfo) 2731{ 2732 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version)); 2733 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver)); 2734} 2735 2736static const struct ethtool_ops vxlan_ethtool_ops = { 2737 .get_drvinfo = vxlan_get_drvinfo, 2738 .get_link = ethtool_op_get_link, 2739}; 2740 2741static struct socket *vxlan_create_sock(struct net *net, bool ipv6, 2742 __be16 port, u32 flags) 2743{ 2744 struct socket *sock; 2745 struct udp_port_cfg udp_conf; 2746 int err; 2747 2748 memset(&udp_conf, 0, sizeof(udp_conf)); 2749 2750 if (ipv6) { 2751 udp_conf.family = AF_INET6; 2752 udp_conf.use_udp6_rx_checksums = 2753 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); 2754 udp_conf.ipv6_v6only = 1; 2755 } else { 2756 udp_conf.family = AF_INET; 2757 } 2758 2759 udp_conf.local_udp_port = port; 2760 2761 /* Open UDP socket */ 2762 err = udp_sock_create(net, &udp_conf, &sock); 2763 if (err < 0) 2764 return ERR_PTR(err); 2765 2766 return sock; 2767} 2768 2769/* Create new listen socket if needed */ 2770static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, 2771 __be16 port, u32 flags) 2772{ 2773 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2774 struct vxlan_sock *vs; 2775 struct socket *sock; 2776 unsigned int h; 2777 struct udp_tunnel_sock_cfg tunnel_cfg; 2778 2779 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 2780 if (!vs) 2781 return ERR_PTR(-ENOMEM); 2782 2783 for (h = 0; h < VNI_HASH_SIZE; ++h) 2784 INIT_HLIST_HEAD(&vs->vni_list[h]); 2785 2786 sock = vxlan_create_sock(net, ipv6, port, flags); 2787 if (IS_ERR(sock)) { 2788 kfree(vs); 2789 return ERR_CAST(sock); 2790 } 2791 2792 vs->sock = sock; 2793 atomic_set(&vs->refcnt, 1); 2794 vs->flags = (flags & VXLAN_F_RCV_FLAGS); 2795 2796 spin_lock(&vn->sock_lock); 2797 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2798 udp_tunnel_notify_add_rx_port(sock, 2799 (vs->flags & VXLAN_F_GPE) ? 2800 UDP_TUNNEL_TYPE_VXLAN_GPE : 2801 UDP_TUNNEL_TYPE_VXLAN); 2802 spin_unlock(&vn->sock_lock); 2803 2804 /* Mark socket as an encapsulation socket. */ 2805 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); 2806 tunnel_cfg.sk_user_data = vs; 2807 tunnel_cfg.encap_type = 1; 2808 tunnel_cfg.encap_rcv = vxlan_rcv; 2809 tunnel_cfg.encap_destroy = NULL; 2810 tunnel_cfg.gro_receive = vxlan_gro_receive; 2811 tunnel_cfg.gro_complete = vxlan_gro_complete; 2812 2813 setup_udp_tunnel_sock(net, sock, &tunnel_cfg); 2814 2815 return vs; 2816} 2817 2818static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) 2819{ 2820 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2821 struct vxlan_sock *vs = NULL; 2822 2823 if (!vxlan->cfg.no_share) { 2824 spin_lock(&vn->sock_lock); 2825 vs = vxlan_find_sock(vxlan->net, ipv6 ? 
AF_INET6 : AF_INET, 2826 vxlan->cfg.dst_port, vxlan->flags); 2827 if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) { 2828 spin_unlock(&vn->sock_lock); 2829 return -EBUSY; 2830 } 2831 spin_unlock(&vn->sock_lock); 2832 } 2833 if (!vs) 2834 vs = vxlan_socket_create(vxlan->net, ipv6, 2835 vxlan->cfg.dst_port, vxlan->flags); 2836 if (IS_ERR(vs)) 2837 return PTR_ERR(vs); 2838#if IS_ENABLED(CONFIG_IPV6) 2839 if (ipv6) 2840 rcu_assign_pointer(vxlan->vn6_sock, vs); 2841 else 2842#endif 2843 rcu_assign_pointer(vxlan->vn4_sock, vs); 2844 vxlan_vs_add_dev(vs, vxlan); 2845 return 0; 2846} 2847 2848static int vxlan_sock_add(struct vxlan_dev *vxlan) 2849{ 2850 bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; 2851 bool ipv6 = vxlan->flags & VXLAN_F_IPV6 || metadata; 2852 bool ipv4 = !ipv6 || metadata; 2853 int ret = 0; 2854 2855 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); 2856#if IS_ENABLED(CONFIG_IPV6) 2857 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); 2858 if (ipv6) { 2859 ret = __vxlan_sock_add(vxlan, true); 2860 if (ret < 0 && ret != -EAFNOSUPPORT) 2861 ipv4 = false; 2862 } 2863#endif 2864 if (ipv4) 2865 ret = __vxlan_sock_add(vxlan, false); 2866 if (ret < 0) 2867 vxlan_sock_release(vxlan); 2868 return ret; 2869} 2870 2871static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, 2872 struct vxlan_config *conf, 2873 bool changelink) 2874{ 2875 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); 2876 struct vxlan_dev *vxlan = netdev_priv(dev), *tmp; 2877 struct vxlan_rdst *dst = &vxlan->default_dst; 2878 unsigned short needed_headroom = ETH_HLEN; 2879 bool use_ipv6 = false; 2880 __be16 default_port = vxlan->cfg.dst_port; 2881 struct net_device *lowerdev = NULL; 2882 2883 if (!changelink) { 2884 if (conf->flags & VXLAN_F_GPE) { 2885 /* For now, allow GPE only together with 2886 * COLLECT_METADATA. This can be relaxed later; in such 2887 * case, the other side of the PtP link will have to be 2888 * provided. 
2889 */ 2890 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || 2891 !(conf->flags & VXLAN_F_COLLECT_METADATA)) { 2892 pr_info("unsupported combination of extensions\n"); 2893 return -EINVAL; 2894 } 2895 vxlan_raw_setup(dev); 2896 } else { 2897 vxlan_ether_setup(dev); 2898 } 2899 2900 /* MTU range: 68 - 65535 */ 2901 dev->min_mtu = ETH_MIN_MTU; 2902 dev->max_mtu = ETH_MAX_MTU; 2903 vxlan->net = src_net; 2904 } 2905 2906 dst->remote_vni = conf->vni; 2907 2908 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip)); 2909 2910 /* Unless IPv6 is explicitly requested, assume IPv4 */ 2911 if (!dst->remote_ip.sa.sa_family) 2912 dst->remote_ip.sa.sa_family = AF_INET; 2913 2914 if (dst->remote_ip.sa.sa_family == AF_INET6 || 2915 vxlan->cfg.saddr.sa.sa_family == AF_INET6) { 2916 if (!IS_ENABLED(CONFIG_IPV6)) 2917 return -EPFNOSUPPORT; 2918 use_ipv6 = true; 2919 vxlan->flags |= VXLAN_F_IPV6; 2920 } 2921 2922 if (conf->label && !use_ipv6) { 2923 pr_info("label only supported in use with IPv6\n"); 2924 return -EINVAL; 2925 } 2926 2927 if (conf->remote_ifindex && 2928 conf->remote_ifindex != vxlan->cfg.remote_ifindex) { 2929 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); 2930 dst->remote_ifindex = conf->remote_ifindex; 2931 2932 if (!lowerdev) { 2933 pr_info("ifindex %d does not exist\n", 2934 dst->remote_ifindex); 2935 return -ENODEV; 2936 } 2937 2938#if IS_ENABLED(CONFIG_IPV6) 2939 if (use_ipv6) { 2940 struct inet6_dev *idev = __in6_dev_get(lowerdev); 2941 if (idev && idev->cnf.disable_ipv6) { 2942 pr_info("IPv6 is disabled via sysctl\n"); 2943 return -EPERM; 2944 } 2945 } 2946#endif 2947 2948 if (!conf->mtu) 2949 dev->mtu = lowerdev->mtu - 2950 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2951 2952 needed_headroom = lowerdev->hard_header_len; 2953 } else if (!conf->remote_ifindex && 2954 vxlan_addr_multicast(&dst->remote_ip)) { 2955 pr_info("multicast destination requires interface to be specified\n"); 2956 return -EINVAL; 2957 } 2958 2959 if (lowerdev) { 2960 dev->gso_max_size = lowerdev->gso_max_size; 2961 dev->gso_max_segs = lowerdev->gso_max_segs; 2962 } 2963 2964 if (conf->mtu) { 2965 int max_mtu = ETH_MAX_MTU; 2966 2967 if (lowerdev) 2968 max_mtu = lowerdev->mtu; 2969 2970 max_mtu -= (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); 2971 2972 if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu) 2973 return -EINVAL; 2974 2975 dev->mtu = conf->mtu; 2976 2977 if (conf->mtu > max_mtu) 2978 dev->mtu = max_mtu; 2979 } 2980 2981 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) 2982 needed_headroom += VXLAN6_HEADROOM; 2983 else 2984 needed_headroom += VXLAN_HEADROOM; 2985 dev->needed_headroom = needed_headroom; 2986 2987 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2988 if (!vxlan->cfg.dst_port) { 2989 if (conf->flags & VXLAN_F_GPE) 2990 vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */ 2991 else 2992 vxlan->cfg.dst_port = default_port; 2993 } 2994 vxlan->flags |= conf->flags; 2995 2996 if (!vxlan->cfg.age_interval) 2997 vxlan->cfg.age_interval = FDB_AGE_DEFAULT; 2998 2999 if (changelink) 3000 return 0; 3001 3002 list_for_each_entry(tmp, &vn->vxlan_list, next) { 3003 if (tmp->cfg.vni == conf->vni && 3004 (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 || 3005 tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && 3006 tmp->cfg.dst_port == vxlan->cfg.dst_port && 3007 (tmp->flags & VXLAN_F_RCV_FLAGS) == 3008 (vxlan->flags & VXLAN_F_RCV_FLAGS)) { 3009 pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni)); 3010 return -EEXIST; 3011 } 3012 } 3013 3014 return 0; 3015} 3016 3017static int __vxlan_dev_create(struct net *net, struct net_device *dev, 3018 struct vxlan_config *conf) 3019{ 3020 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3021 struct vxlan_dev *vxlan = netdev_priv(dev); 3022 int err; 3023 3024 err = vxlan_dev_configure(net, dev, conf, false); 3025 if (err) 3026 return err; 3027 3028 dev->ethtool_ops = &vxlan_ethtool_ops; 3029 3030 /* create an fdb entry for a valid default destination */ 3031 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { 3032 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3033 &vxlan->default_dst.remote_ip, 3034 NUD_REACHABLE | NUD_PERMANENT, 3035 NLM_F_EXCL | NLM_F_CREATE, 3036 vxlan->cfg.dst_port, 3037 vxlan->default_dst.remote_vni, 3038 vxlan->default_dst.remote_vni, 3039 vxlan->default_dst.remote_ifindex, 3040 NTF_SELF); 3041 if (err) 3042 return err; 3043 } 3044 3045 err = register_netdevice(dev); 3046 if (err) { 3047 vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); 3048 return err; 3049 } 3050 3051 list_add(&vxlan->next, &vn->vxlan_list); 3052 return 0; 3053} 3054 3055static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], 3056 struct net_device *dev, struct vxlan_config *conf, 3057 bool changelink) 3058{ 3059 struct vxlan_dev *vxlan = netdev_priv(dev); 3060 3061 memset(conf, 0, sizeof(*conf)); 3062 3063 /* if changelink operation, start with old existing cfg */ 3064 if (changelink) 3065 memcpy(conf, &vxlan->cfg, sizeof(*conf)); 3066 3067 if (data[IFLA_VXLAN_ID]) { 3068 __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); 3069 3070 if (changelink && (vni != conf->vni)) 3071 return -EOPNOTSUPP; 3072 conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); 3073 } 3074 3075 if (data[IFLA_VXLAN_GROUP]) { 3076 conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); 3077 } else if (data[IFLA_VXLAN_GROUP6]) { 3078 if (!IS_ENABLED(CONFIG_IPV6)) 3079 return -EPFNOSUPPORT; 3080 3081 conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); 3082 conf->remote_ip.sa.sa_family = AF_INET6; 3083 } 3084 3085 if (data[IFLA_VXLAN_LOCAL]) { 3086 conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); 3087 conf->saddr.sa.sa_family = AF_INET; 3088 } 
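	/* conf->saddr is a union vxlan_addr; the IPv4 case above and
	 * the IPv6 case below fill the same storage, and the else-if
	 * keeps them mutually exclusive.
	 */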
else if (data[IFLA_VXLAN_LOCAL6]) { 3089 if (!IS_ENABLED(CONFIG_IPV6)) 3090 return -EPFNOSUPPORT; 3091 3092 /* TODO: respect scope id */ 3093 conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); 3094 conf->saddr.sa.sa_family = AF_INET6; 3095 } 3096 3097 if (data[IFLA_VXLAN_LINK]) 3098 conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); 3099 3100 if (data[IFLA_VXLAN_TOS]) 3101 conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 3102 3103 if (data[IFLA_VXLAN_TTL]) 3104 conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); 3105 3106 if (data[IFLA_VXLAN_LABEL]) 3107 conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & 3108 IPV6_FLOWLABEL_MASK; 3109 3110 if (data[IFLA_VXLAN_LEARNING]) { 3111 if (nla_get_u8(data[IFLA_VXLAN_LEARNING])) { 3112 conf->flags |= VXLAN_F_LEARN; 3113 } else { 3114 conf->flags &= ~VXLAN_F_LEARN; 3115 vxlan->flags &= ~VXLAN_F_LEARN; 3116 } 3117 } else if (!changelink) { 3118 /* default to learn on a new device */ 3119 conf->flags |= VXLAN_F_LEARN; 3120 } 3121 3122 if (data[IFLA_VXLAN_AGEING]) { 3123 if (changelink) 3124 return -EOPNOTSUPP; 3125 conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); 3126 } 3127 3128 if (data[IFLA_VXLAN_PROXY]) { 3129 if (changelink) 3130 return -EOPNOTSUPP; 3131 if (nla_get_u8(data[IFLA_VXLAN_PROXY])) 3132 conf->flags |= VXLAN_F_PROXY; 3133 } 3134 3135 if (data[IFLA_VXLAN_RSC]) { 3136 if (changelink) 3137 return -EOPNOTSUPP; 3138 if (nla_get_u8(data[IFLA_VXLAN_RSC])) 3139 conf->flags |= VXLAN_F_RSC; 3140 } 3141 3142 if (data[IFLA_VXLAN_L2MISS]) { 3143 if (changelink) 3144 return -EOPNOTSUPP; 3145 if (nla_get_u8(data[IFLA_VXLAN_L2MISS])) 3146 conf->flags |= VXLAN_F_L2MISS; 3147 } 3148 3149 if (data[IFLA_VXLAN_L3MISS]) { 3150 if (changelink) 3151 return -EOPNOTSUPP; 3152 if (nla_get_u8(data[IFLA_VXLAN_L3MISS])) 3153 conf->flags |= VXLAN_F_L3MISS; 3154 } 3155 3156 if (data[IFLA_VXLAN_LIMIT]) { 3157 if (changelink) 3158 return -EOPNOTSUPP; 3159 conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); 3160 } 3161 3162 if (data[IFLA_VXLAN_COLLECT_METADATA]) { 3163 if (changelink) 3164 return -EOPNOTSUPP; 3165 if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) 3166 conf->flags |= VXLAN_F_COLLECT_METADATA; 3167 } 3168 3169 if (data[IFLA_VXLAN_PORT_RANGE]) { 3170 if (!changelink) { 3171 const struct ifla_vxlan_port_range *p 3172 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 3173 conf->port_min = ntohs(p->low); 3174 conf->port_max = ntohs(p->high); 3175 } else { 3176 return -EOPNOTSUPP; 3177 } 3178 } 3179 3180 if (data[IFLA_VXLAN_PORT]) { 3181 if (changelink) 3182 return -EOPNOTSUPP; 3183 conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 3184 } 3185 3186 if (data[IFLA_VXLAN_UDP_CSUM]) { 3187 if (changelink) 3188 return -EOPNOTSUPP; 3189 if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) 3190 conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX; 3191 } 3192 3193 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) { 3194 if (changelink) 3195 return -EOPNOTSUPP; 3196 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) 3197 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; 3198 } 3199 3200 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) { 3201 if (changelink) 3202 return -EOPNOTSUPP; 3203 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 3204 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 3205 } 3206 3207 if (data[IFLA_VXLAN_REMCSUM_TX]) { 3208 if (changelink) 3209 return -EOPNOTSUPP; 3210 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) 3211 conf->flags |= VXLAN_F_REMCSUM_TX; 3212 } 3213 3214 if (data[IFLA_VXLAN_REMCSUM_RX]) { 3215 if (changelink) 3216 return -EOPNOTSUPP; 3217 if 
(nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) 3218 conf->flags |= VXLAN_F_REMCSUM_RX; 3219 } 3220 3221 if (data[IFLA_VXLAN_GBP]) { 3222 if (changelink) 3223 return -EOPNOTSUPP; 3224 conf->flags |= VXLAN_F_GBP; 3225 } 3226 3227 if (data[IFLA_VXLAN_GPE]) { 3228 if (changelink) 3229 return -EOPNOTSUPP; 3230 conf->flags |= VXLAN_F_GPE; 3231 } 3232 3233 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) { 3234 if (changelink) 3235 return -EOPNOTSUPP; 3236 conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL; 3237 } 3238 3239 if (tb[IFLA_MTU]) { 3240 if (changelink) 3241 return -EOPNOTSUPP; 3242 conf->mtu = nla_get_u32(tb[IFLA_MTU]); 3243 } 3244 3245 return 0; 3246} 3247 3248static int vxlan_newlink(struct net *src_net, struct net_device *dev, 3249 struct nlattr *tb[], struct nlattr *data[]) 3250{ 3251 struct vxlan_config conf; 3252 int err; 3253 3254 err = vxlan_nl2conf(tb, data, dev, &conf, false); 3255 if (err) 3256 return err; 3257 3258 return __vxlan_dev_create(src_net, dev, &conf); 3259} 3260 3261static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], 3262 struct nlattr *data[]) 3263{ 3264 struct vxlan_dev *vxlan = netdev_priv(dev); 3265 struct vxlan_rdst *dst = &vxlan->default_dst; 3266 struct vxlan_rdst old_dst; 3267 struct vxlan_config conf; 3268 int err; 3269 3270 err = vxlan_nl2conf(tb, data, 3271 dev, &conf, true); 3272 if (err) 3273 return err; 3274 3275 memcpy(&old_dst, dst, sizeof(struct vxlan_rdst)); 3276 3277 err = vxlan_dev_configure(vxlan->net, dev, &conf, true); 3278 if (err) 3279 return err; 3280 3281 /* handle default dst entry */ 3282 if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) { 3283 spin_lock_bh(&vxlan->hash_lock); 3284 if (!vxlan_addr_any(&old_dst.remote_ip)) 3285 __vxlan_fdb_delete(vxlan, all_zeros_mac, 3286 old_dst.remote_ip, 3287 vxlan->cfg.dst_port, 3288 old_dst.remote_vni, 3289 old_dst.remote_vni, 3290 old_dst.remote_ifindex, 0); 3291 3292 if (!vxlan_addr_any(&dst->remote_ip)) { 3293 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3294 &dst->remote_ip, 3295 NUD_REACHABLE | NUD_PERMANENT, 3296 NLM_F_CREATE | NLM_F_APPEND, 3297 vxlan->cfg.dst_port, 3298 dst->remote_vni, 3299 dst->remote_vni, 3300 dst->remote_ifindex, 3301 NTF_SELF); 3302 if (err) { 3303 spin_unlock_bh(&vxlan->hash_lock); 3304 return err; 3305 } 3306 } 3307 spin_unlock_bh(&vxlan->hash_lock); 3308 } 3309 3310 return 0; 3311} 3312 3313static void vxlan_dellink(struct net_device *dev, struct list_head *head) 3314{ 3315 struct vxlan_dev *vxlan = netdev_priv(dev); 3316 3317 vxlan_flush(vxlan, true); 3318 3319 gro_cells_destroy(&vxlan->gro_cells); 3320 list_del(&vxlan->next); 3321 unregister_netdevice_queue(dev, head); 3322} 3323 3324static size_t vxlan_get_size(const struct net_device *dev) 3325{ 3326 3327 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ 3328 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */ 3329 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ 3330 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ 3331 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ 3332 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ 3333 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ 3334 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 3335 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */ 3336 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ 3337 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ 3338 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ 3339 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */ 3340 
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 3341 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 3342 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 3343 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */ 3344 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */ 3345 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */ 3346 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */ 3347 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */ 3348 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */ 3349 0; 3350} 3351 3352static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) 3353{ 3354 const struct vxlan_dev *vxlan = netdev_priv(dev); 3355 const struct vxlan_rdst *dst = &vxlan->default_dst; 3356 struct ifla_vxlan_port_range ports = { 3357 .low = htons(vxlan->cfg.port_min), 3358 .high = htons(vxlan->cfg.port_max), 3359 }; 3360 3361 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni))) 3362 goto nla_put_failure; 3363 3364 if (!vxlan_addr_any(&dst->remote_ip)) { 3365 if (dst->remote_ip.sa.sa_family == AF_INET) { 3366 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP, 3367 dst->remote_ip.sin.sin_addr.s_addr)) 3368 goto nla_put_failure; 3369#if IS_ENABLED(CONFIG_IPV6) 3370 } else { 3371 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6, 3372 &dst->remote_ip.sin6.sin6_addr)) 3373 goto nla_put_failure; 3374#endif 3375 } 3376 } 3377 3378 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) 3379 goto nla_put_failure; 3380 3381 if (!vxlan_addr_any(&vxlan->cfg.saddr)) { 3382 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { 3383 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL, 3384 vxlan->cfg.saddr.sin.sin_addr.s_addr)) 3385 goto nla_put_failure; 3386#if IS_ENABLED(CONFIG_IPV6) 3387 } else { 3388 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6, 3389 &vxlan->cfg.saddr.sin6.sin6_addr)) 3390 goto nla_put_failure; 3391#endif 3392 } 3393 } 3394 3395 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || 3396 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || 3397 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || 3398 nla_put_u8(skb, IFLA_VXLAN_LEARNING, 3399 !!(vxlan->flags & VXLAN_F_LEARN)) || 3400 nla_put_u8(skb, IFLA_VXLAN_PROXY, 3401 !!(vxlan->flags & VXLAN_F_PROXY)) || 3402 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) || 3403 nla_put_u8(skb, IFLA_VXLAN_L2MISS, 3404 !!(vxlan->flags & VXLAN_F_L2MISS)) || 3405 nla_put_u8(skb, IFLA_VXLAN_L3MISS, 3406 !!(vxlan->flags & VXLAN_F_L3MISS)) || 3407 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA, 3408 !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) || 3409 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || 3410 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || 3411 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || 3412 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM, 3413 !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || 3414 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, 3415 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || 3416 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, 3417 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) || 3418 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX, 3419 !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) || 3420 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX, 3421 !!(vxlan->flags & VXLAN_F_REMCSUM_RX))) 3422 goto nla_put_failure; 3423 3424 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) 3425 goto nla_put_failure; 3426 3427 if (vxlan->flags & VXLAN_F_GBP && 3428 nla_put_flag(skb, IFLA_VXLAN_GBP)) 3429 goto 
nla_put_failure; 3430 3431 if (vxlan->flags & VXLAN_F_GPE && 3432 nla_put_flag(skb, IFLA_VXLAN_GPE)) 3433 goto nla_put_failure; 3434 3435 if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL && 3436 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL)) 3437 goto nla_put_failure; 3438 3439 return 0; 3440 3441nla_put_failure: 3442 return -EMSGSIZE; 3443} 3444 3445static struct net *vxlan_get_link_net(const struct net_device *dev) 3446{ 3447 struct vxlan_dev *vxlan = netdev_priv(dev); 3448 3449 return vxlan->net; 3450} 3451 3452static struct rtnl_link_ops vxlan_link_ops __read_mostly = { 3453 .kind = "vxlan", 3454 .maxtype = IFLA_VXLAN_MAX, 3455 .policy = vxlan_policy, 3456 .priv_size = sizeof(struct vxlan_dev), 3457 .setup = vxlan_setup, 3458 .validate = vxlan_validate, 3459 .newlink = vxlan_newlink, 3460 .changelink = vxlan_changelink, 3461 .dellink = vxlan_dellink, 3462 .get_size = vxlan_get_size, 3463 .fill_info = vxlan_fill_info, 3464 .get_link_net = vxlan_get_link_net, 3465}; 3466 3467struct net_device *vxlan_dev_create(struct net *net, const char *name, 3468 u8 name_assign_type, 3469 struct vxlan_config *conf) 3470{ 3471 struct nlattr *tb[IFLA_MAX + 1]; 3472 struct net_device *dev; 3473 int err; 3474 3475 memset(&tb, 0, sizeof(tb)); 3476 3477 dev = rtnl_create_link(net, name, name_assign_type, 3478 &vxlan_link_ops, tb); 3479 if (IS_ERR(dev)) 3480 return dev; 3481 3482 err = __vxlan_dev_create(net, dev, conf); 3483 if (err < 0) { 3484 free_netdev(dev); 3485 return ERR_PTR(err); 3486 } 3487 3488 err = rtnl_configure_link(dev, NULL); 3489 if (err < 0) { 3490 LIST_HEAD(list_kill); 3491 3492 vxlan_dellink(dev, &list_kill); 3493 unregister_netdevice_many(&list_kill); 3494 return ERR_PTR(err); 3495 } 3496 3497 return dev; 3498} 3499EXPORT_SYMBOL_GPL(vxlan_dev_create); 3500 3501static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, 3502 struct net_device *dev) 3503{ 3504 struct vxlan_dev *vxlan, *next; 3505 LIST_HEAD(list_kill); 3506 3507 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { 3508 struct vxlan_rdst *dst = &vxlan->default_dst; 3509 3510 /* In case we created vxlan device with carrier 3511 * and we loose the carrier due to module unload 3512 * we also need to remove vxlan device. In other 3513 * cases, it's not necessary and remote_ifindex 3514 * is 0 here, so no matches. 
3515 */ 3516 if (dst->remote_ifindex == dev->ifindex) 3517 vxlan_dellink(vxlan->dev, &list_kill); 3518 } 3519 3520 unregister_netdevice_many(&list_kill); 3521} 3522 3523static int vxlan_netdevice_event(struct notifier_block *unused, 3524 unsigned long event, void *ptr) 3525{ 3526 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3527 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 3528 3529 if (event == NETDEV_UNREGISTER) 3530 vxlan_handle_lowerdev_unregister(vn, dev); 3531 else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) 3532 vxlan_push_rx_ports(dev); 3533 3534 return NOTIFY_DONE; 3535} 3536 3537static struct notifier_block vxlan_notifier_block __read_mostly = { 3538 .notifier_call = vxlan_netdevice_event, 3539}; 3540 3541static __net_init int vxlan_init_net(struct net *net) 3542{ 3543 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3544 unsigned int h; 3545 3546 INIT_LIST_HEAD(&vn->vxlan_list); 3547 spin_lock_init(&vn->sock_lock); 3548 3549 for (h = 0; h < PORT_HASH_SIZE; ++h) 3550 INIT_HLIST_HEAD(&vn->sock_list[h]); 3551 3552 return 0; 3553} 3554 3555static void __net_exit vxlan_exit_net(struct net *net) 3556{ 3557 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3558 struct vxlan_dev *vxlan, *next; 3559 struct net_device *dev, *aux; 3560 LIST_HEAD(list); 3561 3562 rtnl_lock(); 3563 for_each_netdev_safe(net, dev, aux) 3564 if (dev->rtnl_link_ops == &vxlan_link_ops) 3565 unregister_netdevice_queue(dev, &list); 3566 3567 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { 3568 /* If vxlan->dev is in the same netns, it has already been added 3569 * to the list by the previous loop. 3570 */ 3571 if (!net_eq(dev_net(vxlan->dev), net)) { 3572 gro_cells_destroy(&vxlan->gro_cells); 3573 unregister_netdevice_queue(vxlan->dev, &list); 3574 } 3575 } 3576 3577 unregister_netdevice_many(&list); 3578 rtnl_unlock(); 3579} 3580 3581static struct pernet_operations vxlan_net_ops = { 3582 .init = vxlan_init_net, 3583 .exit = vxlan_exit_net, 3584 .id = &vxlan_net_id, 3585 .size = sizeof(struct vxlan_net), 3586}; 3587 3588static int __init vxlan_init_module(void) 3589{ 3590 int rc; 3591 3592 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt)); 3593 3594 rc = register_pernet_subsys(&vxlan_net_ops); 3595 if (rc) 3596 goto out1; 3597 3598 rc = register_netdevice_notifier(&vxlan_notifier_block); 3599 if (rc) 3600 goto out2; 3601 3602 rc = rtnl_link_register(&vxlan_link_ops); 3603 if (rc) 3604 goto out3; 3605 3606 return 0; 3607out3: 3608 unregister_netdevice_notifier(&vxlan_notifier_block); 3609out2: 3610 unregister_pernet_subsys(&vxlan_net_ops); 3611out1: 3612 return rc; 3613} 3614late_initcall(vxlan_init_module); 3615 3616static void __exit vxlan_cleanup_module(void) 3617{ 3618 rtnl_link_unregister(&vxlan_link_ops); 3619 unregister_netdevice_notifier(&vxlan_notifier_block); 3620 unregister_pernet_subsys(&vxlan_net_ops); 3621 /* rcu_barrier() is called by netns */ 3622} 3623module_exit(vxlan_cleanup_module); 3624 3625MODULE_LICENSE("GPL"); 3626MODULE_VERSION(VXLAN_VERSION); 3627MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>"); 3628MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic"); 3629MODULE_ALIAS_RTNL_LINK("vxlan");
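/*
 * Illustrative usage (not part of the driver): devices handled by this
 * module are normally created over rtnetlink, for example with iproute2:
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 dstport 4789
 *
 * which exercises the IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK and
 * IFLA_VXLAN_PORT attributes parsed by vxlan_nl2conf() above. The interface
 * name, VNI and addresses here are placeholders.
 */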