Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.10-rc3 3368 lines 87 kB view raw
1/* 2 * VXLAN: Virtual eXtensible Local Area Network 3 * 4 * Copyright (c) 2012-2013 Vyatta Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13#include <linux/kernel.h> 14#include <linux/module.h> 15#include <linux/errno.h> 16#include <linux/slab.h> 17#include <linux/udp.h> 18#include <linux/igmp.h> 19#include <linux/if_ether.h> 20#include <linux/ethtool.h> 21#include <net/arp.h> 22#include <net/ndisc.h> 23#include <net/ip.h> 24#include <net/icmp.h> 25#include <net/rtnetlink.h> 26#include <net/inet_ecn.h> 27#include <net/net_namespace.h> 28#include <net/netns/generic.h> 29#include <net/vxlan.h> 30 31#if IS_ENABLED(CONFIG_IPV6) 32#include <net/ip6_tunnel.h> 33#include <net/ip6_checksum.h> 34#endif 35 36#define VXLAN_VERSION "0.1" 37 38#define PORT_HASH_BITS 8 39#define PORT_HASH_SIZE (1<<PORT_HASH_BITS) 40#define FDB_AGE_DEFAULT 300 /* 5 min */ 41#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ 42 43/* UDP port for VXLAN traffic. 44 * The IANA assigned port is 4789, but the Linux default is 8472 45 * for compatibility with early adopters. 46 */ 47static unsigned short vxlan_port __read_mostly = 8472; 48module_param_named(udp_port, vxlan_port, ushort, 0444); 49MODULE_PARM_DESC(udp_port, "Destination UDP port"); 50 51static bool log_ecn_error = true; 52module_param(log_ecn_error, bool, 0644); 53MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 54 55static unsigned int vxlan_net_id; 56static struct rtnl_link_ops vxlan_link_ops; 57 58static const u8 all_zeros_mac[ETH_ALEN + 2]; 59 60static int vxlan_sock_add(struct vxlan_dev *vxlan); 61 62/* per-network namespace private data for this module */ 63struct vxlan_net { 64 struct list_head vxlan_list; 65 struct hlist_head sock_list[PORT_HASH_SIZE]; 66 spinlock_t sock_lock; 67}; 68 69/* Forwarding table entry */ 70struct vxlan_fdb { 71 struct hlist_node hlist; /* linked list of entries */ 72 struct rcu_head rcu; 73 unsigned long updated; /* jiffies */ 74 unsigned long used; 75 struct list_head remotes; 76 u8 eth_addr[ETH_ALEN]; 77 u16 state; /* see ndm_state */ 78 u8 flags; /* see ndm_flags */ 79}; 80 81/* salt for hash table */ 82static u32 vxlan_salt __read_mostly; 83 84static inline bool vxlan_collect_metadata(struct vxlan_sock *vs) 85{ 86 return vs->flags & VXLAN_F_COLLECT_METADATA || 87 ip_tunnel_collect_metadata(); 88} 89 90#if IS_ENABLED(CONFIG_IPV6) 91static inline 92bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) 93{ 94 if (a->sa.sa_family != b->sa.sa_family) 95 return false; 96 if (a->sa.sa_family == AF_INET6) 97 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr); 98 else 99 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; 100} 101 102static inline bool vxlan_addr_any(const union vxlan_addr *ipa) 103{ 104 if (ipa->sa.sa_family == AF_INET6) 105 return ipv6_addr_any(&ipa->sin6.sin6_addr); 106 else 107 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); 108} 109 110static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) 111{ 112 if (ipa->sa.sa_family == AF_INET6) 113 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); 114 else 115 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); 116} 117 118static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) 119{ 120 if (nla_len(nla) >= sizeof(struct in6_addr)) { 121 ip->sin6.sin6_addr = nla_get_in6_addr(nla); 122 ip->sa.sa_family = AF_INET6; 123 return 0; 124 } else if (nla_len(nla) >= sizeof(__be32)) { 125 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla); 126 ip->sa.sa_family = AF_INET; 127 return 0; 128 } else { 129 return -EAFNOSUPPORT; 130 } 131} 132 133static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, 134 const union vxlan_addr *ip) 135{ 136 if (ip->sa.sa_family == AF_INET6) 137 return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr); 138 else 139 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr); 140} 141 142#else /* !CONFIG_IPV6 */ 143 144static inline 145bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) 146{ 147 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; 148} 149 150static inline bool vxlan_addr_any(const union vxlan_addr *ipa) 151{ 152 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); 153} 154 155static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) 156{ 157 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); 158} 159 160static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) 161{ 162 if (nla_len(nla) >= sizeof(struct in6_addr)) { 163 return -EAFNOSUPPORT; 164 } else if (nla_len(nla) >= sizeof(__be32)) { 165 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla); 166 ip->sa.sa_family = AF_INET; 167 return 0; 168 } else { 169 return -EAFNOSUPPORT; 170 } 171} 172 173static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, 174 const union vxlan_addr *ip) 175{ 176 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr); 177} 178#endif 179 180/* Virtual Network hash table head */ 181static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni) 182{ 183 return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)]; 184} 185 186/* Socket hash table head */ 187static inline struct hlist_head *vs_head(struct net *net, __be16 port) 188{ 189 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 190 191 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)]; 192} 193 194/* First remote destination for a forwarding entry. 195 * Guaranteed to be non-NULL because remotes are never deleted. 196 */ 197static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb) 198{ 199 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list); 200} 201 202static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) 203{ 204 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); 205} 206 207/* Find VXLAN socket based on network namespace, address family and UDP port 208 * and enabled unshareable flags. 209 */ 210static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, 211 __be16 port, u32 flags) 212{ 213 struct vxlan_sock *vs; 214 215 flags &= VXLAN_F_RCV_FLAGS; 216 217 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { 218 if (inet_sk(vs->sock->sk)->inet_sport == port && 219 vxlan_get_sk_family(vs) == family && 220 vs->flags == flags) 221 return vs; 222 } 223 return NULL; 224} 225 226static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni) 227{ 228 struct vxlan_dev *vxlan; 229 230 /* For flow based devices, map all packets to VNI 0 */ 231 if (vs->flags & VXLAN_F_COLLECT_METADATA) 232 vni = 0; 233 234 hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) { 235 if (vxlan->default_dst.remote_vni == vni) 236 return vxlan; 237 } 238 239 return NULL; 240} 241 242/* Look up VNI in a per net namespace table */ 243static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni, 244 sa_family_t family, __be16 port, 245 u32 flags) 246{ 247 struct vxlan_sock *vs; 248 249 vs = vxlan_find_sock(net, family, port, flags); 250 if (!vs) 251 return NULL; 252 253 return vxlan_vs_find_vni(vs, vni); 254} 255 256/* Fill in neighbour message in skbuff. */ 257static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, 258 const struct vxlan_fdb *fdb, 259 u32 portid, u32 seq, int type, unsigned int flags, 260 const struct vxlan_rdst *rdst) 261{ 262 unsigned long now = jiffies; 263 struct nda_cacheinfo ci; 264 struct nlmsghdr *nlh; 265 struct ndmsg *ndm; 266 bool send_ip, send_eth; 267 268 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); 269 if (nlh == NULL) 270 return -EMSGSIZE; 271 272 ndm = nlmsg_data(nlh); 273 memset(ndm, 0, sizeof(*ndm)); 274 275 send_eth = send_ip = true; 276 277 if (type == RTM_GETNEIGH) { 278 ndm->ndm_family = AF_INET; 279 send_ip = !vxlan_addr_any(&rdst->remote_ip); 280 send_eth = !is_zero_ether_addr(fdb->eth_addr); 281 } else 282 ndm->ndm_family = AF_BRIDGE; 283 ndm->ndm_state = fdb->state; 284 ndm->ndm_ifindex = vxlan->dev->ifindex; 285 ndm->ndm_flags = fdb->flags; 286 ndm->ndm_type = RTN_UNICAST; 287 288 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && 289 nla_put_s32(skb, NDA_LINK_NETNSID, 290 peernet2id(dev_net(vxlan->dev), vxlan->net))) 291 goto nla_put_failure; 292 293 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 294 goto nla_put_failure; 295 296 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip)) 297 goto nla_put_failure; 298 299 if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port && 300 nla_put_be16(skb, NDA_PORT, rdst->remote_port)) 301 goto nla_put_failure; 302 if (rdst->remote_vni != vxlan->default_dst.remote_vni && 303 nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) 304 goto nla_put_failure; 305 if (rdst->remote_ifindex && 306 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) 307 goto nla_put_failure; 308 309 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 310 ci.ndm_confirmed = 0; 311 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); 312 ci.ndm_refcnt = 0; 313 314 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 315 goto nla_put_failure; 316 317 nlmsg_end(skb, nlh); 318 return 0; 319 320nla_put_failure: 321 nlmsg_cancel(skb, nlh); 322 return -EMSGSIZE; 323} 324 325static inline size_t vxlan_nlmsg_size(void) 326{ 327 return NLMSG_ALIGN(sizeof(struct ndmsg)) 328 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 329 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */ 330 + nla_total_size(sizeof(__be16)) /* NDA_PORT */ 331 + nla_total_size(sizeof(__be32)) /* NDA_VNI */ 332 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */ 333 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */ 334 + nla_total_size(sizeof(struct nda_cacheinfo)); 335} 336 337static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, 338 struct vxlan_rdst *rd, int type) 339{ 340 struct net *net = dev_net(vxlan->dev); 341 struct sk_buff *skb; 342 int err = -ENOBUFS; 343 344 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC); 345 if (skb == NULL) 346 goto errout; 347 348 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); 349 if (err < 0) { 350 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 351 WARN_ON(err == -EMSGSIZE); 352 kfree_skb(skb); 353 goto errout; 354 } 355 356 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 357 return; 358errout: 359 if (err < 0) 360 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 361} 362 363static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) 364{ 365 struct vxlan_dev *vxlan = netdev_priv(dev); 366 struct vxlan_fdb f = { 367 .state = NUD_STALE, 368 }; 369 struct vxlan_rdst remote = { 370 .remote_ip = *ipa, /* goes to NDA_DST */ 371 .remote_vni = cpu_to_be32(VXLAN_N_VID), 372 }; 373 374 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); 375} 376 377static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) 378{ 379 struct vxlan_fdb f = { 380 .state = NUD_STALE, 381 }; 382 struct vxlan_rdst remote = { }; 383 384 memcpy(f.eth_addr, eth_addr, ETH_ALEN); 385 386 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); 387} 388 389/* Hash Ethernet address */ 390static u32 eth_hash(const unsigned char *addr) 391{ 392 u64 value = get_unaligned((u64 *)addr); 393 394 /* only want 6 bytes */ 395#ifdef __BIG_ENDIAN 396 value >>= 16; 397#else 398 value <<= 16; 399#endif 400 return hash_64(value, FDB_HASH_BITS); 401} 402 403/* Hash chain to use given mac address */ 404static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, 405 const u8 *mac) 406{ 407 return &vxlan->fdb_head[eth_hash(mac)]; 408} 409 410/* Look up Ethernet address in forwarding table */ 411static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, 412 const u8 *mac) 413{ 414 struct hlist_head *head = vxlan_fdb_head(vxlan, mac); 415 struct vxlan_fdb *f; 416 417 hlist_for_each_entry_rcu(f, head, hlist) { 418 if (ether_addr_equal(mac, f->eth_addr)) 419 return f; 420 } 421 422 return NULL; 423} 424 425static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, 426 const u8 *mac) 427{ 428 struct vxlan_fdb *f; 429 430 f = __vxlan_find_mac(vxlan, mac); 431 if (f) 432 f->used = jiffies; 433 434 return f; 435} 436 437/* caller should hold vxlan->hash_lock */ 438static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, 439 union vxlan_addr *ip, __be16 port, 440 __be32 vni, __u32 ifindex) 441{ 442 struct vxlan_rdst *rd; 443 444 list_for_each_entry(rd, &f->remotes, list) { 445 if (vxlan_addr_equal(&rd->remote_ip, ip) && 446 rd->remote_port == port && 447 rd->remote_vni == vni && 448 rd->remote_ifindex == ifindex) 449 return rd; 450 } 451 452 return NULL; 453} 454 455/* Replace destination of unicast mac */ 456static int vxlan_fdb_replace(struct vxlan_fdb *f, 457 union vxlan_addr *ip, __be16 port, __be32 vni, 458 __u32 ifindex) 459{ 460 struct vxlan_rdst *rd; 461 462 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 463 if (rd) 464 return 0; 465 466 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list); 467 if (!rd) 468 return 0; 469 470 dst_cache_reset(&rd->dst_cache); 471 rd->remote_ip = *ip; 472 rd->remote_port = port; 473 rd->remote_vni = vni; 474 rd->remote_ifindex = ifindex; 475 return 1; 476} 477 478/* Add/update destinations for multicast */ 479static int vxlan_fdb_append(struct vxlan_fdb *f, 480 union vxlan_addr *ip, __be16 port, __be32 vni, 481 __u32 ifindex, struct vxlan_rdst **rdp) 482{ 483 struct vxlan_rdst *rd; 484 485 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 486 if (rd) 487 return 0; 488 489 rd = kmalloc(sizeof(*rd), GFP_ATOMIC); 490 if (rd == NULL) 491 return -ENOBUFS; 492 493 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { 494 kfree(rd); 495 return -ENOBUFS; 496 } 497 498 rd->remote_ip = *ip; 499 rd->remote_port = port; 500 rd->remote_vni = vni; 501 rd->remote_ifindex = ifindex; 502 503 list_add_tail_rcu(&rd->list, &f->remotes); 504 505 *rdp = rd; 506 return 1; 507} 508 509static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, 510 unsigned int off, 511 struct vxlanhdr *vh, size_t hdrlen, 512 __be32 vni_field, 513 struct gro_remcsum *grc, 514 bool nopartial) 515{ 516 size_t start, offset; 517 518 if (skb->remcsum_offload) 519 return vh; 520 521 if (!NAPI_GRO_CB(skb)->csum_valid) 522 return NULL; 523 524 start = vxlan_rco_start(vni_field); 525 offset = start + vxlan_rco_offset(vni_field); 526 527 vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen, 528 start, offset, grc, nopartial); 529 530 skb->remcsum_offload = 1; 531 532 return vh; 533} 534 535static struct sk_buff **vxlan_gro_receive(struct sock *sk, 536 struct sk_buff **head, 537 struct sk_buff *skb) 538{ 539 struct sk_buff *p, **pp = NULL; 540 struct vxlanhdr *vh, *vh2; 541 unsigned int hlen, off_vx; 542 int flush = 1; 543 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk); 544 __be32 flags; 545 struct gro_remcsum grc; 546 547 skb_gro_remcsum_init(&grc); 548 549 off_vx = skb_gro_offset(skb); 550 hlen = off_vx + sizeof(*vh); 551 vh = skb_gro_header_fast(skb, off_vx); 552 if (skb_gro_header_hard(skb, hlen)) { 553 vh = skb_gro_header_slow(skb, hlen, off_vx); 554 if (unlikely(!vh)) 555 goto out; 556 } 557 558 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); 559 560 flags = vh->vx_flags; 561 562 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { 563 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr), 564 vh->vx_vni, &grc, 565 !!(vs->flags & 566 VXLAN_F_REMCSUM_NOPARTIAL)); 567 568 if (!vh) 569 goto out; 570 } 571 572 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ 573 574 for (p = *head; p; p = p->next) { 575 if (!NAPI_GRO_CB(p)->same_flow) 576 continue; 577 578 vh2 = (struct vxlanhdr *)(p->data + off_vx); 579 if (vh->vx_flags != vh2->vx_flags || 580 vh->vx_vni != vh2->vx_vni) { 581 NAPI_GRO_CB(p)->same_flow = 0; 582 continue; 583 } 584 } 585 586 pp = call_gro_receive(eth_gro_receive, head, skb); 587 flush = 0; 588 589out: 590 skb_gro_remcsum_cleanup(skb, &grc); 591 NAPI_GRO_CB(skb)->flush |= flush; 592 593 return pp; 594} 595 596static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) 597{ 598 /* Sets 'skb->inner_mac_header' since we are always called with 599 * 'skb->encapsulation' set. 600 */ 601 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 602} 603 604/* Add new entry to forwarding table -- assumes lock held */ 605static int vxlan_fdb_create(struct vxlan_dev *vxlan, 606 const u8 *mac, union vxlan_addr *ip, 607 __u16 state, __u16 flags, 608 __be16 port, __be32 vni, __u32 ifindex, 609 __u8 ndm_flags) 610{ 611 struct vxlan_rdst *rd = NULL; 612 struct vxlan_fdb *f; 613 int notify = 0; 614 int rc; 615 616 f = __vxlan_find_mac(vxlan, mac); 617 if (f) { 618 if (flags & NLM_F_EXCL) { 619 netdev_dbg(vxlan->dev, 620 "lost race to create %pM\n", mac); 621 return -EEXIST; 622 } 623 if (f->state != state) { 624 f->state = state; 625 f->updated = jiffies; 626 notify = 1; 627 } 628 if (f->flags != ndm_flags) { 629 f->flags = ndm_flags; 630 f->updated = jiffies; 631 notify = 1; 632 } 633 if ((flags & NLM_F_REPLACE)) { 634 /* Only change unicasts */ 635 if (!(is_multicast_ether_addr(f->eth_addr) || 636 is_zero_ether_addr(f->eth_addr))) { 637 notify |= vxlan_fdb_replace(f, ip, port, vni, 638 ifindex); 639 } else 640 return -EOPNOTSUPP; 641 } 642 if ((flags & NLM_F_APPEND) && 643 (is_multicast_ether_addr(f->eth_addr) || 644 is_zero_ether_addr(f->eth_addr))) { 645 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 646 647 if (rc < 0) 648 return rc; 649 notify |= rc; 650 } 651 } else { 652 if (!(flags & NLM_F_CREATE)) 653 return -ENOENT; 654 655 if (vxlan->cfg.addrmax && 656 vxlan->addrcnt >= vxlan->cfg.addrmax) 657 return -ENOSPC; 658 659 /* Disallow replace to add a multicast entry */ 660 if ((flags & NLM_F_REPLACE) && 661 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) 662 return -EOPNOTSUPP; 663 664 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 665 f = kmalloc(sizeof(*f), GFP_ATOMIC); 666 if (!f) 667 return -ENOMEM; 668 669 notify = 1; 670 f->state = state; 671 f->flags = ndm_flags; 672 f->updated = f->used = jiffies; 673 INIT_LIST_HEAD(&f->remotes); 674 memcpy(f->eth_addr, mac, ETH_ALEN); 675 676 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 677 if (rc < 0) { 678 kfree(f); 679 return rc; 680 } 681 682 ++vxlan->addrcnt; 683 hlist_add_head_rcu(&f->hlist, 684 vxlan_fdb_head(vxlan, mac)); 685 } 686 687 if (notify) { 688 if (rd == NULL) 689 rd = first_remote_rtnl(f); 690 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH); 691 } 692 693 return 0; 694} 695 696static void vxlan_fdb_free(struct rcu_head *head) 697{ 698 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); 699 struct vxlan_rdst *rd, *nd; 700 701 list_for_each_entry_safe(rd, nd, &f->remotes, list) { 702 dst_cache_destroy(&rd->dst_cache); 703 kfree(rd); 704 } 705 kfree(f); 706} 707 708static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) 709{ 710 netdev_dbg(vxlan->dev, 711 "delete %pM\n", f->eth_addr); 712 713 --vxlan->addrcnt; 714 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); 715 716 hlist_del_rcu(&f->hlist); 717 call_rcu(&f->rcu, vxlan_fdb_free); 718} 719 720static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, 721 union vxlan_addr *ip, __be16 *port, __be32 *vni, 722 u32 *ifindex) 723{ 724 struct net *net = dev_net(vxlan->dev); 725 int err; 726 727 if (tb[NDA_DST]) { 728 err = vxlan_nla_get_addr(ip, tb[NDA_DST]); 729 if (err) 730 return err; 731 } else { 732 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; 733 if (remote->sa.sa_family == AF_INET) { 734 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY); 735 ip->sa.sa_family = AF_INET; 736#if IS_ENABLED(CONFIG_IPV6) 737 } else { 738 ip->sin6.sin6_addr = in6addr_any; 739 ip->sa.sa_family = AF_INET6; 740#endif 741 } 742 } 743 744 if (tb[NDA_PORT]) { 745 if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) 746 return -EINVAL; 747 *port = nla_get_be16(tb[NDA_PORT]); 748 } else { 749 *port = vxlan->cfg.dst_port; 750 } 751 752 if (tb[NDA_VNI]) { 753 if (nla_len(tb[NDA_VNI]) != sizeof(u32)) 754 return -EINVAL; 755 *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); 756 } else { 757 *vni = vxlan->default_dst.remote_vni; 758 } 759 760 if (tb[NDA_IFINDEX]) { 761 struct net_device *tdev; 762 763 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) 764 return -EINVAL; 765 *ifindex = nla_get_u32(tb[NDA_IFINDEX]); 766 tdev = __dev_get_by_index(net, *ifindex); 767 if (!tdev) 768 return -EADDRNOTAVAIL; 769 } else { 770 *ifindex = 0; 771 } 772 773 return 0; 774} 775 776/* Add static entry (via netlink) */ 777static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 778 struct net_device *dev, 779 const unsigned char *addr, u16 vid, u16 flags) 780{ 781 struct vxlan_dev *vxlan = netdev_priv(dev); 782 /* struct net *net = dev_net(vxlan->dev); */ 783 union vxlan_addr ip; 784 __be16 port; 785 __be32 vni; 786 u32 ifindex; 787 int err; 788 789 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { 790 pr_info("RTM_NEWNEIGH with invalid state %#x\n", 791 ndm->ndm_state); 792 return -EINVAL; 793 } 794 795 if (tb[NDA_DST] == NULL) 796 return -EINVAL; 797 798 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); 799 if (err) 800 return err; 801 802 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) 803 return -EAFNOSUPPORT; 804 805 spin_lock_bh(&vxlan->hash_lock); 806 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, 807 port, vni, ifindex, ndm->ndm_flags); 808 spin_unlock_bh(&vxlan->hash_lock); 809 810 return err; 811} 812 813/* Delete entry (via netlink) */ 814static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], 815 struct net_device *dev, 816 const unsigned char *addr, u16 vid) 817{ 818 struct vxlan_dev *vxlan = netdev_priv(dev); 819 struct vxlan_fdb *f; 820 struct vxlan_rdst *rd = NULL; 821 union vxlan_addr ip; 822 __be16 port; 823 __be32 vni; 824 u32 ifindex; 825 int err; 826 827 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); 828 if (err) 829 return err; 830 831 err = -ENOENT; 832 833 spin_lock_bh(&vxlan->hash_lock); 834 f = vxlan_find_mac(vxlan, addr); 835 if (!f) 836 goto out; 837 838 if (!vxlan_addr_any(&ip)) { 839 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex); 840 if (!rd) 841 goto out; 842 } 843 844 err = 0; 845 846 /* remove a destination if it's not the only one on the list, 847 * otherwise destroy the fdb entry 848 */ 849 if (rd && !list_is_singular(&f->remotes)) { 850 list_del_rcu(&rd->list); 851 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); 852 kfree_rcu(rd, rcu); 853 goto out; 854 } 855 856 vxlan_fdb_destroy(vxlan, f); 857 858out: 859 spin_unlock_bh(&vxlan->hash_lock); 860 861 return err; 862} 863 864/* Dump forwarding table */ 865static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 866 struct net_device *dev, 867 struct net_device *filter_dev, int *idx) 868{ 869 struct vxlan_dev *vxlan = netdev_priv(dev); 870 unsigned int h; 871 int err = 0; 872 873 for (h = 0; h < FDB_HASH_SIZE; ++h) { 874 struct vxlan_fdb *f; 875 876 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { 877 struct vxlan_rdst *rd; 878 879 list_for_each_entry_rcu(rd, &f->remotes, list) { 880 if (*idx < cb->args[2]) 881 goto skip; 882 883 err = vxlan_fdb_info(skb, vxlan, f, 884 NETLINK_CB(cb->skb).portid, 885 cb->nlh->nlmsg_seq, 886 RTM_NEWNEIGH, 887 NLM_F_MULTI, rd); 888 if (err < 0) 889 goto out; 890skip: 891 *idx += 1; 892 } 893 } 894 } 895out: 896 return err; 897} 898 899/* Watch incoming packets to learn mapping between Ethernet address 900 * and Tunnel endpoint. 901 * Return true if packet is bogus and should be dropped. 902 */ 903static bool vxlan_snoop(struct net_device *dev, 904 union vxlan_addr *src_ip, const u8 *src_mac) 905{ 906 struct vxlan_dev *vxlan = netdev_priv(dev); 907 struct vxlan_fdb *f; 908 909 f = vxlan_find_mac(vxlan, src_mac); 910 if (likely(f)) { 911 struct vxlan_rdst *rdst = first_remote_rcu(f); 912 913 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip))) 914 return false; 915 916 /* Don't migrate static entries, drop packets */ 917 if (f->state & NUD_NOARP) 918 return true; 919 920 if (net_ratelimit()) 921 netdev_info(dev, 922 "%pM migrated from %pIS to %pIS\n", 923 src_mac, &rdst->remote_ip.sa, &src_ip->sa); 924 925 rdst->remote_ip = *src_ip; 926 f->updated = jiffies; 927 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH); 928 } else { 929 /* learned new entry */ 930 spin_lock(&vxlan->hash_lock); 931 932 /* close off race between vxlan_flush and incoming packets */ 933 if (netif_running(dev)) 934 vxlan_fdb_create(vxlan, src_mac, src_ip, 935 NUD_REACHABLE, 936 NLM_F_EXCL|NLM_F_CREATE, 937 vxlan->cfg.dst_port, 938 vxlan->default_dst.remote_vni, 939 0, NTF_SELF); 940 spin_unlock(&vxlan->hash_lock); 941 } 942 943 return false; 944} 945 946/* See if multicast group is already in use by other ID */ 947static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) 948{ 949 struct vxlan_dev *vxlan; 950 struct vxlan_sock *sock4; 951#if IS_ENABLED(CONFIG_IPV6) 952 struct vxlan_sock *sock6; 953#endif 954 unsigned short family = dev->default_dst.remote_ip.sa.sa_family; 955 956 sock4 = rtnl_dereference(dev->vn4_sock); 957 958 /* The vxlan_sock is only used by dev, leaving group has 959 * no effect on other vxlan devices. 960 */ 961 if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1) 962 return false; 963#if IS_ENABLED(CONFIG_IPV6) 964 sock6 = rtnl_dereference(dev->vn6_sock); 965 if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1) 966 return false; 967#endif 968 969 list_for_each_entry(vxlan, &vn->vxlan_list, next) { 970 if (!netif_running(vxlan->dev) || vxlan == dev) 971 continue; 972 973 if (family == AF_INET && 974 rtnl_dereference(vxlan->vn4_sock) != sock4) 975 continue; 976#if IS_ENABLED(CONFIG_IPV6) 977 if (family == AF_INET6 && 978 rtnl_dereference(vxlan->vn6_sock) != sock6) 979 continue; 980#endif 981 982 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip, 983 &dev->default_dst.remote_ip)) 984 continue; 985 986 if (vxlan->default_dst.remote_ifindex != 987 dev->default_dst.remote_ifindex) 988 continue; 989 990 return true; 991 } 992 993 return false; 994} 995 996static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) 997{ 998 struct vxlan_net *vn; 999 1000 if (!vs) 1001 return false; 1002 if (!atomic_dec_and_test(&vs->refcnt)) 1003 return false; 1004 1005 vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); 1006 spin_lock(&vn->sock_lock); 1007 hlist_del_rcu(&vs->hlist); 1008 udp_tunnel_notify_del_rx_port(vs->sock, 1009 (vs->flags & VXLAN_F_GPE) ? 1010 UDP_TUNNEL_TYPE_VXLAN_GPE : 1011 UDP_TUNNEL_TYPE_VXLAN); 1012 spin_unlock(&vn->sock_lock); 1013 1014 return true; 1015} 1016 1017static void vxlan_sock_release(struct vxlan_dev *vxlan) 1018{ 1019 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); 1020#if IS_ENABLED(CONFIG_IPV6) 1021 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); 1022 1023 rcu_assign_pointer(vxlan->vn6_sock, NULL); 1024#endif 1025 1026 rcu_assign_pointer(vxlan->vn4_sock, NULL); 1027 synchronize_net(); 1028 1029 if (__vxlan_sock_release_prep(sock4)) { 1030 udp_tunnel_sock_release(sock4->sock); 1031 kfree(sock4); 1032 } 1033 1034#if IS_ENABLED(CONFIG_IPV6) 1035 if (__vxlan_sock_release_prep(sock6)) { 1036 udp_tunnel_sock_release(sock6->sock); 1037 kfree(sock6); 1038 } 1039#endif 1040} 1041 1042/* Update multicast group membership when first VNI on 1043 * multicast address is brought up 1044 */ 1045static int vxlan_igmp_join(struct vxlan_dev *vxlan) 1046{ 1047 struct sock *sk; 1048 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; 1049 int ifindex = vxlan->default_dst.remote_ifindex; 1050 int ret = -EINVAL; 1051 1052 if (ip->sa.sa_family == AF_INET) { 1053 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); 1054 struct ip_mreqn mreq = { 1055 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, 1056 .imr_ifindex = ifindex, 1057 }; 1058 1059 sk = sock4->sock->sk; 1060 lock_sock(sk); 1061 ret = ip_mc_join_group(sk, &mreq); 1062 release_sock(sk); 1063#if IS_ENABLED(CONFIG_IPV6) 1064 } else { 1065 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); 1066 1067 sk = sock6->sock->sk; 1068 lock_sock(sk); 1069 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, 1070 &ip->sin6.sin6_addr); 1071 release_sock(sk); 1072#endif 1073 } 1074 1075 return ret; 1076} 1077 1078/* Inverse of vxlan_igmp_join when last VNI is brought down */ 1079static int vxlan_igmp_leave(struct vxlan_dev *vxlan) 1080{ 1081 struct sock *sk; 1082 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; 1083 int ifindex = vxlan->default_dst.remote_ifindex; 1084 int ret = -EINVAL; 1085 1086 if (ip->sa.sa_family == AF_INET) { 1087 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); 1088 struct ip_mreqn mreq = { 1089 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, 1090 .imr_ifindex = ifindex, 1091 }; 1092 1093 sk = sock4->sock->sk; 1094 lock_sock(sk); 1095 ret = ip_mc_leave_group(sk, &mreq); 1096 release_sock(sk); 1097#if IS_ENABLED(CONFIG_IPV6) 1098 } else { 1099 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); 1100 1101 sk = sock6->sock->sk; 1102 lock_sock(sk); 1103 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, 1104 &ip->sin6.sin6_addr); 1105 release_sock(sk); 1106#endif 1107 } 1108 1109 return ret; 1110} 1111 1112static bool vxlan_remcsum(struct vxlanhdr *unparsed, 1113 struct sk_buff *skb, u32 vxflags) 1114{ 1115 size_t start, offset; 1116 1117 if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) 1118 goto out; 1119 1120 start = vxlan_rco_start(unparsed->vx_vni); 1121 offset = start + vxlan_rco_offset(unparsed->vx_vni); 1122 1123 if (!pskb_may_pull(skb, offset + sizeof(u16))) 1124 return false; 1125 1126 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, 1127 !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); 1128out: 1129 unparsed->vx_flags &= ~VXLAN_HF_RCO; 1130 unparsed->vx_vni &= VXLAN_VNI_MASK; 1131 return true; 1132} 1133 1134static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, 1135 struct sk_buff *skb, u32 vxflags, 1136 struct vxlan_metadata *md) 1137{ 1138 struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed; 1139 struct metadata_dst *tun_dst; 1140 1141 if (!(unparsed->vx_flags & VXLAN_HF_GBP)) 1142 goto out; 1143 1144 md->gbp = ntohs(gbp->policy_id); 1145 1146 tun_dst = (struct metadata_dst *)skb_dst(skb); 1147 if (tun_dst) { 1148 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; 1149 tun_dst->u.tun_info.options_len = sizeof(*md); 1150 } 1151 if (gbp->dont_learn) 1152 md->gbp |= VXLAN_GBP_DONT_LEARN; 1153 1154 if (gbp->policy_applied) 1155 md->gbp |= VXLAN_GBP_POLICY_APPLIED; 1156 1157 /* In flow-based mode, GBP is carried in dst_metadata */ 1158 if (!(vxflags & VXLAN_F_COLLECT_METADATA)) 1159 skb->mark = md->gbp; 1160out: 1161 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; 1162} 1163 1164static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, 1165 __be16 *protocol, 1166 struct sk_buff *skb, u32 vxflags) 1167{ 1168 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed; 1169 1170 /* Need to have Next Protocol set for interfaces in GPE mode. */ 1171 if (!gpe->np_applied) 1172 return false; 1173 /* "The initial version is 0. If a receiver does not support the 1174 * version indicated it MUST drop the packet. 1175 */ 1176 if (gpe->version != 0) 1177 return false; 1178 /* "When the O bit is set to 1, the packet is an OAM packet and OAM 1179 * processing MUST occur." However, we don't implement OAM 1180 * processing, thus drop the packet. 1181 */ 1182 if (gpe->oam_flag) 1183 return false; 1184 1185 switch (gpe->next_protocol) { 1186 case VXLAN_GPE_NP_IPV4: 1187 *protocol = htons(ETH_P_IP); 1188 break; 1189 case VXLAN_GPE_NP_IPV6: 1190 *protocol = htons(ETH_P_IPV6); 1191 break; 1192 case VXLAN_GPE_NP_ETHERNET: 1193 *protocol = htons(ETH_P_TEB); 1194 break; 1195 default: 1196 return false; 1197 } 1198 1199 unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS; 1200 return true; 1201} 1202 1203static bool vxlan_set_mac(struct vxlan_dev *vxlan, 1204 struct vxlan_sock *vs, 1205 struct sk_buff *skb) 1206{ 1207 union vxlan_addr saddr; 1208 1209 skb_reset_mac_header(skb); 1210 skb->protocol = eth_type_trans(skb, vxlan->dev); 1211 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1212 1213 /* Ignore packet loops (and multicast echo) */ 1214 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1215 return false; 1216 1217 /* Get address from the outer IP header */ 1218 if (vxlan_get_sk_family(vs) == AF_INET) { 1219 saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; 1220 saddr.sa.sa_family = AF_INET; 1221#if IS_ENABLED(CONFIG_IPV6) 1222 } else { 1223 saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr; 1224 saddr.sa.sa_family = AF_INET6; 1225#endif 1226 } 1227 1228 if ((vxlan->flags & VXLAN_F_LEARN) && 1229 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) 1230 return false; 1231 1232 return true; 1233} 1234 1235static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, 1236 struct sk_buff *skb) 1237{ 1238 int err = 0; 1239 1240 if (vxlan_get_sk_family(vs) == AF_INET) 1241 err = IP_ECN_decapsulate(oiph, skb); 1242#if IS_ENABLED(CONFIG_IPV6) 1243 else 1244 err = IP6_ECN_decapsulate(oiph, skb); 1245#endif 1246 1247 if (unlikely(err) && log_ecn_error) { 1248 if (vxlan_get_sk_family(vs) == AF_INET) 1249 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", 1250 &((struct iphdr *)oiph)->saddr, 1251 ((struct iphdr *)oiph)->tos); 1252 else 1253 net_info_ratelimited("non-ECT from %pI6\n", 1254 &((struct ipv6hdr *)oiph)->saddr); 1255 } 1256 return err <= 1; 1257} 1258 1259/* Callback from net/ipv4/udp.c to receive packets */ 1260static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) 1261{ 1262 struct pcpu_sw_netstats *stats; 1263 struct vxlan_dev *vxlan; 1264 struct vxlan_sock *vs; 1265 struct vxlanhdr unparsed; 1266 struct vxlan_metadata _md; 1267 struct vxlan_metadata *md = &_md; 1268 __be16 protocol = htons(ETH_P_TEB); 1269 bool raw_proto = false; 1270 void *oiph; 1271 1272 /* Need UDP and VXLAN header to be present */ 1273 if (!pskb_may_pull(skb, VXLAN_HLEN)) 1274 goto drop; 1275 1276 unparsed = *vxlan_hdr(skb); 1277 /* VNI flag always required to be set */ 1278 if (!(unparsed.vx_flags & VXLAN_HF_VNI)) { 1279 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", 1280 ntohl(vxlan_hdr(skb)->vx_flags), 1281 ntohl(vxlan_hdr(skb)->vx_vni)); 1282 /* Return non vxlan pkt */ 1283 goto drop; 1284 } 1285 unparsed.vx_flags &= ~VXLAN_HF_VNI; 1286 unparsed.vx_vni &= ~VXLAN_VNI_MASK; 1287 1288 vs = rcu_dereference_sk_user_data(sk); 1289 if (!vs) 1290 goto drop; 1291 1292 vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni)); 1293 if (!vxlan) 1294 goto drop; 1295 1296 /* For backwards compatibility, only allow reserved fields to be 1297 * used by VXLAN extensions if explicitly requested. 1298 */ 1299 if (vs->flags & VXLAN_F_GPE) { 1300 if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags)) 1301 goto drop; 1302 raw_proto = true; 1303 } 1304 1305 if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto, 1306 !net_eq(vxlan->net, dev_net(vxlan->dev)))) 1307 goto drop; 1308 1309 if (vxlan_collect_metadata(vs)) { 1310 __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); 1311 struct metadata_dst *tun_dst; 1312 1313 tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, 1314 key32_to_tunnel_id(vni), sizeof(*md)); 1315 1316 if (!tun_dst) 1317 goto drop; 1318 1319 md = ip_tunnel_info_opts(&tun_dst->u.tun_info); 1320 1321 skb_dst_set(skb, (struct dst_entry *)tun_dst); 1322 } else { 1323 memset(md, 0, sizeof(*md)); 1324 } 1325 1326 if (vs->flags & VXLAN_F_REMCSUM_RX) 1327 if (!vxlan_remcsum(&unparsed, skb, vs->flags)) 1328 goto drop; 1329 if (vs->flags & VXLAN_F_GBP) 1330 vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md); 1331 /* Note that GBP and GPE can never be active together. This is 1332 * ensured in vxlan_dev_configure. 1333 */ 1334 1335 if (unparsed.vx_flags || unparsed.vx_vni) { 1336 /* If there are any unprocessed flags remaining treat 1337 * this as a malformed packet. This behavior diverges from 1338 * VXLAN RFC (RFC7348) which stipulates that bits in reserved 1339 * in reserved fields are to be ignored. The approach here 1340 * maintains compatibility with previous stack code, and also 1341 * is more robust and provides a little more security in 1342 * adding extensions to VXLAN. 1343 */ 1344 goto drop; 1345 } 1346 1347 if (!raw_proto) { 1348 if (!vxlan_set_mac(vxlan, vs, skb)) 1349 goto drop; 1350 } else { 1351 skb_reset_mac_header(skb); 1352 skb->dev = vxlan->dev; 1353 skb->pkt_type = PACKET_HOST; 1354 } 1355 1356 oiph = skb_network_header(skb); 1357 skb_reset_network_header(skb); 1358 1359 if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { 1360 ++vxlan->dev->stats.rx_frame_errors; 1361 ++vxlan->dev->stats.rx_errors; 1362 goto drop; 1363 } 1364 1365 stats = this_cpu_ptr(vxlan->dev->tstats); 1366 u64_stats_update_begin(&stats->syncp); 1367 stats->rx_packets++; 1368 stats->rx_bytes += skb->len; 1369 u64_stats_update_end(&stats->syncp); 1370 1371 gro_cells_receive(&vxlan->gro_cells, skb); 1372 return 0; 1373 1374drop: 1375 /* Consume bad packet */ 1376 kfree_skb(skb); 1377 return 0; 1378} 1379 1380static int arp_reduce(struct net_device *dev, struct sk_buff *skb) 1381{ 1382 struct vxlan_dev *vxlan = netdev_priv(dev); 1383 struct arphdr *parp; 1384 u8 *arpptr, *sha; 1385 __be32 sip, tip; 1386 struct neighbour *n; 1387 1388 if (dev->flags & IFF_NOARP) 1389 goto out; 1390 1391 if (!pskb_may_pull(skb, arp_hdr_len(dev))) { 1392 dev->stats.tx_dropped++; 1393 goto out; 1394 } 1395 parp = arp_hdr(skb); 1396 1397 if ((parp->ar_hrd != htons(ARPHRD_ETHER) && 1398 parp->ar_hrd != htons(ARPHRD_IEEE802)) || 1399 parp->ar_pro != htons(ETH_P_IP) || 1400 parp->ar_op != htons(ARPOP_REQUEST) || 1401 parp->ar_hln != dev->addr_len || 1402 parp->ar_pln != 4) 1403 goto out; 1404 arpptr = (u8 *)parp + sizeof(struct arphdr); 1405 sha = arpptr; 1406 arpptr += dev->addr_len; /* sha */ 1407 memcpy(&sip, arpptr, sizeof(sip)); 1408 arpptr += sizeof(sip); 1409 arpptr += dev->addr_len; /* tha */ 1410 memcpy(&tip, arpptr, sizeof(tip)); 1411 1412 if (ipv4_is_loopback(tip) || 1413 ipv4_is_multicast(tip)) 1414 goto out; 1415 1416 n = neigh_lookup(&arp_tbl, &tip, dev); 1417 1418 if (n) { 1419 struct vxlan_fdb *f; 1420 struct sk_buff *reply; 1421 1422 if (!(n->nud_state & NUD_CONNECTED)) { 1423 neigh_release(n); 1424 goto out; 1425 } 1426 1427 f = vxlan_find_mac(vxlan, n->ha); 1428 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 1429 /* bridge-local neighbor */ 1430 neigh_release(n); 1431 goto out; 1432 } 1433 1434 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 1435 n->ha, sha); 1436 1437 neigh_release(n); 1438 1439 if (reply == NULL) 1440 goto out; 1441 1442 skb_reset_mac_header(reply); 1443 __skb_pull(reply, skb_network_offset(reply)); 1444 reply->ip_summed = CHECKSUM_UNNECESSARY; 1445 reply->pkt_type = PACKET_HOST; 1446 1447 if (netif_rx_ni(reply) == NET_RX_DROP) 1448 dev->stats.rx_dropped++; 1449 } else if (vxlan->flags & VXLAN_F_L3MISS) { 1450 union vxlan_addr ipa = { 1451 .sin.sin_addr.s_addr = tip, 1452 .sin.sin_family = AF_INET, 1453 }; 1454 1455 vxlan_ip_miss(dev, &ipa); 1456 } 1457out: 1458 consume_skb(skb); 1459 return NETDEV_TX_OK; 1460} 1461 1462#if IS_ENABLED(CONFIG_IPV6) 1463static struct sk_buff *vxlan_na_create(struct sk_buff *request, 1464 struct neighbour *n, bool isrouter) 1465{ 1466 struct net_device *dev = request->dev; 1467 struct sk_buff *reply; 1468 struct nd_msg *ns, *na; 1469 struct ipv6hdr *pip6; 1470 u8 *daddr; 1471 int na_olen = 8; /* opt hdr + ETH_ALEN for target */ 1472 int ns_olen; 1473 int i, len; 1474 1475 if (dev == NULL) 1476 return NULL; 1477 1478 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + 1479 sizeof(*na) + na_olen + dev->needed_tailroom; 1480 reply = alloc_skb(len, GFP_ATOMIC); 1481 if (reply == NULL) 1482 return NULL; 1483 1484 reply->protocol = htons(ETH_P_IPV6); 1485 reply->dev = dev; 1486 skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); 1487 skb_push(reply, sizeof(struct ethhdr)); 1488 skb_reset_mac_header(reply); 1489 1490 ns = (struct nd_msg *)skb_transport_header(request); 1491 1492 daddr = eth_hdr(request)->h_source; 1493 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); 1494 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { 1495 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { 1496 daddr = ns->opt + i + sizeof(struct nd_opt_hdr); 1497 break; 1498 } 1499 } 1500 1501 /* Ethernet header */ 1502 ether_addr_copy(eth_hdr(reply)->h_dest, daddr); 1503 ether_addr_copy(eth_hdr(reply)->h_source, n->ha); 1504 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); 1505 reply->protocol = htons(ETH_P_IPV6); 1506 1507 skb_pull(reply, sizeof(struct ethhdr)); 1508 skb_reset_network_header(reply); 1509 skb_put(reply, sizeof(struct ipv6hdr)); 1510 1511 /* IPv6 header */ 1512 1513 pip6 = ipv6_hdr(reply); 1514 memset(pip6, 0, sizeof(struct ipv6hdr)); 1515 pip6->version = 6; 1516 pip6->priority = ipv6_hdr(request)->priority; 1517 pip6->nexthdr = IPPROTO_ICMPV6; 1518 pip6->hop_limit = 255; 1519 pip6->daddr = ipv6_hdr(request)->saddr; 1520 pip6->saddr = *(struct in6_addr *)n->primary_key; 1521 1522 skb_pull(reply, sizeof(struct ipv6hdr)); 1523 skb_reset_transport_header(reply); 1524 1525 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); 1526 1527 /* Neighbor Advertisement */ 1528 memset(na, 0, sizeof(*na)+na_olen); 1529 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; 1530 na->icmph.icmp6_router = isrouter; 1531 na->icmph.icmp6_override = 1; 1532 na->icmph.icmp6_solicited = 1; 1533 na->target = ns->target; 1534 ether_addr_copy(&na->opt[2], n->ha); 1535 na->opt[0] = ND_OPT_TARGET_LL_ADDR; 1536 na->opt[1] = na_olen >> 3; 1537 1538 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, 1539 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, 1540 csum_partial(na, sizeof(*na)+na_olen, 0)); 1541 1542 pip6->payload_len = htons(sizeof(*na)+na_olen); 1543 1544 skb_push(reply, sizeof(struct ipv6hdr)); 1545 1546 reply->ip_summed = CHECKSUM_UNNECESSARY; 1547 1548 return reply; 1549} 1550 1551static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) 1552{ 1553 struct vxlan_dev *vxlan = netdev_priv(dev); 1554 struct nd_msg *msg; 1555 const struct ipv6hdr *iphdr; 1556 const struct in6_addr *saddr, *daddr; 1557 struct neighbour *n; 1558 struct inet6_dev *in6_dev; 1559 1560 in6_dev = __in6_dev_get(dev); 1561 if (!in6_dev) 1562 goto out; 1563 1564 iphdr = ipv6_hdr(skb); 1565 saddr = &iphdr->saddr; 1566 daddr = &iphdr->daddr; 1567 1568 msg = (struct nd_msg *)skb_transport_header(skb); 1569 if (msg->icmph.icmp6_code != 0 || 1570 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) 1571 goto out; 1572 1573 if (ipv6_addr_loopback(daddr) || 1574 ipv6_addr_is_multicast(&msg->target)) 1575 goto out; 1576 1577 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); 1578 1579 if (n) { 1580 struct vxlan_fdb *f; 1581 struct sk_buff *reply; 1582 1583 if (!(n->nud_state & NUD_CONNECTED)) { 1584 neigh_release(n); 1585 goto out; 1586 } 1587 1588 f = vxlan_find_mac(vxlan, n->ha); 1589 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 1590 /* bridge-local neighbor */ 1591 neigh_release(n); 1592 goto out; 1593 } 1594 1595 reply = vxlan_na_create(skb, n, 1596 !!(f ? f->flags & NTF_ROUTER : 0)); 1597 1598 neigh_release(n); 1599 1600 if (reply == NULL) 1601 goto out; 1602 1603 if (netif_rx_ni(reply) == NET_RX_DROP) 1604 dev->stats.rx_dropped++; 1605 1606 } else if (vxlan->flags & VXLAN_F_L3MISS) { 1607 union vxlan_addr ipa = { 1608 .sin6.sin6_addr = msg->target, 1609 .sin6.sin6_family = AF_INET6, 1610 }; 1611 1612 vxlan_ip_miss(dev, &ipa); 1613 } 1614 1615out: 1616 consume_skb(skb); 1617 return NETDEV_TX_OK; 1618} 1619#endif 1620 1621static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) 1622{ 1623 struct vxlan_dev *vxlan = netdev_priv(dev); 1624 struct neighbour *n; 1625 1626 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) 1627 return false; 1628 1629 n = NULL; 1630 switch (ntohs(eth_hdr(skb)->h_proto)) { 1631 case ETH_P_IP: 1632 { 1633 struct iphdr *pip; 1634 1635 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 1636 return false; 1637 pip = ip_hdr(skb); 1638 n = neigh_lookup(&arp_tbl, &pip->daddr, dev); 1639 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { 1640 union vxlan_addr ipa = { 1641 .sin.sin_addr.s_addr = pip->daddr, 1642 .sin.sin_family = AF_INET, 1643 }; 1644 1645 vxlan_ip_miss(dev, &ipa); 1646 return false; 1647 } 1648 1649 break; 1650 } 1651#if IS_ENABLED(CONFIG_IPV6) 1652 case ETH_P_IPV6: 1653 { 1654 struct ipv6hdr *pip6; 1655 1656 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 1657 return false; 1658 pip6 = ipv6_hdr(skb); 1659 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev); 1660 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { 1661 union vxlan_addr ipa = { 1662 .sin6.sin6_addr = pip6->daddr, 1663 .sin6.sin6_family = AF_INET6, 1664 }; 1665 1666 vxlan_ip_miss(dev, &ipa); 1667 return false; 1668 } 1669 1670 break; 1671 } 1672#endif 1673 default: 1674 return false; 1675 } 1676 1677 if (n) { 1678 bool diff; 1679 1680 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha); 1681 if (diff) { 1682 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, 1683 dev->addr_len); 1684 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len); 1685 } 1686 neigh_release(n); 1687 return diff; 1688 } 1689 1690 return false; 1691} 1692 1693static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, 1694 struct vxlan_metadata *md) 1695{ 1696 struct vxlanhdr_gbp *gbp; 1697 1698 if (!md->gbp) 1699 return; 1700 1701 gbp = (struct vxlanhdr_gbp *)vxh; 1702 vxh->vx_flags |= VXLAN_HF_GBP; 1703 1704 if (md->gbp & VXLAN_GBP_DONT_LEARN) 1705 gbp->dont_learn = 1; 1706 1707 if (md->gbp & VXLAN_GBP_POLICY_APPLIED) 1708 gbp->policy_applied = 1; 1709 1710 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); 1711} 1712 1713static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags, 1714 __be16 protocol) 1715{ 1716 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh; 1717 1718 gpe->np_applied = 1; 1719 1720 switch (protocol) { 1721 case htons(ETH_P_IP): 1722 gpe->next_protocol = VXLAN_GPE_NP_IPV4; 1723 return 0; 1724 case htons(ETH_P_IPV6): 1725 gpe->next_protocol = VXLAN_GPE_NP_IPV6; 1726 return 0; 1727 case htons(ETH_P_TEB): 1728 gpe->next_protocol = VXLAN_GPE_NP_ETHERNET; 1729 return 0; 1730 } 1731 return -EPFNOSUPPORT; 1732} 1733 1734static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, 1735 int iphdr_len, __be32 vni, 1736 struct vxlan_metadata *md, u32 vxflags, 1737 bool udp_sum) 1738{ 1739 struct vxlanhdr *vxh; 1740 int min_headroom; 1741 int err; 1742 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; 1743 __be16 inner_protocol = htons(ETH_P_TEB); 1744 1745 if ((vxflags & VXLAN_F_REMCSUM_TX) && 1746 skb->ip_summed == CHECKSUM_PARTIAL) { 1747 int csum_start = skb_checksum_start_offset(skb); 1748 1749 if (csum_start <= VXLAN_MAX_REMCSUM_START && 1750 !(csum_start & VXLAN_RCO_SHIFT_MASK) && 1751 (skb->csum_offset == offsetof(struct udphdr, check) || 1752 skb->csum_offset == offsetof(struct tcphdr, check))) 1753 type |= SKB_GSO_TUNNEL_REMCSUM; 1754 } 1755 1756 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len 1757 + VXLAN_HLEN + iphdr_len; 1758 1759 /* Need space for new headers (invalidates iph ptr) */ 1760 err = skb_cow_head(skb, min_headroom); 1761 if (unlikely(err)) 1762 return err; 1763 1764 err = iptunnel_handle_offloads(skb, type); 1765 if (err) 1766 return err; 1767 1768 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1769 vxh->vx_flags = VXLAN_HF_VNI; 1770 vxh->vx_vni = vxlan_vni_field(vni); 1771 1772 if (type & SKB_GSO_TUNNEL_REMCSUM) { 1773 unsigned int start; 1774 1775 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr); 1776 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset); 1777 vxh->vx_flags |= VXLAN_HF_RCO; 1778 1779 if (!skb_is_gso(skb)) { 1780 skb->ip_summed = CHECKSUM_NONE; 1781 skb->encapsulation = 0; 1782 } 1783 } 1784 1785 if (vxflags & VXLAN_F_GBP) 1786 vxlan_build_gbp_hdr(vxh, vxflags, md); 1787 if (vxflags & VXLAN_F_GPE) { 1788 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol); 1789 if (err < 0) 1790 return err; 1791 inner_protocol = skb->protocol; 1792 } 1793 1794 skb_set_inner_protocol(skb, inner_protocol); 1795 return 0; 1796} 1797 1798static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, 1799 struct vxlan_sock *sock4, 1800 struct sk_buff *skb, int oif, u8 tos, 1801 __be32 daddr, __be32 *saddr, 1802 struct dst_cache *dst_cache, 1803 const struct ip_tunnel_info *info) 1804{ 1805 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1806 struct rtable *rt = NULL; 1807 struct flowi4 fl4; 1808 1809 if (!sock4) 1810 return ERR_PTR(-EIO); 1811 1812 if (tos && !info) 1813 use_cache = false; 1814 if (use_cache) { 1815 rt = dst_cache_get_ip4(dst_cache, saddr); 1816 if (rt) 1817 return rt; 1818 } 1819 1820 memset(&fl4, 0, sizeof(fl4)); 1821 fl4.flowi4_oif = oif; 1822 fl4.flowi4_tos = RT_TOS(tos); 1823 fl4.flowi4_mark = skb->mark; 1824 fl4.flowi4_proto = IPPROTO_UDP; 1825 fl4.daddr = daddr; 1826 fl4.saddr = *saddr; 1827 1828 rt = ip_route_output_key(vxlan->net, &fl4); 1829 if (likely(!IS_ERR(rt))) { 1830 if (rt->dst.dev == dev) { 1831 netdev_dbg(dev, "circular route to %pI4\n", &daddr); 1832 ip_rt_put(rt); 1833 return ERR_PTR(-ELOOP); 1834 } 1835 1836 *saddr = fl4.saddr; 1837 if (use_cache) 1838 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); 1839 } else { 1840 netdev_dbg(dev, "no route to %pI4\n", &daddr); 1841 return ERR_PTR(-ENETUNREACH); 1842 } 1843 return rt; 1844} 1845 1846#if IS_ENABLED(CONFIG_IPV6) 1847static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, 1848 struct net_device *dev, 1849 struct vxlan_sock *sock6, 1850 struct sk_buff *skb, int oif, u8 tos, 1851 __be32 label, 1852 const struct in6_addr *daddr, 1853 struct in6_addr *saddr, 1854 struct dst_cache *dst_cache, 1855 const struct ip_tunnel_info *info) 1856{ 1857 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1858 struct dst_entry *ndst; 1859 struct flowi6 fl6; 1860 int err; 1861 1862 if (!sock6) 1863 return ERR_PTR(-EIO); 1864 1865 if (tos && !info) 1866 use_cache = false; 1867 if (use_cache) { 1868 ndst = dst_cache_get_ip6(dst_cache, saddr); 1869 if (ndst) 1870 return ndst; 1871 } 1872 1873 memset(&fl6, 0, sizeof(fl6)); 1874 fl6.flowi6_oif = oif; 1875 fl6.daddr = *daddr; 1876 fl6.saddr = *saddr; 1877 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); 1878 fl6.flowi6_mark = skb->mark; 1879 fl6.flowi6_proto = IPPROTO_UDP; 1880 1881 err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 1882 sock6->sock->sk, 1883 &ndst, &fl6); 1884 if (unlikely(err < 0)) { 1885 netdev_dbg(dev, "no route to %pI6\n", daddr); 1886 return ERR_PTR(-ENETUNREACH); 1887 } 1888 1889 if (unlikely(ndst->dev == dev)) { 1890 netdev_dbg(dev, "circular route to %pI6\n", daddr); 1891 dst_release(ndst); 1892 return ERR_PTR(-ELOOP); 1893 } 1894 1895 *saddr = fl6.saddr; 1896 if (use_cache) 1897 dst_cache_set_ip6(dst_cache, ndst, saddr); 1898 return ndst; 1899} 1900#endif 1901 1902/* Bypass encapsulation if the destination is local */ 1903static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1904 struct vxlan_dev *dst_vxlan) 1905{ 1906 struct pcpu_sw_netstats *tx_stats, *rx_stats; 1907 union vxlan_addr loopback; 1908 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 1909 struct net_device *dev = skb->dev; 1910 int len = skb->len; 1911 1912 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 1913 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); 1914 skb->pkt_type = PACKET_HOST; 1915 skb->encapsulation = 0; 1916 skb->dev = dst_vxlan->dev; 1917 __skb_pull(skb, skb_network_offset(skb)); 1918 1919 if (remote_ip->sa.sa_family == AF_INET) { 1920 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 1921 loopback.sa.sa_family = AF_INET; 1922#if IS_ENABLED(CONFIG_IPV6) 1923 } else { 1924 loopback.sin6.sin6_addr = in6addr_loopback; 1925 loopback.sa.sa_family = AF_INET6; 1926#endif 1927 } 1928 1929 if (dst_vxlan->flags & VXLAN_F_LEARN) 1930 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source); 1931 1932 u64_stats_update_begin(&tx_stats->syncp); 1933 tx_stats->tx_packets++; 1934 tx_stats->tx_bytes += len; 1935 u64_stats_update_end(&tx_stats->syncp); 1936 1937 if (netif_rx(skb) == NET_RX_SUCCESS) { 1938 u64_stats_update_begin(&rx_stats->syncp); 1939 rx_stats->rx_packets++; 1940 rx_stats->rx_bytes += len; 1941 u64_stats_update_end(&rx_stats->syncp); 1942 } else { 1943 dev->stats.rx_dropped++; 1944 } 1945} 1946 1947static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, 1948 struct vxlan_dev *vxlan, union vxlan_addr *daddr, 1949 __be32 dst_port, __be32 vni, struct dst_entry *dst, 1950 u32 rt_flags) 1951{ 1952#if IS_ENABLED(CONFIG_IPV6) 1953 /* IPv6 rt-flags are checked against RTF_LOCAL, but the value of 1954 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple 1955 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry. 1956 */ 1957 BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL); 1958#endif 1959 /* Bypass encapsulation if the destination is local */ 1960 if (rt_flags & RTCF_LOCAL && 1961 !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 1962 struct vxlan_dev *dst_vxlan; 1963 1964 dst_release(dst); 1965 dst_vxlan = vxlan_find_vni(vxlan->net, vni, 1966 daddr->sa.sa_family, dst_port, 1967 vxlan->flags); 1968 if (!dst_vxlan) { 1969 dev->stats.tx_errors++; 1970 kfree_skb(skb); 1971 1972 return -ENOENT; 1973 } 1974 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1975 return 1; 1976 } 1977 1978 return 0; 1979} 1980 1981static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, 1982 struct vxlan_rdst *rdst, bool did_rsc) 1983{ 1984 struct dst_cache *dst_cache; 1985 struct ip_tunnel_info *info; 1986 struct vxlan_dev *vxlan = netdev_priv(dev); 1987 const struct iphdr *old_iph = ip_hdr(skb); 1988 union vxlan_addr *dst; 1989 union vxlan_addr remote_ip, local_ip; 1990 union vxlan_addr *src; 1991 struct vxlan_metadata _md; 1992 struct vxlan_metadata *md = &_md; 1993 __be16 src_port = 0, dst_port; 1994 struct dst_entry *ndst = NULL; 1995 __be32 vni, label; 1996 __u8 tos, ttl; 1997 int err; 1998 u32 flags = vxlan->flags; 1999 bool udp_sum = false; 2000 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); 2001 2002 info = skb_tunnel_info(skb); 2003 2004 if (rdst) { 2005 dst = &rdst->remote_ip; 2006 if (vxlan_addr_any(dst)) { 2007 if (did_rsc) { 2008 /* short-circuited back to local bridge */ 2009 vxlan_encap_bypass(skb, vxlan, vxlan); 2010 return; 2011 } 2012 goto drop; 2013 } 2014 2015 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; 2016 vni = rdst->remote_vni; 2017 src = &vxlan->cfg.saddr; 2018 dst_cache = &rdst->dst_cache; 2019 md->gbp = skb->mark; 2020 ttl = vxlan->cfg.ttl; 2021 if (!ttl && vxlan_addr_multicast(dst)) 2022 ttl = 1; 2023 2024 tos = vxlan->cfg.tos; 2025 if (tos == 1) 2026 tos = ip_tunnel_get_dsfield(old_iph, skb); 2027 2028 if (dst->sa.sa_family == AF_INET) 2029 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); 2030 else 2031 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2032 label = vxlan->cfg.label; 2033 } else { 2034 if (!info) { 2035 WARN_ONCE(1, "%s: Missing encapsulation instructions\n", 2036 dev->name); 2037 goto drop; 2038 } 2039 remote_ip.sa.sa_family = ip_tunnel_info_af(info); 2040 if (remote_ip.sa.sa_family == AF_INET) { 2041 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; 2042 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; 2043 } else { 2044 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; 2045 local_ip.sin6.sin6_addr = info->key.u.ipv6.src; 2046 } 2047 dst = &remote_ip; 2048 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; 2049 vni = tunnel_id_to_key32(info->key.tun_id); 2050 src = &local_ip; 2051 dst_cache = &info->dst_cache; 2052 if (info->options_len) 2053 md = ip_tunnel_info_opts(info); 2054 ttl = info->key.ttl; 2055 tos = info->key.tos; 2056 label = info->key.label; 2057 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); 2058 } 2059 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2060 vxlan->cfg.port_max, true); 2061 2062 if (dst->sa.sa_family == AF_INET) { 2063 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 2064 struct rtable *rt; 2065 __be16 df = 0; 2066 2067 rt = vxlan_get_route(vxlan, dev, sock4, skb, 2068 rdst ? rdst->remote_ifindex : 0, tos, 2069 dst->sin.sin_addr.s_addr, 2070 &src->sin.sin_addr.s_addr, 2071 dst_cache, info); 2072 if (IS_ERR(rt)) { 2073 err = PTR_ERR(rt); 2074 goto tx_error; 2075 } 2076 2077 /* Bypass encapsulation if the destination is local */ 2078 if (!info) { 2079 err = encap_bypass_if_local(skb, dev, vxlan, dst, 2080 dst_port, vni, &rt->dst, 2081 rt->rt_flags); 2082 if (err) 2083 return; 2084 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { 2085 df = htons(IP_DF); 2086 } 2087 2088 ndst = &rt->dst; 2089 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2090 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2091 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), 2092 vni, md, flags, udp_sum); 2093 if (err < 0) 2094 goto tx_error; 2095 2096 udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, src->sin.sin_addr.s_addr, 2097 dst->sin.sin_addr.s_addr, tos, ttl, df, 2098 src_port, dst_port, xnet, !udp_sum); 2099#if IS_ENABLED(CONFIG_IPV6) 2100 } else { 2101 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); 2102 2103 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 2104 rdst ? rdst->remote_ifindex : 0, tos, 2105 label, &dst->sin6.sin6_addr, 2106 &src->sin6.sin6_addr, 2107 dst_cache, info); 2108 if (IS_ERR(ndst)) { 2109 err = PTR_ERR(ndst); 2110 ndst = NULL; 2111 goto tx_error; 2112 } 2113 2114 if (!info) { 2115 u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; 2116 2117 err = encap_bypass_if_local(skb, dev, vxlan, dst, 2118 dst_port, vni, ndst, 2119 rt6i_flags); 2120 if (err) 2121 return; 2122 } 2123 2124 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2125 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2126 skb_scrub_packet(skb, xnet); 2127 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), 2128 vni, md, flags, udp_sum); 2129 if (err < 0) 2130 goto tx_error; 2131 2132 udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, 2133 &src->sin6.sin6_addr, 2134 &dst->sin6.sin6_addr, tos, ttl, 2135 label, src_port, dst_port, !udp_sum); 2136#endif 2137 } 2138 return; 2139 2140drop: 2141 dev->stats.tx_dropped++; 2142 dev_kfree_skb(skb); 2143 return; 2144 2145tx_error: 2146 if (err == -ELOOP) 2147 dev->stats.collisions++; 2148 else if (err == -ENETUNREACH) 2149 dev->stats.tx_carrier_errors++; 2150 dst_release(ndst); 2151 dev->stats.tx_errors++; 2152 kfree_skb(skb); 2153} 2154 2155/* Transmit local packets over Vxlan 2156 * 2157 * Outer IP header inherits ECN and DF from inner header. 2158 * Outer UDP destination is the VXLAN assigned port. 2159 * source port is based on hash of flow 2160 */ 2161static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) 2162{ 2163 struct vxlan_dev *vxlan = netdev_priv(dev); 2164 const struct ip_tunnel_info *info; 2165 struct ethhdr *eth; 2166 bool did_rsc = false; 2167 struct vxlan_rdst *rdst, *fdst = NULL; 2168 struct vxlan_fdb *f; 2169 2170 info = skb_tunnel_info(skb); 2171 2172 skb_reset_mac_header(skb); 2173 2174 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { 2175 if (info && info->mode & IP_TUNNEL_INFO_TX) 2176 vxlan_xmit_one(skb, dev, NULL, false); 2177 else 2178 kfree_skb(skb); 2179 return NETDEV_TX_OK; 2180 } 2181 2182 if (vxlan->flags & VXLAN_F_PROXY) { 2183 eth = eth_hdr(skb); 2184 if (ntohs(eth->h_proto) == ETH_P_ARP) 2185 return arp_reduce(dev, skb); 2186#if IS_ENABLED(CONFIG_IPV6) 2187 else if (ntohs(eth->h_proto) == ETH_P_IPV6 && 2188 pskb_may_pull(skb, sizeof(struct ipv6hdr) 2189 + sizeof(struct nd_msg)) && 2190 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { 2191 struct nd_msg *msg; 2192 2193 msg = (struct nd_msg *)skb_transport_header(skb); 2194 if (msg->icmph.icmp6_code == 0 && 2195 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) 2196 return neigh_reduce(dev, skb); 2197 } 2198#endif 2199 } 2200 2201 eth = eth_hdr(skb); 2202 f = vxlan_find_mac(vxlan, eth->h_dest); 2203 did_rsc = false; 2204 2205 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) && 2206 (ntohs(eth->h_proto) == ETH_P_IP || 2207 ntohs(eth->h_proto) == ETH_P_IPV6)) { 2208 did_rsc = route_shortcircuit(dev, skb); 2209 if (did_rsc) 2210 f = vxlan_find_mac(vxlan, eth->h_dest); 2211 } 2212 2213 if (f == NULL) { 2214 f = vxlan_find_mac(vxlan, all_zeros_mac); 2215 if (f == NULL) { 2216 if ((vxlan->flags & VXLAN_F_L2MISS) && 2217 !is_multicast_ether_addr(eth->h_dest)) 2218 vxlan_fdb_miss(vxlan, eth->h_dest); 2219 2220 dev->stats.tx_dropped++; 2221 kfree_skb(skb); 2222 return NETDEV_TX_OK; 2223 } 2224 } 2225 2226 list_for_each_entry_rcu(rdst, &f->remotes, list) { 2227 struct sk_buff *skb1; 2228 2229 if (!fdst) { 2230 fdst = rdst; 2231 continue; 2232 } 2233 skb1 = skb_clone(skb, GFP_ATOMIC); 2234 if (skb1) 2235 vxlan_xmit_one(skb1, dev, rdst, did_rsc); 2236 } 2237 2238 if (fdst) 2239 vxlan_xmit_one(skb, dev, fdst, did_rsc); 2240 else 2241 kfree_skb(skb); 2242 return NETDEV_TX_OK; 2243} 2244 2245/* Walk the forwarding table and purge stale entries */ 2246static void vxlan_cleanup(unsigned long arg) 2247{ 2248 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg; 2249 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL; 2250 unsigned int h; 2251 2252 if (!netif_running(vxlan->dev)) 2253 return; 2254 2255 for (h = 0; h < FDB_HASH_SIZE; ++h) { 2256 struct hlist_node *p, *n; 2257 2258 spin_lock_bh(&vxlan->hash_lock); 2259 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2260 struct vxlan_fdb *f 2261 = container_of(p, struct vxlan_fdb, hlist); 2262 unsigned long timeout; 2263 2264 if (f->state & NUD_PERMANENT) 2265 continue; 2266 2267 timeout = f->used + vxlan->cfg.age_interval * HZ; 2268 if (time_before_eq(timeout, jiffies)) { 2269 netdev_dbg(vxlan->dev, 2270 "garbage collect %pM\n", 2271 f->eth_addr); 2272 f->state = NUD_STALE; 2273 vxlan_fdb_destroy(vxlan, f); 2274 } else if (time_before(timeout, next_timer)) 2275 next_timer = timeout; 2276 } 2277 spin_unlock_bh(&vxlan->hash_lock); 2278 } 2279 2280 mod_timer(&vxlan->age_timer, next_timer); 2281} 2282 2283static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) 2284{ 2285 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2286 __be32 vni = vxlan->default_dst.remote_vni; 2287 2288 spin_lock(&vn->sock_lock); 2289 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); 2290 spin_unlock(&vn->sock_lock); 2291} 2292 2293/* Setup stats when device is created */ 2294static int vxlan_init(struct net_device *dev) 2295{ 2296 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2297 if (!dev->tstats) 2298 return -ENOMEM; 2299 2300 return 0; 2301} 2302 2303static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan) 2304{ 2305 struct vxlan_fdb *f; 2306 2307 spin_lock_bh(&vxlan->hash_lock); 2308 f = __vxlan_find_mac(vxlan, all_zeros_mac); 2309 if (f) 2310 vxlan_fdb_destroy(vxlan, f); 2311 spin_unlock_bh(&vxlan->hash_lock); 2312} 2313 2314static void vxlan_uninit(struct net_device *dev) 2315{ 2316 struct vxlan_dev *vxlan = netdev_priv(dev); 2317 2318 vxlan_fdb_delete_default(vxlan); 2319 2320 free_percpu(dev->tstats); 2321} 2322 2323/* Start ageing timer and join group when device is brought up */ 2324static int vxlan_open(struct net_device *dev) 2325{ 2326 struct vxlan_dev *vxlan = netdev_priv(dev); 2327 int ret; 2328 2329 ret = vxlan_sock_add(vxlan); 2330 if (ret < 0) 2331 return ret; 2332 2333 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { 2334 ret = vxlan_igmp_join(vxlan); 2335 if (ret == -EADDRINUSE) 2336 ret = 0; 2337 if (ret) { 2338 vxlan_sock_release(vxlan); 2339 return ret; 2340 } 2341 } 2342 2343 if (vxlan->cfg.age_interval) 2344 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); 2345 2346 return ret; 2347} 2348 2349/* Purge the forwarding table */ 2350static void vxlan_flush(struct vxlan_dev *vxlan) 2351{ 2352 unsigned int h; 2353 2354 spin_lock_bh(&vxlan->hash_lock); 2355 for (h = 0; h < FDB_HASH_SIZE; ++h) { 2356 struct hlist_node *p, *n; 2357 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2358 struct vxlan_fdb *f 2359 = container_of(p, struct vxlan_fdb, hlist); 2360 /* the all_zeros_mac entry is deleted at vxlan_uninit */ 2361 if (!is_zero_ether_addr(f->eth_addr)) 2362 vxlan_fdb_destroy(vxlan, f); 2363 } 2364 } 2365 spin_unlock_bh(&vxlan->hash_lock); 2366} 2367 2368/* Cleanup timer and forwarding table on shutdown */ 2369static int vxlan_stop(struct net_device *dev) 2370{ 2371 struct vxlan_dev *vxlan = netdev_priv(dev); 2372 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2373 int ret = 0; 2374 2375 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && 2376 !vxlan_group_used(vn, vxlan)) 2377 ret = vxlan_igmp_leave(vxlan); 2378 2379 del_timer_sync(&vxlan->age_timer); 2380 2381 vxlan_flush(vxlan); 2382 vxlan_sock_release(vxlan); 2383 2384 return ret; 2385} 2386 2387/* Stub, nothing needs to be done. */ 2388static void vxlan_set_multicast_list(struct net_device *dev) 2389{ 2390} 2391 2392static int vxlan_change_mtu(struct net_device *dev, int new_mtu) 2393{ 2394 struct vxlan_dev *vxlan = netdev_priv(dev); 2395 struct vxlan_rdst *dst = &vxlan->default_dst; 2396 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, 2397 dst->remote_ifindex); 2398 bool use_ipv6 = false; 2399 2400 if (dst->remote_ip.sa.sa_family == AF_INET6) 2401 use_ipv6 = true; 2402 2403 /* This check is different than dev->max_mtu, because it looks at 2404 * the lowerdev->mtu, rather than the static dev->max_mtu 2405 */ 2406 if (lowerdev) { 2407 int max_mtu = lowerdev->mtu - 2408 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2409 if (new_mtu > max_mtu) 2410 return -EINVAL; 2411 } 2412 2413 dev->mtu = new_mtu; 2414 return 0; 2415} 2416 2417static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 2418{ 2419 struct vxlan_dev *vxlan = netdev_priv(dev); 2420 struct ip_tunnel_info *info = skb_tunnel_info(skb); 2421 __be16 sport, dport; 2422 2423 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2424 vxlan->cfg.port_max, true); 2425 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; 2426 2427 if (ip_tunnel_info_af(info) == AF_INET) { 2428 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 2429 struct rtable *rt; 2430 2431 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, 2432 info->key.u.ipv4.dst, 2433 &info->key.u.ipv4.src, NULL, info); 2434 if (IS_ERR(rt)) 2435 return PTR_ERR(rt); 2436 ip_rt_put(rt); 2437 } else { 2438#if IS_ENABLED(CONFIG_IPV6) 2439 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); 2440 struct dst_entry *ndst; 2441 2442 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, 2443 info->key.label, &info->key.u.ipv6.dst, 2444 &info->key.u.ipv6.src, NULL, info); 2445 if (IS_ERR(ndst)) 2446 return PTR_ERR(ndst); 2447 dst_release(ndst); 2448#else /* !CONFIG_IPV6 */ 2449 return -EPFNOSUPPORT; 2450#endif 2451 } 2452 info->key.tp_src = sport; 2453 info->key.tp_dst = dport; 2454 return 0; 2455} 2456 2457static const struct net_device_ops vxlan_netdev_ether_ops = { 2458 .ndo_init = vxlan_init, 2459 .ndo_uninit = vxlan_uninit, 2460 .ndo_open = vxlan_open, 2461 .ndo_stop = vxlan_stop, 2462 .ndo_start_xmit = vxlan_xmit, 2463 .ndo_get_stats64 = ip_tunnel_get_stats64, 2464 .ndo_set_rx_mode = vxlan_set_multicast_list, 2465 .ndo_change_mtu = vxlan_change_mtu, 2466 .ndo_validate_addr = eth_validate_addr, 2467 .ndo_set_mac_address = eth_mac_addr, 2468 .ndo_fdb_add = vxlan_fdb_add, 2469 .ndo_fdb_del = vxlan_fdb_delete, 2470 .ndo_fdb_dump = vxlan_fdb_dump, 2471 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 2472}; 2473 2474static const struct net_device_ops vxlan_netdev_raw_ops = { 2475 .ndo_init = vxlan_init, 2476 .ndo_uninit = vxlan_uninit, 2477 .ndo_open = vxlan_open, 2478 .ndo_stop = vxlan_stop, 2479 .ndo_start_xmit = vxlan_xmit, 2480 .ndo_get_stats64 = ip_tunnel_get_stats64, 2481 .ndo_change_mtu = vxlan_change_mtu, 2482 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 2483}; 2484 2485/* Info for udev, that this is a virtual tunnel endpoint */ 2486static struct device_type vxlan_type = { 2487 .name = "vxlan", 2488}; 2489 2490/* Calls the ndo_udp_tunnel_add of the caller in order to 2491 * supply the listening VXLAN udp ports. Callers are expected 2492 * to implement the ndo_udp_tunnel_add. 2493 */ 2494static void vxlan_push_rx_ports(struct net_device *dev) 2495{ 2496 struct vxlan_sock *vs; 2497 struct net *net = dev_net(dev); 2498 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2499 unsigned int i; 2500 2501 spin_lock(&vn->sock_lock); 2502 for (i = 0; i < PORT_HASH_SIZE; ++i) { 2503 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) 2504 udp_tunnel_push_rx_port(dev, vs->sock, 2505 (vs->flags & VXLAN_F_GPE) ? 2506 UDP_TUNNEL_TYPE_VXLAN_GPE : 2507 UDP_TUNNEL_TYPE_VXLAN); 2508 } 2509 spin_unlock(&vn->sock_lock); 2510} 2511 2512/* Initialize the device structure. */ 2513static void vxlan_setup(struct net_device *dev) 2514{ 2515 struct vxlan_dev *vxlan = netdev_priv(dev); 2516 unsigned int h; 2517 2518 eth_hw_addr_random(dev); 2519 ether_setup(dev); 2520 2521 dev->destructor = free_netdev; 2522 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 2523 2524 dev->features |= NETIF_F_LLTX; 2525 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2526 dev->features |= NETIF_F_RXCSUM; 2527 dev->features |= NETIF_F_GSO_SOFTWARE; 2528 2529 dev->vlan_features = dev->features; 2530 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 2531 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2532 netif_keep_dst(dev); 2533 dev->priv_flags |= IFF_NO_QUEUE; 2534 2535 INIT_LIST_HEAD(&vxlan->next); 2536 spin_lock_init(&vxlan->hash_lock); 2537 2538 init_timer_deferrable(&vxlan->age_timer); 2539 vxlan->age_timer.function = vxlan_cleanup; 2540 vxlan->age_timer.data = (unsigned long) vxlan; 2541 2542 vxlan->cfg.dst_port = htons(vxlan_port); 2543 2544 vxlan->dev = dev; 2545 2546 gro_cells_init(&vxlan->gro_cells, dev); 2547 2548 for (h = 0; h < FDB_HASH_SIZE; ++h) 2549 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); 2550} 2551 2552static void vxlan_ether_setup(struct net_device *dev) 2553{ 2554 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 2555 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 2556 dev->netdev_ops = &vxlan_netdev_ether_ops; 2557} 2558 2559static void vxlan_raw_setup(struct net_device *dev) 2560{ 2561 dev->header_ops = NULL; 2562 dev->type = ARPHRD_NONE; 2563 dev->hard_header_len = 0; 2564 dev->addr_len = 0; 2565 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 2566 dev->netdev_ops = &vxlan_netdev_raw_ops; 2567} 2568 2569static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { 2570 [IFLA_VXLAN_ID] = { .type = NLA_U32 }, 2571 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 2572 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, 2573 [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, 2574 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 2575 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, 2576 [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, 2577 [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, 2578 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 }, 2579 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, 2580 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, 2581 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, 2582 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, 2583 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 }, 2584 [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, 2585 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, 2586 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, 2587 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 }, 2588 [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, 2589 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, 2590 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, 2591 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, 2592 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 }, 2593 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 }, 2594 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, 2595 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, 2596 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, 2597}; 2598 2599static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 2600{ 2601 if (tb[IFLA_ADDRESS]) { 2602 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { 2603 pr_debug("invalid link address (not ethernet)\n"); 2604 return -EINVAL; 2605 } 2606 2607 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { 2608 pr_debug("invalid all zero ethernet address\n"); 2609 return -EADDRNOTAVAIL; 2610 } 2611 } 2612 2613 if (!data) 2614 return -EINVAL; 2615 2616 if (data[IFLA_VXLAN_ID]) { 2617 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); 2618 if (id >= VXLAN_VID_MASK) 2619 return -ERANGE; 2620 } 2621 2622 if (data[IFLA_VXLAN_PORT_RANGE]) { 2623 const struct ifla_vxlan_port_range *p 2624 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 2625 2626 if (ntohs(p->high) < ntohs(p->low)) { 2627 pr_debug("port range %u .. %u not valid\n", 2628 ntohs(p->low), ntohs(p->high)); 2629 return -EINVAL; 2630 } 2631 } 2632 2633 return 0; 2634} 2635 2636static void vxlan_get_drvinfo(struct net_device *netdev, 2637 struct ethtool_drvinfo *drvinfo) 2638{ 2639 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version)); 2640 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver)); 2641} 2642 2643static const struct ethtool_ops vxlan_ethtool_ops = { 2644 .get_drvinfo = vxlan_get_drvinfo, 2645 .get_link = ethtool_op_get_link, 2646}; 2647 2648static struct socket *vxlan_create_sock(struct net *net, bool ipv6, 2649 __be16 port, u32 flags) 2650{ 2651 struct socket *sock; 2652 struct udp_port_cfg udp_conf; 2653 int err; 2654 2655 memset(&udp_conf, 0, sizeof(udp_conf)); 2656 2657 if (ipv6) { 2658 udp_conf.family = AF_INET6; 2659 udp_conf.use_udp6_rx_checksums = 2660 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); 2661 udp_conf.ipv6_v6only = 1; 2662 } else { 2663 udp_conf.family = AF_INET; 2664 } 2665 2666 udp_conf.local_udp_port = port; 2667 2668 /* Open UDP socket */ 2669 err = udp_sock_create(net, &udp_conf, &sock); 2670 if (err < 0) 2671 return ERR_PTR(err); 2672 2673 return sock; 2674} 2675 2676/* Create new listen socket if needed */ 2677static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, 2678 __be16 port, u32 flags) 2679{ 2680 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2681 struct vxlan_sock *vs; 2682 struct socket *sock; 2683 unsigned int h; 2684 struct udp_tunnel_sock_cfg tunnel_cfg; 2685 2686 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 2687 if (!vs) 2688 return ERR_PTR(-ENOMEM); 2689 2690 for (h = 0; h < VNI_HASH_SIZE; ++h) 2691 INIT_HLIST_HEAD(&vs->vni_list[h]); 2692 2693 sock = vxlan_create_sock(net, ipv6, port, flags); 2694 if (IS_ERR(sock)) { 2695 pr_info("Cannot bind port %d, err=%ld\n", ntohs(port), 2696 PTR_ERR(sock)); 2697 kfree(vs); 2698 return ERR_CAST(sock); 2699 } 2700 2701 vs->sock = sock; 2702 atomic_set(&vs->refcnt, 1); 2703 vs->flags = (flags & VXLAN_F_RCV_FLAGS); 2704 2705 spin_lock(&vn->sock_lock); 2706 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2707 udp_tunnel_notify_add_rx_port(sock, 2708 (vs->flags & VXLAN_F_GPE) ? 2709 UDP_TUNNEL_TYPE_VXLAN_GPE : 2710 UDP_TUNNEL_TYPE_VXLAN); 2711 spin_unlock(&vn->sock_lock); 2712 2713 /* Mark socket as an encapsulation socket. */ 2714 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); 2715 tunnel_cfg.sk_user_data = vs; 2716 tunnel_cfg.encap_type = 1; 2717 tunnel_cfg.encap_rcv = vxlan_rcv; 2718 tunnel_cfg.encap_destroy = NULL; 2719 tunnel_cfg.gro_receive = vxlan_gro_receive; 2720 tunnel_cfg.gro_complete = vxlan_gro_complete; 2721 2722 setup_udp_tunnel_sock(net, sock, &tunnel_cfg); 2723 2724 return vs; 2725} 2726 2727static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) 2728{ 2729 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2730 struct vxlan_sock *vs = NULL; 2731 2732 if (!vxlan->cfg.no_share) { 2733 spin_lock(&vn->sock_lock); 2734 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, 2735 vxlan->cfg.dst_port, vxlan->flags); 2736 if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) { 2737 spin_unlock(&vn->sock_lock); 2738 return -EBUSY; 2739 } 2740 spin_unlock(&vn->sock_lock); 2741 } 2742 if (!vs) 2743 vs = vxlan_socket_create(vxlan->net, ipv6, 2744 vxlan->cfg.dst_port, vxlan->flags); 2745 if (IS_ERR(vs)) 2746 return PTR_ERR(vs); 2747#if IS_ENABLED(CONFIG_IPV6) 2748 if (ipv6) 2749 rcu_assign_pointer(vxlan->vn6_sock, vs); 2750 else 2751#endif 2752 rcu_assign_pointer(vxlan->vn4_sock, vs); 2753 vxlan_vs_add_dev(vs, vxlan); 2754 return 0; 2755} 2756 2757static int vxlan_sock_add(struct vxlan_dev *vxlan) 2758{ 2759 bool ipv6 = vxlan->flags & VXLAN_F_IPV6; 2760 bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; 2761 int ret = 0; 2762 2763 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); 2764#if IS_ENABLED(CONFIG_IPV6) 2765 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); 2766 if (ipv6 || metadata) 2767 ret = __vxlan_sock_add(vxlan, true); 2768#endif 2769 if (!ret && (!ipv6 || metadata)) 2770 ret = __vxlan_sock_add(vxlan, false); 2771 if (ret < 0) 2772 vxlan_sock_release(vxlan); 2773 return ret; 2774} 2775 2776static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, 2777 struct vxlan_config *conf) 2778{ 2779 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); 2780 struct vxlan_dev *vxlan = netdev_priv(dev), *tmp; 2781 struct vxlan_rdst *dst = &vxlan->default_dst; 2782 unsigned short needed_headroom = ETH_HLEN; 2783 int err; 2784 bool use_ipv6 = false; 2785 __be16 default_port = vxlan->cfg.dst_port; 2786 struct net_device *lowerdev = NULL; 2787 2788 if (conf->flags & VXLAN_F_GPE) { 2789 /* For now, allow GPE only together with COLLECT_METADATA. 2790 * This can be relaxed later; in such case, the other side 2791 * of the PtP link will have to be provided. 2792 */ 2793 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || 2794 !(conf->flags & VXLAN_F_COLLECT_METADATA)) { 2795 pr_info("unsupported combination of extensions\n"); 2796 return -EINVAL; 2797 } 2798 2799 vxlan_raw_setup(dev); 2800 } else { 2801 vxlan_ether_setup(dev); 2802 } 2803 2804 /* MTU range: 68 - 65535 */ 2805 dev->min_mtu = ETH_MIN_MTU; 2806 dev->max_mtu = ETH_MAX_MTU; 2807 2808 vxlan->net = src_net; 2809 2810 dst->remote_vni = conf->vni; 2811 2812 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip)); 2813 2814 /* Unless IPv6 is explicitly requested, assume IPv4 */ 2815 if (!dst->remote_ip.sa.sa_family) 2816 dst->remote_ip.sa.sa_family = AF_INET; 2817 2818 if (dst->remote_ip.sa.sa_family == AF_INET6 || 2819 vxlan->cfg.saddr.sa.sa_family == AF_INET6) { 2820 if (!IS_ENABLED(CONFIG_IPV6)) 2821 return -EPFNOSUPPORT; 2822 use_ipv6 = true; 2823 vxlan->flags |= VXLAN_F_IPV6; 2824 } 2825 2826 if (conf->label && !use_ipv6) { 2827 pr_info("label only supported in use with IPv6\n"); 2828 return -EINVAL; 2829 } 2830 2831 if (conf->remote_ifindex) { 2832 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); 2833 dst->remote_ifindex = conf->remote_ifindex; 2834 2835 if (!lowerdev) { 2836 pr_info("ifindex %d does not exist\n", dst->remote_ifindex); 2837 return -ENODEV; 2838 } 2839 2840#if IS_ENABLED(CONFIG_IPV6) 2841 if (use_ipv6) { 2842 struct inet6_dev *idev = __in6_dev_get(lowerdev); 2843 if (idev && idev->cnf.disable_ipv6) { 2844 pr_info("IPv6 is disabled via sysctl\n"); 2845 return -EPERM; 2846 } 2847 } 2848#endif 2849 2850 if (!conf->mtu) 2851 dev->mtu = lowerdev->mtu - 2852 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2853 2854 needed_headroom = lowerdev->hard_header_len; 2855 } else if (vxlan_addr_multicast(&dst->remote_ip)) { 2856 pr_info("multicast destination requires interface to be specified\n"); 2857 return -EINVAL; 2858 } 2859 2860 if (conf->mtu) { 2861 int max_mtu = ETH_MAX_MTU; 2862 2863 if (lowerdev) 2864 max_mtu = lowerdev->mtu; 2865 2866 max_mtu -= (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2867 2868 if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu) 2869 return -EINVAL; 2870 2871 dev->mtu = conf->mtu; 2872 2873 if (conf->mtu > max_mtu) 2874 dev->mtu = max_mtu; 2875 } 2876 2877 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) 2878 needed_headroom += VXLAN6_HEADROOM; 2879 else 2880 needed_headroom += VXLAN_HEADROOM; 2881 dev->needed_headroom = needed_headroom; 2882 2883 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2884 if (!vxlan->cfg.dst_port) { 2885 if (conf->flags & VXLAN_F_GPE) 2886 vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */ 2887 else 2888 vxlan->cfg.dst_port = default_port; 2889 } 2890 vxlan->flags |= conf->flags; 2891 2892 if (!vxlan->cfg.age_interval) 2893 vxlan->cfg.age_interval = FDB_AGE_DEFAULT; 2894 2895 list_for_each_entry(tmp, &vn->vxlan_list, next) { 2896 if (tmp->cfg.vni == conf->vni && 2897 (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 || 2898 tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && 2899 tmp->cfg.dst_port == vxlan->cfg.dst_port && 2900 (tmp->flags & VXLAN_F_RCV_FLAGS) == 2901 (vxlan->flags & VXLAN_F_RCV_FLAGS)) { 2902 pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni)); 2903 return -EEXIST; 2904 } 2905 } 2906 2907 dev->ethtool_ops = &vxlan_ethtool_ops; 2908 2909 /* create an fdb entry for a valid default destination */ 2910 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { 2911 err = vxlan_fdb_create(vxlan, all_zeros_mac, 2912 &vxlan->default_dst.remote_ip, 2913 NUD_REACHABLE|NUD_PERMANENT, 2914 NLM_F_EXCL|NLM_F_CREATE, 2915 vxlan->cfg.dst_port, 2916 vxlan->default_dst.remote_vni, 2917 vxlan->default_dst.remote_ifindex, 2918 NTF_SELF); 2919 if (err) 2920 return err; 2921 } 2922 2923 err = register_netdevice(dev); 2924 if (err) { 2925 vxlan_fdb_delete_default(vxlan); 2926 return err; 2927 } 2928 2929 list_add(&vxlan->next, &vn->vxlan_list); 2930 2931 return 0; 2932} 2933 2934static int vxlan_newlink(struct net *src_net, struct net_device *dev, 2935 struct nlattr *tb[], struct nlattr *data[]) 2936{ 2937 struct vxlan_config conf; 2938 2939 memset(&conf, 0, sizeof(conf)); 2940 2941 if (data[IFLA_VXLAN_ID]) 2942 conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); 2943 2944 if (data[IFLA_VXLAN_GROUP]) { 2945 conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); 2946 } else if (data[IFLA_VXLAN_GROUP6]) { 2947 if (!IS_ENABLED(CONFIG_IPV6)) 2948 return -EPFNOSUPPORT; 2949 2950 conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); 2951 conf.remote_ip.sa.sa_family = AF_INET6; 2952 } 2953 2954 if (data[IFLA_VXLAN_LOCAL]) { 2955 conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); 2956 conf.saddr.sa.sa_family = AF_INET; 2957 } else if (data[IFLA_VXLAN_LOCAL6]) { 2958 if (!IS_ENABLED(CONFIG_IPV6)) 2959 return -EPFNOSUPPORT; 2960 2961 /* TODO: respect scope id */ 2962 conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); 2963 conf.saddr.sa.sa_family = AF_INET6; 2964 } 2965 2966 if (data[IFLA_VXLAN_LINK]) 2967 conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); 2968 2969 if (data[IFLA_VXLAN_TOS]) 2970 conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 2971 2972 if (data[IFLA_VXLAN_TTL]) 2973 conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); 2974 2975 if (data[IFLA_VXLAN_LABEL]) 2976 conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & 2977 IPV6_FLOWLABEL_MASK; 2978 2979 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) 2980 conf.flags |= VXLAN_F_LEARN; 2981 2982 if (data[IFLA_VXLAN_AGEING]) 2983 conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); 2984 2985 if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY])) 2986 conf.flags |= VXLAN_F_PROXY; 2987 2988 if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC])) 2989 conf.flags |= VXLAN_F_RSC; 2990 2991 if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS])) 2992 conf.flags |= VXLAN_F_L2MISS; 2993 2994 if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS])) 2995 conf.flags |= VXLAN_F_L3MISS; 2996 2997 if (data[IFLA_VXLAN_LIMIT]) 2998 conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); 2999 3000 if (data[IFLA_VXLAN_COLLECT_METADATA] && 3001 nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) 3002 conf.flags |= VXLAN_F_COLLECT_METADATA; 3003 3004 if (data[IFLA_VXLAN_PORT_RANGE]) { 3005 const struct ifla_vxlan_port_range *p 3006 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 3007 conf.port_min = ntohs(p->low); 3008 conf.port_max = ntohs(p->high); 3009 } 3010 3011 if (data[IFLA_VXLAN_PORT]) 3012 conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 3013 3014 if (data[IFLA_VXLAN_UDP_CSUM] && 3015 !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) 3016 conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX; 3017 3018 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] && 3019 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) 3020 conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; 3021 3022 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] && 3023 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 3024 conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 3025 3026 if (data[IFLA_VXLAN_REMCSUM_TX] && 3027 nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) 3028 conf.flags |= VXLAN_F_REMCSUM_TX; 3029 3030 if (data[IFLA_VXLAN_REMCSUM_RX] && 3031 nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) 3032 conf.flags |= VXLAN_F_REMCSUM_RX; 3033 3034 if (data[IFLA_VXLAN_GBP]) 3035 conf.flags |= VXLAN_F_GBP; 3036 3037 if (data[IFLA_VXLAN_GPE]) 3038 conf.flags |= VXLAN_F_GPE; 3039 3040 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) 3041 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; 3042 3043 if (tb[IFLA_MTU]) 3044 conf.mtu = nla_get_u32(tb[IFLA_MTU]); 3045 3046 return vxlan_dev_configure(src_net, dev, &conf); 3047} 3048 3049static void vxlan_dellink(struct net_device *dev, struct list_head *head) 3050{ 3051 struct vxlan_dev *vxlan = netdev_priv(dev); 3052 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 3053 3054 spin_lock(&vn->sock_lock); 3055 if (!hlist_unhashed(&vxlan->hlist)) 3056 hlist_del_rcu(&vxlan->hlist); 3057 spin_unlock(&vn->sock_lock); 3058 3059 gro_cells_destroy(&vxlan->gro_cells); 3060 list_del(&vxlan->next); 3061 unregister_netdevice_queue(dev, head); 3062} 3063 3064static size_t vxlan_get_size(const struct net_device *dev) 3065{ 3066 3067 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ 3068 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */ 3069 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ 3070 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ 3071 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ 3072 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ 3073 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ 3074 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 3075 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */ 3076 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ 3077 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ 3078 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ 3079 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */ 3080 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 3081 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 3082 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 3083 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */ 3084 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */ 3085 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */ 3086 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */ 3087 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */ 3088 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */ 3089 0; 3090} 3091 3092static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) 3093{ 3094 const struct vxlan_dev *vxlan = netdev_priv(dev); 3095 const struct vxlan_rdst *dst = &vxlan->default_dst; 3096 struct ifla_vxlan_port_range ports = { 3097 .low = htons(vxlan->cfg.port_min), 3098 .high = htons(vxlan->cfg.port_max), 3099 }; 3100 3101 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni))) 3102 goto nla_put_failure; 3103 3104 if (!vxlan_addr_any(&dst->remote_ip)) { 3105 if (dst->remote_ip.sa.sa_family == AF_INET) { 3106 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP, 3107 dst->remote_ip.sin.sin_addr.s_addr)) 3108 goto nla_put_failure; 3109#if IS_ENABLED(CONFIG_IPV6) 3110 } else { 3111 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6, 3112 &dst->remote_ip.sin6.sin6_addr)) 3113 goto nla_put_failure; 3114#endif 3115 } 3116 } 3117 3118 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) 3119 goto nla_put_failure; 3120 3121 if (!vxlan_addr_any(&vxlan->cfg.saddr)) { 3122 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { 3123 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL, 3124 vxlan->cfg.saddr.sin.sin_addr.s_addr)) 3125 goto nla_put_failure; 3126#if IS_ENABLED(CONFIG_IPV6) 3127 } else { 3128 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6, 3129 &vxlan->cfg.saddr.sin6.sin6_addr)) 3130 goto nla_put_failure; 3131#endif 3132 } 3133 } 3134 3135 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || 3136 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || 3137 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || 3138 nla_put_u8(skb, IFLA_VXLAN_LEARNING, 3139 !!(vxlan->flags & VXLAN_F_LEARN)) || 3140 nla_put_u8(skb, IFLA_VXLAN_PROXY, 3141 !!(vxlan->flags & VXLAN_F_PROXY)) || 3142 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) || 3143 nla_put_u8(skb, IFLA_VXLAN_L2MISS, 3144 !!(vxlan->flags & VXLAN_F_L2MISS)) || 3145 nla_put_u8(skb, IFLA_VXLAN_L3MISS, 3146 !!(vxlan->flags & VXLAN_F_L3MISS)) || 3147 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA, 3148 !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) || 3149 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || 3150 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || 3151 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || 3152 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM, 3153 !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || 3154 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, 3155 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || 3156 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, 3157 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) || 3158 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX, 3159 !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) || 3160 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX, 3161 !!(vxlan->flags & VXLAN_F_REMCSUM_RX))) 3162 goto nla_put_failure; 3163 3164 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) 3165 goto nla_put_failure; 3166 3167 if (vxlan->flags & VXLAN_F_GBP && 3168 nla_put_flag(skb, IFLA_VXLAN_GBP)) 3169 goto nla_put_failure; 3170 3171 if (vxlan->flags & VXLAN_F_GPE && 3172 nla_put_flag(skb, IFLA_VXLAN_GPE)) 3173 goto nla_put_failure; 3174 3175 if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL && 3176 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL)) 3177 goto nla_put_failure; 3178 3179 return 0; 3180 3181nla_put_failure: 3182 return -EMSGSIZE; 3183} 3184 3185static struct net *vxlan_get_link_net(const struct net_device *dev) 3186{ 3187 struct vxlan_dev *vxlan = netdev_priv(dev); 3188 3189 return vxlan->net; 3190} 3191 3192static struct rtnl_link_ops vxlan_link_ops __read_mostly = { 3193 .kind = "vxlan", 3194 .maxtype = IFLA_VXLAN_MAX, 3195 .policy = vxlan_policy, 3196 .priv_size = sizeof(struct vxlan_dev), 3197 .setup = vxlan_setup, 3198 .validate = vxlan_validate, 3199 .newlink = vxlan_newlink, 3200 .dellink = vxlan_dellink, 3201 .get_size = vxlan_get_size, 3202 .fill_info = vxlan_fill_info, 3203 .get_link_net = vxlan_get_link_net, 3204}; 3205 3206struct net_device *vxlan_dev_create(struct net *net, const char *name, 3207 u8 name_assign_type, 3208 struct vxlan_config *conf) 3209{ 3210 struct nlattr *tb[IFLA_MAX + 1]; 3211 struct net_device *dev; 3212 int err; 3213 3214 memset(&tb, 0, sizeof(tb)); 3215 3216 dev = rtnl_create_link(net, name, name_assign_type, 3217 &vxlan_link_ops, tb); 3218 if (IS_ERR(dev)) 3219 return dev; 3220 3221 err = vxlan_dev_configure(net, dev, conf); 3222 if (err < 0) { 3223 free_netdev(dev); 3224 return ERR_PTR(err); 3225 } 3226 3227 err = rtnl_configure_link(dev, NULL); 3228 if (err < 0) { 3229 LIST_HEAD(list_kill); 3230 3231 vxlan_dellink(dev, &list_kill); 3232 unregister_netdevice_many(&list_kill); 3233 return ERR_PTR(err); 3234 } 3235 3236 return dev; 3237} 3238EXPORT_SYMBOL_GPL(vxlan_dev_create); 3239 3240static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, 3241 struct net_device *dev) 3242{ 3243 struct vxlan_dev *vxlan, *next; 3244 LIST_HEAD(list_kill); 3245 3246 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { 3247 struct vxlan_rdst *dst = &vxlan->default_dst; 3248 3249 /* In case we created vxlan device with carrier 3250 * and we loose the carrier due to module unload 3251 * we also need to remove vxlan device. In other 3252 * cases, it's not necessary and remote_ifindex 3253 * is 0 here, so no matches. 3254 */ 3255 if (dst->remote_ifindex == dev->ifindex) 3256 vxlan_dellink(vxlan->dev, &list_kill); 3257 } 3258 3259 unregister_netdevice_many(&list_kill); 3260} 3261 3262static int vxlan_netdevice_event(struct notifier_block *unused, 3263 unsigned long event, void *ptr) 3264{ 3265 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3266 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 3267 3268 if (event == NETDEV_UNREGISTER) 3269 vxlan_handle_lowerdev_unregister(vn, dev); 3270 else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) 3271 vxlan_push_rx_ports(dev); 3272 3273 return NOTIFY_DONE; 3274} 3275 3276static struct notifier_block vxlan_notifier_block __read_mostly = { 3277 .notifier_call = vxlan_netdevice_event, 3278}; 3279 3280static __net_init int vxlan_init_net(struct net *net) 3281{ 3282 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3283 unsigned int h; 3284 3285 INIT_LIST_HEAD(&vn->vxlan_list); 3286 spin_lock_init(&vn->sock_lock); 3287 3288 for (h = 0; h < PORT_HASH_SIZE; ++h) 3289 INIT_HLIST_HEAD(&vn->sock_list[h]); 3290 3291 return 0; 3292} 3293 3294static void __net_exit vxlan_exit_net(struct net *net) 3295{ 3296 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3297 struct vxlan_dev *vxlan, *next; 3298 struct net_device *dev, *aux; 3299 LIST_HEAD(list); 3300 3301 rtnl_lock(); 3302 for_each_netdev_safe(net, dev, aux) 3303 if (dev->rtnl_link_ops == &vxlan_link_ops) 3304 unregister_netdevice_queue(dev, &list); 3305 3306 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { 3307 /* If vxlan->dev is in the same netns, it has already been added 3308 * to the list by the previous loop. 3309 */ 3310 if (!net_eq(dev_net(vxlan->dev), net)) { 3311 gro_cells_destroy(&vxlan->gro_cells); 3312 unregister_netdevice_queue(vxlan->dev, &list); 3313 } 3314 } 3315 3316 unregister_netdevice_many(&list); 3317 rtnl_unlock(); 3318} 3319 3320static struct pernet_operations vxlan_net_ops = { 3321 .init = vxlan_init_net, 3322 .exit = vxlan_exit_net, 3323 .id = &vxlan_net_id, 3324 .size = sizeof(struct vxlan_net), 3325}; 3326 3327static int __init vxlan_init_module(void) 3328{ 3329 int rc; 3330 3331 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt)); 3332 3333 rc = register_pernet_subsys(&vxlan_net_ops); 3334 if (rc) 3335 goto out1; 3336 3337 rc = register_netdevice_notifier(&vxlan_notifier_block); 3338 if (rc) 3339 goto out2; 3340 3341 rc = rtnl_link_register(&vxlan_link_ops); 3342 if (rc) 3343 goto out3; 3344 3345 return 0; 3346out3: 3347 unregister_netdevice_notifier(&vxlan_notifier_block); 3348out2: 3349 unregister_pernet_subsys(&vxlan_net_ops); 3350out1: 3351 return rc; 3352} 3353late_initcall(vxlan_init_module); 3354 3355static void __exit vxlan_cleanup_module(void) 3356{ 3357 rtnl_link_unregister(&vxlan_link_ops); 3358 unregister_netdevice_notifier(&vxlan_notifier_block); 3359 unregister_pernet_subsys(&vxlan_net_ops); 3360 /* rcu_barrier() is called by netns */ 3361} 3362module_exit(vxlan_cleanup_module); 3363 3364MODULE_LICENSE("GPL"); 3365MODULE_VERSION(VXLAN_VERSION); 3366MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>"); 3367MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic"); 3368MODULE_ALIAS_RTNL_LINK("vxlan");