Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.9-rc2 (3346 lines, 86 kB)
/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
};

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}

#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}

/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
	struct vxlan_dev *vxlan;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
		if (vxlan->default_dst.remote_vni == vni)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, vni);
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family = AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = cpu_to_be32(VXLAN_N_VID),
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __be32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __be32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;

	dst_cache_reset(&rd->dst_cache);
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __be32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;

	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
		kfree(rd);
		return -ENOBUFS;
	}

	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}

static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  __be32 vni_field,
					  struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = vxlan_rco_start(vni_field);
	offset = start + vxlan_rco_offset(vni_field);

	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
				     start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}

static struct sk_buff **vxlan_gro_receive(struct sock *sk,
					  struct sk_buff **head,
					  struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));

		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = eth_gro_receive(head, skb);
	flush = 0;

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	/* Sets 'skb->inner_mac_header' since we are always called with
	 * 'skb->encapsulation' set.
	 */
	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
						  &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->cfg.addrmax &&
		    vxlan->addrcnt >= vxlan->cfg.addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}

static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list) {
		dst_cache_destroy(&rd->dst_cache);
		kfree(rd);
	}
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, __be32 *vni,
			   u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	__be32 vni;
	u32 ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	__be32 vni;
	u32 ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int *idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int err = 0;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				if (*idx < cb->args[2])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
skip:
				*idx += 1;
			}
		}
	}
out:
	return err;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;
	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (family == AF_INET && dev->vn4_sock &&
	    atomic_read(&dev->vn4_sock->refcnt) == 1)
		return false;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6 && dev->vn6_sock &&
	    atomic_read(&dev->vn6_sock->refcnt) == 1)
		return false;
#endif

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
			continue;
#if IS_ENABLED(CONFIG_IPV6)
		if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
			continue;
#endif

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}

static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
	struct vxlan_net *vn;

	if (!vs)
		return false;
	if (!atomic_dec_and_test(&vs->refcnt))
		return false;

	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	udp_tunnel_notify_del_rx_port(vs->sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	return true;
}

static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
	bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
	bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
#endif

	synchronize_net();

	if (ipv4) {
		udp_tunnel_sock_release(vxlan->vn4_sock->sock);
		kfree(vxlan->vn4_sock);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6) {
		udp_tunnel_sock_release(vxlan->vn6_sock->sock);
		kfree(vxlan->vn6_sock);
	}
#endif
}

/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = vxlan->vn4_sock->sock->sk;
		lock_sock(sk);
		ret = ip_mc_join_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		sk = vxlan->vn6_sock->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = vxlan->vn4_sock->sock->sk;
		lock_sock(sk);
		ret = ip_mc_leave_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		sk = vxlan->vn6_sock->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	if (!pskb_may_pull(skb, offset + sizeof(u16)))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;
	return true;
}

static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}

static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
				__be16 *protocol,
				struct sk_buff *skb, u32 vxflags)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet.
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur." However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;

	switch (gpe->next_protocol) {
	case VXLAN_GPE_NP_IPV4:
		*protocol = htons(ETH_P_IP);
		break;
	case VXLAN_GPE_NP_IPV6:
		*protocol = htons(ETH_P_IPV6);
		break;
	case VXLAN_GPE_NP_ETHERNET:
		*protocol = htons(ETH_P_TEB);
		break;
	default:
		return false;
	}

	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
	return true;
}

static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb)
{
	union vxlan_addr saddr;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		return false;

	/* Get address from the outer IP header */
	if (vxlan_get_sk_family(vs) == AF_INET) {
		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		return false;

	return true;
}

static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err) && log_ecn_error) {
		if (vxlan_get_sk_family(vs) == AF_INET)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &((struct iphdr *)oiph)->saddr,
					     ((struct iphdr *)oiph)->tos);
		else
			net_info_ratelimited("non-ECT from %pI6\n",
					     &((struct ipv6hdr *)oiph)->saddr);
	}
	return err <= 1;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto drop;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		goto drop;
	}
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 key32_to_tunnel_id(vni), sizeof(*md));

		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
		 * fields are to be ignored. The approach here maintains
		 * compatibility with previous stack code, and also is more
		 * robust and provides a little more security in adding
		 * extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		if (!vxlan_set_mac(vxlan, vs, skb))
			goto drop;
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&vxlan->gro_cells, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
				       struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL)
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

1464 reply->protocol = htons(ETH_P_IPV6); 1465 reply->dev = dev; 1466 skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); 1467 skb_push(reply, sizeof(struct ethhdr)); 1468 skb_reset_mac_header(reply); 1469 1470 ns = (struct nd_msg *)skb_transport_header(request); 1471 1472 daddr = eth_hdr(request)->h_source; 1473 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); 1474 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { 1475 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { 1476 daddr = ns->opt + i + sizeof(struct nd_opt_hdr); 1477 break; 1478 } 1479 } 1480 1481 /* Ethernet header */ 1482 ether_addr_copy(eth_hdr(reply)->h_dest, daddr); 1483 ether_addr_copy(eth_hdr(reply)->h_source, n->ha); 1484 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); 1485 reply->protocol = htons(ETH_P_IPV6); 1486 1487 skb_pull(reply, sizeof(struct ethhdr)); 1488 skb_reset_network_header(reply); 1489 skb_put(reply, sizeof(struct ipv6hdr)); 1490 1491 /* IPv6 header */ 1492 1493 pip6 = ipv6_hdr(reply); 1494 memset(pip6, 0, sizeof(struct ipv6hdr)); 1495 pip6->version = 6; 1496 pip6->priority = ipv6_hdr(request)->priority; 1497 pip6->nexthdr = IPPROTO_ICMPV6; 1498 pip6->hop_limit = 255; 1499 pip6->daddr = ipv6_hdr(request)->saddr; 1500 pip6->saddr = *(struct in6_addr *)n->primary_key; 1501 1502 skb_pull(reply, sizeof(struct ipv6hdr)); 1503 skb_reset_transport_header(reply); 1504 1505 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); 1506 1507 /* Neighbor Advertisement */ 1508 memset(na, 0, sizeof(*na)+na_olen); 1509 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; 1510 na->icmph.icmp6_router = isrouter; 1511 na->icmph.icmp6_override = 1; 1512 na->icmph.icmp6_solicited = 1; 1513 na->target = ns->target; 1514 ether_addr_copy(&na->opt[2], n->ha); 1515 na->opt[0] = ND_OPT_TARGET_LL_ADDR; 1516 na->opt[1] = na_olen >> 3; 1517 1518 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, 1519 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, 1520 csum_partial(na, sizeof(*na)+na_olen, 0)); 1521 1522 pip6->payload_len = htons(sizeof(*na)+na_olen); 1523 1524 skb_push(reply, sizeof(struct ipv6hdr)); 1525 1526 reply->ip_summed = CHECKSUM_UNNECESSARY; 1527 1528 return reply; 1529} 1530 1531static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) 1532{ 1533 struct vxlan_dev *vxlan = netdev_priv(dev); 1534 struct nd_msg *msg; 1535 const struct ipv6hdr *iphdr; 1536 const struct in6_addr *saddr, *daddr; 1537 struct neighbour *n; 1538 struct inet6_dev *in6_dev; 1539 1540 in6_dev = __in6_dev_get(dev); 1541 if (!in6_dev) 1542 goto out; 1543 1544 iphdr = ipv6_hdr(skb); 1545 saddr = &iphdr->saddr; 1546 daddr = &iphdr->daddr; 1547 1548 msg = (struct nd_msg *)skb_transport_header(skb); 1549 if (msg->icmph.icmp6_code != 0 || 1550 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) 1551 goto out; 1552 1553 if (ipv6_addr_loopback(daddr) || 1554 ipv6_addr_is_multicast(&msg->target)) 1555 goto out; 1556 1557 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); 1558 1559 if (n) { 1560 struct vxlan_fdb *f; 1561 struct sk_buff *reply; 1562 1563 if (!(n->nud_state & NUD_CONNECTED)) { 1564 neigh_release(n); 1565 goto out; 1566 } 1567 1568 f = vxlan_find_mac(vxlan, n->ha); 1569 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 1570 /* bridge-local neighbor */ 1571 neigh_release(n); 1572 goto out; 1573 } 1574 1575 reply = vxlan_na_create(skb, n, 1576 !!(f ? 
f->flags & NTF_ROUTER : 0)); 1577 1578 neigh_release(n); 1579 1580 if (reply == NULL) 1581 goto out; 1582 1583 if (netif_rx_ni(reply) == NET_RX_DROP) 1584 dev->stats.rx_dropped++; 1585 1586 } else if (vxlan->flags & VXLAN_F_L3MISS) { 1587 union vxlan_addr ipa = { 1588 .sin6.sin6_addr = msg->target, 1589 .sin6.sin6_family = AF_INET6, 1590 }; 1591 1592 vxlan_ip_miss(dev, &ipa); 1593 } 1594 1595out: 1596 consume_skb(skb); 1597 return NETDEV_TX_OK; 1598} 1599#endif 1600 1601static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) 1602{ 1603 struct vxlan_dev *vxlan = netdev_priv(dev); 1604 struct neighbour *n; 1605 1606 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) 1607 return false; 1608 1609 n = NULL; 1610 switch (ntohs(eth_hdr(skb)->h_proto)) { 1611 case ETH_P_IP: 1612 { 1613 struct iphdr *pip; 1614 1615 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 1616 return false; 1617 pip = ip_hdr(skb); 1618 n = neigh_lookup(&arp_tbl, &pip->daddr, dev); 1619 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { 1620 union vxlan_addr ipa = { 1621 .sin.sin_addr.s_addr = pip->daddr, 1622 .sin.sin_family = AF_INET, 1623 }; 1624 1625 vxlan_ip_miss(dev, &ipa); 1626 return false; 1627 } 1628 1629 break; 1630 } 1631#if IS_ENABLED(CONFIG_IPV6) 1632 case ETH_P_IPV6: 1633 { 1634 struct ipv6hdr *pip6; 1635 1636 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 1637 return false; 1638 pip6 = ipv6_hdr(skb); 1639 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev); 1640 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { 1641 union vxlan_addr ipa = { 1642 .sin6.sin6_addr = pip6->daddr, 1643 .sin6.sin6_family = AF_INET6, 1644 }; 1645 1646 vxlan_ip_miss(dev, &ipa); 1647 return false; 1648 } 1649 1650 break; 1651 } 1652#endif 1653 default: 1654 return false; 1655 } 1656 1657 if (n) { 1658 bool diff; 1659 1660 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha); 1661 if (diff) { 1662 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, 1663 dev->addr_len); 1664 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len); 1665 } 1666 neigh_release(n); 1667 return diff; 1668 } 1669 1670 return false; 1671} 1672 1673static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, 1674 struct vxlan_metadata *md) 1675{ 1676 struct vxlanhdr_gbp *gbp; 1677 1678 if (!md->gbp) 1679 return; 1680 1681 gbp = (struct vxlanhdr_gbp *)vxh; 1682 vxh->vx_flags |= VXLAN_HF_GBP; 1683 1684 if (md->gbp & VXLAN_GBP_DONT_LEARN) 1685 gbp->dont_learn = 1; 1686 1687 if (md->gbp & VXLAN_GBP_POLICY_APPLIED) 1688 gbp->policy_applied = 1; 1689 1690 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); 1691} 1692 1693static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags, 1694 __be16 protocol) 1695{ 1696 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh; 1697 1698 gpe->np_applied = 1; 1699 1700 switch (protocol) { 1701 case htons(ETH_P_IP): 1702 gpe->next_protocol = VXLAN_GPE_NP_IPV4; 1703 return 0; 1704 case htons(ETH_P_IPV6): 1705 gpe->next_protocol = VXLAN_GPE_NP_IPV6; 1706 return 0; 1707 case htons(ETH_P_TEB): 1708 gpe->next_protocol = VXLAN_GPE_NP_ETHERNET; 1709 return 0; 1710 } 1711 return -EPFNOSUPPORT; 1712} 1713 1714static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, 1715 int iphdr_len, __be32 vni, 1716 struct vxlan_metadata *md, u32 vxflags, 1717 bool udp_sum) 1718{ 1719 struct vxlanhdr *vxh; 1720 int min_headroom; 1721 int err; 1722 int type = udp_sum ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; 1723 __be16 inner_protocol = htons(ETH_P_TEB); 1724 1725 if ((vxflags & VXLAN_F_REMCSUM_TX) && 1726 skb->ip_summed == CHECKSUM_PARTIAL) { 1727 int csum_start = skb_checksum_start_offset(skb); 1728 1729 if (csum_start <= VXLAN_MAX_REMCSUM_START && 1730 !(csum_start & VXLAN_RCO_SHIFT_MASK) && 1731 (skb->csum_offset == offsetof(struct udphdr, check) || 1732 skb->csum_offset == offsetof(struct tcphdr, check))) 1733 type |= SKB_GSO_TUNNEL_REMCSUM; 1734 } 1735 1736 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len 1737 + VXLAN_HLEN + iphdr_len 1738 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); 1739 1740 /* Need space for new headers (invalidates iph ptr) */ 1741 err = skb_cow_head(skb, min_headroom); 1742 if (unlikely(err)) 1743 goto out_free; 1744 1745 skb = vlan_hwaccel_push_inside(skb); 1746 if (WARN_ON(!skb)) 1747 return -ENOMEM; 1748 1749 err = iptunnel_handle_offloads(skb, type); 1750 if (err) 1751 goto out_free; 1752 1753 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1754 vxh->vx_flags = VXLAN_HF_VNI; 1755 vxh->vx_vni = vxlan_vni_field(vni); 1756 1757 if (type & SKB_GSO_TUNNEL_REMCSUM) { 1758 unsigned int start; 1759 1760 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr); 1761 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset); 1762 vxh->vx_flags |= VXLAN_HF_RCO; 1763 1764 if (!skb_is_gso(skb)) { 1765 skb->ip_summed = CHECKSUM_NONE; 1766 skb->encapsulation = 0; 1767 } 1768 } 1769 1770 if (vxflags & VXLAN_F_GBP) 1771 vxlan_build_gbp_hdr(vxh, vxflags, md); 1772 if (vxflags & VXLAN_F_GPE) { 1773 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol); 1774 if (err < 0) 1775 goto out_free; 1776 inner_protocol = skb->protocol; 1777 } 1778 1779 skb_set_inner_protocol(skb, inner_protocol); 1780 return 0; 1781 1782out_free: 1783 kfree_skb(skb); 1784 return err; 1785} 1786 1787static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, 1788 struct sk_buff *skb, int oif, u8 tos, 1789 __be32 daddr, __be32 *saddr, 1790 struct dst_cache *dst_cache, 1791 const struct ip_tunnel_info *info) 1792{ 1793 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1794 struct rtable *rt = NULL; 1795 struct flowi4 fl4; 1796 1797 if (tos && !info) 1798 use_cache = false; 1799 if (use_cache) { 1800 rt = dst_cache_get_ip4(dst_cache, saddr); 1801 if (rt) 1802 return rt; 1803 } 1804 1805 memset(&fl4, 0, sizeof(fl4)); 1806 fl4.flowi4_oif = oif; 1807 fl4.flowi4_tos = RT_TOS(tos); 1808 fl4.flowi4_mark = skb->mark; 1809 fl4.flowi4_proto = IPPROTO_UDP; 1810 fl4.daddr = daddr; 1811 fl4.saddr = *saddr; 1812 1813 rt = ip_route_output_key(vxlan->net, &fl4); 1814 if (!IS_ERR(rt)) { 1815 *saddr = fl4.saddr; 1816 if (use_cache) 1817 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); 1818 } 1819 return rt; 1820} 1821 1822#if IS_ENABLED(CONFIG_IPV6) 1823static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, 1824 struct sk_buff *skb, int oif, u8 tos, 1825 __be32 label, 1826 const struct in6_addr *daddr, 1827 struct in6_addr *saddr, 1828 struct dst_cache *dst_cache, 1829 const struct ip_tunnel_info *info) 1830{ 1831 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1832 struct dst_entry *ndst; 1833 struct flowi6 fl6; 1834 int err; 1835 1836 if (tos && !info) 1837 use_cache = false; 1838 if (use_cache) { 1839 ndst = dst_cache_get_ip6(dst_cache, saddr); 1840 if (ndst) 1841 return ndst; 1842 } 1843 1844 memset(&fl6, 0, sizeof(fl6)); 1845 fl6.flowi6_oif = oif; 1846 fl6.daddr = *daddr; 1847 fl6.saddr = *saddr; 1848 fl6.flowlabel 
= ip6_make_flowinfo(RT_TOS(tos), label); 1849 fl6.flowi6_mark = skb->mark; 1850 fl6.flowi6_proto = IPPROTO_UDP; 1851 1852 err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 1853 vxlan->vn6_sock->sock->sk, 1854 &ndst, &fl6); 1855 if (err < 0) 1856 return ERR_PTR(err); 1857 1858 *saddr = fl6.saddr; 1859 if (use_cache) 1860 dst_cache_set_ip6(dst_cache, ndst, saddr); 1861 return ndst; 1862} 1863#endif 1864 1865/* Bypass encapsulation if the destination is local */ 1866static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1867 struct vxlan_dev *dst_vxlan) 1868{ 1869 struct pcpu_sw_netstats *tx_stats, *rx_stats; 1870 union vxlan_addr loopback; 1871 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 1872 struct net_device *dev = skb->dev; 1873 int len = skb->len; 1874 1875 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 1876 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); 1877 skb->pkt_type = PACKET_HOST; 1878 skb->encapsulation = 0; 1879 skb->dev = dst_vxlan->dev; 1880 __skb_pull(skb, skb_network_offset(skb)); 1881 1882 if (remote_ip->sa.sa_family == AF_INET) { 1883 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 1884 loopback.sa.sa_family = AF_INET; 1885#if IS_ENABLED(CONFIG_IPV6) 1886 } else { 1887 loopback.sin6.sin6_addr = in6addr_loopback; 1888 loopback.sa.sa_family = AF_INET6; 1889#endif 1890 } 1891 1892 if (dst_vxlan->flags & VXLAN_F_LEARN) 1893 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source); 1894 1895 u64_stats_update_begin(&tx_stats->syncp); 1896 tx_stats->tx_packets++; 1897 tx_stats->tx_bytes += len; 1898 u64_stats_update_end(&tx_stats->syncp); 1899 1900 if (netif_rx(skb) == NET_RX_SUCCESS) { 1901 u64_stats_update_begin(&rx_stats->syncp); 1902 rx_stats->rx_packets++; 1903 rx_stats->rx_bytes += len; 1904 u64_stats_update_end(&rx_stats->syncp); 1905 } else { 1906 dev->stats.rx_dropped++; 1907 } 1908} 1909 1910static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, 1911 struct vxlan_rdst *rdst, bool did_rsc) 1912{ 1913 struct dst_cache *dst_cache; 1914 struct ip_tunnel_info *info; 1915 struct vxlan_dev *vxlan = netdev_priv(dev); 1916 struct sock *sk; 1917 struct rtable *rt = NULL; 1918 const struct iphdr *old_iph; 1919 union vxlan_addr *dst; 1920 union vxlan_addr remote_ip, local_ip; 1921 union vxlan_addr *src; 1922 struct vxlan_metadata _md; 1923 struct vxlan_metadata *md = &_md; 1924 __be16 src_port = 0, dst_port; 1925 __be32 vni, label; 1926 __be16 df = 0; 1927 __u8 tos, ttl; 1928 int err; 1929 u32 flags = vxlan->flags; 1930 bool udp_sum = false; 1931 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); 1932 1933 info = skb_tunnel_info(skb); 1934 1935 if (rdst) { 1936 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; 1937 vni = rdst->remote_vni; 1938 dst = &rdst->remote_ip; 1939 src = &vxlan->cfg.saddr; 1940 dst_cache = &rdst->dst_cache; 1941 } else { 1942 if (!info) { 1943 WARN_ONCE(1, "%s: Missing encapsulation instructions\n", 1944 dev->name); 1945 goto drop; 1946 } 1947 dst_port = info->key.tp_dst ? 
: vxlan->cfg.dst_port; 1948 vni = tunnel_id_to_key32(info->key.tun_id); 1949 remote_ip.sa.sa_family = ip_tunnel_info_af(info); 1950 if (remote_ip.sa.sa_family == AF_INET) { 1951 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; 1952 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; 1953 } else { 1954 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; 1955 local_ip.sin6.sin6_addr = info->key.u.ipv6.src; 1956 } 1957 dst = &remote_ip; 1958 src = &local_ip; 1959 dst_cache = &info->dst_cache; 1960 } 1961 1962 if (vxlan_addr_any(dst)) { 1963 if (did_rsc) { 1964 /* short-circuited back to local bridge */ 1965 vxlan_encap_bypass(skb, vxlan, vxlan); 1966 return; 1967 } 1968 goto drop; 1969 } 1970 1971 old_iph = ip_hdr(skb); 1972 1973 ttl = vxlan->cfg.ttl; 1974 if (!ttl && vxlan_addr_multicast(dst)) 1975 ttl = 1; 1976 1977 tos = vxlan->cfg.tos; 1978 if (tos == 1) 1979 tos = ip_tunnel_get_dsfield(old_iph, skb); 1980 1981 label = vxlan->cfg.label; 1982 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 1983 vxlan->cfg.port_max, true); 1984 1985 if (info) { 1986 ttl = info->key.ttl; 1987 tos = info->key.tos; 1988 label = info->key.label; 1989 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); 1990 1991 if (info->options_len) 1992 md = ip_tunnel_info_opts(info); 1993 } else { 1994 md->gbp = skb->mark; 1995 } 1996 1997 if (dst->sa.sa_family == AF_INET) { 1998 if (!vxlan->vn4_sock) 1999 goto drop; 2000 sk = vxlan->vn4_sock->sock->sk; 2001 2002 rt = vxlan_get_route(vxlan, skb, 2003 rdst ? rdst->remote_ifindex : 0, tos, 2004 dst->sin.sin_addr.s_addr, 2005 &src->sin.sin_addr.s_addr, 2006 dst_cache, info); 2007 if (IS_ERR(rt)) { 2008 netdev_dbg(dev, "no route to %pI4\n", 2009 &dst->sin.sin_addr.s_addr); 2010 dev->stats.tx_carrier_errors++; 2011 goto tx_error; 2012 } 2013 2014 if (rt->dst.dev == dev) { 2015 netdev_dbg(dev, "circular route to %pI4\n", 2016 &dst->sin.sin_addr.s_addr); 2017 dev->stats.collisions++; 2018 goto rt_tx_error; 2019 } 2020 2021 /* Bypass encapsulation if the destination is local */ 2022 if (!info && rt->rt_flags & RTCF_LOCAL && 2023 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2024 struct vxlan_dev *dst_vxlan; 2025 2026 ip_rt_put(rt); 2027 dst_vxlan = vxlan_find_vni(vxlan->net, vni, 2028 dst->sa.sa_family, dst_port, 2029 vxlan->flags); 2030 if (!dst_vxlan) 2031 goto tx_error; 2032 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 2033 return; 2034 } 2035 2036 if (!info) 2037 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); 2038 else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) 2039 df = htons(IP_DF); 2040 2041 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2042 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2043 err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr), 2044 vni, md, flags, udp_sum); 2045 if (err < 0) 2046 goto xmit_tx_error; 2047 2048 udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, 2049 dst->sin.sin_addr.s_addr, tos, ttl, df, 2050 src_port, dst_port, xnet, !udp_sum); 2051#if IS_ENABLED(CONFIG_IPV6) 2052 } else { 2053 struct dst_entry *ndst; 2054 u32 rt6i_flags; 2055 2056 if (!vxlan->vn6_sock) 2057 goto drop; 2058 sk = vxlan->vn6_sock->sock->sk; 2059 2060 ndst = vxlan6_get_route(vxlan, skb, 2061 rdst ? 
rdst->remote_ifindex : 0, tos, 2062 label, &dst->sin6.sin6_addr, 2063 &src->sin6.sin6_addr, 2064 dst_cache, info); 2065 if (IS_ERR(ndst)) { 2066 netdev_dbg(dev, "no route to %pI6\n", 2067 &dst->sin6.sin6_addr); 2068 dev->stats.tx_carrier_errors++; 2069 goto tx_error; 2070 } 2071 2072 if (ndst->dev == dev) { 2073 netdev_dbg(dev, "circular route to %pI6\n", 2074 &dst->sin6.sin6_addr); 2075 dst_release(ndst); 2076 dev->stats.collisions++; 2077 goto tx_error; 2078 } 2079 2080 /* Bypass encapsulation if the destination is local */ 2081 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; 2082 if (!info && rt6i_flags & RTF_LOCAL && 2083 !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2084 struct vxlan_dev *dst_vxlan; 2085 2086 dst_release(ndst); 2087 dst_vxlan = vxlan_find_vni(vxlan->net, vni, 2088 dst->sa.sa_family, dst_port, 2089 vxlan->flags); 2090 if (!dst_vxlan) 2091 goto tx_error; 2092 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 2093 return; 2094 } 2095 2096 if (!info) 2097 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2098 2099 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2100 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2101 skb_scrub_packet(skb, xnet); 2102 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), 2103 vni, md, flags, udp_sum); 2104 if (err < 0) { 2105 dst_release(ndst); 2106 dev->stats.tx_errors++; 2107 return; 2108 } 2109 udp_tunnel6_xmit_skb(ndst, sk, skb, dev, 2110 &src->sin6.sin6_addr, 2111 &dst->sin6.sin6_addr, tos, ttl, 2112 label, src_port, dst_port, !udp_sum); 2113#endif 2114 } 2115 2116 return; 2117 2118drop: 2119 dev->stats.tx_dropped++; 2120 goto tx_free; 2121 2122xmit_tx_error: 2123 /* skb is already freed. */ 2124 skb = NULL; 2125rt_tx_error: 2126 ip_rt_put(rt); 2127tx_error: 2128 dev->stats.tx_errors++; 2129tx_free: 2130 dev_kfree_skb(skb); 2131} 2132 2133/* Transmit local packets over Vxlan 2134 * 2135 * Outer IP header inherits ECN and DF from inner header. 2136 * Outer UDP destination is the VXLAN assigned port. 
/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info;
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	info = skb_tunnel_info(skb);

	skb_reset_mac_header(skb);

	if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
		if (info && info->mode & IP_TUNNEL_INFO_TX)
			vxlan_xmit_one(skb, dev, NULL, false);
		else
			kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vxlan->flags & VXLAN_F_PROXY) {
		eth = eth_hdr(skb);
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 pskb_may_pull(skb, sizeof(struct ipv6hdr)
				       + sizeof(struct nd_msg)) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
#endif
	}

	eth = eth_hdr(skb);
	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
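
/* Editorial worked example for vxlan_cleanup() above, with the default
 * age_interval (FDB_AGE_DEFAULT, five minutes) and a rescan every
 * FDB_AGE_INTERVAL (ten seconds): an entry last used at time T expires at
 *
 *	timeout = T + 300 * HZ		(in jiffies)
 *
 * and the timer is re-armed to min(earliest pending expiry, now + 10 s),
 * so entries are purged close to their nominal age; the timer is
 * deferrable, so an otherwise idle system may delay the scan slightly.
 */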
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__be32 vni = vxlan->default_dst.remote_vni;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan);

	free_percpu(dev->tstats);
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int ret;

	ret = vxlan_sock_add(vxlan);
	if (ret < 0)
		return ret;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret == -EADDRINUSE)
			ret = 0;
		if (ret) {
			vxlan_sock_release(vxlan);
			return ret;
		}
	}

	if (vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted in vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);
	vxlan_sock_release(vxlan);

	return ret;
}
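
/* Editorial summary of the ndo_open/ndo_stop pairing above: vxlan_open()
 * grabs the UDP socket(s), joins the multicast group when the default
 * destination is multicast, and arms the FDB ageing timer; vxlan_stop()
 * leaves the group only when no other device still uses it
 * (vxlan_group_used()), stops the timer, flushes learned FDB entries and
 * drops the socket references.
 */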
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static int __vxlan_change_mtu(struct net_device *dev,
			      struct net_device *lowerdev,
			      struct vxlan_rdst *dst, int new_mtu, bool strict)
{
	int max_mtu = IP_MAX_MTU;

	if (lowerdev)
		max_mtu = lowerdev->mtu;

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		max_mtu -= VXLAN6_HEADROOM;
	else
		max_mtu -= VXLAN_HEADROOM;

	if (new_mtu < 68)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}

static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
							 dst->remote_ifindex);
	return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
}

static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	__be16 sport, dport;

	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				  vxlan->cfg.port_max, true);
	dport = info->key.tp_dst ? : vxlan->cfg.dst_port;

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;

		if (!vxlan->vn4_sock)
			return -EINVAL;
		rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
				     info->key.u.ipv4.dst,
				     &info->key.u.ipv4.src, NULL, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
		ip_rt_put(rt);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct dst_entry *ndst;

		if (!vxlan->vn6_sock)
			return -EINVAL;
		ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
					info->key.label, &info->key.u.ipv6.dst,
					&info->key.u.ipv6.src, NULL, info);
		if (IS_ERR(ndst))
			return PTR_ERR(ndst);
		dst_release(ndst);
#else /* !CONFIG_IPV6 */
		return -EPFNOSUPPORT;
#endif
	}
	info->key.tp_src = sport;
	info->key.tp_dst = dport;
	return 0;
}

static const struct net_device_ops vxlan_netdev_ether_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};

static const struct net_device_ops vxlan_netdev_raw_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};

/* Info for udev: this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
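
/* Editorial worked example for __vxlan_change_mtu() above and the
 * headroom logic in vxlan_dev_configure() below: VXLAN_HEADROOM covers
 * the outer IPv4, UDP and VXLAN headers plus the inner Ethernet header,
 *
 *	20 (IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (Ethernet) = 50 bytes,
 *
 * and VXLAN6_HEADROOM substitutes a 40-byte IPv6 header, giving 70.
 * Over a 1500-byte lower device, an IPv4-underlay vxlan device thus
 * defaults to a 1450-byte MTU (1430 with an IPv6 underlay).
 */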
/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening VXLAN UDP ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 */
static void vxlan_push_rx_ports(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
			udp_tunnel_push_rx_port(dev, vs->sock,
						(vs->flags & VXLAN_F_GPE) ?
						UDP_TUNNEL_TYPE_VXLAN_GPE :
						UDP_TUNNEL_TYPE_VXLAN);
	}
	spin_unlock(&vn->sock_lock);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->cfg.dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	gro_cells_init(&vxlan->gro_cells, dev);

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static void vxlan_ether_setup(struct net_device *dev)
{
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &vxlan_netdev_ether_ops;
}

static void vxlan_raw_setup(struct net_device *dev)
{
	dev->header_ops = NULL;
	dev->type = ARPHRD_NONE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->netdev_ops = &vxlan_netdev_raw_ops;
}
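
/* Editorial example (assuming the iproute2 front end): the attributes
 * validated below map onto "ip link" options, e.g.
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *		dev eth0 dstport 4789
 *
 * carries IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK and
 * IFLA_VXLAN_PORT into vxlan_newlink().
 */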
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LABEL]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_GPE]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		/* VNIs are 24 bit, so 0 .. VXLAN_N_VID - 1 are valid */
		if (id >= VXLAN_N_VID)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
		udp_conf.ipv6_v6only = 1;
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
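
/* Editorial note: vxlan_socket_create() below hands the socket opened by
 * vxlan_create_sock() to setup_udp_tunnel_sock(), after which the UDP
 * stack delivers every datagram arriving on the bound port to
 * vxlan_rcv() (and GRO to vxlan_gro_receive()), with sk_user_data
 * pointing back at the vxlan_sock.
 */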
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
			PTR_ERR(sock));
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	udp_tunnel_notify_add_rx_port(sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_rcv;
	tunnel_cfg.encap_destroy = NULL;
	tunnel_cfg.gro_receive = vxlan_gro_receive;
	tunnel_cfg.gro_complete = vxlan_gro_complete;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}

static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
				     vxlan->cfg.dst_port, vxlan->flags);
		if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
			spin_unlock(&vn->sock_lock);
			return -EBUSY;
		}
		spin_unlock(&vn->sock_lock);
	}
	if (!vs)
		vs = vxlan_socket_create(vxlan->net, ipv6,
					 vxlan->cfg.dst_port, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		vxlan->vn6_sock = vs;
	else
#endif
		vxlan->vn4_sock = vs;
	vxlan_vs_add_dev(vs, vxlan);
	return 0;
}

static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
	bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
	bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
	int ret = 0;

	vxlan->vn4_sock = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	vxlan->vn6_sock = NULL;
	if (ipv6 || metadata)
		ret = __vxlan_sock_add(vxlan, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = __vxlan_sock_add(vxlan, false);
	if (ret < 0)
		vxlan_sock_release(vxlan);
	return ret;
}
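
/* Editorial summary of vxlan_sock_add() above (with IPv6 compiled in):
 *
 *	flags				sockets opened
 *	----------------------------	-----------------------
 *	(neither)			vn4_sock only
 *	VXLAN_F_IPV6			vn6_sock only
 *	VXLAN_F_COLLECT_METADATA	vn4_sock and vn6_sock
 *
 * so a metadata-based device terminates both address families on the
 * same UDP port.
 */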
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
	struct vxlan_rdst *dst = &vxlan->default_dst;
	unsigned short needed_headroom = ETH_HLEN;
	int err;
	bool use_ipv6 = false;
	__be16 default_port = vxlan->cfg.dst_port;
	struct net_device *lowerdev = NULL;

	if (conf->flags & VXLAN_F_GPE) {
		/* For now, allow GPE only together with COLLECT_METADATA.
		 * This can be relaxed later; in such case, the other side
		 * of the PtP link will have to be provided.
		 */
		if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
		    !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
			pr_info("unsupported combination of extensions\n");
			return -EINVAL;
		}

		vxlan_raw_setup(dev);
	} else {
		vxlan_ether_setup(dev);
	}

	vxlan->net = src_net;

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	if (!dst->remote_ip.sa.sa_family)
		dst->remote_ip.sa.sa_family = AF_INET;

	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
	    vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;
		use_ipv6 = true;
		vxlan->flags |= VXLAN_F_IPV6;
	}

	if (conf->label && !use_ipv6) {
		pr_info("label only supported in use with IPv6\n");
		return -EINVAL;
	}

	if (conf->remote_ifindex) {
		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
		dst->remote_ifindex = conf->remote_ifindex;

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);

			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
		}
#endif

		if (!conf->mtu)
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		needed_headroom = lowerdev->hard_header_len;
	} else if (vxlan_addr_multicast(&dst->remote_ip)) {
		pr_info("multicast destination requires interface to be specified\n");
		return -EINVAL;
	}

	if (conf->mtu) {
		err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
		if (err)
			return err;
	}

	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
		needed_headroom += VXLAN6_HEADROOM;
	else
		needed_headroom += VXLAN_HEADROOM;
	dev->needed_headroom = needed_headroom;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
	if (!vxlan->cfg.dst_port) {
		if (conf->flags & VXLAN_F_GPE)
			vxlan->cfg.dst_port = htons(4790); /* IANA assigned VXLAN-GPE port */
		else
			vxlan->cfg.dst_port = default_port;
	}
	vxlan->flags |= conf->flags;

	if (!vxlan->cfg.age_interval)
		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;

	list_for_each_entry(tmp, &vn->vxlan_list, next) {
		if (tmp->cfg.vni == conf->vni &&
		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
		     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
		    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
		    (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
			pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
			return -EEXIST;
		}
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}
	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}

static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_config conf;

	memset(&conf, 0, sizeof(conf));

	if (data[IFLA_VXLAN_ID])
		conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));

	if (data[IFLA_VXLAN_GROUP]) {
		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		conf.remote_ip.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		conf.saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		conf.saddr.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LINK])
		conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);

	if (data[IFLA_VXLAN_TOS])
		conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (data[IFLA_VXLAN_LABEL])
		conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
			     IPV6_FLOWLABEL_MASK;

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		conf.flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		conf.flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		conf.flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		conf.flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		conf.flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_COLLECT_METADATA] &&
	    nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
		conf.flags |= VXLAN_F_COLLECT_METADATA;

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		conf.port_min = ntohs(p->low);
		conf.port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] &&
	    !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (data[IFLA_VXLAN_REMCSUM_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
		conf.flags |= VXLAN_F_REMCSUM_TX;

	if (data[IFLA_VXLAN_REMCSUM_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
		conf.flags |= VXLAN_F_REMCSUM_RX;

	if (data[IFLA_VXLAN_GBP])
		conf.flags |= VXLAN_F_GBP;
	if (data[IFLA_VXLAN_GPE])
		conf.flags |= VXLAN_F_GPE;

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;

	if (tb[IFLA_MTU])
		conf.mtu = nla_get_u32(tb[IFLA_MTU]);

	return vxlan_dev_configure(src_net, dev, &conf);
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	gro_cells_destroy(&vxlan->gro_cells);
	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GPE &&
	    nla_put_flag(skb, IFLA_VXLAN_GPE))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};

struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type,
				    struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = vxlan_dev_configure(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	err = rtnl_configure_link(dev, NULL);
	if (err < 0) {
		LIST_HEAD(list_kill);

		vxlan_dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);
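
/* Editorial sketch (not built; the function and device names are
 * illustrative): how an in-kernel user might create a metadata-based
 * (collect-md) VXLAN device on the IANA-assigned port via the helper
 * exported above.
 */
#if 0
static struct net_device *example_vxlan_md_create(struct net *net)
{
	struct vxlan_config conf;

	memset(&conf, 0, sizeof(conf));
	conf.dst_port = htons(4789);		/* IANA-assigned VXLAN port */
	conf.flags = VXLAN_F_COLLECT_METADATA;	/* per-packet ip_tunnel_info */

	return vxlan_dev_create(net, "vxlan_md0", NET_NAME_USER, &conf);
}
#endif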
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created the vxlan device with carrier
		 * and we lose the carrier due to module unload,
		 * we also need to remove the vxlan device. In other
		 * cases it's not necessary and remote_ifindex
		 * is 0 here, so nothing matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}

static int vxlan_netdevice_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);
	else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
		vxlan_push_rx_ports(dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_netdevice_event,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net)) {
			gro_cells_destroy(&vxlan->gro_cells);
			unregister_netdevice_queue(vxlan->dev, &list);
		}
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	return rc;
}
late_initcall(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");