/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static unsigned int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);

static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	__be32		  vni;
	u8		  flags;	/* see ndm_flags */
};

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}

#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}
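/* Note: the helpers below parse/emit the NDA_DST netlink attribute; a
 * 16-byte payload is taken as an IPv6 address and a 4-byte payload as
 * IPv4 (e.g. "bridge fdb ... dst 10.0.0.1" sends a 4-byte attribute).
 */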
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
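/* Note: receive-side lookup is two-level: vs_head() hashes the local UDP
 * port to find the vxlan_sock, then vni_head() hashes the VNI to find the
 * device bound to that socket (vxlan_find_sock/vxlan_vs_find_vni below).
 */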
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
					   __be32 vni)
{
	struct vxlan_dev_node *node;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
		if (node->vxlan->default_dst.remote_vni != vni)
			continue;

		if (IS_ENABLED(CONFIG_IPV6)) {
			const struct vxlan_config *cfg = &node->vxlan->cfg;

			if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
			    cfg->remote_ifindex != ifindex)
				continue;
		}

		return node->vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
					__be32 vni, sa_family_t family,
					__be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, ifindex, vni);
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
		ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
	} else
		ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
		goto nla_put_failure;
	if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
	    nla_put_u32(skb, NDA_SRC_VNI,
			be32_to_cpu(fdb->vni)))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = cpu_to_be32(VXLAN_N_VID),
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
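/* Note: get_unaligned() above reads 8 bytes starting at the MAC address;
 * the shift discards the two bytes lying past it (the high-order bytes on
 * little-endian after <<= 16, the low-order bytes on big-endian after
 * >>= 16), so only the 6 MAC bytes feed hash_64().
 */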
static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
	/* use 1 byte of OUI and 3 bytes of NIC */
	u32 key = get_unaligned((u32 *)(addr + 2));

	return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac, __be32 vni)
{
	if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
		return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
	else
		return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac, __be32 vni)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr)) {
			if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
				if (vni == f->vni)
					return f;
			} else {
				return f;
			}
		}
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac, __be32 vni)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac, vni);
	if (f)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __be32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __be32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;

	dst_cache_reset(&rd->dst_cache);
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __be32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;

	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
		kfree(rd);
		return -ENOBUFS;
	}

	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}
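/* Note: remote checksum offload (RCO) encodes the inner checksum start and
 * offset in otherwise-reserved bits of the VNI word (VXLAN_HF_RCO,
 * vxlan_rco_start()/vxlan_rco_offset()), letting the receiver patch up the
 * checksum instead of the sender computing it; the GRO helper below undoes
 * that before aggregation.
 */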
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  __be32 vni_field,
					  struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = vxlan_rco_start(vni_field);
	offset = start + vxlan_rco_offset(vni_field);

	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
				     start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}

static struct sk_buff **vxlan_gro_receive(struct sock *sk,
					  struct sk_buff **head,
					  struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));

		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = call_gro_receive(eth_gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	skb->remcsum_offload = 0;
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	/* Sets 'skb->inner_mac_header' since we are always called with
	 * 'skb->encapsulation' set.
	 */
	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 src_vni, __be32 vni,
			    __u32 ifindex, __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;
	int rc;

	f = __vxlan_find_mac(vxlan, mac, src_vni);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->cfg.addrmax &&
		    vxlan->addrcnt >= vxlan->cfg.addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		f->vni = src_vni;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
		if (rc < 0) {
			kfree(f);
			return rc;
		}

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac, src_vni));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}

static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list) {
		dst_cache_destroy(&rd->dst_cache);
		kfree(rd);
	}
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

static void vxlan_dst_free(struct rcu_head *head)
{
	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

	dst_cache_destroy(&rd->dst_cache);
	kfree(rd);
}

static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
				  struct vxlan_rdst *rd)
{
	list_del_rcu(&rd->list);
	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
	call_rcu(&rd->rcu, vxlan_dst_free);
}
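/* Note: vxlan_fdb_parse() below maps the netlink attributes of a
 * "bridge fdb" request onto FDB fields; e.g.
 *   bridge fdb add 00:11:22:33:44:55 dev vxlan0 dst 10.0.0.2 port 4789 vni 100
 * supplies NDA_DST, NDA_PORT and NDA_VNI, with device defaults filling in
 * anything omitted.
 */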
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
			   __be32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_SRC_VNI]) {
		if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
			return -EINVAL;
		*src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
	} else {
		*src_vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	__be32 src_vni, vni;
	u32 ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, src_vni, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
			      const unsigned char *addr, union vxlan_addr ip,
			      __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
			      u16 vid)
{
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	int err = -ENOENT;

	f = vxlan_find_mac(vxlan, addr, src_vni);
	if (!f)
		return err;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		vxlan_fdb_dst_destroy(vxlan, f, rd);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	return 0;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	union vxlan_addr ip;
	__be32 src_vni, vni;
	__be16 port;
	u32 ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
				 vid);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int *idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int err = 0;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				if (*idx < cb->args[2])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
skip:
				*idx += 1;
			}
		}
	}
out:
	return err;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac,
			u32 src_ifindex, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	u32 ifindex = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (src_ip->sa.sa_family == AF_INET6 &&
	    (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
		ifindex = src_ifindex;
#endif

	f = vxlan_find_mac(vxlan, src_mac, vni);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
			   rdst->remote_ifindex == ifindex))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & (NUD_PERMANENT | NUD_NOARP))
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vni,
					 vxlan->default_dst.remote_vni,
					 ifindex, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
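/* Note: several VXLAN devices can share one underlying UDP socket, so the
 * helpers below check whether any other running device still uses the same
 * multicast group (and ifindex) before the last user leaves the group.
 */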
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;
	struct vxlan_sock *sock4;
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6;
#endif
	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

	sock4 = rtnl_dereference(dev->vn4_sock);

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
		return false;
#if IS_ENABLED(CONFIG_IPV6)
	sock6 = rtnl_dereference(dev->vn6_sock);
	if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
		return false;
#endif

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (family == AF_INET &&
		    rtnl_dereference(vxlan->vn4_sock) != sock4)
			continue;
#if IS_ENABLED(CONFIG_IPV6)
		if (family == AF_INET6 &&
		    rtnl_dereference(vxlan->vn6_sock) != sock6)
			continue;
#endif

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}

static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
	struct vxlan_net *vn;

	if (!vs)
		return false;
	if (!refcount_dec_and_test(&vs->refcnt))
		return false;

	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	udp_tunnel_notify_del_rx_port(vs->sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	return true;
}

static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

	RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
#endif

	RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
	synchronize_net();

	vxlan_vs_del_dev(vxlan);

	if (__vxlan_sock_release_prep(sock4)) {
		udp_tunnel_sock_release(sock4->sock);
		kfree(sock4);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (__vxlan_sock_release_prep(sock6)) {
		udp_tunnel_sock_release(sock6->sock);
		kfree(sock6);
	}
#endif
}

/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_join_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}
/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_leave_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	if (!pskb_may_pull(skb, offset + sizeof(u16)))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;
	return true;
}

static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
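/* Note: VXLAN-GPE replaces the implicit inner-Ethernet assumption with an
 * explicit Next Protocol field, so a GPE socket can carry bare IPv4/IPv6
 * payloads as well as Ethernet frames; the parser below maps that field
 * onto an ethertype for the rest of the receive path.
 */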
static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
				__be16 *protocol,
				struct sk_buff *skb, u32 vxflags)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet."
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur."  However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;

	switch (gpe->next_protocol) {
	case VXLAN_GPE_NP_IPV4:
		*protocol = htons(ETH_P_IP);
		break;
	case VXLAN_GPE_NP_IPV6:
		*protocol = htons(ETH_P_IPV6);
		break;
	case VXLAN_GPE_NP_ETHERNET:
		*protocol = htons(ETH_P_TEB);
		break;
	default:
		return false;
	}

	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
	return true;
}

static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb, __be32 vni)
{
	union vxlan_addr saddr;
	u32 ifindex = skb->dev->ifindex;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		return false;

	/* Get address from the outer IP header */
	if (vxlan_get_sk_family(vs) == AF_INET) {
		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
		return false;

	return true;
}

static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err) && log_ecn_error) {
		if (vxlan_get_sk_family(vs) == AF_INET)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &((struct iphdr *)oiph)->saddr,
					     ((struct iphdr *)oiph)->tos);
		else
			net_info_ratelimited("non-ECT from %pI6\n",
					     &((struct ipv6hdr *)oiph)->saddr);
	}
	return err <= 1;
}
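/* Note: the receive path below runs in roughly this order: validate the
 * VXLAN header and VNI flag, find the vxlan_sock and device for the VNI,
 * strip GPE/RCO/GBP extensions, drop anything with leftover flag bits,
 * rebuild skb metadata (MAC/learning or raw mode), undo ECN mangling and
 * finally hand the inner packet to gro_cells_receive().
 */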
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;
	__be32 vni = 0;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto drop;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		goto drop;
	}
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);

	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 key32_to_tunnel_id(vni), sizeof(*md));

		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
		 * fields are to be ignored. The approach here maintains
		 * compatibility with previous stack code, and also is more
		 * robust and provides a little more security in adding
		 * extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		if (!vxlan_set_mac(vxlan, vs, skb, vni))
			goto drop;
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&vxlan->gro_cells, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
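/* Note: with VXLAN_F_PROXY set, the device answers ARP requests itself from
 * its neighbour/FDB state (arp_reduce() below) instead of flooding them to
 * every remote VTEP.
 */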
static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha, vni);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
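/* Note: the IPv6 code below does the same suppression for neighbour
 * discovery: vxlan_na_create() synthesizes a neighbour advertisement in
 * reply to a solicitation when the target is already known locally.
 */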
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
				       struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL || !pskb_may_pull(request, request->len))
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_reset_mac_header(reply);

	ns = (struct nd_msg *)(ipv6_hdr(request) + 1);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_network_offset(request) -
		sizeof(struct ipv6hdr) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_reset_network_header(reply);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_reset_transport_header(reply);

	/* Neighbor Advertisement */
	na = skb_put_zero(reply, sizeof(*na) + na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}

static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		goto out;

	iphdr = ipv6_hdr(skb);
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)(iphdr + 1);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha, vni);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
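/* Note: route short-circuiting (VXLAN_F_RSC) rewrites the destination MAC
 * of routed traffic to the final neighbour's address when that neighbour
 * is already known, so the frame skips a trip through the remote router;
 * the helper returns true only when it actually rewrote the header.
 */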
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}

static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= VXLAN_HF_GBP;

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}

static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
			       __be16 protocol)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;

	gpe->np_applied = 1;

	switch (protocol) {
	case htons(ETH_P_IP):
		gpe->next_protocol = VXLAN_GPE_NP_IPV4;
		return 0;
	case htons(ETH_P_IPV6):
		gpe->next_protocol = VXLAN_GPE_NP_IPV6;
		return 0;
	case htons(ETH_P_TEB):
		gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
		return 0;
	}
	return -EPFNOSUPPORT;
}

static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
			   int iphdr_len, __be32 vni,
			   struct vxlan_metadata *md, u32 vxflags,
			   bool udp_sum)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 inner_protocol = htons(ETH_P_TEB);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check)))
			type |= SKB_GSO_TUNNEL_REMCSUM;
	}

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + iphdr_len;

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	vxh = __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		unsigned int start;

		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
		vxh->vx_flags |= VXLAN_HF_RCO;

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);
	if (vxflags & VXLAN_F_GPE) {
		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
		if (err < 0)
			return err;
		inner_protocol = skb->protocol;
	}

	skb_set_inner_protocol(skb, inner_protocol);
	return 0;
}
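/* Note: each remote keeps a dst_cache of the routed path; the lookup
 * helpers below consult it first and fall back to a full route lookup.
 * The cache is skipped when the ToS can vary per packet (tos set without
 * per-flow tunnel info), since one cached route could then be wrong for
 * some packets.
 */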
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
				      struct vxlan_sock *sock4,
				      struct sk_buff *skb, int oif, u8 tos,
				      __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
				      struct dst_cache *dst_cache,
				      const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	if (!sock4)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = *saddr;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;

	rt = ip_route_output_key(vxlan->net, &fl4);
	if (likely(!IS_ERR(rt))) {
		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n", &daddr);
			ip_rt_put(rt);
			return ERR_PTR(-ELOOP);
		}

		*saddr = fl4.saddr;
		if (use_cache)
			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
	} else {
		netdev_dbg(dev, "no route to %pI4\n", &daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	return rt;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
					  struct net_device *dev,
					  struct vxlan_sock *sock6,
					  struct sk_buff *skb, int oif, u8 tos,
					  __be32 label,
					  const struct in6_addr *daddr,
					  struct in6_addr *saddr,
					  __be16 dport, __be16 sport,
					  struct dst_cache *dst_cache,
					  const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct dst_entry *ndst;
	struct flowi6 fl6;
	int err;

	if (!sock6)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		ndst = dst_cache_get_ip6(dst_cache, saddr);
		if (ndst)
			return ndst;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.daddr = *daddr;
	fl6.saddr = *saddr;
	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = IPPROTO_UDP;
	fl6.fl6_dport = dport;
	fl6.fl6_sport = sport;

	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
					 sock6->sock->sk,
					 &ndst, &fl6);
	if (unlikely(err < 0)) {
		netdev_dbg(dev, "no route to %pI6\n", daddr);
		return ERR_PTR(-ENETUNREACH);
	}

	if (unlikely(ndst->dev == dev)) {
		netdev_dbg(dev, "circular route to %pI6\n", daddr);
		dst_release(ndst);
		return ERR_PTR(-ELOOP);
	}

	*saddr = fl6.saddr;
	if (use_cache)
		dst_cache_set_ip6(dst_cache, ndst, saddr);
	return ndst;
}
#endif
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan, __be32 vni)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev = skb->dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family = AF_INET6;
#endif
	}

	if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
			    vni);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		dev->stats.rx_dropped++;
	}
}

static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
				 struct vxlan_dev *vxlan,
				 union vxlan_addr *daddr,
				 __be16 dst_port, int dst_ifindex, __be32 vni,
				 struct dst_entry *dst,
				 u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
	 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
	 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
	 */
	BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
#endif
	/* Bypass encapsulation if the destination is local */
	if (rt_flags & RTCF_LOCAL &&
	    !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		dst_release(dst);
		dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
					   daddr->sa.sa_family, dst_port,
					   vxlan->cfg.flags);
		if (!dst_vxlan) {
			dev->stats.tx_errors++;
			kfree_skb(skb);

			return -ENOENT;
		}
		vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
		return 1;
	}

	return 0;
}
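/* Note: vxlan_xmit_one() below handles a single remote: it takes the
 * destination and parameters either from the FDB remote (rdst) or, for
 * flow-based devices, from the skb's tunnel metadata, routes the outer
 * packet, builds the VXLAN header and hands the result to the UDP tunnel
 * transmit helpers.
 */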
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   __be32 default_vni, struct vxlan_rdst *rdst,
			   bool did_rsc)
{
	struct dst_cache *dst_cache;
	struct ip_tunnel_info *info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct iphdr *old_iph = ip_hdr(skb);
	union vxlan_addr *dst;
	union vxlan_addr remote_ip, local_ip;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 src_port = 0, dst_port;
	struct dst_entry *ndst = NULL;
	__be32 vni, label;
	__u8 tos, ttl;
	int ifindex;
	int err;
	u32 flags = vxlan->cfg.flags;
	bool udp_sum = false;
	bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));

	info = skb_tunnel_info(skb);

	if (rdst) {
		dst = &rdst->remote_ip;
		if (vxlan_addr_any(dst)) {
			if (did_rsc) {
				/* short-circuited back to local bridge */
				vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
				return;
			}
			goto drop;
		}

		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
		vni = (rdst->remote_vni) ? : default_vni;
		ifindex = rdst->remote_ifindex;
		local_ip = vxlan->cfg.saddr;
		dst_cache = &rdst->dst_cache;
		md->gbp = skb->mark;
		ttl = vxlan->cfg.ttl;
		if (!ttl && vxlan_addr_multicast(dst))
			ttl = 1;

		tos = vxlan->cfg.tos;
		if (tos == 1)
			tos = ip_tunnel_get_dsfield(old_iph, skb);

		if (dst->sa.sa_family == AF_INET)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
		else
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
		label = vxlan->cfg.label;
	} else {
		if (!info) {
			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
				  dev->name);
			goto drop;
		}
		remote_ip.sa.sa_family = ip_tunnel_info_af(info);
		if (remote_ip.sa.sa_family == AF_INET) {
			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
			local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
		} else {
			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
			local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
		}
		dst = &remote_ip;
		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
		vni = tunnel_id_to_key32(info->key.tun_id);
		ifindex = 0;
		dst_cache = &info->dst_cache;
		if (info->options_len)
			md = ip_tunnel_info_opts(info);
		ttl = info->key.ttl;
		tos = info->key.tos;
		label = info->key.label;
		udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	}
	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				     vxlan->cfg.port_max, true);

	rcu_read_lock();
	if (dst->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
		struct rtable *rt;
		__be16 df = 0;

		rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
				     dst->sin.sin_addr.s_addr,
				     &local_ip.sin.sin_addr.s_addr,
				     dst_port, src_port,
				     dst_cache, info);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (!info) {
			err = encap_bypass_if_local(skb, dev, vxlan, dst,
						    dst_port, ifindex, vni,
						    &rt->dst, rt->rt_flags);
			if (err)
				goto out_unlock;
		} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
			df = htons(IP_DF);
		}

		ndst = &rt->dst;
		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto tx_error;

		udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
				    dst->sin.sin_addr.s_addr, tos, ttl, df,
				    src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);

		ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
					label, &dst->sin6.sin6_addr,
					&local_ip.sin6.sin6_addr,
					dst_port, src_port,
					dst_cache, info);
		if (IS_ERR(ndst)) {
			err = PTR_ERR(ndst);
			ndst = NULL;
			goto tx_error;
		}

		if (!info) {
			u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;

			err = encap_bypass_if_local(skb, dev, vxlan, dst,
						    dst_port, ifindex, vni,
						    ndst, rt6i_flags);
			if (err)
				goto out_unlock;
		}

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		skb_scrub_packet(skb, xnet);
		err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto tx_error;

		udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
				     &local_ip.sin6.sin6_addr,
				     &dst->sin6.sin6_addr, tos, ttl,
				     label, src_port, dst_port, !udp_sum);
#endif
	}
out_unlock:
	rcu_read_unlock();
	return;

drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return;

tx_error:
	rcu_read_unlock();
	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;
	dst_release(ndst);
	dev->stats.tx_errors++;
	kfree_skb(skb);
}

/* Transmit local packets over VXLAN.
 *
 * The outer IP header inherits ECN and DF from the inner header.
 * The outer UDP destination is the VXLAN assigned port.
 * The source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info;
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;
	__be32 vni = 0;

	info = skb_tunnel_info(skb);

	skb_reset_mac_header(skb);

	if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
		if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
		    info->mode & IP_TUNNEL_INFO_TX) {
			vni = tunnel_id_to_key32(info->key.tun_id);
		} else {
			if (info && info->mode & IP_TUNNEL_INFO_TX)
				vxlan_xmit_one(skb, dev, vni, NULL, false);
			else
				kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vxlan->cfg.flags & VXLAN_F_PROXY) {
		eth = eth_hdr(skb);
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6) {
			struct ipv6hdr *hdr, _hdr;

			if ((hdr = skb_header_pointer(skb,
						      skb_network_offset(skb),
						      sizeof(_hdr), &_hdr)) &&
			    hdr->nexthdr == IPPROTO_ICMPV6)
				return neigh_reduce(dev, skb, vni);
		}
#endif
	}

	eth = eth_hdr(skb);
	f = vxlan_find_mac(vxlan, eth->h_dest, vni);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest, vni);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
		if (f == NULL) {
			if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
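
/* For illustration only (user-space side, not part of this file): a
 * typical device using the transmit path above is created with the
 * iproute2 front end, e.g.
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *		dev eth0 dstport 4789
 *
 * udp_flow_src_port() then derives the outer UDP source port from a
 * hash of the inner headers, so distinct inner flows spread across
 * ECMP paths and RSS queues while each flow stays on a stable path.
 */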
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & (NUD_PERMANENT | NUD_NOARP))
				continue;

			if (f->flags & NTF_EXT_LEARNED)
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
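
/* Each device hangs off the VNI hash table of its underlying
 * vxlan_sock via a vxlan_dev_node, so the receive path can map a
 * (socket, VNI) pair back to the device. The helpers below add and
 * remove those hash-table entries.
 */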
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	hlist_del_init_rcu(&vxlan->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
	hlist_del_init_rcu(&vxlan->hlist6.hlist);
#endif
	spin_unlock(&vn->sock_lock);
}

static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
			     struct vxlan_dev_node *node)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__be32 vni = vxlan->default_dst.remote_vni;

	node->vxlan = vxlan;
	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);

	free_percpu(dev->tstats);
}
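
/* Note for vxlan_open() below: -EADDRINUSE from vxlan_igmp_join() is
 * treated as success because the shared UDP socket may already be a
 * member of the multicast group on behalf of another VXLAN device.
 */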
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int ret;

	ret = vxlan_sock_add(vxlan);
	if (ret < 0)
		return ret;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret == -EADDRINUSE)
			ret = 0;
		if (ret) {
			vxlan_sock_release(vxlan);
			return ret;
		}
	}

	if (vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);

			if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
				continue;
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan, false);
	vxlan_sock_release(vxlan);

	return ret;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
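
/* VXLAN_HEADROOM and VXLAN6_HEADROOM (defined in <net/vxlan.h>) cover
 * the full outer encapsulation: IPv4 (20) or IPv6 (40) + UDP (8) +
 * VXLAN (8) + inner Ethernet (14), i.e. 50 or 70 bytes. The MTU check
 * below caps the device MTU against the current lower-device MTU.
 */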
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
							 dst->remote_ifindex);
	bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);

	/* This check differs from the dev->max_mtu check because it
	 * looks at the current lowerdev->mtu rather than the static
	 * dev->max_mtu.
	 */
	if (lowerdev) {
		int max_mtu = lowerdev->mtu -
			      (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		if (new_mtu > max_mtu)
			return -EINVAL;
	}

	dev->mtu = new_mtu;
	return 0;
}
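
/* ndo_fill_metadata_dst: pre-compute the routing decision for a
 * collect-metadata skb so users such as OVS or tc can see the source
 * and destination UDP ports the datapath would use; the result is
 * written back into the skb's tunnel key.
 */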
static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	__be16 sport, dport;

	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				  vxlan->cfg.port_max, true);
	dport = info->key.tp_dst ? : vxlan->cfg.dst_port;

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
		struct rtable *rt;

		rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
				     info->key.u.ipv4.dst,
				     &info->key.u.ipv4.src, dport, sport,
				     &info->dst_cache, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
		ip_rt_put(rt);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
		struct dst_entry *ndst;

		ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
					info->key.label, &info->key.u.ipv6.dst,
					&info->key.u.ipv6.src, dport, sport,
					&info->dst_cache, info);
		if (IS_ERR(ndst))
			return PTR_ERR(ndst);
		dst_release(ndst);
#else /* !CONFIG_IPV6 */
		return -EPFNOSUPPORT;
#endif
	}
	info->key.tp_src = sport;
	info->key.tp_dst = dport;
	return 0;
}

static const struct net_device_ops vxlan_netdev_ether_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};
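
/* VXLAN-GPE devices are ARPHRD_NONE and carry no inner Ethernet
 * header, so the raw ops below omit the MAC-address and FDB related
 * callbacks present in the Ethernet ops above.
 */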
static const struct net_device_ops vxlan_netdev_raw_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

/* Call the caller's ndo_udp_tunnel_add to supply the listening VXLAN
 * UDP ports. Callers are expected to implement ndo_udp_tunnel_add.
 */
static void vxlan_push_rx_ports(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
			udp_tunnel_push_rx_port(dev, vs->sock,
						(vs->flags & VXLAN_F_GPE) ?
						UDP_TUNNEL_TYPE_VXLAN_GPE :
						UDP_TUNNEL_TYPE_VXLAN);
	}
	spin_unlock(&vn->sock_lock);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->dev = dev;

	gro_cells_init(&vxlan->gro_cells, dev);

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static void vxlan_ether_setup(struct net_device *dev)
{
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &vxlan_netdev_ether_ops;
}

static void vxlan_raw_setup(struct net_device *dev)
{
	dev->header_ops = NULL;
	dev->type = ARPHRD_NONE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->netdev_ops = &vxlan_netdev_raw_ops;
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LABEL]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len  = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_GPE]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};
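
/* Sanity checks that go beyond the attribute policy above: link-layer
 * address length and validity, MTU bounds, VNI range, and ordering of
 * the source port range.
 */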
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (tb[IFLA_MTU]) {
		u32 mtu = nla_get_u32(tb[IFLA_MTU]);

		if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU)
			return -EINVAL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		if (id >= VXLAN_N_VID)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
		udp_conf.ipv6_v6only = 1;
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	refcount_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	udp_tunnel_notify_add_rx_port(sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_rcv;
	tunnel_cfg.encap_destroy = NULL;
	tunnel_cfg.gro_receive = vxlan_gro_receive;
	tunnel_cfg.gro_complete = vxlan_gro_complete;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}
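
/* Socket management: unless the device was configured with no_share,
 * __vxlan_sock_add() first tries to take a reference on an existing
 * vxlan_sock bound to the same port and receive flags, and only
 * creates a new one when none is found. In collect-metadata mode,
 * vxlan_sock_add() opens both an IPv4 and an IPv6 socket if possible.
 */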
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;
	struct vxlan_dev_node *node;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
				     vxlan->cfg.dst_port, vxlan->cfg.flags);
		if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
			spin_unlock(&vn->sock_lock);
			return -EBUSY;
		}
		spin_unlock(&vn->sock_lock);
	}
	if (!vs)
		vs = vxlan_socket_create(vxlan->net, ipv6,
					 vxlan->cfg.dst_port, vxlan->cfg.flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6) {
		rcu_assign_pointer(vxlan->vn6_sock, vs);
		node = &vxlan->hlist6;
	} else
#endif
	{
		rcu_assign_pointer(vxlan->vn4_sock, vs);
		node = &vxlan->hlist4;
	}
	vxlan_vs_add_dev(vs, vxlan, node);
	return 0;
}

static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
	bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
	bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
	bool ipv4 = !ipv6 || metadata;
	int ret = 0;

	RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
	if (ipv6) {
		ret = __vxlan_sock_add(vxlan, true);
		if (ret < 0 && ret != -EAFNOSUPPORT)
			ipv4 = false;
	}
#endif
	if (ipv4)
		ret = __vxlan_sock_add(vxlan, false);
	if (ret < 0)
		vxlan_sock_release(vxlan);
	return ret;
}
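
/* Validate and canonicalise a configuration before it is applied:
 * infer unset address families, reject mixed-family or multicast
 * source setups, resolve the lower device, fill in the default
 * destination port and ageing interval, and refuse configurations
 * that collide with an existing device.
 */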
static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
				 struct net_device **lower,
				 struct vxlan_dev *old)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *tmp;
	bool use_ipv6 = false;

	if (conf->flags & VXLAN_F_GPE) {
		/* For now, allow GPE only together with
		 * COLLECT_METADATA. This can be relaxed later; in that
		 * case the other side of the PtP link will have to be
		 * provided.
		 */
		if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
		    !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
			return -EINVAL;
		}
	}

	if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
		/* Unless IPv6 is explicitly requested, assume IPv4 */
		conf->remote_ip.sa.sa_family = AF_INET;
		conf->saddr.sa.sa_family = AF_INET;
	} else if (!conf->remote_ip.sa.sa_family) {
		conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
	} else if (!conf->saddr.sa.sa_family) {
		conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
	}

	if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family)
		return -EINVAL;

	if (vxlan_addr_multicast(&conf->saddr))
		return -EINVAL;

	if (conf->saddr.sa.sa_family == AF_INET6) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;
		use_ipv6 = true;
		conf->flags |= VXLAN_F_IPV6;

		if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
			int local_type =
				ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
			int remote_type =
				ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);

			if (local_type & IPV6_ADDR_LINKLOCAL) {
				if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
				    (remote_type != IPV6_ADDR_ANY))
					return -EINVAL;

				conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
			} else {
				if (remote_type ==
				    (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL))
					return -EINVAL;

				conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
			}
		}
	}

	if (conf->label && !use_ipv6)
		return -EINVAL;

	if (conf->remote_ifindex) {
		struct net_device *lowerdev;

		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
		if (!lowerdev)
			return -ENODEV;

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);

			if (idev && idev->cnf.disable_ipv6)
				return -EPERM;
		}
#endif

		*lower = lowerdev;
	} else {
		if (vxlan_addr_multicast(&conf->remote_ip))
			return -EINVAL;

#if IS_ENABLED(CONFIG_IPV6)
		if (conf->flags & VXLAN_F_IPV6_LINKLOCAL)
			return -EINVAL;
#endif

		*lower = NULL;
	}

	if (!conf->dst_port) {
		if (conf->flags & VXLAN_F_GPE)
			conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
		else
			conf->dst_port = htons(vxlan_port);
	}

	if (!conf->age_interval)
		conf->age_interval = FDB_AGE_DEFAULT;

	list_for_each_entry(tmp, &vn->vxlan_list, next) {
		if (tmp == old)
			continue;

		if (tmp->cfg.vni != conf->vni)
			continue;
		if (tmp->cfg.dst_port != conf->dst_port)
			continue;
		if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
		    (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
			continue;

		if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
		    tmp->cfg.remote_ifindex != conf->remote_ifindex)
			continue;

		return -EEXIST;
	}

	return 0;
}
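
/* Apply a validated configuration: on first configure choose raw
 * (GPE) or Ethernet device setup, inherit GSO limits and headroom
 * from the lower device, and cap the MTU accordingly.
 */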
static void vxlan_config_apply(struct net_device *dev,
			       struct vxlan_config *conf,
			       struct net_device *lowerdev,
			       struct net *src_net,
			       bool changelink)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	unsigned short needed_headroom = ETH_HLEN;
	bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
	int max_mtu = ETH_MAX_MTU;

	if (!changelink) {
		if (conf->flags & VXLAN_F_GPE)
			vxlan_raw_setup(dev);
		else
			vxlan_ether_setup(dev);

		if (conf->mtu)
			dev->mtu = conf->mtu;

		vxlan->net = src_net;
	}

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	if (lowerdev) {
		dst->remote_ifindex = conf->remote_ifindex;

		dev->gso_max_size = lowerdev->gso_max_size;
		dev->gso_max_segs = lowerdev->gso_max_segs;

		needed_headroom = lowerdev->hard_header_len;

		max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
					   VXLAN_HEADROOM);
	}

	if (dev->mtu > max_mtu)
		dev->mtu = max_mtu;

	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
		needed_headroom += VXLAN6_HEADROOM;
	else
		needed_headroom += VXLAN_HEADROOM;
	dev->needed_headroom = needed_headroom;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
}

static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf,
			       bool changelink)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct net_device *lowerdev;
	int ret;

	ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan);
	if (ret)
		return ret;

	vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);

	return 0;
}

static int __vxlan_dev_create(struct net *net, struct net_device *dev,
			      struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	err = vxlan_dev_configure(net, dev, conf, false);
	if (err)
		return err;

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE | NUD_PERMANENT,
				       NLM_F_EXCL | NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);
	return 0;
}
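
/* Translate rtnetlink attributes into a vxlan_config. On changelink
 * the existing configuration is the starting point and most
 * attributes are immutable, returning -EOPNOTSUPP if changed.
 */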
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
			 struct net_device *dev, struct vxlan_config *conf,
			 bool changelink)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	memset(conf, 0, sizeof(*conf));

	/* if changelink operation, start with old existing cfg */
	if (changelink)
		memcpy(conf, &vxlan->cfg, sizeof(*conf));

	if (data[IFLA_VXLAN_ID]) {
		__be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));

		if (changelink && (vni != conf->vni))
			return -EOPNOTSUPP;
		conf->vni = vni;
	}

	if (data[IFLA_VXLAN_GROUP]) {
		if (changelink && (conf->remote_ip.sa.sa_family != AF_INET))
			return -EOPNOTSUPP;

		conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
		conf->remote_ip.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6))
			return -EOPNOTSUPP;

		conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		conf->remote_ip.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		if (changelink && (conf->saddr.sa.sa_family != AF_INET))
			return -EOPNOTSUPP;

		conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		conf->saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		if (changelink && (conf->saddr.sa.sa_family != AF_INET6))
			return -EOPNOTSUPP;

		/* TODO: respect scope id */
		conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		conf->saddr.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LINK])
		conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);

	if (data[IFLA_VXLAN_TOS])
		conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (data[IFLA_VXLAN_LABEL])
		conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
			      IPV6_FLOWLABEL_MASK;

	if (data[IFLA_VXLAN_LEARNING]) {
		if (nla_get_u8(data[IFLA_VXLAN_LEARNING]))
			conf->flags |= VXLAN_F_LEARN;
		else
			conf->flags &= ~VXLAN_F_LEARN;
	} else if (!changelink) {
		/* default to learn on a new device */
		conf->flags |= VXLAN_F_LEARN;
	}

	if (data[IFLA_VXLAN_AGEING]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	}

	if (data[IFLA_VXLAN_PROXY]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_PROXY]))
			conf->flags |= VXLAN_F_PROXY;
	}

	if (data[IFLA_VXLAN_RSC]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_RSC]))
			conf->flags |= VXLAN_F_RSC;
	}

	if (data[IFLA_VXLAN_L2MISS]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_L2MISS]))
			conf->flags |= VXLAN_F_L2MISS;
	}

	if (data[IFLA_VXLAN_L3MISS]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_L3MISS]))
			conf->flags |= VXLAN_F_L3MISS;
	}

	if (data[IFLA_VXLAN_LIMIT]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
	}

	if (data[IFLA_VXLAN_COLLECT_METADATA]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
			conf->flags |= VXLAN_F_COLLECT_METADATA;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		if (!changelink) {
			const struct ifla_vxlan_port_range *p
				= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

			conf->port_min = ntohs(p->low);
			conf->port_max = ntohs(p->high);
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (data[IFLA_VXLAN_PORT]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
	}

	if (data[IFLA_VXLAN_UDP_CSUM]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
			conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
	}

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
			conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
	}

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
			conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
	}

	if (data[IFLA_VXLAN_REMCSUM_TX]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
			conf->flags |= VXLAN_F_REMCSUM_TX;
	}

	if (data[IFLA_VXLAN_REMCSUM_RX]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
			conf->flags |= VXLAN_F_REMCSUM_RX;
	}

	if (data[IFLA_VXLAN_GBP]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->flags |= VXLAN_F_GBP;
	}

	if (data[IFLA_VXLAN_GPE]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->flags |= VXLAN_F_GPE;
	}

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
	}

	if (tb[IFLA_MTU]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->mtu = nla_get_u32(tb[IFLA_MTU]);
	}

	return 0;
}
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxlan_config conf;
	int err;

	err = vxlan_nl2conf(tb, data, dev, &conf, false);
	if (err)
		return err;

	return __vxlan_dev_create(src_net, dev, &conf);
}

static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct vxlan_rdst old_dst;
	struct vxlan_config conf;
	int err;

	err = vxlan_nl2conf(tb, data, dev, &conf, true);
	if (err)
		return err;

	memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));

	err = vxlan_dev_configure(vxlan->net, dev, &conf, true);
	if (err)
		return err;

	/* handle default dst entry */
	if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
		spin_lock_bh(&vxlan->hash_lock);
		if (!vxlan_addr_any(&old_dst.remote_ip))
			__vxlan_fdb_delete(vxlan, all_zeros_mac,
					   old_dst.remote_ip,
					   vxlan->cfg.dst_port,
					   old_dst.remote_vni,
					   old_dst.remote_vni,
					   old_dst.remote_ifindex, 0);

		if (!vxlan_addr_any(&dst->remote_ip)) {
			err = vxlan_fdb_create(vxlan, all_zeros_mac,
					       &dst->remote_ip,
					       NUD_REACHABLE | NUD_PERMANENT,
					       NLM_F_CREATE | NLM_F_APPEND,
					       vxlan->cfg.dst_port,
					       dst->remote_vni,
					       dst->remote_vni,
					       dst->remote_ifindex,
					       NTF_SELF);
			if (err) {
				spin_unlock_bh(&vxlan->hash_lock);
				return err;
			}
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	return 0;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_flush(vxlan, true);

	gro_cells_destroy(&vxlan->gro_cells);
	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		0;
}
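
/* vxlan_fill_info() below must stay in sync with vxlan_get_size()
 * above: every attribute emitted here needs a matching
 * nla_total_size() entry in the size calculation.
 */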
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC,
		       !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->cfg.flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->cfg.flags & VXLAN_F_GPE &&
	    nla_put_flag(skb, IFLA_VXLAN_GPE))
		goto nla_put_failure;

	if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.changelink	= vxlan_changelink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};
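
/* Sketch of how an in-kernel user might drive the exported
 * vxlan_dev_create() below (illustrative only; the local variable
 * names are hypothetical, the pattern follows openvswitch):
 *
 *	struct vxlan_config conf = {
 *		.vni		= cpu_to_be32(42),
 *		.dst_port	= htons(4789),
 *	};
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = vxlan_dev_create(net, "vxlan42", NET_NAME_USER, &conf);
 *	rtnl_unlock();
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */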
struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type,
				    struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = __vxlan_dev_create(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	err = rtnl_configure_link(dev, NULL);
	if (err < 0) {
		LIST_HEAD(list_kill);

		vxlan_dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);

static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created a vxlan device with carrier and we
		 * lose the carrier due to module unload, we also need
		 * to remove the vxlan device. In other cases it is not
		 * necessary and remote_ifindex is 0 here, so nothing
		 * matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}

static int vxlan_netdevice_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);
	else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
		vxlan_push_rx_ports(dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_netdevice_event,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net)) {
			gro_cells_destroy(&vxlan->gro_cells);
			unregister_netdevice_queue(vxlan->dev, &list);
		}
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	return rc;
}
late_initcall(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");