/*
 * Linux NET3: GRE over IP protocol decoder.
 *
 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), we force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two
   problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least, in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it is your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
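
/* Illustrative sketch of the recursion guard mentioned above; this is
 * NOT part of this file and the names are approximate. The core idea is
 * a per-cpu transmit depth counter checked around the device xmit call:
 *
 *	if (this_cpu_read(xmit_depth) > RECURSION_LIMIT)
 *		goto drop;			// local dead loop detected
 *	this_cpu_inc(xmit_depth);
 *	err = dev_queue_xmit(skb);
 *	this_cpu_dec(xmit_depth);
 *
 * CPU migration is forbidden once we are inside ndo_xmit(), which is
 * what makes a plain per-cpu counter safe here.
 */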

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
                                u32 id, u32 index,
                                bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

static int ipgre_err(struct sk_buff *skb, u32 info,
                     const struct tnl_ptk_info *tpi)
{

        /* All the routers (except for Linux) return only
           8 bytes of packet payload. It means that precise relaying of
           ICMP in the real Internet is absolutely infeasible.

           Moreover, Cisco "wise men" put the GRE key in the third word
           of the GRE header. It makes it impossible to maintain even soft
           state for keyed GRE tunnels with enabled checksum. Tell
           them "thank you".

           Well, I wonder: rfc1812 was written by a Cisco employee,
           so what the hell makes these idiots break standards established
           by themselves???
         */
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn;
        const struct iphdr *iph;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        unsigned int data_len = 0;
        struct ip_tunnel *t;

        if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
        else if (tpi->proto == htons(ETH_P_ERSPAN) ||
                 tpi->proto == htons(ETH_P_ERSPAN2))
                itn = net_generic(net, erspan_net_id);
        else
                itn = net_generic(net, ipgre_net_id);

        iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                             iph->daddr, iph->saddr, tpi->key);

        if (!t)
                return -ENOENT;

        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
                return 0;

        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
                        return 0;
                default:
                        /* All others are translated to HOST_UNREACH.
                           rfc2003 contains "deep thoughts" about NET_UNREACH;
                           I believe they are just ether pollution. --ANK
                         */
                        break;
                }
                break;

        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
                        return 0;
                data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
                break;

        case ICMP_REDIRECT:
                break;
        }

#if IS_ENABLED(CONFIG_IPV6)
        if (tpi->proto == htons(ETH_P_IPV6) &&
            !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
                                        type, data_len))
                return 0;
#endif

        if (t->parms.iph.daddr == 0 ||
            ipv4_is_multicast(t->parms.iph.daddr))
                return 0;

        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
                return 0;

        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;

        return 0;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
        /* See the comment at the top of ipgre_err(): routers return only
         * 8 bytes of ICMP payload, and Cisco's placement of the GRE key
         * in the third word of the GRE header makes precise error
         * relaying for keyed tunnels with enabled checksum infeasible.
         */

        const struct iphdr *iph = (struct iphdr *)skb->data;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct tnl_ptk_info tpi;

        if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
                             iph->ihl * 4) < 0)
                return;

        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
                                 skb->dev->ifindex, IPPROTO_GRE);
                return;
        }
        if (type == ICMP_REDIRECT) {
                ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
                              IPPROTO_GRE);
                return;
        }

        ipgre_err(skb, info, &tpi);
}

static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                      int gre_hdr_len)
{
        struct net *net = dev_net(skb->dev);
        struct metadata_dst *tun_dst = NULL;
        struct erspan_base_hdr *ershdr;
        struct ip_tunnel_net *itn;
        struct ip_tunnel *tunnel;
        const struct iphdr *iph;
        struct erspan_md2 *md2;
        int ver;
        int len;

        itn = net_generic(net, erspan_net_id);

        iph = ip_hdr(skb);
        ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
        ver = ershdr->ver;

        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
                                  tpi->flags | TUNNEL_KEY,
                                  iph->saddr, iph->daddr, tpi->key);

        if (tunnel) {
                len = gre_hdr_len + erspan_hdr_len(ver);
                if (unlikely(!pskb_may_pull(skb, len)))
                        return PACKET_REJECT;

                if (__iptunnel_pull_header(skb, len, htons(ETH_P_TEB),
                                           false, false) < 0)
                        goto drop;

                if (tunnel->collect_md) {
                        struct erspan_metadata *pkt_md, *md;
                        struct ip_tunnel_info *info;
                        unsigned char *gh;
                        __be64 tun_id;
                        __be16 flags;

                        tpi->flags |= TUNNEL_KEY;
                        flags = tpi->flags;
                        tun_id = key32_to_tunnel_id(tpi->key);

                        tun_dst = ip_tun_rx_dst(skb, flags,
                                                tun_id, sizeof(*md));
                        if (!tun_dst)
                                return PACKET_REJECT;

                        /* skb can be uncloned in __iptunnel_pull_header, so
                         * the old pkt_md is no longer valid and we need to
                         * reset it.
                         */
                        gh = skb_network_header(skb) +
                             skb_network_header_len(skb);
                        pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
                                                            sizeof(*ershdr));
                        md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
                        md->version = ver;
                        md2 = &md->u.md2;
                        memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
                                                       ERSPAN_V2_MDSIZE);

                        info = &tun_dst->u.tun_info;
                        info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
                        info->options_len = sizeof(*md);
                }

                skb_reset_mac_header(skb);
                ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                return PACKET_RCVD;
        }
        return PACKET_REJECT;

drop:
        kfree_skb(skb);
        return PACKET_RCVD;
}

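/* Look up the tunnel matching the outer addresses and key, strip the
 * tunnel header, and hand the packet to ip_tunnel_rcv(). For tunnels in
 * collect_md mode, a metadata dst carrying the key is attached instead
 * of relying on per-device state. Returns PACKET_RCVD on success,
 * PACKET_REJECT on error, or PACKET_NEXT when no tunnel matched and the
 * caller may try another tunnel table.
 */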
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
        struct metadata_dst *tun_dst = NULL;
        const struct iphdr *iph;
        struct ip_tunnel *tunnel;

        iph = ip_hdr(skb);
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                                  iph->saddr, iph->daddr, tpi->key);

        if (tunnel) {
                if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
                                           raw_proto, false) < 0)
                        goto drop;

                if (tunnel->dev->type != ARPHRD_NONE)
                        skb_pop_mac_header(skb);
                else
                        skb_reset_mac_header(skb);
                if (tunnel->collect_md) {
                        __be16 flags;
                        __be64 tun_id;

                        flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
                        tun_id = key32_to_tunnel_id(tpi->key);
                        tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
                        if (!tun_dst)
                                return PACKET_REJECT;
                }

                ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                return PACKET_RCVD;
        }
        return PACKET_NEXT;

drop:
        kfree_skb(skb);
        return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                     int hdr_len)
{
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn;
        int res;

        if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
        else
                itn = net_generic(net, ipgre_net_id);

        res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
        if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
                /* ipgre tunnels in collect metadata mode should also
                 * receive ETH_P_TEB traffic.
                 */
                itn = net_generic(net, ipgre_net_id);
                res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
        }
        return res;
}

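/* Main GRE receive handler, registered with the GRE demultiplexer below
 * (see ipgre_protocol). Parses the GRE header, dispatches ERSPAN frames
 * to erspan_rcv() and everything else to ipgre_rcv(), and answers with
 * ICMP port unreachable when no tunnel claims the packet.
 */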
static int gre_rcv(struct sk_buff *skb)
{
        struct tnl_ptk_info tpi;
        bool csum_err = false;
        int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
                /* Looped back packet, drop it! */
                if (rt_is_output_route(skb_rtable(skb)))
                        goto drop;
        }
#endif

        hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
        if (hdr_len < 0)
                goto drop;

        if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
                if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                        return 0;
                goto out;
        }

        if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                return 0;

out:
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
        kfree_skb(skb);
        return 0;
}

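/* Common transmit tail for all GRE variants: bump the output sequence
 * number when TUNNEL_SEQ is set, push the GRE header, and let the
 * generic IP tunnel code build and send the outer IP packet.
 */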
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
                       const struct iphdr *tnl_params,
                       __be16 proto)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        if (tunnel->parms.o_flags & TUNNEL_SEQ)
                tunnel->o_seqno++;

        /* Push GRE header. */
        gre_build_header(skb, tunnel->tun_hlen,
                         tunnel->parms.o_flags, proto, tunnel->parms.o_key,
                         htonl(tunnel->o_seqno));

        ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
        return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
                        __be16 proto)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
        int tunnel_hlen;
        __be16 flags;

        tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                     ip_tunnel_info_af(tun_info) != AF_INET))
                goto err_free_skb;

        key = &tun_info->key;
        tunnel_hlen = gre_calc_hlen(key->tun_flags);

        if (skb_cow_head(skb, dev->needed_headroom))
                goto err_free_skb;

        /* Push Tunnel header. */
        if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
                goto err_free_skb;

        flags = tun_info->key.tun_flags &
                (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
        gre_build_header(skb, tunnel_hlen, flags, proto,
                         tunnel_id_to_key32(tun_info->key.tun_id),
                         (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

        ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

        return;

err_free_skb:
        kfree_skb(skb);
        dev->stats.tx_dropped++;
}

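/* Transmit path for ERSPAN devices in collect_md (flow-based) mode: the
 * ERSPAN version and session parameters come from the per-skb tunnel
 * metadata rather than from the device. Oversized frames are trimmed
 * and flagged as truncated in the ERSPAN header.
 */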
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
        struct erspan_metadata *md;
        bool truncate = false;
        __be16 proto;
        int tunnel_hlen;
        int version;
        int nhoff;
        int thoff;

        tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                     ip_tunnel_info_af(tun_info) != AF_INET))
                goto err_free_skb;

        key = &tun_info->key;
        if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
                goto err_free_skb;
        md = ip_tunnel_info_opts(tun_info);
        if (!md)
                goto err_free_skb;

        /* ERSPAN has a fixed 8-byte GRE header */
        version = md->version;
        tunnel_hlen = 8 + erspan_hdr_len(version);

        if (skb_cow_head(skb, dev->needed_headroom))
                goto err_free_skb;

        if (gre_handle_offloads(skb, false))
                goto err_free_skb;

        if (skb->len > dev->mtu + dev->hard_header_len) {
                pskb_trim(skb, dev->mtu + dev->hard_header_len);
                truncate = true;
        }

        nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (skb->protocol == htons(ETH_P_IP) &&
            (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
                truncate = true;

        thoff = skb_transport_header(skb) - skb_mac_header(skb);
        if (skb->protocol == htons(ETH_P_IPV6) &&
            (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
                truncate = true;

        if (version == 1) {
                erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
                                    ntohl(md->u.index), truncate, true);
                proto = htons(ETH_P_ERSPAN);
        } else if (version == 2) {
                erspan_build_header_v2(skb,
                                       ntohl(tunnel_id_to_key32(key->tun_id)),
                                       md->u.md2.dir,
                                       get_hwid(&md->u.md2),
                                       truncate, true);
                proto = htons(ETH_P_ERSPAN2);
        } else {
                goto err_free_skb;
        }

        gre_build_header(skb, 8, TUNNEL_SEQ,
                         proto, 0, htonl(tunnel->o_seqno++));

        ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

        return;

err_free_skb:
        kfree_skb(skb);
        dev->stats.tx_dropped++;
}

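/* ndo_fill_metadata_dst handler: resolve the route the packet would take
 * and record the chosen source address in the tunnel metadata, so that
 * callers which inspect the metadata (openvswitch, for example) see a
 * complete flow key.
 */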
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info = skb_tunnel_info(skb);
        const struct ip_tunnel_key *key;
        struct rtable *rt;
        struct flowi4 fl4;

        if (ip_tunnel_info_af(info) != AF_INET)
                return -EINVAL;

        key = &info->key;
        ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
                            tunnel_id_to_key32(key->tun_id), key->tos, 0,
                            skb->mark, skb_get_hash(skb));
        rt = ip_route_output_key(dev_net(dev), &fl4);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        ip_rt_put(rt);
        info->key.u.ipv4.src = fl4.saddr;
        return 0;
}

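/* ndo_start_xmit for "gre" (layer 3) devices. Three cases: collect_md
 * tunnels are handled by gre_fb_xmit(); broadcast tunnels with
 * header_ops already carry an outer IP header prebuilt by ipgre_header(),
 * which is used as the tunnel parameters; ordinary tunnels use the
 * parameters configured on the device.
 */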
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
                              struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *tnl_params;

        if (!pskb_inet_may_pull(skb))
                goto free_skb;

        if (tunnel->collect_md) {
                gre_fb_xmit(skb, dev, skb->protocol);
                return NETDEV_TX_OK;
        }

        if (dev->header_ops) {
                /* Need space for new headers */
                if (skb_cow_head(skb, dev->needed_headroom -
                                      (tunnel->hlen + sizeof(struct iphdr))))
                        goto free_skb;

                tnl_params = (const struct iphdr *)skb->data;

                /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
                 * to gre header.
                 */
                skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
                skb_reset_mac_header(skb);
        } else {
                if (skb_cow_head(skb, dev->needed_headroom))
                        goto free_skb;

                tnl_params = &tunnel->parms.iph;
        }

        if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
                goto free_skb;

        __gre_xmit(skb, dev, tnl_params, skb->protocol);
        return NETDEV_TX_OK;

free_skb:
        kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
                               struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        bool truncate = false;
        __be16 proto;

        if (!pskb_inet_may_pull(skb))
                goto free_skb;

        if (tunnel->collect_md) {
                erspan_fb_xmit(skb, dev);
                return NETDEV_TX_OK;
        }

        if (gre_handle_offloads(skb, false))
                goto free_skb;

        if (skb_cow_head(skb, dev->needed_headroom))
                goto free_skb;

        if (skb->len > dev->mtu + dev->hard_header_len) {
                pskb_trim(skb, dev->mtu + dev->hard_header_len);
                truncate = true;
        }

        /* Push ERSPAN header */
        if (tunnel->erspan_ver == 1) {
                erspan_build_header(skb, ntohl(tunnel->parms.o_key),
                                    tunnel->index,
                                    truncate, true);
                proto = htons(ETH_P_ERSPAN);
        } else if (tunnel->erspan_ver == 2) {
                erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
                                       tunnel->dir, tunnel->hwid,
                                       truncate, true);
                proto = htons(ETH_P_ERSPAN2);
        } else {
                goto free_skb;
        }

        tunnel->parms.o_flags &= ~TUNNEL_KEY;
        __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
        return NETDEV_TX_OK;

free_skb:
        kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
                                struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        if (!pskb_inet_may_pull(skb))
                goto free_skb;

        if (tunnel->collect_md) {
                gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
                return NETDEV_TX_OK;
        }

        if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
                goto free_skb;

        if (skb_cow_head(skb, dev->needed_headroom))
                goto free_skb;

        __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
        return NETDEV_TX_OK;

free_skb:
        kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}

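/* Recompute header lengths after the output flags changed (e.g. via
 * SIOCCHGTUNNEL or changelink): adjust needed_headroom, optionally
 * shrink the MTU accordingly, and refresh the GSO/LLTX feature flags,
 * which depend on the TUNNEL_SEQ and TUNNEL_CSUM bits.
 */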
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int len;

        len = tunnel->tun_hlen;
        tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
        len = tunnel->tun_hlen - len;
        tunnel->hlen = tunnel->hlen + len;

        dev->needed_headroom = dev->needed_headroom + len;
        if (set_mtu)
                dev->mtu = max_t(int, dev->mtu - len, 68);

        if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
                if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
                    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
                        dev->features |= NETIF_F_GSO_SOFTWARE;
                        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
                } else {
                        dev->features &= ~NETIF_F_GSO_SOFTWARE;
                        dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
                }
                dev->features |= NETIF_F_LLTX;
        } else {
                dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
                dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
        }
}

static int ipgre_tunnel_ioctl(struct net_device *dev,
                              struct ifreq *ifr, int cmd)
{
        struct ip_tunnel_parm p;
        int err;

        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                return -EFAULT;

        if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
                    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
                    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
                        return -EINVAL;
        }

        p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
        p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

        err = ip_tunnel_ioctl(dev, &p, cmd);
        if (err)
                return err;

        if (cmd == SIOCCHGTUNNEL) {
                struct ip_tunnel *t = netdev_priv(dev);

                t->parms.i_flags = p.i_flags;
                t->parms.o_flags = p.o_flags;

                if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
                        ipgre_link_update(dev, true);
        }

        p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
        p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                return -EFAULT;

        return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows constructing a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   which is why I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could have made something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone,
   play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
                        unsigned short type,
                        const void *daddr, const void *saddr, unsigned int len)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct iphdr *iph;
        struct gre_base_hdr *greh;

        iph = skb_push(skb, t->hlen + sizeof(*iph));
        greh = (struct gre_base_hdr *)(iph + 1);
        greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
        greh->protocol = htons(type);

        memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

        /* Set the source hardware address. */
        if (saddr)
                memcpy(&iph->saddr, saddr, 4);
        if (daddr)
                memcpy(&iph->daddr, daddr, 4);
        if (iph->daddr)
                return t->hlen + sizeof(*iph);

        return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
        const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

        memcpy(haddr, &iph->saddr, 4);
        return 4;
}

static const struct header_ops ipgre_header_ops = {
        .create = ipgre_header,
        .parse  = ipgre_header_parse,
};

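/* Multicast ("broadcast LAN") tunnels must join the multicast group on
 * the underlying device while the tunnel is up; ipgre_open() and
 * ipgre_close() manage that group membership.
 */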
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);

        if (ipv4_is_multicast(t->parms.iph.daddr)) {
                struct flowi4 fl4;
                struct rtable *rt;

                rt = ip_route_output_gre(t->net, &fl4,
                                         t->parms.iph.daddr,
                                         t->parms.iph.saddr,
                                         t->parms.o_key,
                                         RT_TOS(t->parms.iph.tos),
                                         t->parms.link);
                if (IS_ERR(rt))
                        return -EADDRNOTAVAIL;
                dev = rt->dst.dev;
                ip_rt_put(rt);
                if (!__in_dev_get_rtnl(dev))
                        return -EADDRNOTAVAIL;
                t->mlink = dev->ifindex;
                ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
        }
        return 0;
}

static int ipgre_close(struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);

        if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
                struct in_device *in_dev;

                in_dev = inetdev_by_index(t->net, t->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, t->parms.iph.daddr);
        }
        return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
        .ndo_init       = ipgre_tunnel_init,
        .ndo_uninit     = ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
        .ndo_open       = ipgre_open,
        .ndo_stop       = ipgre_close,
#endif
        .ndo_start_xmit = ipgre_xmit,
        .ndo_do_ioctl   = ipgre_tunnel_ioctl,
        .ndo_change_mtu = ip_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
        .ndo_get_iflink = ip_tunnel_get_iflink,
};

#define GRE_FEATURES    (NETIF_F_SG |           \
                         NETIF_F_FRAGLIST |     \
                         NETIF_F_HIGHDMA |      \
                         NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
        dev->netdev_ops = &ipgre_netdev_ops;
        dev->type       = ARPHRD_IPGRE;
        ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel;

        tunnel = netdev_priv(dev);
        tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
        tunnel->parms.iph.protocol = IPPROTO_GRE;

        tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

        dev->features    |= GRE_FEATURES;
        dev->hw_features |= GRE_FEATURES;

        if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
                /* TCP offload with GRE SEQ is not supported, nor
                 * can we support 2 levels of outer headers requiring
                 * an update.
                 */
                if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
                    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
                        dev->features |= NETIF_F_GSO_SOFTWARE;
                        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
                }

                /* Can use a lockless transmit, unless we generate
                 * output sequences
                 */
                dev->features |= NETIF_F_LLTX;
        }
}

static int ipgre_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;

        __gre_tunnel_init(dev);

        memcpy(dev->dev_addr, &iph->saddr, 4);
        memcpy(dev->broadcast, &iph->daddr, 4);

        dev->flags = IFF_NOARP;
        netif_keep_dst(dev);
        dev->addr_len = 4;

        if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        if (!iph->saddr)
                                return -EINVAL;
                        dev->flags = IFF_BROADCAST;
                        dev->header_ops = &ipgre_header_ops;
                }
#endif
        } else if (!tunnel->collect_md) {
                dev->header_ops = &ipgre_header_ops;
        }

        return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
        .handler     = gre_rcv,
        .err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
        return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
        ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
        .init = ipgre_init_net,
        .exit_batch = ipgre_exit_batch_net,
        .id   = &ipgre_net_id,
        .size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
                                 struct netlink_ext_ack *extack)
{
        __be16 flags;

        if (!data)
                return 0;

        flags = 0;
        if (data[IFLA_GRE_IFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
        if (data[IFLA_GRE_OFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
        if (flags & (GRE_VERSION | GRE_ROUTING))
                return -EINVAL;

        if (data[IFLA_GRE_COLLECT_METADATA] &&
            data[IFLA_GRE_ENCAP_TYPE] &&
            nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
                return -EINVAL;

        return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
                              struct netlink_ext_ack *extack)
{
        __be32 daddr;

        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }

        if (!data)
                goto out;

        if (data[IFLA_GRE_REMOTE]) {
                memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
                if (!daddr)
                        return -EINVAL;
        }

out:
        return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
                           struct netlink_ext_ack *extack)
{
        __be16 flags = 0;
        int ret;

        if (!data)
                return 0;

        ret = ipgre_tap_validate(tb, data, extack);
        if (ret)
                return ret;

        /* ERSPAN should only have the GRE sequence and key flags */
        if (data[IFLA_GRE_OFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
        if (data[IFLA_GRE_IFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
        if (!data[IFLA_GRE_COLLECT_METADATA] &&
            flags != (GRE_SEQ | GRE_KEY))
                return -EINVAL;

        /* The ERSPAN session ID is only 10 bits. Since we reuse the
         * 32-bit key field as the ID, check its range.
         */
        if (data[IFLA_GRE_IKEY] &&
            (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
                return -EINVAL;

        if (data[IFLA_GRE_OKEY] &&
            (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
                return -EINVAL;

        return 0;
}

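/* Translate the IFLA_GRE_* netlink attributes into the generic tunnel
 * parameters, including the ERSPAN-specific attributes, whose validity
 * depends on the configured ERSPAN version.
 */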
static int ipgre_netlink_parms(struct net_device *dev,
                               struct nlattr *data[],
                               struct nlattr *tb[],
                               struct ip_tunnel_parm *parms,
                               __u32 *fwmark)
{
        struct ip_tunnel *t = netdev_priv(dev);

        memset(parms, 0, sizeof(*parms));

        parms->iph.protocol = IPPROTO_GRE;

        if (!data)
                return 0;

        if (data[IFLA_GRE_LINK])
                parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

        if (data[IFLA_GRE_IFLAGS])
                parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

        if (data[IFLA_GRE_OFLAGS])
                parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

        if (data[IFLA_GRE_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

        if (data[IFLA_GRE_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

        if (data[IFLA_GRE_LOCAL])
                parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

        if (data[IFLA_GRE_REMOTE])
                parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

        if (data[IFLA_GRE_TTL])
                parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

        if (data[IFLA_GRE_TOS])
                parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

        if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
                if (t->ignore_df)
                        return -EINVAL;
                parms->iph.frag_off = htons(IP_DF);
        }

        if (data[IFLA_GRE_COLLECT_METADATA]) {
                t->collect_md = true;
                if (dev->type == ARPHRD_IPGRE)
                        dev->type = ARPHRD_NONE;
        }

        if (data[IFLA_GRE_IGNORE_DF]) {
                if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
                    && (parms->iph.frag_off & htons(IP_DF)))
                        return -EINVAL;
                t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
        }

        if (data[IFLA_GRE_FWMARK])
                *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

        if (data[IFLA_GRE_ERSPAN_VER]) {
                t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

                if (t->erspan_ver != 1 && t->erspan_ver != 2)
                        return -EINVAL;
        }

        if (t->erspan_ver == 1) {
                if (data[IFLA_GRE_ERSPAN_INDEX]) {
                        t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
                        if (t->index & ~INDEX_MASK)
                                return -EINVAL;
                }
        } else if (t->erspan_ver == 2) {
                if (data[IFLA_GRE_ERSPAN_DIR]) {
                        t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
                        if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
                                return -EINVAL;
                }
                if (data[IFLA_GRE_ERSPAN_HWID]) {
                        t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
                        if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
                                return -EINVAL;
                }
        }

        return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
                                      struct ip_tunnel_encap *ipencap)
{
        bool ret = false;

        memset(ipencap, 0, sizeof(*ipencap));

        if (!data)
                return ret;

        if (data[IFLA_GRE_ENCAP_TYPE]) {
                ret = true;
                ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
        }

        if (data[IFLA_GRE_ENCAP_FLAGS]) {
                ret = true;
                ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
        }

        if (data[IFLA_GRE_ENCAP_SPORT]) {
                ret = true;
                ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
        }

        if (data[IFLA_GRE_ENCAP_DPORT]) {
                ret = true;
                ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
        }

        return ret;
}

static int gre_tap_init(struct net_device *dev)
{
        __gre_tunnel_init(dev);
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);

        return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
        .ndo_init               = gre_tap_init,
        .ndo_uninit             = ip_tunnel_uninit,
        .ndo_start_xmit         = gre_tap_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_get_iflink         = ip_tunnel_get_iflink,
        .ndo_fill_metadata_dst  = gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        tunnel->tun_hlen = 8;
        tunnel->parms.iph.protocol = IPPROTO_GRE;
        tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
                       erspan_hdr_len(tunnel->erspan_ver);

        dev->features    |= GRE_FEATURES;
        dev->hw_features |= GRE_FEATURES;
        dev->priv_flags  |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);

        return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
        .ndo_init               = erspan_tunnel_init,
        .ndo_uninit             = ip_tunnel_uninit,
        .ndo_start_xmit         = erspan_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_get_iflink         = ip_tunnel_get_iflink,
        .ndo_fill_metadata_dst  = gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
        ether_setup(dev);
        dev->max_mtu = 0;
        dev->netdev_ops  = &gre_tap_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ip_tunnel_setup(dev, gre_tap_net_id);
}

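/* rtnl newlink handler shared by the gre, gretap and erspan link types:
 * apply any encapsulation attributes (IFLA_GRE_ENCAP_*) first, then
 * parse the tunnel parameters and register the device.
 */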
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[],
                         struct netlink_ext_ack *extack)
{
        struct ip_tunnel_parm p;
        struct ip_tunnel_encap ipencap;
        __u32 fwmark = 0;
        int err;

        if (ipgre_netlink_encap_parms(data, &ipencap)) {
                struct ip_tunnel *t = netdev_priv(dev);

                err = ip_tunnel_encap_setup(t, &ipencap);
                if (err < 0)
                        return err;
        }

        err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
        if (err < 0)
                return err;
        return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct nlattr *data[],
                            struct netlink_ext_ack *extack)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_encap ipencap;
        __u32 fwmark = t->fwmark;
        struct ip_tunnel_parm p;
        int err;

        if (ipgre_netlink_encap_parms(data, &ipencap)) {
                err = ip_tunnel_encap_setup(t, &ipencap);
                if (err < 0)
                        return err;
        }

        err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
        if (err < 0)
                return err;

        err = ip_tunnel_changelink(dev, tb, &p, fwmark);
        if (err < 0)
                return err;

        t->parms.i_flags = p.i_flags;
        t->parms.o_flags = p.o_flags;

        if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
                ipgre_link_update(dev, !tb[IFLA_MTU]);

        return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
        return
                /* IFLA_GRE_LINK */
                nla_total_size(4) +
                /* IFLA_GRE_IFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_OFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_IKEY */
                nla_total_size(4) +
                /* IFLA_GRE_OKEY */
                nla_total_size(4) +
                /* IFLA_GRE_LOCAL */
                nla_total_size(4) +
                /* IFLA_GRE_REMOTE */
                nla_total_size(4) +
                /* IFLA_GRE_TTL */
                nla_total_size(1) +
                /* IFLA_GRE_TOS */
                nla_total_size(1) +
                /* IFLA_GRE_PMTUDISC */
                nla_total_size(1) +
                /* IFLA_GRE_ENCAP_TYPE */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_FLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_SPORT */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_DPORT */
                nla_total_size(2) +
                /* IFLA_GRE_COLLECT_METADATA */
                nla_total_size(0) +
                /* IFLA_GRE_IGNORE_DF */
                nla_total_size(1) +
                /* IFLA_GRE_FWMARK */
                nla_total_size(4) +
                /* IFLA_GRE_ERSPAN_INDEX */
                nla_total_size(4) +
                /* IFLA_GRE_ERSPAN_VER */
                nla_total_size(1) +
                /* IFLA_GRE_ERSPAN_DIR */
                nla_total_size(1) +
                /* IFLA_GRE_ERSPAN_HWID */
                nla_total_size(2) +
                0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;
        __be16 o_flags = p->o_flags;

        if (t->erspan_ver == 1 || t->erspan_ver == 2) {
                if (!t->collect_md)
                        o_flags |= TUNNEL_KEY;

                if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
                        goto nla_put_failure;

                if (t->erspan_ver == 1) {
                        if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
                                goto nla_put_failure;
                } else {
                        if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
                                goto nla_put_failure;
                        if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
                                goto nla_put_failure;
                }
        }

        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
                         gre_tnl_flags_to_gre_flags(p->i_flags)) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS,
                         gre_tnl_flags_to_gre_flags(o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
            nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
            nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
            nla_put_u8(skb, IFLA_GRE_PMTUDISC,
                       !!(p->iph.frag_off & htons(IP_DF))) ||
            nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
                goto nla_put_failure;

        if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
                        t->encap.type) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
                         t->encap.sport) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
                         t->encap.dport) ||
            nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
                        t->encap.flags))
                goto nla_put_failure;

        if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
                goto nla_put_failure;

        if (t->collect_md) {
                if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);

        ether_setup(dev);
        dev->netdev_ops = &erspan_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ip_tunnel_setup(dev, erspan_net_id);
        t->erspan_ver = 1;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_LINK]         = { .type = NLA_U32 },
        [IFLA_GRE_IFLAGS]       = { .type = NLA_U16 },
        [IFLA_GRE_OFLAGS]       = { .type = NLA_U16 },
        [IFLA_GRE_IKEY]         = { .type = NLA_U32 },
        [IFLA_GRE_OKEY]         = { .type = NLA_U32 },
        [IFLA_GRE_LOCAL]        = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
        [IFLA_GRE_REMOTE]       = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
        [IFLA_GRE_TTL]          = { .type = NLA_U8 },
        [IFLA_GRE_TOS]          = { .type = NLA_U8 },
        [IFLA_GRE_PMTUDISC]     = { .type = NLA_U8 },
        [IFLA_GRE_ENCAP_TYPE]   = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_FLAGS]  = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_SPORT]  = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
        [IFLA_GRE_COLLECT_METADATA]     = { .type = NLA_FLAG },
        [IFLA_GRE_IGNORE_DF]    = { .type = NLA_U8 },
        [IFLA_GRE_FWMARK]       = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_VER]   = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_DIR]   = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_HWID]  = { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
        .kind           = "gre",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ipgre_policy,
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = ipgre_tunnel_setup,
        .validate       = ipgre_tunnel_validate,
        .newlink        = ipgre_newlink,
        .changelink     = ipgre_changelink,
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
        .get_link_net   = ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
        .kind           = "gretap",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ipgre_policy,
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = ipgre_tap_setup,
        .validate       = ipgre_tap_validate,
        .newlink        = ipgre_newlink,
        .changelink     = ipgre_changelink,
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
        .get_link_net   = ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
        .kind           = "erspan",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ipgre_policy,
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = erspan_setup,
        .validate       = erspan_validate,
        .newlink        = ipgre_newlink,
        .changelink     = ipgre_changelink,
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
        .get_link_net   = ip_tunnel_get_link_net,
};

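/* Create a flow-based (collect_md) gretap device. Exported for users
 * such as openvswitch, which expect an unrestricted MTU (see below).
 */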
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
                                        u8 name_assign_type)
{
        struct nlattr *tb[IFLA_MAX + 1];
        struct net_device *dev;
        LIST_HEAD(list_kill);
        struct ip_tunnel *t;
        int err;

        memset(&tb, 0, sizeof(tb));

        dev = rtnl_create_link(net, name, name_assign_type,
                               &ipgre_tap_ops, tb, NULL);
        if (IS_ERR(dev))
                return dev;

        /* Configure flow based GRE device. */
        t = netdev_priv(dev);
        t->collect_md = true;

        err = ipgre_newlink(net, dev, tb, NULL, NULL);
        if (err < 0) {
                free_netdev(dev);
                return ERR_PTR(err);
        }

        /* openvswitch users expect packet sizes to be unrestricted,
         * so set the largest MTU we can.
         */
        err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
        if (err)
                goto out;

        err = rtnl_configure_link(dev, NULL);
        if (err < 0)
                goto out;

        return dev;
out:
        ip_tunnel_dellink(dev, &list_kill);
        unregister_netdevice_many(&list_kill);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
        return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
        ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
        .init = ipgre_tap_init_net,
        .exit_batch = ipgre_tap_exit_batch_net,
        .id   = &gre_tap_net_id,
        .size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
        return ip_tunnel_init_net(net, erspan_net_id,
                                  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
        ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
        .init = erspan_init_net,
        .exit_batch = erspan_exit_batch_net,
        .id   = &erspan_net_id,
        .size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
        int err;

        pr_info("GRE over IPv4 tunneling driver\n");

        err = register_pernet_device(&ipgre_net_ops);
        if (err < 0)
                return err;

        err = register_pernet_device(&ipgre_tap_net_ops);
        if (err < 0)
                goto pnet_tap_failed;

        err = register_pernet_device(&erspan_net_ops);
        if (err < 0)
                goto pnet_erspan_failed;

        err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
        }

        err = rtnl_link_register(&ipgre_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        err = rtnl_link_register(&ipgre_tap_ops);
        if (err < 0)
                goto tap_ops_failed;

        err = rtnl_link_register(&erspan_link_ops);
        if (err < 0)
                goto erspan_link_failed;

        return 0;

erspan_link_failed:
        rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
        rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
        gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
        unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
        unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
        unregister_pernet_device(&ipgre_net_ops);
        return err;
}

static void __exit ipgre_fini(void)
{
        rtnl_link_unregister(&ipgre_tap_ops);
        rtnl_link_unregister(&ipgre_link_ops);
        rtnl_link_unregister(&erspan_link_ops);
        gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
        unregister_pernet_device(&ipgre_tap_net_ops);
        unregister_pernet_device(&ipgre_net_ops);
        unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");