Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Linux INET6 implementation
4 * FIB front-end.
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 */
9
10/* Changes:
11 *
12 * YOSHIFUJI Hideaki @USAGI
13 * reworked default router selection.
14 * - respect outgoing interface
15 * - select from (probably) reachable routers (i.e.
16 * routers in REACHABLE, STALE, DELAY or PROBE states).
17 * - always select the same router if it is (probably)
18 * reachable. otherwise, round-robin the list.
19 * Ville Nuorvala
20 * Fixed routing subtrees.
21 */
22
23#define pr_fmt(fmt) "IPv6: " fmt
24
25#include <linux/capability.h>
26#include <linux/errno.h>
27#include <linux/export.h>
28#include <linux/types.h>
29#include <linux/times.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/route.h>
34#include <linux/netdevice.h>
35#include <linux/in6.h>
36#include <linux/mroute6.h>
37#include <linux/init.h>
38#include <linux/if_arp.h>
39#include <linux/proc_fs.h>
40#include <linux/seq_file.h>
41#include <linux/nsproxy.h>
42#include <linux/slab.h>
43#include <linux/jhash.h>
44#include <linux/siphash.h>
45#include <net/net_namespace.h>
46#include <net/snmp.h>
47#include <net/ipv6.h>
48#include <net/ip6_fib.h>
49#include <net/ip6_route.h>
50#include <net/ndisc.h>
51#include <net/addrconf.h>
52#include <net/tcp.h>
53#include <linux/rtnetlink.h>
54#include <net/dst.h>
55#include <net/dst_metadata.h>
56#include <net/xfrm.h>
57#include <net/netevent.h>
58#include <net/netlink.h>
59#include <net/rtnh.h>
60#include <net/lwtunnel.h>
61#include <net/ip_tunnels.h>
62#include <net/l3mdev.h>
63#include <net/ip.h>
64#include <linux/uaccess.h>
65#include <linux/btf_ids.h>
66
67#ifdef CONFIG_SYSCTL
68#include <linux/sysctl.h>
69#endif
70
71static int ip6_rt_type_to_error(u8 fib6_type);
72
73#define CREATE_TRACE_POINTS
74#include <trace/events/fib6.h>
75EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
76#undef CREATE_TRACE_POINTS
77
78enum rt6_nud_state {
79 RT6_NUD_FAIL_HARD = -3,
80 RT6_NUD_FAIL_PROBE = -2,
81 RT6_NUD_FAIL_DO_RR = -1,
82 RT6_NUD_SUCCEED = 1
83};
84
85INDIRECT_CALLABLE_SCOPE
86struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
87static unsigned int ip6_default_advmss(const struct dst_entry *dst);
88INDIRECT_CALLABLE_SCOPE
89unsigned int ip6_mtu(const struct dst_entry *dst);
90static void ip6_negative_advice(struct sock *sk,
91 struct dst_entry *dst);
92static void ip6_dst_destroy(struct dst_entry *);
93static void ip6_dst_ifdown(struct dst_entry *,
94 struct net_device *dev);
95static void ip6_dst_gc(struct dst_ops *ops);
96
97static int ip6_pkt_discard(struct sk_buff *skb);
98static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
99static int ip6_pkt_prohibit(struct sk_buff *skb);
100static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
101static void ip6_link_failure(struct sk_buff *skb);
102static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
103 struct sk_buff *skb, u32 mtu,
104 bool confirm_neigh);
105static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
106 struct sk_buff *skb);
107static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
108 int strict);
109static size_t rt6_nlmsg_size(struct fib6_info *f6i);
110static int rt6_fill_node(struct net *net, struct sk_buff *skb,
111 struct fib6_info *rt, struct dst_entry *dst,
112 struct in6_addr *dest, struct in6_addr *src,
113 int iif, int type, u32 portid, u32 seq,
114 unsigned int flags);
115static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
116 const struct in6_addr *daddr,
117 const struct in6_addr *saddr);
118
119#ifdef CONFIG_IPV6_ROUTE_INFO
120static struct fib6_info *rt6_add_route_info(struct net *net,
121 const struct in6_addr *prefix, int prefixlen,
122 const struct in6_addr *gwaddr,
123 struct net_device *dev,
124 unsigned int pref);
125static struct fib6_info *rt6_get_route_info(struct net *net,
126 const struct in6_addr *prefix, int prefixlen,
127 const struct in6_addr *gwaddr,
128 struct net_device *dev);
129#endif
130
131struct uncached_list {
132 spinlock_t lock;
133 struct list_head head;
134 struct list_head quarantine;
135};
136
137static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
138
139void rt6_uncached_list_add(struct rt6_info *rt)
140{
141 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
142
143 rt->dst.rt_uncached_list = ul;
144
145 spin_lock_bh(&ul->lock);
146 list_add_tail(&rt->dst.rt_uncached, &ul->head);
147 spin_unlock_bh(&ul->lock);
148}
149
150void rt6_uncached_list_del(struct rt6_info *rt)
151{
152 if (!list_empty(&rt->dst.rt_uncached)) {
153 struct uncached_list *ul = rt->dst.rt_uncached_list;
154
155 spin_lock_bh(&ul->lock);
156 list_del_init(&rt->dst.rt_uncached);
157 spin_unlock_bh(&ul->lock);
158 }
159}
160
161static void rt6_uncached_list_flush_dev(struct net_device *dev)
162{
163 int cpu;
164
165 for_each_possible_cpu(cpu) {
166 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
167 struct rt6_info *rt, *safe;
168
169 if (list_empty(&ul->head))
170 continue;
171
172 spin_lock_bh(&ul->lock);
173 list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
174 struct inet6_dev *rt_idev = rt->rt6i_idev;
175 struct net_device *rt_dev = rt->dst.dev;
176 bool handled = false;
177
178 if (rt_idev->dev == dev) {
179 rt->rt6i_idev = in6_dev_get(blackhole_netdev);
180 in6_dev_put(rt_idev);
181 handled = true;
182 }
183
184 if (rt_dev == dev) {
185 rt->dst.dev = blackhole_netdev;
186 netdev_ref_replace(rt_dev, blackhole_netdev,
187 &rt->dst.dev_tracker,
188 GFP_ATOMIC);
189 handled = true;
190 }
191 if (handled)
192 list_move(&rt->dst.rt_uncached,
193 &ul->quarantine);
194 }
195 spin_unlock_bh(&ul->lock);
196 }
197}
198
199static inline const void *choose_neigh_daddr(const struct in6_addr *p,
200 struct sk_buff *skb,
201 const void *daddr)
202{
203 if (!ipv6_addr_any(p))
204 return (const void *) p;
205 else if (skb)
206 return &ipv6_hdr(skb)->daddr;
207 return daddr;
208}
209
210struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
211 struct net_device *dev,
212 struct sk_buff *skb,
213 const void *daddr)
214{
215 struct neighbour *n;
216
217 daddr = choose_neigh_daddr(gw, skb, daddr);
218 n = __ipv6_neigh_lookup(dev, daddr);
219 if (n)
220 return n;
221
222 n = neigh_create(&nd_tbl, daddr, dev);
223 return IS_ERR(n) ? NULL : n;
224}
225
226static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
227 struct sk_buff *skb,
228 const void *daddr)
229{
230 const struct rt6_info *rt = dst_rt6_info(dst);
231
232 return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
233 dst->dev, skb, daddr);
234}
235
236static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
237{
238 const struct rt6_info *rt = dst_rt6_info(dst);
239 struct net_device *dev = dst->dev;
240
241 daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
242 if (!daddr)
243 return;
244 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
245 return;
246 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
247 return;
248 __ipv6_confirm_neigh(dev, daddr);
249}
250
251static struct dst_ops ip6_dst_ops_template = {
252 .family = AF_INET6,
253 .gc = ip6_dst_gc,
254 .gc_thresh = 1024,
255 .check = ip6_dst_check,
256 .default_advmss = ip6_default_advmss,
257 .mtu = ip6_mtu,
258 .cow_metrics = dst_cow_metrics_generic,
259 .destroy = ip6_dst_destroy,
260 .ifdown = ip6_dst_ifdown,
261 .negative_advice = ip6_negative_advice,
262 .link_failure = ip6_link_failure,
263 .update_pmtu = ip6_rt_update_pmtu,
264 .redirect = rt6_do_redirect,
265 .local_out = __ip6_local_out,
266 .neigh_lookup = ip6_dst_neigh_lookup,
267 .confirm_neigh = ip6_confirm_neigh,
268};
269
270static struct dst_ops ip6_dst_blackhole_ops = {
271 .family = AF_INET6,
272 .default_advmss = ip6_default_advmss,
273 .neigh_lookup = ip6_dst_neigh_lookup,
274 .check = ip6_dst_check,
275 .destroy = ip6_dst_destroy,
276 .cow_metrics = dst_cow_metrics_generic,
277 .update_pmtu = dst_blackhole_update_pmtu,
278 .redirect = dst_blackhole_redirect,
279 .mtu = dst_blackhole_mtu,
280};
281
282static const u32 ip6_template_metrics[RTAX_MAX] = {
283 [RTAX_HOPLIMIT - 1] = 0,
284};
285
286static const struct fib6_info fib6_null_entry_template = {
287 .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP),
288 .fib6_protocol = RTPROT_KERNEL,
289 .fib6_metric = ~(u32)0,
290 .fib6_ref = REFCOUNT_INIT(1),
291 .fib6_type = RTN_UNREACHABLE,
292 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
293};
294
295static const struct rt6_info ip6_null_entry_template = {
296 .dst = {
297 .__rcuref = RCUREF_INIT(1),
298 .__use = 1,
299 .obsolete = DST_OBSOLETE_FORCE_CHK,
300 .error = -ENETUNREACH,
301 .input = ip6_pkt_discard,
302 .output = ip6_pkt_discard_out,
303 },
304 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
305};
306
307#ifdef CONFIG_IPV6_MULTIPLE_TABLES
308
309static const struct rt6_info ip6_prohibit_entry_template = {
310 .dst = {
311 .__rcuref = RCUREF_INIT(1),
312 .__use = 1,
313 .obsolete = DST_OBSOLETE_FORCE_CHK,
314 .error = -EACCES,
315 .input = ip6_pkt_prohibit,
316 .output = ip6_pkt_prohibit_out,
317 },
318 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
319};
320
321static const struct rt6_info ip6_blk_hole_entry_template = {
322 .dst = {
323 .__rcuref = RCUREF_INIT(1),
324 .__use = 1,
325 .obsolete = DST_OBSOLETE_FORCE_CHK,
326 .error = -EINVAL,
327 .input = dst_discard,
328 .output = dst_discard_out,
329 },
330 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
331};
332
333#endif
334
335static void rt6_info_init(struct rt6_info *rt)
336{
337 memset_after(rt, 0, dst);
338}
339
340/* allocate dst with ip6_dst_ops */
341struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
342 int flags)
343{
344 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
345 DST_OBSOLETE_FORCE_CHK, flags);
346
347 if (rt) {
348 rt6_info_init(rt);
349 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
350 }
351
352 return rt;
353}
354EXPORT_SYMBOL(ip6_dst_alloc);
355
356static void ip6_dst_destroy(struct dst_entry *dst)
357{
358 struct rt6_info *rt = dst_rt6_info(dst);
359 struct fib6_info *from;
360 struct inet6_dev *idev;
361
362 ip_dst_metrics_put(dst);
363 rt6_uncached_list_del(rt);
364
365 idev = rt->rt6i_idev;
366 if (idev) {
367 rt->rt6i_idev = NULL;
368 in6_dev_put(idev);
369 }
370
371 from = xchg((__force struct fib6_info **)&rt->from, NULL);
372 fib6_info_release(from);
373}
374
375static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
376{
377 struct rt6_info *rt = dst_rt6_info(dst);
378 struct inet6_dev *idev = rt->rt6i_idev;
379
380 if (idev && idev->dev != blackhole_netdev) {
381 struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);
382
383 if (blackhole_idev) {
384 rt->rt6i_idev = blackhole_idev;
385 in6_dev_put(idev);
386 }
387 }
388}
389
390static bool __rt6_check_expired(const struct rt6_info *rt)
391{
392 if (rt->rt6i_flags & RTF_EXPIRES)
393 return time_after(jiffies, rt->dst.expires);
394 else
395 return false;
396}
397
398static bool rt6_check_expired(const struct rt6_info *rt)
399{
400 struct fib6_info *from;
401
402 from = rcu_dereference(rt->from);
403
404 if (rt->rt6i_flags & RTF_EXPIRES) {
405 if (time_after(jiffies, rt->dst.expires))
406 return true;
407 } else if (from) {
408 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
409 fib6_check_expired(from);
410 }
411 return false;
412}
413
414void fib6_select_path(const struct net *net, struct fib6_result *res,
415 struct flowi6 *fl6, int oif, bool have_oif_match,
416 const struct sk_buff *skb, int strict)
417{
418 struct fib6_info *sibling, *next_sibling;
419 struct fib6_info *match = res->f6i;
420
421 if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
422 goto out;
423
424 if (match->nh && have_oif_match && res->nh)
425 return;
426
427 if (skb)
428 IP6CB(skb)->flags |= IP6SKB_MULTIPATH;
429
 430 /* We might have already computed the hash for ICMPv6 errors. In that
 431 * case it will always be non-zero. Otherwise now is the time to do it.
432 */
433 if (!fl6->mp_hash &&
434 (!match->nh || nexthop_is_multipath(match->nh)))
435 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
436
437 if (unlikely(match->nh)) {
438 nexthop_path_fib6_result(res, fl6->mp_hash);
439 return;
440 }
441
442 if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
443 goto out;
444
445 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
446 fib6_siblings) {
447 const struct fib6_nh *nh = sibling->fib6_nh;
448 int nh_upper_bound;
449
450 nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
451 if (fl6->mp_hash > nh_upper_bound)
452 continue;
453 if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
454 break;
455 match = sibling;
456 break;
457 }
458
459out:
460 res->f6i = match;
461 res->nh = match->fib6_nh;
462}
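
/* Illustrative sketch (not part of the original source): how a flow's
 * multipath hash is mapped onto one of several nexthops through the
 * per-nexthop upper bounds that fib6_select_path() compares against.
 * Each path owns a slice of the 32-bit hash space proportional to its
 * weight; the upper_bound[] array and the helper name below are
 * hypothetical stand-ins for the fib_nh_upper_bound values above.
 */
static int __maybe_unused example_pick_multipath(u32 mp_hash,
						 const u32 *upper_bound,
						 int num_paths)
{
	int i;

	/* upper_bound[] is sorted ascending; the first bound that is not
	 * smaller than the hash identifies the owning path, mirroring the
	 * "fl6->mp_hash <= fib_nh_upper_bound" test above.
	 */
	for (i = 0; i < num_paths; i++) {
		if (mp_hash <= upper_bound[i])
			return i;
	}

	/* with a correctly built table the last bound covers the top of
	 * the hash space, so this is only a defensive fallback
	 */
	return num_paths - 1;
}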
463
464/*
465 * Route lookup. rcu_read_lock() should be held.
466 */
467
468static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
469 const struct in6_addr *saddr, int oif, int flags)
470{
471 const struct net_device *dev;
472
473 if (nh->fib_nh_flags & RTNH_F_DEAD)
474 return false;
475
476 dev = nh->fib_nh_dev;
477 if (oif) {
478 if (dev->ifindex == oif)
479 return true;
480 } else {
481 if (ipv6_chk_addr(net, saddr, dev,
482 flags & RT6_LOOKUP_F_IFACE))
483 return true;
484 }
485
486 return false;
487}
488
489struct fib6_nh_dm_arg {
490 struct net *net;
491 const struct in6_addr *saddr;
492 int oif;
493 int flags;
494 struct fib6_nh *nh;
495};
496
497static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
498{
499 struct fib6_nh_dm_arg *arg = _arg;
500
501 arg->nh = nh;
502 return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
503 arg->flags);
504}
505
506/* returns fib6_nh from nexthop or NULL */
507static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
508 struct fib6_result *res,
509 const struct in6_addr *saddr,
510 int oif, int flags)
511{
512 struct fib6_nh_dm_arg arg = {
513 .net = net,
514 .saddr = saddr,
515 .oif = oif,
516 .flags = flags,
517 };
518
519 if (nexthop_is_blackhole(nh))
520 return NULL;
521
522 if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
523 return arg.nh;
524
525 return NULL;
526}
527
528static void rt6_device_match(struct net *net, struct fib6_result *res,
529 const struct in6_addr *saddr, int oif, int flags)
530{
531 struct fib6_info *f6i = res->f6i;
532 struct fib6_info *spf6i;
533 struct fib6_nh *nh;
534
535 if (!oif && ipv6_addr_any(saddr)) {
536 if (unlikely(f6i->nh)) {
537 nh = nexthop_fib6_nh(f6i->nh);
538 if (nexthop_is_blackhole(f6i->nh))
539 goto out_blackhole;
540 } else {
541 nh = f6i->fib6_nh;
542 }
543 if (!(nh->fib_nh_flags & RTNH_F_DEAD))
544 goto out;
545 }
546
547 for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
548 bool matched = false;
549
550 if (unlikely(spf6i->nh)) {
551 nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
552 oif, flags);
553 if (nh)
554 matched = true;
555 } else {
556 nh = spf6i->fib6_nh;
557 if (__rt6_device_match(net, nh, saddr, oif, flags))
558 matched = true;
559 }
560 if (matched) {
561 res->f6i = spf6i;
562 goto out;
563 }
564 }
565
566 if (oif && flags & RT6_LOOKUP_F_IFACE) {
567 res->f6i = net->ipv6.fib6_null_entry;
568 nh = res->f6i->fib6_nh;
569 goto out;
570 }
571
572 if (unlikely(f6i->nh)) {
573 nh = nexthop_fib6_nh(f6i->nh);
574 if (nexthop_is_blackhole(f6i->nh))
575 goto out_blackhole;
576 } else {
577 nh = f6i->fib6_nh;
578 }
579
580 if (nh->fib_nh_flags & RTNH_F_DEAD) {
581 res->f6i = net->ipv6.fib6_null_entry;
582 nh = res->f6i->fib6_nh;
583 }
584out:
585 res->nh = nh;
586 res->fib6_type = res->f6i->fib6_type;
587 res->fib6_flags = res->f6i->fib6_flags;
588 return;
589
590out_blackhole:
591 res->fib6_flags |= RTF_REJECT;
592 res->fib6_type = RTN_BLACKHOLE;
593 res->nh = nh;
594}
595
596#ifdef CONFIG_IPV6_ROUTER_PREF
597struct __rt6_probe_work {
598 struct work_struct work;
599 struct in6_addr target;
600 struct net_device *dev;
601 netdevice_tracker dev_tracker;
602};
603
604static void rt6_probe_deferred(struct work_struct *w)
605{
606 struct in6_addr mcaddr;
607 struct __rt6_probe_work *work =
608 container_of(w, struct __rt6_probe_work, work);
609
610 addrconf_addr_solict_mult(&work->target, &mcaddr);
611 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
612 netdev_put(work->dev, &work->dev_tracker);
613 kfree(work);
614}
615
616static void rt6_probe(struct fib6_nh *fib6_nh)
617{
618 struct __rt6_probe_work *work = NULL;
619 const struct in6_addr *nh_gw;
620 unsigned long last_probe;
621 struct neighbour *neigh;
622 struct net_device *dev;
623 struct inet6_dev *idev;
624
625 /*
 626 * This may not seem appropriate here, but we need
 627 * to check whether the router really is reachable;
 628 * aka Router Reachability Probing.
629 *
630 * Router Reachability Probe MUST be rate-limited
631 * to no more than one per minute.
632 */
633 if (!fib6_nh->fib_nh_gw_family)
634 return;
635
636 nh_gw = &fib6_nh->fib_nh_gw6;
637 dev = fib6_nh->fib_nh_dev;
638 rcu_read_lock();
639 last_probe = READ_ONCE(fib6_nh->last_probe);
640 idev = __in6_dev_get(dev);
641 if (!idev)
642 goto out;
643 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
644 if (neigh) {
645 if (READ_ONCE(neigh->nud_state) & NUD_VALID)
646 goto out;
647
648 write_lock_bh(&neigh->lock);
649 if (!(neigh->nud_state & NUD_VALID) &&
650 time_after(jiffies,
651 neigh->updated +
652 READ_ONCE(idev->cnf.rtr_probe_interval))) {
653 work = kmalloc(sizeof(*work), GFP_ATOMIC);
654 if (work)
655 __neigh_set_probe_once(neigh);
656 }
657 write_unlock_bh(&neigh->lock);
658 } else if (time_after(jiffies, last_probe +
659 READ_ONCE(idev->cnf.rtr_probe_interval))) {
660 work = kmalloc(sizeof(*work), GFP_ATOMIC);
661 }
662
663 if (!work || cmpxchg(&fib6_nh->last_probe,
664 last_probe, jiffies) != last_probe) {
665 kfree(work);
666 } else {
667 INIT_WORK(&work->work, rt6_probe_deferred);
668 work->target = *nh_gw;
669 netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
670 work->dev = dev;
671 schedule_work(&work->work);
672 }
673
674out:
675 rcu_read_unlock();
676}
677#else
678static inline void rt6_probe(struct fib6_nh *fib6_nh)
679{
680}
681#endif
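
/* Illustrative sketch (not part of the original source): the rate-limiting
 * idea behind rt6_probe() above.  A probe is only scheduled by the caller
 * that wins the cmpxchg on the recorded timestamp, so concurrent lookups
 * cannot flood a router with neighbour solicitations.  The parameters below
 * are hypothetical; the real code keys off fib6_nh->last_probe and
 * idev->cnf.rtr_probe_interval.
 */
static bool __maybe_unused example_claim_probe_slot(unsigned long *last_probe,
						    unsigned long min_interval)
{
	unsigned long old = READ_ONCE(*last_probe);

	if (!time_after(jiffies, old + min_interval))
		return false;

	/* exactly one caller per interval sees the old value and wins */
	return cmpxchg(last_probe, old, jiffies) == old;
}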
682
683/*
684 * Default Router Selection (RFC 2461 6.3.6)
685 */
686static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
687{
688 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
689 struct neighbour *neigh;
690
691 rcu_read_lock();
692 neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
693 &fib6_nh->fib_nh_gw6);
694 if (neigh) {
695 u8 nud_state = READ_ONCE(neigh->nud_state);
696
697 if (nud_state & NUD_VALID)
698 ret = RT6_NUD_SUCCEED;
699#ifdef CONFIG_IPV6_ROUTER_PREF
700 else if (!(nud_state & NUD_FAILED))
701 ret = RT6_NUD_SUCCEED;
702 else
703 ret = RT6_NUD_FAIL_PROBE;
704#endif
705 } else {
706 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
707 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
708 }
709 rcu_read_unlock();
710
711 return ret;
712}
713
714static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
715 int strict)
716{
717 int m = 0;
718
719 if (!oif || nh->fib_nh_dev->ifindex == oif)
720 m = 2;
721
722 if (!m && (strict & RT6_LOOKUP_F_IFACE))
723 return RT6_NUD_FAIL_HARD;
724#ifdef CONFIG_IPV6_ROUTER_PREF
725 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
726#endif
727 if ((strict & RT6_LOOKUP_F_REACHABLE) &&
728 !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
729 int n = rt6_check_neigh(nh);
730 if (n < 0)
731 return n;
732 }
733 return m;
734}
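
/* Illustrative sketch (not part of the original source): how the two score
 * components computed by rt6_score_route() combine.  An interface match
 * contributes the low bits (2) and the decoded router preference is shifted
 * above it, so preference dominates once the interface constraint is
 * satisfied.  The "decoded_pref" argument is a hypothetical stand-in for
 * IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)).
 */
static int __maybe_unused example_route_score(bool oif_match, int decoded_pref)
{
	int m = oif_match ? 2 : 0;

	return m | (decoded_pref << 2);
}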
735
736static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
737 int oif, int strict, int *mpri, bool *do_rr)
738{
739 bool match_do_rr = false;
740 bool rc = false;
741 int m;
742
743 if (nh->fib_nh_flags & RTNH_F_DEAD)
744 goto out;
745
746 if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
747 nh->fib_nh_flags & RTNH_F_LINKDOWN &&
748 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
749 goto out;
750
751 m = rt6_score_route(nh, fib6_flags, oif, strict);
752 if (m == RT6_NUD_FAIL_DO_RR) {
753 match_do_rr = true;
754 m = 0; /* lowest valid score */
755 } else if (m == RT6_NUD_FAIL_HARD) {
756 goto out;
757 }
758
759 if (strict & RT6_LOOKUP_F_REACHABLE)
760 rt6_probe(nh);
761
762 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
763 if (m > *mpri) {
764 *do_rr = match_do_rr;
765 *mpri = m;
766 rc = true;
767 }
768out:
769 return rc;
770}
771
772struct fib6_nh_frl_arg {
773 u32 flags;
774 int oif;
775 int strict;
776 int *mpri;
777 bool *do_rr;
778 struct fib6_nh *nh;
779};
780
781static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
782{
783 struct fib6_nh_frl_arg *arg = _arg;
784
785 arg->nh = nh;
786 return find_match(nh, arg->flags, arg->oif, arg->strict,
787 arg->mpri, arg->do_rr);
788}
789
790static void __find_rr_leaf(struct fib6_info *f6i_start,
791 struct fib6_info *nomatch, u32 metric,
792 struct fib6_result *res, struct fib6_info **cont,
793 int oif, int strict, bool *do_rr, int *mpri)
794{
795 struct fib6_info *f6i;
796
797 for (f6i = f6i_start;
798 f6i && f6i != nomatch;
799 f6i = rcu_dereference(f6i->fib6_next)) {
800 bool matched = false;
801 struct fib6_nh *nh;
802
803 if (cont && f6i->fib6_metric != metric) {
804 *cont = f6i;
805 return;
806 }
807
808 if (fib6_check_expired(f6i))
809 continue;
810
811 if (unlikely(f6i->nh)) {
812 struct fib6_nh_frl_arg arg = {
813 .flags = f6i->fib6_flags,
814 .oif = oif,
815 .strict = strict,
816 .mpri = mpri,
817 .do_rr = do_rr
818 };
819
820 if (nexthop_is_blackhole(f6i->nh)) {
821 res->fib6_flags = RTF_REJECT;
822 res->fib6_type = RTN_BLACKHOLE;
823 res->f6i = f6i;
824 res->nh = nexthop_fib6_nh(f6i->nh);
825 return;
826 }
827 if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
828 &arg)) {
829 matched = true;
830 nh = arg.nh;
831 }
832 } else {
833 nh = f6i->fib6_nh;
834 if (find_match(nh, f6i->fib6_flags, oif, strict,
835 mpri, do_rr))
836 matched = true;
837 }
838 if (matched) {
839 res->f6i = f6i;
840 res->nh = nh;
841 res->fib6_flags = f6i->fib6_flags;
842 res->fib6_type = f6i->fib6_type;
843 }
844 }
845}
846
847static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
848 struct fib6_info *rr_head, int oif, int strict,
849 bool *do_rr, struct fib6_result *res)
850{
851 u32 metric = rr_head->fib6_metric;
852 struct fib6_info *cont = NULL;
853 int mpri = -1;
854
855 __find_rr_leaf(rr_head, NULL, metric, res, &cont,
856 oif, strict, do_rr, &mpri);
857
858 __find_rr_leaf(leaf, rr_head, metric, res, &cont,
859 oif, strict, do_rr, &mpri);
860
861 if (res->f6i || !cont)
862 return;
863
864 __find_rr_leaf(cont, NULL, metric, res, NULL,
865 oif, strict, do_rr, &mpri);
866}
867
868static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
869 struct fib6_result *res, int strict)
870{
871 struct fib6_info *leaf = rcu_dereference(fn->leaf);
872 struct fib6_info *rt0;
873 bool do_rr = false;
874 int key_plen;
875
 876 /* make sure this function or its helpers always set res->f6i */
877 res->f6i = NULL;
878
879 if (!leaf || leaf == net->ipv6.fib6_null_entry)
880 goto out;
881
882 rt0 = rcu_dereference(fn->rr_ptr);
883 if (!rt0)
884 rt0 = leaf;
885
886 /* Double check to make sure fn is not an intermediate node
 887 * and fn->leaf does not point to its child's leaf
888 * (This might happen if all routes under fn are deleted from
889 * the tree and fib6_repair_tree() is called on the node.)
890 */
891 key_plen = rt0->fib6_dst.plen;
892#ifdef CONFIG_IPV6_SUBTREES
893 if (rt0->fib6_src.plen)
894 key_plen = rt0->fib6_src.plen;
895#endif
896 if (fn->fn_bit != key_plen)
897 goto out;
898
899 find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
900 if (do_rr) {
901 struct fib6_info *next = rcu_dereference(rt0->fib6_next);
902
903 /* no entries matched; do round-robin */
904 if (!next || next->fib6_metric != rt0->fib6_metric)
905 next = leaf;
906
907 if (next != rt0) {
908 spin_lock_bh(&leaf->fib6_table->tb6_lock);
909 /* make sure next is not being deleted from the tree */
910 if (next->fib6_node)
911 rcu_assign_pointer(fn->rr_ptr, next);
912 spin_unlock_bh(&leaf->fib6_table->tb6_lock);
913 }
914 }
915
916out:
917 if (!res->f6i) {
918 res->f6i = net->ipv6.fib6_null_entry;
919 res->nh = res->f6i->fib6_nh;
920 res->fib6_flags = res->f6i->fib6_flags;
921 res->fib6_type = res->f6i->fib6_type;
922 }
923}
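
/* Illustrative sketch (not part of the original source): the round-robin
 * step at the end of rt6_select().  When no candidate was known to be
 * reachable (do_rr), the starting point for the next lookup advances to the
 * following route of the same metric, otherwise it wraps back to the first
 * leaf.  Array-based stand-in with hypothetical names:
 */
static int __maybe_unused example_rr_next(const u32 *metric, int cur,
					  int nroutes)
{
	int next = cur + 1;

	/* only rotate among equal-cost siblings; otherwise restart */
	if (next >= nroutes || metric[next] != metric[cur])
		next = 0;

	return next;
}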
924
925static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
926{
927 return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
928 res->nh->fib_nh_gw_family;
929}
930
931#ifdef CONFIG_IPV6_ROUTE_INFO
932int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
933 const struct in6_addr *gwaddr)
934{
935 struct net *net = dev_net(dev);
936 struct route_info *rinfo = (struct route_info *) opt;
937 struct in6_addr prefix_buf, *prefix;
938 struct fib6_table *table;
939 unsigned int pref;
940 unsigned long lifetime;
941 struct fib6_info *rt;
942
943 if (len < sizeof(struct route_info)) {
944 return -EINVAL;
945 }
946
947 /* Sanity check for prefix_len and length */
948 if (rinfo->length > 3) {
949 return -EINVAL;
950 } else if (rinfo->prefix_len > 128) {
951 return -EINVAL;
952 } else if (rinfo->prefix_len > 64) {
953 if (rinfo->length < 2) {
954 return -EINVAL;
955 }
956 } else if (rinfo->prefix_len > 0) {
957 if (rinfo->length < 1) {
958 return -EINVAL;
959 }
960 }
961
962 pref = rinfo->route_pref;
963 if (pref == ICMPV6_ROUTER_PREF_INVALID)
964 return -EINVAL;
965
966 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
967
968 if (rinfo->length == 3)
969 prefix = (struct in6_addr *)rinfo->prefix;
970 else {
971 /* this function is safe */
972 ipv6_addr_prefix(&prefix_buf,
973 (struct in6_addr *)rinfo->prefix,
974 rinfo->prefix_len);
975 prefix = &prefix_buf;
976 }
977
978 if (rinfo->prefix_len == 0)
979 rt = rt6_get_dflt_router(net, gwaddr, dev);
980 else
981 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
982 gwaddr, dev);
983
984 if (rt && !lifetime) {
985 ip6_del_rt(net, rt, false);
986 rt = NULL;
987 }
988
989 if (!rt && lifetime)
990 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
991 dev, pref);
992 else if (rt)
993 rt->fib6_flags = RTF_ROUTEINFO |
994 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
995
996 if (rt) {
997 table = rt->fib6_table;
998 spin_lock_bh(&table->tb6_lock);
999
1000 if (!addrconf_finite_timeout(lifetime)) {
1001 fib6_clean_expires(rt);
1002 fib6_remove_gc_list(rt);
1003 } else {
1004 fib6_set_expires(rt, jiffies + HZ * lifetime);
1005 fib6_add_gc_list(rt);
1006 }
1007
1008 spin_unlock_bh(&table->tb6_lock);
1009
1010 fib6_info_release(rt);
1011 }
1012 return 0;
1013}
1014#endif
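
/* Illustrative sketch (not part of the original source): the sanity check
 * applied at the top of rt6_route_rcv() to an RFC 4191 Route Information
 * Option.  "length" counts 8-octet units (at most 3) and has to be
 * consistent with "prefix_len"; the rule below simply mirrors the checks
 * above as a hypothetical predicate.
 */
static bool __maybe_unused example_rio_sane(u8 length, u8 prefix_len)
{
	if (length > 3 || prefix_len > 128)
		return false;
	if (prefix_len > 64)
		return length >= 2;
	if (prefix_len > 0)
		return length >= 1;

	/* prefix_len == 0 describes a default route */
	return true;
}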
1015
1016/*
1017 * Misc support functions
1018 */
1019
1020/* called with rcu_lock held */
1021static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1022{
1023 struct net_device *dev = res->nh->fib_nh_dev;
1024
1025 if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
1026 /* for copies of local routes, dst->dev needs to be the
1027 * device if it is a master device, the master device if
1028 * device is enslaved, and the loopback as the default
1029 */
1030 if (netif_is_l3_slave(dev) &&
1031 !rt6_need_strict(&res->f6i->fib6_dst.addr))
1032 dev = l3mdev_master_dev_rcu(dev);
1033 else if (!netif_is_l3_master(dev))
1034 dev = dev_net(dev)->loopback_dev;
1035 /* last case is netif_is_l3_master(dev) is true in which
1036 * case we want dev returned to be dev
1037 */
1038 }
1039
1040 return dev;
1041}
1042
1043static const int fib6_prop[RTN_MAX + 1] = {
1044 [RTN_UNSPEC] = 0,
1045 [RTN_UNICAST] = 0,
1046 [RTN_LOCAL] = 0,
1047 [RTN_BROADCAST] = 0,
1048 [RTN_ANYCAST] = 0,
1049 [RTN_MULTICAST] = 0,
1050 [RTN_BLACKHOLE] = -EINVAL,
1051 [RTN_UNREACHABLE] = -EHOSTUNREACH,
1052 [RTN_PROHIBIT] = -EACCES,
1053 [RTN_THROW] = -EAGAIN,
1054 [RTN_NAT] = -EINVAL,
1055 [RTN_XRESOLVE] = -EINVAL,
1056};
1057
1058static int ip6_rt_type_to_error(u8 fib6_type)
1059{
1060 return fib6_prop[fib6_type];
1061}
1062
1063static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
1064{
1065 unsigned short flags = 0;
1066
1067 if (rt->dst_nocount)
1068 flags |= DST_NOCOUNT;
1069 if (rt->dst_nopolicy)
1070 flags |= DST_NOPOLICY;
1071
1072 return flags;
1073}
1074
1075static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1076{
1077 rt->dst.error = ip6_rt_type_to_error(fib6_type);
1078
1079 switch (fib6_type) {
1080 case RTN_BLACKHOLE:
1081 rt->dst.output = dst_discard_out;
1082 rt->dst.input = dst_discard;
1083 break;
1084 case RTN_PROHIBIT:
1085 rt->dst.output = ip6_pkt_prohibit_out;
1086 rt->dst.input = ip6_pkt_prohibit;
1087 break;
1088 case RTN_THROW:
1089 case RTN_UNREACHABLE:
1090 default:
1091 rt->dst.output = ip6_pkt_discard_out;
1092 rt->dst.input = ip6_pkt_discard;
1093 break;
1094 }
1095}
1096
1097static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1098{
1099 struct fib6_info *f6i = res->f6i;
1100
1101 if (res->fib6_flags & RTF_REJECT) {
1102 ip6_rt_init_dst_reject(rt, res->fib6_type);
1103 return;
1104 }
1105
1106 rt->dst.error = 0;
1107 rt->dst.output = ip6_output;
1108
1109 if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1110 rt->dst.input = ip6_input;
1111 } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1112 rt->dst.input = ip6_mc_input;
1113 } else {
1114 rt->dst.input = ip6_forward;
1115 }
1116
1117 if (res->nh->fib_nh_lws) {
1118 rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1119 lwtunnel_set_redirect(&rt->dst);
1120 }
1121
1122 rt->dst.lastuse = jiffies;
1123}
1124
1125/* Caller must already hold reference to @from */
1126static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1127{
1128 rt->rt6i_flags &= ~RTF_EXPIRES;
1129 rcu_assign_pointer(rt->from, from);
1130 ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1131}
1132
1133/* Caller must already hold reference to f6i in result */
1134static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1135{
1136 const struct fib6_nh *nh = res->nh;
1137 const struct net_device *dev = nh->fib_nh_dev;
1138 struct fib6_info *f6i = res->f6i;
1139
1140 ip6_rt_init_dst(rt, res);
1141
1142 rt->rt6i_dst = f6i->fib6_dst;
1143 rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1144 rt->rt6i_flags = res->fib6_flags;
1145 if (nh->fib_nh_gw_family) {
1146 rt->rt6i_gateway = nh->fib_nh_gw6;
1147 rt->rt6i_flags |= RTF_GATEWAY;
1148 }
1149 rt6_set_from(rt, f6i);
1150#ifdef CONFIG_IPV6_SUBTREES
1151 rt->rt6i_src = f6i->fib6_src;
1152#endif
1153}
1154
1155static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1156 struct in6_addr *saddr)
1157{
1158 struct fib6_node *pn, *sn;
1159 while (1) {
1160 if (fn->fn_flags & RTN_TL_ROOT)
1161 return NULL;
1162 pn = rcu_dereference(fn->parent);
1163 sn = FIB6_SUBTREE(pn);
1164 if (sn && sn != fn)
1165 fn = fib6_node_lookup(sn, NULL, saddr);
1166 else
1167 fn = pn;
1168 if (fn->fn_flags & RTN_RTINFO)
1169 return fn;
1170 }
1171}
1172
1173static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1174{
1175 struct rt6_info *rt = *prt;
1176
1177 if (dst_hold_safe(&rt->dst))
1178 return true;
1179 if (net) {
1180 rt = net->ipv6.ip6_null_entry;
1181 dst_hold(&rt->dst);
1182 } else {
1183 rt = NULL;
1184 }
1185 *prt = rt;
1186 return false;
1187}
1188
1189/* called with rcu_lock held */
1190static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1191{
1192 struct net_device *dev = res->nh->fib_nh_dev;
1193 struct fib6_info *f6i = res->f6i;
1194 unsigned short flags;
1195 struct rt6_info *nrt;
1196
1197 if (!fib6_info_hold_safe(f6i))
1198 goto fallback;
1199
1200 flags = fib6_info_dst_flags(f6i);
1201 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1202 if (!nrt) {
1203 fib6_info_release(f6i);
1204 goto fallback;
1205 }
1206
1207 ip6_rt_copy_init(nrt, res);
1208 return nrt;
1209
1210fallback:
1211 nrt = dev_net(dev)->ipv6.ip6_null_entry;
1212 dst_hold(&nrt->dst);
1213 return nrt;
1214}
1215
1216INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
1217 struct fib6_table *table,
1218 struct flowi6 *fl6,
1219 const struct sk_buff *skb,
1220 int flags)
1221{
1222 struct fib6_result res = {};
1223 struct fib6_node *fn;
1224 struct rt6_info *rt;
1225
1226 rcu_read_lock();
1227 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1228restart:
1229 res.f6i = rcu_dereference(fn->leaf);
1230 if (!res.f6i)
1231 res.f6i = net->ipv6.fib6_null_entry;
1232 else
1233 rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1234 flags);
1235
1236 if (res.f6i == net->ipv6.fib6_null_entry) {
1237 fn = fib6_backtrack(fn, &fl6->saddr);
1238 if (fn)
1239 goto restart;
1240
1241 rt = net->ipv6.ip6_null_entry;
1242 dst_hold(&rt->dst);
1243 goto out;
1244 } else if (res.fib6_flags & RTF_REJECT) {
1245 goto do_create;
1246 }
1247
1248 fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1249 fl6->flowi6_oif != 0, skb, flags);
1250
1251 /* Search through exception table */
1252 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1253 if (rt) {
1254 if (ip6_hold_safe(net, &rt))
1255 dst_use_noref(&rt->dst, jiffies);
1256 } else {
1257do_create:
1258 rt = ip6_create_rt_rcu(&res);
1259 }
1260
1261out:
1262 trace_fib6_table_lookup(net, &res, table, fl6);
1263
1264 rcu_read_unlock();
1265
1266 return rt;
1267}
1268
1269struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
1270 const struct sk_buff *skb, int flags)
1271{
1272 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
1273}
1274EXPORT_SYMBOL_GPL(ip6_route_lookup);
1275
1276struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1277 const struct in6_addr *saddr, int oif,
1278 const struct sk_buff *skb, int strict)
1279{
1280 struct flowi6 fl6 = {
1281 .flowi6_oif = oif,
1282 .daddr = *daddr,
1283 };
1284 struct dst_entry *dst;
1285 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1286
1287 if (saddr) {
1288 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1289 flags |= RT6_LOOKUP_F_HAS_SADDR;
1290 }
1291
1292 dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1293 if (dst->error == 0)
1294 return dst_rt6_info(dst);
1295
1296 dst_release(dst);
1297
1298 return NULL;
1299}
1300EXPORT_SYMBOL(rt6_lookup);
1301
1302/* ip6_ins_rt is called with table->tb6_lock NOT held (it takes the lock itself).
 1303 * It takes a new route entry; if the addition fails for any reason, the
 1304 * route is released.
 1305 * The caller must hold a dst reference before calling it.
1306 */
1307
1308static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1309 struct netlink_ext_ack *extack)
1310{
1311 int err;
1312 struct fib6_table *table;
1313
1314 table = rt->fib6_table;
1315 spin_lock_bh(&table->tb6_lock);
1316 err = fib6_add(&table->tb6_root, rt, info, extack);
1317 spin_unlock_bh(&table->tb6_lock);
1318
1319 return err;
1320}
1321
1322int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1323{
1324 struct nl_info info = { .nl_net = net, };
1325
1326 return __ip6_ins_rt(rt, &info, NULL);
1327}
1328
1329static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1330 const struct in6_addr *daddr,
1331 const struct in6_addr *saddr)
1332{
1333 struct fib6_info *f6i = res->f6i;
1334 struct net_device *dev;
1335 struct rt6_info *rt;
1336
1337 /*
1338 * Clone the route.
1339 */
1340
1341 if (!fib6_info_hold_safe(f6i))
1342 return NULL;
1343
1344 dev = ip6_rt_get_dev_rcu(res);
1345 rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1346 if (!rt) {
1347 fib6_info_release(f6i);
1348 return NULL;
1349 }
1350
1351 ip6_rt_copy_init(rt, res);
1352 rt->rt6i_flags |= RTF_CACHE;
1353 rt->rt6i_dst.addr = *daddr;
1354 rt->rt6i_dst.plen = 128;
1355
1356 if (!rt6_is_gw_or_nonexthop(res)) {
1357 if (f6i->fib6_dst.plen != 128 &&
1358 ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1359 rt->rt6i_flags |= RTF_ANYCAST;
1360#ifdef CONFIG_IPV6_SUBTREES
1361 if (rt->rt6i_src.plen && saddr) {
1362 rt->rt6i_src.addr = *saddr;
1363 rt->rt6i_src.plen = 128;
1364 }
1365#endif
1366 }
1367
1368 return rt;
1369}
1370
1371static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1372{
1373 struct fib6_info *f6i = res->f6i;
1374 unsigned short flags = fib6_info_dst_flags(f6i);
1375 struct net_device *dev;
1376 struct rt6_info *pcpu_rt;
1377
1378 if (!fib6_info_hold_safe(f6i))
1379 return NULL;
1380
1381 rcu_read_lock();
1382 dev = ip6_rt_get_dev_rcu(res);
1383 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
1384 rcu_read_unlock();
1385 if (!pcpu_rt) {
1386 fib6_info_release(f6i);
1387 return NULL;
1388 }
1389 ip6_rt_copy_init(pcpu_rt, res);
1390 pcpu_rt->rt6i_flags |= RTF_PCPU;
1391
1392 if (f6i->nh)
1393 pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
1394
1395 return pcpu_rt;
1396}
1397
1398static bool rt6_is_valid(const struct rt6_info *rt6)
1399{
1400 return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
1401}
1402
1403/* It should be called with rcu_read_lock() acquired */
1404static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1405{
1406 struct rt6_info *pcpu_rt;
1407
1408 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1409
1410 if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
1411 struct rt6_info *prev, **p;
1412
1413 p = this_cpu_ptr(res->nh->rt6i_pcpu);
1414 /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
1415 prev = xchg(p, NULL);
1416 if (prev) {
1417 dst_dev_put(&prev->dst);
1418 dst_release(&prev->dst);
1419 }
1420
1421 pcpu_rt = NULL;
1422 }
1423
1424 return pcpu_rt;
1425}
1426
1427static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1428 const struct fib6_result *res)
1429{
1430 struct rt6_info *pcpu_rt, *prev, **p;
1431
1432 pcpu_rt = ip6_rt_pcpu_alloc(res);
1433 if (!pcpu_rt)
1434 return NULL;
1435
1436 p = this_cpu_ptr(res->nh->rt6i_pcpu);
1437 prev = cmpxchg(p, NULL, pcpu_rt);
1438 BUG_ON(prev);
1439
1440 if (res->f6i->fib6_destroying) {
1441 struct fib6_info *from;
1442
1443 from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
1444 fib6_info_release(from);
1445 }
1446
1447 return pcpu_rt;
1448}
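
/* Illustrative sketch (not part of the original source): the install-once
 * pattern used by rt6_make_pcpu_route() above.  The slot is per-CPU and was
 * just observed empty by this CPU, and installs only ever happen locally,
 * so the cmpxchg is expected to succeed; BUG_ON(prev) asserts that
 * invariant rather than handling a race.  Generic stand-in with
 * hypothetical names:
 */
static bool __maybe_unused example_install_once(void **slot, void *obj)
{
	/* true if obj was installed, false if the slot was already taken */
	return cmpxchg(slot, NULL, obj) == NULL;
}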
1449
1450/* exception hash table implementation
1451 */
1452static DEFINE_SPINLOCK(rt6_exception_lock);
1453
1454/* Remove rt6_ex from hash table and free the memory
1455 * Caller must hold rt6_exception_lock
1456 */
1457static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1458 struct rt6_exception *rt6_ex)
1459{
1460 struct fib6_info *from;
1461 struct net *net;
1462
1463 if (!bucket || !rt6_ex)
1464 return;
1465
1466 net = dev_net(rt6_ex->rt6i->dst.dev);
1467 net->ipv6.rt6_stats->fib_rt_cache--;
1468
 1469 /* completely purge the exception to allow releasing the held resources:
 1470 * some [sk] cache may keep the dst around for an unlimited time
1471 */
1472 from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
1473 fib6_info_release(from);
1474 dst_dev_put(&rt6_ex->rt6i->dst);
1475
1476 hlist_del_rcu(&rt6_ex->hlist);
1477 dst_release(&rt6_ex->rt6i->dst);
1478 kfree_rcu(rt6_ex, rcu);
1479 WARN_ON_ONCE(!bucket->depth);
1480 bucket->depth--;
1481}
1482
1483/* Remove oldest rt6_ex in bucket and free the memory
1484 * Caller must hold rt6_exception_lock
1485 */
1486static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1487{
1488 struct rt6_exception *rt6_ex, *oldest = NULL;
1489
1490 if (!bucket)
1491 return;
1492
1493 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1494 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1495 oldest = rt6_ex;
1496 }
1497 rt6_remove_exception(bucket, oldest);
1498}
1499
1500static u32 rt6_exception_hash(const struct in6_addr *dst,
1501 const struct in6_addr *src)
1502{
1503 static siphash_aligned_key_t rt6_exception_key;
1504 struct {
1505 struct in6_addr dst;
1506 struct in6_addr src;
1507 } __aligned(SIPHASH_ALIGNMENT) combined = {
1508 .dst = *dst,
1509 };
1510 u64 val;
1511
1512 net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
1513
1514#ifdef CONFIG_IPV6_SUBTREES
1515 if (src)
1516 combined.src = *src;
1517#endif
1518 val = siphash(&combined, sizeof(combined), &rt6_exception_key);
1519
1520 return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1521}
1522
1523/* Helper function to find the cached rt in the hash table
1524 * and update bucket pointer to point to the bucket for this
1525 * (daddr, saddr) pair
1526 * Caller must hold rt6_exception_lock
1527 */
1528static struct rt6_exception *
1529__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1530 const struct in6_addr *daddr,
1531 const struct in6_addr *saddr)
1532{
1533 struct rt6_exception *rt6_ex;
1534 u32 hval;
1535
1536 if (!(*bucket) || !daddr)
1537 return NULL;
1538
1539 hval = rt6_exception_hash(daddr, saddr);
1540 *bucket += hval;
1541
1542 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1543 struct rt6_info *rt6 = rt6_ex->rt6i;
1544 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1545
1546#ifdef CONFIG_IPV6_SUBTREES
1547 if (matched && saddr)
1548 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1549#endif
1550 if (matched)
1551 return rt6_ex;
1552 }
1553 return NULL;
1554}
1555
1556/* Helper function to find the cached rt in the hash table
1557 * and update bucket pointer to point to the bucket for this
1558 * (daddr, saddr) pair
1559 * Caller must hold rcu_read_lock()
1560 */
1561static struct rt6_exception *
1562__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1563 const struct in6_addr *daddr,
1564 const struct in6_addr *saddr)
1565{
1566 struct rt6_exception *rt6_ex;
1567 u32 hval;
1568
1569 WARN_ON_ONCE(!rcu_read_lock_held());
1570
1571 if (!(*bucket) || !daddr)
1572 return NULL;
1573
1574 hval = rt6_exception_hash(daddr, saddr);
1575 *bucket += hval;
1576
1577 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1578 struct rt6_info *rt6 = rt6_ex->rt6i;
1579 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1580
1581#ifdef CONFIG_IPV6_SUBTREES
1582 if (matched && saddr)
1583 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1584#endif
1585 if (matched)
1586 return rt6_ex;
1587 }
1588 return NULL;
1589}
1590
1591static unsigned int fib6_mtu(const struct fib6_result *res)
1592{
1593 const struct fib6_nh *nh = res->nh;
1594 unsigned int mtu;
1595
1596 if (res->f6i->fib6_pmtu) {
1597 mtu = res->f6i->fib6_pmtu;
1598 } else {
1599 struct net_device *dev = nh->fib_nh_dev;
1600 struct inet6_dev *idev;
1601
1602 rcu_read_lock();
1603 idev = __in6_dev_get(dev);
1604 mtu = READ_ONCE(idev->cnf.mtu6);
1605 rcu_read_unlock();
1606 }
1607
1608 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1609
1610 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1611}
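
/* Illustrative sketch (not part of the original source): the MTU selection
 * order implemented by fib6_mtu() above - a route-specific PMTU wins over
 * the device MTU, the result is clamped to IP6_MAX_MTU, and any lightweight
 * tunnel encapsulation headroom is subtracted.  The numbers in the comment
 * are made up.
 */
static unsigned int __maybe_unused
example_effective_mtu(unsigned int route_pmtu, unsigned int dev_mtu,
		      unsigned int lwt_headroom)
{
	unsigned int mtu = route_pmtu ? route_pmtu : dev_mtu;

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	/* e.g. dev_mtu 1500 with 20 bytes of tunnel headroom -> 1480 */
	return mtu - lwt_headroom;
}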
1612
1613#define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL
1614
1615/* used when the flushed bit is not relevant and only access to the bucket
 1616 * is needed (i.e., all bucket users except rt6_insert_exception);
1617 *
1618 * called under rcu lock; sometimes called with rt6_exception_lock held
1619 */
1620static
1621struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1622 spinlock_t *lock)
1623{
1624 struct rt6_exception_bucket *bucket;
1625
1626 if (lock)
1627 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1628 lockdep_is_held(lock));
1629 else
1630 bucket = rcu_dereference(nh->rt6i_exception_bucket);
1631
1632 /* remove bucket flushed bit if set */
1633 if (bucket) {
1634 unsigned long p = (unsigned long)bucket;
1635
1636 p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1637 bucket = (struct rt6_exception_bucket *)p;
1638 }
1639
1640 return bucket;
1641}
1642
1643static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1644{
1645 unsigned long p = (unsigned long)bucket;
1646
1647 return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1648}
1649
1650/* called with rt6_exception_lock held */
1651static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1652 spinlock_t *lock)
1653{
1654 struct rt6_exception_bucket *bucket;
1655 unsigned long p;
1656
1657 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1658 lockdep_is_held(lock));
1659
1660 p = (unsigned long)bucket;
1661 p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1662 bucket = (struct rt6_exception_bucket *)p;
1663 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1664}
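
/* Illustrative sketch (not part of the original source): the pointer
 * tagging used for rt6i_exception_bucket by the helpers above.  The bucket
 * array is at least pointer-aligned, so bit 0 of a real address is always
 * clear and can carry the FIB6_EXCEPTION_BUCKET_FLUSHED flag in-band.
 * Hypothetical helpers:
 */
static void * __maybe_unused example_untag_bucket(void *tagged)
{
	return (void *)((unsigned long)tagged & ~FIB6_EXCEPTION_BUCKET_FLUSHED);
}

static bool __maybe_unused example_bucket_is_flushed(void *tagged)
{
	return (unsigned long)tagged & FIB6_EXCEPTION_BUCKET_FLUSHED;
}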
1665
1666static int rt6_insert_exception(struct rt6_info *nrt,
1667 const struct fib6_result *res)
1668{
1669 struct net *net = dev_net(nrt->dst.dev);
1670 struct rt6_exception_bucket *bucket;
1671 struct fib6_info *f6i = res->f6i;
1672 struct in6_addr *src_key = NULL;
1673 struct rt6_exception *rt6_ex;
1674 struct fib6_nh *nh = res->nh;
1675 int max_depth;
1676 int err = 0;
1677
1678 spin_lock_bh(&rt6_exception_lock);
1679
1680 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1681 lockdep_is_held(&rt6_exception_lock));
1682 if (!bucket) {
1683 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1684 GFP_ATOMIC);
1685 if (!bucket) {
1686 err = -ENOMEM;
1687 goto out;
1688 }
1689 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1690 } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1691 err = -EINVAL;
1692 goto out;
1693 }
1694
1695#ifdef CONFIG_IPV6_SUBTREES
1696 /* fib6_src.plen != 0 indicates f6i is in subtree
1697 * and exception table is indexed by a hash of
1698 * both fib6_dst and fib6_src.
1699 * Otherwise, the exception table is indexed by
1700 * a hash of only fib6_dst.
1701 */
1702 if (f6i->fib6_src.plen)
1703 src_key = &nrt->rt6i_src.addr;
1704#endif
1705 /* rt6_mtu_change() might lower mtu on f6i.
1706 * Only insert this exception route if its mtu
1707 * is less than f6i's mtu value.
1708 */
1709 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1710 err = -EINVAL;
1711 goto out;
1712 }
1713
1714 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1715 src_key);
1716 if (rt6_ex)
1717 rt6_remove_exception(bucket, rt6_ex);
1718
1719 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1720 if (!rt6_ex) {
1721 err = -ENOMEM;
1722 goto out;
1723 }
1724 rt6_ex->rt6i = nrt;
1725 rt6_ex->stamp = jiffies;
1726 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1727 bucket->depth++;
1728 net->ipv6.rt6_stats->fib_rt_cache++;
1729
 1730 /* Randomize max depth to avoid some side-channel attacks. */
1731 max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
1732 while (bucket->depth > max_depth)
1733 rt6_exception_remove_oldest(bucket);
1734
1735out:
1736 spin_unlock_bh(&rt6_exception_lock);
1737
1738 /* Update fn->fn_sernum to invalidate all cached dst */
1739 if (!err) {
1740 spin_lock_bh(&f6i->fib6_table->tb6_lock);
1741 fib6_update_sernum(net, f6i);
1742 spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1743 fib6_force_start_gc(net);
1744 }
1745
1746 return err;
1747}
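
/* Illustrative sketch (not part of the original source): the randomized
 * eviction threshold applied after an insertion above.  Instead of a fixed
 * per-bucket depth, the limit is drawn from
 * [FIB6_MAX_DEPTH, 2 * FIB6_MAX_DEPTH), which, per the comment above, is
 * meant to blunt side-channel attacks against the exception cache.
 * Hypothetical helper:
 */
static int __maybe_unused example_exception_depth_limit(void)
{
	return FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
}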
1748
1749static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1750{
1751 struct rt6_exception_bucket *bucket;
1752 struct rt6_exception *rt6_ex;
1753 struct hlist_node *tmp;
1754 int i;
1755
1756 spin_lock_bh(&rt6_exception_lock);
1757
1758 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1759 if (!bucket)
1760 goto out;
1761
 1762 /* Prevent rt6_insert_exception() from recreating the bucket list */
1763 if (!from)
1764 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1765
1766 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1767 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1768 if (!from ||
1769 rcu_access_pointer(rt6_ex->rt6i->from) == from)
1770 rt6_remove_exception(bucket, rt6_ex);
1771 }
1772 WARN_ON_ONCE(!from && bucket->depth);
1773 bucket++;
1774 }
1775out:
1776 spin_unlock_bh(&rt6_exception_lock);
1777}
1778
1779static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1780{
1781 struct fib6_info *f6i = arg;
1782
1783 fib6_nh_flush_exceptions(nh, f6i);
1784
1785 return 0;
1786}
1787
1788void rt6_flush_exceptions(struct fib6_info *f6i)
1789{
1790 if (f6i->nh)
1791 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1792 f6i);
1793 else
1794 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1795}
1796
1797/* Find cached rt in the hash table inside the passed-in rt
1798 * Caller has to hold rcu_read_lock()
1799 */
1800static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1801 const struct in6_addr *daddr,
1802 const struct in6_addr *saddr)
1803{
1804 const struct in6_addr *src_key = NULL;
1805 struct rt6_exception_bucket *bucket;
1806 struct rt6_exception *rt6_ex;
1807 struct rt6_info *ret = NULL;
1808
1809#ifdef CONFIG_IPV6_SUBTREES
 1810 /* fib6_src.plen != 0 indicates f6i is in subtree
1811 * and exception table is indexed by a hash of
1812 * both fib6_dst and fib6_src.
1813 * However, the src addr used to create the hash
1814 * might not be exactly the passed in saddr which
1815 * is a /128 addr from the flow.
1816 * So we need to use f6i->fib6_src to redo lookup
1817 * if the passed in saddr does not find anything.
1818 * (See the logic in ip6_rt_cache_alloc() on how
1819 * rt->rt6i_src is updated.)
1820 */
1821 if (res->f6i->fib6_src.plen)
1822 src_key = saddr;
1823find_ex:
1824#endif
1825 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1826 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1827
1828 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1829 ret = rt6_ex->rt6i;
1830
1831#ifdef CONFIG_IPV6_SUBTREES
1832 /* Use fib6_src as src_key and redo lookup */
1833 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1834 src_key = &res->f6i->fib6_src.addr;
1835 goto find_ex;
1836 }
1837#endif
1838
1839 return ret;
1840}
1841
1842/* Remove the passed in cached rt from the hash table that contains it */
1843static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1844 const struct rt6_info *rt)
1845{
1846 const struct in6_addr *src_key = NULL;
1847 struct rt6_exception_bucket *bucket;
1848 struct rt6_exception *rt6_ex;
1849 int err;
1850
1851 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1852 return -ENOENT;
1853
1854 spin_lock_bh(&rt6_exception_lock);
1855 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1856
1857#ifdef CONFIG_IPV6_SUBTREES
1858 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1859 * and exception table is indexed by a hash of
1860 * both rt6i_dst and rt6i_src.
1861 * Otherwise, the exception table is indexed by
1862 * a hash of only rt6i_dst.
1863 */
1864 if (plen)
1865 src_key = &rt->rt6i_src.addr;
1866#endif
1867 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1868 &rt->rt6i_dst.addr,
1869 src_key);
1870 if (rt6_ex) {
1871 rt6_remove_exception(bucket, rt6_ex);
1872 err = 0;
1873 } else {
1874 err = -ENOENT;
1875 }
1876
1877 spin_unlock_bh(&rt6_exception_lock);
1878 return err;
1879}
1880
1881struct fib6_nh_excptn_arg {
1882 struct rt6_info *rt;
1883 int plen;
1884};
1885
1886static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1887{
1888 struct fib6_nh_excptn_arg *arg = _arg;
1889 int err;
1890
1891 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1892 if (err == 0)
1893 return 1;
1894
1895 return 0;
1896}
1897
1898static int rt6_remove_exception_rt(struct rt6_info *rt)
1899{
1900 struct fib6_info *from;
1901
1902 from = rcu_dereference(rt->from);
1903 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1904 return -EINVAL;
1905
1906 if (from->nh) {
1907 struct fib6_nh_excptn_arg arg = {
1908 .rt = rt,
1909 .plen = from->fib6_src.plen
1910 };
1911 int rc;
1912
1913 /* rc = 1 means an entry was found */
1914 rc = nexthop_for_each_fib6_nh(from->nh,
1915 rt6_nh_remove_exception_rt,
1916 &arg);
1917 return rc ? 0 : -ENOENT;
1918 }
1919
1920 return fib6_nh_remove_exception(from->fib6_nh,
1921 from->fib6_src.plen, rt);
1922}
1923
1924/* Find rt6_ex which contains the passed in rt cache and
1925 * refresh its stamp
1926 */
1927static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1928 const struct rt6_info *rt)
1929{
1930 const struct in6_addr *src_key = NULL;
1931 struct rt6_exception_bucket *bucket;
1932 struct rt6_exception *rt6_ex;
1933
1934 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1935#ifdef CONFIG_IPV6_SUBTREES
1936 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1937 * and exception table is indexed by a hash of
1938 * both rt6i_dst and rt6i_src.
1939 * Otherwise, the exception table is indexed by
1940 * a hash of only rt6i_dst.
1941 */
1942 if (plen)
1943 src_key = &rt->rt6i_src.addr;
1944#endif
1945 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1946 if (rt6_ex)
1947 rt6_ex->stamp = jiffies;
1948}
1949
1950struct fib6_nh_match_arg {
1951 const struct net_device *dev;
1952 const struct in6_addr *gw;
1953 struct fib6_nh *match;
1954};
1955
1956/* determine if fib6_nh has given device and gateway */
1957static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1958{
1959 struct fib6_nh_match_arg *arg = _arg;
1960
1961 if (arg->dev != nh->fib_nh_dev ||
1962 (arg->gw && !nh->fib_nh_gw_family) ||
1963 (!arg->gw && nh->fib_nh_gw_family) ||
1964 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
1965 return 0;
1966
1967 arg->match = nh;
1968
1969 /* found a match, break the loop */
1970 return 1;
1971}
1972
1973static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1974{
1975 struct fib6_info *from;
1976 struct fib6_nh *fib6_nh;
1977
1978 rcu_read_lock();
1979
1980 from = rcu_dereference(rt->from);
1981 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1982 goto unlock;
1983
1984 if (from->nh) {
1985 struct fib6_nh_match_arg arg = {
1986 .dev = rt->dst.dev,
1987 .gw = &rt->rt6i_gateway,
1988 };
1989
1990 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1991
1992 if (!arg.match)
1993 goto unlock;
1994 fib6_nh = arg.match;
1995 } else {
1996 fib6_nh = from->fib6_nh;
1997 }
1998 fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
1999unlock:
2000 rcu_read_unlock();
2001}
2002
2003static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
2004 struct rt6_info *rt, int mtu)
2005{
2006 /* If the new MTU is lower than the route PMTU, this new MTU will be the
2007 * lowest MTU in the path: always allow updating the route PMTU to
2008 * reflect PMTU decreases.
2009 *
2010 * If the new MTU is higher, and the route PMTU is equal to the local
2011 * MTU, this means the old MTU is the lowest in the path, so allow
2012 * updating it: if other nodes now have lower MTUs, PMTU discovery will
2013 * handle this.
2014 */
2015
2016 if (dst_mtu(&rt->dst) >= mtu)
2017 return true;
2018
2019 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
2020 return true;
2021
2022 return false;
2023}
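
/* Illustrative sketch (not part of the original source): the two cases in
 * which rt6_mtu_change_route_allowed() above accepts a new device MTU for a
 * cached route.  Worked example with made-up numbers: a cached PMTU of 1400
 * on a 1500-byte link is left alone when the link MTU is raised to 9000
 * (another hop is the bottleneck); it is updated when it equalled the old
 * link MTU, and a lower link MTU is always applied.
 */
static bool __maybe_unused
example_mtu_update_allowed(unsigned int route_pmtu, unsigned int old_dev_mtu,
			   unsigned int new_dev_mtu)
{
	/* decreases always propagate */
	if (route_pmtu >= new_dev_mtu)
		return true;

	/* increases only when the local link was the path bottleneck */
	return route_pmtu == old_dev_mtu;
}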
2024
2025static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
2026 const struct fib6_nh *nh, int mtu)
2027{
2028 struct rt6_exception_bucket *bucket;
2029 struct rt6_exception *rt6_ex;
2030 int i;
2031
2032 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2033 if (!bucket)
2034 return;
2035
2036 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2037 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
2038 struct rt6_info *entry = rt6_ex->rt6i;
2039
2040 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2041 * route), the metrics of its rt->from have already
2042 * been updated.
2043 */
2044 if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2045 rt6_mtu_change_route_allowed(idev, entry, mtu))
2046 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2047 }
2048 bucket++;
2049 }
2050}
2051
2052#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2053
2054static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2055 const struct in6_addr *gateway)
2056{
2057 struct rt6_exception_bucket *bucket;
2058 struct rt6_exception *rt6_ex;
2059 struct hlist_node *tmp;
2060 int i;
2061
2062 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2063 return;
2064
2065 spin_lock_bh(&rt6_exception_lock);
2066 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2067 if (bucket) {
2068 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2069 hlist_for_each_entry_safe(rt6_ex, tmp,
2070 &bucket->chain, hlist) {
2071 struct rt6_info *entry = rt6_ex->rt6i;
2072
2073 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2074 RTF_CACHE_GATEWAY &&
2075 ipv6_addr_equal(gateway,
2076 &entry->rt6i_gateway)) {
2077 rt6_remove_exception(bucket, rt6_ex);
2078 }
2079 }
2080 bucket++;
2081 }
2082 }
2083
2084 spin_unlock_bh(&rt6_exception_lock);
2085}
2086
2087static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2088 struct rt6_exception *rt6_ex,
2089 struct fib6_gc_args *gc_args,
2090 unsigned long now)
2091{
2092 struct rt6_info *rt = rt6_ex->rt6i;
2093
 2094 /* we are pruning and obsoleting aged-out and non-gateway exceptions
 2095 * even if others still hold references to them, so that on the next
 2096 * dst_check() such references can be dropped.
 2097 * RTF_EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
 2098 * expired, independently of their aging, as per RFC 8201 section 4
2099 */
2100 if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2101 if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
2102 pr_debug("aging clone %p\n", rt);
2103 rt6_remove_exception(bucket, rt6_ex);
2104 return;
2105 }
2106 } else if (time_after(jiffies, rt->dst.expires)) {
2107 pr_debug("purging expired route %p\n", rt);
2108 rt6_remove_exception(bucket, rt6_ex);
2109 return;
2110 }
2111
2112 if (rt->rt6i_flags & RTF_GATEWAY) {
2113 struct neighbour *neigh;
2114
2115 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2116
2117 if (!(neigh && (neigh->flags & NTF_ROUTER))) {
2118 pr_debug("purging route %p via non-router but gateway\n",
2119 rt);
2120 rt6_remove_exception(bucket, rt6_ex);
2121 return;
2122 }
2123 }
2124
2125 gc_args->more++;
2126}
2127
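/* Garbage-collect @nh's exception cache: rt6_age_examine_exception()
 * prunes entries that have expired, aged out, or point at a gateway that
 * is no longer known to be a router, and counts the survivors in
 * gc_args->more.
 */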
2128static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2129 struct fib6_gc_args *gc_args,
2130 unsigned long now)
2131{
2132 struct rt6_exception_bucket *bucket;
2133 struct rt6_exception *rt6_ex;
2134 struct hlist_node *tmp;
2135 int i;
2136
2137 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2138 return;
2139
2140 rcu_read_lock_bh();
2141 spin_lock(&rt6_exception_lock);
2142 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2143 if (bucket) {
2144 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2145 hlist_for_each_entry_safe(rt6_ex, tmp,
2146 &bucket->chain, hlist) {
2147 rt6_age_examine_exception(bucket, rt6_ex,
2148 gc_args, now);
2149 }
2150 bucket++;
2151 }
2152 }
2153 spin_unlock(&rt6_exception_lock);
2154 rcu_read_unlock_bh();
2155}
2156
2157struct fib6_nh_age_excptn_arg {
2158 struct fib6_gc_args *gc_args;
2159 unsigned long now;
2160};
2161
2162static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2163{
2164 struct fib6_nh_age_excptn_arg *arg = _arg;
2165
2166 fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2167 return 0;
2168}
2169
2170void rt6_age_exceptions(struct fib6_info *f6i,
2171 struct fib6_gc_args *gc_args,
2172 unsigned long now)
2173{
2174 if (f6i->nh) {
2175 struct fib6_nh_age_excptn_arg arg = {
2176 .gc_args = gc_args,
2177 .now = now
2178 };
2179
2180 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2181 &arg);
2182 } else {
2183 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2184 }
2185}
2186
2187/* must be called with rcu lock held */
2188int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2189 struct flowi6 *fl6, struct fib6_result *res, int strict)
2190{
2191 struct fib6_node *fn, *saved_fn;
2192
2193 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2194 saved_fn = fn;
2195
2196redo_rt6_select:
2197 rt6_select(net, fn, oif, res, strict);
2198 if (res->f6i == net->ipv6.fib6_null_entry) {
2199 fn = fib6_backtrack(fn, &fl6->saddr);
2200 if (fn)
2201 goto redo_rt6_select;
2202 else if (strict & RT6_LOOKUP_F_REACHABLE) {
2203 /* also consider unreachable route */
2204 strict &= ~RT6_LOOKUP_F_REACHABLE;
2205 fn = saved_fn;
2206 goto redo_rt6_select;
2207 }
2208 }
2209
2210 trace_fib6_table_lookup(net, res, table, fl6);
2211
2212 return 0;
2213}
2214
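/* Core policy-routing lookup. Resolves @fl6 in @table and returns, in
 * order of preference, a cached exception route, a freshly allocated
 * uncached RTF_CACHE clone (FLOWI_FLAG_KNOWN_NH without a gateway), or
 * a per-cpu copy of the matching route; falls back to ip6_null_entry.
 * A reference is taken on the returned dst unless RT6_LOOKUP_F_DST_NOREF
 * is set (the uncached clone case always returns one, as noted below).
 */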
2215struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2216 int oif, struct flowi6 *fl6,
2217 const struct sk_buff *skb, int flags)
2218{
2219 struct fib6_result res = {};
2220 struct rt6_info *rt = NULL;
2221 int strict = 0;
2222
2223 WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2224 !rcu_read_lock_held());
2225
2226 strict |= flags & RT6_LOOKUP_F_IFACE;
2227 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2228 if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
2229 strict |= RT6_LOOKUP_F_REACHABLE;
2230
2231 rcu_read_lock();
2232
2233 fib6_table_lookup(net, table, oif, fl6, &res, strict);
2234 if (res.f6i == net->ipv6.fib6_null_entry)
2235 goto out;
2236
2237 fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2238
2239 /* Search through the exception table */
2240 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2241 if (rt) {
2242 goto out;
2243 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2244 !res.nh->fib_nh_gw_family)) {
2245 /* Create an RTF_CACHE clone which will not be
2246 * owned by the fib6 tree. It is for the special case where
2247 * the daddr in the skb during the neighbor look-up is different
2248 * from the fl6->daddr used to look up the route here.
2249 */
2250 rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2251
2252 if (rt) {
2253 /* 1 refcnt is taken during ip6_rt_cache_alloc().
2254 * As rt6_uncached_list_add() does not consume refcnt,
2255 * this refcnt is always returned to the caller even
2256 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
2257 */
2258 rt6_uncached_list_add(rt);
2259 rcu_read_unlock();
2260
2261 return rt;
2262 }
2263 } else {
2264 /* Get a percpu copy */
2265 local_bh_disable();
2266 rt = rt6_get_pcpu_route(&res);
2267
2268 if (!rt)
2269 rt = rt6_make_pcpu_route(net, &res);
2270
2271 local_bh_enable();
2272 }
2273out:
2274 if (!rt)
2275 rt = net->ipv6.ip6_null_entry;
2276 if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2277 ip6_hold_safe(net, &rt);
2278 rcu_read_unlock();
2279
2280 return rt;
2281}
2282EXPORT_SYMBOL_GPL(ip6_pol_route);
2283
2284INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
2285 struct fib6_table *table,
2286 struct flowi6 *fl6,
2287 const struct sk_buff *skb,
2288 int flags)
2289{
2290 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2291}
2292
2293struct dst_entry *ip6_route_input_lookup(struct net *net,
2294 struct net_device *dev,
2295 struct flowi6 *fl6,
2296 const struct sk_buff *skb,
2297 int flags)
2298{
2299 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2300 flags |= RT6_LOOKUP_F_IFACE;
2301
2302 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2303}
2304EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2305
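/* Fill @keys with the L3 fields used for multipath hashing. For ICMPv6
 * error messages the inner (offending) header is hashed instead of the
 * outer one, so the error follows the same path as the flow it refers
 * to; for non-error packets, pre-dissected @flkeys are used when
 * available.
 */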
2306static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2307 struct flow_keys *keys,
2308 struct flow_keys *flkeys)
2309{
2310 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2311 const struct ipv6hdr *key_iph = outer_iph;
2312 struct flow_keys *_flkeys = flkeys;
2313 const struct ipv6hdr *inner_iph;
2314 const struct icmp6hdr *icmph;
2315 struct ipv6hdr _inner_iph;
2316 struct icmp6hdr _icmph;
2317
2318 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2319 goto out;
2320
2321 icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2322 sizeof(_icmph), &_icmph);
2323 if (!icmph)
2324 goto out;
2325
2326 if (!icmpv6_is_err(icmph->icmp6_type))
2327 goto out;
2328
2329 inner_iph = skb_header_pointer(skb,
2330 skb_transport_offset(skb) + sizeof(*icmph),
2331 sizeof(_inner_iph), &_inner_iph);
2332 if (!inner_iph)
2333 goto out;
2334
2335 key_iph = inner_iph;
2336 _flkeys = NULL;
2337out:
2338 if (_flkeys) {
2339 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2340 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2341 keys->tags.flow_label = _flkeys->tags.flow_label;
2342 keys->basic.ip_proto = _flkeys->basic.ip_proto;
2343 } else {
2344 keys->addrs.v6addrs.src = key_iph->saddr;
2345 keys->addrs.v6addrs.dst = key_iph->daddr;
2346 keys->tags.flow_label = ip6_flowlabel(key_iph);
2347 keys->basic.ip_proto = key_iph->nexthdr;
2348 }
2349}
2350
2351static u32 rt6_multipath_custom_hash_outer(const struct net *net,
2352 const struct sk_buff *skb,
2353 bool *p_has_inner)
2354{
2355 u32 hash_fields = ip6_multipath_hash_fields(net);
2356 struct flow_keys keys, hash_keys;
2357
2358 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2359 return 0;
2360
2361 memset(&hash_keys, 0, sizeof(hash_keys));
2362 skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
2363
2364 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2365 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2366 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2367 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2368 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2369 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2370 hash_keys.basic.ip_proto = keys.basic.ip_proto;
2371 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2372 hash_keys.tags.flow_label = keys.tags.flow_label;
2373 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2374 hash_keys.ports.src = keys.ports.src;
2375 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2376 hash_keys.ports.dst = keys.ports.dst;
2377
2378 *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
2379 return flow_hash_from_keys(&hash_keys);
2380}
2381
2382static u32 rt6_multipath_custom_hash_inner(const struct net *net,
2383 const struct sk_buff *skb,
2384 bool has_inner)
2385{
2386 u32 hash_fields = ip6_multipath_hash_fields(net);
2387 struct flow_keys keys, hash_keys;
2388
2389 /* We assume the packet carries an encapsulation, but if none was
2390 * encountered during dissection of the outer flow, then there is no
2391 * point in calling the flow dissector again.
2392 */
2393 if (!has_inner)
2394 return 0;
2395
2396 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
2397 return 0;
2398
2399 memset(&hash_keys, 0, sizeof(hash_keys));
2400 skb_flow_dissect_flow_keys(skb, &keys, 0);
2401
2402 if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
2403 return 0;
2404
2405 if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2406 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2407 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2408 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2409 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2410 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2411 } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2412 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2413 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2414 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2415 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2416 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2417 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
2418 hash_keys.tags.flow_label = keys.tags.flow_label;
2419 }
2420
2421 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
2422 hash_keys.basic.ip_proto = keys.basic.ip_proto;
2423 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
2424 hash_keys.ports.src = keys.ports.src;
2425 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
2426 hash_keys.ports.dst = keys.ports.dst;
2427
2428 return flow_hash_from_keys(&hash_keys);
2429}
2430
2431static u32 rt6_multipath_custom_hash_skb(const struct net *net,
2432 const struct sk_buff *skb)
2433{
2434 u32 mhash, mhash_inner;
2435 bool has_inner = true;
2436
2437 mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
2438 mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);
2439
2440 return jhash_2words(mhash, mhash_inner, 0);
2441}
2442
2443static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
2444 const struct flowi6 *fl6)
2445{
2446 u32 hash_fields = ip6_multipath_hash_fields(net);
2447 struct flow_keys hash_keys;
2448
2449 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2450 return 0;
2451
2452 memset(&hash_keys, 0, sizeof(hash_keys));
2453 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2454 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2455 hash_keys.addrs.v6addrs.src = fl6->saddr;
2456 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2457 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2458 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2459 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2460 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2461 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2462 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2463 hash_keys.ports.src = fl6->fl6_sport;
2464 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2465 hash_keys.ports.dst = fl6->fl6_dport;
2466
2467 return flow_hash_from_keys(&hash_keys);
2468}
2469
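/* Compute the multipath hash for @fl6/@skb according to the
 * net.ipv6.fib_multipath_hash_policy sysctl (ip6_multipath_hash_policy()):
 *   0 - L3 (src/dst address, flow label, protocol)
 *   1 - L4 (5-tuple)
 *   2 - L3, or the inner L3 header for encapsulated packets
 *   3 - custom field set from net.ipv6.fib_multipath_hash_fields
 */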
2470/* if skb is set it will be used and fl6 can be NULL */
2471u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2472 const struct sk_buff *skb, struct flow_keys *flkeys)
2473{
2474 struct flow_keys hash_keys;
2475 u32 mhash = 0;
2476
2477 switch (ip6_multipath_hash_policy(net)) {
2478 case 0:
2479 memset(&hash_keys, 0, sizeof(hash_keys));
2480 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2481 if (skb) {
2482 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2483 } else {
2484 hash_keys.addrs.v6addrs.src = fl6->saddr;
2485 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2486 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2487 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2488 }
2489 mhash = flow_hash_from_keys(&hash_keys);
2490 break;
2491 case 1:
2492 if (skb) {
2493 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2494 struct flow_keys keys;
2495
2496 /* short-circuit if we already have L4 hash present */
2497 if (skb->l4_hash)
2498 return skb_get_hash_raw(skb) >> 1;
2499
2500 memset(&hash_keys, 0, sizeof(hash_keys));
2501
2502 if (!flkeys) {
2503 skb_flow_dissect_flow_keys(skb, &keys, flag);
2504 flkeys = &keys;
2505 }
2506 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2507 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2508 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2509 hash_keys.ports.src = flkeys->ports.src;
2510 hash_keys.ports.dst = flkeys->ports.dst;
2511 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2512 } else {
2513 memset(&hash_keys, 0, sizeof(hash_keys));
2514 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2515 hash_keys.addrs.v6addrs.src = fl6->saddr;
2516 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2517 hash_keys.ports.src = fl6->fl6_sport;
2518 hash_keys.ports.dst = fl6->fl6_dport;
2519 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2520 }
2521 mhash = flow_hash_from_keys(&hash_keys);
2522 break;
2523 case 2:
2524 memset(&hash_keys, 0, sizeof(hash_keys));
2525 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2526 if (skb) {
2527 struct flow_keys keys;
2528
2529 if (!flkeys) {
2530 skb_flow_dissect_flow_keys(skb, &keys, 0);
2531 flkeys = &keys;
2532 }
2533
2534 /* Inner can be v4 or v6 */
2535 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2536 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2537 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2538 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2539 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2540 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2541 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2542 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2543 hash_keys.tags.flow_label = flkeys->tags.flow_label;
2544 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2545 } else {
2546 /* Same as case 0 */
2547 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2548 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2549 }
2550 } else {
2551 /* Same as case 0 */
2552 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2553 hash_keys.addrs.v6addrs.src = fl6->saddr;
2554 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2555 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2556 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2557 }
2558 mhash = flow_hash_from_keys(&hash_keys);
2559 break;
2560 case 3:
2561 if (skb)
2562 mhash = rt6_multipath_custom_hash_skb(net, skb);
2563 else
2564 mhash = rt6_multipath_custom_hash_fl6(net, fl6);
2565 break;
2566 }
2567
2568 return mhash >> 1;
2569}
2570
2571/* Called with rcu held */
2572void ip6_route_input(struct sk_buff *skb)
2573{
2574 const struct ipv6hdr *iph = ipv6_hdr(skb);
2575 struct net *net = dev_net(skb->dev);
2576 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2577 struct ip_tunnel_info *tun_info;
2578 struct flowi6 fl6 = {
2579 .flowi6_iif = skb->dev->ifindex,
2580 .daddr = iph->daddr,
2581 .saddr = iph->saddr,
2582 .flowlabel = ip6_flowinfo(iph),
2583 .flowi6_mark = skb->mark,
2584 .flowi6_proto = iph->nexthdr,
2585 };
2586 struct flow_keys *flkeys = NULL, _flkeys;
2587
2588 tun_info = skb_tunnel_info(skb);
2589 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2590 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2591
2592 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2593 flkeys = &_flkeys;
2594
2595 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2596 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2597 skb_dst_drop(skb);
2598 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2599 &fl6, skb, flags));
2600}
2601
2602INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
2603 struct fib6_table *table,
2604 struct flowi6 *fl6,
2605 const struct sk_buff *skb,
2606 int flags)
2607{
2608 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2609}
2610
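/* Output-path lookup that does not take a reference on the returned dst
 * (RT6_LOOKUP_F_DST_NOREF is forced); the caller must be in an RCU
 * read-side section and handle refcounting itself, as
 * ip6_route_output_flags() does below.
 */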
2611static struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2612 const struct sock *sk,
2613 struct flowi6 *fl6,
2614 int flags)
2615{
2616 bool any_src;
2617
2618 if (ipv6_addr_type(&fl6->daddr) &
2619 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2620 struct dst_entry *dst;
2621
2622 /* This function does not take refcnt on the dst */
2623 dst = l3mdev_link_scope_lookup(net, fl6);
2624 if (dst)
2625 return dst;
2626 }
2627
2628 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2629
2630 flags |= RT6_LOOKUP_F_DST_NOREF;
2631 any_src = ipv6_addr_any(&fl6->saddr);
2632 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2633 (fl6->flowi6_oif && any_src))
2634 flags |= RT6_LOOKUP_F_IFACE;
2635
2636 if (!any_src)
2637 flags |= RT6_LOOKUP_F_HAS_SADDR;
2638 else if (sk)
2639 flags |= rt6_srcprefs2flags(READ_ONCE(inet6_sk(sk)->srcprefs));
2640
2641 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2642}
2643
2644struct dst_entry *ip6_route_output_flags(struct net *net,
2645 const struct sock *sk,
2646 struct flowi6 *fl6,
2647 int flags)
2648{
2649 struct dst_entry *dst;
2650 struct rt6_info *rt6;
2651
2652 rcu_read_lock();
2653 dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2654 rt6 = dst_rt6_info(dst);
2655 /* For dst cached in uncached_list, refcnt is already taken. */
2656 if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) {
2657 dst = &net->ipv6.ip6_null_entry->dst;
2658 dst_hold(dst);
2659 }
2660 rcu_read_unlock();
2661
2662 return dst;
2663}
2664EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2665
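/* Replace @dst_orig with a blackhole copy: a dst backed by
 * ip6_dst_blackhole_ops on the loopback device whose input/output
 * handlers simply discard packets, while metrics, gateway and flags are
 * copied from the original. @dst_orig is released.
 */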
2666struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2667{
2668 struct rt6_info *rt, *ort = dst_rt6_info(dst_orig);
2669 struct net_device *loopback_dev = net->loopback_dev;
2670 struct dst_entry *new = NULL;
2671
2672 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev,
2673 DST_OBSOLETE_DEAD, 0);
2674 if (rt) {
2675 rt6_info_init(rt);
2676 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2677
2678 new = &rt->dst;
2679 new->__use = 1;
2680 new->input = dst_discard;
2681 new->output = dst_discard_out;
2682
2683 dst_copy_metrics(new, &ort->dst);
2684
2685 rt->rt6i_idev = in6_dev_get(loopback_dev);
2686 rt->rt6i_gateway = ort->rt6i_gateway;
2687 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2688
2689 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2690#ifdef CONFIG_IPV6_SUBTREES
2691 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2692#endif
2693 }
2694
2695 dst_release(dst_orig);
2696 return new ? new : ERR_PTR(-ENOMEM);
2697}
2698
2699/*
2700 * Destination cache support functions
2701 */
2702
2703static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2704{
2705 u32 rt_cookie = 0;
2706
2707 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2708 return false;
2709
2710 if (fib6_check_expired(f6i))
2711 return false;
2712
2713 return true;
2714}
2715
2716static struct dst_entry *rt6_check(struct rt6_info *rt,
2717 struct fib6_info *from,
2718 u32 cookie)
2719{
2720 u32 rt_cookie = 0;
2721
2722 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2723 rt_cookie != cookie)
2724 return NULL;
2725
2726 if (rt6_check_expired(rt))
2727 return NULL;
2728
2729 return &rt->dst;
2730}
2731
2732static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2733 struct fib6_info *from,
2734 u32 cookie)
2735{
2736 if (!__rt6_check_expired(rt) &&
2737 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2738 fib6_check(from, cookie))
2739 return &rt->dst;
2740 else
2741 return NULL;
2742}
2743
2744INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
2745 u32 cookie)
2746{
2747 struct dst_entry *dst_ret;
2748 struct fib6_info *from;
2749 struct rt6_info *rt;
2750
2751 rt = dst_rt6_info(dst);
2752
2753 if (rt->sernum)
2754 return rt6_is_valid(rt) ? dst : NULL;
2755
2756 rcu_read_lock();
2757
2758 /* All IPv6 dsts are created with ->obsolete set to the value
2759 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
2760 * down into this function.
2761 */
2762
2763 from = rcu_dereference(rt->from);
2764
2765 if (from && (rt->rt6i_flags & RTF_PCPU ||
2766 unlikely(!list_empty(&rt->dst.rt_uncached))))
2767 dst_ret = rt6_dst_from_check(rt, from, cookie);
2768 else
2769 dst_ret = rt6_check(rt, from, cookie);
2770
2771 rcu_read_unlock();
2772
2773 return dst_ret;
2774}
2775EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
2776
2777static void ip6_negative_advice(struct sock *sk,
2778 struct dst_entry *dst)
2779{
2780 struct rt6_info *rt = dst_rt6_info(dst);
2781
2782 if (rt->rt6i_flags & RTF_CACHE) {
2783 rcu_read_lock();
2784 if (rt6_check_expired(rt)) {
2785 /* counteract the dst_release() in sk_dst_reset() */
2786 dst_hold(dst);
2787 sk_dst_reset(sk);
2788
2789 rt6_remove_exception_rt(rt);
2790 }
2791 rcu_read_unlock();
2792 return;
2793 }
2794 sk_dst_reset(sk);
2795}
2796
2797static void ip6_link_failure(struct sk_buff *skb)
2798{
2799 struct rt6_info *rt;
2800
2801 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2802
2803 rt = dst_rt6_info(skb_dst(skb));
2804 if (rt) {
2805 rcu_read_lock();
2806 if (rt->rt6i_flags & RTF_CACHE) {
2807 rt6_remove_exception_rt(rt);
2808 } else {
2809 struct fib6_info *from;
2810 struct fib6_node *fn;
2811
2812 from = rcu_dereference(rt->from);
2813 if (from) {
2814 fn = rcu_dereference(from->fib6_node);
2815 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2816 WRITE_ONCE(fn->fn_sernum, -1);
2817 }
2818 }
2819 rcu_read_unlock();
2820 }
2821}
2822
2823static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2824{
2825 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2826 struct fib6_info *from;
2827
2828 rcu_read_lock();
2829 from = rcu_dereference(rt0->from);
2830 if (from)
2831 rt0->dst.expires = from->expires;
2832 rcu_read_unlock();
2833 }
2834
2835 dst_set_expires(&rt0->dst, timeout);
2836 rt0->rt6i_flags |= RTF_EXPIRES;
2837}
2838
2839static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2840{
2841 struct net *net = dev_net(rt->dst.dev);
2842
2843 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2844 rt->rt6i_flags |= RTF_MODIFIED;
2845 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2846}
2847
2848static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2849{
2850 return !(rt->rt6i_flags & RTF_CACHE) &&
2851 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2852}
2853
2854static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2855 const struct ipv6hdr *iph, u32 mtu,
2856 bool confirm_neigh)
2857{
2858 const struct in6_addr *daddr, *saddr;
2859 struct rt6_info *rt6 = dst_rt6_info(dst);
2860
2861 /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
2862 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2863 * [see also comment in rt6_mtu_change_route()]
2864 */
2865
2866 if (iph) {
2867 daddr = &iph->daddr;
2868 saddr = &iph->saddr;
2869 } else if (sk) {
2870 daddr = &sk->sk_v6_daddr;
2871 saddr = &inet6_sk(sk)->saddr;
2872 } else {
2873 daddr = NULL;
2874 saddr = NULL;
2875 }
2876
2877 if (confirm_neigh)
2878 dst_confirm_neigh(dst, daddr);
2879
2880 if (mtu < IPV6_MIN_MTU)
2881 return;
2882 if (mtu >= dst_mtu(dst))
2883 return;
2884
2885 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2886 rt6_do_update_pmtu(rt6, mtu);
2887 /* update rt6_ex->stamp for cache */
2888 if (rt6->rt6i_flags & RTF_CACHE)
2889 rt6_update_exception_stamp_rt(rt6);
2890 } else if (daddr) {
2891 struct fib6_result res = {};
2892 struct rt6_info *nrt6;
2893
2894 rcu_read_lock();
2895 res.f6i = rcu_dereference(rt6->from);
2896 if (!res.f6i)
2897 goto out_unlock;
2898
2899 res.fib6_flags = res.f6i->fib6_flags;
2900 res.fib6_type = res.f6i->fib6_type;
2901
2902 if (res.f6i->nh) {
2903 struct fib6_nh_match_arg arg = {
2904 .dev = dst->dev,
2905 .gw = &rt6->rt6i_gateway,
2906 };
2907
2908 nexthop_for_each_fib6_nh(res.f6i->nh,
2909 fib6_nh_find_match, &arg);
2910
2911 /* The fib6_info uses a nexthop that has no fib6_nh matching
2912 * the dst->dev + gateway. Should be impossible.
2913 */
2914 if (!arg.match)
2915 goto out_unlock;
2916
2917 res.nh = arg.match;
2918 } else {
2919 res.nh = res.f6i->fib6_nh;
2920 }
2921
2922 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2923 if (nrt6) {
2924 rt6_do_update_pmtu(nrt6, mtu);
2925 if (rt6_insert_exception(nrt6, &res))
2926 dst_release_immediate(&nrt6->dst);
2927 }
2928out_unlock:
2929 rcu_read_unlock();
2930 }
2931}
2932
2933static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2934 struct sk_buff *skb, u32 mtu,
2935 bool confirm_neigh)
2936{
2937 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2938 confirm_neigh);
2939}
2940
2941void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2942 int oif, u32 mark, kuid_t uid)
2943{
2944 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2945 struct dst_entry *dst;
2946 struct flowi6 fl6 = {
2947 .flowi6_oif = oif,
2948 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2949 .daddr = iph->daddr,
2950 .saddr = iph->saddr,
2951 .flowlabel = ip6_flowinfo(iph),
2952 .flowi6_uid = uid,
2953 };
2954
2955 dst = ip6_route_output(net, NULL, &fl6);
2956 if (!dst->error)
2957 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
2958 dst_release(dst);
2959}
2960EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2961
2962void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2963{
2964 int oif = sk->sk_bound_dev_if;
2965 struct dst_entry *dst;
2966
2967 if (!oif && skb->dev)
2968 oif = l3mdev_master_ifindex(skb->dev);
2969
2970 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
2971 sk->sk_uid);
2972
2973 dst = __sk_dst_get(sk);
2974 if (!dst || !dst->obsolete ||
2975 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2976 return;
2977
2978 bh_lock_sock(sk);
2979 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2980 ip6_datagram_dst_update(sk, false);
2981 bh_unlock_sock(sk);
2982}
2983EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2984
2985void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2986 const struct flowi6 *fl6)
2987{
2988#ifdef CONFIG_IPV6_SUBTREES
2989 struct ipv6_pinfo *np = inet6_sk(sk);
2990#endif
2991
2992 ip6_dst_store(sk, dst,
2993 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2994 &sk->sk_v6_daddr : NULL,
2995#ifdef CONFIG_IPV6_SUBTREES
2996 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2997 &np->saddr :
2998#endif
2999 NULL);
3000}
3001
3002static bool ip6_redirect_nh_match(const struct fib6_result *res,
3003 struct flowi6 *fl6,
3004 const struct in6_addr *gw,
3005 struct rt6_info **ret)
3006{
3007 const struct fib6_nh *nh = res->nh;
3008
3009 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
3010 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
3011 return false;
3012
3013 /* rt_cache's gateway might be different from its 'parent'
3014 * in the case of an ip redirect.
3015 * So we keep searching in the exception table if the gateway
3016 * is different.
3017 */
3018 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
3019 struct rt6_info *rt_cache;
3020
3021 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
3022 if (rt_cache &&
3023 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
3024 *ret = rt_cache;
3025 return true;
3026 }
3027 return false;
3028 }
3029 return true;
3030}
3031
3032struct fib6_nh_rd_arg {
3033 struct fib6_result *res;
3034 struct flowi6 *fl6;
3035 const struct in6_addr *gw;
3036 struct rt6_info **ret;
3037};
3038
3039static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
3040{
3041 struct fib6_nh_rd_arg *arg = _arg;
3042
3043 arg->res->nh = nh;
3044 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
3045}
3046
3047/* Handle redirects */
3048struct ip6rd_flowi {
3049 struct flowi6 fl6;
3050 struct in6_addr gateway;
3051};
3052
3053INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
3054 struct fib6_table *table,
3055 struct flowi6 *fl6,
3056 const struct sk_buff *skb,
3057 int flags)
3058{
3059 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
3060 struct rt6_info *ret = NULL;
3061 struct fib6_result res = {};
3062 struct fib6_nh_rd_arg arg = {
3063 .res = &res,
3064 .fl6 = fl6,
3065 .gw = &rdfl->gateway,
3066 .ret = &ret
3067 };
3068 struct fib6_info *rt;
3069 struct fib6_node *fn;
3070
3071 /* Get the "current" route for this destination and
3072 * check whether the redirect has come from an appropriate router.
3073 *
3074 * RFC 4861 specifies that redirects should only be
3075 * accepted if they come from the nexthop to the target.
3076 * Due to the way the routes are chosen, this notion
3077 * is a bit fuzzy and one might need to check all possible
3078 * routes.
3079 */
3080
3081 rcu_read_lock();
3082 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
3083restart:
3084 for_each_fib6_node_rt_rcu(fn) {
3085 res.f6i = rt;
3086 if (fib6_check_expired(rt))
3087 continue;
3088 if (rt->fib6_flags & RTF_REJECT)
3089 break;
3090 if (unlikely(rt->nh)) {
3091 if (nexthop_is_blackhole(rt->nh))
3092 continue;
3093 /* on match, res->nh is filled in and potentially ret */
3094 if (nexthop_for_each_fib6_nh(rt->nh,
3095 fib6_nh_redirect_match,
3096 &arg))
3097 goto out;
3098 } else {
3099 res.nh = rt->fib6_nh;
3100 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
3101 &ret))
3102 goto out;
3103 }
3104 }
3105
3106 if (!rt)
3107 rt = net->ipv6.fib6_null_entry;
3108 else if (rt->fib6_flags & RTF_REJECT) {
3109 ret = net->ipv6.ip6_null_entry;
3110 goto out;
3111 }
3112
3113 if (rt == net->ipv6.fib6_null_entry) {
3114 fn = fib6_backtrack(fn, &fl6->saddr);
3115 if (fn)
3116 goto restart;
3117 }
3118
3119 res.f6i = rt;
3120 res.nh = rt->fib6_nh;
3121out:
3122 if (ret) {
3123 ip6_hold_safe(net, &ret);
3124 } else {
3125 res.fib6_flags = res.f6i->fib6_flags;
3126 res.fib6_type = res.f6i->fib6_type;
3127 ret = ip6_create_rt_rcu(&res);
3128 }
3129
3130 rcu_read_unlock();
3131
3132 trace_fib6_table_lookup(net, &res, table, fl6);
3133 return ret;
3134};
3135
3136static struct dst_entry *ip6_route_redirect(struct net *net,
3137 const struct flowi6 *fl6,
3138 const struct sk_buff *skb,
3139 const struct in6_addr *gateway)
3140{
3141 int flags = RT6_LOOKUP_F_HAS_SADDR;
3142 struct ip6rd_flowi rdfl;
3143
3144 rdfl.fl6 = *fl6;
3145 rdfl.gateway = *gateway;
3146
3147 return fib6_rule_lookup(net, &rdfl.fl6, skb,
3148 flags, __ip6_route_redirect);
3149}
3150
3151void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3152 kuid_t uid)
3153{
3154 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3155 struct dst_entry *dst;
3156 struct flowi6 fl6 = {
3157 .flowi6_iif = LOOPBACK_IFINDEX,
3158 .flowi6_oif = oif,
3159 .flowi6_mark = mark,
3160 .daddr = iph->daddr,
3161 .saddr = iph->saddr,
3162 .flowlabel = ip6_flowinfo(iph),
3163 .flowi6_uid = uid,
3164 };
3165
3166 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3167 rt6_do_redirect(dst, NULL, skb);
3168 dst_release(dst);
3169}
3170EXPORT_SYMBOL_GPL(ip6_redirect);
3171
3172void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3173{
3174 const struct ipv6hdr *iph = ipv6_hdr(skb);
3175 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3176 struct dst_entry *dst;
3177 struct flowi6 fl6 = {
3178 .flowi6_iif = LOOPBACK_IFINDEX,
3179 .flowi6_oif = oif,
3180 .daddr = msg->dest,
3181 .saddr = iph->daddr,
3182 .flowi6_uid = sock_net_uid(net, NULL),
3183 };
3184
3185 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3186 rt6_do_redirect(dst, NULL, skb);
3187 dst_release(dst);
3188}
3189
3190void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3191{
3192 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
3193 READ_ONCE(sk->sk_mark), sk->sk_uid);
3194}
3195EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3196
3197static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3198{
3199 struct net_device *dev = dst->dev;
3200 unsigned int mtu = dst_mtu(dst);
3201 struct net *net = dev_net(dev);
3202
3203 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3204
3205 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3206 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3207
3208 /*
3209 * The maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and the
3210 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3211 * IPV6_MAXPLEN is also valid and means: "any MSS,
3212 * rely only on PMTU discovery".
3213 */
3214 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3215 mtu = IPV6_MAXPLEN;
3216 return mtu;
3217}
3218
3219INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst)
3220{
3221 return ip6_dst_mtu_maybe_forward(dst, false);
3222}
3223EXPORT_INDIRECT_CALLABLE(ip6_mtu);
3224
3225/* MTU selection:
3226 * 1. mtu on route is locked - use it
3227 * 2. mtu from nexthop exception
3228 * 3. mtu from egress device
3229 *
3230 * based on ip6_dst_mtu_forward and exception logic of
3231 * rt6_find_cached_rt; called with rcu_read_lock
3232 */
3233u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3234 const struct in6_addr *daddr,
3235 const struct in6_addr *saddr)
3236{
3237 const struct fib6_nh *nh = res->nh;
3238 struct fib6_info *f6i = res->f6i;
3239 struct inet6_dev *idev;
3240 struct rt6_info *rt;
3241 u32 mtu = 0;
3242
3243 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3244 mtu = f6i->fib6_pmtu;
3245 if (mtu)
3246 goto out;
3247 }
3248
3249 rt = rt6_find_cached_rt(res, daddr, saddr);
3250 if (unlikely(rt)) {
3251 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3252 } else {
3253 struct net_device *dev = nh->fib_nh_dev;
3254
3255 mtu = IPV6_MIN_MTU;
3256 idev = __in6_dev_get(dev);
3257 if (idev)
3258 mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
3259 }
3260
3261 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3262out:
3263 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3264}
3265
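/* Allocate a standalone dst for an outgoing ICMPv6 packet towards
 * fl6->daddr on @dev. The route is added to the uncached list (so
 * rt6_disable_ip() can release the device) and then passed through
 * xfrm_lookup().
 */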
3266struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3267 struct flowi6 *fl6)
3268{
3269 struct dst_entry *dst;
3270 struct rt6_info *rt;
3271 struct inet6_dev *idev = in6_dev_get(dev);
3272 struct net *net = dev_net(dev);
3273
3274 if (unlikely(!idev))
3275 return ERR_PTR(-ENODEV);
3276
3277 rt = ip6_dst_alloc(net, dev, 0);
3278 if (unlikely(!rt)) {
3279 in6_dev_put(idev);
3280 dst = ERR_PTR(-ENOMEM);
3281 goto out;
3282 }
3283
3284 rt->dst.input = ip6_input;
3285 rt->dst.output = ip6_output;
3286 rt->rt6i_gateway = fl6->daddr;
3287 rt->rt6i_dst.addr = fl6->daddr;
3288 rt->rt6i_dst.plen = 128;
3289 rt->rt6i_idev = idev;
3290 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3291
3292 /* Add this dst into uncached_list so that rt6_disable_ip() can
3293 * do proper release of the net_device
3294 */
3295 rt6_uncached_list_add(rt);
3296
3297 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3298
3299out:
3300 return dst;
3301}
3302
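/* dst_ops->gc hook, throttled to at most one run per
 * ip6_rt_gc_min_interval: each run bumps ip6_rt_gc_expire and calls
 * fib6_run_gc(); the value is reset to half of ip6_rt_gc_timeout once
 * the entry count drops below gc_thresh, and is decayed by
 * ip6_rt_gc_elasticity on exit.
 */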
3303static void ip6_dst_gc(struct dst_ops *ops)
3304{
3305 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3306 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3307 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3308 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3309 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3310 unsigned int val;
3311 int entries;
3312
3313 if (time_after(rt_last_gc + rt_min_interval, jiffies))
3314 goto out;
3315
3316 fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
3317 entries = dst_entries_get_slow(ops);
3318 if (entries < ops->gc_thresh)
3319 atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
3320out:
3321 val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
3322 atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
3323}
3324
3325static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3326 const struct in6_addr *gw_addr, u32 tbid,
3327 int flags, struct fib6_result *res)
3328{
3329 struct flowi6 fl6 = {
3330 .flowi6_oif = cfg->fc_ifindex,
3331 .daddr = *gw_addr,
3332 .saddr = cfg->fc_prefsrc,
3333 };
3334 struct fib6_table *table;
3335 int err;
3336
3337 table = fib6_get_table(net, tbid);
3338 if (!table)
3339 return -EINVAL;
3340
3341 if (!ipv6_addr_any(&cfg->fc_prefsrc))
3342 flags |= RT6_LOOKUP_F_HAS_SADDR;
3343
3344 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3345
3346 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3347 if (!err && res->f6i != net->ipv6.fib6_null_entry)
3348 fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3349 cfg->fc_ifindex != 0, NULL, flags);
3350
3351 return err;
3352}
3353
3354static int ip6_route_check_nh_onlink(struct net *net,
3355 struct fib6_config *cfg,
3356 const struct net_device *dev,
3357 struct netlink_ext_ack *extack)
3358{
3359 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3360 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3361 struct fib6_result res = {};
3362 int err;
3363
3364 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3365 if (!err && !(res.fib6_flags & RTF_REJECT) &&
3366 /* ignore match if it is the default route */
3367 !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3368 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3369 NL_SET_ERR_MSG(extack,
3370 "Nexthop has invalid gateway or device mismatch");
3371 err = -EINVAL;
3372 }
3373
3374 return err;
3375}
3376
3377static int ip6_route_check_nh(struct net *net,
3378 struct fib6_config *cfg,
3379 struct net_device **_dev,
3380 netdevice_tracker *dev_tracker,
3381 struct inet6_dev **idev)
3382{
3383 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3384 struct net_device *dev = _dev ? *_dev : NULL;
3385 int flags = RT6_LOOKUP_F_IFACE;
3386 struct fib6_result res = {};
3387 int err = -EHOSTUNREACH;
3388
3389 if (cfg->fc_table) {
3390 err = ip6_nh_lookup_table(net, cfg, gw_addr,
3391 cfg->fc_table, flags, &res);
3392 /* The route to gw_addr must not itself require a gateway or resolve
3393 * to a reject route. If a device is given, it must match the result.
3394 */
3395 if (err || res.fib6_flags & RTF_REJECT ||
3396 res.nh->fib_nh_gw_family ||
3397 (dev && dev != res.nh->fib_nh_dev))
3398 err = -EHOSTUNREACH;
3399 }
3400
3401 if (err < 0) {
3402 struct flowi6 fl6 = {
3403 .flowi6_oif = cfg->fc_ifindex,
3404 .daddr = *gw_addr,
3405 };
3406
3407 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3408 if (err || res.fib6_flags & RTF_REJECT ||
3409 res.nh->fib_nh_gw_family)
3410 err = -EHOSTUNREACH;
3411
3412 if (err)
3413 return err;
3414
3415 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3416 cfg->fc_ifindex != 0, NULL, flags);
3417 }
3418
3419 err = 0;
3420 if (dev) {
3421 if (dev != res.nh->fib_nh_dev)
3422 err = -EHOSTUNREACH;
3423 } else {
3424 *_dev = dev = res.nh->fib_nh_dev;
3425 netdev_hold(dev, dev_tracker, GFP_ATOMIC);
3426 *idev = in6_dev_get(dev);
3427 }
3428
3429 return err;
3430}
3431
3432static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3433 struct net_device **_dev,
3434 netdevice_tracker *dev_tracker,
3435 struct inet6_dev **idev,
3436 struct netlink_ext_ack *extack)
3437{
3438 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3439 int gwa_type = ipv6_addr_type(gw_addr);
3440 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3441 const struct net_device *dev = *_dev;
3442 bool need_addr_check = !dev;
3443 int err = -EINVAL;
3444
3445 /* If gw_addr is local we will fail to detect this in case the
3446 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3447 * will return the already-added prefix route via the interface
3448 * the prefix route was assigned to, which might be non-loopback.
3449 */
3450 if (dev &&
3451 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3452 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3453 goto out;
3454 }
3455
3456 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3457 /* IPv6 strictly forbids using non-link-local
3458 * addresses as nexthop addresses.
3459 * Otherwise, a router would not be able to send redirects.
3460 * That is very good, but in some (rare!) circumstances
3461 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3462 * some exceptions. --ANK
3463 * We allow IPv4-mapped nexthops to support RFC 4798-style
3464 * addressing.
3465 */
3466 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3467 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3468 goto out;
3469 }
3470
3471 rcu_read_lock();
3472
3473 if (cfg->fc_flags & RTNH_F_ONLINK)
3474 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3475 else
3476 err = ip6_route_check_nh(net, cfg, _dev, dev_tracker,
3477 idev);
3478
3479 rcu_read_unlock();
3480
3481 if (err)
3482 goto out;
3483 }
3484
3485 /* reload in case device was changed */
3486 dev = *_dev;
3487
3488 err = -EINVAL;
3489 if (!dev) {
3490 NL_SET_ERR_MSG(extack, "Egress device not specified");
3491 goto out;
3492 } else if (dev->flags & IFF_LOOPBACK) {
3493 NL_SET_ERR_MSG(extack,
3494 "Egress device can not be loopback device for this route");
3495 goto out;
3496 }
3497
3498 /* if we did not check gw_addr above, do so now that the
3499 * egress device has been resolved.
3500 */
3501 if (need_addr_check &&
3502 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3503 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3504 goto out;
3505 }
3506
3507 err = 0;
3508out:
3509 return err;
3510}
3511
3512static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3513{
3514 if ((flags & RTF_REJECT) ||
3515 (dev && (dev->flags & IFF_LOOPBACK) &&
3516 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3517 !(flags & (RTF_ANYCAST | RTF_LOCAL))))
3518 return true;
3519
3520 return false;
3521}
3522
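/* Initialise @fib6_nh from the user supplied @cfg: resolve and hold the
 * egress device, validate a gateway via ip6_validate_gw(), set up
 * lwtunnel encap state and allocate the per-cpu route cache. Routes
 * that fib6_is_reject() classifies as reject are bound to the loopback
 * device. On error all acquired references are dropped.
 */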
3523int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3524 struct fib6_config *cfg, gfp_t gfp_flags,
3525 struct netlink_ext_ack *extack)
3526{
3527 netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
3528 struct net_device *dev = NULL;
3529 struct inet6_dev *idev = NULL;
3530 int addr_type;
3531 int err;
3532
3533 fib6_nh->fib_nh_family = AF_INET6;
3534#ifdef CONFIG_IPV6_ROUTER_PREF
3535 fib6_nh->last_probe = jiffies;
3536#endif
3537 if (cfg->fc_is_fdb) {
3538 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3539 fib6_nh->fib_nh_gw_family = AF_INET6;
3540 return 0;
3541 }
3542
3543 err = -ENODEV;
3544 if (cfg->fc_ifindex) {
3545 dev = netdev_get_by_index(net, cfg->fc_ifindex,
3546 dev_tracker, gfp_flags);
3547 if (!dev)
3548 goto out;
3549 idev = in6_dev_get(dev);
3550 if (!idev)
3551 goto out;
3552 }
3553
3554 if (cfg->fc_flags & RTNH_F_ONLINK) {
3555 if (!dev) {
3556 NL_SET_ERR_MSG(extack,
3557 "Nexthop device required for onlink");
3558 goto out;
3559 }
3560
3561 if (!(dev->flags & IFF_UP)) {
3562 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3563 err = -ENETDOWN;
3564 goto out;
3565 }
3566
3567 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3568 }
3569
3570 fib6_nh->fib_nh_weight = 1;
3571
3572 /* We cannot add true routes via loopback here; they would
3573 * result in kernel looping. Promote them to reject routes.
3574 */
3575 addr_type = ipv6_addr_type(&cfg->fc_dst);
3576 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3577 /* hold loopback dev/idev if we haven't done so. */
3578 if (dev != net->loopback_dev) {
3579 if (dev) {
3580 netdev_put(dev, dev_tracker);
3581 in6_dev_put(idev);
3582 }
3583 dev = net->loopback_dev;
3584 netdev_hold(dev, dev_tracker, gfp_flags);
3585 idev = in6_dev_get(dev);
3586 if (!idev) {
3587 err = -ENODEV;
3588 goto out;
3589 }
3590 }
3591 goto pcpu_alloc;
3592 }
3593
3594 if (cfg->fc_flags & RTF_GATEWAY) {
3595 err = ip6_validate_gw(net, cfg, &dev, dev_tracker,
3596 &idev, extack);
3597 if (err)
3598 goto out;
3599
3600 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3601 fib6_nh->fib_nh_gw_family = AF_INET6;
3602 }
3603
3604 err = -ENODEV;
3605 if (!dev)
3606 goto out;
3607
3608 if (!idev || idev->cnf.disable_ipv6) {
3609 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3610 err = -EACCES;
3611 goto out;
3612 }
3613
3614 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3615 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3616 err = -ENETDOWN;
3617 goto out;
3618 }
3619
3620 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3621 !netif_carrier_ok(dev))
3622 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3623
3624 err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
3625 cfg->fc_encap_type, cfg, gfp_flags, extack);
3626 if (err)
3627 goto out;
3628
3629pcpu_alloc:
3630 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3631 if (!fib6_nh->rt6i_pcpu) {
3632 err = -ENOMEM;
3633 goto out;
3634 }
3635
3636 fib6_nh->fib_nh_dev = dev;
3637 fib6_nh->fib_nh_oif = dev->ifindex;
3638 err = 0;
3639out:
3640 if (idev)
3641 in6_dev_put(idev);
3642
3643 if (err) {
3644 lwtstate_put(fib6_nh->fib_nh_lws);
3645 fib6_nh->fib_nh_lws = NULL;
3646 netdev_put(dev, dev_tracker);
3647 }
3648
3649 return err;
3650}
3651
3652void fib6_nh_release(struct fib6_nh *fib6_nh)
3653{
3654 struct rt6_exception_bucket *bucket;
3655
3656 rcu_read_lock();
3657
3658 fib6_nh_flush_exceptions(fib6_nh, NULL);
3659 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3660 if (bucket) {
3661 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3662 kfree(bucket);
3663 }
3664
3665 rcu_read_unlock();
3666
3667 fib6_nh_release_dsts(fib6_nh);
3668 free_percpu(fib6_nh->rt6i_pcpu);
3669
3670 fib_nh_common_release(&fib6_nh->nh_common);
3671}
3672
3673void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
3674{
3675 int cpu;
3676
3677 if (!fib6_nh->rt6i_pcpu)
3678 return;
3679
3680 for_each_possible_cpu(cpu) {
3681 struct rt6_info *pcpu_rt, **ppcpu_rt;
3682
3683 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3684 pcpu_rt = xchg(ppcpu_rt, NULL);
3685 if (pcpu_rt) {
3686 dst_dev_put(&pcpu_rt->dst);
3687 dst_release(&pcpu_rt->dst);
3688 }
3689 }
3690}
3691
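/* Allocate and fill a fib6_info from @cfg without inserting it into the
 * FIB: validates the user supplied parameters, resolves the table and
 * the (optional) nexthop object, and initialises the embedded fib6_nh.
 * Returns the new entry or an ERR_PTR() on failure.
 */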
3692static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3693 gfp_t gfp_flags,
3694 struct netlink_ext_ack *extack)
3695{
3696 struct net *net = cfg->fc_nlinfo.nl_net;
3697 struct fib6_info *rt = NULL;
3698 struct nexthop *nh = NULL;
3699 struct fib6_table *table;
3700 struct fib6_nh *fib6_nh;
3701 int err = -EINVAL;
3702 int addr_type;
3703
3704 /* RTF_PCPU is an internal flag; can not be set by userspace */
3705 if (cfg->fc_flags & RTF_PCPU) {
3706 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3707 goto out;
3708 }
3709
3710 /* RTF_CACHE is an internal flag; can not be set by userspace */
3711 if (cfg->fc_flags & RTF_CACHE) {
3712 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3713 goto out;
3714 }
3715
3716 if (cfg->fc_type > RTN_MAX) {
3717 NL_SET_ERR_MSG(extack, "Invalid route type");
3718 goto out;
3719 }
3720
3721 if (cfg->fc_dst_len > 128) {
3722 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3723 goto out;
3724 }
3725 if (cfg->fc_src_len > 128) {
3726 NL_SET_ERR_MSG(extack, "Invalid source address length");
3727 goto out;
3728 }
3729#ifndef CONFIG_IPV6_SUBTREES
3730 if (cfg->fc_src_len) {
3731 NL_SET_ERR_MSG(extack,
3732 "Specifying source address requires IPV6_SUBTREES to be enabled");
3733 goto out;
3734 }
3735#endif
3736 if (cfg->fc_nh_id) {
3737 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3738 if (!nh) {
3739 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3740 goto out;
3741 }
3742 err = fib6_check_nexthop(nh, cfg, extack);
3743 if (err)
3744 goto out;
3745 }
3746
3747 err = -ENOBUFS;
3748 if (cfg->fc_nlinfo.nlh &&
3749 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3750 table = fib6_get_table(net, cfg->fc_table);
3751 if (!table) {
3752 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3753 table = fib6_new_table(net, cfg->fc_table);
3754 }
3755 } else {
3756 table = fib6_new_table(net, cfg->fc_table);
3757 }
3758
3759 if (!table)
3760 goto out;
3761
3762 err = -ENOMEM;
3763 rt = fib6_info_alloc(gfp_flags, !nh);
3764 if (!rt)
3765 goto out;
3766
3767 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3768 extack);
3769 if (IS_ERR(rt->fib6_metrics)) {
3770 err = PTR_ERR(rt->fib6_metrics);
3771 /* Do not leave garbage there. */
3772 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3773 goto out_free;
3774 }
3775
3776 if (cfg->fc_flags & RTF_ADDRCONF)
3777 rt->dst_nocount = true;
3778
3779 if (cfg->fc_flags & RTF_EXPIRES)
3780 fib6_set_expires(rt, jiffies +
3781 clock_t_to_jiffies(cfg->fc_expires));
3782
3783 if (cfg->fc_protocol == RTPROT_UNSPEC)
3784 cfg->fc_protocol = RTPROT_BOOT;
3785 rt->fib6_protocol = cfg->fc_protocol;
3786
3787 rt->fib6_table = table;
3788 rt->fib6_metric = cfg->fc_metric;
3789 rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3790 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3791
3792 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3793 rt->fib6_dst.plen = cfg->fc_dst_len;
3794
3795#ifdef CONFIG_IPV6_SUBTREES
3796 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3797 rt->fib6_src.plen = cfg->fc_src_len;
3798#endif
3799 if (nh) {
3800 if (rt->fib6_src.plen) {
3801 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3802 goto out_free;
3803 }
3804 if (!nexthop_get(nh)) {
3805 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3806 goto out_free;
3807 }
3808 rt->nh = nh;
3809 fib6_nh = nexthop_fib6_nh(rt->nh);
3810 } else {
3811 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3812 if (err)
3813 goto out;
3814
3815 fib6_nh = rt->fib6_nh;
3816
3817 /* We cannot add true routes via loopback here; they would
3818 * result in kernel looping. Promote them to reject routes.
3819 */
3820 addr_type = ipv6_addr_type(&cfg->fc_dst);
3821 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3822 addr_type))
3823 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3824 }
3825
3826 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3827 struct net_device *dev = fib6_nh->fib_nh_dev;
3828
3829 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3830 NL_SET_ERR_MSG(extack, "Invalid source address");
3831 err = -EINVAL;
3832 goto out;
3833 }
3834 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3835 rt->fib6_prefsrc.plen = 128;
3836 } else
3837 rt->fib6_prefsrc.plen = 0;
3838
3839 return rt;
3840out:
3841 fib6_info_release(rt);
3842 return ERR_PTR(err);
3843out_free:
3844 ip_fib_metrics_put(rt->fib6_metrics);
3845 kfree(rt);
3846 return ERR_PTR(err);
3847}
3848
3849int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3850 struct netlink_ext_ack *extack)
3851{
3852 struct fib6_info *rt;
3853 int err;
3854
3855 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3856 if (IS_ERR(rt))
3857 return PTR_ERR(rt);
3858
3859 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3860 fib6_info_release(rt);
3861
3862 return err;
3863}
3864
3865static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3866{
3867 struct net *net = info->nl_net;
3868 struct fib6_table *table;
3869 int err;
3870
3871 if (rt == net->ipv6.fib6_null_entry) {
3872 err = -ENOENT;
3873 goto out;
3874 }
3875
3876 table = rt->fib6_table;
3877 spin_lock_bh(&table->tb6_lock);
3878 err = fib6_del(rt, info);
3879 spin_unlock_bh(&table->tb6_lock);
3880
3881out:
3882 fib6_info_release(rt);
3883 return err;
3884}
3885
3886int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3887{
3888 struct nl_info info = {
3889 .nl_net = net,
3890 .skip_notify = skip_notify
3891 };
3892
3893 return __ip6_del_rt(rt, &info);
3894}
3895
3896static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3897{
3898 struct nl_info *info = &cfg->fc_nlinfo;
3899 struct net *net = info->nl_net;
3900 struct sk_buff *skb = NULL;
3901 struct fib6_table *table;
3902 int err = -ENOENT;
3903
3904 if (rt == net->ipv6.fib6_null_entry)
3905 goto out_put;
3906 table = rt->fib6_table;
3907 spin_lock_bh(&table->tb6_lock);
3908
3909 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3910 struct fib6_info *sibling, *next_sibling;
3911 struct fib6_node *fn;
3912
3913 /* prefer to send a single notification with all hops */
3914 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3915 if (skb) {
3916 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3917
3918 if (rt6_fill_node(net, skb, rt, NULL,
3919 NULL, NULL, 0, RTM_DELROUTE,
3920 info->portid, seq, 0) < 0) {
3921 kfree_skb(skb);
3922 skb = NULL;
3923 } else
3924 info->skip_notify = 1;
3925 }
3926
3927 /* 'rt' points to the first sibling route. If it is not the
3928 * leaf, then we do not need to send a notification. Otherwise,
3929 * we need to check if the last sibling has a next route or not
3930 * and emit a replace or delete notification, respectively.
3931 */
3932 info->skip_notify_kernel = 1;
3933 fn = rcu_dereference_protected(rt->fib6_node,
3934 lockdep_is_held(&table->tb6_lock));
3935 if (rcu_access_pointer(fn->leaf) == rt) {
3936 struct fib6_info *last_sibling, *replace_rt;
3937
3938 last_sibling = list_last_entry(&rt->fib6_siblings,
3939 struct fib6_info,
3940 fib6_siblings);
3941 replace_rt = rcu_dereference_protected(
3942 last_sibling->fib6_next,
3943 lockdep_is_held(&table->tb6_lock));
3944 if (replace_rt)
3945 call_fib6_entry_notifiers_replace(net,
3946 replace_rt);
3947 else
3948 call_fib6_multipath_entry_notifiers(net,
3949 FIB_EVENT_ENTRY_DEL,
3950 rt, rt->fib6_nsiblings,
3951 NULL);
3952 }
3953 list_for_each_entry_safe(sibling, next_sibling,
3954 &rt->fib6_siblings,
3955 fib6_siblings) {
3956 err = fib6_del(sibling, info);
3957 if (err)
3958 goto out_unlock;
3959 }
3960 }
3961
3962 err = fib6_del(rt, info);
3963out_unlock:
3964 spin_unlock_bh(&table->tb6_lock);
3965out_put:
3966 fib6_info_release(rt);
3967
3968 if (skb) {
3969 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3970 info->nlh, gfp_any());
3971 }
3972 return err;
3973}
3974
3975static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3976{
3977 int rc = -ESRCH;
3978
3979 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3980 goto out;
3981
3982 if (cfg->fc_flags & RTF_GATEWAY &&
3983 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3984 goto out;
3985
3986 rc = rt6_remove_exception_rt(rt);
3987out:
3988 return rc;
3989}
3990
3991static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3992 struct fib6_nh *nh)
3993{
3994 struct fib6_result res = {
3995 .f6i = rt,
3996 .nh = nh,
3997 };
3998 struct rt6_info *rt_cache;
3999
4000 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
4001 if (rt_cache)
4002 return __ip6_del_cached_rt(rt_cache, cfg);
4003
4004 return 0;
4005}
4006
4007struct fib6_nh_del_cached_rt_arg {
4008 struct fib6_config *cfg;
4009 struct fib6_info *f6i;
4010};
4011
4012static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
4013{
4014 struct fib6_nh_del_cached_rt_arg *arg = _arg;
4015 int rc;
4016
4017 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
4018 return rc != -ESRCH ? rc : 0;
4019}
4020
4021static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
4022{
4023 struct fib6_nh_del_cached_rt_arg arg = {
4024 .cfg = cfg,
4025 .f6i = f6i
4026 };
4027
4028 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
4029}
4030
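/* Delete the route described by @cfg. With RTF_CACHE only matching
 * entries in the exception cache are removed; otherwise the first
 * matching fib6_info is deleted - together with its ECMP siblings
 * unless a gateway was specified, in which case only that one hop is
 * removed.
 */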
4031static int ip6_route_del(struct fib6_config *cfg,
4032 struct netlink_ext_ack *extack)
4033{
4034 struct fib6_table *table;
4035 struct fib6_info *rt;
4036 struct fib6_node *fn;
4037 int err = -ESRCH;
4038
4039 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
4040 if (!table) {
4041 NL_SET_ERR_MSG(extack, "FIB table does not exist");
4042 return err;
4043 }
4044
4045 rcu_read_lock();
4046
4047 fn = fib6_locate(&table->tb6_root,
4048 &cfg->fc_dst, cfg->fc_dst_len,
4049 &cfg->fc_src, cfg->fc_src_len,
4050 !(cfg->fc_flags & RTF_CACHE));
4051
4052 if (fn) {
4053 for_each_fib6_node_rt_rcu(fn) {
4054 struct fib6_nh *nh;
4055
4056 if (rt->nh && cfg->fc_nh_id &&
4057 rt->nh->id != cfg->fc_nh_id)
4058 continue;
4059
4060 if (cfg->fc_flags & RTF_CACHE) {
4061 int rc = 0;
4062
4063 if (rt->nh) {
4064 rc = ip6_del_cached_rt_nh(cfg, rt);
4065 } else if (cfg->fc_nh_id) {
4066 continue;
4067 } else {
4068 nh = rt->fib6_nh;
4069 rc = ip6_del_cached_rt(cfg, rt, nh);
4070 }
4071 if (rc != -ESRCH) {
4072 rcu_read_unlock();
4073 return rc;
4074 }
4075 continue;
4076 }
4077
4078 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
4079 continue;
4080 if (cfg->fc_protocol &&
4081 cfg->fc_protocol != rt->fib6_protocol)
4082 continue;
4083
4084 if (rt->nh) {
4085 if (!fib6_info_hold_safe(rt))
4086 continue;
4087 rcu_read_unlock();
4088
4089 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
4090 }
4091 if (cfg->fc_nh_id)
4092 continue;
4093
4094 nh = rt->fib6_nh;
4095 if (cfg->fc_ifindex &&
4096 (!nh->fib_nh_dev ||
4097 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
4098 continue;
4099 if (cfg->fc_flags & RTF_GATEWAY &&
4100 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
4101 continue;
4102 if (!fib6_info_hold_safe(rt))
4103 continue;
4104 rcu_read_unlock();
4105
4106 /* If a gateway was specified, only delete that one nexthop */
4107 if (cfg->fc_flags & RTF_GATEWAY)
4108 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
4109
4110 return __ip6_del_rt_siblings(rt, cfg);
4111 }
4112 }
4113 rcu_read_unlock();
4114
4115 return err;
4116}
4117
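/* Message layout handled below (descriptive sketch; the authoritative
 * definition is struct rd_msg in <net/ndisc.h>): an ICMPv6 Redirect is
 * the ICMPv6 header followed by the Target Address (the better first
 * hop), the Destination Address the redirect applies to, and optional
 * ND options such as the target link-layer address. The optlen
 * computation at the top of rt6_do_redirect() covers only that
 * trailing options area.
 */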
4118static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4119{
4120 struct netevent_redirect netevent;
4121 struct rt6_info *rt, *nrt = NULL;
4122 struct fib6_result res = {};
4123 struct ndisc_options ndopts;
4124 struct inet6_dev *in6_dev;
4125 struct neighbour *neigh;
4126 struct rd_msg *msg;
4127 int optlen, on_link;
4128 u8 *lladdr;
4129
4130 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4131 optlen -= sizeof(*msg);
4132
4133 if (optlen < 0) {
4134 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4135 return;
4136 }
4137
4138 msg = (struct rd_msg *)icmp6_hdr(skb);
4139
4140 if (ipv6_addr_is_multicast(&msg->dest)) {
4141 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4142 return;
4143 }
4144
4145 on_link = 0;
4146 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4147 on_link = 1;
4148 } else if (ipv6_addr_type(&msg->target) !=
4149 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4150 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4151 return;
4152 }
4153
4154 in6_dev = __in6_dev_get(skb->dev);
4155 if (!in6_dev)
4156 return;
4157 if (READ_ONCE(in6_dev->cnf.forwarding) ||
4158 !READ_ONCE(in6_dev->cnf.accept_redirects))
4159 return;
4160
4161 /* RFC2461 8.1:
4162 * The IP source address of the Redirect MUST be the same as the current
4163 * first-hop router for the specified ICMP Destination Address.
4164 */
4165
4166 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4167 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4168 return;
4169 }
4170
4171 lladdr = NULL;
4172 if (ndopts.nd_opts_tgt_lladdr) {
4173 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4174 skb->dev);
4175 if (!lladdr) {
4176 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4177 return;
4178 }
4179 }
4180
4181 rt = dst_rt6_info(dst);
4182 if (rt->rt6i_flags & RTF_REJECT) {
4183 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4184 return;
4185 }
4186
4187 /* Redirect received -> path was valid.
4188 * Redirects are sent only in response to data packets,
4189 * so this nexthop is apparently reachable. --ANK
4190 */
4191 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4192
4193 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4194 if (!neigh)
4195 return;
4196
4197 /*
4198 * We have finally decided to accept it.
4199 */
4200
4201 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4202 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4203 NEIGH_UPDATE_F_OVERRIDE|
4204 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4205 NEIGH_UPDATE_F_ISROUTER)),
4206 NDISC_REDIRECT, &ndopts);
4207
4208 rcu_read_lock();
4209 res.f6i = rcu_dereference(rt->from);
4210 if (!res.f6i)
4211 goto out;
4212
4213 if (res.f6i->nh) {
4214 struct fib6_nh_match_arg arg = {
4215 .dev = dst->dev,
4216 .gw = &rt->rt6i_gateway,
4217 };
4218
4219 nexthop_for_each_fib6_nh(res.f6i->nh,
4220 fib6_nh_find_match, &arg);
4221
4222 /* The fib6_info uses a nexthop object none of whose fib6_nh
4223 * entries use dst->dev; this should be impossible.
4224 */
4225 if (!arg.match)
4226 goto out;
4227 res.nh = arg.match;
4228 } else {
4229 res.nh = res.f6i->fib6_nh;
4230 }
4231
4232 res.fib6_flags = res.f6i->fib6_flags;
4233 res.fib6_type = res.f6i->fib6_type;
4234 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4235 if (!nrt)
4236 goto out;
4237
4238 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4239 if (on_link)
4240 nrt->rt6i_flags &= ~RTF_GATEWAY;
4241
4242 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4243
4244 /* rt6_insert_exception() will take care of duplicated exceptions */
4245 if (rt6_insert_exception(nrt, &res)) {
4246 dst_release_immediate(&nrt->dst);
4247 goto out;
4248 }
4249
4250 netevent.old = &rt->dst;
4251 netevent.new = &nrt->dst;
4252 netevent.daddr = &msg->dest;
4253 netevent.neigh = neigh;
4254 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4255
4256out:
4257 rcu_read_unlock();
4258 neigh_release(neigh);
4259}
4260
4261#ifdef CONFIG_IPV6_ROUTE_INFO
4262static struct fib6_info *rt6_get_route_info(struct net *net,
4263 const struct in6_addr *prefix, int prefixlen,
4264 const struct in6_addr *gwaddr,
4265 struct net_device *dev)
4266{
4267 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4268 int ifindex = dev->ifindex;
4269 struct fib6_node *fn;
4270 struct fib6_info *rt = NULL;
4271 struct fib6_table *table;
4272
4273 table = fib6_get_table(net, tb_id);
4274 if (!table)
4275 return NULL;
4276
4277 rcu_read_lock();
4278 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4279 if (!fn)
4280 goto out;
4281
4282 for_each_fib6_node_rt_rcu(fn) {
4283 /* these routes do not use nexthops */
4284 if (rt->nh)
4285 continue;
4286 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4287 continue;
4288 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4289 !rt->fib6_nh->fib_nh_gw_family)
4290 continue;
4291 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4292 continue;
4293 if (!fib6_info_hold_safe(rt))
4294 continue;
4295 break;
4296 }
4297out:
4298 rcu_read_unlock();
4299 return rt;
4300}
4301
4302static struct fib6_info *rt6_add_route_info(struct net *net,
4303 const struct in6_addr *prefix, int prefixlen,
4304 const struct in6_addr *gwaddr,
4305 struct net_device *dev,
4306 unsigned int pref)
4307{
4308 struct fib6_config cfg = {
4309 .fc_metric = IP6_RT_PRIO_USER,
4310 .fc_ifindex = dev->ifindex,
4311 .fc_dst_len = prefixlen,
4312 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4313 RTF_UP | RTF_PREF(pref),
4314 .fc_protocol = RTPROT_RA,
4315 .fc_type = RTN_UNICAST,
4316 .fc_nlinfo.portid = 0,
4317 .fc_nlinfo.nlh = NULL,
4318 .fc_nlinfo.nl_net = net,
4319 };
4320
4321 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4322 cfg.fc_dst = *prefix;
4323 cfg.fc_gateway = *gwaddr;
4324
4325 /* We should treat it as a default route if prefix length is 0. */
4326 if (!prefixlen)
4327 cfg.fc_flags |= RTF_DEFAULT;
4328
4329 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4330
4331 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4332}
4333#endif
4334
4335struct fib6_info *rt6_get_dflt_router(struct net *net,
4336 const struct in6_addr *addr,
4337 struct net_device *dev)
4338{
4339 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4340 struct fib6_info *rt;
4341 struct fib6_table *table;
4342
4343 table = fib6_get_table(net, tb_id);
4344 if (!table)
4345 return NULL;
4346
4347 rcu_read_lock();
4348 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4349 struct fib6_nh *nh;
4350
4351 /* RA routes do not use nexthops */
4352 if (rt->nh)
4353 continue;
4354
4355 nh = rt->fib6_nh;
4356 if (dev == nh->fib_nh_dev &&
4357 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4358 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4359 break;
4360 }
4361 if (rt && !fib6_info_hold_safe(rt))
4362 rt = NULL;
4363 rcu_read_unlock();
4364 return rt;
4365}
4366
4367struct fib6_info *rt6_add_dflt_router(struct net *net,
4368 const struct in6_addr *gwaddr,
4369 struct net_device *dev,
4370 unsigned int pref,
4371 u32 defrtr_usr_metric,
4372 int lifetime)
4373{
4374 struct fib6_config cfg = {
4375 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4376 .fc_metric = defrtr_usr_metric,
4377 .fc_ifindex = dev->ifindex,
4378 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4379 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4380 .fc_protocol = RTPROT_RA,
4381 .fc_type = RTN_UNICAST,
4382 .fc_nlinfo.portid = 0,
4383 .fc_nlinfo.nlh = NULL,
4384 .fc_nlinfo.nl_net = net,
4385 .fc_expires = jiffies_to_clock_t(lifetime * HZ),
4386 };
4387
4388 cfg.fc_gateway = *gwaddr;
4389
4390 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4391 struct fib6_table *table;
4392
4393 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4394 if (table)
4395 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4396 }
4397
4398 return rt6_get_dflt_router(net, gwaddr, dev);
4399}
4400
4401static void __rt6_purge_dflt_routers(struct net *net,
4402 struct fib6_table *table)
4403{
4404 struct fib6_info *rt;
4405
4406restart:
4407 rcu_read_lock();
4408 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4409 struct net_device *dev = fib6_info_nh_dev(rt);
4410 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4411
4412 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4413 (!idev || idev->cnf.accept_ra != 2) &&
4414 fib6_info_hold_safe(rt)) {
4415 rcu_read_unlock();
4416 ip6_del_rt(net, rt, false);
4417 goto restart;
4418 }
4419 }
4420 rcu_read_unlock();
4421
4422 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4423}
4424
4425void rt6_purge_dflt_routers(struct net *net)
4426{
4427 struct fib6_table *table;
4428 struct hlist_head *head;
4429 unsigned int h;
4430
4431 rcu_read_lock();
4432
4433 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4434 head = &net->ipv6.fib_table_hash[h];
4435 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4436 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4437 __rt6_purge_dflt_routers(net, table);
4438 }
4439 }
4440
4441 rcu_read_unlock();
4442}
4443
4444static void rtmsg_to_fib6_config(struct net *net,
4445 struct in6_rtmsg *rtmsg,
4446 struct fib6_config *cfg)
4447{
4448 *cfg = (struct fib6_config){
4449 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4450 : RT6_TABLE_MAIN,
4451 .fc_ifindex = rtmsg->rtmsg_ifindex,
4452 .fc_metric = rtmsg->rtmsg_metric,
4453 .fc_expires = rtmsg->rtmsg_info,
4454 .fc_dst_len = rtmsg->rtmsg_dst_len,
4455 .fc_src_len = rtmsg->rtmsg_src_len,
4456 .fc_flags = rtmsg->rtmsg_flags,
4457 .fc_type = rtmsg->rtmsg_type,
4458
4459 .fc_nlinfo.nl_net = net,
4460
4461 .fc_dst = rtmsg->rtmsg_dst,
4462 .fc_src = rtmsg->rtmsg_src,
4463 .fc_gateway = rtmsg->rtmsg_gateway,
4464 };
4465}
4466
4467int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4468{
4469 struct fib6_config cfg;
4470 int err;
4471
4472 if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4473 return -EINVAL;
4474 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4475 return -EPERM;
4476
4477 rtmsg_to_fib6_config(net, rtmsg, &cfg);
4478
4479 rtnl_lock();
4480 switch (cmd) {
4481 case SIOCADDRT:
4482 /* Only apply the default fc_metric when adding a route */
4483 if (cfg.fc_metric == 0)
4484 cfg.fc_metric = IP6_RT_PRIO_USER;
4485 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4486 break;
4487 case SIOCDELRT:
4488 err = ip6_route_del(&cfg, NULL);
4489 break;
4490 }
4491 rtnl_unlock();
4492 return err;
4493}
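
/* Illustrative userspace sketch (not part of the kernel sources; the
 * interface name and addresses below are placeholders): the legacy
 * SIOCADDRT/SIOCDELRT path above is driven by an ioctl on any IPv6
 * socket carrying a struct in6_rtmsg, e.g.
 *
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *	struct in6_rtmsg rt = { 0 };
 *
 *	inet_pton(AF_INET6, "2001:db8::", &rt.rtmsg_dst);
 *	rt.rtmsg_dst_len = 64;
 *	inet_pton(AF_INET6, "fe80::1", &rt.rtmsg_gateway);
 *	rt.rtmsg_flags = RTF_UP | RTF_GATEWAY;
 *	rt.rtmsg_metric = 1024;		// 0 would default to IP6_RT_PRIO_USER
 *	rt.rtmsg_ifindex = if_nametoindex("eth0");
 *
 *	if (ioctl(fd, SIOCADDRT, &rt) < 0)
 *		perror("SIOCADDRT");
 *
 * The caller needs CAP_NET_ADMIN, as checked above.
 */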
4494
4495/*
4496 * Drop the packet on the floor
4497 */
4498
4499static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4500{
4501 struct dst_entry *dst = skb_dst(skb);
4502 struct net *net = dev_net(dst->dev);
4503 struct inet6_dev *idev;
4504 SKB_DR(reason);
4505 int type;
4506
4507 if (netif_is_l3_master(skb->dev) ||
4508 dst->dev == net->loopback_dev)
4509 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4510 else
4511 idev = ip6_dst_idev(dst);
4512
4513 switch (ipstats_mib_noroutes) {
4514 case IPSTATS_MIB_INNOROUTES:
4515 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4516 if (type == IPV6_ADDR_ANY) {
4517 SKB_DR_SET(reason, IP_INADDRERRORS);
4518 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4519 break;
4520 }
4521 SKB_DR_SET(reason, IP_INNOROUTES);
4522 fallthrough;
4523 case IPSTATS_MIB_OUTNOROUTES:
4524 SKB_DR_OR(reason, IP_OUTNOROUTES);
4525 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4526 break;
4527 }
4528
4529 /* Start over by dropping the dst for the l3mdev case */
4530 if (netif_is_l3_master(skb->dev))
4531 skb_dst_drop(skb);
4532
4533 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4534 kfree_skb_reason(skb, reason);
4535 return 0;
4536}
4537
4538static int ip6_pkt_discard(struct sk_buff *skb)
4539{
4540 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4541}
4542
4543static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4544{
4545 skb->dev = skb_dst(skb)->dev;
4546 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4547}
4548
4549static int ip6_pkt_prohibit(struct sk_buff *skb)
4550{
4551 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4552}
4553
4554static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4555{
4556 skb->dev = skb_dst(skb)->dev;
4557 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4558}
4559
4560/*
4561 * Allocate a dst for local (unicast / anycast) address.
4562 */
4563
4564struct fib6_info *addrconf_f6i_alloc(struct net *net,
4565 struct inet6_dev *idev,
4566 const struct in6_addr *addr,
4567 bool anycast, gfp_t gfp_flags,
4568 struct netlink_ext_ack *extack)
4569{
4570 struct fib6_config cfg = {
4571 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4572 .fc_ifindex = idev->dev->ifindex,
4573 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4574 .fc_dst = *addr,
4575 .fc_dst_len = 128,
4576 .fc_protocol = RTPROT_KERNEL,
4577 .fc_nlinfo.nl_net = net,
4578 .fc_ignore_dev_down = true,
4579 };
4580 struct fib6_info *f6i;
4581
4582 if (anycast) {
4583 cfg.fc_type = RTN_ANYCAST;
4584 cfg.fc_flags |= RTF_ANYCAST;
4585 } else {
4586 cfg.fc_type = RTN_LOCAL;
4587 cfg.fc_flags |= RTF_LOCAL;
4588 }
4589
4590 f6i = ip6_route_info_create(&cfg, gfp_flags, extack);
4591 if (!IS_ERR(f6i)) {
4592 f6i->dst_nocount = true;
4593
4594 if (!anycast &&
4595 (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
4596 READ_ONCE(idev->cnf.disable_policy)))
4597 f6i->dst_nopolicy = true;
4598 }
4599
4600 return f6i;
4601}
4602
4603 /* Remove a deleted address from prefsrc entries */
4604struct arg_dev_net_ip {
4605 struct net *net;
4606 struct in6_addr *addr;
4607};
4608
4609static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4610{
4611 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4612 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4613
4614 if (!rt->nh &&
4615 rt != net->ipv6.fib6_null_entry &&
4616 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr) &&
4617 !ipv6_chk_addr(net, addr, rt->fib6_nh->fib_nh_dev, 0)) {
4618 spin_lock_bh(&rt6_exception_lock);
4619 /* remove prefsrc entry */
4620 rt->fib6_prefsrc.plen = 0;
4621 spin_unlock_bh(&rt6_exception_lock);
4622 }
4623 return 0;
4624}
4625
4626void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4627{
4628 struct net *net = dev_net(ifp->idev->dev);
4629 struct arg_dev_net_ip adni = {
4630 .net = net,
4631 .addr = &ifp->addr,
4632 };
4633 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4634}
4635
4636#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4637
4638 /* Remove routers and update dst entries when a gateway turns into a host. */
4639static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4640{
4641 struct in6_addr *gateway = (struct in6_addr *)arg;
4642 struct fib6_nh *nh;
4643
4644 /* RA routes do not use nexthops */
4645 if (rt->nh)
4646 return 0;
4647
4648 nh = rt->fib6_nh;
4649 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4650 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4651 return -1;
4652
4653 /* Further clean up cached routes in the exception table.
4654 * This is needed because a cached route may have a different
4655 * gateway than its 'parent' in the case of an IP redirect.
4656 */
4657 fib6_nh_exceptions_clean_tohost(nh, gateway);
4658
4659 return 0;
4660}
4661
4662void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4663{
4664 fib6_clean_all(net, fib6_clean_tohost, gateway);
4665}
4666
4667struct arg_netdev_event {
4668 const struct net_device *dev;
4669 union {
4670 unsigned char nh_flags;
4671 unsigned long event;
4672 };
4673};
4674
4675static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4676{
4677 struct fib6_info *iter;
4678 struct fib6_node *fn;
4679
4680 fn = rcu_dereference_protected(rt->fib6_node,
4681 lockdep_is_held(&rt->fib6_table->tb6_lock));
4682 iter = rcu_dereference_protected(fn->leaf,
4683 lockdep_is_held(&rt->fib6_table->tb6_lock));
4684 while (iter) {
4685 if (iter->fib6_metric == rt->fib6_metric &&
4686 rt6_qualify_for_ecmp(iter))
4687 return iter;
4688 iter = rcu_dereference_protected(iter->fib6_next,
4689 lockdep_is_held(&rt->fib6_table->tb6_lock));
4690 }
4691
4692 return NULL;
4693}
4694
4695/* only called for fib entries with builtin fib6_nh */
4696static bool rt6_is_dead(const struct fib6_info *rt)
4697{
4698 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4699 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4700 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4701 return true;
4702
4703 return false;
4704}
4705
4706static int rt6_multipath_total_weight(const struct fib6_info *rt)
4707{
4708 struct fib6_info *iter;
4709 int total = 0;
4710
4711 if (!rt6_is_dead(rt))
4712 total += rt->fib6_nh->fib_nh_weight;
4713
4714 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4715 if (!rt6_is_dead(iter))
4716 total += iter->fib6_nh->fib_nh_weight;
4717 }
4718
4719 return total;
4720}
4721
4722static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4723{
4724 int upper_bound = -1;
4725
4726 if (!rt6_is_dead(rt)) {
4727 *weight += rt->fib6_nh->fib_nh_weight;
4728 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4729 total) - 1;
4730 }
4731 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4732}
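
/* Worked example (editorial illustration, not part of the original
 * code): with two sibling nexthops of weight 1 and 3 (total = 4), the
 * cumulative weights are 1 and 4, so the stored upper bounds are
 *
 *	DIV_ROUND_CLOSEST_ULL(1ULL << 31, 4) - 1 =  536870911
 *	DIV_ROUND_CLOSEST_ULL(4ULL << 31, 4) - 1 = 2147483647
 *
 * A 31-bit multipath hash h then selects the first nexthop when
 * h <= 536870911 (roughly 25% of flows) and the second one otherwise
 * (roughly 75%), matching the 1:3 weight ratio. A dead nexthop keeps
 * the upper bound of -1 and is never selected.
 */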
4733
4734static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4735{
4736 struct fib6_info *iter;
4737 int weight = 0;
4738
4739 rt6_upper_bound_set(rt, &weight, total);
4740
4741 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4742 rt6_upper_bound_set(iter, &weight, total);
4743}
4744
4745void rt6_multipath_rebalance(struct fib6_info *rt)
4746{
4747 struct fib6_info *first;
4748 int total;
4749
4750 /* If the entire multipath route was marked for flushing, there
4751 * is no need to rebalance upon the removal of every sibling
4752 * route.
4753 */
4754 if (!rt->fib6_nsiblings || rt->should_flush)
4755 return;
4756
4757 /* During lookup routes are evaluated in order, so we need to
4758 * make sure upper bounds are assigned from the first sibling
4759 * onwards.
4760 */
4761 first = rt6_multipath_first_sibling(rt);
4762 if (WARN_ON_ONCE(!first))
4763 return;
4764
4765 total = rt6_multipath_total_weight(first);
4766 rt6_multipath_upper_bound_set(first, total);
4767}
4768
4769static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4770{
4771 const struct arg_netdev_event *arg = p_arg;
4772 struct net *net = dev_net(arg->dev);
4773
4774 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4775 rt->fib6_nh->fib_nh_dev == arg->dev) {
4776 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4777 fib6_update_sernum_upto_root(net, rt);
4778 rt6_multipath_rebalance(rt);
4779 }
4780
4781 return 0;
4782}
4783
4784void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4785{
4786 struct arg_netdev_event arg = {
4787 .dev = dev,
4788 {
4789 .nh_flags = nh_flags,
4790 },
4791 };
4792
4793 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4794 arg.nh_flags |= RTNH_F_LINKDOWN;
4795
4796 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4797}
4798
4799/* only called for fib entries with inline fib6_nh */
4800static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4801 const struct net_device *dev)
4802{
4803 struct fib6_info *iter;
4804
4805 if (rt->fib6_nh->fib_nh_dev == dev)
4806 return true;
4807 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4808 if (iter->fib6_nh->fib_nh_dev == dev)
4809 return true;
4810
4811 return false;
4812}
4813
4814static void rt6_multipath_flush(struct fib6_info *rt)
4815{
4816 struct fib6_info *iter;
4817
4818 rt->should_flush = 1;
4819 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4820 iter->should_flush = 1;
4821}
4822
4823static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4824 const struct net_device *down_dev)
4825{
4826 struct fib6_info *iter;
4827 unsigned int dead = 0;
4828
4829 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4830 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4831 dead++;
4832 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4833 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4834 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4835 dead++;
4836
4837 return dead;
4838}
4839
4840static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4841 const struct net_device *dev,
4842 unsigned char nh_flags)
4843{
4844 struct fib6_info *iter;
4845
4846 if (rt->fib6_nh->fib_nh_dev == dev)
4847 rt->fib6_nh->fib_nh_flags |= nh_flags;
4848 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4849 if (iter->fib6_nh->fib_nh_dev == dev)
4850 iter->fib6_nh->fib_nh_flags |= nh_flags;
4851}
4852
4853/* called with write lock held for table with rt */
4854static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4855{
4856 const struct arg_netdev_event *arg = p_arg;
4857 const struct net_device *dev = arg->dev;
4858 struct net *net = dev_net(dev);
4859
4860 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4861 return 0;
4862
4863 switch (arg->event) {
4864 case NETDEV_UNREGISTER:
4865 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4866 case NETDEV_DOWN:
4867 if (rt->should_flush)
4868 return -1;
4869 if (!rt->fib6_nsiblings)
4870 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4871 if (rt6_multipath_uses_dev(rt, dev)) {
4872 unsigned int count;
4873
4874 count = rt6_multipath_dead_count(rt, dev);
4875 if (rt->fib6_nsiblings + 1 == count) {
4876 rt6_multipath_flush(rt);
4877 return -1;
4878 }
4879 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4880 RTNH_F_LINKDOWN);
4881 fib6_update_sernum(net, rt);
4882 rt6_multipath_rebalance(rt);
4883 }
4884 return -2;
4885 case NETDEV_CHANGE:
4886 if (rt->fib6_nh->fib_nh_dev != dev ||
4887 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4888 break;
4889 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4890 rt6_multipath_rebalance(rt);
4891 break;
4892 }
4893
4894 return 0;
4895}
4896
4897void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4898{
4899 struct arg_netdev_event arg = {
4900 .dev = dev,
4901 {
4902 .event = event,
4903 },
4904 };
4905 struct net *net = dev_net(dev);
4906
4907 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4908 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4909 else
4910 fib6_clean_all(net, fib6_ifdown, &arg);
4911}
4912
4913void rt6_disable_ip(struct net_device *dev, unsigned long event)
4914{
4915 rt6_sync_down_dev(dev, event);
4916 rt6_uncached_list_flush_dev(dev);
4917 neigh_ifdown(&nd_tbl, dev);
4918}
4919
4920struct rt6_mtu_change_arg {
4921 struct net_device *dev;
4922 unsigned int mtu;
4923 struct fib6_info *f6i;
4924};
4925
4926static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4927{
4928 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4929 struct fib6_info *f6i = arg->f6i;
4930
4931 /* For an administrative MTU increase there is no way to discover
4932 * an IPv6 PMTU increase, so the PMTU has to be updated here.
4933 * Since RFC 1981 does not cover administrative MTU increases,
4934 * updating the PMTU on increase is a MUST (e.g. jumbo frames).
4935 */
4936 if (nh->fib_nh_dev == arg->dev) {
4937 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4938 u32 mtu = f6i->fib6_pmtu;
4939
4940 if (mtu >= arg->mtu ||
4941 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4942 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4943
4944 spin_lock_bh(&rt6_exception_lock);
4945 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4946 spin_unlock_bh(&rt6_exception_lock);
4947 }
4948
4949 return 0;
4950}
4951
4952static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4953{
4954 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4955 struct inet6_dev *idev;
4956
4957 /* In IPv6, PMTU discovery is not optional,
4958 * so an RTAX_MTU lock cannot disable it.
4959 * We still use this lock to block changes
4960 * caused by addrconf/ndisc.
4961 */
4962
4963 idev = __in6_dev_get(arg->dev);
4964 if (!idev)
4965 return 0;
4966
4967 if (fib6_metric_locked(f6i, RTAX_MTU))
4968 return 0;
4969
4970 arg->f6i = f6i;
4971 if (f6i->nh) {
4972 /* fib6_nh_mtu_change only returns 0, so this is safe */
4973 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4974 arg);
4975 }
4976
4977 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4978}
4979
4980void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4981{
4982 struct rt6_mtu_change_arg arg = {
4983 .dev = dev,
4984 .mtu = mtu,
4985 };
4986
4987 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4988}
4989
4990static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4991 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4992 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4993 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4994 [RTA_OIF] = { .type = NLA_U32 },
4995 [RTA_IIF] = { .type = NLA_U32 },
4996 [RTA_PRIORITY] = { .type = NLA_U32 },
4997 [RTA_METRICS] = { .type = NLA_NESTED },
4998 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4999 [RTA_PREF] = { .type = NLA_U8 },
5000 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
5001 [RTA_ENCAP] = { .type = NLA_NESTED },
5002 [RTA_EXPIRES] = { .type = NLA_U32 },
5003 [RTA_UID] = { .type = NLA_U32 },
5004 [RTA_MARK] = { .type = NLA_U32 },
5005 [RTA_TABLE] = { .type = NLA_U32 },
5006 [RTA_IP_PROTO] = { .type = NLA_U8 },
5007 [RTA_SPORT] = { .type = NLA_U16 },
5008 [RTA_DPORT] = { .type = NLA_U16 },
5009 [RTA_NH_ID] = { .type = NLA_U32 },
5010};
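
/* Illustrative mapping (a sketch, not part of the kernel sources; the
 * device name and prefix are placeholders): a request such as
 * "ip -6 route add 2001:db8::/64 via fe80::1 dev eth0 metric 1024"
 * arrives as an RTM_NEWROUTE message whose rtmsg header carries
 * rtm_dst_len = 64 and whose attributes are translated by
 * rtm_to_fib6_config() below roughly as
 *
 *	RTA_DST      -> cfg->fc_dst       (2001:db8::)
 *	RTA_GATEWAY  -> cfg->fc_gateway   (+ RTF_GATEWAY in fc_flags)
 *	RTA_OIF      -> cfg->fc_ifindex   (ifindex of eth0)
 *	RTA_PRIORITY -> cfg->fc_metric    (1024)
 *	RTA_TABLE    -> cfg->fc_table     (overrides rtm_table if present)
 *
 * Multipath requests carry their nexthops in RTA_MULTIPATH instead and
 * are handled by ip6_route_multipath_add()/_del() further below.
 */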
5011
5012static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
5013 struct fib6_config *cfg,
5014 struct netlink_ext_ack *extack)
5015{
5016 struct rtmsg *rtm;
5017 struct nlattr *tb[RTA_MAX+1];
5018 unsigned int pref;
5019 int err;
5020
5021 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5022 rtm_ipv6_policy, extack);
5023 if (err < 0)
5024 goto errout;
5025
5026 err = -EINVAL;
5027 rtm = nlmsg_data(nlh);
5028
5029 if (rtm->rtm_tos) {
5030 NL_SET_ERR_MSG(extack,
5031 "Invalid dsfield (tos): option not available for IPv6");
5032 goto errout;
5033 }
5034
5035 *cfg = (struct fib6_config){
5036 .fc_table = rtm->rtm_table,
5037 .fc_dst_len = rtm->rtm_dst_len,
5038 .fc_src_len = rtm->rtm_src_len,
5039 .fc_flags = RTF_UP,
5040 .fc_protocol = rtm->rtm_protocol,
5041 .fc_type = rtm->rtm_type,
5042
5043 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
5044 .fc_nlinfo.nlh = nlh,
5045 .fc_nlinfo.nl_net = sock_net(skb->sk),
5046 };
5047
5048 if (rtm->rtm_type == RTN_UNREACHABLE ||
5049 rtm->rtm_type == RTN_BLACKHOLE ||
5050 rtm->rtm_type == RTN_PROHIBIT ||
5051 rtm->rtm_type == RTN_THROW)
5052 cfg->fc_flags |= RTF_REJECT;
5053
5054 if (rtm->rtm_type == RTN_LOCAL)
5055 cfg->fc_flags |= RTF_LOCAL;
5056
5057 if (rtm->rtm_flags & RTM_F_CLONED)
5058 cfg->fc_flags |= RTF_CACHE;
5059
5060 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
5061
5062 if (tb[RTA_NH_ID]) {
5063 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
5064 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
5065 NL_SET_ERR_MSG(extack,
5066 "Nexthop specification and nexthop id are mutually exclusive");
5067 goto errout;
5068 }
5069 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
5070 }
5071
5072 if (tb[RTA_GATEWAY]) {
5073 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
5074 cfg->fc_flags |= RTF_GATEWAY;
5075 }
5076 if (tb[RTA_VIA]) {
5077 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
5078 goto errout;
5079 }
5080
5081 if (tb[RTA_DST]) {
5082 int plen = (rtm->rtm_dst_len + 7) >> 3;
5083
5084 if (nla_len(tb[RTA_DST]) < plen)
5085 goto errout;
5086
5087 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
5088 }
5089
5090 if (tb[RTA_SRC]) {
5091 int plen = (rtm->rtm_src_len + 7) >> 3;
5092
5093 if (nla_len(tb[RTA_SRC]) < plen)
5094 goto errout;
5095
5096 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
5097 }
5098
5099 if (tb[RTA_PREFSRC])
5100 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
5101
5102 if (tb[RTA_OIF])
5103 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
5104
5105 if (tb[RTA_PRIORITY])
5106 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
5107
5108 if (tb[RTA_METRICS]) {
5109 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
5110 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
5111 }
5112
5113 if (tb[RTA_TABLE])
5114 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5115
5116 if (tb[RTA_MULTIPATH]) {
5117 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5118 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5119
5120 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
5121 cfg->fc_mp_len, extack);
5122 if (err < 0)
5123 goto errout;
5124 }
5125
5126 if (tb[RTA_PREF]) {
5127 pref = nla_get_u8(tb[RTA_PREF]);
5128 if (pref != ICMPV6_ROUTER_PREF_LOW &&
5129 pref != ICMPV6_ROUTER_PREF_HIGH)
5130 pref = ICMPV6_ROUTER_PREF_MEDIUM;
5131 cfg->fc_flags |= RTF_PREF(pref);
5132 }
5133
5134 if (tb[RTA_ENCAP])
5135 cfg->fc_encap = tb[RTA_ENCAP];
5136
5137 if (tb[RTA_ENCAP_TYPE]) {
5138 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5139
5140 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5141 if (err < 0)
5142 goto errout;
5143 }
5144
5145 if (tb[RTA_EXPIRES]) {
5146 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5147
5148 if (addrconf_finite_timeout(timeout)) {
5149 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5150 cfg->fc_flags |= RTF_EXPIRES;
5151 }
5152 }
5153
5154 err = 0;
5155errout:
5156 return err;
5157}
5158
5159struct rt6_nh {
5160 struct fib6_info *fib6_info;
5161 struct fib6_config r_cfg;
5162 struct list_head next;
5163};
5164
5165static int ip6_route_info_append(struct net *net,
5166 struct list_head *rt6_nh_list,
5167 struct fib6_info *rt,
5168 struct fib6_config *r_cfg)
5169{
5170 struct rt6_nh *nh;
5171 int err = -EEXIST;
5172
5173 list_for_each_entry(nh, rt6_nh_list, next) {
5174 /* check if fib6_info already exists */
5175 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5176 return err;
5177 }
5178
5179 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5180 if (!nh)
5181 return -ENOMEM;
5182 nh->fib6_info = rt;
5183 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5184 list_add_tail(&nh->next, rt6_nh_list);
5185
5186 return 0;
5187}
5188
5189static void ip6_route_mpath_notify(struct fib6_info *rt,
5190 struct fib6_info *rt_last,
5191 struct nl_info *info,
5192 __u16 nlflags)
5193{
5194 /* If this is an APPEND route, then rt points to the first route
5195 * inserted and rt_last points to the last route inserted. Userspace
5196 * wants a consistent dump of the route which starts at the first
5197 * nexthop. Since sibling routes are always added at the end of
5198 * the list, find the first sibling of the last route appended.
5199 */
5200 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5201 rt = list_first_entry(&rt_last->fib6_siblings,
5202 struct fib6_info,
5203 fib6_siblings);
5204 }
5205
5206 if (rt)
5207 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5208}
5209
5210static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5211{
5212 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5213 bool should_notify = false;
5214 struct fib6_info *leaf;
5215 struct fib6_node *fn;
5216
5217 rcu_read_lock();
5218 fn = rcu_dereference(rt->fib6_node);
5219 if (!fn)
5220 goto out;
5221
5222 leaf = rcu_dereference(fn->leaf);
5223 if (!leaf)
5224 goto out;
5225
5226 if (rt == leaf ||
5227 (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5228 rt6_qualify_for_ecmp(leaf)))
5229 should_notify = true;
5230out:
5231 rcu_read_unlock();
5232
5233 return should_notify;
5234}
5235
5236static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
5237 struct netlink_ext_ack *extack)
5238{
5239 if (nla_len(nla) < sizeof(*gw)) {
5240 NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
5241 return -EINVAL;
5242 }
5243
5244 *gw = nla_get_in6_addr(nla);
5245
5246 return 0;
5247}
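
/* Layout sketch of the RTA_MULTIPATH payload consumed below
 * (illustration only; struct rtnexthop is defined in
 * <linux/rtnetlink.h>): the attribute carries an array of nexthop
 * records, each a struct rtnexthop optionally followed by per-nexthop
 * attributes, e.g.
 *
 *	struct rtnexthop  { rtnh_len, rtnh_flags, rtnh_hops, rtnh_ifindex }
 *	    [ RTA_GATEWAY     struct in6_addr ]	(optional)
 *	    [ RTA_ENCAP       nested          ]	(optional)
 *	    [ RTA_ENCAP_TYPE  u16             ]	(optional)
 *	struct rtnexthop  { ... }
 *	    ...
 *
 * rtnh_ok()/rtnh_next() walk the records while rtnh_attrs()/nla_find()
 * extract the per-nexthop attributes; rtnh_hops encodes the nexthop
 * weight minus one.
 */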
5248
5249static int ip6_route_multipath_add(struct fib6_config *cfg,
5250 struct netlink_ext_ack *extack)
5251{
5252 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5253 struct nl_info *info = &cfg->fc_nlinfo;
5254 struct fib6_config r_cfg;
5255 struct rtnexthop *rtnh;
5256 struct fib6_info *rt;
5257 struct rt6_nh *err_nh;
5258 struct rt6_nh *nh, *nh_safe;
5259 __u16 nlflags;
5260 int remaining;
5261 int attrlen;
5262 int err = 1;
5263 int nhn = 0;
5264 int replace = (cfg->fc_nlinfo.nlh &&
5265 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5266 LIST_HEAD(rt6_nh_list);
5267
5268 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5269 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5270 nlflags |= NLM_F_APPEND;
5271
5272 remaining = cfg->fc_mp_len;
5273 rtnh = (struct rtnexthop *)cfg->fc_mp;
5274
5275 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5276 * fib6_info structs per nexthop
5277 */
5278 while (rtnh_ok(rtnh, remaining)) {
5279 memcpy(&r_cfg, cfg, sizeof(*cfg));
5280 if (rtnh->rtnh_ifindex)
5281 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5282
5283 attrlen = rtnh_attrlen(rtnh);
5284 if (attrlen > 0) {
5285 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5286
5287 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5288 if (nla) {
5289 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
5290 extack);
5291 if (err)
5292 goto cleanup;
5293
5294 r_cfg.fc_flags |= RTF_GATEWAY;
5295 }
5296 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5297
5298 /* RTA_ENCAP_TYPE length checked in
5299 * lwtunnel_valid_encap_type_attr
5300 */
5301 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5302 if (nla)
5303 r_cfg.fc_encap_type = nla_get_u16(nla);
5304 }
5305
5306 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5307 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5308 if (IS_ERR(rt)) {
5309 err = PTR_ERR(rt);
5310 rt = NULL;
5311 goto cleanup;
5312 }
5313 if (!rt6_qualify_for_ecmp(rt)) {
5314 err = -EINVAL;
5315 NL_SET_ERR_MSG(extack,
5316 "Device only routes can not be added for IPv6 using the multipath API.");
5317 fib6_info_release(rt);
5318 goto cleanup;
5319 }
5320
5321 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5322
5323 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5324 rt, &r_cfg);
5325 if (err) {
5326 fib6_info_release(rt);
5327 goto cleanup;
5328 }
5329
5330 rtnh = rtnh_next(rtnh, &remaining);
5331 }
5332
5333 if (list_empty(&rt6_nh_list)) {
5334 NL_SET_ERR_MSG(extack,
5335 "Invalid nexthop configuration - no valid nexthops");
5336 return -EINVAL;
5337 }
5338
5339 /* For add and replace, send one notification with all nexthops.
5340 * Skip the notification in fib6_add_rt2node and send one with
5341 * the full route when done.
5342 */
5343 info->skip_notify = 1;
5344
5345 /* For add and replace, send one notification with all nexthops. For
5346 * append, send one notification with all appended nexthops.
5347 */
5348 info->skip_notify_kernel = 1;
5349
5350 err_nh = NULL;
5351 list_for_each_entry(nh, &rt6_nh_list, next) {
5352 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5353
5354 if (err) {
5355 if (replace && nhn)
5356 NL_SET_ERR_MSG_MOD(extack,
5357 "multipath route replace failed (check consistency of installed routes)");
5358 err_nh = nh;
5359 goto add_errout;
5360 }
5361 /* save reference to last route successfully inserted */
5362 rt_last = nh->fib6_info;
5363
5364 /* save reference to first route for notification */
5365 if (!rt_notif)
5366 rt_notif = nh->fib6_info;
5367
5368 /* Because each route is added as if it were a single route, we
5369 * clear these flags after the first nexthop: if there is a
5370 * collision, we have already failed to add the first nexthop, since
5371 * fib6_add_rt2node() rejected it; when replacing, the old nexthops
5372 * have been replaced by the first new one, and the rest should be
5373 * appended to it.
5374 */
5375 if (cfg->fc_nlinfo.nlh) {
5376 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5377 NLM_F_REPLACE);
5378 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5379 }
5380 nhn++;
5381 }
5382
5383 /* An in-kernel notification should only be sent in case the new
5384 * multipath route is added as the first route in the node, or if
5385 * it was appended to it. We pass 'rt_notif' since it is the first
5386 * sibling and might allow us to skip some checks in the replace case.
5387 */
5388 if (ip6_route_mpath_should_notify(rt_notif)) {
5389 enum fib_event_type fib_event;
5390
5391 if (rt_notif->fib6_nsiblings != nhn - 1)
5392 fib_event = FIB_EVENT_ENTRY_APPEND;
5393 else
5394 fib_event = FIB_EVENT_ENTRY_REPLACE;
5395
5396 err = call_fib6_multipath_entry_notifiers(info->nl_net,
5397 fib_event, rt_notif,
5398 nhn - 1, extack);
5399 if (err) {
5400 /* Delete all the siblings that were just added */
5401 err_nh = NULL;
5402 goto add_errout;
5403 }
5404 }
5405
5406 /* success ... tell user about new route */
5407 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5408 goto cleanup;
5409
5410add_errout:
5411 /* send notification for routes that were added so that
5412 * the delete notifications sent by ip6_route_del are
5413 * coherent
5414 */
5415 if (rt_notif)
5416 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5417
5418 /* Delete routes that were already added */
5419 list_for_each_entry(nh, &rt6_nh_list, next) {
5420 if (err_nh == nh)
5421 break;
5422 ip6_route_del(&nh->r_cfg, extack);
5423 }
5424
5425cleanup:
5426 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5427 fib6_info_release(nh->fib6_info);
5428 list_del(&nh->next);
5429 kfree(nh);
5430 }
5431
5432 return err;
5433}
5434
5435static int ip6_route_multipath_del(struct fib6_config *cfg,
5436 struct netlink_ext_ack *extack)
5437{
5438 struct fib6_config r_cfg;
5439 struct rtnexthop *rtnh;
5440 int last_err = 0;
5441 int remaining;
5442 int attrlen;
5443 int err;
5444
5445 remaining = cfg->fc_mp_len;
5446 rtnh = (struct rtnexthop *)cfg->fc_mp;
5447
5448 /* Parse a Multipath Entry */
5449 while (rtnh_ok(rtnh, remaining)) {
5450 memcpy(&r_cfg, cfg, sizeof(*cfg));
5451 if (rtnh->rtnh_ifindex)
5452 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5453
5454 attrlen = rtnh_attrlen(rtnh);
5455 if (attrlen > 0) {
5456 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5457
5458 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5459 if (nla) {
5460 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
5461 extack);
5462 if (err) {
5463 last_err = err;
5464 goto next_rtnh;
5465 }
5466
5467 r_cfg.fc_flags |= RTF_GATEWAY;
5468 }
5469 }
5470 err = ip6_route_del(&r_cfg, extack);
5471 if (err)
5472 last_err = err;
5473
5474next_rtnh:
5475 rtnh = rtnh_next(rtnh, &remaining);
5476 }
5477
5478 return last_err;
5479}
5480
5481static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5482 struct netlink_ext_ack *extack)
5483{
5484 struct fib6_config cfg;
5485 int err;
5486
5487 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5488 if (err < 0)
5489 return err;
5490
5491 if (cfg.fc_nh_id &&
5492 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5493 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5494 return -EINVAL;
5495 }
5496
5497 if (cfg.fc_mp)
5498 return ip6_route_multipath_del(&cfg, extack);
5499 else {
5500 cfg.fc_delete_all_nh = 1;
5501 return ip6_route_del(&cfg, extack);
5502 }
5503}
5504
5505static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5506 struct netlink_ext_ack *extack)
5507{
5508 struct fib6_config cfg;
5509 int err;
5510
5511 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5512 if (err < 0)
5513 return err;
5514
5515 if (cfg.fc_metric == 0)
5516 cfg.fc_metric = IP6_RT_PRIO_USER;
5517
5518 if (cfg.fc_mp)
5519 return ip6_route_multipath_add(&cfg, extack);
5520 else
5521 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5522}
5523
5524/* add the overhead of this fib6_nh to nexthop_len */
5525static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5526{
5527 int *nexthop_len = arg;
5528
5529 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5530 + NLA_ALIGN(sizeof(struct rtnexthop))
5531 + nla_total_size(16); /* RTA_GATEWAY */
5532
5533 if (nh->fib_nh_lws) {
5534 /* RTA_ENCAP */
5535 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5536 /* RTA_ENCAP_TYPE */
5537 *nexthop_len += nla_total_size(2);
5538 }
5539
5540 return 0;
5541}
5542
5543static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5544{
5545 int nexthop_len;
5546
5547 if (f6i->nh) {
5548 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5549 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5550 &nexthop_len);
5551 } else {
5552 struct fib6_info *sibling, *next_sibling;
5553 struct fib6_nh *nh = f6i->fib6_nh;
5554
5555 nexthop_len = 0;
5556 if (f6i->fib6_nsiblings) {
5557 rt6_nh_nlmsg_size(nh, &nexthop_len);
5558
5559 list_for_each_entry_safe(sibling, next_sibling,
5560 &f6i->fib6_siblings, fib6_siblings) {
5561 rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
5562 }
5563 }
5564 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5565 }
5566
5567 return NLMSG_ALIGN(sizeof(struct rtmsg))
5568 + nla_total_size(16) /* RTA_SRC */
5569 + nla_total_size(16) /* RTA_DST */
5570 + nla_total_size(16) /* RTA_GATEWAY */
5571 + nla_total_size(16) /* RTA_PREFSRC */
5572 + nla_total_size(4) /* RTA_TABLE */
5573 + nla_total_size(4) /* RTA_IIF */
5574 + nla_total_size(4) /* RTA_OIF */
5575 + nla_total_size(4) /* RTA_PRIORITY */
5576 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5577 + nla_total_size(sizeof(struct rta_cacheinfo))
5578 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5579 + nla_total_size(1) /* RTA_PREF */
5580 + nexthop_len;
5581}
5582
5583static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5584 unsigned char *flags)
5585{
5586 if (nexthop_is_multipath(nh)) {
5587 struct nlattr *mp;
5588
5589 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5590 if (!mp)
5591 goto nla_put_failure;
5592
5593 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5594 goto nla_put_failure;
5595
5596 nla_nest_end(skb, mp);
5597 } else {
5598 struct fib6_nh *fib6_nh;
5599
5600 fib6_nh = nexthop_fib6_nh(nh);
5601 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5602 flags, false) < 0)
5603 goto nla_put_failure;
5604 }
5605
5606 return 0;
5607
5608nla_put_failure:
5609 return -EMSGSIZE;
5610}
5611
5612static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5613 struct fib6_info *rt, struct dst_entry *dst,
5614 struct in6_addr *dest, struct in6_addr *src,
5615 int iif, int type, u32 portid, u32 seq,
5616 unsigned int flags)
5617{
5618 struct rt6_info *rt6 = dst_rt6_info(dst);
5619 struct rt6key *rt6_dst, *rt6_src;
5620 u32 *pmetrics, table, rt6_flags;
5621 unsigned char nh_flags = 0;
5622 struct nlmsghdr *nlh;
5623 struct rtmsg *rtm;
5624 long expires = 0;
5625
5626 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5627 if (!nlh)
5628 return -EMSGSIZE;
5629
5630 if (rt6) {
5631 rt6_dst = &rt6->rt6i_dst;
5632 rt6_src = &rt6->rt6i_src;
5633 rt6_flags = rt6->rt6i_flags;
5634 } else {
5635 rt6_dst = &rt->fib6_dst;
5636 rt6_src = &rt->fib6_src;
5637 rt6_flags = rt->fib6_flags;
5638 }
5639
5640 rtm = nlmsg_data(nlh);
5641 rtm->rtm_family = AF_INET6;
5642 rtm->rtm_dst_len = rt6_dst->plen;
5643 rtm->rtm_src_len = rt6_src->plen;
5644 rtm->rtm_tos = 0;
5645 if (rt->fib6_table)
5646 table = rt->fib6_table->tb6_id;
5647 else
5648 table = RT6_TABLE_UNSPEC;
5649 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5650 if (nla_put_u32(skb, RTA_TABLE, table))
5651 goto nla_put_failure;
5652
5653 rtm->rtm_type = rt->fib6_type;
5654 rtm->rtm_flags = 0;
5655 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5656 rtm->rtm_protocol = rt->fib6_protocol;
5657
5658 if (rt6_flags & RTF_CACHE)
5659 rtm->rtm_flags |= RTM_F_CLONED;
5660
5661 if (dest) {
5662 if (nla_put_in6_addr(skb, RTA_DST, dest))
5663 goto nla_put_failure;
5664 rtm->rtm_dst_len = 128;
5665 } else if (rtm->rtm_dst_len)
5666 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5667 goto nla_put_failure;
5668#ifdef CONFIG_IPV6_SUBTREES
5669 if (src) {
5670 if (nla_put_in6_addr(skb, RTA_SRC, src))
5671 goto nla_put_failure;
5672 rtm->rtm_src_len = 128;
5673 } else if (rtm->rtm_src_len &&
5674 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5675 goto nla_put_failure;
5676#endif
5677 if (iif) {
5678#ifdef CONFIG_IPV6_MROUTE
5679 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5680 int err = ip6mr_get_route(net, skb, rtm, portid);
5681
5682 if (err == 0)
5683 return 0;
5684 if (err < 0)
5685 goto nla_put_failure;
5686 } else
5687#endif
5688 if (nla_put_u32(skb, RTA_IIF, iif))
5689 goto nla_put_failure;
5690 } else if (dest) {
5691 struct in6_addr saddr_buf;
5692 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5693 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5694 goto nla_put_failure;
5695 }
5696
5697 if (rt->fib6_prefsrc.plen) {
5698 struct in6_addr saddr_buf;
5699 saddr_buf = rt->fib6_prefsrc.addr;
5700 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5701 goto nla_put_failure;
5702 }
5703
5704 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5705 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5706 goto nla_put_failure;
5707
5708 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5709 goto nla_put_failure;
5710
5711 /* For multipath routes, walk the siblings list and add
5712 * each as a nexthop within RTA_MULTIPATH.
5713 */
5714 if (rt6) {
5715 if (rt6_flags & RTF_GATEWAY &&
5716 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5717 goto nla_put_failure;
5718
5719 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5720 goto nla_put_failure;
5721
5722 if (dst->lwtstate &&
5723 lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
5724 goto nla_put_failure;
5725 } else if (rt->fib6_nsiblings) {
5726 struct fib6_info *sibling, *next_sibling;
5727 struct nlattr *mp;
5728
5729 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5730 if (!mp)
5731 goto nla_put_failure;
5732
5733 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5734 rt->fib6_nh->fib_nh_weight, AF_INET6,
5735 0) < 0)
5736 goto nla_put_failure;
5737
5738 list_for_each_entry_safe(sibling, next_sibling,
5739 &rt->fib6_siblings, fib6_siblings) {
5740 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5741 sibling->fib6_nh->fib_nh_weight,
5742 AF_INET6, 0) < 0)
5743 goto nla_put_failure;
5744 }
5745
5746 nla_nest_end(skb, mp);
5747 } else if (rt->nh) {
5748 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5749 goto nla_put_failure;
5750
5751 if (nexthop_is_blackhole(rt->nh))
5752 rtm->rtm_type = RTN_BLACKHOLE;
5753
5754 if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
5755 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5756 goto nla_put_failure;
5757
5758 rtm->rtm_flags |= nh_flags;
5759 } else {
5760 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5761 &nh_flags, false) < 0)
5762 goto nla_put_failure;
5763
5764 rtm->rtm_flags |= nh_flags;
5765 }
5766
5767 if (rt6_flags & RTF_EXPIRES) {
5768 expires = dst ? dst->expires : rt->expires;
5769 expires -= jiffies;
5770 }
5771
5772 if (!dst) {
5773 if (READ_ONCE(rt->offload))
5774 rtm->rtm_flags |= RTM_F_OFFLOAD;
5775 if (READ_ONCE(rt->trap))
5776 rtm->rtm_flags |= RTM_F_TRAP;
5777 if (READ_ONCE(rt->offload_failed))
5778 rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5779 }
5780
5781 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5782 goto nla_put_failure;
5783
5784 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5785 goto nla_put_failure;
5786
5787
5788 nlmsg_end(skb, nlh);
5789 return 0;
5790
5791nla_put_failure:
5792 nlmsg_cancel(skb, nlh);
5793 return -EMSGSIZE;
5794}
5795
5796static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5797{
5798 const struct net_device *dev = arg;
5799
5800 if (nh->fib_nh_dev == dev)
5801 return 1;
5802
5803 return 0;
5804}
5805
5806static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5807 const struct net_device *dev)
5808{
5809 if (f6i->nh) {
5810 struct net_device *_dev = (struct net_device *)dev;
5811
5812 return !!nexthop_for_each_fib6_nh(f6i->nh,
5813 fib6_info_nh_uses_dev,
5814 _dev);
5815 }
5816
5817 if (f6i->fib6_nh->fib_nh_dev == dev)
5818 return true;
5819
5820 if (f6i->fib6_nsiblings) {
5821 struct fib6_info *sibling, *next_sibling;
5822
5823 list_for_each_entry_safe(sibling, next_sibling,
5824 &f6i->fib6_siblings, fib6_siblings) {
5825 if (sibling->fib6_nh->fib_nh_dev == dev)
5826 return true;
5827 }
5828 }
5829
5830 return false;
5831}
5832
5833struct fib6_nh_exception_dump_walker {
5834 struct rt6_rtnl_dump_arg *dump;
5835 struct fib6_info *rt;
5836 unsigned int flags;
5837 unsigned int skip;
5838 unsigned int count;
5839};
5840
5841static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5842{
5843 struct fib6_nh_exception_dump_walker *w = arg;
5844 struct rt6_rtnl_dump_arg *dump = w->dump;
5845 struct rt6_exception_bucket *bucket;
5846 struct rt6_exception *rt6_ex;
5847 int i, err;
5848
5849 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5850 if (!bucket)
5851 return 0;
5852
5853 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5854 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5855 if (w->skip) {
5856 w->skip--;
5857 continue;
5858 }
5859
5860 /* Expiration of entries doesn't bump sernum, insertion
5861 * does. Removal is triggered by insertion, so we can
5862 * rely on the fact that if entries change between two
5863 * partial dumps, this node is scanned again completely,
5864 * see rt6_insert_exception() and fib6_dump_table().
5865 *
5866 * Count expired entries we go through as handled
5867 * entries that we'll skip next time, in case of partial
5868 * node dump. Otherwise, if entries expire meanwhile,
5869 * we'll skip the wrong amount.
5870 */
5871 if (rt6_check_expired(rt6_ex->rt6i)) {
5872 w->count++;
5873 continue;
5874 }
5875
5876 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5877 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5878 RTM_NEWROUTE,
5879 NETLINK_CB(dump->cb->skb).portid,
5880 dump->cb->nlh->nlmsg_seq, w->flags);
5881 if (err)
5882 return err;
5883
5884 w->count++;
5885 }
5886 bucket++;
5887 }
5888
5889 return 0;
5890}
5891
5892/* Return -1 if done with node, number of handled routes on partial dump */
5893int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5894{
5895 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5896 struct fib_dump_filter *filter = &arg->filter;
5897 unsigned int flags = NLM_F_MULTI;
5898 struct net *net = arg->net;
5899 int count = 0;
5900
5901 if (rt == net->ipv6.fib6_null_entry)
5902 return -1;
5903
5904 if ((filter->flags & RTM_F_PREFIX) &&
5905 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5906 /* success since this is not a prefix route */
5907 return -1;
5908 }
5909 if (filter->filter_set &&
5910 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5911 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5912 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5913 return -1;
5914 }
5915
5916 if (filter->filter_set ||
5917 !filter->dump_routes || !filter->dump_exceptions) {
5918 flags |= NLM_F_DUMP_FILTERED;
5919 }
5920
5921 if (filter->dump_routes) {
5922 if (skip) {
5923 skip--;
5924 } else {
5925 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5926 0, RTM_NEWROUTE,
5927 NETLINK_CB(arg->cb->skb).portid,
5928 arg->cb->nlh->nlmsg_seq, flags)) {
5929 return 0;
5930 }
5931 count++;
5932 }
5933 }
5934
5935 if (filter->dump_exceptions) {
5936 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5937 .rt = rt,
5938 .flags = flags,
5939 .skip = skip,
5940 .count = 0 };
5941 int err;
5942
5943 rcu_read_lock();
5944 if (rt->nh) {
5945 err = nexthop_for_each_fib6_nh(rt->nh,
5946 rt6_nh_dump_exceptions,
5947 &w);
5948 } else {
5949 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5950 }
5951 rcu_read_unlock();
5952
5953 if (err)
5954 return count + w.count;
5955 }
5956
5957 return -1;
5958}
5959
5960static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5961 const struct nlmsghdr *nlh,
5962 struct nlattr **tb,
5963 struct netlink_ext_ack *extack)
5964{
5965 struct rtmsg *rtm;
5966 int i, err;
5967
5968 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5969 NL_SET_ERR_MSG_MOD(extack,
5970 "Invalid header for get route request");
5971 return -EINVAL;
5972 }
5973
5974 if (!netlink_strict_get_check(skb))
5975 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5976 rtm_ipv6_policy, extack);
5977
5978 rtm = nlmsg_data(nlh);
5979 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5980 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5981 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5982 rtm->rtm_type) {
5983 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5984 return -EINVAL;
5985 }
5986 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5987 NL_SET_ERR_MSG_MOD(extack,
5988 "Invalid flags for get route request");
5989 return -EINVAL;
5990 }
5991
5992 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5993 rtm_ipv6_policy, extack);
5994 if (err)
5995 return err;
5996
5997 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5998 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5999 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
6000 return -EINVAL;
6001 }
6002
6003 for (i = 0; i <= RTA_MAX; i++) {
6004 if (!tb[i])
6005 continue;
6006
6007 switch (i) {
6008 case RTA_SRC:
6009 case RTA_DST:
6010 case RTA_IIF:
6011 case RTA_OIF:
6012 case RTA_MARK:
6013 case RTA_UID:
6014 case RTA_SPORT:
6015 case RTA_DPORT:
6016 case RTA_IP_PROTO:
6017 break;
6018 default:
6019 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
6020 return -EINVAL;
6021 }
6022 }
6023
6024 return 0;
6025}
6026
6027static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6028 struct netlink_ext_ack *extack)
6029{
6030 struct net *net = sock_net(in_skb->sk);
6031 struct nlattr *tb[RTA_MAX+1];
6032 int err, iif = 0, oif = 0;
6033 struct fib6_info *from;
6034 struct dst_entry *dst;
6035 struct rt6_info *rt;
6036 struct sk_buff *skb;
6037 struct rtmsg *rtm;
6038 struct flowi6 fl6 = {};
6039 bool fibmatch;
6040
6041 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
6042 if (err < 0)
6043 goto errout;
6044
6045 err = -EINVAL;
6046 rtm = nlmsg_data(nlh);
6047 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
6048 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
6049
6050 if (tb[RTA_SRC]) {
6051 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
6052 goto errout;
6053
6054 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
6055 }
6056
6057 if (tb[RTA_DST]) {
6058 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
6059 goto errout;
6060
6061 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
6062 }
6063
6064 if (tb[RTA_IIF])
6065 iif = nla_get_u32(tb[RTA_IIF]);
6066
6067 if (tb[RTA_OIF])
6068 oif = nla_get_u32(tb[RTA_OIF]);
6069
6070 if (tb[RTA_MARK])
6071 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
6072
6073 if (tb[RTA_UID])
6074 fl6.flowi6_uid = make_kuid(current_user_ns(),
6075 nla_get_u32(tb[RTA_UID]));
6076 else
6077 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
6078
6079 if (tb[RTA_SPORT])
6080 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
6081
6082 if (tb[RTA_DPORT])
6083 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
6084
6085 if (tb[RTA_IP_PROTO]) {
6086 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
6087 &fl6.flowi6_proto, AF_INET6,
6088 extack);
6089 if (err)
6090 goto errout;
6091 }
6092
6093 if (iif) {
6094 struct net_device *dev;
6095 int flags = 0;
6096
6097 rcu_read_lock();
6098
6099 dev = dev_get_by_index_rcu(net, iif);
6100 if (!dev) {
6101 rcu_read_unlock();
6102 err = -ENODEV;
6103 goto errout;
6104 }
6105
6106 fl6.flowi6_iif = iif;
6107
6108 if (!ipv6_addr_any(&fl6.saddr))
6109 flags |= RT6_LOOKUP_F_HAS_SADDR;
6110
6111 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
6112
6113 rcu_read_unlock();
6114 } else {
6115 fl6.flowi6_oif = oif;
6116
6117 dst = ip6_route_output(net, NULL, &fl6);
6118 }
6119
6120
6121 rt = dst_rt6_info(dst);
6122 if (rt->dst.error) {
6123 err = rt->dst.error;
6124 ip6_rt_put(rt);
6125 goto errout;
6126 }
6127
6128 if (rt == net->ipv6.ip6_null_entry) {
6129 err = rt->dst.error;
6130 ip6_rt_put(rt);
6131 goto errout;
6132 }
6133
6134 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
6135 if (!skb) {
6136 ip6_rt_put(rt);
6137 err = -ENOBUFS;
6138 goto errout;
6139 }
6140
6141 skb_dst_set(skb, &rt->dst);
6142
6143 rcu_read_lock();
6144 from = rcu_dereference(rt->from);
6145 if (from) {
6146 if (fibmatch)
6147 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
6148 iif, RTM_NEWROUTE,
6149 NETLINK_CB(in_skb).portid,
6150 nlh->nlmsg_seq, 0);
6151 else
6152 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
6153 &fl6.saddr, iif, RTM_NEWROUTE,
6154 NETLINK_CB(in_skb).portid,
6155 nlh->nlmsg_seq, 0);
6156 } else {
6157 err = -ENETUNREACH;
6158 }
6159 rcu_read_unlock();
6160
6161 if (err < 0) {
6162 kfree_skb(skb);
6163 goto errout;
6164 }
6165
6166 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
6167errout:
6168 return err;
6169}
6170
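/* Send an rtnetlink notification (RTNLGRP_IPV6_ROUTE group) for a
 * route that was added or deleted; event is RTM_NEWROUTE or
 * RTM_DELROUTE and nlm_flags carries extra NLM_F_* flags from the
 * caller.  On allocation or fill failure the error is recorded on the
 * multicast group via rtnl_set_sk_err() so listeners notice the lost
 * message.
 */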
6171void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
6172 unsigned int nlm_flags)
6173{
6174 struct sk_buff *skb;
6175 struct net *net = info->nl_net;
6176 u32 seq;
6177 int err;
6178
6179 err = -ENOBUFS;
6180 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6181
6182 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6183 if (!skb)
6184 goto errout;
6185
6186 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6187 event, info->portid, seq, nlm_flags);
6188 if (err < 0) {
6189 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6190 WARN_ON(err == -EMSGSIZE);
6191 kfree_skb(skb);
6192 goto errout;
6193 }
6194 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6195 info->nlh, gfp_any());
6196 return;
6197errout:
6198 if (err < 0)
6199 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6200}
6201
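/* Like inet6_rt_notify(), but announces an in-place update of an
 * existing route: always RTM_NEWROUTE with NLM_F_REPLACE set.
 */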
6202void fib6_rt_update(struct net *net, struct fib6_info *rt,
6203 struct nl_info *info)
6204{
6205 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6206 struct sk_buff *skb;
6207 int err = -ENOBUFS;
6208
6209 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6210 if (!skb)
6211 goto errout;
6212
6213 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6214 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6215 if (err < 0) {
6216 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6217 WARN_ON(err == -EMSGSIZE);
6218 kfree_skb(skb);
6219 goto errout;
6220 }
6221 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6222 info->nlh, gfp_any());
6223 return;
6224errout:
6225 if (err < 0)
6226 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6227}
6228
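/* Update the hardware offload state of a route (offload / trap /
 * offload_failed) as reported by a driver.  Whether an RTM_NEWROUTE
 * notification is sent depends on the fib_notify_on_flag_change
 * sysctl (0 = never, 1 = on any flag change, 2 = only when
 * offload_failed changes) and on the route still being linked into
 * the FIB tree.
 */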
6229void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
6230 bool offload, bool trap, bool offload_failed)
6231{
6232 struct sk_buff *skb;
6233 int err;
6234
6235 if (READ_ONCE(f6i->offload) == offload &&
6236 READ_ONCE(f6i->trap) == trap &&
6237 READ_ONCE(f6i->offload_failed) == offload_failed)
6238 return;
6239
6240 WRITE_ONCE(f6i->offload, offload);
6241 WRITE_ONCE(f6i->trap, trap);
6242
6243 /* 2 means send notifications only if offload_failed was changed. */
6244 if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
6245 READ_ONCE(f6i->offload_failed) == offload_failed)
6246 return;
6247
6248 WRITE_ONCE(f6i->offload_failed, offload_failed);
6249
6250 if (!rcu_access_pointer(f6i->fib6_node))
6251 /* The route was removed from the tree, do not send
6252 * notification.
6253 */
6254 return;
6255
6256 if (!net->ipv6.sysctl.fib_notify_on_flag_change)
6257 return;
6258
6259 skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL);
6260 if (!skb) {
6261 err = -ENOBUFS;
6262 goto errout;
6263 }
6264
6265 err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0,
6266 0, 0);
6267 if (err < 0) {
6268 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6269 WARN_ON(err == -EMSGSIZE);
6270 kfree_skb(skb);
6271 goto errout;
6272 }
6273
6274 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL);
6275 return;
6276
6277errout:
6278 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6279}
6280EXPORT_SYMBOL(fib6_info_hw_flags_set);
6281
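/* Netdevice notifier: the special null/prohibit/blackhole entries have
 * no device of their own, so bind them to the loopback device when it
 * registers and drop the idev references again when it unregisters.
 * Events for non-loopback devices are ignored.
 */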
6282static int ip6_route_dev_notify(struct notifier_block *this,
6283 unsigned long event, void *ptr)
6284{
6285 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6286 struct net *net = dev_net(dev);
6287
6288 if (!(dev->flags & IFF_LOOPBACK))
6289 return NOTIFY_OK;
6290
6291 if (event == NETDEV_REGISTER) {
6292 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6293 net->ipv6.ip6_null_entry->dst.dev = dev;
6294 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6295#ifdef CONFIG_IPV6_MULTIPLE_TABLES
6296 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6297 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6298 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6299 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6300#endif
6301 } else if (event == NETDEV_UNREGISTER &&
6302 dev->reg_state != NETREG_UNREGISTERED) {
6303		/* NETDEV_UNREGISTER can be fired multiple times by
6304		 * netdev_wait_allrefs(), so make sure we only do this once.
6305		 */
6306 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6307#ifdef CONFIG_IPV6_MULTIPLE_TABLES
6308 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6309 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6310#endif
6311 }
6312
6313 return NOTIFY_OK;
6314}
6315
6316/*
6317 * /proc
6318 */
6319
6320#ifdef CONFIG_PROC_FS
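/* /proc/net/rt6_stats: seven hex fields - fib_nodes, fib_route_nodes,
 * fib_rt_alloc, fib_rt_entries, fib_rt_cache, the current number of
 * dst entries and fib_discarded_routes.
 */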
6321static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6322{
6323	struct net *net = (struct net *)seq->private;

6324 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6325 net->ipv6.rt6_stats->fib_nodes,
6326 net->ipv6.rt6_stats->fib_route_nodes,
6327 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6328 net->ipv6.rt6_stats->fib_rt_entries,
6329 net->ipv6.rt6_stats->fib_rt_cache,
6330 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6331 net->ipv6.rt6_stats->fib_discarded_routes);
6332
6333 return 0;
6334}
6335#endif /* CONFIG_PROC_FS */
6336
6337#ifdef CONFIG_SYSCTL
6338
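/* Handler for the write-only "flush" sysctl: writing to it stores the
 * value in flush_delay and triggers an immediate fib6 garbage
 * collection pass for this namespace; reads are rejected with -EINVAL.
 */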
6339static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6340 void *buffer, size_t *lenp, loff_t *ppos)
6341{
6342 struct net *net;
6343 int delay;
6344	int ret;

6345 if (!write)
6346 return -EINVAL;
6347
6348 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6349 if (ret)
6350 return ret;
6351
6352 net = (struct net *)ctl->extra1;
6353 delay = net->ipv6.sysctl.flush_delay;
6354 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6355 return 0;
6356}
6357
6358static struct ctl_table ipv6_route_table_template[] = {
6359 {
6360 .procname = "max_size",
6361 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
6362 .maxlen = sizeof(int),
6363 .mode = 0644,
6364 .proc_handler = proc_dointvec,
6365 },
6366 {
6367 .procname = "gc_thresh",
6368 .data = &ip6_dst_ops_template.gc_thresh,
6369 .maxlen = sizeof(int),
6370 .mode = 0644,
6371 .proc_handler = proc_dointvec,
6372 },
6373 {
6374 .procname = "flush",
6375 .data = &init_net.ipv6.sysctl.flush_delay,
6376 .maxlen = sizeof(int),
6377 .mode = 0200,
6378 .proc_handler = ipv6_sysctl_rtcache_flush
6379 },
6380 {
6381 .procname = "gc_min_interval",
6382 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6383 .maxlen = sizeof(int),
6384 .mode = 0644,
6385 .proc_handler = proc_dointvec_jiffies,
6386 },
6387 {
6388 .procname = "gc_timeout",
6389 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6390 .maxlen = sizeof(int),
6391 .mode = 0644,
6392 .proc_handler = proc_dointvec_jiffies,
6393 },
6394 {
6395 .procname = "gc_interval",
6396 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
6397 .maxlen = sizeof(int),
6398 .mode = 0644,
6399 .proc_handler = proc_dointvec_jiffies,
6400 },
6401 {
6402 .procname = "gc_elasticity",
6403 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6404 .maxlen = sizeof(int),
6405 .mode = 0644,
6406 .proc_handler = proc_dointvec,
6407 },
6408 {
6409 .procname = "mtu_expires",
6410 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6411 .maxlen = sizeof(int),
6412 .mode = 0644,
6413 .proc_handler = proc_dointvec_jiffies,
6414 },
6415 {
6416 .procname = "min_adv_mss",
6417 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
6418 .maxlen = sizeof(int),
6419 .mode = 0644,
6420 .proc_handler = proc_dointvec,
6421 },
6422 {
6423 .procname = "gc_min_interval_ms",
6424 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6425 .maxlen = sizeof(int),
6426 .mode = 0644,
6427 .proc_handler = proc_dointvec_ms_jiffies,
6428 },
6429 {
6430 .procname = "skip_notify_on_dev_down",
6431 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6432 .maxlen = sizeof(u8),
6433 .mode = 0644,
6434 .proc_handler = proc_dou8vec_minmax,
6435 .extra1 = SYSCTL_ZERO,
6436 .extra2 = SYSCTL_ONE,
6437 },
6438};
6439
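/* Clone the template for a new namespace and repoint each .data field
 * at the per-netns value.  The numeric indices below must stay in sync
 * with the ordering of ipv6_route_table_template above.  The table is
 * exposed under /proc/sys/net/ipv6/route/ once registered by the IPv6
 * sysctl code.
 */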
6440struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6441{
6442 struct ctl_table *table;
6443
6444 table = kmemdup(ipv6_route_table_template,
6445 sizeof(ipv6_route_table_template),
6446 GFP_KERNEL);
6447
6448 if (table) {
6449 table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
6450 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6451 table[2].data = &net->ipv6.sysctl.flush_delay;
6452 table[2].extra1 = net;
6453 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6454 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6455 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6456 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6457 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6458 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6459 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6460 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6461 }
6462
6463 return table;
6464}
6465
6466size_t ipv6_route_sysctl_table_size(struct net *net)
6467{
6468 /* Don't export sysctls to unprivileged users */
6469 if (net->user_ns != &init_user_ns)
6470 return 1;
6471
6472 return ARRAY_SIZE(ipv6_route_table_template);
6473}
6474#endif
6475
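/* Per-namespace init: clone the dst_ops template, allocate the
 * fib6/ip6 null entries (plus the prohibit and blackhole entries when
 * CONFIG_IPV6_MULTIPLE_TABLES is enabled) and seed the routing sysctl
 * defaults.  Failures unwind in reverse allocation order.
 */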
6476static int __net_init ip6_route_net_init(struct net *net)
6477{
6478 int ret = -ENOMEM;
6479
6480 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6481 sizeof(net->ipv6.ip6_dst_ops));
6482
6483 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6484 goto out_ip6_dst_ops;
6485
6486 net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6487 if (!net->ipv6.fib6_null_entry)
6488 goto out_ip6_dst_entries;
6489 memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6490 sizeof(*net->ipv6.fib6_null_entry));
6491
6492 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6493 sizeof(*net->ipv6.ip6_null_entry),
6494 GFP_KERNEL);
6495 if (!net->ipv6.ip6_null_entry)
6496 goto out_fib6_null_entry;
6497 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6498 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6499 ip6_template_metrics, true);
6500 INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->dst.rt_uncached);
6501
6502#ifdef CONFIG_IPV6_MULTIPLE_TABLES
6503 net->ipv6.fib6_has_custom_rules = false;
6504 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6505 sizeof(*net->ipv6.ip6_prohibit_entry),
6506 GFP_KERNEL);
6507 if (!net->ipv6.ip6_prohibit_entry)
6508 goto out_ip6_null_entry;
6509 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6510 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6511 ip6_template_metrics, true);
6512 INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->dst.rt_uncached);
6513
6514 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6515 sizeof(*net->ipv6.ip6_blk_hole_entry),
6516 GFP_KERNEL);
6517 if (!net->ipv6.ip6_blk_hole_entry)
6518 goto out_ip6_prohibit_entry;
6519 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6520 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6521 ip6_template_metrics, true);
6522 INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->dst.rt_uncached);
6523#ifdef CONFIG_IPV6_SUBTREES
6524 net->ipv6.fib6_routes_require_src = 0;
6525#endif
6526#endif
6527
6528 net->ipv6.sysctl.flush_delay = 0;
6529 net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
6530 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6531 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6532 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6533 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6534 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6535 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6536 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6537
6538 atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
6539
6540 ret = 0;
6541out:
6542 return ret;
6543
6544#ifdef CONFIG_IPV6_MULTIPLE_TABLES
6545out_ip6_prohibit_entry:
6546 kfree(net->ipv6.ip6_prohibit_entry);
6547out_ip6_null_entry:
6548 kfree(net->ipv6.ip6_null_entry);
6549#endif
6550out_fib6_null_entry:
6551 kfree(net->ipv6.fib6_null_entry);
6552out_ip6_dst_entries:
6553 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6554out_ip6_dst_ops:
6555 goto out;
6556}
6557
6558static void __net_exit ip6_route_net_exit(struct net *net)
6559{
6560 kfree(net->ipv6.fib6_null_entry);
6561 kfree(net->ipv6.ip6_null_entry);
6562#ifdef CONFIG_IPV6_MULTIPLE_TABLES
6563 kfree(net->ipv6.ip6_prohibit_entry);
6564 kfree(net->ipv6.ip6_blk_hole_entry);
6565#endif
6566 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6567}
6568
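/* Late per-namespace init: create the /proc/net/ipv6_route and
 * /proc/net/rt6_stats entries (removed again by the _exit_late
 * counterpart below).
 */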
6569static int __net_init ip6_route_net_init_late(struct net *net)
6570{
6571#ifdef CONFIG_PROC_FS
6572 if (!proc_create_net("ipv6_route", 0, net->proc_net,
6573 &ipv6_route_seq_ops,
6574 sizeof(struct ipv6_route_iter)))
6575 return -ENOMEM;
6576
6577 if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
6578 rt6_stats_seq_show, NULL)) {
6579 remove_proc_entry("ipv6_route", net->proc_net);
6580 return -ENOMEM;
6581 }
6582#endif
6583 return 0;
6584}
6585
6586static void __net_exit ip6_route_net_exit_late(struct net *net)
6587{
6588#ifdef CONFIG_PROC_FS
6589 remove_proc_entry("ipv6_route", net->proc_net);
6590 remove_proc_entry("rt6_stats", net->proc_net);
6591#endif
6592}
6593
6594static struct pernet_operations ip6_route_net_ops = {
6595 .init = ip6_route_net_init,
6596 .exit = ip6_route_net_exit,
6597};
6598
6599static int __net_init ipv6_inetpeer_init(struct net *net)
6600{
6601 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6602
6603 if (!bp)
6604 return -ENOMEM;
6605 inet_peer_base_init(bp);
6606 net->ipv6.peers = bp;
6607 return 0;
6608}
6609
6610static void __net_exit ipv6_inetpeer_exit(struct net *net)
6611{
6612 struct inet_peer_base *bp = net->ipv6.peers;
6613
6614 net->ipv6.peers = NULL;
6615 inetpeer_invalidate_tree(bp);
6616 kfree(bp);
6617}
6618
6619static struct pernet_operations ipv6_inetpeer_ops = {
6620 .init = ipv6_inetpeer_init,
6621 .exit = ipv6_inetpeer_exit,
6622};
6623
6624static struct pernet_operations ip6_route_net_late_ops = {
6625 .init = ip6_route_net_init_late,
6626 .exit = ip6_route_net_exit_late,
6627};
6628
6629static struct notifier_block ip6_route_dev_notifier = {
6630 .notifier_call = ip6_route_dev_notify,
6631 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6632};
6633
6634void __init ip6_route_init_special_entries(void)
6635{
6636	/* The loopback device is registered before this code runs, so the
6637	 * loopback reference in rt6_info has not been taken; take it
6638	 * manually for init_net. */
6639 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6640 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6641 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6642 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6643 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6644 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6645 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6646 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6647 #endif
6648}
6649
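/* BPF iterator target "ipv6_route": reuses the /proc/net/ipv6_route
 * seq_file machinery so BPF programs can walk fib6_info entries.  Only
 * built when IPv6 is built in and both CONFIG_BPF_SYSCALL and
 * CONFIG_PROC_FS are enabled.
 */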
6650#if IS_BUILTIN(CONFIG_IPV6)
6651#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6652DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6653
6654BTF_ID_LIST(btf_fib6_info_id)
6655BTF_ID(struct, fib6_info)
6656
6657static const struct bpf_iter_seq_info ipv6_route_seq_info = {
6658 .seq_ops = &ipv6_route_seq_ops,
6659 .init_seq_private = bpf_iter_init_seq_net,
6660 .fini_seq_private = bpf_iter_fini_seq_net,
6661 .seq_priv_size = sizeof(struct ipv6_route_iter),
6662};
6663
6664static struct bpf_iter_reg ipv6_route_reg_info = {
6665 .target = "ipv6_route",
6666 .ctx_arg_info_size = 1,
6667 .ctx_arg_info = {
6668 { offsetof(struct bpf_iter__ipv6_route, rt),
6669 PTR_TO_BTF_ID_OR_NULL },
6670 },
6671 .seq_info = &ipv6_route_seq_info,
6672};
6673
6674static int __init bpf_iter_register(void)
6675{
6676 ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
6677 return bpf_iter_reg_target(&ipv6_route_reg_info);
6678}
6679
6680static void bpf_iter_unregister(void)
6681{
6682 bpf_iter_unreg_target(&ipv6_route_reg_info);
6683}
6684#endif
6685#endif
6686
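/* Subsystem init: set up the rt6_info slab cache, register the pernet
 * subsystems, the FIB, xfrm and policy-rule layers, the
 * RTM_{NEW,DEL,GET}ROUTE rtnetlink handlers and the device notifier,
 * then initialise the per-cpu uncached route lists.  Every failure
 * path unwinds whatever was registered before it.
 */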
6687int __init ip6_route_init(void)
6688{
6689 int ret;
6690 int cpu;
6691
6692 ret = -ENOMEM;
6693 ip6_dst_ops_template.kmem_cachep =
6694 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6695 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
6696 if (!ip6_dst_ops_template.kmem_cachep)
6697 goto out;
6698
6699 ret = dst_entries_init(&ip6_dst_blackhole_ops);
6700 if (ret)
6701 goto out_kmem_cache;
6702
6703 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6704 if (ret)
6705 goto out_dst_entries;
6706
6707 ret = register_pernet_subsys(&ip6_route_net_ops);
6708 if (ret)
6709 goto out_register_inetpeer;
6710
6711 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6712
6713 ret = fib6_init();
6714 if (ret)
6715 goto out_register_subsys;
6716
6717 ret = xfrm6_init();
6718 if (ret)
6719 goto out_fib6_init;
6720
6721 ret = fib6_rules_init();
6722 if (ret)
6723 goto xfrm6_init;
6724
6725 ret = register_pernet_subsys(&ip6_route_net_late_ops);
6726 if (ret)
6727 goto fib6_rules_init;
6728
6729 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
6730 inet6_rtm_newroute, NULL, 0);
6731 if (ret < 0)
6732 goto out_register_late_subsys;
6733
6734 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
6735 inet6_rtm_delroute, NULL, 0);
6736 if (ret < 0)
6737 goto out_register_late_subsys;
6738
6739 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
6740 inet6_rtm_getroute, NULL,
6741 RTNL_FLAG_DOIT_UNLOCKED);
6742 if (ret < 0)
6743 goto out_register_late_subsys;
6744
6745 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6746 if (ret)
6747 goto out_register_late_subsys;
6748
6749#if IS_BUILTIN(CONFIG_IPV6)
6750#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6751 ret = bpf_iter_register();
6752 if (ret)
6753 goto out_register_late_subsys;
6754#endif
6755#endif
6756
6757 for_each_possible_cpu(cpu) {
6758 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6759
6760 INIT_LIST_HEAD(&ul->head);
6761 INIT_LIST_HEAD(&ul->quarantine);
6762 spin_lock_init(&ul->lock);
6763 }
6764
6765out:
6766 return ret;
6767
6768out_register_late_subsys:
6769 rtnl_unregister_all(PF_INET6);
6770 unregister_pernet_subsys(&ip6_route_net_late_ops);
6771fib6_rules_init:
6772 fib6_rules_cleanup();
6773xfrm6_init:
6774 xfrm6_fini();
6775out_fib6_init:
6776 fib6_gc_cleanup();
6777out_register_subsys:
6778 unregister_pernet_subsys(&ip6_route_net_ops);
6779out_register_inetpeer:
6780 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6781out_dst_entries:
6782 dst_entries_destroy(&ip6_dst_blackhole_ops);
6783out_kmem_cache:
6784 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6785 goto out;
6786}
6787
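/* Undo the registrations made by ip6_route_init() on IPv6 shutdown. */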
6788void ip6_route_cleanup(void)
6789{
6790#if IS_BUILTIN(CONFIG_IPV6)
6791#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6792 bpf_iter_unregister();
6793#endif
6794#endif
6795 unregister_netdevice_notifier(&ip6_route_dev_notifier);
6796 unregister_pernet_subsys(&ip6_route_net_late_ops);
6797 fib6_rules_cleanup();
6798 xfrm6_fini();
6799 fib6_gc_cleanup();
6800 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6801 unregister_pernet_subsys(&ip6_route_net_ops);
6802 dst_entries_destroy(&ip6_dst_blackhole_ops);
6803 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6804}