Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Linux INET6 implementation
4 * FIB front-end.
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 */
9
10/* Changes:
11 *
12 * YOSHIFUJI Hideaki @USAGI
13 * reworked default router selection.
14 * - respect outgoing interface
15 * - select from (probably) reachable routers (i.e.
16 * routers in REACHABLE, STALE, DELAY or PROBE states).
17 * - always select the same router if it is (probably)
18 * reachable. otherwise, round-robin the list.
19 * Ville Nuorvala
20 * Fixed routing subtrees.
21 */
22
23#define pr_fmt(fmt) "IPv6: " fmt
24
25#include <linux/capability.h>
26#include <linux/errno.h>
27#include <linux/export.h>
28#include <linux/types.h>
29#include <linux/times.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/route.h>
34#include <linux/netdevice.h>
35#include <linux/in6.h>
36#include <linux/mroute6.h>
37#include <linux/init.h>
38#include <linux/if_arp.h>
39#include <linux/proc_fs.h>
40#include <linux/seq_file.h>
41#include <linux/nsproxy.h>
42#include <linux/slab.h>
43#include <linux/jhash.h>
44#include <net/net_namespace.h>
45#include <net/snmp.h>
46#include <net/ipv6.h>
47#include <net/ip6_fib.h>
48#include <net/ip6_route.h>
49#include <net/ndisc.h>
50#include <net/addrconf.h>
51#include <net/tcp.h>
52#include <linux/rtnetlink.h>
53#include <net/dst.h>
54#include <net/dst_metadata.h>
55#include <net/xfrm.h>
56#include <net/netevent.h>
57#include <net/netlink.h>
58#include <net/rtnh.h>
59#include <net/lwtunnel.h>
60#include <net/ip_tunnels.h>
61#include <net/l3mdev.h>
62#include <net/ip.h>
63#include <linux/uaccess.h>
64
65#ifdef CONFIG_SYSCTL
66#include <linux/sysctl.h>
67#endif
68
69static int ip6_rt_type_to_error(u8 fib6_type);
70
71#define CREATE_TRACE_POINTS
72#include <trace/events/fib6.h>
73EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
74#undef CREATE_TRACE_POINTS
75
76enum rt6_nud_state {
77 RT6_NUD_FAIL_HARD = -3,
78 RT6_NUD_FAIL_PROBE = -2,
79 RT6_NUD_FAIL_DO_RR = -1,
80 RT6_NUD_SUCCEED = 1
81};
82
83static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
84static unsigned int ip6_default_advmss(const struct dst_entry *dst);
85static unsigned int ip6_mtu(const struct dst_entry *dst);
86static struct dst_entry *ip6_negative_advice(struct dst_entry *);
87static void ip6_dst_destroy(struct dst_entry *);
88static void ip6_dst_ifdown(struct dst_entry *,
89 struct net_device *dev, int how);
90static int ip6_dst_gc(struct dst_ops *ops);
91
92static int ip6_pkt_discard(struct sk_buff *skb);
93static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
94static int ip6_pkt_prohibit(struct sk_buff *skb);
95static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
96static void ip6_link_failure(struct sk_buff *skb);
97static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
98 struct sk_buff *skb, u32 mtu);
99static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
100 struct sk_buff *skb);
101static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
102 int strict);
103static size_t rt6_nlmsg_size(struct fib6_info *f6i);
104static int rt6_fill_node(struct net *net, struct sk_buff *skb,
105 struct fib6_info *rt, struct dst_entry *dst,
106 struct in6_addr *dest, struct in6_addr *src,
107 int iif, int type, u32 portid, u32 seq,
108 unsigned int flags);
109static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
110 const struct in6_addr *daddr,
111 const struct in6_addr *saddr);
112
113#ifdef CONFIG_IPV6_ROUTE_INFO
114static struct fib6_info *rt6_add_route_info(struct net *net,
115 const struct in6_addr *prefix, int prefixlen,
116 const struct in6_addr *gwaddr,
117 struct net_device *dev,
118 unsigned int pref);
119static struct fib6_info *rt6_get_route_info(struct net *net,
120 const struct in6_addr *prefix, int prefixlen,
121 const struct in6_addr *gwaddr,
122 struct net_device *dev);
123#endif
124
125struct uncached_list {
126 spinlock_t lock;
127 struct list_head head;
128};
129
130static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
131
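/* Uncached rt6_info entries are tracked on a per-CPU list so that
 * rt6_uncached_list_flush_dev() can re-point them at the loopback idev
 * and the blackhole device when their backing device goes away.
 */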
132void rt6_uncached_list_add(struct rt6_info *rt)
133{
134 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
135
136 rt->rt6i_uncached_list = ul;
137
138 spin_lock_bh(&ul->lock);
139 list_add_tail(&rt->rt6i_uncached, &ul->head);
140 spin_unlock_bh(&ul->lock);
141}
142
143void rt6_uncached_list_del(struct rt6_info *rt)
144{
145 if (!list_empty(&rt->rt6i_uncached)) {
146 struct uncached_list *ul = rt->rt6i_uncached_list;
147 struct net *net = dev_net(rt->dst.dev);
148
149 spin_lock_bh(&ul->lock);
150 list_del(&rt->rt6i_uncached);
151 atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
152 spin_unlock_bh(&ul->lock);
153 }
154}
155
156static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
157{
158 struct net_device *loopback_dev = net->loopback_dev;
159 int cpu;
160
161 if (dev == loopback_dev)
162 return;
163
164 for_each_possible_cpu(cpu) {
165 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
166 struct rt6_info *rt;
167
168 spin_lock_bh(&ul->lock);
169 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
170 struct inet6_dev *rt_idev = rt->rt6i_idev;
171 struct net_device *rt_dev = rt->dst.dev;
172
173 if (rt_idev->dev == dev) {
174 rt->rt6i_idev = in6_dev_get(loopback_dev);
175 in6_dev_put(rt_idev);
176 }
177
178 if (rt_dev == dev) {
179 rt->dst.dev = blackhole_netdev;
180 dev_hold(rt->dst.dev);
181 dev_put(rt_dev);
182 }
183 }
184 spin_unlock_bh(&ul->lock);
185 }
186}
187
188static inline const void *choose_neigh_daddr(const struct in6_addr *p,
189 struct sk_buff *skb,
190 const void *daddr)
191{
192 if (!ipv6_addr_any(p))
193 return (const void *) p;
194 else if (skb)
195 return &ipv6_hdr(skb)->daddr;
196 return daddr;
197}
198
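/* Neighbour lookup for an IPv6 route: prefer the gateway address, else the
 * packet's destination (or the caller-supplied daddr); creates the neighbour
 * entry on demand and returns NULL if creation fails.
 */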
199struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
200 struct net_device *dev,
201 struct sk_buff *skb,
202 const void *daddr)
203{
204 struct neighbour *n;
205
206 daddr = choose_neigh_daddr(gw, skb, daddr);
207 n = __ipv6_neigh_lookup(dev, daddr);
208 if (n)
209 return n;
210
211 n = neigh_create(&nd_tbl, daddr, dev);
212 return IS_ERR(n) ? NULL : n;
213}
214
215static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
216 struct sk_buff *skb,
217 const void *daddr)
218{
219 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
220
221 return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
222 dst->dev, skb, daddr);
223}
224
225static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
226{
227 struct net_device *dev = dst->dev;
228 struct rt6_info *rt = (struct rt6_info *)dst;
229
230 daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
231 if (!daddr)
232 return;
233 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
234 return;
235 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
236 return;
237 __ipv6_confirm_neigh(dev, daddr);
238}
239
240static struct dst_ops ip6_dst_ops_template = {
241 .family = AF_INET6,
242 .gc = ip6_dst_gc,
243 .gc_thresh = 1024,
244 .check = ip6_dst_check,
245 .default_advmss = ip6_default_advmss,
246 .mtu = ip6_mtu,
247 .cow_metrics = dst_cow_metrics_generic,
248 .destroy = ip6_dst_destroy,
249 .ifdown = ip6_dst_ifdown,
250 .negative_advice = ip6_negative_advice,
251 .link_failure = ip6_link_failure,
252 .update_pmtu = ip6_rt_update_pmtu,
253 .redirect = rt6_do_redirect,
254 .local_out = __ip6_local_out,
255 .neigh_lookup = ip6_dst_neigh_lookup,
256 .confirm_neigh = ip6_confirm_neigh,
257};
258
259static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
260{
261 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
262
263 return mtu ? : dst->dev->mtu;
264}
265
266static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
267 struct sk_buff *skb, u32 mtu)
268{
269}
270
271static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
272 struct sk_buff *skb)
273{
274}
275
276static struct dst_ops ip6_dst_blackhole_ops = {
277 .family = AF_INET6,
278 .destroy = ip6_dst_destroy,
279 .check = ip6_dst_check,
280 .mtu = ip6_blackhole_mtu,
281 .default_advmss = ip6_default_advmss,
282 .update_pmtu = ip6_rt_blackhole_update_pmtu,
283 .redirect = ip6_rt_blackhole_redirect,
284 .cow_metrics = dst_cow_metrics_generic,
285 .neigh_lookup = ip6_dst_neigh_lookup,
286};
287
288static const u32 ip6_template_metrics[RTAX_MAX] = {
289 [RTAX_HOPLIMIT - 1] = 0,
290};
291
292static const struct fib6_info fib6_null_entry_template = {
293 .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP),
294 .fib6_protocol = RTPROT_KERNEL,
295 .fib6_metric = ~(u32)0,
296 .fib6_ref = REFCOUNT_INIT(1),
297 .fib6_type = RTN_UNREACHABLE,
298 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
299};
300
301static const struct rt6_info ip6_null_entry_template = {
302 .dst = {
303 .__refcnt = ATOMIC_INIT(1),
304 .__use = 1,
305 .obsolete = DST_OBSOLETE_FORCE_CHK,
306 .error = -ENETUNREACH,
307 .input = ip6_pkt_discard,
308 .output = ip6_pkt_discard_out,
309 },
310 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
311};
312
313#ifdef CONFIG_IPV6_MULTIPLE_TABLES
314
315static const struct rt6_info ip6_prohibit_entry_template = {
316 .dst = {
317 .__refcnt = ATOMIC_INIT(1),
318 .__use = 1,
319 .obsolete = DST_OBSOLETE_FORCE_CHK,
320 .error = -EACCES,
321 .input = ip6_pkt_prohibit,
322 .output = ip6_pkt_prohibit_out,
323 },
324 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
325};
326
327static const struct rt6_info ip6_blk_hole_entry_template = {
328 .dst = {
329 .__refcnt = ATOMIC_INIT(1),
330 .__use = 1,
331 .obsolete = DST_OBSOLETE_FORCE_CHK,
332 .error = -EINVAL,
333 .input = dst_discard,
334 .output = dst_discard_out,
335 },
336 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
337};
338
339#endif
340
341static void rt6_info_init(struct rt6_info *rt)
342{
343 struct dst_entry *dst = &rt->dst;
344
345 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
346 INIT_LIST_HEAD(&rt->rt6i_uncached);
347}
348
349/* allocate dst with ip6_dst_ops */
350struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
351 int flags)
352{
353 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
354 1, DST_OBSOLETE_FORCE_CHK, flags);
355
356 if (rt) {
357 rt6_info_init(rt);
358 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
359 }
360
361 return rt;
362}
363EXPORT_SYMBOL(ip6_dst_alloc);
364
365static void ip6_dst_destroy(struct dst_entry *dst)
366{
367 struct rt6_info *rt = (struct rt6_info *)dst;
368 struct fib6_info *from;
369 struct inet6_dev *idev;
370
371 ip_dst_metrics_put(dst);
372 rt6_uncached_list_del(rt);
373
374 idev = rt->rt6i_idev;
375 if (idev) {
376 rt->rt6i_idev = NULL;
377 in6_dev_put(idev);
378 }
379
380 from = xchg((__force struct fib6_info **)&rt->from, NULL);
381 fib6_info_release(from);
382}
383
384static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
385 int how)
386{
387 struct rt6_info *rt = (struct rt6_info *)dst;
388 struct inet6_dev *idev = rt->rt6i_idev;
389 struct net_device *loopback_dev =
390 dev_net(dev)->loopback_dev;
391
392 if (idev && idev->dev != loopback_dev) {
393 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
394 if (loopback_idev) {
395 rt->rt6i_idev = loopback_idev;
396 in6_dev_put(idev);
397 }
398 }
399}
400
401static bool __rt6_check_expired(const struct rt6_info *rt)
402{
403 if (rt->rt6i_flags & RTF_EXPIRES)
404 return time_after(jiffies, rt->dst.expires);
405 else
406 return false;
407}
408
409static bool rt6_check_expired(const struct rt6_info *rt)
410{
411 struct fib6_info *from;
412
413 from = rcu_dereference(rt->from);
414
415 if (rt->rt6i_flags & RTF_EXPIRES) {
416 if (time_after(jiffies, rt->dst.expires))
417 return true;
418 } else if (from) {
419 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
420 fib6_check_expired(from);
421 }
422 return false;
423}
424
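/* Multipath selection: compute (or reuse) the flow hash and pick the sibling
 * route or nexthop-group leg whose upper bound covers fl6->mp_hash; res ends
 * up pointing at the chosen fib6_info/fib6_nh pair.
 */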
425void fib6_select_path(const struct net *net, struct fib6_result *res,
426 struct flowi6 *fl6, int oif, bool have_oif_match,
427 const struct sk_buff *skb, int strict)
428{
429 struct fib6_info *sibling, *next_sibling;
430 struct fib6_info *match = res->f6i;
431
432 if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
433 goto out;
434
 435 /* We might have already computed the hash for ICMPv6 errors. In such
 436 * a case it will always be non-zero. Otherwise now is the time to do it.
437 */
438 if (!fl6->mp_hash &&
439 (!match->nh || nexthop_is_multipath(match->nh)))
440 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
441
442 if (unlikely(match->nh)) {
443 nexthop_path_fib6_result(res, fl6->mp_hash);
444 return;
445 }
446
447 if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
448 goto out;
449
450 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
451 fib6_siblings) {
452 const struct fib6_nh *nh = sibling->fib6_nh;
453 int nh_upper_bound;
454
455 nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
456 if (fl6->mp_hash > nh_upper_bound)
457 continue;
458 if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
459 break;
460 match = sibling;
461 break;
462 }
463
464out:
465 res->f6i = match;
466 res->nh = match->fib6_nh;
467}
468
469/*
470 * Route lookup. rcu_read_lock() should be held.
471 */
472
473static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
474 const struct in6_addr *saddr, int oif, int flags)
475{
476 const struct net_device *dev;
477
478 if (nh->fib_nh_flags & RTNH_F_DEAD)
479 return false;
480
481 dev = nh->fib_nh_dev;
482 if (oif) {
483 if (dev->ifindex == oif)
484 return true;
485 } else {
486 if (ipv6_chk_addr(net, saddr, dev,
487 flags & RT6_LOOKUP_F_IFACE))
488 return true;
489 }
490
491 return false;
492}
493
494struct fib6_nh_dm_arg {
495 struct net *net;
496 const struct in6_addr *saddr;
497 int oif;
498 int flags;
499 struct fib6_nh *nh;
500};
501
502static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
503{
504 struct fib6_nh_dm_arg *arg = _arg;
505
506 arg->nh = nh;
507 return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
508 arg->flags);
509}
510
511/* returns fib6_nh from nexthop or NULL */
512static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
513 struct fib6_result *res,
514 const struct in6_addr *saddr,
515 int oif, int flags)
516{
517 struct fib6_nh_dm_arg arg = {
518 .net = net,
519 .saddr = saddr,
520 .oif = oif,
521 .flags = flags,
522 };
523
524 if (nexthop_is_blackhole(nh))
525 return NULL;
526
527 if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
528 return arg.nh;
529
530 return NULL;
531}
532
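/* Narrow res to a fib6_info whose nexthop matches the requested output
 * interface (or owns the source address). Falls back to the null entry when
 * strict interface matching fails or the selected nexthop is dead, and to a
 * reject result for blackhole nexthop groups.
 */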
533static void rt6_device_match(struct net *net, struct fib6_result *res,
534 const struct in6_addr *saddr, int oif, int flags)
535{
536 struct fib6_info *f6i = res->f6i;
537 struct fib6_info *spf6i;
538 struct fib6_nh *nh;
539
540 if (!oif && ipv6_addr_any(saddr)) {
541 if (unlikely(f6i->nh)) {
542 nh = nexthop_fib6_nh(f6i->nh);
543 if (nexthop_is_blackhole(f6i->nh))
544 goto out_blackhole;
545 } else {
546 nh = f6i->fib6_nh;
547 }
548 if (!(nh->fib_nh_flags & RTNH_F_DEAD))
549 goto out;
550 }
551
552 for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
553 bool matched = false;
554
555 if (unlikely(spf6i->nh)) {
556 nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
557 oif, flags);
558 if (nh)
559 matched = true;
560 } else {
561 nh = spf6i->fib6_nh;
562 if (__rt6_device_match(net, nh, saddr, oif, flags))
563 matched = true;
564 }
565 if (matched) {
566 res->f6i = spf6i;
567 goto out;
568 }
569 }
570
571 if (oif && flags & RT6_LOOKUP_F_IFACE) {
572 res->f6i = net->ipv6.fib6_null_entry;
573 nh = res->f6i->fib6_nh;
574 goto out;
575 }
576
577 if (unlikely(f6i->nh)) {
578 nh = nexthop_fib6_nh(f6i->nh);
579 if (nexthop_is_blackhole(f6i->nh))
580 goto out_blackhole;
581 } else {
582 nh = f6i->fib6_nh;
583 }
584
585 if (nh->fib_nh_flags & RTNH_F_DEAD) {
586 res->f6i = net->ipv6.fib6_null_entry;
587 nh = res->f6i->fib6_nh;
588 }
589out:
590 res->nh = nh;
591 res->fib6_type = res->f6i->fib6_type;
592 res->fib6_flags = res->f6i->fib6_flags;
593 return;
594
595out_blackhole:
596 res->fib6_flags |= RTF_REJECT;
597 res->fib6_type = RTN_BLACKHOLE;
598 res->nh = nh;
599}
600
601#ifdef CONFIG_IPV6_ROUTER_PREF
602struct __rt6_probe_work {
603 struct work_struct work;
604 struct in6_addr target;
605 struct net_device *dev;
606};
607
608static void rt6_probe_deferred(struct work_struct *w)
609{
610 struct in6_addr mcaddr;
611 struct __rt6_probe_work *work =
612 container_of(w, struct __rt6_probe_work, work);
613
614 addrconf_addr_solict_mult(&work->target, &mcaddr);
615 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
616 dev_put(work->dev);
617 kfree(work);
618}
619
620static void rt6_probe(struct fib6_nh *fib6_nh)
621{
622 struct __rt6_probe_work *work = NULL;
623 const struct in6_addr *nh_gw;
624 unsigned long last_probe;
625 struct neighbour *neigh;
626 struct net_device *dev;
627 struct inet6_dev *idev;
628
629 /*
630 * Okay, this does not seem to be appropriate
631 * for now, however, we need to check if it
632 * is really so; aka Router Reachability Probing.
633 *
634 * Router Reachability Probe MUST be rate-limited
635 * to no more than one per minute.
636 */
637 if (!fib6_nh->fib_nh_gw_family)
638 return;
639
640 nh_gw = &fib6_nh->fib_nh_gw6;
641 dev = fib6_nh->fib_nh_dev;
642 rcu_read_lock_bh();
643 last_probe = READ_ONCE(fib6_nh->last_probe);
644 idev = __in6_dev_get(dev);
645 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
646 if (neigh) {
647 if (neigh->nud_state & NUD_VALID)
648 goto out;
649
650 write_lock(&neigh->lock);
651 if (!(neigh->nud_state & NUD_VALID) &&
652 time_after(jiffies,
653 neigh->updated + idev->cnf.rtr_probe_interval)) {
654 work = kmalloc(sizeof(*work), GFP_ATOMIC);
655 if (work)
656 __neigh_set_probe_once(neigh);
657 }
658 write_unlock(&neigh->lock);
659 } else if (time_after(jiffies, last_probe +
660 idev->cnf.rtr_probe_interval)) {
661 work = kmalloc(sizeof(*work), GFP_ATOMIC);
662 }
663
664 if (!work || cmpxchg(&fib6_nh->last_probe,
665 last_probe, jiffies) != last_probe) {
666 kfree(work);
667 } else {
668 INIT_WORK(&work->work, rt6_probe_deferred);
669 work->target = *nh_gw;
670 dev_hold(dev);
671 work->dev = dev;
672 schedule_work(&work->work);
673 }
674
675out:
676 rcu_read_unlock_bh();
677}
678#else
679static inline void rt6_probe(struct fib6_nh *fib6_nh)
680{
681}
682#endif
683
684/*
685 * Default Router Selection (RFC 2461 6.3.6)
686 */
687static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
688{
689 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
690 struct neighbour *neigh;
691
692 rcu_read_lock_bh();
693 neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
694 &fib6_nh->fib_nh_gw6);
695 if (neigh) {
696 read_lock(&neigh->lock);
697 if (neigh->nud_state & NUD_VALID)
698 ret = RT6_NUD_SUCCEED;
699#ifdef CONFIG_IPV6_ROUTER_PREF
700 else if (!(neigh->nud_state & NUD_FAILED))
701 ret = RT6_NUD_SUCCEED;
702 else
703 ret = RT6_NUD_FAIL_PROBE;
704#endif
705 read_unlock(&neigh->lock);
706 } else {
707 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
708 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
709 }
710 rcu_read_unlock_bh();
711
712 return ret;
713}
714
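/* Score a nexthop for router selection: 2 when the device matches the
 * requested oif (or no oif was given), router-preference bits above that,
 * and a negative NUD-based value when RT6_LOOKUP_F_REACHABLE is set and the
 * neighbour is not (probably) reachable.
 */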
715static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
716 int strict)
717{
718 int m = 0;
719
720 if (!oif || nh->fib_nh_dev->ifindex == oif)
721 m = 2;
722
723 if (!m && (strict & RT6_LOOKUP_F_IFACE))
724 return RT6_NUD_FAIL_HARD;
725#ifdef CONFIG_IPV6_ROUTER_PREF
726 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
727#endif
728 if ((strict & RT6_LOOKUP_F_REACHABLE) &&
729 !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
730 int n = rt6_check_neigh(nh);
731 if (n < 0)
732 return n;
733 }
734 return m;
735}
736
737static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
738 int oif, int strict, int *mpri, bool *do_rr)
739{
740 bool match_do_rr = false;
741 bool rc = false;
742 int m;
743
744 if (nh->fib_nh_flags & RTNH_F_DEAD)
745 goto out;
746
747 if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
748 nh->fib_nh_flags & RTNH_F_LINKDOWN &&
749 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
750 goto out;
751
752 m = rt6_score_route(nh, fib6_flags, oif, strict);
753 if (m == RT6_NUD_FAIL_DO_RR) {
754 match_do_rr = true;
755 m = 0; /* lowest valid score */
756 } else if (m == RT6_NUD_FAIL_HARD) {
757 goto out;
758 }
759
760 if (strict & RT6_LOOKUP_F_REACHABLE)
761 rt6_probe(nh);
762
763 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
764 if (m > *mpri) {
765 *do_rr = match_do_rr;
766 *mpri = m;
767 rc = true;
768 }
769out:
770 return rc;
771}
772
773struct fib6_nh_frl_arg {
774 u32 flags;
775 int oif;
776 int strict;
777 int *mpri;
778 bool *do_rr;
779 struct fib6_nh *nh;
780};
781
782static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
783{
784 struct fib6_nh_frl_arg *arg = _arg;
785
786 arg->nh = nh;
787 return find_match(nh, arg->flags, arg->oif, arg->strict,
788 arg->mpri, arg->do_rr);
789}
790
791static void __find_rr_leaf(struct fib6_info *f6i_start,
792 struct fib6_info *nomatch, u32 metric,
793 struct fib6_result *res, struct fib6_info **cont,
794 int oif, int strict, bool *do_rr, int *mpri)
795{
796 struct fib6_info *f6i;
797
798 for (f6i = f6i_start;
799 f6i && f6i != nomatch;
800 f6i = rcu_dereference(f6i->fib6_next)) {
801 bool matched = false;
802 struct fib6_nh *nh;
803
804 if (cont && f6i->fib6_metric != metric) {
805 *cont = f6i;
806 return;
807 }
808
809 if (fib6_check_expired(f6i))
810 continue;
811
812 if (unlikely(f6i->nh)) {
813 struct fib6_nh_frl_arg arg = {
814 .flags = f6i->fib6_flags,
815 .oif = oif,
816 .strict = strict,
817 .mpri = mpri,
818 .do_rr = do_rr
819 };
820
821 if (nexthop_is_blackhole(f6i->nh)) {
822 res->fib6_flags = RTF_REJECT;
823 res->fib6_type = RTN_BLACKHOLE;
824 res->f6i = f6i;
825 res->nh = nexthop_fib6_nh(f6i->nh);
826 return;
827 }
828 if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
829 &arg)) {
830 matched = true;
831 nh = arg.nh;
832 }
833 } else {
834 nh = f6i->fib6_nh;
835 if (find_match(nh, f6i->fib6_flags, oif, strict,
836 mpri, do_rr))
837 matched = true;
838 }
839 if (matched) {
840 res->f6i = f6i;
841 res->nh = nh;
842 res->fib6_flags = f6i->fib6_flags;
843 res->fib6_type = f6i->fib6_type;
844 }
845 }
846}
847
848static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
849 struct fib6_info *rr_head, int oif, int strict,
850 bool *do_rr, struct fib6_result *res)
851{
852 u32 metric = rr_head->fib6_metric;
853 struct fib6_info *cont = NULL;
854 int mpri = -1;
855
856 __find_rr_leaf(rr_head, NULL, metric, res, &cont,
857 oif, strict, do_rr, &mpri);
858
859 __find_rr_leaf(leaf, rr_head, metric, res, &cont,
860 oif, strict, do_rr, &mpri);
861
862 if (res->f6i || !cont)
863 return;
864
865 __find_rr_leaf(cont, NULL, metric, res, NULL,
866 oif, strict, do_rr, &mpri);
867}
868
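/* Round-robin selection within a fib6 node: scan routes of equal metric
 * starting at fn->rr_ptr, advance rr_ptr when round-robin was requested
 * (do_rr), and fall back to the null entry when nothing matches.
 */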
869static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
870 struct fib6_result *res, int strict)
871{
872 struct fib6_info *leaf = rcu_dereference(fn->leaf);
873 struct fib6_info *rt0;
874 bool do_rr = false;
875 int key_plen;
876
 877 /* make sure this function or its helpers set f6i */
878 res->f6i = NULL;
879
880 if (!leaf || leaf == net->ipv6.fib6_null_entry)
881 goto out;
882
883 rt0 = rcu_dereference(fn->rr_ptr);
884 if (!rt0)
885 rt0 = leaf;
886
887 /* Double check to make sure fn is not an intermediate node
 888 * and fn->leaf does not point to its child's leaf
889 * (This might happen if all routes under fn are deleted from
890 * the tree and fib6_repair_tree() is called on the node.)
891 */
892 key_plen = rt0->fib6_dst.plen;
893#ifdef CONFIG_IPV6_SUBTREES
894 if (rt0->fib6_src.plen)
895 key_plen = rt0->fib6_src.plen;
896#endif
897 if (fn->fn_bit != key_plen)
898 goto out;
899
900 find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
901 if (do_rr) {
902 struct fib6_info *next = rcu_dereference(rt0->fib6_next);
903
904 /* no entries matched; do round-robin */
905 if (!next || next->fib6_metric != rt0->fib6_metric)
906 next = leaf;
907
908 if (next != rt0) {
909 spin_lock_bh(&leaf->fib6_table->tb6_lock);
910 /* make sure next is not being deleted from the tree */
911 if (next->fib6_node)
912 rcu_assign_pointer(fn->rr_ptr, next);
913 spin_unlock_bh(&leaf->fib6_table->tb6_lock);
914 }
915 }
916
917out:
918 if (!res->f6i) {
919 res->f6i = net->ipv6.fib6_null_entry;
920 res->nh = res->f6i->fib6_nh;
921 res->fib6_flags = res->f6i->fib6_flags;
922 res->fib6_type = res->f6i->fib6_type;
923 }
924}
925
926static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
927{
928 return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
929 res->nh->fib_nh_gw_family;
930}
931
932#ifdef CONFIG_IPV6_ROUTE_INFO
933int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
934 const struct in6_addr *gwaddr)
935{
936 struct net *net = dev_net(dev);
937 struct route_info *rinfo = (struct route_info *) opt;
938 struct in6_addr prefix_buf, *prefix;
939 unsigned int pref;
940 unsigned long lifetime;
941 struct fib6_info *rt;
942
943 if (len < sizeof(struct route_info)) {
944 return -EINVAL;
945 }
946
947 /* Sanity check for prefix_len and length */
948 if (rinfo->length > 3) {
949 return -EINVAL;
950 } else if (rinfo->prefix_len > 128) {
951 return -EINVAL;
952 } else if (rinfo->prefix_len > 64) {
953 if (rinfo->length < 2) {
954 return -EINVAL;
955 }
956 } else if (rinfo->prefix_len > 0) {
957 if (rinfo->length < 1) {
958 return -EINVAL;
959 }
960 }
961
962 pref = rinfo->route_pref;
963 if (pref == ICMPV6_ROUTER_PREF_INVALID)
964 return -EINVAL;
965
966 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
967
968 if (rinfo->length == 3)
969 prefix = (struct in6_addr *)rinfo->prefix;
970 else {
971 /* this function is safe */
972 ipv6_addr_prefix(&prefix_buf,
973 (struct in6_addr *)rinfo->prefix,
974 rinfo->prefix_len);
975 prefix = &prefix_buf;
976 }
977
978 if (rinfo->prefix_len == 0)
979 rt = rt6_get_dflt_router(net, gwaddr, dev);
980 else
981 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
982 gwaddr, dev);
983
984 if (rt && !lifetime) {
985 ip6_del_rt(net, rt);
986 rt = NULL;
987 }
988
989 if (!rt && lifetime)
990 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
991 dev, pref);
992 else if (rt)
993 rt->fib6_flags = RTF_ROUTEINFO |
994 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
995
996 if (rt) {
997 if (!addrconf_finite_timeout(lifetime))
998 fib6_clean_expires(rt);
999 else
1000 fib6_set_expires(rt, jiffies + HZ * lifetime);
1001
1002 fib6_info_release(rt);
1003 }
1004 return 0;
1005}
1006#endif
1007
1008/*
1009 * Misc support functions
1010 */
1011
1012/* called with rcu_lock held */
1013static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1014{
1015 struct net_device *dev = res->nh->fib_nh_dev;
1016
1017 if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
 1018 /* for copies of local routes, dst->dev needs to be the
 1019 * device itself if it is a master device, the master device if
 1020 * the device is enslaved, and the loopback device by default
1021 */
1022 if (netif_is_l3_slave(dev) &&
1023 !rt6_need_strict(&res->f6i->fib6_dst.addr))
1024 dev = l3mdev_master_dev_rcu(dev);
1025 else if (!netif_is_l3_master(dev))
1026 dev = dev_net(dev)->loopback_dev;
1027 /* last case is netif_is_l3_master(dev) is true in which
1028 * case we want dev returned to be dev
1029 */
1030 }
1031
1032 return dev;
1033}
1034
1035static const int fib6_prop[RTN_MAX + 1] = {
1036 [RTN_UNSPEC] = 0,
1037 [RTN_UNICAST] = 0,
1038 [RTN_LOCAL] = 0,
1039 [RTN_BROADCAST] = 0,
1040 [RTN_ANYCAST] = 0,
1041 [RTN_MULTICAST] = 0,
1042 [RTN_BLACKHOLE] = -EINVAL,
1043 [RTN_UNREACHABLE] = -EHOSTUNREACH,
1044 [RTN_PROHIBIT] = -EACCES,
1045 [RTN_THROW] = -EAGAIN,
1046 [RTN_NAT] = -EINVAL,
1047 [RTN_XRESOLVE] = -EINVAL,
1048};
1049
1050static int ip6_rt_type_to_error(u8 fib6_type)
1051{
1052 return fib6_prop[fib6_type];
1053}
1054
1055static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
1056{
1057 unsigned short flags = 0;
1058
1059 if (rt->dst_nocount)
1060 flags |= DST_NOCOUNT;
1061 if (rt->dst_nopolicy)
1062 flags |= DST_NOPOLICY;
1063 if (rt->dst_host)
1064 flags |= DST_HOST;
1065
1066 return flags;
1067}
1068
1069static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1070{
1071 rt->dst.error = ip6_rt_type_to_error(fib6_type);
1072
1073 switch (fib6_type) {
1074 case RTN_BLACKHOLE:
1075 rt->dst.output = dst_discard_out;
1076 rt->dst.input = dst_discard;
1077 break;
1078 case RTN_PROHIBIT:
1079 rt->dst.output = ip6_pkt_prohibit_out;
1080 rt->dst.input = ip6_pkt_prohibit;
1081 break;
1082 case RTN_THROW:
1083 case RTN_UNREACHABLE:
1084 default:
1085 rt->dst.output = ip6_pkt_discard_out;
1086 rt->dst.input = ip6_pkt_discard;
1087 break;
1088 }
1089}
1090
1091static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1092{
1093 struct fib6_info *f6i = res->f6i;
1094
1095 if (res->fib6_flags & RTF_REJECT) {
1096 ip6_rt_init_dst_reject(rt, res->fib6_type);
1097 return;
1098 }
1099
1100 rt->dst.error = 0;
1101 rt->dst.output = ip6_output;
1102
1103 if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1104 rt->dst.input = ip6_input;
1105 } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1106 rt->dst.input = ip6_mc_input;
1107 } else {
1108 rt->dst.input = ip6_forward;
1109 }
1110
1111 if (res->nh->fib_nh_lws) {
1112 rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1113 lwtunnel_set_redirect(&rt->dst);
1114 }
1115
1116 rt->dst.lastuse = jiffies;
1117}
1118
1119/* Caller must already hold reference to @from */
1120static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1121{
1122 rt->rt6i_flags &= ~RTF_EXPIRES;
1123 rcu_assign_pointer(rt->from, from);
1124 ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1125}
1126
1127/* Caller must already hold reference to f6i in result */
1128static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1129{
1130 const struct fib6_nh *nh = res->nh;
1131 const struct net_device *dev = nh->fib_nh_dev;
1132 struct fib6_info *f6i = res->f6i;
1133
1134 ip6_rt_init_dst(rt, res);
1135
1136 rt->rt6i_dst = f6i->fib6_dst;
1137 rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1138 rt->rt6i_flags = res->fib6_flags;
1139 if (nh->fib_nh_gw_family) {
1140 rt->rt6i_gateway = nh->fib_nh_gw6;
1141 rt->rt6i_flags |= RTF_GATEWAY;
1142 }
1143 rt6_set_from(rt, f6i);
1144#ifdef CONFIG_IPV6_SUBTREES
1145 rt->rt6i_src = f6i->fib6_src;
1146#endif
1147}
1148
1149static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1150 struct in6_addr *saddr)
1151{
1152 struct fib6_node *pn, *sn;
1153 while (1) {
1154 if (fn->fn_flags & RTN_TL_ROOT)
1155 return NULL;
1156 pn = rcu_dereference(fn->parent);
1157 sn = FIB6_SUBTREE(pn);
1158 if (sn && sn != fn)
1159 fn = fib6_node_lookup(sn, NULL, saddr);
1160 else
1161 fn = pn;
1162 if (fn->fn_flags & RTN_RTINFO)
1163 return fn;
1164 }
1165}
1166
1167static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1168{
1169 struct rt6_info *rt = *prt;
1170
1171 if (dst_hold_safe(&rt->dst))
1172 return true;
1173 if (net) {
1174 rt = net->ipv6.ip6_null_entry;
1175 dst_hold(&rt->dst);
1176 } else {
1177 rt = NULL;
1178 }
1179 *prt = rt;
1180 return false;
1181}
1182
1183/* called with rcu_lock held */
1184static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1185{
1186 struct net_device *dev = res->nh->fib_nh_dev;
1187 struct fib6_info *f6i = res->f6i;
1188 unsigned short flags;
1189 struct rt6_info *nrt;
1190
1191 if (!fib6_info_hold_safe(f6i))
1192 goto fallback;
1193
1194 flags = fib6_info_dst_flags(f6i);
1195 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1196 if (!nrt) {
1197 fib6_info_release(f6i);
1198 goto fallback;
1199 }
1200
1201 ip6_rt_copy_init(nrt, res);
1202 return nrt;
1203
1204fallback:
1205 nrt = dev_net(dev)->ipv6.ip6_null_entry;
1206 dst_hold(&nrt->dst);
1207 return nrt;
1208}
1209
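/* Per-table lookup used by ip6_route_lookup() and rt6_lookup(): walk the
 * fib6 tree under RCU, match the device, select a multipath sibling, then
 * return a cached exception route if one exists, otherwise create a new
 * dst (or fall back to the null entry).
 */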
1210static struct rt6_info *ip6_pol_route_lookup(struct net *net,
1211 struct fib6_table *table,
1212 struct flowi6 *fl6,
1213 const struct sk_buff *skb,
1214 int flags)
1215{
1216 struct fib6_result res = {};
1217 struct fib6_node *fn;
1218 struct rt6_info *rt;
1219
1220 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1221 flags &= ~RT6_LOOKUP_F_IFACE;
1222
1223 rcu_read_lock();
1224 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1225restart:
1226 res.f6i = rcu_dereference(fn->leaf);
1227 if (!res.f6i)
1228 res.f6i = net->ipv6.fib6_null_entry;
1229 else
1230 rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1231 flags);
1232
1233 if (res.f6i == net->ipv6.fib6_null_entry) {
1234 fn = fib6_backtrack(fn, &fl6->saddr);
1235 if (fn)
1236 goto restart;
1237
1238 rt = net->ipv6.ip6_null_entry;
1239 dst_hold(&rt->dst);
1240 goto out;
1241 } else if (res.fib6_flags & RTF_REJECT) {
1242 goto do_create;
1243 }
1244
1245 fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1246 fl6->flowi6_oif != 0, skb, flags);
1247
1248 /* Search through exception table */
1249 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1250 if (rt) {
1251 if (ip6_hold_safe(net, &rt))
1252 dst_use_noref(&rt->dst, jiffies);
1253 } else {
1254do_create:
1255 rt = ip6_create_rt_rcu(&res);
1256 }
1257
1258out:
1259 trace_fib6_table_lookup(net, &res, table, fl6);
1260
1261 rcu_read_unlock();
1262
1263 return rt;
1264}
1265
1266struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
1267 const struct sk_buff *skb, int flags)
1268{
1269 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
1270}
1271EXPORT_SYMBOL_GPL(ip6_route_lookup);
1272
1273struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1274 const struct in6_addr *saddr, int oif,
1275 const struct sk_buff *skb, int strict)
1276{
1277 struct flowi6 fl6 = {
1278 .flowi6_oif = oif,
1279 .daddr = *daddr,
1280 };
1281 struct dst_entry *dst;
1282 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1283
1284 if (saddr) {
1285 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1286 flags |= RT6_LOOKUP_F_HAS_SADDR;
1287 }
1288
1289 dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1290 if (dst->error == 0)
1291 return (struct rt6_info *) dst;
1292
1293 dst_release(dst);
1294
1295 return NULL;
1296}
1297EXPORT_SYMBOL(rt6_lookup);
1298
1299/* ip6_ins_rt is called with table->tb6_lock free (not held).
 1300 * It takes a new route entry; if the addition fails for any reason, the
 1301 * route is released.
 1302 * Caller must hold a dst reference before calling it.
1303 */
1304
1305static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1306 struct netlink_ext_ack *extack)
1307{
1308 int err;
1309 struct fib6_table *table;
1310
1311 table = rt->fib6_table;
1312 spin_lock_bh(&table->tb6_lock);
1313 err = fib6_add(&table->tb6_root, rt, info, extack);
1314 spin_unlock_bh(&table->tb6_lock);
1315
1316 return err;
1317}
1318
1319int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1320{
1321 struct nl_info info = { .nl_net = net, };
1322
1323 return __ip6_ins_rt(rt, &info, NULL);
1324}
1325
1326static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1327 const struct in6_addr *daddr,
1328 const struct in6_addr *saddr)
1329{
1330 struct fib6_info *f6i = res->f6i;
1331 struct net_device *dev;
1332 struct rt6_info *rt;
1333
1334 /*
1335 * Clone the route.
1336 */
1337
1338 if (!fib6_info_hold_safe(f6i))
1339 return NULL;
1340
1341 dev = ip6_rt_get_dev_rcu(res);
1342 rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1343 if (!rt) {
1344 fib6_info_release(f6i);
1345 return NULL;
1346 }
1347
1348 ip6_rt_copy_init(rt, res);
1349 rt->rt6i_flags |= RTF_CACHE;
1350 rt->dst.flags |= DST_HOST;
1351 rt->rt6i_dst.addr = *daddr;
1352 rt->rt6i_dst.plen = 128;
1353
1354 if (!rt6_is_gw_or_nonexthop(res)) {
1355 if (f6i->fib6_dst.plen != 128 &&
1356 ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1357 rt->rt6i_flags |= RTF_ANYCAST;
1358#ifdef CONFIG_IPV6_SUBTREES
1359 if (rt->rt6i_src.plen && saddr) {
1360 rt->rt6i_src.addr = *saddr;
1361 rt->rt6i_src.plen = 128;
1362 }
1363#endif
1364 }
1365
1366 return rt;
1367}
1368
1369static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1370{
1371 struct fib6_info *f6i = res->f6i;
1372 unsigned short flags = fib6_info_dst_flags(f6i);
1373 struct net_device *dev;
1374 struct rt6_info *pcpu_rt;
1375
1376 if (!fib6_info_hold_safe(f6i))
1377 return NULL;
1378
1379 rcu_read_lock();
1380 dev = ip6_rt_get_dev_rcu(res);
1381 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
1382 rcu_read_unlock();
1383 if (!pcpu_rt) {
1384 fib6_info_release(f6i);
1385 return NULL;
1386 }
1387 ip6_rt_copy_init(pcpu_rt, res);
1388 pcpu_rt->rt6i_flags |= RTF_PCPU;
1389 return pcpu_rt;
1390}
1391
1392/* It should be called with rcu_read_lock() acquired */
1393static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1394{
1395 struct rt6_info *pcpu_rt;
1396
1397 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1398
1399 return pcpu_rt;
1400}
1401
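/* Allocate the per-CPU copy of a route and publish it with cmpxchg();
 * the slot must still be empty (BUG_ON otherwise). If the origin fib6_info
 * is already being destroyed, drop the 'from' reference immediately.
 */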
1402static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1403 const struct fib6_result *res)
1404{
1405 struct rt6_info *pcpu_rt, *prev, **p;
1406
1407 pcpu_rt = ip6_rt_pcpu_alloc(res);
1408 if (!pcpu_rt)
1409 return NULL;
1410
1411 p = this_cpu_ptr(res->nh->rt6i_pcpu);
1412 prev = cmpxchg(p, NULL, pcpu_rt);
1413 BUG_ON(prev);
1414
1415 if (res->f6i->fib6_destroying) {
1416 struct fib6_info *from;
1417
1418 from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
1419 fib6_info_release(from);
1420 }
1421
1422 return pcpu_rt;
1423}
1424
1425/* exception hash table implementation
1426 */
1427static DEFINE_SPINLOCK(rt6_exception_lock);
1428
1429/* Remove rt6_ex from hash table and free the memory
1430 * Caller must hold rt6_exception_lock
1431 */
1432static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1433 struct rt6_exception *rt6_ex)
1434{
1435 struct fib6_info *from;
1436 struct net *net;
1437
1438 if (!bucket || !rt6_ex)
1439 return;
1440
1441 net = dev_net(rt6_ex->rt6i->dst.dev);
1442 net->ipv6.rt6_stats->fib_rt_cache--;
1443
 1444 /* completely purge the exception to allow releasing the held resources:
 1445 * some [sk] cache may keep the dst around for an unlimited time
1446 */
1447 from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
1448 fib6_info_release(from);
1449 dst_dev_put(&rt6_ex->rt6i->dst);
1450
1451 hlist_del_rcu(&rt6_ex->hlist);
1452 dst_release(&rt6_ex->rt6i->dst);
1453 kfree_rcu(rt6_ex, rcu);
1454 WARN_ON_ONCE(!bucket->depth);
1455 bucket->depth--;
1456}
1457
1458/* Remove oldest rt6_ex in bucket and free the memory
1459 * Caller must hold rt6_exception_lock
1460 */
1461static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1462{
1463 struct rt6_exception *rt6_ex, *oldest = NULL;
1464
1465 if (!bucket)
1466 return;
1467
1468 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1469 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1470 oldest = rt6_ex;
1471 }
1472 rt6_remove_exception(bucket, oldest);
1473}
1474
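/* Hash of the (dst[, src]) pair used to index a nexthop's exception bucket
 * array; the jhash seed is initialised once with a random value.
 */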
1475static u32 rt6_exception_hash(const struct in6_addr *dst,
1476 const struct in6_addr *src)
1477{
1478 static u32 seed __read_mostly;
1479 u32 val;
1480
1481 net_get_random_once(&seed, sizeof(seed));
1482 val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
1483
1484#ifdef CONFIG_IPV6_SUBTREES
1485 if (src)
1486 val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
1487#endif
1488 return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1489}
1490
1491/* Helper function to find the cached rt in the hash table
1492 * and update bucket pointer to point to the bucket for this
1493 * (daddr, saddr) pair
1494 * Caller must hold rt6_exception_lock
1495 */
1496static struct rt6_exception *
1497__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1498 const struct in6_addr *daddr,
1499 const struct in6_addr *saddr)
1500{
1501 struct rt6_exception *rt6_ex;
1502 u32 hval;
1503
1504 if (!(*bucket) || !daddr)
1505 return NULL;
1506
1507 hval = rt6_exception_hash(daddr, saddr);
1508 *bucket += hval;
1509
1510 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1511 struct rt6_info *rt6 = rt6_ex->rt6i;
1512 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1513
1514#ifdef CONFIG_IPV6_SUBTREES
1515 if (matched && saddr)
1516 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1517#endif
1518 if (matched)
1519 return rt6_ex;
1520 }
1521 return NULL;
1522}
1523
1524/* Helper function to find the cached rt in the hash table
1525 * and update bucket pointer to point to the bucket for this
1526 * (daddr, saddr) pair
1527 * Caller must hold rcu_read_lock()
1528 */
1529static struct rt6_exception *
1530__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1531 const struct in6_addr *daddr,
1532 const struct in6_addr *saddr)
1533{
1534 struct rt6_exception *rt6_ex;
1535 u32 hval;
1536
1537 WARN_ON_ONCE(!rcu_read_lock_held());
1538
1539 if (!(*bucket) || !daddr)
1540 return NULL;
1541
1542 hval = rt6_exception_hash(daddr, saddr);
1543 *bucket += hval;
1544
1545 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1546 struct rt6_info *rt6 = rt6_ex->rt6i;
1547 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1548
1549#ifdef CONFIG_IPV6_SUBTREES
1550 if (matched && saddr)
1551 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1552#endif
1553 if (matched)
1554 return rt6_ex;
1555 }
1556 return NULL;
1557}
1558
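/* Effective MTU for a fib6 result: the route PMTU if set, otherwise the
 * device's IPv6 MTU, clamped to IP6_MAX_MTU and reduced by any lwtunnel
 * encap headroom.
 */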
1559static unsigned int fib6_mtu(const struct fib6_result *res)
1560{
1561 const struct fib6_nh *nh = res->nh;
1562 unsigned int mtu;
1563
1564 if (res->f6i->fib6_pmtu) {
1565 mtu = res->f6i->fib6_pmtu;
1566 } else {
1567 struct net_device *dev = nh->fib_nh_dev;
1568 struct inet6_dev *idev;
1569
1570 rcu_read_lock();
1571 idev = __in6_dev_get(dev);
1572 mtu = idev->cnf.mtu6;
1573 rcu_read_unlock();
1574 }
1575
1576 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1577
1578 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1579}
1580
1581#define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL
1582
1583/* used when the flushed bit is not relevant and only access to the bucket
 1584 * is needed (i.e., all bucket users except rt6_insert_exception);
1585 *
1586 * called under rcu lock; sometimes called with rt6_exception_lock held
1587 */
1588static
1589struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1590 spinlock_t *lock)
1591{
1592 struct rt6_exception_bucket *bucket;
1593
1594 if (lock)
1595 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1596 lockdep_is_held(lock));
1597 else
1598 bucket = rcu_dereference(nh->rt6i_exception_bucket);
1599
1600 /* remove bucket flushed bit if set */
1601 if (bucket) {
1602 unsigned long p = (unsigned long)bucket;
1603
1604 p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1605 bucket = (struct rt6_exception_bucket *)p;
1606 }
1607
1608 return bucket;
1609}
1610
1611static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1612{
1613 unsigned long p = (unsigned long)bucket;
1614
1615 return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1616}
1617
1618/* called with rt6_exception_lock held */
1619static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1620 spinlock_t *lock)
1621{
1622 struct rt6_exception_bucket *bucket;
1623 unsigned long p;
1624
1625 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1626 lockdep_is_held(lock));
1627
1628 p = (unsigned long)bucket;
1629 p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1630 bucket = (struct rt6_exception_bucket *)p;
1631 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1632}
1633
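/* Insert nrt as a cached exception under res->nh: allocate the bucket array
 * on first use, refuse if the bucket was flushed or nrt's MTU is not below
 * the route MTU, replace any existing entry for the same destination, cap
 * the chain at FIB6_MAX_DEPTH and bump the sernum so cached dsts are
 * revalidated.
 */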
1634static int rt6_insert_exception(struct rt6_info *nrt,
1635 const struct fib6_result *res)
1636{
1637 struct net *net = dev_net(nrt->dst.dev);
1638 struct rt6_exception_bucket *bucket;
1639 struct fib6_info *f6i = res->f6i;
1640 struct in6_addr *src_key = NULL;
1641 struct rt6_exception *rt6_ex;
1642 struct fib6_nh *nh = res->nh;
1643 int err = 0;
1644
1645 spin_lock_bh(&rt6_exception_lock);
1646
1647 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1648 lockdep_is_held(&rt6_exception_lock));
1649 if (!bucket) {
1650 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1651 GFP_ATOMIC);
1652 if (!bucket) {
1653 err = -ENOMEM;
1654 goto out;
1655 }
1656 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1657 } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1658 err = -EINVAL;
1659 goto out;
1660 }
1661
1662#ifdef CONFIG_IPV6_SUBTREES
1663 /* fib6_src.plen != 0 indicates f6i is in subtree
1664 * and exception table is indexed by a hash of
1665 * both fib6_dst and fib6_src.
1666 * Otherwise, the exception table is indexed by
1667 * a hash of only fib6_dst.
1668 */
1669 if (f6i->fib6_src.plen)
1670 src_key = &nrt->rt6i_src.addr;
1671#endif
1672 /* rt6_mtu_change() might lower mtu on f6i.
1673 * Only insert this exception route if its mtu
1674 * is less than f6i's mtu value.
1675 */
1676 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1677 err = -EINVAL;
1678 goto out;
1679 }
1680
1681 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1682 src_key);
1683 if (rt6_ex)
1684 rt6_remove_exception(bucket, rt6_ex);
1685
1686 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1687 if (!rt6_ex) {
1688 err = -ENOMEM;
1689 goto out;
1690 }
1691 rt6_ex->rt6i = nrt;
1692 rt6_ex->stamp = jiffies;
1693 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1694 bucket->depth++;
1695 net->ipv6.rt6_stats->fib_rt_cache++;
1696
1697 if (bucket->depth > FIB6_MAX_DEPTH)
1698 rt6_exception_remove_oldest(bucket);
1699
1700out:
1701 spin_unlock_bh(&rt6_exception_lock);
1702
1703 /* Update fn->fn_sernum to invalidate all cached dst */
1704 if (!err) {
1705 spin_lock_bh(&f6i->fib6_table->tb6_lock);
1706 fib6_update_sernum(net, f6i);
1707 spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1708 fib6_force_start_gc(net);
1709 }
1710
1711 return err;
1712}
1713
1714static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1715{
1716 struct rt6_exception_bucket *bucket;
1717 struct rt6_exception *rt6_ex;
1718 struct hlist_node *tmp;
1719 int i;
1720
1721 spin_lock_bh(&rt6_exception_lock);
1722
1723 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1724 if (!bucket)
1725 goto out;
1726
 1727 /* Prevent rt6_insert_exception() from recreating the bucket list */
1728 if (!from)
1729 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1730
1731 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1732 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1733 if (!from ||
1734 rcu_access_pointer(rt6_ex->rt6i->from) == from)
1735 rt6_remove_exception(bucket, rt6_ex);
1736 }
1737 WARN_ON_ONCE(!from && bucket->depth);
1738 bucket++;
1739 }
1740out:
1741 spin_unlock_bh(&rt6_exception_lock);
1742}
1743
1744static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1745{
1746 struct fib6_info *f6i = arg;
1747
1748 fib6_nh_flush_exceptions(nh, f6i);
1749
1750 return 0;
1751}
1752
1753void rt6_flush_exceptions(struct fib6_info *f6i)
1754{
1755 if (f6i->nh)
1756 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1757 f6i);
1758 else
1759 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1760}
1761
1762/* Find the cached rt in the hash table inside the passed-in rt
1763 * Caller has to hold rcu_read_lock()
1764 */
1765static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1766 const struct in6_addr *daddr,
1767 const struct in6_addr *saddr)
1768{
1769 const struct in6_addr *src_key = NULL;
1770 struct rt6_exception_bucket *bucket;
1771 struct rt6_exception *rt6_ex;
1772 struct rt6_info *ret = NULL;
1773
1774#ifdef CONFIG_IPV6_SUBTREES
 1775 /* fib6_src.plen != 0 indicates f6i is in subtree
1776 * and exception table is indexed by a hash of
1777 * both fib6_dst and fib6_src.
1778 * However, the src addr used to create the hash
1779 * might not be exactly the passed in saddr which
1780 * is a /128 addr from the flow.
1781 * So we need to use f6i->fib6_src to redo lookup
1782 * if the passed in saddr does not find anything.
1783 * (See the logic in ip6_rt_cache_alloc() on how
1784 * rt->rt6i_src is updated.)
1785 */
1786 if (res->f6i->fib6_src.plen)
1787 src_key = saddr;
1788find_ex:
1789#endif
1790 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1791 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1792
1793 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1794 ret = rt6_ex->rt6i;
1795
1796#ifdef CONFIG_IPV6_SUBTREES
1797 /* Use fib6_src as src_key and redo lookup */
1798 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1799 src_key = &res->f6i->fib6_src.addr;
1800 goto find_ex;
1801 }
1802#endif
1803
1804 return ret;
1805}
1806
1807/* Remove the passed in cached rt from the hash table that contains it */
1808static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1809 const struct rt6_info *rt)
1810{
1811 const struct in6_addr *src_key = NULL;
1812 struct rt6_exception_bucket *bucket;
1813 struct rt6_exception *rt6_ex;
1814 int err;
1815
1816 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1817 return -ENOENT;
1818
1819 spin_lock_bh(&rt6_exception_lock);
1820 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1821
1822#ifdef CONFIG_IPV6_SUBTREES
1823 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1824 * and exception table is indexed by a hash of
1825 * both rt6i_dst and rt6i_src.
1826 * Otherwise, the exception table is indexed by
1827 * a hash of only rt6i_dst.
1828 */
1829 if (plen)
1830 src_key = &rt->rt6i_src.addr;
1831#endif
1832 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1833 &rt->rt6i_dst.addr,
1834 src_key);
1835 if (rt6_ex) {
1836 rt6_remove_exception(bucket, rt6_ex);
1837 err = 0;
1838 } else {
1839 err = -ENOENT;
1840 }
1841
1842 spin_unlock_bh(&rt6_exception_lock);
1843 return err;
1844}
1845
1846struct fib6_nh_excptn_arg {
1847 struct rt6_info *rt;
1848 int plen;
1849};
1850
1851static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1852{
1853 struct fib6_nh_excptn_arg *arg = _arg;
1854 int err;
1855
1856 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1857 if (err == 0)
1858 return 1;
1859
1860 return 0;
1861}
1862
1863static int rt6_remove_exception_rt(struct rt6_info *rt)
1864{
1865 struct fib6_info *from;
1866
1867 from = rcu_dereference(rt->from);
1868 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1869 return -EINVAL;
1870
1871 if (from->nh) {
1872 struct fib6_nh_excptn_arg arg = {
1873 .rt = rt,
1874 .plen = from->fib6_src.plen
1875 };
1876 int rc;
1877
1878 /* rc = 1 means an entry was found */
1879 rc = nexthop_for_each_fib6_nh(from->nh,
1880 rt6_nh_remove_exception_rt,
1881 &arg);
1882 return rc ? 0 : -ENOENT;
1883 }
1884
1885 return fib6_nh_remove_exception(from->fib6_nh,
1886 from->fib6_src.plen, rt);
1887}
1888
1889/* Find rt6_ex which contains the passed in rt cache and
1890 * refresh its stamp
1891 */
1892static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1893 const struct rt6_info *rt)
1894{
1895 const struct in6_addr *src_key = NULL;
1896 struct rt6_exception_bucket *bucket;
1897 struct rt6_exception *rt6_ex;
1898
1899 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1900#ifdef CONFIG_IPV6_SUBTREES
1901 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1902 * and exception table is indexed by a hash of
1903 * both rt6i_dst and rt6i_src.
1904 * Otherwise, the exception table is indexed by
1905 * a hash of only rt6i_dst.
1906 */
1907 if (plen)
1908 src_key = &rt->rt6i_src.addr;
1909#endif
1910 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1911 if (rt6_ex)
1912 rt6_ex->stamp = jiffies;
1913}
1914
1915struct fib6_nh_match_arg {
1916 const struct net_device *dev;
1917 const struct in6_addr *gw;
1918 struct fib6_nh *match;
1919};
1920
1921/* determine if fib6_nh has given device and gateway */
1922static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1923{
1924 struct fib6_nh_match_arg *arg = _arg;
1925
1926 if (arg->dev != nh->fib_nh_dev ||
1927 (arg->gw && !nh->fib_nh_gw_family) ||
1928 (!arg->gw && nh->fib_nh_gw_family) ||
1929 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
1930 return 0;
1931
1932 arg->match = nh;
1933
1934 /* found a match, break the loop */
1935 return 1;
1936}
1937
1938static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1939{
1940 struct fib6_info *from;
1941 struct fib6_nh *fib6_nh;
1942
1943 rcu_read_lock();
1944
1945 from = rcu_dereference(rt->from);
1946 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1947 goto unlock;
1948
1949 if (from->nh) {
1950 struct fib6_nh_match_arg arg = {
1951 .dev = rt->dst.dev,
1952 .gw = &rt->rt6i_gateway,
1953 };
1954
1955 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1956
1957 if (!arg.match)
1958 goto unlock;
1959 fib6_nh = arg.match;
1960 } else {
1961 fib6_nh = from->fib6_nh;
1962 }
1963 fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
1964unlock:
1965 rcu_read_unlock();
1966}
1967
1968static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1969 struct rt6_info *rt, int mtu)
1970{
1971 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1972 * lowest MTU in the path: always allow updating the route PMTU to
1973 * reflect PMTU decreases.
1974 *
1975 * If the new MTU is higher, and the route PMTU is equal to the local
1976 * MTU, this means the old MTU is the lowest in the path, so allow
1977 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1978 * handle this.
1979 */
1980
1981 if (dst_mtu(&rt->dst) >= mtu)
1982 return true;
1983
1984 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1985 return true;
1986
1987 return false;
1988}
1989
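/* Propagate an MTU change to a nexthop's cached exceptions (called with
 * rt6_exception_lock held); only entries that already carry an MTU metric
 * and pass rt6_mtu_change_route_allowed() are updated.
 */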
1990static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1991 const struct fib6_nh *nh, int mtu)
1992{
1993 struct rt6_exception_bucket *bucket;
1994 struct rt6_exception *rt6_ex;
1995 int i;
1996
1997 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1998 if (!bucket)
1999 return;
2000
2001 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2002 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
2003 struct rt6_info *entry = rt6_ex->rt6i;
2004
2005 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2006 * route), the metrics of its rt->from have already
2007 * been updated.
2008 */
2009 if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2010 rt6_mtu_change_route_allowed(idev, entry, mtu))
2011 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2012 }
2013 bucket++;
2014 }
2015}
2016
2017#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2018
2019static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2020 const struct in6_addr *gateway)
2021{
2022 struct rt6_exception_bucket *bucket;
2023 struct rt6_exception *rt6_ex;
2024 struct hlist_node *tmp;
2025 int i;
2026
2027 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2028 return;
2029
2030 spin_lock_bh(&rt6_exception_lock);
2031 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2032 if (bucket) {
2033 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2034 hlist_for_each_entry_safe(rt6_ex, tmp,
2035 &bucket->chain, hlist) {
2036 struct rt6_info *entry = rt6_ex->rt6i;
2037
2038 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2039 RTF_CACHE_GATEWAY &&
2040 ipv6_addr_equal(gateway,
2041 &entry->rt6i_gateway)) {
2042 rt6_remove_exception(bucket, rt6_ex);
2043 }
2044 }
2045 bucket++;
2046 }
2047 }
2048
2049 spin_unlock_bh(&rt6_exception_lock);
2050}
2051
2052static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2053 struct rt6_exception *rt6_ex,
2054 struct fib6_gc_args *gc_args,
2055 unsigned long now)
2056{
2057 struct rt6_info *rt = rt6_ex->rt6i;
2058
 2059 /* we are pruning and obsoleting aged-out and non-gateway exceptions
 2060 * even if others still hold references to them, so that on the next
 2061 * dst_check() such references can be dropped.
 2062 * RTF_EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
 2063 * expired, independently of their aging, as per RFC 8201 section 4
2064 */
2065 if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2066 if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
2067 RT6_TRACE("aging clone %p\n", rt);
2068 rt6_remove_exception(bucket, rt6_ex);
2069 return;
2070 }
2071 } else if (time_after(jiffies, rt->dst.expires)) {
2072 RT6_TRACE("purging expired route %p\n", rt);
2073 rt6_remove_exception(bucket, rt6_ex);
2074 return;
2075 }
2076
2077 if (rt->rt6i_flags & RTF_GATEWAY) {
2078 struct neighbour *neigh;
2079 __u8 neigh_flags = 0;
2080
2081 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2082 if (neigh)
2083 neigh_flags = neigh->flags;
2084
2085 if (!(neigh_flags & NTF_ROUTER)) {
2086 RT6_TRACE("purging route %p via non-router but gateway\n",
2087 rt);
2088 rt6_remove_exception(bucket, rt6_ex);
2089 return;
2090 }
2091 }
2092
2093 gc_args->more++;
2094}
2095
2096static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2097 struct fib6_gc_args *gc_args,
2098 unsigned long now)
2099{
2100 struct rt6_exception_bucket *bucket;
2101 struct rt6_exception *rt6_ex;
2102 struct hlist_node *tmp;
2103 int i;
2104
2105 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2106 return;
2107
2108 rcu_read_lock_bh();
2109 spin_lock(&rt6_exception_lock);
2110 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2111 if (bucket) {
2112 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2113 hlist_for_each_entry_safe(rt6_ex, tmp,
2114 &bucket->chain, hlist) {
2115 rt6_age_examine_exception(bucket, rt6_ex,
2116 gc_args, now);
2117 }
2118 bucket++;
2119 }
2120 }
2121 spin_unlock(&rt6_exception_lock);
2122 rcu_read_unlock_bh();
2123}
2124
2125struct fib6_nh_age_excptn_arg {
2126 struct fib6_gc_args *gc_args;
2127 unsigned long now;
2128};
2129
2130static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2131{
2132 struct fib6_nh_age_excptn_arg *arg = _arg;
2133
2134 fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2135 return 0;
2136}
2137
2138void rt6_age_exceptions(struct fib6_info *f6i,
2139 struct fib6_gc_args *gc_args,
2140 unsigned long now)
2141{
2142 if (f6i->nh) {
2143 struct fib6_nh_age_excptn_arg arg = {
2144 .gc_args = gc_args,
2145 .now = now
2146 };
2147
2148 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2149 &arg);
2150 } else {
2151 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2152 }
2153}
2154
2155/* must be called with rcu lock held */
2156int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2157 struct flowi6 *fl6, struct fib6_result *res, int strict)
2158{
2159 struct fib6_node *fn, *saved_fn;
2160
2161 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2162 saved_fn = fn;
2163
2164 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2165 oif = 0;
2166
2167redo_rt6_select:
2168 rt6_select(net, fn, oif, res, strict);
2169 if (res->f6i == net->ipv6.fib6_null_entry) {
2170 fn = fib6_backtrack(fn, &fl6->saddr);
2171 if (fn)
2172 goto redo_rt6_select;
2173 else if (strict & RT6_LOOKUP_F_REACHABLE) {
2174 /* also consider unreachable route */
2175 strict &= ~RT6_LOOKUP_F_REACHABLE;
2176 fn = saved_fn;
2177 goto redo_rt6_select;
2178 }
2179 }
2180
2181 trace_fib6_table_lookup(net, res, table, fl6);
2182
2183 return 0;
2184}
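/* Sketch of the expected caller pattern, mirroring ip6_pol_route() below
 * (not a complete example on its own; net, table, oif, fl6, skb and
 * strict come from the caller):
 *
 *	struct fib6_result res = {};
 *
 *	rcu_read_lock();
 *	fib6_table_lookup(net, table, oif, fl6, &res, strict);
 *	if (res.f6i != net->ipv6.fib6_null_entry)
 *		fib6_select_path(net, &res, fl6, oif, false, skb, strict);
 *	rcu_read_unlock();
 *
 * "No route" is signalled by res.f6i pointing at fib6_null_entry rather
 * than by a non-zero return value.
 */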
2185
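/* Policy routing lookup proper: after the table lookup and multipath
 * path selection, the result is turned into a dst in this order - a
 * matching exception (RTF_CACHE) entry if one exists, an uncached clone
 * for the FLOWI_FLAG_KNOWN_NH special case, or the per-cpu copy of the
 * route (created on demand). With RT6_LOOKUP_F_DST_NOREF the caller must
 * hold rcu_read_lock() and, except for the uncached-clone case, no
 * reference is taken.
 */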
2186struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2187 int oif, struct flowi6 *fl6,
2188 const struct sk_buff *skb, int flags)
2189{
2190 struct fib6_result res = {};
2191 struct rt6_info *rt = NULL;
2192 int strict = 0;
2193
2194 WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2195 !rcu_read_lock_held());
2196
2197 strict |= flags & RT6_LOOKUP_F_IFACE;
2198 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2199 if (net->ipv6.devconf_all->forwarding == 0)
2200 strict |= RT6_LOOKUP_F_REACHABLE;
2201
2202 rcu_read_lock();
2203
2204 fib6_table_lookup(net, table, oif, fl6, &res, strict);
2205 if (res.f6i == net->ipv6.fib6_null_entry)
2206 goto out;
2207
2208 fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2209
2210 /* Search through exception table */
2211 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2212 if (rt) {
2213 goto out;
2214 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2215 !res.nh->fib_nh_gw_family)) {
2216 /* Create an RTF_CACHE clone which will not be
2217 * owned by the fib6 tree. It is for the special case where
2218 * the daddr in the skb during the neighbor look-up is different
2219 * from the fl6->daddr used to look up the route here.
2220 */
2221 rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2222
2223 if (rt) {
2224 /* 1 refcnt is taken during ip6_rt_cache_alloc().
2225 * As rt6_uncached_list_add() does not consume refcnt,
2226 * this refcnt is always returned to the caller even
2227 * if the caller sets the RT6_LOOKUP_F_DST_NOREF flag.
2228 */
2229 rt6_uncached_list_add(rt);
2230 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2231 rcu_read_unlock();
2232
2233 return rt;
2234 }
2235 } else {
2236 /* Get a percpu copy */
2237 local_bh_disable();
2238 rt = rt6_get_pcpu_route(&res);
2239
2240 if (!rt)
2241 rt = rt6_make_pcpu_route(net, &res);
2242
2243 local_bh_enable();
2244 }
2245out:
2246 if (!rt)
2247 rt = net->ipv6.ip6_null_entry;
2248 if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2249 ip6_hold_safe(net, &rt);
2250 rcu_read_unlock();
2251
2252 return rt;
2253}
2254EXPORT_SYMBOL_GPL(ip6_pol_route);
2255
2256static struct rt6_info *ip6_pol_route_input(struct net *net,
2257 struct fib6_table *table,
2258 struct flowi6 *fl6,
2259 const struct sk_buff *skb,
2260 int flags)
2261{
2262 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2263}
2264
2265struct dst_entry *ip6_route_input_lookup(struct net *net,
2266 struct net_device *dev,
2267 struct flowi6 *fl6,
2268 const struct sk_buff *skb,
2269 int flags)
2270{
2271 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2272 flags |= RT6_LOOKUP_F_IFACE;
2273
2274 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2275}
2276EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2277
2278static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2279 struct flow_keys *keys,
2280 struct flow_keys *flkeys)
2281{
2282 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2283 const struct ipv6hdr *key_iph = outer_iph;
2284 struct flow_keys *_flkeys = flkeys;
2285 const struct ipv6hdr *inner_iph;
2286 const struct icmp6hdr *icmph;
2287 struct ipv6hdr _inner_iph;
2288 struct icmp6hdr _icmph;
2289
2290 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2291 goto out;
2292
2293 icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2294 sizeof(_icmph), &_icmph);
2295 if (!icmph)
2296 goto out;
2297
2298 if (!icmpv6_is_err(icmph->icmp6_type))
2299 goto out;
2300
2301 inner_iph = skb_header_pointer(skb,
2302 skb_transport_offset(skb) + sizeof(*icmph),
2303 sizeof(_inner_iph), &_inner_iph);
2304 if (!inner_iph)
2305 goto out;
2306
2307 key_iph = inner_iph;
2308 _flkeys = NULL;
2309out:
2310 if (_flkeys) {
2311 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2312 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2313 keys->tags.flow_label = _flkeys->tags.flow_label;
2314 keys->basic.ip_proto = _flkeys->basic.ip_proto;
2315 } else {
2316 keys->addrs.v6addrs.src = key_iph->saddr;
2317 keys->addrs.v6addrs.dst = key_iph->daddr;
2318 keys->tags.flow_label = ip6_flowlabel(key_iph);
2319 keys->basic.ip_proto = key_iph->nexthdr;
2320 }
2321}
2322
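/* The hash policy selected below corresponds to the
 * fib_multipath_hash_policy sysctl for IPv6 (read via
 * ip6_multipath_hash_policy()):
 *   0 - L3 hash over source/destination address, flow label and next
 *       header; for ICMPv6 errors the inner (offending) packet's
 *       addresses are used instead.
 *   1 - L4 hash that also mixes in the transport ports.
 *   2 - L3 hash of the inner packet when an encapsulated IPv4/IPv6
 *       header is found by the flow dissector, otherwise as policy 0.
 */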
2323/* if skb is set it will be used and fl6 can be NULL */
2324u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2325 const struct sk_buff *skb, struct flow_keys *flkeys)
2326{
2327 struct flow_keys hash_keys;
2328 u32 mhash;
2329
2330 switch (ip6_multipath_hash_policy(net)) {
2331 case 0:
2332 memset(&hash_keys, 0, sizeof(hash_keys));
2333 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2334 if (skb) {
2335 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2336 } else {
2337 hash_keys.addrs.v6addrs.src = fl6->saddr;
2338 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2339 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2340 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2341 }
2342 break;
2343 case 1:
2344 if (skb) {
2345 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2346 struct flow_keys keys;
2347
2348 /* short-circuit if we already have L4 hash present */
2349 if (skb->l4_hash)
2350 return skb_get_hash_raw(skb) >> 1;
2351
2352 memset(&hash_keys, 0, sizeof(hash_keys));
2353
2354 if (!flkeys) {
2355 skb_flow_dissect_flow_keys(skb, &keys, flag);
2356 flkeys = &keys;
2357 }
2358 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2359 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2360 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2361 hash_keys.ports.src = flkeys->ports.src;
2362 hash_keys.ports.dst = flkeys->ports.dst;
2363 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2364 } else {
2365 memset(&hash_keys, 0, sizeof(hash_keys));
2366 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2367 hash_keys.addrs.v6addrs.src = fl6->saddr;
2368 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2369 hash_keys.ports.src = fl6->fl6_sport;
2370 hash_keys.ports.dst = fl6->fl6_dport;
2371 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2372 }
2373 break;
2374 case 2:
2375 memset(&hash_keys, 0, sizeof(hash_keys));
2376 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2377 if (skb) {
2378 struct flow_keys keys;
2379
2380 if (!flkeys) {
2381 skb_flow_dissect_flow_keys(skb, &keys, 0);
2382 flkeys = &keys;
2383 }
2384
2385 /* Inner can be v4 or v6 */
2386 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2387 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2388 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2389 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2390 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2391 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2392 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2393 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2394 hash_keys.tags.flow_label = flkeys->tags.flow_label;
2395 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2396 } else {
2397 /* Same as case 0 */
2398 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2399 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2400 }
2401 } else {
2402 /* Same as case 0 */
2403 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2404 hash_keys.addrs.v6addrs.src = fl6->saddr;
2405 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2406 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2407 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2408 }
2409 break;
2410 }
2411 mhash = flow_hash_from_keys(&hash_keys);
2412
2413 return mhash >> 1;
2414}
2415
2416/* Called with rcu held */
2417void ip6_route_input(struct sk_buff *skb)
2418{
2419 const struct ipv6hdr *iph = ipv6_hdr(skb);
2420 struct net *net = dev_net(skb->dev);
2421 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2422 struct ip_tunnel_info *tun_info;
2423 struct flowi6 fl6 = {
2424 .flowi6_iif = skb->dev->ifindex,
2425 .daddr = iph->daddr,
2426 .saddr = iph->saddr,
2427 .flowlabel = ip6_flowinfo(iph),
2428 .flowi6_mark = skb->mark,
2429 .flowi6_proto = iph->nexthdr,
2430 };
2431 struct flow_keys *flkeys = NULL, _flkeys;
2432
2433 tun_info = skb_tunnel_info(skb);
2434 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2435 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2436
2437 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2438 flkeys = &_flkeys;
2439
2440 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2441 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2442 skb_dst_drop(skb);
2443 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2444 &fl6, skb, flags));
2445}
2446
2447static struct rt6_info *ip6_pol_route_output(struct net *net,
2448 struct fib6_table *table,
2449 struct flowi6 *fl6,
2450 const struct sk_buff *skb,
2451 int flags)
2452{
2453 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2454}
2455
2456struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2457 const struct sock *sk,
2458 struct flowi6 *fl6, int flags)
2459{
2460 bool any_src;
2461
2462 if (ipv6_addr_type(&fl6->daddr) &
2463 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2464 struct dst_entry *dst;
2465
2466 /* This function does not take refcnt on the dst */
2467 dst = l3mdev_link_scope_lookup(net, fl6);
2468 if (dst)
2469 return dst;
2470 }
2471
2472 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2473
2474 flags |= RT6_LOOKUP_F_DST_NOREF;
2475 any_src = ipv6_addr_any(&fl6->saddr);
2476 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2477 (fl6->flowi6_oif && any_src))
2478 flags |= RT6_LOOKUP_F_IFACE;
2479
2480 if (!any_src)
2481 flags |= RT6_LOOKUP_F_HAS_SADDR;
2482 else if (sk)
2483 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2484
2485 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2486}
2487EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
2488
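/* Reference-taking wrapper around ip6_route_output_flags_noref(): the
 * _noref variant leaves refcounting to the caller (which must stay
 * inside rcu_read_lock()), while this wrapper takes a reference - or
 * falls back to the refcounted ip6_null_entry - so the returned dst can
 * be used after the RCU section ends.
 */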
2489struct dst_entry *ip6_route_output_flags(struct net *net,
2490 const struct sock *sk,
2491 struct flowi6 *fl6,
2492 int flags)
2493{
2494 struct dst_entry *dst;
2495 struct rt6_info *rt6;
2496
2497 rcu_read_lock();
2498 dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2499 rt6 = (struct rt6_info *)dst;
2500 /* For dst cached in uncached_list, refcnt is already taken. */
2501 if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
2502 dst = &net->ipv6.ip6_null_entry->dst;
2503 dst_hold(dst);
2504 }
2505 rcu_read_unlock();
2506
2507 return dst;
2508}
2509EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2510
2511struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2512{
2513 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2514 struct net_device *loopback_dev = net->loopback_dev;
2515 struct dst_entry *new = NULL;
2516
2517 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2518 DST_OBSOLETE_DEAD, 0);
2519 if (rt) {
2520 rt6_info_init(rt);
2521 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2522
2523 new = &rt->dst;
2524 new->__use = 1;
2525 new->input = dst_discard;
2526 new->output = dst_discard_out;
2527
2528 dst_copy_metrics(new, &ort->dst);
2529
2530 rt->rt6i_idev = in6_dev_get(loopback_dev);
2531 rt->rt6i_gateway = ort->rt6i_gateway;
2532 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2533
2534 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2535#ifdef CONFIG_IPV6_SUBTREES
2536 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2537#endif
2538 }
2539
2540 dst_release(dst_orig);
2541 return new ? new : ERR_PTR(-ENOMEM);
2542}
2543
2544/*
2545 * Destination cache support functions
2546 */
2547
2548static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2549{
2550 u32 rt_cookie = 0;
2551
2552 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2553 return false;
2554
2555 if (fib6_check_expired(f6i))
2556 return false;
2557
2558 return true;
2559}
2560
2561static struct dst_entry *rt6_check(struct rt6_info *rt,
2562 struct fib6_info *from,
2563 u32 cookie)
2564{
2565 u32 rt_cookie = 0;
2566
2567 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2568 rt_cookie != cookie)
2569 return NULL;
2570
2571 if (rt6_check_expired(rt))
2572 return NULL;
2573
2574 return &rt->dst;
2575}
2576
2577static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2578 struct fib6_info *from,
2579 u32 cookie)
2580{
2581 if (!__rt6_check_expired(rt) &&
2582 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2583 fib6_check(from, cookie))
2584 return &rt->dst;
2585 else
2586 return NULL;
2587}
2588
2589static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2590{
2591 struct dst_entry *dst_ret;
2592 struct fib6_info *from;
2593 struct rt6_info *rt;
2594
2595 rt = container_of(dst, struct rt6_info, dst);
2596
2597 rcu_read_lock();
2598
2599 /* All IPv6 dsts are created with ->obsolete set to the value
2600 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
2601 * down into this function.
2602 */
2603
2604 from = rcu_dereference(rt->from);
2605
2606 if (from && (rt->rt6i_flags & RTF_PCPU ||
2607 unlikely(!list_empty(&rt->rt6i_uncached))))
2608 dst_ret = rt6_dst_from_check(rt, from, cookie);
2609 else
2610 dst_ret = rt6_check(rt, from, cookie);
2611
2612 rcu_read_unlock();
2613
2614 return dst_ret;
2615}
2616
2617static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2618{
2619 struct rt6_info *rt = (struct rt6_info *) dst;
2620
2621 if (rt) {
2622 if (rt->rt6i_flags & RTF_CACHE) {
2623 rcu_read_lock();
2624 if (rt6_check_expired(rt)) {
2625 rt6_remove_exception_rt(rt);
2626 dst = NULL;
2627 }
2628 rcu_read_unlock();
2629 } else {
2630 dst_release(dst);
2631 dst = NULL;
2632 }
2633 }
2634 return dst;
2635}
2636
2637static void ip6_link_failure(struct sk_buff *skb)
2638{
2639 struct rt6_info *rt;
2640
2641 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2642
2643 rt = (struct rt6_info *) skb_dst(skb);
2644 if (rt) {
2645 rcu_read_lock();
2646 if (rt->rt6i_flags & RTF_CACHE) {
2647 rt6_remove_exception_rt(rt);
2648 } else {
2649 struct fib6_info *from;
2650 struct fib6_node *fn;
2651
2652 from = rcu_dereference(rt->from);
2653 if (from) {
2654 fn = rcu_dereference(from->fib6_node);
2655 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2656 fn->fn_sernum = -1;
2657 }
2658 }
2659 rcu_read_unlock();
2660 }
2661}
2662
2663static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2664{
2665 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2666 struct fib6_info *from;
2667
2668 rcu_read_lock();
2669 from = rcu_dereference(rt0->from);
2670 if (from)
2671 rt0->dst.expires = from->expires;
2672 rcu_read_unlock();
2673 }
2674
2675 dst_set_expires(&rt0->dst, timeout);
2676 rt0->rt6i_flags |= RTF_EXPIRES;
2677}
2678
2679static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2680{
2681 struct net *net = dev_net(rt->dst.dev);
2682
2683 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2684 rt->rt6i_flags |= RTF_MODIFIED;
2685 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2686}
2687
2688static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2689{
2690 return !(rt->rt6i_flags & RTF_CACHE) &&
2691 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2692}
2693
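/* Core PMTU update helper: the new MTU is clamped to IPV6_MIN_MTU and
 * increases are ignored. Routes that are already per-flow clones
 * (RTF_CACHE) or are not backed by a fib entry are updated in place;
 * shared routes (per-cpu copies or fib-backed dsts) instead get a new
 * RTF_CACHE exception for this destination so the reduced MTU only
 * affects the flow that triggered it.
 */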
2694static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2695 const struct ipv6hdr *iph, u32 mtu)
2696{
2697 const struct in6_addr *daddr, *saddr;
2698 struct rt6_info *rt6 = (struct rt6_info *)dst;
2699
2700 if (dst_metric_locked(dst, RTAX_MTU))
2701 return;
2702
2703 if (iph) {
2704 daddr = &iph->daddr;
2705 saddr = &iph->saddr;
2706 } else if (sk) {
2707 daddr = &sk->sk_v6_daddr;
2708 saddr = &inet6_sk(sk)->saddr;
2709 } else {
2710 daddr = NULL;
2711 saddr = NULL;
2712 }
2713 dst_confirm_neigh(dst, daddr);
2714 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2715 if (mtu >= dst_mtu(dst))
2716 return;
2717
2718 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2719 rt6_do_update_pmtu(rt6, mtu);
2720 /* update rt6_ex->stamp for cache */
2721 if (rt6->rt6i_flags & RTF_CACHE)
2722 rt6_update_exception_stamp_rt(rt6);
2723 } else if (daddr) {
2724 struct fib6_result res = {};
2725 struct rt6_info *nrt6;
2726
2727 rcu_read_lock();
2728 res.f6i = rcu_dereference(rt6->from);
2729 if (!res.f6i)
2730 goto out_unlock;
2731
2732 res.fib6_flags = res.f6i->fib6_flags;
2733 res.fib6_type = res.f6i->fib6_type;
2734
2735 if (res.f6i->nh) {
2736 struct fib6_nh_match_arg arg = {
2737 .dev = dst->dev,
2738 .gw = &rt6->rt6i_gateway,
2739 };
2740
2741 nexthop_for_each_fib6_nh(res.f6i->nh,
2742 fib6_nh_find_match, &arg);
2743
2744 /* The fib6_info uses a nexthop object that has no fib6_nh
2745 * matching the dst->dev + gw pair. Should be impossible.
2746 */
2747 if (!arg.match)
2748 goto out_unlock;
2749
2750 res.nh = arg.match;
2751 } else {
2752 res.nh = res.f6i->fib6_nh;
2753 }
2754
2755 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2756 if (nrt6) {
2757 rt6_do_update_pmtu(nrt6, mtu);
2758 if (rt6_insert_exception(nrt6, &res))
2759 dst_release_immediate(&nrt6->dst);
2760 }
2761out_unlock:
2762 rcu_read_unlock();
2763 }
2764}
2765
2766static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2767 struct sk_buff *skb, u32 mtu)
2768{
2769 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2770}
2771
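/* PMTU update without a socket context (external callers such as tunnel
 * drivers are an assumption here, not taken from this file): rebuild a
 * flow from the offending IPv6 header, look up the matching route and
 * apply __ip6_rt_update_pmtu() to it.
 */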
2772void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2773 int oif, u32 mark, kuid_t uid)
2774{
2775 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2776 struct dst_entry *dst;
2777 struct flowi6 fl6 = {
2778 .flowi6_oif = oif,
2779 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2780 .daddr = iph->daddr,
2781 .saddr = iph->saddr,
2782 .flowlabel = ip6_flowinfo(iph),
2783 .flowi6_uid = uid,
2784 };
2785
2786 dst = ip6_route_output(net, NULL, &fl6);
2787 if (!dst->error)
2788 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
2789 dst_release(dst);
2790}
2791EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2792
2793void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2794{
2795 int oif = sk->sk_bound_dev_if;
2796 struct dst_entry *dst;
2797
2798 if (!oif && skb->dev)
2799 oif = l3mdev_master_ifindex(skb->dev);
2800
2801 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2802
2803 dst = __sk_dst_get(sk);
2804 if (!dst || !dst->obsolete ||
2805 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2806 return;
2807
2808 bh_lock_sock(sk);
2809 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2810 ip6_datagram_dst_update(sk, false);
2811 bh_unlock_sock(sk);
2812}
2813EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2814
2815void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2816 const struct flowi6 *fl6)
2817{
2818#ifdef CONFIG_IPV6_SUBTREES
2819 struct ipv6_pinfo *np = inet6_sk(sk);
2820#endif
2821
2822 ip6_dst_store(sk, dst,
2823 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2824 &sk->sk_v6_daddr : NULL,
2825#ifdef CONFIG_IPV6_SUBTREES
2826 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2827 &np->saddr :
2828#endif
2829 NULL);
2830}
2831
2832static bool ip6_redirect_nh_match(const struct fib6_result *res,
2833 struct flowi6 *fl6,
2834 const struct in6_addr *gw,
2835 struct rt6_info **ret)
2836{
2837 const struct fib6_nh *nh = res->nh;
2838
2839 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
2840 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
2841 return false;
2842
2843 /* rt_cache's gateway might be different from its 'parent'
2844 * in the case of an ip redirect.
2845 * So we keep searching in the exception table if the gateway
2846 * is different.
2847 */
2848 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
2849 struct rt6_info *rt_cache;
2850
2851 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
2852 if (rt_cache &&
2853 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
2854 *ret = rt_cache;
2855 return true;
2856 }
2857 return false;
2858 }
2859 return true;
2860}
2861
2862struct fib6_nh_rd_arg {
2863 struct fib6_result *res;
2864 struct flowi6 *fl6;
2865 const struct in6_addr *gw;
2866 struct rt6_info **ret;
2867};
2868
2869static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
2870{
2871 struct fib6_nh_rd_arg *arg = _arg;
2872
2873 arg->res->nh = nh;
2874 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
2875}
2876
2877/* Handle redirects */
2878struct ip6rd_flowi {
2879 struct flowi6 fl6;
2880 struct in6_addr gateway;
2881};
2882
2883static struct rt6_info *__ip6_route_redirect(struct net *net,
2884 struct fib6_table *table,
2885 struct flowi6 *fl6,
2886 const struct sk_buff *skb,
2887 int flags)
2888{
2889 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2890 struct rt6_info *ret = NULL;
2891 struct fib6_result res = {};
2892 struct fib6_nh_rd_arg arg = {
2893 .res = &res,
2894 .fl6 = fl6,
2895 .gw = &rdfl->gateway,
2896 .ret = &ret
2897 };
2898 struct fib6_info *rt;
2899 struct fib6_node *fn;
2900
2901 /* l3mdev_update_flow overrides oif if the device is enslaved; in
2902 * this case we must match on the real ingress device, so reset it
2903 */
2904 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2905 fl6->flowi6_oif = skb->dev->ifindex;
2906
2907 /* Get the "current" route for this destination and
2908 * check if the redirect has come from an appropriate router.
2909 *
2910 * RFC 4861 specifies that redirects should only be
2911 * accepted if they come from the nexthop to the target.
2912 * Due to the way the routes are chosen, this notion
2913 * is a bit fuzzy and one might need to check all possible
2914 * routes.
2915 */
2916
2917 rcu_read_lock();
2918 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2919restart:
2920 for_each_fib6_node_rt_rcu(fn) {
2921 res.f6i = rt;
2922 if (fib6_check_expired(rt))
2923 continue;
2924 if (rt->fib6_flags & RTF_REJECT)
2925 break;
2926 if (unlikely(rt->nh)) {
2927 if (nexthop_is_blackhole(rt->nh))
2928 continue;
2929 /* on match, res->nh and potentially ret are filled in */
2930 if (nexthop_for_each_fib6_nh(rt->nh,
2931 fib6_nh_redirect_match,
2932 &arg))
2933 goto out;
2934 } else {
2935 res.nh = rt->fib6_nh;
2936 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
2937 &ret))
2938 goto out;
2939 }
2940 }
2941
2942 if (!rt)
2943 rt = net->ipv6.fib6_null_entry;
2944 else if (rt->fib6_flags & RTF_REJECT) {
2945 ret = net->ipv6.ip6_null_entry;
2946 goto out;
2947 }
2948
2949 if (rt == net->ipv6.fib6_null_entry) {
2950 fn = fib6_backtrack(fn, &fl6->saddr);
2951 if (fn)
2952 goto restart;
2953 }
2954
2955 res.f6i = rt;
2956 res.nh = rt->fib6_nh;
2957out:
2958 if (ret) {
2959 ip6_hold_safe(net, &ret);
2960 } else {
2961 res.fib6_flags = res.f6i->fib6_flags;
2962 res.fib6_type = res.f6i->fib6_type;
2963 ret = ip6_create_rt_rcu(&res);
2964 }
2965
2966 rcu_read_unlock();
2967
2968 trace_fib6_table_lookup(net, &res, table, fl6);
2969 return ret;
2970}
2971
2972static struct dst_entry *ip6_route_redirect(struct net *net,
2973 const struct flowi6 *fl6,
2974 const struct sk_buff *skb,
2975 const struct in6_addr *gateway)
2976{
2977 int flags = RT6_LOOKUP_F_HAS_SADDR;
2978 struct ip6rd_flowi rdfl;
2979
2980 rdfl.fl6 = *fl6;
2981 rdfl.gateway = *gateway;
2982
2983 return fib6_rule_lookup(net, &rdfl.fl6, skb,
2984 flags, __ip6_route_redirect);
2985}
2986
2987void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2988 kuid_t uid)
2989{
2990 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2991 struct dst_entry *dst;
2992 struct flowi6 fl6 = {
2993 .flowi6_iif = LOOPBACK_IFINDEX,
2994 .flowi6_oif = oif,
2995 .flowi6_mark = mark,
2996 .daddr = iph->daddr,
2997 .saddr = iph->saddr,
2998 .flowlabel = ip6_flowinfo(iph),
2999 .flowi6_uid = uid,
3000 };
3001
3002 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3003 rt6_do_redirect(dst, NULL, skb);
3004 dst_release(dst);
3005}
3006EXPORT_SYMBOL_GPL(ip6_redirect);
3007
3008void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3009{
3010 const struct ipv6hdr *iph = ipv6_hdr(skb);
3011 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3012 struct dst_entry *dst;
3013 struct flowi6 fl6 = {
3014 .flowi6_iif = LOOPBACK_IFINDEX,
3015 .flowi6_oif = oif,
3016 .daddr = msg->dest,
3017 .saddr = iph->daddr,
3018 .flowi6_uid = sock_net_uid(net, NULL),
3019 };
3020
3021 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3022 rt6_do_redirect(dst, NULL, skb);
3023 dst_release(dst);
3024}
3025
3026void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3027{
3028 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
3029 sk->sk_uid);
3030}
3031EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3032
3033static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3034{
3035 struct net_device *dev = dst->dev;
3036 unsigned int mtu = dst_mtu(dst);
3037 struct net *net = dev_net(dev);
3038
3039 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3040
3041 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3042 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3043
3044 /*
3045 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
3046 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3047 * IPV6_MAXPLEN is also valid and means: "any MSS,
3048 * rely only on pmtu discovery"
3049 */
3050 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3051 mtu = IPV6_MAXPLEN;
3052 return mtu;
3053}
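/* Example with hypothetical numbers: on a 1500-byte MTU link the
 * advertised MSS becomes 1500 - 40 (IPv6 header) - 20 (TCP header)
 * = 1440, unless ip6_rt_min_advmss forces a larger minimum.
 */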
3054
3055static unsigned int ip6_mtu(const struct dst_entry *dst)
3056{
3057 struct inet6_dev *idev;
3058 unsigned int mtu;
3059
3060 mtu = dst_metric_raw(dst, RTAX_MTU);
3061 if (mtu)
3062 goto out;
3063
3064 mtu = IPV6_MIN_MTU;
3065
3066 rcu_read_lock();
3067 idev = __in6_dev_get(dst->dev);
3068 if (idev)
3069 mtu = idev->cnf.mtu6;
3070 rcu_read_unlock();
3071
3072out:
3073 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3074
3075 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
3076}
3077
3078/* MTU selection:
3079 * 1. mtu on route is locked - use it
3080 * 2. mtu from nexthop exception
3081 * 3. mtu from egress device
3082 *
3083 * based on ip6_dst_mtu_forward and exception logic of
3084 * rt6_find_cached_rt; called with rcu_read_lock
3085 */
3086u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3087 const struct in6_addr *daddr,
3088 const struct in6_addr *saddr)
3089{
3090 const struct fib6_nh *nh = res->nh;
3091 struct fib6_info *f6i = res->f6i;
3092 struct inet6_dev *idev;
3093 struct rt6_info *rt;
3094 u32 mtu = 0;
3095
3096 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3097 mtu = f6i->fib6_pmtu;
3098 if (mtu)
3099 goto out;
3100 }
3101
3102 rt = rt6_find_cached_rt(res, daddr, saddr);
3103 if (unlikely(rt)) {
3104 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3105 } else {
3106 struct net_device *dev = nh->fib_nh_dev;
3107
3108 mtu = IPV6_MIN_MTU;
3109 idev = __in6_dev_get(dev);
3110 if (idev && idev->cnf.mtu6 > mtu)
3111 mtu = idev->cnf.mtu6;
3112 }
3113
3114 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3115out:
3116 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3117}
3118
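/* Allocate a standalone dst for ndisc/ICMPv6 output: the route is never
 * linked into the FIB, so it is put on the uncached list (which lets
 * rt6_disable_ip() release the device later) and run through
 * xfrm_lookup() before being returned.
 */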
3119struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3120 struct flowi6 *fl6)
3121{
3122 struct dst_entry *dst;
3123 struct rt6_info *rt;
3124 struct inet6_dev *idev = in6_dev_get(dev);
3125 struct net *net = dev_net(dev);
3126
3127 if (unlikely(!idev))
3128 return ERR_PTR(-ENODEV);
3129
3130 rt = ip6_dst_alloc(net, dev, 0);
3131 if (unlikely(!rt)) {
3132 in6_dev_put(idev);
3133 dst = ERR_PTR(-ENOMEM);
3134 goto out;
3135 }
3136
3137 rt->dst.flags |= DST_HOST;
3138 rt->dst.input = ip6_input;
3139 rt->dst.output = ip6_output;
3140 rt->rt6i_gateway = fl6->daddr;
3141 rt->rt6i_dst.addr = fl6->daddr;
3142 rt->rt6i_dst.plen = 128;
3143 rt->rt6i_idev = idev;
3144 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3145
3146 /* Add this dst into uncached_list so that rt6_disable_ip() can
3147 * do proper release of the net_device
3148 */
3149 rt6_uncached_list_add(rt);
3150 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
3151
3152 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3153
3154out:
3155 return dst;
3156}
3157
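/* dst garbage collection, driven by the ip6_rt_* sysctls (these are the
 * knobs usually exposed under /proc/sys/net/ipv6/route/): the scan is
 * skipped while the entry count is at or below max_size and
 * gc_min_interval has not yet elapsed. Otherwise fib6_run_gc() runs with
 * an expiry (ip6_rt_gc_expire) that is bumped on every pass, reset to
 * half of gc_timeout once the entry count falls below gc_thresh, and
 * decayed by the gc_elasticity shift on each invocation.
 */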
3158static int ip6_dst_gc(struct dst_ops *ops)
3159{
3160 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3161 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3162 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
3163 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3164 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3165 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3166 int entries;
3167
3168 entries = dst_entries_get_fast(ops);
3169 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
3170 entries <= rt_max_size)
3171 goto out;
3172
3173 net->ipv6.ip6_rt_gc_expire++;
3174 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
3175 entries = dst_entries_get_slow(ops);
3176 if (entries < ops->gc_thresh)
3177 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
3178out:
3179 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
3180 return entries > rt_max_size;
3181}
3182
3183static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3184 const struct in6_addr *gw_addr, u32 tbid,
3185 int flags, struct fib6_result *res)
3186{
3187 struct flowi6 fl6 = {
3188 .flowi6_oif = cfg->fc_ifindex,
3189 .daddr = *gw_addr,
3190 .saddr = cfg->fc_prefsrc,
3191 };
3192 struct fib6_table *table;
3193 int err;
3194
3195 table = fib6_get_table(net, tbid);
3196 if (!table)
3197 return -EINVAL;
3198
3199 if (!ipv6_addr_any(&cfg->fc_prefsrc))
3200 flags |= RT6_LOOKUP_F_HAS_SADDR;
3201
3202 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3203
3204 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3205 if (!err && res->f6i != net->ipv6.fib6_null_entry)
3206 fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3207 cfg->fc_ifindex != 0, NULL, flags);
3208
3209 return err;
3210}
3211
3212static int ip6_route_check_nh_onlink(struct net *net,
3213 struct fib6_config *cfg,
3214 const struct net_device *dev,
3215 struct netlink_ext_ack *extack)
3216{
3217 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3218 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3219 struct fib6_result res = {};
3220 int err;
3221
3222 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3223 if (!err && !(res.fib6_flags & RTF_REJECT) &&
3224 /* ignore match if it is the default route */
3225 !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3226 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3227 NL_SET_ERR_MSG(extack,
3228 "Nexthop has invalid gateway or device mismatch");
3229 err = -EINVAL;
3230 }
3231
3232 return err;
3233}
3234
3235static int ip6_route_check_nh(struct net *net,
3236 struct fib6_config *cfg,
3237 struct net_device **_dev,
3238 struct inet6_dev **idev)
3239{
3240 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3241 struct net_device *dev = _dev ? *_dev : NULL;
3242 int flags = RT6_LOOKUP_F_IFACE;
3243 struct fib6_result res = {};
3244 int err = -EHOSTUNREACH;
3245
3246 if (cfg->fc_table) {
3247 err = ip6_nh_lookup_table(net, cfg, gw_addr,
3248 cfg->fc_table, flags, &res);
3249 /* The route to gw_addr must not itself require a gateway or resolve
3250 * to a reject route. If a device is given, it must match the result.
3251 */
3252 if (err || res.fib6_flags & RTF_REJECT ||
3253 res.nh->fib_nh_gw_family ||
3254 (dev && dev != res.nh->fib_nh_dev))
3255 err = -EHOSTUNREACH;
3256 }
3257
3258 if (err < 0) {
3259 struct flowi6 fl6 = {
3260 .flowi6_oif = cfg->fc_ifindex,
3261 .daddr = *gw_addr,
3262 };
3263
3264 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3265 if (err || res.fib6_flags & RTF_REJECT ||
3266 res.nh->fib_nh_gw_family)
3267 err = -EHOSTUNREACH;
3268
3269 if (err)
3270 return err;
3271
3272 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3273 cfg->fc_ifindex != 0, NULL, flags);
3274 }
3275
3276 err = 0;
3277 if (dev) {
3278 if (dev != res.nh->fib_nh_dev)
3279 err = -EHOSTUNREACH;
3280 } else {
3281 *_dev = dev = res.nh->fib_nh_dev;
3282 dev_hold(dev);
3283 *idev = in6_dev_get(dev);
3284 }
3285
3286 return err;
3287}
3288
3289static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3290 struct net_device **_dev, struct inet6_dev **idev,
3291 struct netlink_ext_ack *extack)
3292{
3293 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3294 int gwa_type = ipv6_addr_type(gw_addr);
3295 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3296 const struct net_device *dev = *_dev;
3297 bool need_addr_check = !dev;
3298 int err = -EINVAL;
3299
3300 /* if gw_addr is local we will fail to detect this in case the
3301 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3302 * will return the already-added prefix route via the interface
3303 * the prefix route was assigned to, which might be non-loopback.
3304 */
3305 if (dev &&
3306 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3307 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3308 goto out;
3309 }
3310
3311 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3312 /* IPv6 strictly inhibits using non-link-local
3313 * addresses as nexthop addresses.
3314 * Otherwise, a router will not be able to send redirects.
3315 * It is very good, but in some (rare!) circumstances
3316 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3317 * some exceptions. --ANK
3318 * We allow IPv4-mapped nexthops to support RFC4798-type
3319 * addressing.
3320 */
3321 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3322 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3323 goto out;
3324 }
3325
3326 rcu_read_lock();
3327
3328 if (cfg->fc_flags & RTNH_F_ONLINK)
3329 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3330 else
3331 err = ip6_route_check_nh(net, cfg, _dev, idev);
3332
3333 rcu_read_unlock();
3334
3335 if (err)
3336 goto out;
3337 }
3338
3339 /* reload in case device was changed */
3340 dev = *_dev;
3341
3342 err = -EINVAL;
3343 if (!dev) {
3344 NL_SET_ERR_MSG(extack, "Egress device not specified");
3345 goto out;
3346 } else if (dev->flags & IFF_LOOPBACK) {
3347 NL_SET_ERR_MSG(extack,
3348 "Egress device can not be loopback device for this route");
3349 goto out;
3350 }
3351
3352 /* if we did not check gw_addr above, do so now that the
3353 * egress device has been resolved.
3354 */
3355 if (need_addr_check &&
3356 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3357 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3358 goto out;
3359 }
3360
3361 err = 0;
3362out:
3363 return err;
3364}
3365
3366static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3367{
3368 if ((flags & RTF_REJECT) ||
3369 (dev && (dev->flags & IFF_LOOPBACK) &&
3370 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3371 !(flags & RTF_LOCAL)))
3372 return true;
3373
3374 return false;
3375}
3376
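/* Initialise a fib6_nh from a fib6_config: resolve the egress device and
 * its inet6_dev, validate RTNH_F_ONLINK and RTF_GATEWAY settings (via
 * ip6_validate_gw()), point reject routes at the loopback device, set up
 * lwtunnel encap state through fib_nh_common_init() and allocate the
 * per-cpu rt6_info cache. On failure every reference taken along the way
 * is dropped before the error is returned.
 */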
3377int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3378 struct fib6_config *cfg, gfp_t gfp_flags,
3379 struct netlink_ext_ack *extack)
3380{
3381 struct net_device *dev = NULL;
3382 struct inet6_dev *idev = NULL;
3383 int addr_type;
3384 int err;
3385
3386 fib6_nh->fib_nh_family = AF_INET6;
3387#ifdef CONFIG_IPV6_ROUTER_PREF
3388 fib6_nh->last_probe = jiffies;
3389#endif
3390
3391 err = -ENODEV;
3392 if (cfg->fc_ifindex) {
3393 dev = dev_get_by_index(net, cfg->fc_ifindex);
3394 if (!dev)
3395 goto out;
3396 idev = in6_dev_get(dev);
3397 if (!idev)
3398 goto out;
3399 }
3400
3401 if (cfg->fc_flags & RTNH_F_ONLINK) {
3402 if (!dev) {
3403 NL_SET_ERR_MSG(extack,
3404 "Nexthop device required for onlink");
3405 goto out;
3406 }
3407
3408 if (!(dev->flags & IFF_UP)) {
3409 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3410 err = -ENETDOWN;
3411 goto out;
3412 }
3413
3414 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3415 }
3416
3417 fib6_nh->fib_nh_weight = 1;
3418
3419 /* We cannot add true routes via loopback here,
3420 * they would result in kernel looping; promote them to reject routes
3421 */
3422 addr_type = ipv6_addr_type(&cfg->fc_dst);
3423 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3424 /* hold loopback dev/idev if we haven't done so. */
3425 if (dev != net->loopback_dev) {
3426 if (dev) {
3427 dev_put(dev);
3428 in6_dev_put(idev);
3429 }
3430 dev = net->loopback_dev;
3431 dev_hold(dev);
3432 idev = in6_dev_get(dev);
3433 if (!idev) {
3434 err = -ENODEV;
3435 goto out;
3436 }
3437 }
3438 goto pcpu_alloc;
3439 }
3440
3441 if (cfg->fc_flags & RTF_GATEWAY) {
3442 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3443 if (err)
3444 goto out;
3445
3446 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3447 fib6_nh->fib_nh_gw_family = AF_INET6;
3448 }
3449
3450 err = -ENODEV;
3451 if (!dev)
3452 goto out;
3453
3454 if (idev->cnf.disable_ipv6) {
3455 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3456 err = -EACCES;
3457 goto out;
3458 }
3459
3460 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3461 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3462 err = -ENETDOWN;
3463 goto out;
3464 }
3465
3466 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3467 !netif_carrier_ok(dev))
3468 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3469
3470 err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
3471 cfg->fc_encap_type, cfg, gfp_flags, extack);
3472 if (err)
3473 goto out;
3474
3475pcpu_alloc:
3476 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3477 if (!fib6_nh->rt6i_pcpu) {
3478 err = -ENOMEM;
3479 goto out;
3480 }
3481
3482 fib6_nh->fib_nh_dev = dev;
3483 fib6_nh->fib_nh_oif = dev->ifindex;
3484 err = 0;
3485out:
3486 if (idev)
3487 in6_dev_put(idev);
3488
3489 if (err) {
3490 lwtstate_put(fib6_nh->fib_nh_lws);
3491 fib6_nh->fib_nh_lws = NULL;
3492 if (dev)
3493 dev_put(dev);
3494 }
3495
3496 return err;
3497}
3498
3499void fib6_nh_release(struct fib6_nh *fib6_nh)
3500{
3501 struct rt6_exception_bucket *bucket;
3502
3503 rcu_read_lock();
3504
3505 fib6_nh_flush_exceptions(fib6_nh, NULL);
3506 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3507 if (bucket) {
3508 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3509 kfree(bucket);
3510 }
3511
3512 rcu_read_unlock();
3513
3514 if (fib6_nh->rt6i_pcpu) {
3515 int cpu;
3516
3517 for_each_possible_cpu(cpu) {
3518 struct rt6_info **ppcpu_rt;
3519 struct rt6_info *pcpu_rt;
3520
3521 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3522 pcpu_rt = *ppcpu_rt;
3523 if (pcpu_rt) {
3524 dst_dev_put(&pcpu_rt->dst);
3525 dst_release(&pcpu_rt->dst);
3526 *ppcpu_rt = NULL;
3527 }
3528 }
3529
3530 free_percpu(fib6_nh->rt6i_pcpu);
3531 }
3532
3533 fib_nh_common_release(&fib6_nh->nh_common);
3534}
3535
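/* Build a fib6_info from a fib6_config without inserting it into the
 * FIB: reject internal-only flags (RTF_PCPU, RTF_CACHE), validate prefix
 * lengths, resolve the table and the optional nexthop object (fc_nh_id),
 * set up metrics, expiry and the embedded fib6_nh, and verify that any
 * preferred source address is configured on the egress device. The
 * caller - e.g. ip6_route_add() - inserts the route and drops the
 * returned reference.
 */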
3536static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3537 gfp_t gfp_flags,
3538 struct netlink_ext_ack *extack)
3539{
3540 struct net *net = cfg->fc_nlinfo.nl_net;
3541 struct fib6_info *rt = NULL;
3542 struct nexthop *nh = NULL;
3543 struct fib6_table *table;
3544 struct fib6_nh *fib6_nh;
3545 int err = -EINVAL;
3546 int addr_type;
3547
3548 /* RTF_PCPU is an internal flag; can not be set by userspace */
3549 if (cfg->fc_flags & RTF_PCPU) {
3550 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3551 goto out;
3552 }
3553
3554 /* RTF_CACHE is an internal flag; can not be set by userspace */
3555 if (cfg->fc_flags & RTF_CACHE) {
3556 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3557 goto out;
3558 }
3559
3560 if (cfg->fc_type > RTN_MAX) {
3561 NL_SET_ERR_MSG(extack, "Invalid route type");
3562 goto out;
3563 }
3564
3565 if (cfg->fc_dst_len > 128) {
3566 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3567 goto out;
3568 }
3569 if (cfg->fc_src_len > 128) {
3570 NL_SET_ERR_MSG(extack, "Invalid source address length");
3571 goto out;
3572 }
3573#ifndef CONFIG_IPV6_SUBTREES
3574 if (cfg->fc_src_len) {
3575 NL_SET_ERR_MSG(extack,
3576 "Specifying source address requires IPV6_SUBTREES to be enabled");
3577 goto out;
3578 }
3579#endif
3580 if (cfg->fc_nh_id) {
3581 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3582 if (!nh) {
3583 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3584 goto out;
3585 }
3586 err = fib6_check_nexthop(nh, cfg, extack);
3587 if (err)
3588 goto out;
3589 }
3590
3591 err = -ENOBUFS;
3592 if (cfg->fc_nlinfo.nlh &&
3593 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3594 table = fib6_get_table(net, cfg->fc_table);
3595 if (!table) {
3596 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3597 table = fib6_new_table(net, cfg->fc_table);
3598 }
3599 } else {
3600 table = fib6_new_table(net, cfg->fc_table);
3601 }
3602
3603 if (!table)
3604 goto out;
3605
3606 err = -ENOMEM;
3607 rt = fib6_info_alloc(gfp_flags, !nh);
3608 if (!rt)
3609 goto out;
3610
3611 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3612 extack);
3613 if (IS_ERR(rt->fib6_metrics)) {
3614 err = PTR_ERR(rt->fib6_metrics);
3615 /* Do not leave garbage there. */
3616 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3617 goto out;
3618 }
3619
3620 if (cfg->fc_flags & RTF_ADDRCONF)
3621 rt->dst_nocount = true;
3622
3623 if (cfg->fc_flags & RTF_EXPIRES)
3624 fib6_set_expires(rt, jiffies +
3625 clock_t_to_jiffies(cfg->fc_expires));
3626 else
3627 fib6_clean_expires(rt);
3628
3629 if (cfg->fc_protocol == RTPROT_UNSPEC)
3630 cfg->fc_protocol = RTPROT_BOOT;
3631 rt->fib6_protocol = cfg->fc_protocol;
3632
3633 rt->fib6_table = table;
3634 rt->fib6_metric = cfg->fc_metric;
3635 rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3636 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3637
3638 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3639 rt->fib6_dst.plen = cfg->fc_dst_len;
3640 if (rt->fib6_dst.plen == 128)
3641 rt->dst_host = true;
3642
3643#ifdef CONFIG_IPV6_SUBTREES
3644 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3645 rt->fib6_src.plen = cfg->fc_src_len;
3646#endif
3647 if (nh) {
3648 if (!nexthop_get(nh)) {
3649 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3650 goto out;
3651 }
3652 if (rt->fib6_src.plen) {
3653 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3654 goto out;
3655 }
3656 rt->nh = nh;
3657 fib6_nh = nexthop_fib6_nh(rt->nh);
3658 } else {
3659 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3660 if (err)
3661 goto out;
3662
3663 fib6_nh = rt->fib6_nh;
3664
3665 /* We cannot add true routes via loopback here, they would
3666 * result in kernel looping; promote them to reject routes
3667 */
3668 addr_type = ipv6_addr_type(&cfg->fc_dst);
3669 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3670 addr_type))
3671 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3672 }
3673
3674 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3675 struct net_device *dev = fib6_nh->fib_nh_dev;
3676
3677 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3678 NL_SET_ERR_MSG(extack, "Invalid source address");
3679 err = -EINVAL;
3680 goto out;
3681 }
3682 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3683 rt->fib6_prefsrc.plen = 128;
3684 } else
3685 rt->fib6_prefsrc.plen = 0;
3686
3687 return rt;
3688out:
3689 fib6_info_release(rt);
3690 return ERR_PTR(err);
3691}
3692
3693int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3694 struct netlink_ext_ack *extack)
3695{
3696 struct fib6_info *rt;
3697 int err;
3698
3699 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3700 if (IS_ERR(rt))
3701 return PTR_ERR(rt);
3702
3703 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3704 fib6_info_release(rt);
3705
3706 return err;
3707}
3708
3709static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3710{
3711 struct net *net = info->nl_net;
3712 struct fib6_table *table;
3713 int err;
3714
3715 if (rt == net->ipv6.fib6_null_entry) {
3716 err = -ENOENT;
3717 goto out;
3718 }
3719
3720 table = rt->fib6_table;
3721 spin_lock_bh(&table->tb6_lock);
3722 err = fib6_del(rt, info);
3723 spin_unlock_bh(&table->tb6_lock);
3724
3725out:
3726 fib6_info_release(rt);
3727 return err;
3728}
3729
3730int ip6_del_rt(struct net *net, struct fib6_info *rt)
3731{
3732 struct nl_info info = { .nl_net = net };
3733
3734 return __ip6_del_rt(rt, &info);
3735}
3736
3737static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3738{
3739 struct nl_info *info = &cfg->fc_nlinfo;
3740 struct net *net = info->nl_net;
3741 struct sk_buff *skb = NULL;
3742 struct fib6_table *table;
3743 int err = -ENOENT;
3744
3745 if (rt == net->ipv6.fib6_null_entry)
3746 goto out_put;
3747 table = rt->fib6_table;
3748 spin_lock_bh(&table->tb6_lock);
3749
3750 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3751 struct fib6_info *sibling, *next_sibling;
3752
3753 /* prefer to send a single notification with all hops */
3754 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3755 if (skb) {
3756 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3757
3758 if (rt6_fill_node(net, skb, rt, NULL,
3759 NULL, NULL, 0, RTM_DELROUTE,
3760 info->portid, seq, 0) < 0) {
3761 kfree_skb(skb);
3762 skb = NULL;
3763 } else
3764 info->skip_notify = 1;
3765 }
3766
3767 info->skip_notify_kernel = 1;
3768 call_fib6_multipath_entry_notifiers(net,
3769 FIB_EVENT_ENTRY_DEL,
3770 rt,
3771 rt->fib6_nsiblings,
3772 NULL);
3773 list_for_each_entry_safe(sibling, next_sibling,
3774 &rt->fib6_siblings,
3775 fib6_siblings) {
3776 err = fib6_del(sibling, info);
3777 if (err)
3778 goto out_unlock;
3779 }
3780 }
3781
3782 err = fib6_del(rt, info);
3783out_unlock:
3784 spin_unlock_bh(&table->tb6_lock);
3785out_put:
3786 fib6_info_release(rt);
3787
3788 if (skb) {
3789 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3790 info->nlh, gfp_any());
3791 }
3792 return err;
3793}
3794
3795static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3796{
3797 int rc = -ESRCH;
3798
3799 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3800 goto out;
3801
3802 if (cfg->fc_flags & RTF_GATEWAY &&
3803 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3804 goto out;
3805
3806 rc = rt6_remove_exception_rt(rt);
3807out:
3808 return rc;
3809}
3810
3811static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3812 struct fib6_nh *nh)
3813{
3814 struct fib6_result res = {
3815 .f6i = rt,
3816 .nh = nh,
3817 };
3818 struct rt6_info *rt_cache;
3819
3820 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3821 if (rt_cache)
3822 return __ip6_del_cached_rt(rt_cache, cfg);
3823
3824 return 0;
3825}
3826
3827struct fib6_nh_del_cached_rt_arg {
3828 struct fib6_config *cfg;
3829 struct fib6_info *f6i;
3830};
3831
3832static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
3833{
3834 struct fib6_nh_del_cached_rt_arg *arg = _arg;
3835 int rc;
3836
3837 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
3838 return rc != -ESRCH ? rc : 0;
3839}
3840
3841static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
3842{
3843 struct fib6_nh_del_cached_rt_arg arg = {
3844 .cfg = cfg,
3845 .f6i = f6i
3846 };
3847
3848 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
3849}
3850
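/* Delete a route described by a fib6_config: RTF_CACHE requests only
 * remove matching exception-table entries, while ordinary requests walk
 * the located fib6_node and compare metric, protocol, device and gateway
 * before deleting either the single matching nexthop (when a gateway was
 * given) or the whole set of multipath siblings.
 */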
3851static int ip6_route_del(struct fib6_config *cfg,
3852 struct netlink_ext_ack *extack)
3853{
3854 struct fib6_table *table;
3855 struct fib6_info *rt;
3856 struct fib6_node *fn;
3857 int err = -ESRCH;
3858
3859 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3860 if (!table) {
3861 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3862 return err;
3863 }
3864
3865 rcu_read_lock();
3866
3867 fn = fib6_locate(&table->tb6_root,
3868 &cfg->fc_dst, cfg->fc_dst_len,
3869 &cfg->fc_src, cfg->fc_src_len,
3870 !(cfg->fc_flags & RTF_CACHE));
3871
3872 if (fn) {
3873 for_each_fib6_node_rt_rcu(fn) {
3874 struct fib6_nh *nh;
3875
3876 if (rt->nh && cfg->fc_nh_id &&
3877 rt->nh->id != cfg->fc_nh_id)
3878 continue;
3879
3880 if (cfg->fc_flags & RTF_CACHE) {
3881 int rc = 0;
3882
3883 if (rt->nh) {
3884 rc = ip6_del_cached_rt_nh(cfg, rt);
3885 } else if (cfg->fc_nh_id) {
3886 continue;
3887 } else {
3888 nh = rt->fib6_nh;
3889 rc = ip6_del_cached_rt(cfg, rt, nh);
3890 }
3891 if (rc != -ESRCH) {
3892 rcu_read_unlock();
3893 return rc;
3894 }
3895 continue;
3896 }
3897
3898 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3899 continue;
3900 if (cfg->fc_protocol &&
3901 cfg->fc_protocol != rt->fib6_protocol)
3902 continue;
3903
3904 if (rt->nh) {
3905 if (!fib6_info_hold_safe(rt))
3906 continue;
3907 rcu_read_unlock();
3908
3909 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3910 }
3911 if (cfg->fc_nh_id)
3912 continue;
3913
3914 nh = rt->fib6_nh;
3915 if (cfg->fc_ifindex &&
3916 (!nh->fib_nh_dev ||
3917 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3918 continue;
3919 if (cfg->fc_flags & RTF_GATEWAY &&
3920 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3921 continue;
3922 if (!fib6_info_hold_safe(rt))
3923 continue;
3924 rcu_read_unlock();
3925
3926 /* if a gateway was specified, only delete that one hop */
3927 if (cfg->fc_flags & RTF_GATEWAY)
3928 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3929
3930 return __ip6_del_rt_siblings(rt, cfg);
3931 }
3932 }
3933 rcu_read_unlock();
3934
3935 return err;
3936}
3937
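/* Handle a received ndisc Redirect: validate the message and its ND
 * options, make sure redirects are accepted on the interface and that
 * the target is a link-local unicast address (or equals the destination
 * for on-link redirects), update the neighbour cache for the new first
 * hop, then install an RTF_CACHE exception pointing at it and notify
 * NETEVENT_REDIRECT listeners.
 */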
3938static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3939{
3940 struct netevent_redirect netevent;
3941 struct rt6_info *rt, *nrt = NULL;
3942 struct fib6_result res = {};
3943 struct ndisc_options ndopts;
3944 struct inet6_dev *in6_dev;
3945 struct neighbour *neigh;
3946 struct rd_msg *msg;
3947 int optlen, on_link;
3948 u8 *lladdr;
3949
3950 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
3951 optlen -= sizeof(*msg);
3952
3953 if (optlen < 0) {
3954 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
3955 return;
3956 }
3957
3958 msg = (struct rd_msg *)icmp6_hdr(skb);
3959
3960 if (ipv6_addr_is_multicast(&msg->dest)) {
3961 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
3962 return;
3963 }
3964
3965 on_link = 0;
3966 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3967 on_link = 1;
3968 } else if (ipv6_addr_type(&msg->target) !=
3969 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3970 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3971 return;
3972 }
3973
3974 in6_dev = __in6_dev_get(skb->dev);
3975 if (!in6_dev)
3976 return;
3977 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
3978 return;
3979
3980 /* RFC2461 8.1:
3981 * The IP source address of the Redirect MUST be the same as the current
3982 * first-hop router for the specified ICMP Destination Address.
3983 */
3984
3985 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3986 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3987 return;
3988 }
3989
3990 lladdr = NULL;
3991 if (ndopts.nd_opts_tgt_lladdr) {
3992 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3993 skb->dev);
3994 if (!lladdr) {
3995 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3996 return;
3997 }
3998 }
3999
4000 rt = (struct rt6_info *) dst;
4001 if (rt->rt6i_flags & RTF_REJECT) {
4002 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4003 return;
4004 }
4005
4006 /* Redirect received -> path was valid.
4007 * Look, redirects are sent only in response to data packets,
4008 * so this nexthop apparently is reachable. --ANK
4009 */
4010 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4011
4012 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4013 if (!neigh)
4014 return;
4015
4016 /*
4017 * We have finally decided to accept it.
4018 */
4019
4020 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4021 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4022 NEIGH_UPDATE_F_OVERRIDE|
4023 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4024 NEIGH_UPDATE_F_ISROUTER)),
4025 NDISC_REDIRECT, &ndopts);
4026
4027 rcu_read_lock();
4028 res.f6i = rcu_dereference(rt->from);
4029 if (!res.f6i)
4030 goto out;
4031
4032 if (res.f6i->nh) {
4033 struct fib6_nh_match_arg arg = {
4034 .dev = dst->dev,
4035 .gw = &rt->rt6i_gateway,
4036 };
4037
4038 nexthop_for_each_fib6_nh(res.f6i->nh,
4039 fib6_nh_find_match, &arg);
4040
4041 /* The fib6_info uses a nexthop object that has no fib6_nh
4042 * using the dst->dev. Should be impossible.
4043 */
4044 if (!arg.match)
4045 goto out;
4046 res.nh = arg.match;
4047 } else {
4048 res.nh = res.f6i->fib6_nh;
4049 }
4050
4051 res.fib6_flags = res.f6i->fib6_flags;
4052 res.fib6_type = res.f6i->fib6_type;
4053 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4054 if (!nrt)
4055 goto out;
4056
4057 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4058 if (on_link)
4059 nrt->rt6i_flags &= ~RTF_GATEWAY;
4060
4061 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4062
4063 /* rt6_insert_exception() will take care of duplicated exceptions */
4064 if (rt6_insert_exception(nrt, &res)) {
4065 dst_release_immediate(&nrt->dst);
4066 goto out;
4067 }
4068
4069 netevent.old = &rt->dst;
4070 netevent.new = &nrt->dst;
4071 netevent.daddr = &msg->dest;
4072 netevent.neigh = neigh;
4073 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4074
4075out:
4076 rcu_read_unlock();
4077 neigh_release(neigh);
4078}
4079
4080#ifdef CONFIG_IPV6_ROUTE_INFO
4081static struct fib6_info *rt6_get_route_info(struct net *net,
4082 const struct in6_addr *prefix, int prefixlen,
4083 const struct in6_addr *gwaddr,
4084 struct net_device *dev)
4085{
4086 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4087 int ifindex = dev->ifindex;
4088 struct fib6_node *fn;
4089 struct fib6_info *rt = NULL;
4090 struct fib6_table *table;
4091
4092 table = fib6_get_table(net, tb_id);
4093 if (!table)
4094 return NULL;
4095
4096 rcu_read_lock();
4097 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4098 if (!fn)
4099 goto out;
4100
4101 for_each_fib6_node_rt_rcu(fn) {
4102 /* these routes do not use nexthops */
4103 if (rt->nh)
4104 continue;
4105 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4106 continue;
4107 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4108 !rt->fib6_nh->fib_nh_gw_family)
4109 continue;
4110 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4111 continue;
4112 if (!fib6_info_hold_safe(rt))
4113 continue;
4114 break;
4115 }
4116out:
4117 rcu_read_unlock();
4118 return rt;
4119}
4120
4121static struct fib6_info *rt6_add_route_info(struct net *net,
4122 const struct in6_addr *prefix, int prefixlen,
4123 const struct in6_addr *gwaddr,
4124 struct net_device *dev,
4125 unsigned int pref)
4126{
4127 struct fib6_config cfg = {
4128 .fc_metric = IP6_RT_PRIO_USER,
4129 .fc_ifindex = dev->ifindex,
4130 .fc_dst_len = prefixlen,
4131 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4132 RTF_UP | RTF_PREF(pref),
4133 .fc_protocol = RTPROT_RA,
4134 .fc_type = RTN_UNICAST,
4135 .fc_nlinfo.portid = 0,
4136 .fc_nlinfo.nlh = NULL,
4137 .fc_nlinfo.nl_net = net,
4138 };
4139
4140 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
4141 cfg.fc_dst = *prefix;
4142 cfg.fc_gateway = *gwaddr;
4143
4144 /* We should treat it as a default route if prefix length is 0. */
4145 if (!prefixlen)
4146 cfg.fc_flags |= RTF_DEFAULT;
4147
4148 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4149
4150 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4151}
4152#endif
4153
4154struct fib6_info *rt6_get_dflt_router(struct net *net,
4155 const struct in6_addr *addr,
4156 struct net_device *dev)
4157{
4158 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4159 struct fib6_info *rt;
4160 struct fib6_table *table;
4161
4162 table = fib6_get_table(net, tb_id);
4163 if (!table)
4164 return NULL;
4165
4166 rcu_read_lock();
4167 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4168 struct fib6_nh *nh;
4169
4170 /* RA routes do not use nexthops */
4171 if (rt->nh)
4172 continue;
4173
4174 nh = rt->fib6_nh;
4175 if (dev == nh->fib_nh_dev &&
4176 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4177 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4178 break;
4179 }
4180 if (rt && !fib6_info_hold_safe(rt))
4181 rt = NULL;
4182 rcu_read_unlock();
4183 return rt;
4184}
4185
4186struct fib6_info *rt6_add_dflt_router(struct net *net,
4187 const struct in6_addr *gwaddr,
4188 struct net_device *dev,
4189 unsigned int pref)
4190{
4191 struct fib6_config cfg = {
4192 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4193 .fc_metric = IP6_RT_PRIO_USER,
4194 .fc_ifindex = dev->ifindex,
4195 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4196 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4197 .fc_protocol = RTPROT_RA,
4198 .fc_type = RTN_UNICAST,
4199 .fc_nlinfo.portid = 0,
4200 .fc_nlinfo.nlh = NULL,
4201 .fc_nlinfo.nl_net = net,
4202 };
4203
4204 cfg.fc_gateway = *gwaddr;
4205
4206 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4207 struct fib6_table *table;
4208
4209 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4210 if (table)
4211 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4212 }
4213
4214 return rt6_get_dflt_router(net, gwaddr, dev);
4215}
4216
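/* Flush RA-learned default routers from the given table, except on
 * interfaces where accept_ra == 2 (accept RAs even when forwarding
 * is enabled), as checked below.
 */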
4217static void __rt6_purge_dflt_routers(struct net *net,
4218 struct fib6_table *table)
4219{
4220 struct fib6_info *rt;
4221
4222restart:
4223 rcu_read_lock();
4224 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4225 struct net_device *dev = fib6_info_nh_dev(rt);
4226 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4227
4228 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4229 (!idev || idev->cnf.accept_ra != 2) &&
4230 fib6_info_hold_safe(rt)) {
4231 rcu_read_unlock();
4232 ip6_del_rt(net, rt);
4233 goto restart;
4234 }
4235 }
4236 rcu_read_unlock();
4237
4238 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4239}
4240
4241void rt6_purge_dflt_routers(struct net *net)
4242{
4243 struct fib6_table *table;
4244 struct hlist_head *head;
4245 unsigned int h;
4246
4247 rcu_read_lock();
4248
4249 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4250 head = &net->ipv6.fib_table_hash[h];
4251 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4252 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4253 __rt6_purge_dflt_routers(net, table);
4254 }
4255 }
4256
4257 rcu_read_unlock();
4258}
4259
4260static void rtmsg_to_fib6_config(struct net *net,
4261 struct in6_rtmsg *rtmsg,
4262 struct fib6_config *cfg)
4263{
4264 *cfg = (struct fib6_config){
4265 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4266 : RT6_TABLE_MAIN,
4267 .fc_ifindex = rtmsg->rtmsg_ifindex,
4268 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
4269 .fc_expires = rtmsg->rtmsg_info,
4270 .fc_dst_len = rtmsg->rtmsg_dst_len,
4271 .fc_src_len = rtmsg->rtmsg_src_len,
4272 .fc_flags = rtmsg->rtmsg_flags,
4273 .fc_type = rtmsg->rtmsg_type,
4274
4275 .fc_nlinfo.nl_net = net,
4276
4277 .fc_dst = rtmsg->rtmsg_dst,
4278 .fc_src = rtmsg->rtmsg_src,
4279 .fc_gateway = rtmsg->rtmsg_gateway,
4280 };
4281}
4282
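/* Legacy SIOCADDRT/SIOCDELRT ioctl interface (as used by the old
 * net-tools "route" command); rtnetlink (RTM_NEWROUTE/RTM_DELROUTE)
 * is the preferred interface for new code.
 */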
4283int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4284{
4285 struct fib6_config cfg;
4286 struct in6_rtmsg rtmsg;
4287 int err;
4288
4289 switch (cmd) {
4290 case SIOCADDRT: /* Add a route */
4291 case SIOCDELRT: /* Delete a route */
4292 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4293 return -EPERM;
4294 err = copy_from_user(&rtmsg, arg,
4295 sizeof(struct in6_rtmsg));
4296 if (err)
4297 return -EFAULT;
4298
4299 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
4300
4301 rtnl_lock();
4302 switch (cmd) {
4303 case SIOCADDRT:
4304 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4305 break;
4306 case SIOCDELRT:
4307 err = ip6_route_del(&cfg, NULL);
4308 break;
4309 default:
4310 err = -EINVAL;
4311 }
4312 rtnl_unlock();
4313
4314 return err;
4315 }
4316
4317 return -EINVAL;
4318}
4319
4320/*
4321 * Drop the packet on the floor
4322 */
4323
4324static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4325{
4326 struct dst_entry *dst = skb_dst(skb);
4327 struct net *net = dev_net(dst->dev);
4328 struct inet6_dev *idev;
4329 int type;
4330
4331 if (netif_is_l3_master(skb->dev) &&
4332 dst->dev == net->loopback_dev)
4333 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4334 else
4335 idev = ip6_dst_idev(dst);
4336
4337 switch (ipstats_mib_noroutes) {
4338 case IPSTATS_MIB_INNOROUTES:
4339 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4340 if (type == IPV6_ADDR_ANY) {
4341 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4342 break;
4343 }
4344 /* FALLTHROUGH */
4345 case IPSTATS_MIB_OUTNOROUTES:
4346 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4347 break;
4348 }
4349
4350 /* Start over by dropping the dst for l3mdev case */
4351 if (netif_is_l3_master(skb->dev))
4352 skb_dst_drop(skb);
4353
4354 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4355 kfree_skb(skb);
4356 return 0;
4357}
4358
4359static int ip6_pkt_discard(struct sk_buff *skb)
4360{
4361 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4362}
4363
4364static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4365{
4366 skb->dev = skb_dst(skb)->dev;
4367 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4368}
4369
4370static int ip6_pkt_prohibit(struct sk_buff *skb)
4371{
4372 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4373}
4374
4375static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4376{
4377 skb->dev = skb_dst(skb)->dev;
4378 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4379}
4380
4381/*
4382 * Allocate a dst for local (unicast / anycast) address.
4383 */
4384
4385struct fib6_info *addrconf_f6i_alloc(struct net *net,
4386 struct inet6_dev *idev,
4387 const struct in6_addr *addr,
4388 bool anycast, gfp_t gfp_flags)
4389{
4390 struct fib6_config cfg = {
4391 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4392 .fc_ifindex = idev->dev->ifindex,
4393 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4394 .fc_dst = *addr,
4395 .fc_dst_len = 128,
4396 .fc_protocol = RTPROT_KERNEL,
4397 .fc_nlinfo.nl_net = net,
4398 .fc_ignore_dev_down = true,
4399 };
4400 struct fib6_info *f6i;
4401
4402 if (anycast) {
4403 cfg.fc_type = RTN_ANYCAST;
4404 cfg.fc_flags |= RTF_ANYCAST;
4405 } else {
4406 cfg.fc_type = RTN_LOCAL;
4407 cfg.fc_flags |= RTF_LOCAL;
4408 }
4409
4410 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4411 if (!IS_ERR(f6i))
4412 f6i->dst_nocount = true;
4413 return f6i;
4414}
4415
4416/* remove deleted ip from prefsrc entries */
4417struct arg_dev_net_ip {
4418 struct net_device *dev;
4419 struct net *net;
4420 struct in6_addr *addr;
4421};
4422
4423static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4424{
4425 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4426 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4427 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4428
4429 if (!rt->nh &&
4430 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4431 rt != net->ipv6.fib6_null_entry &&
4432 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4433 spin_lock_bh(&rt6_exception_lock);
4434 /* remove prefsrc entry */
4435 rt->fib6_prefsrc.plen = 0;
4436 spin_unlock_bh(&rt6_exception_lock);
4437 }
4438 return 0;
4439}
4440
4441void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4442{
4443 struct net *net = dev_net(ifp->idev->dev);
4444 struct arg_dev_net_ip adni = {
4445 .dev = ifp->idev->dev,
4446 .net = net,
4447 .addr = &ifp->addr,
4448 };
4449 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4450}
4451
4452#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4453
4454 /* Remove routers and update dst entries when the gateway turns into a host. */
4455static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4456{
4457 struct in6_addr *gateway = (struct in6_addr *)arg;
4458 struct fib6_nh *nh;
4459
4460 /* RA routes do not use nexthops */
4461 if (rt->nh)
4462 return 0;
4463
4464 nh = rt->fib6_nh;
4465 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4466 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4467 return -1;
4468
4469 /* Further clean up cached routes in exception table.
4470 * This is needed because cached route may have a different
4471 * gateway than its 'parent' in the case of an ip redirect.
4472 */
4473 fib6_nh_exceptions_clean_tohost(nh, gateway);
4474
4475 return 0;
4476}
4477
4478void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4479{
4480 fib6_clean_all(net, fib6_clean_tohost, gateway);
4481}
4482
4483struct arg_netdev_event {
4484 const struct net_device *dev;
4485 union {
4486 unsigned char nh_flags;
4487 unsigned long event;
4488 };
4489};
4490
4491static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4492{
4493 struct fib6_info *iter;
4494 struct fib6_node *fn;
4495
4496 fn = rcu_dereference_protected(rt->fib6_node,
4497 lockdep_is_held(&rt->fib6_table->tb6_lock));
4498 iter = rcu_dereference_protected(fn->leaf,
4499 lockdep_is_held(&rt->fib6_table->tb6_lock));
4500 while (iter) {
4501 if (iter->fib6_metric == rt->fib6_metric &&
4502 rt6_qualify_for_ecmp(iter))
4503 return iter;
4504 iter = rcu_dereference_protected(iter->fib6_next,
4505 lockdep_is_held(&rt->fib6_table->tb6_lock));
4506 }
4507
4508 return NULL;
4509}
4510
4511/* only called for fib entries with builtin fib6_nh */
4512static bool rt6_is_dead(const struct fib6_info *rt)
4513{
4514 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4515 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4516 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4517 return true;
4518
4519 return false;
4520}
4521
4522static int rt6_multipath_total_weight(const struct fib6_info *rt)
4523{
4524 struct fib6_info *iter;
4525 int total = 0;
4526
4527 if (!rt6_is_dead(rt))
4528 total += rt->fib6_nh->fib_nh_weight;
4529
4530 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4531 if (!rt6_is_dead(iter))
4532 total += iter->fib6_nh->fib_nh_weight;
4533 }
4534
4535 return total;
4536}
4537
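/* Multipath selection uses a hash-threshold scheme: each live nexthop
 * gets an upper bound equal to its cumulative share of the total weight,
 * scaled to the 31-bit hash space. As an illustrative example, two
 * nexthops with weights 1 and 2 (total 3) end up with upper bounds of
 * 0x2AAAAAAA and 0x7FFFFFFF; the flow hash is compared against these
 * bounds in sibling order. Dead nexthops keep a bound of -1, so they
 * are never selected.
 */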
4538static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4539{
4540 int upper_bound = -1;
4541
4542 if (!rt6_is_dead(rt)) {
4543 *weight += rt->fib6_nh->fib_nh_weight;
4544 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4545 total) - 1;
4546 }
4547 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4548}
4549
4550static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4551{
4552 struct fib6_info *iter;
4553 int weight = 0;
4554
4555 rt6_upper_bound_set(rt, &weight, total);
4556
4557 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4558 rt6_upper_bound_set(iter, &weight, total);
4559}
4560
4561void rt6_multipath_rebalance(struct fib6_info *rt)
4562{
4563 struct fib6_info *first;
4564 int total;
4565
4566 /* If the entire multipath route was marked for flushing,
4567 * there is no need to rebalance upon the removal of every
4568 * sibling route.
4569 */
4570 if (!rt->fib6_nsiblings || rt->should_flush)
4571 return;
4572
4573 /* During lookup routes are evaluated in order, so we need to
4574 * make sure upper bounds are assigned from the first sibling
4575 * onwards.
4576 */
4577 first = rt6_multipath_first_sibling(rt);
4578 if (WARN_ON_ONCE(!first))
4579 return;
4580
4581 total = rt6_multipath_total_weight(first);
4582 rt6_multipath_upper_bound_set(first, total);
4583}
4584
4585static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4586{
4587 const struct arg_netdev_event *arg = p_arg;
4588 struct net *net = dev_net(arg->dev);
4589
4590 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4591 rt->fib6_nh->fib_nh_dev == arg->dev) {
4592 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4593 fib6_update_sernum_upto_root(net, rt);
4594 rt6_multipath_rebalance(rt);
4595 }
4596
4597 return 0;
4598}
4599
4600void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4601{
4602 struct arg_netdev_event arg = {
4603 .dev = dev,
4604 {
4605 .nh_flags = nh_flags,
4606 },
4607 };
4608
4609 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4610 arg.nh_flags |= RTNH_F_LINKDOWN;
4611
4612 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4613}
4614
4615/* only called for fib entries with inline fib6_nh */
4616static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4617 const struct net_device *dev)
4618{
4619 struct fib6_info *iter;
4620
4621 if (rt->fib6_nh->fib_nh_dev == dev)
4622 return true;
4623 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4624 if (iter->fib6_nh->fib_nh_dev == dev)
4625 return true;
4626
4627 return false;
4628}
4629
4630static void rt6_multipath_flush(struct fib6_info *rt)
4631{
4632 struct fib6_info *iter;
4633
4634 rt->should_flush = 1;
4635 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4636 iter->should_flush = 1;
4637}
4638
4639static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4640 const struct net_device *down_dev)
4641{
4642 struct fib6_info *iter;
4643 unsigned int dead = 0;
4644
4645 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4646 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4647 dead++;
4648 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4649 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4650 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4651 dead++;
4652
4653 return dead;
4654}
4655
4656static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4657 const struct net_device *dev,
4658 unsigned char nh_flags)
4659{
4660 struct fib6_info *iter;
4661
4662 if (rt->fib6_nh->fib_nh_dev == dev)
4663 rt->fib6_nh->fib_nh_flags |= nh_flags;
4664 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4665 if (iter->fib6_nh->fib_nh_dev == dev)
4666 iter->fib6_nh->fib_nh_flags |= nh_flags;
4667}
4668
4669/* called with write lock held for table with rt */
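/* The return value is interpreted by the fib6 tree walker
 * (fib6_clean_node()): -1 requests deletion of this route, -2
 * (multipath only) additionally makes the walker skip the remaining
 * siblings, and 0 keeps the route.
 */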
4670static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4671{
4672 const struct arg_netdev_event *arg = p_arg;
4673 const struct net_device *dev = arg->dev;
4674 struct net *net = dev_net(dev);
4675
4676 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4677 return 0;
4678
4679 switch (arg->event) {
4680 case NETDEV_UNREGISTER:
4681 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4682 case NETDEV_DOWN:
4683 if (rt->should_flush)
4684 return -1;
4685 if (!rt->fib6_nsiblings)
4686 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4687 if (rt6_multipath_uses_dev(rt, dev)) {
4688 unsigned int count;
4689
4690 count = rt6_multipath_dead_count(rt, dev);
4691 if (rt->fib6_nsiblings + 1 == count) {
4692 rt6_multipath_flush(rt);
4693 return -1;
4694 }
4695 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4696 RTNH_F_LINKDOWN);
4697 fib6_update_sernum(net, rt);
4698 rt6_multipath_rebalance(rt);
4699 }
4700 return -2;
4701 case NETDEV_CHANGE:
4702 if (rt->fib6_nh->fib_nh_dev != dev ||
4703 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4704 break;
4705 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4706 rt6_multipath_rebalance(rt);
4707 break;
4708 }
4709
4710 return 0;
4711}
4712
4713void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4714{
4715 struct arg_netdev_event arg = {
4716 .dev = dev,
4717 {
4718 .event = event,
4719 },
4720 };
4721 struct net *net = dev_net(dev);
4722
4723 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4724 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4725 else
4726 fib6_clean_all(net, fib6_ifdown, &arg);
4727}
4728
4729void rt6_disable_ip(struct net_device *dev, unsigned long event)
4730{
4731 rt6_sync_down_dev(dev, event);
4732 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4733 neigh_ifdown(&nd_tbl, dev);
4734}
4735
4736struct rt6_mtu_change_arg {
4737 struct net_device *dev;
4738 unsigned int mtu;
4739 struct fib6_info *f6i;
4740};
4741
4742static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4743{
4744 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4745 struct fib6_info *f6i = arg->f6i;
4746
4747 /* For an administrative MTU increase, there is no way to discover
4748 * an IPv6 PMTU increase, so the PMTU has to be updated here.
4749 * Since RFC 1981 doesn't cover administrative MTU increases,
4750 * updating the PMTU on such an increase is a MUST (e.g. jumbo frames).
4751 */
4752 if (nh->fib_nh_dev == arg->dev) {
4753 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4754 u32 mtu = f6i->fib6_pmtu;
4755
4756 if (mtu >= arg->mtu ||
4757 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4758 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4759
4760 spin_lock_bh(&rt6_exception_lock);
4761 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4762 spin_unlock_bh(&rt6_exception_lock);
4763 }
4764
4765 return 0;
4766}
4767
4768static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4769{
4770 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4771 struct inet6_dev *idev;
4772
4773 /* In IPv6, PMTU discovery is not optional,
4774 so the RTAX_MTU lock cannot disable it.
4775 We still use this lock to block changes
4776 caused by addrconf/ndisc.
4777 */
4778
4779 idev = __in6_dev_get(arg->dev);
4780 if (!idev)
4781 return 0;
4782
4783 if (fib6_metric_locked(f6i, RTAX_MTU))
4784 return 0;
4785
4786 arg->f6i = f6i;
4787 if (f6i->nh) {
4788 /* fib6_nh_mtu_change only returns 0, so this is safe */
4789 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4790 arg);
4791 }
4792
4793 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4794}
4795
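/* Invoked (typically from addrconf on NETDEV_CHANGEMTU) when a device's
 * MTU changes; walks all FIB entries so that route MTU metrics and
 * cached PMTU exceptions for that device can be updated.
 */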
4796void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4797{
4798 struct rt6_mtu_change_arg arg = {
4799 .dev = dev,
4800 .mtu = mtu,
4801 };
4802
4803 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4804}
4805
4806static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4807 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4808 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4809 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4810 [RTA_OIF] = { .type = NLA_U32 },
4811 [RTA_IIF] = { .type = NLA_U32 },
4812 [RTA_PRIORITY] = { .type = NLA_U32 },
4813 [RTA_METRICS] = { .type = NLA_NESTED },
4814 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4815 [RTA_PREF] = { .type = NLA_U8 },
4816 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4817 [RTA_ENCAP] = { .type = NLA_NESTED },
4818 [RTA_EXPIRES] = { .type = NLA_U32 },
4819 [RTA_UID] = { .type = NLA_U32 },
4820 [RTA_MARK] = { .type = NLA_U32 },
4821 [RTA_TABLE] = { .type = NLA_U32 },
4822 [RTA_IP_PROTO] = { .type = NLA_U8 },
4823 [RTA_SPORT] = { .type = NLA_U16 },
4824 [RTA_DPORT] = { .type = NLA_U16 },
4825 [RTA_NH_ID] = { .type = NLA_U32 },
4826};
4827
4828static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4829 struct fib6_config *cfg,
4830 struct netlink_ext_ack *extack)
4831{
4832 struct rtmsg *rtm;
4833 struct nlattr *tb[RTA_MAX+1];
4834 unsigned int pref;
4835 int err;
4836
4837 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4838 rtm_ipv6_policy, extack);
4839 if (err < 0)
4840 goto errout;
4841
4842 err = -EINVAL;
4843 rtm = nlmsg_data(nlh);
4844
4845 *cfg = (struct fib6_config){
4846 .fc_table = rtm->rtm_table,
4847 .fc_dst_len = rtm->rtm_dst_len,
4848 .fc_src_len = rtm->rtm_src_len,
4849 .fc_flags = RTF_UP,
4850 .fc_protocol = rtm->rtm_protocol,
4851 .fc_type = rtm->rtm_type,
4852
4853 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4854 .fc_nlinfo.nlh = nlh,
4855 .fc_nlinfo.nl_net = sock_net(skb->sk),
4856 };
4857
4858 if (rtm->rtm_type == RTN_UNREACHABLE ||
4859 rtm->rtm_type == RTN_BLACKHOLE ||
4860 rtm->rtm_type == RTN_PROHIBIT ||
4861 rtm->rtm_type == RTN_THROW)
4862 cfg->fc_flags |= RTF_REJECT;
4863
4864 if (rtm->rtm_type == RTN_LOCAL)
4865 cfg->fc_flags |= RTF_LOCAL;
4866
4867 if (rtm->rtm_flags & RTM_F_CLONED)
4868 cfg->fc_flags |= RTF_CACHE;
4869
4870 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4871
4872 if (tb[RTA_NH_ID]) {
4873 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
4874 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
4875 NL_SET_ERR_MSG(extack,
4876 "Nexthop specification and nexthop id are mutually exclusive");
4877 goto errout;
4878 }
4879 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
4880 }
4881
4882 if (tb[RTA_GATEWAY]) {
4883 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4884 cfg->fc_flags |= RTF_GATEWAY;
4885 }
4886 if (tb[RTA_VIA]) {
4887 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4888 goto errout;
4889 }
4890
4891 if (tb[RTA_DST]) {
4892 int plen = (rtm->rtm_dst_len + 7) >> 3;
4893
4894 if (nla_len(tb[RTA_DST]) < plen)
4895 goto errout;
4896
4897 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4898 }
4899
4900 if (tb[RTA_SRC]) {
4901 int plen = (rtm->rtm_src_len + 7) >> 3;
4902
4903 if (nla_len(tb[RTA_SRC]) < plen)
4904 goto errout;
4905
4906 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4907 }
4908
4909 if (tb[RTA_PREFSRC])
4910 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4911
4912 if (tb[RTA_OIF])
4913 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4914
4915 if (tb[RTA_PRIORITY])
4916 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4917
4918 if (tb[RTA_METRICS]) {
4919 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4920 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4921 }
4922
4923 if (tb[RTA_TABLE])
4924 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4925
4926 if (tb[RTA_MULTIPATH]) {
4927 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4928 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4929
4930 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4931 cfg->fc_mp_len, extack);
4932 if (err < 0)
4933 goto errout;
4934 }
4935
4936 if (tb[RTA_PREF]) {
4937 pref = nla_get_u8(tb[RTA_PREF]);
4938 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4939 pref != ICMPV6_ROUTER_PREF_HIGH)
4940 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4941 cfg->fc_flags |= RTF_PREF(pref);
4942 }
4943
4944 if (tb[RTA_ENCAP])
4945 cfg->fc_encap = tb[RTA_ENCAP];
4946
4947 if (tb[RTA_ENCAP_TYPE]) {
4948 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4949
4950 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
4951 if (err < 0)
4952 goto errout;
4953 }
4954
4955 if (tb[RTA_EXPIRES]) {
4956 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4957
4958 if (addrconf_finite_timeout(timeout)) {
4959 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4960 cfg->fc_flags |= RTF_EXPIRES;
4961 }
4962 }
4963
4964 err = 0;
4965errout:
4966 return err;
4967}
4968
4969struct rt6_nh {
4970 struct fib6_info *fib6_info;
4971 struct fib6_config r_cfg;
4972 struct list_head next;
4973};
4974
4975static int ip6_route_info_append(struct net *net,
4976 struct list_head *rt6_nh_list,
4977 struct fib6_info *rt,
4978 struct fib6_config *r_cfg)
4979{
4980 struct rt6_nh *nh;
4981 int err = -EEXIST;
4982
4983 list_for_each_entry(nh, rt6_nh_list, next) {
4984 /* check if fib6_info already exists */
4985 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
4986 return err;
4987 }
4988
4989 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4990 if (!nh)
4991 return -ENOMEM;
4992 nh->fib6_info = rt;
4993 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4994 list_add_tail(&nh->next, rt6_nh_list);
4995
4996 return 0;
4997}
4998
4999static void ip6_route_mpath_notify(struct fib6_info *rt,
5000 struct fib6_info *rt_last,
5001 struct nl_info *info,
5002 __u16 nlflags)
5003{
5004 /* if this is an APPEND route, then rt points to the first route
5005 * inserted and rt_last points to last route inserted. Userspace
5006 * wants a consistent dump of the route which starts at the first
5007 * nexthop. Since sibling routes are always added at the end of
5008 * the list, find the first sibling of the last route appended
5009 */
5010 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5011 rt = list_first_entry(&rt_last->fib6_siblings,
5012 struct fib6_info,
5013 fib6_siblings);
5014 }
5015
5016 if (rt)
5017 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5018}
5019
5020static int ip6_route_multipath_add(struct fib6_config *cfg,
5021 struct netlink_ext_ack *extack)
5022{
5023 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5024 struct nl_info *info = &cfg->fc_nlinfo;
5025 enum fib_event_type event_type;
5026 struct fib6_config r_cfg;
5027 struct rtnexthop *rtnh;
5028 struct fib6_info *rt;
5029 struct rt6_nh *err_nh;
5030 struct rt6_nh *nh, *nh_safe;
5031 __u16 nlflags;
5032 int remaining;
5033 int attrlen;
5034 int err = 1;
5035 int nhn = 0;
5036 int replace = (cfg->fc_nlinfo.nlh &&
5037 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5038 LIST_HEAD(rt6_nh_list);
5039
5040 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5041 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5042 nlflags |= NLM_F_APPEND;
5043
5044 remaining = cfg->fc_mp_len;
5045 rtnh = (struct rtnexthop *)cfg->fc_mp;
5046
5047 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5048 * fib6_info structs per nexthop
5049 */
5050 while (rtnh_ok(rtnh, remaining)) {
5051 memcpy(&r_cfg, cfg, sizeof(*cfg));
5052 if (rtnh->rtnh_ifindex)
5053 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5054
5055 attrlen = rtnh_attrlen(rtnh);
5056 if (attrlen > 0) {
5057 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5058
5059 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5060 if (nla) {
5061 r_cfg.fc_gateway = nla_get_in6_addr(nla);
5062 r_cfg.fc_flags |= RTF_GATEWAY;
5063 }
5064 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5065 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5066 if (nla)
5067 r_cfg.fc_encap_type = nla_get_u16(nla);
5068 }
5069
5070 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5071 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5072 if (IS_ERR(rt)) {
5073 err = PTR_ERR(rt);
5074 rt = NULL;
5075 goto cleanup;
5076 }
5077 if (!rt6_qualify_for_ecmp(rt)) {
5078 err = -EINVAL;
5079 NL_SET_ERR_MSG(extack,
5080 "Device only routes can not be added for IPv6 using the multipath API.");
5081 fib6_info_release(rt);
5082 goto cleanup;
5083 }
5084
5085 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5086
5087 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5088 rt, &r_cfg);
5089 if (err) {
5090 fib6_info_release(rt);
5091 goto cleanup;
5092 }
5093
5094 rtnh = rtnh_next(rtnh, &remaining);
5095 }
5096
5097 if (list_empty(&rt6_nh_list)) {
5098 NL_SET_ERR_MSG(extack,
5099 "Invalid nexthop configuration - no valid nexthops");
5100 return -EINVAL;
5101 }
5102
5103 /* For add and replace, send one notification with all nexthops.
5104 * Skip the notification in fib6_add_rt2node and send one with
5105 * the full route when done.
5106 */
5107 info->skip_notify = 1;
5108
5109 /* For add and replace, send one notification with all nexthops. For
5110 * append, send one notification with all appended nexthops.
5111 */
5112 info->skip_notify_kernel = 1;
5113
5114 err_nh = NULL;
5115 list_for_each_entry(nh, &rt6_nh_list, next) {
5116 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5117 fib6_info_release(nh->fib6_info);
5118
5119 if (!err) {
5120 /* save reference to last route successfully inserted */
5121 rt_last = nh->fib6_info;
5122
5123 /* save reference to first route for notification */
5124 if (!rt_notif)
5125 rt_notif = nh->fib6_info;
5126 }
5127
5128 /* nh->fib6_info is used or freed at this point, reset to NULL */
5129 nh->fib6_info = NULL;
5130 if (err) {
5131 if (replace && nhn)
5132 NL_SET_ERR_MSG_MOD(extack,
5133 "multipath route replace failed (check consistency of installed routes)");
5134 err_nh = nh;
5135 goto add_errout;
5136 }
5137
5138 /* Because each route is added like a single route, we remove
5139 * these flags after the first nexthop: if there is a collision,
5140 * we have already failed to add the first nexthop
5141 * (fib6_add_rt2node() has rejected it); when replacing, the old
5142 * nexthops have been replaced by the first new one, and the rest
5143 * should be appended to it.
5144 */
5145 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5146 NLM_F_REPLACE);
5147 nhn++;
5148 }
5149
5150 event_type = replace ? FIB_EVENT_ENTRY_REPLACE : FIB_EVENT_ENTRY_ADD;
5151 err = call_fib6_multipath_entry_notifiers(info->nl_net, event_type,
5152 rt_notif, nhn - 1, extack);
5153 if (err) {
5154 /* Delete all the siblings that were just added */
5155 err_nh = NULL;
5156 goto add_errout;
5157 }
5158
5159 /* success ... tell user about new route */
5160 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5161 goto cleanup;
5162
5163add_errout:
5164 /* send notification for routes that were added so that
5165 * the delete notifications sent by ip6_route_del are
5166 * coherent
5167 */
5168 if (rt_notif)
5169 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5170
5171 /* Delete routes that were already added */
5172 list_for_each_entry(nh, &rt6_nh_list, next) {
5173 if (err_nh == nh)
5174 break;
5175 ip6_route_del(&nh->r_cfg, extack);
5176 }
5177
5178cleanup:
5179 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5180 if (nh->fib6_info)
5181 fib6_info_release(nh->fib6_info);
5182 list_del(&nh->next);
5183 kfree(nh);
5184 }
5185
5186 return err;
5187}
5188
5189static int ip6_route_multipath_del(struct fib6_config *cfg,
5190 struct netlink_ext_ack *extack)
5191{
5192 struct fib6_config r_cfg;
5193 struct rtnexthop *rtnh;
5194 int remaining;
5195 int attrlen;
5196 int err = 1, last_err = 0;
5197
5198 remaining = cfg->fc_mp_len;
5199 rtnh = (struct rtnexthop *)cfg->fc_mp;
5200
5201 /* Parse a Multipath Entry */
5202 while (rtnh_ok(rtnh, remaining)) {
5203 memcpy(&r_cfg, cfg, sizeof(*cfg));
5204 if (rtnh->rtnh_ifindex)
5205 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5206
5207 attrlen = rtnh_attrlen(rtnh);
5208 if (attrlen > 0) {
5209 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5210
5211 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5212 if (nla) {
5213 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
5214 r_cfg.fc_flags |= RTF_GATEWAY;
5215 }
5216 }
5217 err = ip6_route_del(&r_cfg, extack);
5218 if (err)
5219 last_err = err;
5220
5221 rtnh = rtnh_next(rtnh, &remaining);
5222 }
5223
5224 return last_err;
5225}
5226
5227static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5228 struct netlink_ext_ack *extack)
5229{
5230 struct fib6_config cfg;
5231 int err;
5232
5233 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5234 if (err < 0)
5235 return err;
5236
5237 if (cfg.fc_nh_id &&
5238 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5239 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5240 return -EINVAL;
5241 }
5242
5243 if (cfg.fc_mp)
5244 return ip6_route_multipath_del(&cfg, extack);
5245 else {
5246 cfg.fc_delete_all_nh = 1;
5247 return ip6_route_del(&cfg, extack);
5248 }
5249}
5250
5251static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5252 struct netlink_ext_ack *extack)
5253{
5254 struct fib6_config cfg;
5255 int err;
5256
5257 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5258 if (err < 0)
5259 return err;
5260
5261 if (cfg.fc_metric == 0)
5262 cfg.fc_metric = IP6_RT_PRIO_USER;
5263
5264 if (cfg.fc_mp)
5265 return ip6_route_multipath_add(&cfg, extack);
5266 else
5267 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5268}
5269
5270/* add the overhead of this fib6_nh to nexthop_len */
5271static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5272{
5273 int *nexthop_len = arg;
5274
5275 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5276 + NLA_ALIGN(sizeof(struct rtnexthop))
5277 + nla_total_size(16); /* RTA_GATEWAY */
5278
5279 if (nh->fib_nh_lws) {
5280 /* RTA_ENCAP */
5281 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5282 /* RTA_ENCAP_TYPE */
5283 *nexthop_len += nla_total_size(2);
5284 }
5285
5286 return 0;
5287}
5288
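/* Worst-case netlink message size for this route, used to size the skb
 * allocated in inet6_rt_notify() and fib6_rt_update(). Underestimating
 * here shows up as the -EMSGSIZE WARN_ON in those callers.
 */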
5289static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5290{
5291 int nexthop_len;
5292
5293 if (f6i->nh) {
5294 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5295 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5296 &nexthop_len);
5297 } else {
5298 struct fib6_nh *nh = f6i->fib6_nh;
5299
5300 nexthop_len = 0;
5301 if (f6i->fib6_nsiblings) {
5302 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
5303 + NLA_ALIGN(sizeof(struct rtnexthop))
5304 + nla_total_size(16) /* RTA_GATEWAY */
5305 + lwtunnel_get_encap_size(nh->fib_nh_lws);
5306
5307 nexthop_len *= f6i->fib6_nsiblings;
5308 }
5309 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5310 }
5311
5312 return NLMSG_ALIGN(sizeof(struct rtmsg))
5313 + nla_total_size(16) /* RTA_SRC */
5314 + nla_total_size(16) /* RTA_DST */
5315 + nla_total_size(16) /* RTA_GATEWAY */
5316 + nla_total_size(16) /* RTA_PREFSRC */
5317 + nla_total_size(4) /* RTA_TABLE */
5318 + nla_total_size(4) /* RTA_IIF */
5319 + nla_total_size(4) /* RTA_OIF */
5320 + nla_total_size(4) /* RTA_PRIORITY */
5321 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5322 + nla_total_size(sizeof(struct rta_cacheinfo))
5323 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5324 + nla_total_size(1) /* RTA_PREF */
5325 + nexthop_len;
5326}
5327
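/* Fill nexthop info for a route using an external nexthop object:
 * either an RTA_MULTIPATH attribute for a nexthop group, or flat
 * nexthop attributes for a single fib6_nh.
 */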
5328static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5329 unsigned char *flags)
5330{
5331 if (nexthop_is_multipath(nh)) {
5332 struct nlattr *mp;
5333
5334 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5335 if (!mp)
5336 goto nla_put_failure;
5337
5338 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5339 goto nla_put_failure;
5340
5341 nla_nest_end(skb, mp);
5342 } else {
5343 struct fib6_nh *fib6_nh;
5344
5345 fib6_nh = nexthop_fib6_nh(nh);
5346 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5347 flags, false) < 0)
5348 goto nla_put_failure;
5349 }
5350
5351 return 0;
5352
5353nla_put_failure:
5354 return -EMSGSIZE;
5355}
5356
5357static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5358 struct fib6_info *rt, struct dst_entry *dst,
5359 struct in6_addr *dest, struct in6_addr *src,
5360 int iif, int type, u32 portid, u32 seq,
5361 unsigned int flags)
5362{
5363 struct rt6_info *rt6 = (struct rt6_info *)dst;
5364 struct rt6key *rt6_dst, *rt6_src;
5365 u32 *pmetrics, table, rt6_flags;
5366 unsigned char nh_flags = 0;
5367 struct nlmsghdr *nlh;
5368 struct rtmsg *rtm;
5369 long expires = 0;
5370
5371 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5372 if (!nlh)
5373 return -EMSGSIZE;
5374
5375 if (rt6) {
5376 rt6_dst = &rt6->rt6i_dst;
5377 rt6_src = &rt6->rt6i_src;
5378 rt6_flags = rt6->rt6i_flags;
5379 } else {
5380 rt6_dst = &rt->fib6_dst;
5381 rt6_src = &rt->fib6_src;
5382 rt6_flags = rt->fib6_flags;
5383 }
5384
5385 rtm = nlmsg_data(nlh);
5386 rtm->rtm_family = AF_INET6;
5387 rtm->rtm_dst_len = rt6_dst->plen;
5388 rtm->rtm_src_len = rt6_src->plen;
5389 rtm->rtm_tos = 0;
5390 if (rt->fib6_table)
5391 table = rt->fib6_table->tb6_id;
5392 else
5393 table = RT6_TABLE_UNSPEC;
5394 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5395 if (nla_put_u32(skb, RTA_TABLE, table))
5396 goto nla_put_failure;
5397
5398 rtm->rtm_type = rt->fib6_type;
5399 rtm->rtm_flags = 0;
5400 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5401 rtm->rtm_protocol = rt->fib6_protocol;
5402
5403 if (rt6_flags & RTF_CACHE)
5404 rtm->rtm_flags |= RTM_F_CLONED;
5405
5406 if (dest) {
5407 if (nla_put_in6_addr(skb, RTA_DST, dest))
5408 goto nla_put_failure;
5409 rtm->rtm_dst_len = 128;
5410 } else if (rtm->rtm_dst_len)
5411 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5412 goto nla_put_failure;
5413#ifdef CONFIG_IPV6_SUBTREES
5414 if (src) {
5415 if (nla_put_in6_addr(skb, RTA_SRC, src))
5416 goto nla_put_failure;
5417 rtm->rtm_src_len = 128;
5418 } else if (rtm->rtm_src_len &&
5419 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5420 goto nla_put_failure;
5421#endif
5422 if (iif) {
5423#ifdef CONFIG_IPV6_MROUTE
5424 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5425 int err = ip6mr_get_route(net, skb, rtm, portid);
5426
5427 if (err == 0)
5428 return 0;
5429 if (err < 0)
5430 goto nla_put_failure;
5431 } else
5432#endif
5433 if (nla_put_u32(skb, RTA_IIF, iif))
5434 goto nla_put_failure;
5435 } else if (dest) {
5436 struct in6_addr saddr_buf;
5437 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5438 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5439 goto nla_put_failure;
5440 }
5441
5442 if (rt->fib6_prefsrc.plen) {
5443 struct in6_addr saddr_buf;
5444 saddr_buf = rt->fib6_prefsrc.addr;
5445 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5446 goto nla_put_failure;
5447 }
5448
5449 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5450 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5451 goto nla_put_failure;
5452
5453 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5454 goto nla_put_failure;
5455
5456 /* For multipath routes, walk the siblings list and add
5457 * each as a nexthop within RTA_MULTIPATH.
5458 */
5459 if (rt6) {
5460 if (rt6_flags & RTF_GATEWAY &&
5461 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5462 goto nla_put_failure;
5463
5464 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5465 goto nla_put_failure;
5466 } else if (rt->fib6_nsiblings) {
5467 struct fib6_info *sibling, *next_sibling;
5468 struct nlattr *mp;
5469
5470 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5471 if (!mp)
5472 goto nla_put_failure;
5473
5474 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5475 rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
5476 goto nla_put_failure;
5477
5478 list_for_each_entry_safe(sibling, next_sibling,
5479 &rt->fib6_siblings, fib6_siblings) {
5480 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5481 sibling->fib6_nh->fib_nh_weight,
5482 AF_INET6) < 0)
5483 goto nla_put_failure;
5484 }
5485
5486 nla_nest_end(skb, mp);
5487 } else if (rt->nh) {
5488 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5489 goto nla_put_failure;
5490
5491 if (nexthop_is_blackhole(rt->nh))
5492 rtm->rtm_type = RTN_BLACKHOLE;
5493
5494 if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5495 goto nla_put_failure;
5496
5497 rtm->rtm_flags |= nh_flags;
5498 } else {
5499 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5500 &nh_flags, false) < 0)
5501 goto nla_put_failure;
5502
5503 rtm->rtm_flags |= nh_flags;
5504 }
5505
5506 if (rt6_flags & RTF_EXPIRES) {
5507 expires = dst ? dst->expires : rt->expires;
5508 expires -= jiffies;
5509 }
5510
5511 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5512 goto nla_put_failure;
5513
5514 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5515 goto nla_put_failure;
5516
5517
5518 nlmsg_end(skb, nlh);
5519 return 0;
5520
5521nla_put_failure:
5522 nlmsg_cancel(skb, nlh);
5523 return -EMSGSIZE;
5524}
5525
5526static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5527{
5528 const struct net_device *dev = arg;
5529
5530 if (nh->fib_nh_dev == dev)
5531 return 1;
5532
5533 return 0;
5534}
5535
5536static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5537 const struct net_device *dev)
5538{
5539 if (f6i->nh) {
5540 struct net_device *_dev = (struct net_device *)dev;
5541
5542 return !!nexthop_for_each_fib6_nh(f6i->nh,
5543 fib6_info_nh_uses_dev,
5544 _dev);
5545 }
5546
5547 if (f6i->fib6_nh->fib_nh_dev == dev)
5548 return true;
5549
5550 if (f6i->fib6_nsiblings) {
5551 struct fib6_info *sibling, *next_sibling;
5552
5553 list_for_each_entry_safe(sibling, next_sibling,
5554 &f6i->fib6_siblings, fib6_siblings) {
5555 if (sibling->fib6_nh->fib_nh_dev == dev)
5556 return true;
5557 }
5558 }
5559
5560 return false;
5561}
5562
5563struct fib6_nh_exception_dump_walker {
5564 struct rt6_rtnl_dump_arg *dump;
5565 struct fib6_info *rt;
5566 unsigned int flags;
5567 unsigned int skip;
5568 unsigned int count;
5569};
5570
5571static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5572{
5573 struct fib6_nh_exception_dump_walker *w = arg;
5574 struct rt6_rtnl_dump_arg *dump = w->dump;
5575 struct rt6_exception_bucket *bucket;
5576 struct rt6_exception *rt6_ex;
5577 int i, err;
5578
5579 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5580 if (!bucket)
5581 return 0;
5582
5583 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5584 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5585 if (w->skip) {
5586 w->skip--;
5587 continue;
5588 }
5589
5590 /* Expiration of entries doesn't bump sernum, insertion
5591 * does. Removal is triggered by insertion, so we can
5592 * rely on the fact that if entries change between two
5593 * partial dumps, this node is scanned again completely,
5594 * see rt6_insert_exception() and fib6_dump_table().
5595 *
5596 * Count expired entries we go through as handled
5597 * entries that we'll skip next time, in case of partial
5598 * node dump. Otherwise, if entries expire meanwhile,
5599 * we'll skip the wrong amount.
5600 */
5601 if (rt6_check_expired(rt6_ex->rt6i)) {
5602 w->count++;
5603 continue;
5604 }
5605
5606 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5607 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5608 RTM_NEWROUTE,
5609 NETLINK_CB(dump->cb->skb).portid,
5610 dump->cb->nlh->nlmsg_seq, w->flags);
5611 if (err)
5612 return err;
5613
5614 w->count++;
5615 }
5616 bucket++;
5617 }
5618
5619 return 0;
5620}
5621
5622 /* Return -1 if done with the node, or the number of handled routes on a partial dump */
5623int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5624{
5625 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5626 struct fib_dump_filter *filter = &arg->filter;
5627 unsigned int flags = NLM_F_MULTI;
5628 struct net *net = arg->net;
5629 int count = 0;
5630
5631 if (rt == net->ipv6.fib6_null_entry)
5632 return -1;
5633
5634 if ((filter->flags & RTM_F_PREFIX) &&
5635 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5636 /* success since this is not a prefix route */
5637 return -1;
5638 }
5639 if (filter->filter_set &&
5640 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5641 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5642 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5643 return -1;
5644 }
5645
5646 if (filter->filter_set ||
5647 !filter->dump_routes || !filter->dump_exceptions) {
5648 flags |= NLM_F_DUMP_FILTERED;
5649 }
5650
5651 if (filter->dump_routes) {
5652 if (skip) {
5653 skip--;
5654 } else {
5655 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5656 0, RTM_NEWROUTE,
5657 NETLINK_CB(arg->cb->skb).portid,
5658 arg->cb->nlh->nlmsg_seq, flags)) {
5659 return 0;
5660 }
5661 count++;
5662 }
5663 }
5664
5665 if (filter->dump_exceptions) {
5666 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5667 .rt = rt,
5668 .flags = flags,
5669 .skip = skip,
5670 .count = 0 };
5671 int err;
5672
5673 rcu_read_lock();
5674 if (rt->nh) {
5675 err = nexthop_for_each_fib6_nh(rt->nh,
5676 rt6_nh_dump_exceptions,
5677 &w);
5678 } else {
5679 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5680 }
5681 rcu_read_unlock();
5682
5683 if (err)
5684 return count += w.count;
5685 }
5686
5687 return -1;
5688}
5689
5690static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5691 const struct nlmsghdr *nlh,
5692 struct nlattr **tb,
5693 struct netlink_ext_ack *extack)
5694{
5695 struct rtmsg *rtm;
5696 int i, err;
5697
5698 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5699 NL_SET_ERR_MSG_MOD(extack,
5700 "Invalid header for get route request");
5701 return -EINVAL;
5702 }
5703
5704 if (!netlink_strict_get_check(skb))
5705 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5706 rtm_ipv6_policy, extack);
5707
5708 rtm = nlmsg_data(nlh);
5709 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5710 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5711 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5712 rtm->rtm_type) {
5713 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5714 return -EINVAL;
5715 }
5716 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5717 NL_SET_ERR_MSG_MOD(extack,
5718 "Invalid flags for get route request");
5719 return -EINVAL;
5720 }
5721
5722 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5723 rtm_ipv6_policy, extack);
5724 if (err)
5725 return err;
5726
5727 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5728 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5729 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5730 return -EINVAL;
5731 }
5732
5733 for (i = 0; i <= RTA_MAX; i++) {
5734 if (!tb[i])
5735 continue;
5736
5737 switch (i) {
5738 case RTA_SRC:
5739 case RTA_DST:
5740 case RTA_IIF:
5741 case RTA_OIF:
5742 case RTA_MARK:
5743 case RTA_UID:
5744 case RTA_SPORT:
5745 case RTA_DPORT:
5746 case RTA_IP_PROTO:
5747 break;
5748 default:
5749 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
5750 return -EINVAL;
5751 }
5752 }
5753
5754 return 0;
5755}
5756
5757static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5758 struct netlink_ext_ack *extack)
5759{
5760 struct net *net = sock_net(in_skb->sk);
5761 struct nlattr *tb[RTA_MAX+1];
5762 int err, iif = 0, oif = 0;
5763 struct fib6_info *from;
5764 struct dst_entry *dst;
5765 struct rt6_info *rt;
5766 struct sk_buff *skb;
5767 struct rtmsg *rtm;
5768 struct flowi6 fl6 = {};
5769 bool fibmatch;
5770
5771 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
5772 if (err < 0)
5773 goto errout;
5774
5775 err = -EINVAL;
5776 rtm = nlmsg_data(nlh);
5777 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
5778 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
5779
5780 if (tb[RTA_SRC]) {
5781 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
5782 goto errout;
5783
5784 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
5785 }
5786
5787 if (tb[RTA_DST]) {
5788 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
5789 goto errout;
5790
5791 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
5792 }
5793
5794 if (tb[RTA_IIF])
5795 iif = nla_get_u32(tb[RTA_IIF]);
5796
5797 if (tb[RTA_OIF])
5798 oif = nla_get_u32(tb[RTA_OIF]);
5799
5800 if (tb[RTA_MARK])
5801 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
5802
5803 if (tb[RTA_UID])
5804 fl6.flowi6_uid = make_kuid(current_user_ns(),
5805 nla_get_u32(tb[RTA_UID]));
5806 else
5807 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
5808
5809 if (tb[RTA_SPORT])
5810 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
5811
5812 if (tb[RTA_DPORT])
5813 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
5814
5815 if (tb[RTA_IP_PROTO]) {
5816 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
5817 &fl6.flowi6_proto, AF_INET6,
5818 extack);
5819 if (err)
5820 goto errout;
5821 }
5822
5823 if (iif) {
5824 struct net_device *dev;
5825 int flags = 0;
5826
5827 rcu_read_lock();
5828
5829 dev = dev_get_by_index_rcu(net, iif);
5830 if (!dev) {
5831 rcu_read_unlock();
5832 err = -ENODEV;
5833 goto errout;
5834 }
5835
5836 fl6.flowi6_iif = iif;
5837
5838 if (!ipv6_addr_any(&fl6.saddr))
5839 flags |= RT6_LOOKUP_F_HAS_SADDR;
5840
5841 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
5842
5843 rcu_read_unlock();
5844 } else {
5845 fl6.flowi6_oif = oif;
5846
5847 dst = ip6_route_output(net, NULL, &fl6);
5848 }
5849
5850
5851 rt = container_of(dst, struct rt6_info, dst);
5852 if (rt->dst.error) {
5853 err = rt->dst.error;
5854 ip6_rt_put(rt);
5855 goto errout;
5856 }
5857
5858 if (rt == net->ipv6.ip6_null_entry) {
5859 err = rt->dst.error;
5860 ip6_rt_put(rt);
5861 goto errout;
5862 }
5863
5864 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
5865 if (!skb) {
5866 ip6_rt_put(rt);
5867 err = -ENOBUFS;
5868 goto errout;
5869 }
5870
5871 skb_dst_set(skb, &rt->dst);
5872
5873 rcu_read_lock();
5874 from = rcu_dereference(rt->from);
5875 if (from) {
5876 if (fibmatch)
5877 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
5878 iif, RTM_NEWROUTE,
5879 NETLINK_CB(in_skb).portid,
5880 nlh->nlmsg_seq, 0);
5881 else
5882 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
5883 &fl6.saddr, iif, RTM_NEWROUTE,
5884 NETLINK_CB(in_skb).portid,
5885 nlh->nlmsg_seq, 0);
5886 } else {
5887 err = -ENETUNREACH;
5888 }
5889 rcu_read_unlock();
5890
5891 if (err < 0) {
5892 kfree_skb(skb);
5893 goto errout;
5894 }
5895
5896 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
5897errout:
5898 return err;
5899}
5900
5901void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
5902 unsigned int nlm_flags)
5903{
5904 struct sk_buff *skb;
5905 struct net *net = info->nl_net;
5906 u32 seq;
5907 int err;
5908
5909 err = -ENOBUFS;
5910 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
5911
5912 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
5913 if (!skb)
5914 goto errout;
5915
5916 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
5917 event, info->portid, seq, nlm_flags);
5918 if (err < 0) {
5919 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
5920 WARN_ON(err == -EMSGSIZE);
5921 kfree_skb(skb);
5922 goto errout;
5923 }
5924 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
5925 info->nlh, gfp_any());
5926 return;
5927errout:
5928 if (err < 0)
5929 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
5930}
5931
5932void fib6_rt_update(struct net *net, struct fib6_info *rt,
5933 struct nl_info *info)
5934{
5935 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
5936 struct sk_buff *skb;
5937 int err = -ENOBUFS;
5938
5939 /* call_fib6_entry_notifiers will be removed when in-kernel notifier
5940 * is implemented and supported for nexthop objects
5941 */
5942 call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
5943
5944 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
5945 if (!skb)
5946 goto errout;
5947
5948 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
5949 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
5950 if (err < 0) {
5951 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
5952 WARN_ON(err == -EMSGSIZE);
5953 kfree_skb(skb);
5954 goto errout;
5955 }
5956 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
5957 info->nlh, gfp_any());
5958 return;
5959errout:
5960 if (err < 0)
5961 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
5962}
5963
5964static int ip6_route_dev_notify(struct notifier_block *this,
5965 unsigned long event, void *ptr)
5966{
5967 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5968 struct net *net = dev_net(dev);
5969
5970 if (!(dev->flags & IFF_LOOPBACK))
5971 return NOTIFY_OK;
5972
5973 if (event == NETDEV_REGISTER) {
5974 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
5975 net->ipv6.ip6_null_entry->dst.dev = dev;
5976 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
5977#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5978 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
5979 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
5980 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
5981 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
5982#endif
5983 } else if (event == NETDEV_UNREGISTER &&
5984 dev->reg_state != NETREG_UNREGISTERED) {
5985 /* NETDEV_UNREGISTER could be fired multiple times by
5986 * netdev_wait_allrefs(). Make sure we only call this once.
5987 */
5988 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
5989#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5990 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
5991 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
5992#endif
5993 }
5994
5995 return NOTIFY_OK;
5996}
5997
5998/*
5999 * /proc
6000 */
6001
6002#ifdef CONFIG_PROC_FS
6003static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6004{
6005 struct net *net = (struct net *)seq->private;
6006 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6007 net->ipv6.rt6_stats->fib_nodes,
6008 net->ipv6.rt6_stats->fib_route_nodes,
6009 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6010 net->ipv6.rt6_stats->fib_rt_entries,
6011 net->ipv6.rt6_stats->fib_rt_cache,
6012 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6013 net->ipv6.rt6_stats->fib_discarded_routes);
6014
6015 return 0;
6016}
6017#endif /* CONFIG_PROC_FS */
6018
6019#ifdef CONFIG_SYSCTL
6020
6021static
6022int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6023 void __user *buffer, size_t *lenp, loff_t *ppos)
6024{
6025 struct net *net;
6026 int delay;
6027 int ret;
6028 if (!write)
6029 return -EINVAL;
6030
6031 net = (struct net *)ctl->extra1;
6032 delay = net->ipv6.sysctl.flush_delay;
6033 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6034 if (ret)
6035 return ret;
6036
6037 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6038 return 0;
6039}
6040
6041static struct ctl_table ipv6_route_table_template[] = {
6042 {
6043 .procname = "flush",
6044 .data = &init_net.ipv6.sysctl.flush_delay,
6045 .maxlen = sizeof(int),
6046 .mode = 0200,
6047 .proc_handler = ipv6_sysctl_rtcache_flush
6048 },
6049 {
6050 .procname = "gc_thresh",
6051 .data = &ip6_dst_ops_template.gc_thresh,
6052 .maxlen = sizeof(int),
6053 .mode = 0644,
6054 .proc_handler = proc_dointvec,
6055 },
6056 {
6057 .procname = "max_size",
6058 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
6059 .maxlen = sizeof(int),
6060 .mode = 0644,
6061 .proc_handler = proc_dointvec,
6062 },
6063 {
6064 .procname = "gc_min_interval",
6065 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6066 .maxlen = sizeof(int),
6067 .mode = 0644,
6068 .proc_handler = proc_dointvec_jiffies,
6069 },
6070 {
6071 .procname = "gc_timeout",
6072 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6073 .maxlen = sizeof(int),
6074 .mode = 0644,
6075 .proc_handler = proc_dointvec_jiffies,
6076 },
6077 {
6078 .procname = "gc_interval",
6079 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
6080 .maxlen = sizeof(int),
6081 .mode = 0644,
6082 .proc_handler = proc_dointvec_jiffies,
6083 },
6084 {
6085 .procname = "gc_elasticity",
6086 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6087 .maxlen = sizeof(int),
6088 .mode = 0644,
6089 .proc_handler = proc_dointvec,
6090 },
6091 {
6092 .procname = "mtu_expires",
6093 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6094 .maxlen = sizeof(int),
6095 .mode = 0644,
6096 .proc_handler = proc_dointvec_jiffies,
6097 },
6098 {
6099 .procname = "min_adv_mss",
6100 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
6101 .maxlen = sizeof(int),
6102 .mode = 0644,
6103 .proc_handler = proc_dointvec,
6104 },
6105 {
6106 .procname = "gc_min_interval_ms",
6107 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6108 .maxlen = sizeof(int),
6109 .mode = 0644,
6110 .proc_handler = proc_dointvec_ms_jiffies,
6111 },
6112 {
6113 .procname = "skip_notify_on_dev_down",
6114 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6115 .maxlen = sizeof(int),
6116 .mode = 0644,
6117 .proc_handler = proc_dointvec_minmax,
6118 .extra1 = SYSCTL_ZERO,
6119 .extra2 = SYSCTL_ONE,
6120 },
6121 { }
6122};
6123
6124struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6125{
6126 struct ctl_table *table;
6127
6128 table = kmemdup(ipv6_route_table_template,
6129 sizeof(ipv6_route_table_template),
6130 GFP_KERNEL);
6131
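	/* Indices below must stay in sync with the order of entries in
	 * ipv6_route_table_template above.
	 */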
6132 if (table) {
6133 table[0].data = &net->ipv6.sysctl.flush_delay;
6134 table[0].extra1 = net;
6135 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6136 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
6137 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6138 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6139 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6140 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6141 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6142 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6143 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6144 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6145
6146 /* Don't export sysctls to unprivileged users */
6147 if (net->user_ns != &init_user_ns)
6148 table[0].procname = NULL;
6149 }
6150
6151 return table;
6152}
6153#endif
6154
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;
	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
	       sizeof(*net->ipv6.fib6_null_entry));

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
#ifdef CONFIG_IPV6_SUBTREES
	net->ipv6.fib6_routes_require_src = 0;
#endif
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
	net->ipv6.sysctl.skip_notify_on_dev_down = 0;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

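/* Tear down everything ip6_route_net_init() set up for this namespace. */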
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

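/* Late per-namespace init/exit: only the procfs views, /proc/net/ipv6_route
 * and /proc/net/rt6_stats.
 */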
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
			sizeof(struct ipv6_route_iter));
	proc_create_net_single("rt6_stats", 0444, net->proc_net,
			       rt6_stats_seq_show, NULL);
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

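/* Per-namespace inetpeer base for IPv6.  Peer entries hang off this base and
 * are invalidated and freed when the namespace goes away.
 */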
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

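/* Registered below the addrconf notifier priority, so (higher priorities run
 * first) address configuration has already reacted to a netdevice event by
 * the time the routing code sees it.
 */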
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};

void __init ip6_route_init_special_entries(void)
{
	/* The loopback device is registered before this code runs, so the
	 * loopback references in these template routes were never taken;
	 * set them up manually for init_net here.
	 */
	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #endif
}

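/* Module init: create the rt6_info slab cache, register the pernet
 * subsystems and the fib6/xfrm6/policy-rule layers, hook up the
 * RTM_NEWROUTE/RTM_DELROUTE/RTM_GETROUTE rtnetlink handlers and the
 * netdevice notifier, and initialise the per-CPU lists for uncached routes.
 * Each failure path unwinds whatever had already succeeded.
 */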
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}

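/* Tear down everything ip6_route_init() set up. */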
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}