net/ipv6/route.c (mirror_ubuntu-bionic-kernel.git)
ipv6: fix possible deadlock in rt6_age_examine_exception()
/*
 *      Linux INET6 implementation
 *      FIB front-end.
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/* Changes:
 *
 *      YOSHIFUJI Hideaki @USAGI
 *              reworked default router selection.
 *              - respect outgoing interface
 *              - select from (probably) reachable routers (i.e.
 *                routers in REACHABLE, STALE, DELAY or PROBE states).
 *              - always select the same router if it is (probably)
 *                reachable. otherwise, round-robin the list.
 *      Ville Nuorvala
 *              Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>

#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

enum rt6_nud_state {
        RT6_NUD_FAIL_HARD = -3,
        RT6_NUD_FAIL_PROBE = -2,
        RT6_NUD_FAIL_DO_RR = -1,
        RT6_NUD_SUCCEED = 1
};

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
                           struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct rt6_info *rt);
static int rt6_fill_node(struct net *net,
                         struct sk_buff *skb, struct rt6_info *rt,
                         struct in6_addr *dst, struct in6_addr *src,
                         int iif, int type, u32 portid, u32 seq,
                         unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
                                           const struct in6_addr *prefix, int prefixlen,
                                           const struct in6_addr *gwaddr,
                                           struct net_device *dev,
                                           unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
                                           const struct in6_addr *prefix, int prefixlen,
                                           const struct in6_addr *gwaddr,
                                           struct net_device *dev);
#endif

struct uncached_list {
        spinlock_t lock;
        struct list_head head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

static void rt6_uncached_list_add(struct rt6_info *rt)
{
        struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

        rt->rt6i_uncached_list = ul;

        spin_lock_bh(&ul->lock);
        list_add_tail(&rt->rt6i_uncached, &ul->head);
        spin_unlock_bh(&ul->lock);
}

static void rt6_uncached_list_del(struct rt6_info *rt)
{
        if (!list_empty(&rt->rt6i_uncached)) {
                struct uncached_list *ul = rt->rt6i_uncached_list;
                struct net *net = dev_net(rt->dst.dev);

                spin_lock_bh(&ul->lock);
                list_del(&rt->rt6i_uncached);
                atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
                spin_unlock_bh(&ul->lock);
        }
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
        struct net_device *loopback_dev = net->loopback_dev;
        int cpu;

        if (dev == loopback_dev)
                return;

        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
                struct rt6_info *rt;

                spin_lock_bh(&ul->lock);
                list_for_each_entry(rt, &ul->head, rt6i_uncached) {
                        struct inet6_dev *rt_idev = rt->rt6i_idev;
                        struct net_device *rt_dev = rt->dst.dev;

                        if (rt_idev->dev == dev) {
                                rt->rt6i_idev = in6_dev_get(loopback_dev);
                                in6_dev_put(rt_idev);
                        }

                        if (rt_dev == dev) {
                                rt->dst.dev = loopback_dev;
                                dev_hold(rt->dst.dev);
                                dev_put(rt_dev);
                        }
                }
                spin_unlock_bh(&ul->lock);
        }
}

static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
        return dst_metrics_write_ptr(rt->dst.from);
}

static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
        struct rt6_info *rt = (struct rt6_info *)dst;

        if (rt->rt6i_flags & RTF_PCPU)
                return rt6_pcpu_cow_metrics(rt);
        else if (rt->rt6i_flags & RTF_CACHE)
                return NULL;
        else
                return dst_cow_metrics_generic(dst, old);
}

static inline const void *choose_neigh_daddr(struct rt6_info *rt,
                                             struct sk_buff *skb,
                                             const void *daddr)
{
        struct in6_addr *p = &rt->rt6i_gateway;

        if (!ipv6_addr_any(p))
                return (const void *) p;
        else if (skb)
                return &ipv6_hdr(skb)->daddr;
        return daddr;
}
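
/* Editorial example (not part of the original file): the selection
 * order implemented by choose_neigh_daddr() above is
 *
 *      gateway route (rt6i_gateway != ::)  ->  &rt->rt6i_gateway
 *      on-link route, skb available        ->  &ipv6_hdr(skb)->daddr
 *      on-link route, no skb               ->  caller-supplied daddr
 */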

static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
                                          struct sk_buff *skb,
                                          const void *daddr)
{
        struct rt6_info *rt = (struct rt6_info *) dst;
        struct neighbour *n;

        daddr = choose_neigh_daddr(rt, skb, daddr);
        n = __ipv6_neigh_lookup(dst->dev, daddr);
        if (n)
                return n;
        return neigh_create(&nd_tbl, daddr, dst->dev);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
        struct net_device *dev = dst->dev;
        struct rt6_info *rt = (struct rt6_info *)dst;

        daddr = choose_neigh_daddr(rt, NULL, daddr);
        if (!daddr)
                return;
        if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
                return;
        if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
                return;
        __ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
        .family = AF_INET6,
        .gc = ip6_dst_gc,
        .gc_thresh = 1024,
        .check = ip6_dst_check,
        .default_advmss = ip6_default_advmss,
        .mtu = ip6_mtu,
        .cow_metrics = ipv6_cow_metrics,
        .destroy = ip6_dst_destroy,
        .ifdown = ip6_dst_ifdown,
        .negative_advice = ip6_negative_advice,
        .link_failure = ip6_link_failure,
        .update_pmtu = ip6_rt_update_pmtu,
        .redirect = rt6_do_redirect,
        .local_out = __ip6_local_out,
        .neigh_lookup = ip6_neigh_lookup,
        .confirm_neigh = ip6_confirm_neigh,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

        return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                         struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
                                      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
        .family = AF_INET6,
        .destroy = ip6_dst_destroy,
        .check = ip6_dst_check,
        .mtu = ip6_blackhole_mtu,
        .default_advmss = ip6_default_advmss,
        .update_pmtu = ip6_rt_blackhole_update_pmtu,
        .redirect = ip6_rt_blackhole_redirect,
        .cow_metrics = dst_cow_metrics_generic,
        .neigh_lookup = ip6_neigh_lookup,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
        [RTAX_HOPLIMIT - 1] = 0,
};

static const struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt = ATOMIC_INIT(1),
                .__use = 1,
                .obsolete = DST_OBSOLETE_FORCE_CHK,
                .error = -ENETUNREACH,
                .input = ip6_pkt_discard,
                .output = ip6_pkt_discard_out,
        },
        .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol = RTPROT_KERNEL,
        .rt6i_metric = ~(u32) 0,
        .rt6i_ref = ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
        .dst = {
                .__refcnt = ATOMIC_INIT(1),
                .__use = 1,
                .obsolete = DST_OBSOLETE_FORCE_CHK,
                .error = -EACCES,
                .input = ip6_pkt_prohibit,
                .output = ip6_pkt_prohibit_out,
        },
        .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol = RTPROT_KERNEL,
        .rt6i_metric = ~(u32) 0,
        .rt6i_ref = ATOMIC_INIT(1),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
        .dst = {
                .__refcnt = ATOMIC_INIT(1),
                .__use = 1,
                .obsolete = DST_OBSOLETE_FORCE_CHK,
                .error = -EINVAL,
                .input = dst_discard,
                .output = dst_discard_out,
        },
        .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol = RTPROT_KERNEL,
        .rt6i_metric = ~(u32) 0,
        .rt6i_ref = ATOMIC_INIT(1),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
        struct dst_entry *dst = &rt->dst;

        memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
        INIT_LIST_HEAD(&rt->rt6i_siblings);
        INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
                                        int flags)
{
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        1, DST_OBSOLETE_FORCE_CHK, flags);

        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
        }

        return rt;
}

struct rt6_info *ip6_dst_alloc(struct net *net,
                               struct net_device *dev,
                               int flags)
{
        struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

        if (rt) {
                rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
                if (!rt->rt6i_pcpu) {
                        dst_release_immediate(&rt->dst);
                        return NULL;
                }
        }

        return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct rt6_exception_bucket *bucket;
        struct dst_entry *from = dst->from;
        struct inet6_dev *idev;

        dst_destroy_metrics_generic(dst);
        free_percpu(rt->rt6i_pcpu);
        rt6_uncached_list_del(rt);

        idev = rt->rt6i_idev;
        if (idev) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
        }
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1);
        if (bucket) {
                rt->rt6i_exception_bucket = NULL;
                kfree(bucket);
        }

        dst->from = NULL;
        dst_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                           int how)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct inet6_dev *idev = rt->rt6i_idev;
        struct net_device *loopback_dev =
                dev_net(dev)->loopback_dev;

        if (idev && idev->dev != loopback_dev) {
                struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
                if (loopback_idev) {
                        rt->rt6i_idev = loopback_idev;
                        in6_dev_put(idev);
                }
        }
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
        if (rt->rt6i_flags & RTF_EXPIRES)
                return time_after(jiffies, rt->dst.expires);
        else
                return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
        if (rt->rt6i_flags & RTF_EXPIRES) {
                if (time_after(jiffies, rt->dst.expires))
                        return true;
        } else if (rt->dst.from) {
                return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
                        rt6_check_expired((struct rt6_info *)rt->dst.from);
        }
        return false;
}
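
/* Editorial example (not from the original source): a cached clone
 * created without RTF_EXPIRES follows its parent through dst.from, so,
 * assuming the clone keeps the usual DST_OBSOLETE_FORCE_CHK obsolete
 * value, it is reported expired as soon as the parent expires:
 *
 *      parent->rt6i_flags |= RTF_EXPIRES;
 *      parent->dst.expires = jiffies - 1;      // parent just expired
 *      rt6_check_expired(clone);               // true, via the recursion
 */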

static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
                                             struct flowi6 *fl6, int oif,
                                             int strict)
{
        struct rt6_info *sibling, *next_sibling;
        int route_chosen;

        /* We might have already computed the hash for ICMPv6 errors. In
         * such a case it will always be non-zero. Otherwise now is the
         * time to do it.
         */
        if (!fl6->mp_hash)
                fl6->mp_hash = rt6_multipath_hash(fl6, NULL);

        route_chosen = fl6->mp_hash % (match->rt6i_nsiblings + 1);
        /* Don't change the route if route_chosen == 0
         * (the siblings list does not include ourself)
         */
        if (route_chosen)
                list_for_each_entry_safe(sibling, next_sibling,
                                         &match->rt6i_siblings, rt6i_siblings) {
                        route_chosen--;
                        if (route_chosen == 0) {
                                struct inet6_dev *idev = sibling->rt6i_idev;

                                if (!netif_carrier_ok(sibling->dst.dev) &&
                                    idev->cnf.ignore_routes_with_linkdown)
                                        break;
                                if (rt6_score_route(sibling, oif, strict) < 0)
                                        break;
                                match = sibling;
                                break;
                        }
                }
        return match;
}
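
/* Worked example (editorial, not from the original source): with two
 * siblings, rt6i_nsiblings == 2 and the hash is reduced modulo 3:
 *
 *      fl6->mp_hash % 3 == 0  ->  keep 'match' itself
 *      fl6->mp_hash % 3 == 1  ->  first entry on match->rt6i_siblings
 *      fl6->mp_hash % 3 == 2  ->  second entry on match->rt6i_siblings
 *
 * A sibling is only adopted if its carrier and score checks pass;
 * otherwise the original 'match' is kept.
 */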

/*
 *      Route lookup. rcu_read_lock() should be held.
 */

static inline struct rt6_info *rt6_device_match(struct net *net,
                                                struct rt6_info *rt,
                                                const struct in6_addr *saddr,
                                                int oif,
                                                int flags)
{
        struct rt6_info *local = NULL;
        struct rt6_info *sprt;

        if (!oif && ipv6_addr_any(saddr))
                goto out;

        for (sprt = rt; sprt; sprt = rcu_dereference(sprt->dst.rt6_next)) {
                struct net_device *dev = sprt->dst.dev;

                if (oif) {
                        if (dev->ifindex == oif)
                                return sprt;
                        if (dev->flags & IFF_LOOPBACK) {
                                if (!sprt->rt6i_idev ||
                                    sprt->rt6i_idev->dev->ifindex != oif) {
                                        if (flags & RT6_LOOKUP_F_IFACE)
                                                continue;
                                        if (local &&
                                            local->rt6i_idev->dev->ifindex == oif)
                                                continue;
                                }
                                local = sprt;
                        }
                } else {
                        if (ipv6_chk_addr(net, saddr, dev,
                                          flags & RT6_LOOKUP_F_IFACE))
                                return sprt;
                }
        }

        if (oif) {
                if (local)
                        return local;

                if (flags & RT6_LOOKUP_F_IFACE)
                        return net->ipv6.ip6_null_entry;
        }
out:
        return rt;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
        struct work_struct work;
        struct in6_addr target;
        struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
        struct in6_addr mcaddr;
        struct __rt6_probe_work *work =
                container_of(w, struct __rt6_probe_work, work);

        addrconf_addr_solict_mult(&work->target, &mcaddr);
        ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
        dev_put(work->dev);
        kfree(work);
}

static void rt6_probe(struct rt6_info *rt)
{
        struct __rt6_probe_work *work;
        struct neighbour *neigh;
        /*
         * Okay, this does not seem to be appropriate
         * for now, however, we need to check if it
         * is really so; aka Router Reachability Probing.
         *
         * Router Reachability Probe MUST be rate-limited
         * to no more than one per minute.
         */
        if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
                return;
        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
                if (neigh->nud_state & NUD_VALID)
                        goto out;

                work = NULL;
                write_lock(&neigh->lock);
                if (!(neigh->nud_state & NUD_VALID) &&
                    time_after(jiffies,
                               neigh->updated +
                               rt->rt6i_idev->cnf.rtr_probe_interval)) {
                        work = kmalloc(sizeof(*work), GFP_ATOMIC);
                        if (work)
                                __neigh_set_probe_once(neigh);
                }
                write_unlock(&neigh->lock);
        } else {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
        }

        if (work) {
                INIT_WORK(&work->work, rt6_probe_deferred);
                work->target = rt->rt6i_gateway;
                dev_hold(rt->dst.dev);
                work->dev = rt->dst.dev;
                schedule_work(&work->work);
        }

out:
        rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
        struct net_device *dev = rt->dst.dev;
        if (!oif || dev->ifindex == oif)
                return 2;
        if ((dev->flags & IFF_LOOPBACK) &&
            rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
                return 1;
        return 0;
}

static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
        struct neighbour *neigh;
        enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
                return RT6_NUD_SUCCEED;

        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
                read_lock(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
                        ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
                else if (!(neigh->nud_state & NUD_FAILED))
                        ret = RT6_NUD_SUCCEED;
                else
                        ret = RT6_NUD_FAIL_PROBE;
#endif
                read_unlock(&neigh->lock);
        } else {
                ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
                      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
        }
        rcu_read_unlock_bh();

        return ret;
}

static int rt6_score_route(struct rt6_info *rt, int oif,
                           int strict)
{
        int m;

        m = rt6_check_dev(rt, oif);
        if (!m && (strict & RT6_LOOKUP_F_IFACE))
                return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
        m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
        if (strict & RT6_LOOKUP_F_REACHABLE) {
                int n = rt6_check_neigh(rt);
                if (n < 0)
                        return n;
        }
        return m;
}

static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
                                   int *mpri, struct rt6_info *match,
                                   bool *do_rr)
{
        int m;
        bool match_do_rr = false;
        struct inet6_dev *idev = rt->rt6i_idev;
        struct net_device *dev = rt->dst.dev;

        if (dev && !netif_carrier_ok(dev) &&
            idev->cnf.ignore_routes_with_linkdown &&
            !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
                goto out;

        if (rt6_check_expired(rt))
                goto out;

        m = rt6_score_route(rt, oif, strict);
        if (m == RT6_NUD_FAIL_DO_RR) {
                match_do_rr = true;
                m = 0; /* lowest valid score */
        } else if (m == RT6_NUD_FAIL_HARD) {
                goto out;
        }

        if (strict & RT6_LOOKUP_F_REACHABLE)
                rt6_probe(rt);

        /* note that m can be RT6_NUD_FAIL_PROBE at this point */
        if (m > *mpri) {
                *do_rr = match_do_rr;
                *mpri = m;
                match = rt;
        }
out:
        return match;
}

static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
                                     struct rt6_info *leaf,
                                     struct rt6_info *rr_head,
                                     u32 metric, int oif, int strict,
                                     bool *do_rr)
{
        struct rt6_info *rt, *match, *cont;
        int mpri = -1;

        match = NULL;
        cont = NULL;
        for (rt = rr_head; rt; rt = rcu_dereference(rt->dst.rt6_next)) {
                if (rt->rt6i_metric != metric) {
                        cont = rt;
                        break;
                }

                match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }

        for (rt = leaf; rt && rt != rr_head;
             rt = rcu_dereference(rt->dst.rt6_next)) {
                if (rt->rt6i_metric != metric) {
                        cont = rt;
                        break;
                }

                match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }

        if (match || !cont)
                return match;

        for (rt = cont; rt; rt = rcu_dereference(rt->dst.rt6_next))
                match = find_match(rt, oif, strict, &mpri, match, do_rr);

        return match;
}

static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
                                   int oif, int strict)
{
        struct rt6_info *leaf = rcu_dereference(fn->leaf);
        struct rt6_info *match, *rt0;
        bool do_rr = false;
        int key_plen;

        if (!leaf || leaf == net->ipv6.ip6_null_entry)
                return net->ipv6.ip6_null_entry;

        rt0 = rcu_dereference(fn->rr_ptr);
        if (!rt0)
                rt0 = leaf;

        /* Double check to make sure fn is not an intermediate node
         * and fn->leaf does not point to its child's leaf
         * (This might happen if all routes under fn are deleted from
         * the tree and fib6_repair_tree() is called on the node.)
         */
        key_plen = rt0->rt6i_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
        if (rt0->rt6i_src.plen)
                key_plen = rt0->rt6i_src.plen;
#endif
        if (fn->fn_bit != key_plen)
                return net->ipv6.ip6_null_entry;

        match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict,
                             &do_rr);

        if (do_rr) {
                struct rt6_info *next = rcu_dereference(rt0->dst.rt6_next);

                /* no entries matched; do round-robin */
                if (!next || next->rt6i_metric != rt0->rt6i_metric)
                        next = leaf;

                if (next != rt0) {
                        spin_lock_bh(&leaf->rt6i_table->tb6_lock);
                        /* make sure next is not being deleted from the tree */
                        if (next->rt6i_node)
                                rcu_assign_pointer(fn->rr_ptr, next);
                        spin_unlock_bh(&leaf->rt6i_table->tb6_lock);
                }
        }

        return match ? match : net->ipv6.ip6_null_entry;
}

static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
        return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
{
        struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
        unsigned long lifetime;
        struct rt6_info *rt;

        if (len < sizeof(struct route_info)) {
                return -EINVAL;
        }

        /* Sanity check for prefix_len and length */
        if (rinfo->length > 3) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 128) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 64) {
                if (rinfo->length < 2) {
                        return -EINVAL;
                }
        } else if (rinfo->prefix_len > 0) {
                if (rinfo->length < 1) {
                        return -EINVAL;
                }
        }

        pref = rinfo->route_pref;
        if (pref == ICMPV6_ROUTER_PREF_INVALID)
                return -EINVAL;

        lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

        if (rinfo->length == 3)
                prefix = (struct in6_addr *)rinfo->prefix;
        else {
                /* ipv6_addr_prefix() copies only prefix_len bits, so it
                 * is safe even when the option carries a truncated prefix
                 */
                ipv6_addr_prefix(&prefix_buf,
                                 (struct in6_addr *)rinfo->prefix,
                                 rinfo->prefix_len);
                prefix = &prefix_buf;
        }

        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(gwaddr, dev);
        else
                rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
                                        gwaddr, dev);

        if (rt && !lifetime) {
                ip6_del_rt(rt);
                rt = NULL;
        }

        if (!rt && lifetime)
                rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
                                        dev, pref);
        else if (rt)
                rt->rt6i_flags = RTF_ROUTEINFO |
                                 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

        if (rt) {
                if (!addrconf_finite_timeout(lifetime))
                        rt6_clean_expires(rt);
                else
                        rt6_set_expires(rt, jiffies + HZ * lifetime);

                ip6_rt_put(rt);
        }
        return 0;
}
#endif

static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
                                        struct in6_addr *saddr)
{
        struct fib6_node *pn, *sn;

        while (1) {
                if (fn->fn_flags & RTN_TL_ROOT)
                        return NULL;
                pn = rcu_dereference(fn->parent);
                sn = FIB6_SUBTREE(pn);
                if (sn && sn != fn)
                        fn = fib6_lookup(sn, NULL, saddr);
                else
                        fn = pn;
                if (fn->fn_flags & RTN_RTINFO)
                        return fn;
        }
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
                          bool null_fallback)
{
        struct rt6_info *rt = *prt;

        if (dst_hold_safe(&rt->dst))
                return true;
        if (null_fallback) {
                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
        } else {
                rt = NULL;
        }
        *prt = rt;
        return false;
}
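
/* Editorial usage sketch (not part of the original file): a typical
 * caller under rcu_read_lock() either gets a real reference on the
 * candidate or is switched to the pre-referenced null entry:
 *
 *      if (ip6_hold_safe(net, &rt, true))
 *              dst_use_noref(&rt->dst, jiffies);       // got a reference
 *      // on failure rt now points to net->ipv6.ip6_null_entry and
 *      // already carries the reference taken inside ip6_hold_safe()
 */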

static struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6, int flags)
{
        struct rt6_info *rt, *rt_cache;
        struct fib6_node *fn;

        rcu_read_lock();
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
        rt = rcu_dereference(fn->leaf);
        if (!rt) {
                rt = net->ipv6.ip6_null_entry;
        } else {
                rt = rt6_device_match(net, rt, &fl6->saddr,
                                      fl6->flowi6_oif, flags);
                if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
                        rt = rt6_multipath_select(rt, fl6,
                                                  fl6->flowi6_oif, flags);
        }
        if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }
        /* Search through exception table */
        rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
        if (rt_cache)
                rt = rt_cache;

        if (ip6_hold_safe(net, &rt, true))
                dst_use_noref(&rt->dst, jiffies);

        rcu_read_unlock();

        trace_fib6_table_lookup(net, rt, table, fl6);

        return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags)
{
        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
                            const struct in6_addr *saddr, int oif, int strict)
{
        struct flowi6 fl6 = {
                .flowi6_oif = oif,
                .daddr = *daddr,
        };
        struct dst_entry *dst;
        int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

        if (saddr) {
                memcpy(&fl6.saddr, saddr, sizeof(*saddr));
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        }

        dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
        if (dst->error == 0)
                return (struct rt6_info *) dst;

        dst_release(dst);

        return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * The caller must hold a dst reference before calling it.
 */

static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
                        struct mx6_config *mxc,
                        struct netlink_ext_ack *extack)
{
        int err;
        struct fib6_table *table;

        table = rt->rt6i_table;
        spin_lock_bh(&table->tb6_lock);
        err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
        spin_unlock_bh(&table->tb6_lock);

        return err;
}

int ip6_ins_rt(struct rt6_info *rt)
{
        struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
        struct mx6_config mxc = { .mx = NULL, };

        /* Hold dst to account for the reference from the fib6 tree */
        dst_hold(&rt->dst);
        return __ip6_ins_rt(rt, &info, &mxc, NULL);
}

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
{
        struct net_device *dev = rt->dst.dev;

        if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
                /* for copies of local routes, dst->dev needs to be the
                 * device itself if it is an l3mdev master, the master
                 * device if the route's device is enslaved, and the
                 * loopback device as the default
                 */
                if (netif_is_l3_slave(dev) &&
                    !rt6_need_strict(&rt->rt6i_dst.addr))
                        dev = l3mdev_master_dev_rcu(dev);
                else if (!netif_is_l3_master(dev))
                        dev = dev_net(dev)->loopback_dev;
                /* the remaining case is netif_is_l3_master(dev) being
                 * true, in which case we want dev itself returned
                 */
        }

        return dev;
}
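
/* Editorial summary (not from the original source) of the device
 * choice above for RTF_LOCAL/RTF_ANYCAST copies:
 *
 *      enslaved (l3 slave) port, non-strict dst   -> l3mdev master
 *      l3mdev master itself (e.g. a VRF device)   -> dev unchanged
 *      anything else, incl. strict dst on a slave -> loopback_dev
 *
 * where "strict dst" means rt6_need_strict() addresses such as
 * link-local or multicast destinations.
 */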

static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
                                           const struct in6_addr *daddr,
                                           const struct in6_addr *saddr)
{
        struct net_device *dev;
        struct rt6_info *rt;

        /*
         *      Clone the route.
         */

        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;

        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(ort);
        rt = __ip6_dst_alloc(dev_net(dev), dev, 0);
        rcu_read_unlock();
        if (!rt)
                return NULL;

        ip6_rt_copy_init(rt, ort);
        rt->rt6i_flags |= RTF_CACHE;
        rt->rt6i_metric = 0;
        rt->dst.flags |= DST_HOST;
        rt->rt6i_dst.addr = *daddr;
        rt->rt6i_dst.plen = 128;

        if (!rt6_is_gw_or_nonexthop(ort)) {
                if (ort->rt6i_dst.plen != 128 &&
                    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                        rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
                        rt->rt6i_src.addr = *saddr;
                        rt->rt6i_src.plen = 128;
                }
#endif
        }

        return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
        struct net_device *dev;
        struct rt6_info *pcpu_rt;

        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(rt);
        pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags);
        rcu_read_unlock();
        if (!pcpu_rt)
                return NULL;
        ip6_rt_copy_init(pcpu_rt, rt);
        pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
        pcpu_rt->rt6i_flags |= RTF_PCPU;
        return pcpu_rt;
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
        struct rt6_info *pcpu_rt, **p;

        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;

        if (pcpu_rt && ip6_hold_safe(NULL, &pcpu_rt, false))
                rt6_dst_from_metrics_check(pcpu_rt);

        return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
        struct rt6_info *pcpu_rt, *prev, **p;

        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                struct net *net = dev_net(rt->dst.dev);

                dst_hold(&net->ipv6.ip6_null_entry->dst);
                return net->ipv6.ip6_null_entry;
        }

        dst_hold(&pcpu_rt->dst);
        p = this_cpu_ptr(rt->rt6i_pcpu);
        prev = cmpxchg(p, NULL, pcpu_rt);
        BUG_ON(prev);

        rt6_dst_from_metrics_check(pcpu_rt);
        return pcpu_rt;
}
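
/* Editorial note (not from the original source): the per-cpu slot is
 * claimed with an atomic compare-and-swap,
 *
 *      prev = cmpxchg(p, NULL, pcpu_rt);       // install iff slot empty
 *
 * The only caller, ip6_pol_route(), runs this under rcu_read_lock()
 * with BHs disabled, so nothing else on this CPU can fill the slot
 * between rt6_get_pcpu_route() observing NULL and the cmpxchg();
 * BUG_ON(prev) asserts exactly that invariant.
 */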

/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
                                 struct rt6_exception *rt6_ex)
{
        struct net *net;

        if (!bucket || !rt6_ex)
                return;

        net = dev_net(rt6_ex->rt6i->dst.dev);
        rt6_ex->rt6i->rt6i_node = NULL;
        hlist_del_rcu(&rt6_ex->hlist);
        rt6_release(rt6_ex->rt6i);
        kfree_rcu(rt6_ex, rcu);
        WARN_ON_ONCE(!bucket->depth);
        bucket->depth--;
        net->ipv6.rt6_stats->fib_rt_cache--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
        struct rt6_exception *rt6_ex, *oldest = NULL;

        if (!bucket)
                return;

        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
                        oldest = rt6_ex;
        }
        rt6_remove_exception(bucket, oldest);
}

static u32 rt6_exception_hash(const struct in6_addr *dst,
                              const struct in6_addr *src)
{
        static u32 seed __read_mostly;
        u32 val;

        net_get_random_once(&seed, sizeof(seed));
        val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
        if (src)
                val = jhash(src, sizeof(*src), val);
#endif
        return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
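
/* Editorial example (not in the original file): the hash picks one of
 * FIB6_EXCEPTION_BUCKET_SIZE chains hanging off a route; the helpers
 * below apply it as plain pointer arithmetic:
 *
 *      struct rt6_exception_bucket *bucket =
 *              rcu_dereference(rt->rt6i_exception_bucket);
 *
 *      bucket += rt6_exception_hash(&daddr, saddr);    // '*bucket += hval'
 */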

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
                              const struct in6_addr *daddr,
                              const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}

static int rt6_insert_exception(struct rt6_info *nrt,
                                struct rt6_info *ort)
{
        struct net *net = dev_net(ort->dst.dev);
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        int err = 0;

        /* ort can't be a cache or pcpu route */
        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;
        WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));

        spin_lock_bh(&rt6_exception_lock);

        if (ort->exception_bucket_flushed) {
                err = -EINVAL;
                goto out;
        }

        bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));
        if (!bucket) {
                bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
                                 GFP_ATOMIC);
                if (!bucket) {
                        err = -ENOMEM;
                        goto out;
                }
                rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
        }

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates ort is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (ort->rt6i_src.plen)
                src_key = &nrt->rt6i_src.addr;
#endif

        /* Update rt6i_prefsrc as it could be changed
         * in rt6_remove_prefsrc()
         */
        nrt->rt6i_prefsrc = ort->rt6i_prefsrc;
        /* rt6_mtu_change() might lower mtu on ort.
         * Only insert this exception route if its mtu
         * is less than ort's mtu value.
         */
        if (nrt->rt6i_pmtu >= dst_mtu(&ort->dst)) {
                err = -EINVAL;
                goto out;
        }

        rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex)
                rt6_remove_exception(bucket, rt6_ex);

        rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
        if (!rt6_ex) {
                err = -ENOMEM;
                goto out;
        }
        rt6_ex->rt6i = nrt;
        rt6_ex->stamp = jiffies;
        atomic_inc(&nrt->rt6i_ref);
        nrt->rt6i_node = ort->rt6i_node;
        hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
        bucket->depth++;
        net->ipv6.rt6_stats->fib_rt_cache++;

        if (bucket->depth > FIB6_MAX_DEPTH)
                rt6_exception_remove_oldest(bucket);

out:
        spin_unlock_bh(&rt6_exception_lock);

        /* Update fn->fn_sernum to invalidate all cached dst */
        if (!err) {
                fib6_update_sernum(ort);
                fib6_force_start_gc(net);
        }

        return err;
}
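
/* Editorial usage sketch (mirrors __ip6_rt_update_pmtu() later in this
 * file, not an additional API): installing a PMTU exception means
 * cloning the route, updating the clone, then inserting it:
 *
 *      struct rt6_info *nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
 *
 *      if (nrt6) {
 *              rt6_do_update_pmtu(nrt6, mtu);
 *              if (rt6_insert_exception(nrt6, rt6))
 *                      dst_release_immediate(&nrt6->dst);  // not inserted
 *      }
 */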

void rt6_flush_exceptions(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        spin_lock_bh(&rt6_exception_lock);
        /* Prevent rt6_insert_exception() from recreating the bucket list */
        rt->exception_bucket_flushed = 1;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));
        if (!bucket)
                goto out;

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
                        rt6_remove_exception(bucket, rt6_ex);
                WARN_ON_ONCE(bucket->depth);
                bucket++;
        }

out:
        spin_unlock_bh(&rt6_exception_lock);
}

/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr)
{
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        struct rt6_info *res = NULL;

        bucket = rcu_dereference(rt->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates rt is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (rt->rt6i_src.plen)
                src_key = saddr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

        if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
                res = rt6_ex->rt6i;

        return res;
}

/* Remove the passed in cached rt from the hash table that contains it */
int rt6_remove_exception_rt(struct rt6_info *rt)
{
        struct rt6_info *from = (struct rt6_info *)rt->dst.from;
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        int err;

        if (!from ||
            !(rt->rt6i_flags & RTF_CACHE))
                return -EINVAL;

        if (!rcu_access_pointer(from->rt6i_exception_bucket))
                return -ENOENT;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (from->rt6i_src.plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_spinlock(&bucket,
                                               &rt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex) {
                rt6_remove_exception(bucket, rt6_ex);
                err = 0;
        } else {
                err = -ENOENT;
        }

        spin_unlock_bh(&rt6_exception_lock);
        return err;
}

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
        struct rt6_info *from = (struct rt6_info *)rt->dst.from;
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;

        if (!from ||
            !(rt->rt6i_flags & RTF_CACHE))
                return;

        rcu_read_lock();
        bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (from->rt6i_src.plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket,
                                          &rt->rt6i_dst.addr,
                                          src_key);
        if (rt6_ex)
                rt6_ex->stamp = jiffies;

        rcu_read_unlock();
}

static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int i;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                                rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
                        }
                        bucket++;
                }
        }
}

static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
                                         struct rt6_info *rt, int mtu)
{
        /* If the new MTU is lower than the route PMTU, this new MTU will be the
         * lowest MTU in the path: always allow updating the route PMTU to
         * reflect PMTU decreases.
         *
         * If the new MTU is higher, and the route PMTU is equal to the local
         * MTU, this means the old MTU is the lowest in the path, so allow
         * updating it: if other nodes now have lower MTUs, PMTU discovery will
         * handle this.
         */

        if (dst_mtu(&rt->dst) >= mtu)
                return true;

        if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
                return true;

        return false;
}
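
/* Worked example (editorial, not from the original source), assuming a
 * local link MTU idev->cnf.mtu6 of 1500:
 *
 *      route PMTU 1500, new mtu 1280 -> allowed  (1500 >= 1280, decrease)
 *      route PMTU 1500, new mtu 9000 -> allowed  (PMTU == local MTU)
 *      route PMTU 1280, new mtu 1500 -> rejected (a remote hop, not this
 *                                                 link, is the bottleneck)
 */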

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
                                       struct rt6_info *rt, int mtu)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int i;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));

        if (!bucket)
                return;

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                        struct rt6_info *entry = rt6_ex->rt6i;

                        /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
                         * route), the metrics of its rt->dst.from have already
                         * been updated.
                         */
                        if (entry->rt6i_pmtu &&
                            rt6_mtu_change_route_allowed(idev, entry, mtu))
                                entry->rt6i_pmtu = mtu;
                }
                bucket++;
        }
}

#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)

static void rt6_exceptions_clean_tohost(struct rt6_info *rt,
                                        struct in6_addr *gateway)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(rt->rt6i_exception_bucket))
                return;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                struct rt6_info *entry = rt6_ex->rt6i;

                                if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
                                    RTF_CACHE_GATEWAY &&
                                    ipv6_addr_equal(gateway,
                                                    &entry->rt6i_gateway)) {
                                        rt6_remove_exception(bucket, rt6_ex);
                                }
                        }
                        bucket++;
                }
        }

        spin_unlock_bh(&rt6_exception_lock);
}

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
                                      struct rt6_exception *rt6_ex,
                                      struct fib6_gc_args *gc_args,
                                      unsigned long now)
{
        struct rt6_info *rt = rt6_ex->rt6i;

        /* We are pruning and obsoleting aged-out and non-gateway
         * exceptions even if other users still hold references to them,
         * so that on the next dst_check() such references can be dropped.
         * RTF_EXPIRES exceptions - e.g. PMTU-generated ones - are pruned
         * as soon as they expire, independently of their age, as per
         * RFC 8201 section 4.
         */
        if (!(rt->rt6i_flags & RTF_EXPIRES)) {
                if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
                        RT6_TRACE("aging clone %p\n", rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        } else if (time_after(jiffies, rt->dst.expires)) {
                RT6_TRACE("purging expired route %p\n", rt);
                rt6_remove_exception(bucket, rt6_ex);
                return;
        }

        if (rt->rt6i_flags & RTF_GATEWAY) {
                struct neighbour *neigh;
                __u8 neigh_flags = 0;

                neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
                if (neigh)
                        neigh_flags = neigh->flags;

                if (!(neigh_flags & NTF_ROUTER)) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        }

        gc_args->more++;
}

void rt6_age_exceptions(struct rt6_info *rt,
                        struct fib6_gc_args *gc_args,
                        unsigned long now)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(rt->rt6i_exception_bucket))
                return;

        rcu_read_lock_bh();
        spin_lock(&rt6_exception_lock);
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                rt6_age_examine_exception(bucket, rt6_ex,
                                                          gc_args, now);
                        }
                        bucket++;
                }
        }
        spin_unlock(&rt6_exception_lock);
        rcu_read_unlock_bh();
}

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                               int oif, struct flowi6 *fl6, int flags)
{
        struct fib6_node *fn, *saved_fn;
        struct rt6_info *rt, *rt_cache;
        int strict = 0;

        strict |= flags & RT6_LOOKUP_F_IFACE;
        strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
        if (net->ipv6.devconf_all->forwarding == 0)
                strict |= RT6_LOOKUP_F_REACHABLE;

        rcu_read_lock();

        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;

        if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
                oif = 0;

redo_rt6_select:
        rt = rt6_select(net, fn, oif, strict);
        if (rt->rt6i_nsiblings)
                rt = rt6_multipath_select(rt, fl6, oif, strict);
        if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto redo_rt6_select;
                else if (strict & RT6_LOOKUP_F_REACHABLE) {
                        /* also consider unreachable route */
                        strict &= ~RT6_LOOKUP_F_REACHABLE;
                        fn = saved_fn;
                        goto redo_rt6_select;
                }
        }

        /* Search through exception table */
        rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
        if (rt_cache)
                rt = rt_cache;

        if (rt == net->ipv6.ip6_null_entry) {
                rcu_read_unlock();
                dst_hold(&rt->dst);
                trace_fib6_table_lookup(net, rt, table, fl6);
                return rt;
        } else if (rt->rt6i_flags & RTF_CACHE) {
                if (ip6_hold_safe(net, &rt, true)) {
                        dst_use_noref(&rt->dst, jiffies);
                        rt6_dst_from_metrics_check(rt);
                }
                rcu_read_unlock();
                trace_fib6_table_lookup(net, rt, table, fl6);
                return rt;
        } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
                            !(rt->rt6i_flags & RTF_GATEWAY))) {
                /* Create an RTF_CACHE clone which will not be
                 * owned by the fib6 tree. It is for the special case where
                 * the daddr in the skb during the neighbor look-up is different
                 * from the fl6->daddr used to look up the route here.
                 */

                struct rt6_info *uncached_rt;

                if (ip6_hold_safe(net, &rt, true)) {
                        dst_use_noref(&rt->dst, jiffies);
                } else {
                        rcu_read_unlock();
                        uncached_rt = rt;
                        goto uncached_rt_out;
                }
                rcu_read_unlock();

                uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
                dst_release(&rt->dst);

                if (uncached_rt) {
                        /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
                         * No need for another dst_hold()
                         */
                        rt6_uncached_list_add(uncached_rt);
                        atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
                } else {
                        uncached_rt = net->ipv6.ip6_null_entry;
                        dst_hold(&uncached_rt->dst);
                }

uncached_rt_out:
                trace_fib6_table_lookup(net, uncached_rt, table, fl6);
                return uncached_rt;

        } else {
                /* Get a percpu copy */

                struct rt6_info *pcpu_rt;

                dst_use_noref(&rt->dst, jiffies);
                local_bh_disable();
                pcpu_rt = rt6_get_pcpu_route(rt);

                if (!pcpu_rt) {
                        /* atomic_inc_not_zero() is needed when using rcu */
                        if (atomic_inc_not_zero(&rt->rt6i_ref)) {
                                /* No dst_hold() on rt is needed because grabbing
                                 * rt->rt6i_ref makes sure rt can't be released.
                                 */
                                pcpu_rt = rt6_make_pcpu_route(rt);
                                rt6_release(rt);
                        } else {
                                /* rt is already removed from tree */
                                pcpu_rt = net->ipv6.ip6_null_entry;
                                dst_hold(&pcpu_rt->dst);
                        }
                }
                local_bh_enable();
                rcu_read_unlock();
                trace_fib6_table_lookup(net, pcpu_rt, table, fl6);
                return pcpu_rt;
        }
}
EXPORT_SYMBOL_GPL(ip6_pol_route);

static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
                                            struct flowi6 *fl6, int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
                                         struct net_device *dev,
                                         struct flowi6 *fl6, int flags)
{
        if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
                flags |= RT6_LOOKUP_F_IFACE;

        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

static void ip6_multipath_l3_keys(const struct sk_buff *skb,
                                  struct flow_keys *keys)
{
        const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
        const struct ipv6hdr *key_iph = outer_iph;
        const struct ipv6hdr *inner_iph;
        const struct icmp6hdr *icmph;
        struct ipv6hdr _inner_iph;

        if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
                goto out;

        icmph = icmp6_hdr(skb);
        if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
            icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
            icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
            icmph->icmp6_type != ICMPV6_PARAMPROB)
                goto out;

        inner_iph = skb_header_pointer(skb,
                                       skb_transport_offset(skb) + sizeof(*icmph),
                                       sizeof(_inner_iph), &_inner_iph);
        if (!inner_iph)
                goto out;

        key_iph = inner_iph;
out:
        memset(keys, 0, sizeof(*keys));
        keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        keys->addrs.v6addrs.src = key_iph->saddr;
        keys->addrs.v6addrs.dst = key_iph->daddr;
        keys->tags.flow_label = ip6_flowinfo(key_iph);
        keys->basic.ip_proto = key_iph->nexthdr;
}

/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb)
{
        struct flow_keys hash_keys;

        if (skb) {
                ip6_multipath_l3_keys(skb, &hash_keys);
                return flow_hash_from_keys(&hash_keys);
        }

        return get_hash_from_flowi6(fl6);
}
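
/* Editorial note (not from the original source): forward paths hash the
 * flow itself, while ICMPv6 errors hash the embedded (inner) flow, so
 * errors follow the same multipath leg as the traffic that triggered
 * them. This is exactly how ip6_route_input() below uses it:
 *
 *      if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
 *              fl6.mp_hash = rt6_multipath_hash(&fl6, skb);
 */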

void ip6_route_input(struct sk_buff *skb)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowlabel = ip6_flowinfo(iph),
                .flowi6_mark = skb->mark,
                .flowi6_proto = iph->nexthdr,
        };

        tun_info = skb_tunnel_info(skb);
        if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
                fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
        if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
                fl6.mp_hash = rt6_multipath_hash(&fl6, skb);
        skb_dst_drop(skb);
        skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}

static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
                                             struct flowi6 *fl6, int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
                                         struct flowi6 *fl6, int flags)
{
        bool any_src;

        if (rt6_need_strict(&fl6->daddr)) {
                struct dst_entry *dst;

                dst = l3mdev_link_scope_lookup(net, fl6);
                if (dst)
                        return dst;
        }

        fl6->flowi6_iif = LOOPBACK_IFINDEX;

        any_src = ipv6_addr_any(&fl6->saddr);
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
            (fl6->flowi6_oif && any_src))
                flags |= RT6_LOOKUP_F_IFACE;

        if (!any_src)
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        else if (sk)
                flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);

struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
        struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
        struct net_device *loopback_dev = net->loopback_dev;
        struct dst_entry *new = NULL;

        rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
                       DST_OBSOLETE_DEAD, 0);
        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

                new = &rt->dst;
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard_out;

                dst_copy_metrics(new, &ort->dst);

                rt->rt6i_idev = in6_dev_get(loopback_dev);
                rt->rt6i_gateway = ort->rt6i_gateway;
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
                rt->rt6i_metric = 0;

                memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
                memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
        }

        dst_release(dst_orig);
        return new ? new : ERR_PTR(-ENOMEM);
}

/*
 *      Destination cache support functions
 */

static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
        if (rt->dst.from &&
            dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
                dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
}

static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
        u32 rt_cookie = 0;

        if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
                return NULL;

        if (rt6_check_expired(rt))
                return NULL;

        return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
        if (!__rt6_check_expired(rt) &&
            rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
            rt6_check((struct rt6_info *)(rt->dst.from), cookie))
                return &rt->dst;
        else
                return NULL;
}

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
        struct rt6_info *rt;

        rt = (struct rt6_info *) dst;

        /* All IPV6 dsts are created with ->obsolete set to
         * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
         * down into this function.
         */

        rt6_dst_from_metrics_check(rt);

        if (rt->rt6i_flags & RTF_PCPU ||
            (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
                return rt6_dst_from_check(rt, cookie);
        else
                return rt6_check(rt, cookie);
}
2009
2010 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2011 {
2012 struct rt6_info *rt = (struct rt6_info *) dst;
2013
2014 if (rt) {
2015 if (rt->rt6i_flags & RTF_CACHE) {
2016 if (rt6_check_expired(rt)) {
2017 ip6_del_rt(rt);
2018 dst = NULL;
2019 }
2020 } else {
2021 dst_release(dst);
2022 dst = NULL;
2023 }
2024 }
2025 return dst;
2026 }
2027
2028 static void ip6_link_failure(struct sk_buff *skb)
2029 {
2030 struct rt6_info *rt;
2031
2032 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2033
2034 rt = (struct rt6_info *) skb_dst(skb);
2035 if (rt) {
2036 if (rt->rt6i_flags & RTF_CACHE) {
2037 if (dst_hold_safe(&rt->dst))
2038 ip6_del_rt(rt);
2039 } else {
2040 struct fib6_node *fn;
2041
2042 rcu_read_lock();
2043 fn = rcu_dereference(rt->rt6i_node);
2044 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2045 fn->fn_sernum = -1;
2046 rcu_read_unlock();
2047 }
2048 }
2049 }
2050
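/* Record a newly learned path MTU: flag the route RTF_MODIFIED and arm
 * the ip6_rt_mtu_expires timer so the learned value ages out again.
 */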
2051 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2052 {
2053 struct net *net = dev_net(rt->dst.dev);
2054
2055 rt->rt6i_flags |= RTF_MODIFIED;
2056 rt->rt6i_pmtu = mtu;
2057 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2058 }
2059
2060 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2061 {
2062 return !(rt->rt6i_flags & RTF_CACHE) &&
2063 (rt->rt6i_flags & RTF_PCPU ||
2064 rcu_access_pointer(rt->rt6i_node));
2065 }
2066
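/* Core PMTU update: clamp the advertised MTU to IPV6_MIN_MTU, ignore
 * anything that would not shrink the current dst MTU, and then either
 * update the route in place or install a cached exception clone that
 * carries the new MTU.
 */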
2067 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2068 const struct ipv6hdr *iph, u32 mtu)
2069 {
2070 const struct in6_addr *daddr, *saddr;
2071 struct rt6_info *rt6 = (struct rt6_info *)dst;
2072
2073 if (rt6->rt6i_flags & RTF_LOCAL)
2074 return;
2075
2076 if (dst_metric_locked(dst, RTAX_MTU))
2077 return;
2078
2079 if (iph) {
2080 daddr = &iph->daddr;
2081 saddr = &iph->saddr;
2082 } else if (sk) {
2083 daddr = &sk->sk_v6_daddr;
2084 saddr = &inet6_sk(sk)->saddr;
2085 } else {
2086 daddr = NULL;
2087 saddr = NULL;
2088 }
2089 dst_confirm_neigh(dst, daddr);
2090 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2091 if (mtu >= dst_mtu(dst))
2092 return;
2093
2094 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2095 rt6_do_update_pmtu(rt6, mtu);
2096 /* update rt6_ex->stamp for cache */
2097 if (rt6->rt6i_flags & RTF_CACHE)
2098 rt6_update_exception_stamp_rt(rt6);
2099 } else if (daddr) {
2100 struct rt6_info *nrt6;
2101
2102 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
2103 if (nrt6) {
2104 rt6_do_update_pmtu(nrt6, mtu);
2105 if (rt6_insert_exception(nrt6, rt6))
2106 dst_release_immediate(&nrt6->dst);
2107 }
2108 }
2109 }
2110
2111 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2112 struct sk_buff *skb, u32 mtu)
2113 {
2114 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2115 }
2116
2117 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2118 int oif, u32 mark, kuid_t uid)
2119 {
2120 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2121 struct dst_entry *dst;
2122 struct flowi6 fl6;
2123
2124 memset(&fl6, 0, sizeof(fl6));
2125 fl6.flowi6_oif = oif;
2126 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
2127 fl6.daddr = iph->daddr;
2128 fl6.saddr = iph->saddr;
2129 fl6.flowlabel = ip6_flowinfo(iph);
2130 fl6.flowi6_uid = uid;
2131
2132 dst = ip6_route_output(net, NULL, &fl6);
2133 if (!dst->error)
2134 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
2135 dst_release(dst);
2136 }
2137 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2138
2139 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2140 {
2141 struct dst_entry *dst;
2142
2143 ip6_update_pmtu(skb, sock_net(sk), mtu,
2144 sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
2145
2146 dst = __sk_dst_get(sk);
2147 if (!dst || !dst->obsolete ||
2148 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2149 return;
2150
2151 bh_lock_sock(sk);
2152 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2153 ip6_datagram_dst_update(sk, false);
2154 bh_unlock_sock(sk);
2155 }
2156 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2157
2158 /* Handle redirects */
2159 struct ip6rd_flowi {
2160 struct flowi6 fl6;
2161 struct in6_addr gateway;
2162 };
2163
2164 static struct rt6_info *__ip6_route_redirect(struct net *net,
2165 struct fib6_table *table,
2166 struct flowi6 *fl6,
2167 int flags)
2168 {
2169 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2170 struct rt6_info *rt, *rt_cache;
2171 struct fib6_node *fn;
2172
2173 /* Get the "current" route for this destination and
2174 * check if the redirect has come from an appropriate router.
2175 *
2176 * RFC 4861 specifies that redirects should only be
2177 * accepted if they come from the nexthop to the target.
2178 * Due to the way the routes are chosen, this notion
2179 * is a bit fuzzy and one might need to check all possible
2180 * routes.
2181 */
2182
2183 rcu_read_lock();
2184 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2185 restart:
2186 for_each_fib6_node_rt_rcu(fn) {
2187 if (rt6_check_expired(rt))
2188 continue;
2189 if (rt->dst.error)
2190 break;
2191 if (!(rt->rt6i_flags & RTF_GATEWAY))
2192 continue;
2193 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
2194 continue;
2195 /* rt_cache's gateway might be different from its 'parent'
2196 * in the case of an ip redirect.
2197 * So we keep searching in the exception table if the gateway
2198 * is different.
2199 */
2200 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) {
2201 rt_cache = rt6_find_cached_rt(rt,
2202 &fl6->daddr,
2203 &fl6->saddr);
2204 if (rt_cache &&
2205 ipv6_addr_equal(&rdfl->gateway,
2206 &rt_cache->rt6i_gateway)) {
2207 rt = rt_cache;
2208 break;
2209 }
2210 continue;
2211 }
2212 break;
2213 }
2214
2215 if (!rt)
2216 rt = net->ipv6.ip6_null_entry;
2217 else if (rt->dst.error) {
2218 rt = net->ipv6.ip6_null_entry;
2219 goto out;
2220 }
2221
2222 if (rt == net->ipv6.ip6_null_entry) {
2223 fn = fib6_backtrack(fn, &fl6->saddr);
2224 if (fn)
2225 goto restart;
2226 }
2227
2228 out:
2229 ip6_hold_safe(net, &rt, true);
2230
2231 rcu_read_unlock();
2232
2233 trace_fib6_table_lookup(net, rt, table, fl6);
2234 return rt;
2235 }
2236
2237 static struct dst_entry *ip6_route_redirect(struct net *net,
2238 const struct flowi6 *fl6,
2239 const struct in6_addr *gateway)
2240 {
2241 int flags = RT6_LOOKUP_F_HAS_SADDR;
2242 struct ip6rd_flowi rdfl;
2243
2244 rdfl.fl6 = *fl6;
2245 rdfl.gateway = *gateway;
2246
2247 return fib6_rule_lookup(net, &rdfl.fl6,
2248 flags, __ip6_route_redirect);
2249 }
2250
2251 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2252 kuid_t uid)
2253 {
2254 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2255 struct dst_entry *dst;
2256 struct flowi6 fl6;
2257
2258 memset(&fl6, 0, sizeof(fl6));
2259 fl6.flowi6_iif = LOOPBACK_IFINDEX;
2260 fl6.flowi6_oif = oif;
2261 fl6.flowi6_mark = mark;
2262 fl6.daddr = iph->daddr;
2263 fl6.saddr = iph->saddr;
2264 fl6.flowlabel = ip6_flowinfo(iph);
2265 fl6.flowi6_uid = uid;
2266
2267 dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
2268 rt6_do_redirect(dst, NULL, skb);
2269 dst_release(dst);
2270 }
2271 EXPORT_SYMBOL_GPL(ip6_redirect);
2272
2273 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
2274 u32 mark)
2275 {
2276 const struct ipv6hdr *iph = ipv6_hdr(skb);
2277 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
2278 struct dst_entry *dst;
2279 struct flowi6 fl6;
2280
2281 memset(&fl6, 0, sizeof(fl6));
2282 fl6.flowi6_iif = LOOPBACK_IFINDEX;
2283 fl6.flowi6_oif = oif;
2284 fl6.flowi6_mark = mark;
2285 fl6.daddr = msg->dest;
2286 fl6.saddr = iph->daddr;
2287 fl6.flowi6_uid = sock_net_uid(net, NULL);
2288
2289 dst = ip6_route_redirect(net, &fl6, &iph->saddr);
2290 rt6_do_redirect(dst, NULL, skb);
2291 dst_release(dst);
2292 }
2293
2294 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2295 {
2296 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2297 sk->sk_uid);
2298 }
2299 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2300
2301 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
2302 {
2303 struct net_device *dev = dst->dev;
2304 unsigned int mtu = dst_mtu(dst);
2305 struct net *net = dev_net(dev);
2306
2307 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
2308
2309 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
2310 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
2311
2312 /*
2313 * The maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and the
2314 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
2315 * Returning IPV6_MAXPLEN is also valid and means: "any MSS,
2316 * rely only on pmtu discovery".
2317 */
2318 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
2319 mtu = IPV6_MAXPLEN;
2320 return mtu;
2321 }
2322
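/* Effective MTU of a dst: prefer a learned PMTU, then the RTAX_MTU
 * metric, then the interface's configured IPv6 MTU, and finally
 * subtract any lwtunnel encapsulation headroom.
 */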
2323 static unsigned int ip6_mtu(const struct dst_entry *dst)
2324 {
2325 const struct rt6_info *rt = (const struct rt6_info *)dst;
2326 unsigned int mtu = rt->rt6i_pmtu;
2327 struct inet6_dev *idev;
2328
2329 if (mtu)
2330 goto out;
2331
2332 mtu = dst_metric_raw(dst, RTAX_MTU);
2333 if (mtu)
2334 goto out;
2335
2336 mtu = IPV6_MIN_MTU;
2337
2338 rcu_read_lock();
2339 idev = __in6_dev_get(dst->dev);
2340 if (idev)
2341 mtu = idev->cnf.mtu6;
2342 rcu_read_unlock();
2343
2344 out:
2345 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2346
2347 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
2348 }
2349
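/* Allocate an uncached dst for transmitting an ICMPv6/ndisc packet;
 * it goes on the uncached list so the device reference can still be
 * released properly when the interface disappears.
 */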
2350 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
2351 struct flowi6 *fl6)
2352 {
2353 struct dst_entry *dst;
2354 struct rt6_info *rt;
2355 struct inet6_dev *idev = in6_dev_get(dev);
2356 struct net *net = dev_net(dev);
2357
2358 if (unlikely(!idev))
2359 return ERR_PTR(-ENODEV);
2360
2361 rt = ip6_dst_alloc(net, dev, 0);
2362 if (unlikely(!rt)) {
2363 in6_dev_put(idev);
2364 dst = ERR_PTR(-ENOMEM);
2365 goto out;
2366 }
2367
2368 rt->dst.flags |= DST_HOST;
2369 rt->dst.input = ip6_input;
2370 rt->dst.output = ip6_output;
2371 rt->rt6i_gateway = fl6->daddr;
2372 rt->rt6i_dst.addr = fl6->daddr;
2373 rt->rt6i_dst.plen = 128;
2374 rt->rt6i_idev = idev;
2375 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
2376
2377 /* Add this dst into uncached_list so that rt6_ifdown() can
2378 * do a proper release of the net_device.
2379 */
2380 rt6_uncached_list_add(rt);
2381 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2382
2383 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
2384
2385 out:
2386 return dst;
2387 }
2388
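/* dst garbage collection: do nothing while the minimum interval has
 * not elapsed and we are still under rt_max_size; otherwise run
 * fib6_run_gc() with an "expire" value that grows under pressure and
 * decays according to the configured elasticity.
 */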
2389 static int ip6_dst_gc(struct dst_ops *ops)
2390 {
2391 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
2392 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
2393 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
2394 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2395 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2396 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
2397 int entries;
2398
2399 entries = dst_entries_get_fast(ops);
2400 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
2401 entries <= rt_max_size)
2402 goto out;
2403
2404 net->ipv6.ip6_rt_gc_expire++;
2405 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
2406 entries = dst_entries_get_slow(ops);
2407 if (entries < ops->gc_thresh)
2408 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
2409 out:
2410 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
2411 return entries > rt_max_size;
2412 }
2413
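/* Translate the RTA_METRICS netlink attributes of a route request into
 * the mx6_config array consumed by the fib code, validating each
 * metric type on the way.
 */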
2414 static int ip6_convert_metrics(struct mx6_config *mxc,
2415 const struct fib6_config *cfg)
2416 {
2417 struct net *net = cfg->fc_nlinfo.nl_net;
2418 bool ecn_ca = false;
2419 struct nlattr *nla;
2420 int remaining;
2421 u32 *mp;
2422
2423 if (!cfg->fc_mx)
2424 return 0;
2425
2426 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
2427 if (unlikely(!mp))
2428 return -ENOMEM;
2429
2430 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
2431 int type = nla_type(nla);
2432 u32 val;
2433
2434 if (!type)
2435 continue;
2436 if (unlikely(type > RTAX_MAX))
2437 goto err;
2438
2439 if (type == RTAX_CC_ALGO) {
2440 char tmp[TCP_CA_NAME_MAX];
2441
2442 nla_strlcpy(tmp, nla, sizeof(tmp));
2443 val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
2444 if (val == TCP_CA_UNSPEC)
2445 goto err;
2446 } else {
2447 val = nla_get_u32(nla);
2448 }
2449 if (type == RTAX_HOPLIMIT && val > 255)
2450 val = 255;
2451 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
2452 goto err;
2453
2454 mp[type - 1] = val;
2455 __set_bit(type - 1, mxc->mx_valid);
2456 }
2457
2458 if (ecn_ca) {
2459 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
2460 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
2461 }
2462
2463 mxc->mx = mp;
2464 return 0;
2465 err:
2466 kfree(mp);
2467 return -EINVAL;
2468 }
2469
2470 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2471 struct fib6_config *cfg,
2472 const struct in6_addr *gw_addr)
2473 {
2474 struct flowi6 fl6 = {
2475 .flowi6_oif = cfg->fc_ifindex,
2476 .daddr = *gw_addr,
2477 .saddr = cfg->fc_prefsrc,
2478 };
2479 struct fib6_table *table;
2480 struct rt6_info *rt;
2481 int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
2482
2483 table = fib6_get_table(net, cfg->fc_table);
2484 if (!table)
2485 return NULL;
2486
2487 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2488 flags |= RT6_LOOKUP_F_HAS_SADDR;
2489
2490 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
2491
2492 /* if table lookup failed, fall back to full lookup */
2493 if (rt == net->ipv6.ip6_null_entry) {
2494 ip6_rt_put(rt);
2495 rt = NULL;
2496 }
2497
2498 return rt;
2499 }
2500
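/* Build, but do not insert, an rt6_info from a fib6_config: validate
 * the request, resolve the egress device and table, and set up the
 * reject, gateway and preferred-source cases.
 */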
2501 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
2502 struct netlink_ext_ack *extack)
2503 {
2504 struct net *net = cfg->fc_nlinfo.nl_net;
2505 struct rt6_info *rt = NULL;
2506 struct net_device *dev = NULL;
2507 struct inet6_dev *idev = NULL;
2508 struct fib6_table *table;
2509 int addr_type;
2510 int err = -EINVAL;
2511
2512 /* RTF_PCPU is an internal flag; cannot be set by userspace */
2513 if (cfg->fc_flags & RTF_PCPU) {
2514 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
2515 goto out;
2516 }
2517
2518 /* RTF_CACHE is an internal flag; cannot be set by userspace */
2519 if (cfg->fc_flags & RTF_CACHE) {
2520 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
2521 goto out;
2522 }
2523
2524 if (cfg->fc_dst_len > 128) {
2525 NL_SET_ERR_MSG(extack, "Invalid prefix length");
2526 goto out;
2527 }
2528 if (cfg->fc_src_len > 128) {
2529 NL_SET_ERR_MSG(extack, "Invalid source address length");
2530 goto out;
2531 }
2532 #ifndef CONFIG_IPV6_SUBTREES
2533 if (cfg->fc_src_len) {
2534 NL_SET_ERR_MSG(extack,
2535 "Specifying source address requires IPV6_SUBTREES to be enabled");
2536 goto out;
2537 }
2538 #endif
2539 if (cfg->fc_ifindex) {
2540 err = -ENODEV;
2541 dev = dev_get_by_index(net, cfg->fc_ifindex);
2542 if (!dev)
2543 goto out;
2544 idev = in6_dev_get(dev);
2545 if (!idev)
2546 goto out;
2547 }
2548
2549 if (cfg->fc_metric == 0)
2550 cfg->fc_metric = IP6_RT_PRIO_USER;
2551
2552 err = -ENOBUFS;
2553 if (cfg->fc_nlinfo.nlh &&
2554 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
2555 table = fib6_get_table(net, cfg->fc_table);
2556 if (!table) {
2557 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
2558 table = fib6_new_table(net, cfg->fc_table);
2559 }
2560 } else {
2561 table = fib6_new_table(net, cfg->fc_table);
2562 }
2563
2564 if (!table)
2565 goto out;
2566
2567 rt = ip6_dst_alloc(net, NULL,
2568 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
2569
2570 if (!rt) {
2571 err = -ENOMEM;
2572 goto out;
2573 }
2574
2575 if (cfg->fc_flags & RTF_EXPIRES)
2576 rt6_set_expires(rt, jiffies +
2577 clock_t_to_jiffies(cfg->fc_expires));
2578 else
2579 rt6_clean_expires(rt);
2580
2581 if (cfg->fc_protocol == RTPROT_UNSPEC)
2582 cfg->fc_protocol = RTPROT_BOOT;
2583 rt->rt6i_protocol = cfg->fc_protocol;
2584
2585 addr_type = ipv6_addr_type(&cfg->fc_dst);
2586
2587 if (addr_type & IPV6_ADDR_MULTICAST)
2588 rt->dst.input = ip6_mc_input;
2589 else if (cfg->fc_flags & RTF_LOCAL)
2590 rt->dst.input = ip6_input;
2591 else
2592 rt->dst.input = ip6_forward;
2593
2594 rt->dst.output = ip6_output;
2595
2596 if (cfg->fc_encap) {
2597 struct lwtunnel_state *lwtstate;
2598
2599 err = lwtunnel_build_state(cfg->fc_encap_type,
2600 cfg->fc_encap, AF_INET6, cfg,
2601 &lwtstate, extack);
2602 if (err)
2603 goto out;
2604 rt->dst.lwtstate = lwtstate_get(lwtstate);
2605 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
2606 rt->dst.lwtstate->orig_output = rt->dst.output;
2607 rt->dst.output = lwtunnel_output;
2608 }
2609 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
2610 rt->dst.lwtstate->orig_input = rt->dst.input;
2611 rt->dst.input = lwtunnel_input;
2612 }
2613 }
2614
2615 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
2616 rt->rt6i_dst.plen = cfg->fc_dst_len;
2617 if (rt->rt6i_dst.plen == 128)
2618 rt->dst.flags |= DST_HOST;
2619
2620 #ifdef CONFIG_IPV6_SUBTREES
2621 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
2622 rt->rt6i_src.plen = cfg->fc_src_len;
2623 #endif
2624
2625 rt->rt6i_metric = cfg->fc_metric;
2626
2627 /* We cannot add true routes via loopback here, as they would
2628 result in kernel looping; promote them to reject routes instead.
2629 */
2630 if ((cfg->fc_flags & RTF_REJECT) ||
2631 (dev && (dev->flags & IFF_LOOPBACK) &&
2632 !(addr_type & IPV6_ADDR_LOOPBACK) &&
2633 !(cfg->fc_flags & RTF_LOCAL))) {
2634 /* hold loopback dev/idev if we haven't done so. */
2635 if (dev != net->loopback_dev) {
2636 if (dev) {
2637 dev_put(dev);
2638 in6_dev_put(idev);
2639 }
2640 dev = net->loopback_dev;
2641 dev_hold(dev);
2642 idev = in6_dev_get(dev);
2643 if (!idev) {
2644 err = -ENODEV;
2645 goto out;
2646 }
2647 }
2648 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
2649 switch (cfg->fc_type) {
2650 case RTN_BLACKHOLE:
2651 rt->dst.error = -EINVAL;
2652 rt->dst.output = dst_discard_out;
2653 rt->dst.input = dst_discard;
2654 break;
2655 case RTN_PROHIBIT:
2656 rt->dst.error = -EACCES;
2657 rt->dst.output = ip6_pkt_prohibit_out;
2658 rt->dst.input = ip6_pkt_prohibit;
2659 break;
2660 case RTN_THROW:
2661 case RTN_UNREACHABLE:
2662 default:
2663 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
2664 : (cfg->fc_type == RTN_UNREACHABLE)
2665 ? -EHOSTUNREACH : -ENETUNREACH;
2666 rt->dst.output = ip6_pkt_discard_out;
2667 rt->dst.input = ip6_pkt_discard;
2668 break;
2669 }
2670 goto install_route;
2671 }
2672
2673 if (cfg->fc_flags & RTF_GATEWAY) {
2674 const struct in6_addr *gw_addr;
2675 int gwa_type;
2676
2677 gw_addr = &cfg->fc_gateway;
2678 gwa_type = ipv6_addr_type(gw_addr);
2679
2680 /* if gw_addr is local we will fail to detect this in case the
2681 * address is still TENTATIVE (DAD in progress). rt6_lookup() will
2682 * return the already-added prefix route via the interface that the
2683 * prefix route was assigned to, which might be non-loopback.
2684 */
2685 err = -EINVAL;
2686 if (ipv6_chk_addr_and_flags(net, gw_addr,
2687 gwa_type & IPV6_ADDR_LINKLOCAL ?
2688 dev : NULL, 0, 0)) {
2689 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2690 goto out;
2691 }
2692 rt->rt6i_gateway = *gw_addr;
2693
2694 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
2695 struct rt6_info *grt = NULL;
2696
2697 /* IPv6 strictly forbids using non-link-local
2698 addresses as the nexthop address.
2699 Otherwise, a router will not be able to send redirects.
2700 That is generally good, but in some (rare!) circumstances
2701 (SIT, PtP, NBMA NOARP links) it is handy to allow
2702 some exceptions. --ANK
2703 We allow IPv4-mapped nexthops to support RFC 4798-type
2704 addressing.
2705 */
2706 if (!(gwa_type & (IPV6_ADDR_UNICAST |
2707 IPV6_ADDR_MAPPED))) {
2708 NL_SET_ERR_MSG(extack,
2709 "Invalid gateway address");
2710 goto out;
2711 }
2712
2713 if (cfg->fc_table) {
2714 grt = ip6_nh_lookup_table(net, cfg, gw_addr);
2715
2716 if (grt) {
2717 if (grt->rt6i_flags & RTF_GATEWAY ||
2718 (dev && dev != grt->dst.dev)) {
2719 ip6_rt_put(grt);
2720 grt = NULL;
2721 }
2722 }
2723 }
2724
2725 if (!grt)
2726 grt = rt6_lookup(net, gw_addr, NULL,
2727 cfg->fc_ifindex, 1);
2728
2729 err = -EHOSTUNREACH;
2730 if (!grt)
2731 goto out;
2732 if (dev) {
2733 if (dev != grt->dst.dev) {
2734 ip6_rt_put(grt);
2735 goto out;
2736 }
2737 } else {
2738 dev = grt->dst.dev;
2739 idev = grt->rt6i_idev;
2740 dev_hold(dev);
2741 in6_dev_hold(grt->rt6i_idev);
2742 }
2743 if (!(grt->rt6i_flags & RTF_GATEWAY))
2744 err = 0;
2745 ip6_rt_put(grt);
2746
2747 if (err)
2748 goto out;
2749 }
2750 err = -EINVAL;
2751 if (!dev) {
2752 NL_SET_ERR_MSG(extack, "Egress device not specified");
2753 goto out;
2754 } else if (dev->flags & IFF_LOOPBACK) {
2755 NL_SET_ERR_MSG(extack,
2756 "Egress device can not be loopback device for this route");
2757 goto out;
2758 }
2759 }
2760
2761 err = -ENODEV;
2762 if (!dev)
2763 goto out;
2764
2765 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
2766 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
2767 NL_SET_ERR_MSG(extack, "Invalid source address");
2768 err = -EINVAL;
2769 goto out;
2770 }
2771 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
2772 rt->rt6i_prefsrc.plen = 128;
2773 } else
2774 rt->rt6i_prefsrc.plen = 0;
2775
2776 rt->rt6i_flags = cfg->fc_flags;
2777
2778 install_route:
2779 rt->dst.dev = dev;
2780 rt->rt6i_idev = idev;
2781 rt->rt6i_table = table;
2782
2783 cfg->fc_nlinfo.nl_net = dev_net(dev);
2784
2785 return rt;
2786 out:
2787 if (dev)
2788 dev_put(dev);
2789 if (idev)
2790 in6_dev_put(idev);
2791 if (rt)
2792 dst_release_immediate(&rt->dst);
2793
2794 return ERR_PTR(err);
2795 }
2796
2797 int ip6_route_add(struct fib6_config *cfg,
2798 struct netlink_ext_ack *extack)
2799 {
2800 struct mx6_config mxc = { .mx = NULL, };
2801 struct rt6_info *rt;
2802 int err;
2803
2804 rt = ip6_route_info_create(cfg, extack);
2805 if (IS_ERR(rt)) {
2806 err = PTR_ERR(rt);
2807 rt = NULL;
2808 goto out;
2809 }
2810
2811 err = ip6_convert_metrics(&mxc, cfg);
2812 if (err)
2813 goto out;
2814
2815 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
2816
2817 kfree(mxc.mx);
2818
2819 return err;
2820 out:
2821 if (rt)
2822 dst_release_immediate(&rt->dst);
2823
2824 return err;
2825 }
2826
2827 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2828 {
2829 int err;
2830 struct fib6_table *table;
2831 struct net *net = dev_net(rt->dst.dev);
2832
2833 if (rt == net->ipv6.ip6_null_entry) {
2834 err = -ENOENT;
2835 goto out;
2836 }
2837
2838 table = rt->rt6i_table;
2839 spin_lock_bh(&table->tb6_lock);
2840 err = fib6_del(rt, info);
2841 spin_unlock_bh(&table->tb6_lock);
2842
2843 out:
2844 ip6_rt_put(rt);
2845 return err;
2846 }
2847
2848 int ip6_del_rt(struct rt6_info *rt)
2849 {
2850 struct nl_info info = {
2851 .nl_net = dev_net(rt->dst.dev),
2852 };
2853 return __ip6_del_rt(rt, &info);
2854 }
2855
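/* Delete a multipath route and all of its siblings under one table
 * lock, sending a single RTM_DELROUTE notification that covers every
 * nexthop.
 */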
2856 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2857 {
2858 struct nl_info *info = &cfg->fc_nlinfo;
2859 struct net *net = info->nl_net;
2860 struct sk_buff *skb = NULL;
2861 struct fib6_table *table;
2862 int err = -ENOENT;
2863
2864 if (rt == net->ipv6.ip6_null_entry)
2865 goto out_put;
2866 table = rt->rt6i_table;
2867 spin_lock_bh(&table->tb6_lock);
2868
2869 if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
2870 struct rt6_info *sibling, *next_sibling;
2871
2872 /* prefer to send a single notification with all hops */
2873 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
2874 if (skb) {
2875 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2876
2877 if (rt6_fill_node(net, skb, rt,
2878 NULL, NULL, 0, RTM_DELROUTE,
2879 info->portid, seq, 0) < 0) {
2880 kfree_skb(skb);
2881 skb = NULL;
2882 } else
2883 info->skip_notify = 1;
2884 }
2885
2886 list_for_each_entry_safe(sibling, next_sibling,
2887 &rt->rt6i_siblings,
2888 rt6i_siblings) {
2889 err = fib6_del(sibling, info);
2890 if (err)
2891 goto out_unlock;
2892 }
2893 }
2894
2895 err = fib6_del(rt, info);
2896 out_unlock:
2897 spin_unlock_bh(&table->tb6_lock);
2898 out_put:
2899 ip6_rt_put(rt);
2900
2901 if (skb) {
2902 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2903 info->nlh, gfp_any());
2904 }
2905 return err;
2906 }
2907
2908 static int ip6_route_del(struct fib6_config *cfg,
2909 struct netlink_ext_ack *extack)
2910 {
2911 struct rt6_info *rt, *rt_cache;
2912 struct fib6_table *table;
2913 struct fib6_node *fn;
2914 int err = -ESRCH;
2915
2916 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2917 if (!table) {
2918 NL_SET_ERR_MSG(extack, "FIB table does not exist");
2919 return err;
2920 }
2921
2922 rcu_read_lock();
2923
2924 fn = fib6_locate(&table->tb6_root,
2925 &cfg->fc_dst, cfg->fc_dst_len,
2926 &cfg->fc_src, cfg->fc_src_len,
2927 !(cfg->fc_flags & RTF_CACHE));
2928
2929 if (fn) {
2930 for_each_fib6_node_rt_rcu(fn) {
2931 if (cfg->fc_flags & RTF_CACHE) {
2932 rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
2933 &cfg->fc_src);
2934 if (!rt_cache)
2935 continue;
2936 rt = rt_cache;
2937 }
2938 if (cfg->fc_ifindex &&
2939 (!rt->dst.dev ||
2940 rt->dst.dev->ifindex != cfg->fc_ifindex))
2941 continue;
2942 if (cfg->fc_flags & RTF_GATEWAY &&
2943 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2944 continue;
2945 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2946 continue;
2947 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2948 continue;
2949 if (!dst_hold_safe(&rt->dst))
2950 break;
2951 rcu_read_unlock();
2952
2953 /* if a gateway was specified, only delete the one hop */
2954 if (cfg->fc_flags & RTF_GATEWAY)
2955 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2956
2957 return __ip6_del_rt_siblings(rt, cfg);
2958 }
2959 }
2960 rcu_read_unlock();
2961
2962 return err;
2963 }
2964
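/* Process an ICMPv6 Redirect: validate the message and its ndisc
 * options, update the neighbour entry for the new first hop, then
 * install a cached clone of the route pointing at the new gateway.
 */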
2965 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2966 {
2967 struct netevent_redirect netevent;
2968 struct rt6_info *rt, *nrt = NULL;
2969 struct ndisc_options ndopts;
2970 struct inet6_dev *in6_dev;
2971 struct neighbour *neigh;
2972 struct rd_msg *msg;
2973 int optlen, on_link;
2974 u8 *lladdr;
2975
2976 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2977 optlen -= sizeof(*msg);
2978
2979 if (optlen < 0) {
2980 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2981 return;
2982 }
2983
2984 msg = (struct rd_msg *)icmp6_hdr(skb);
2985
2986 if (ipv6_addr_is_multicast(&msg->dest)) {
2987 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2988 return;
2989 }
2990
2991 on_link = 0;
2992 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2993 on_link = 1;
2994 } else if (ipv6_addr_type(&msg->target) !=
2995 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2996 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2997 return;
2998 }
2999
3000 in6_dev = __in6_dev_get(skb->dev);
3001 if (!in6_dev)
3002 return;
3003 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
3004 return;
3005
3006 /* RFC2461 8.1:
3007 * The IP source address of the Redirect MUST be the same as the current
3008 * first-hop router for the specified ICMP Destination Address.
3009 */
3010
3011 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3012 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3013 return;
3014 }
3015
3016 lladdr = NULL;
3017 if (ndopts.nd_opts_tgt_lladdr) {
3018 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3019 skb->dev);
3020 if (!lladdr) {
3021 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3022 return;
3023 }
3024 }
3025
3026 rt = (struct rt6_info *) dst;
3027 if (rt->rt6i_flags & RTF_REJECT) {
3028 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
3029 return;
3030 }
3031
3032 /* Redirect received -> path was valid.
3033 * Look, redirects are sent only in response to data packets,
3034 * so this nexthop is apparently reachable. --ANK
3035 */
3036 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
3037
3038 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
3039 if (!neigh)
3040 return;
3041
3042 /*
3043 * We have finally decided to accept it.
3044 */
3045
3046 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
3047 NEIGH_UPDATE_F_WEAK_OVERRIDE|
3048 NEIGH_UPDATE_F_OVERRIDE|
3049 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
3050 NEIGH_UPDATE_F_ISROUTER)),
3051 NDISC_REDIRECT, &ndopts);
3052
3053 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
3054 if (!nrt)
3055 goto out;
3056
3057 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3058 if (on_link)
3059 nrt->rt6i_flags &= ~RTF_GATEWAY;
3060
3061 nrt->rt6i_protocol = RTPROT_REDIRECT;
3062 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
3063
3064 /* No need to remove rt from the exception table if rt is
3065 * a cached route because rt6_insert_exception() will
3066 * take care of it.
3067 */
3068 if (rt6_insert_exception(nrt, rt)) {
3069 dst_release_immediate(&nrt->dst);
3070 goto out;
3071 }
3072
3073 netevent.old = &rt->dst;
3074 netevent.new = &nrt->dst;
3075 netevent.daddr = &msg->dest;
3076 netevent.neigh = neigh;
3077 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3078
3079 out:
3080 neigh_release(neigh);
3081 }
3082
3083 /*
3084 * Misc support functions
3085 */
3086
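/* Tie a clone to its parent route: the clone holds a reference on the
 * parent through dst.from and shares the parent's metrics.
 */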
3087 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
3088 {
3089 BUG_ON(from->dst.from);
3090
3091 rt->rt6i_flags &= ~RTF_EXPIRES;
3092 dst_hold(&from->dst);
3093 rt->dst.from = &from->dst;
3094 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
3095 }
3096
3097 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
3098 {
3099 rt->dst.input = ort->dst.input;
3100 rt->dst.output = ort->dst.output;
3101 rt->rt6i_dst = ort->rt6i_dst;
3102 rt->dst.error = ort->dst.error;
3103 rt->rt6i_idev = ort->rt6i_idev;
3104 if (rt->rt6i_idev)
3105 in6_dev_hold(rt->rt6i_idev);
3106 rt->dst.lastuse = jiffies;
3107 rt->rt6i_gateway = ort->rt6i_gateway;
3108 rt->rt6i_flags = ort->rt6i_flags;
3109 rt6_set_from(rt, ort);
3110 rt->rt6i_metric = ort->rt6i_metric;
3111 #ifdef CONFIG_IPV6_SUBTREES
3112 rt->rt6i_src = ort->rt6i_src;
3113 #endif
3114 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
3115 rt->rt6i_table = ort->rt6i_table;
3116 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
3117 }
3118
3119 #ifdef CONFIG_IPV6_ROUTE_INFO
3120 static struct rt6_info *rt6_get_route_info(struct net *net,
3121 const struct in6_addr *prefix, int prefixlen,
3122 const struct in6_addr *gwaddr,
3123 struct net_device *dev)
3124 {
3125 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3126 int ifindex = dev->ifindex;
3127 struct fib6_node *fn;
3128 struct rt6_info *rt = NULL;
3129 struct fib6_table *table;
3130
3131 table = fib6_get_table(net, tb_id);
3132 if (!table)
3133 return NULL;
3134
3135 rcu_read_lock();
3136 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3137 if (!fn)
3138 goto out;
3139
3140 for_each_fib6_node_rt_rcu(fn) {
3141 if (rt->dst.dev->ifindex != ifindex)
3142 continue;
3143 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
3144 continue;
3145 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
3146 continue;
3147 ip6_hold_safe(NULL, &rt, false);
3148 break;
3149 }
3150 out:
3151 rcu_read_unlock();
3152 return rt;
3153 }
3154
3155 static struct rt6_info *rt6_add_route_info(struct net *net,
3156 const struct in6_addr *prefix, int prefixlen,
3157 const struct in6_addr *gwaddr,
3158 struct net_device *dev,
3159 unsigned int pref)
3160 {
3161 struct fib6_config cfg = {
3162 .fc_metric = IP6_RT_PRIO_USER,
3163 .fc_ifindex = dev->ifindex,
3164 .fc_dst_len = prefixlen,
3165 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3166 RTF_UP | RTF_PREF(pref),
3167 .fc_protocol = RTPROT_RA,
3168 .fc_nlinfo.portid = 0,
3169 .fc_nlinfo.nlh = NULL,
3170 .fc_nlinfo.nl_net = net,
3171 };
3172
3173 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3174 cfg.fc_dst = *prefix;
3175 cfg.fc_gateway = *gwaddr;
3176
3177 /* We should treat it as a default route if prefix length is 0. */
3178 if (!prefixlen)
3179 cfg.fc_flags |= RTF_DEFAULT;
3180
3181 ip6_route_add(&cfg, NULL);
3182
3183 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3184 }
3185 #endif
3186
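/* Find the RA-added default route via gateway addr on dev, taking a
 * reference on it if one exists.
 */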
3187 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
3188 {
3189 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3190 struct rt6_info *rt;
3191 struct fib6_table *table;
3192
3193 table = fib6_get_table(dev_net(dev), tb_id);
3194 if (!table)
3195 return NULL;
3196
3197 rcu_read_lock();
3198 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3199 if (dev == rt->dst.dev &&
3200 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3201 ipv6_addr_equal(&rt->rt6i_gateway, addr))
3202 break;
3203 }
3204 if (rt)
3205 ip6_hold_safe(NULL, &rt, false);
3206 rcu_read_unlock();
3207 return rt;
3208 }
3209
3210 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
3211 struct net_device *dev,
3212 unsigned int pref)
3213 {
3214 struct fib6_config cfg = {
3215 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3216 .fc_metric = IP6_RT_PRIO_USER,
3217 .fc_ifindex = dev->ifindex,
3218 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3219 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3220 .fc_protocol = RTPROT_RA,
3221 .fc_nlinfo.portid = 0,
3222 .fc_nlinfo.nlh = NULL,
3223 .fc_nlinfo.nl_net = dev_net(dev),
3224 };
3225
3226 cfg.fc_gateway = *gwaddr;
3227
3228 if (!ip6_route_add(&cfg, NULL)) {
3229 struct fib6_table *table;
3230
3231 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3232 if (table)
3233 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3234 }
3235
3236 return rt6_get_dflt_router(gwaddr, dev);
3237 }
3238
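/* Walk the table under RCU and delete autoconfigured default routes;
 * the walk restarts from the top each time because the RCU lock must
 * be dropped before calling ip6_del_rt().
 */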
3239 static void __rt6_purge_dflt_routers(struct fib6_table *table)
3240 {
3241 struct rt6_info *rt;
3242
3243 restart:
3244 rcu_read_lock();
3245 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3246 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3247 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
3248 if (dst_hold_safe(&rt->dst)) {
3249 rcu_read_unlock();
3250 ip6_del_rt(rt);
3251 } else {
3252 rcu_read_unlock();
3253 }
3254 goto restart;
3255 }
3256 }
3257 rcu_read_unlock();
3258
3259 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3260 }
3261
3262 void rt6_purge_dflt_routers(struct net *net)
3263 {
3264 struct fib6_table *table;
3265 struct hlist_head *head;
3266 unsigned int h;
3267
3268 rcu_read_lock();
3269
3270 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3271 head = &net->ipv6.fib_table_hash[h];
3272 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3273 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
3274 __rt6_purge_dflt_routers(table);
3275 }
3276 }
3277
3278 rcu_read_unlock();
3279 }
3280
3281 static void rtmsg_to_fib6_config(struct net *net,
3282 struct in6_rtmsg *rtmsg,
3283 struct fib6_config *cfg)
3284 {
3285 memset(cfg, 0, sizeof(*cfg));
3286
3287 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3288 : RT6_TABLE_MAIN;
3289 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
3290 cfg->fc_metric = rtmsg->rtmsg_metric;
3291 cfg->fc_expires = rtmsg->rtmsg_info;
3292 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
3293 cfg->fc_src_len = rtmsg->rtmsg_src_len;
3294 cfg->fc_flags = rtmsg->rtmsg_flags;
3295
3296 cfg->fc_nlinfo.nl_net = net;
3297
3298 cfg->fc_dst = rtmsg->rtmsg_dst;
3299 cfg->fc_src = rtmsg->rtmsg_src;
3300 cfg->fc_gateway = rtmsg->rtmsg_gateway;
3301 }
3302
3303 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3304 {
3305 struct fib6_config cfg;
3306 struct in6_rtmsg rtmsg;
3307 int err;
3308
3309 switch (cmd) {
3310 case SIOCADDRT: /* Add a route */
3311 case SIOCDELRT: /* Delete a route */
3312 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3313 return -EPERM;
3314 err = copy_from_user(&rtmsg, arg,
3315 sizeof(struct in6_rtmsg));
3316 if (err)
3317 return -EFAULT;
3318
3319 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
3320
3321 rtnl_lock();
3322 switch (cmd) {
3323 case SIOCADDRT:
3324 err = ip6_route_add(&cfg, NULL);
3325 break;
3326 case SIOCDELRT:
3327 err = ip6_route_del(&cfg, NULL);
3328 break;
3329 default:
3330 err = -EINVAL;
3331 }
3332 rtnl_unlock();
3333
3334 return err;
3335 }
3336
3337 return -EINVAL;
3338 }
3339
3340 /*
3341 * Drop the packet on the floor
3342 */
3343
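/* Account the drop in the appropriate MIB counter (distinguishing
 * unspecified destinations on input), send an ICMPv6 Destination
 * Unreachable with the given code, and free the skb.
 */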
3344 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
3345 {
3346 int type;
3347 struct dst_entry *dst = skb_dst(skb);
3348 switch (ipstats_mib_noroutes) {
3349 case IPSTATS_MIB_INNOROUTES:
3350 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
3351 if (type == IPV6_ADDR_ANY) {
3352 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3353 IPSTATS_MIB_INADDRERRORS);
3354 break;
3355 }
3356 /* FALLTHROUGH */
3357 case IPSTATS_MIB_OUTNOROUTES:
3358 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3359 ipstats_mib_noroutes);
3360 break;
3361 }
3362 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
3363 kfree_skb(skb);
3364 return 0;
3365 }
3366
3367 static int ip6_pkt_discard(struct sk_buff *skb)
3368 {
3369 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
3370 }
3371
3372 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3373 {
3374 skb->dev = skb_dst(skb)->dev;
3375 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
3376 }
3377
3378 static int ip6_pkt_prohibit(struct sk_buff *skb)
3379 {
3380 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
3381 }
3382
3383 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3384 {
3385 skb->dev = skb_dst(skb)->dev;
3386 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
3387 }
3388
3389 /*
3390 * Allocate a dst for a local (unicast / anycast) address.
3391 */
3392
3393 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
3394 const struct in6_addr *addr,
3395 bool anycast)
3396 {
3397 u32 tb_id;
3398 struct net *net = dev_net(idev->dev);
3399 struct net_device *dev = idev->dev;
3400 struct rt6_info *rt;
3401
3402 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
3403 if (!rt)
3404 return ERR_PTR(-ENOMEM);
3405
3406 in6_dev_hold(idev);
3407
3408 rt->dst.flags |= DST_HOST;
3409 rt->dst.input = ip6_input;
3410 rt->dst.output = ip6_output;
3411 rt->rt6i_idev = idev;
3412
3413 rt->rt6i_protocol = RTPROT_KERNEL;
3414 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
3415 if (anycast)
3416 rt->rt6i_flags |= RTF_ANYCAST;
3417 else
3418 rt->rt6i_flags |= RTF_LOCAL;
3419
3420 rt->rt6i_gateway = *addr;
3421 rt->rt6i_dst.addr = *addr;
3422 rt->rt6i_dst.plen = 128;
3423 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
3424 rt->rt6i_table = fib6_get_table(net, tb_id);
3425
3426 return rt;
3427 }
3428
3429 /* remove deleted ip from prefsrc entries */
3430 struct arg_dev_net_ip {
3431 struct net_device *dev;
3432 struct net *net;
3433 struct in6_addr *addr;
3434 };
3435
3436 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
3437 {
3438 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3439 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3440 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3441
3442 if (((void *)rt->dst.dev == dev || !dev) &&
3443 rt != net->ipv6.ip6_null_entry &&
3444 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
3445 spin_lock_bh(&rt6_exception_lock);
3446 /* remove prefsrc entry */
3447 rt->rt6i_prefsrc.plen = 0;
3448 /* need to update cache as well */
3449 rt6_exceptions_remove_prefsrc(rt);
3450 spin_unlock_bh(&rt6_exception_lock);
3451 }
3452 return 0;
3453 }
3454
3455 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
3456 {
3457 struct net *net = dev_net(ifp->idev->dev);
3458 struct arg_dev_net_ip adni = {
3459 .dev = ifp->idev->dev,
3460 .net = net,
3461 .addr = &ifp->addr,
3462 };
3463 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
3464 }
3465
3466 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
3467
3468 /* Remove routers and update dst entries when a gateway turns into a host. */
3469 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
3470 {
3471 struct in6_addr *gateway = (struct in6_addr *)arg;
3472
3473 if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
3474 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
3475 return -1;
3476 }
3477
3478 /* Further clean up cached routes in exception table.
3479 * This is needed because a cached route may have a different
3480 * gateway than its 'parent' in the case of an ip redirect.
3481 */
3482 rt6_exceptions_clean_tohost(rt, gateway);
3483
3484 return 0;
3485 }
3486
3487 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
3488 {
3489 fib6_clean_all(net, fib6_clean_tohost, gateway);
3490 }
3491
3492 struct arg_dev_net {
3493 struct net_device *dev;
3494 struct net *net;
3495 };
3496
3497 /* called with write lock held for table with rt */
3498 static int fib6_ifdown(struct rt6_info *rt, void *arg)
3499 {
3500 const struct arg_dev_net *adn = arg;
3501 const struct net_device *dev = adn->dev;
3502
3503 if ((rt->dst.dev == dev || !dev) &&
3504 rt != adn->net->ipv6.ip6_null_entry &&
3505 (rt->rt6i_nsiblings == 0 ||
3506 (dev && netdev_unregistering(dev)) ||
3507 !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
3508 return -1;
3509
3510 return 0;
3511 }
3512
3513 void rt6_ifdown(struct net *net, struct net_device *dev)
3514 {
3515 struct arg_dev_net adn = {
3516 .dev = dev,
3517 .net = net,
3518 };
3519
3520 fib6_clean_all(net, fib6_ifdown, &adn);
3521 if (dev)
3522 rt6_uncached_list_flush_dev(net, dev);
3523 }
3524
3525 struct rt6_mtu_change_arg {
3526 struct net_device *dev;
3527 unsigned int mtu;
3528 };
3529
3530 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
3531 {
3532 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
3533 struct inet6_dev *idev;
3534
3535 /* In IPv6, pmtu discovery is not optional,
3536 so the RTAX_MTU lock cannot disable it.
3537 We still use this lock to block changes
3538 caused by addrconf/ndisc.
3539 */
3540
3541 idev = __in6_dev_get(arg->dev);
3542 if (!idev)
3543 return 0;
3544
3545 /* For an administrative MTU increase, there is no way to discover
3546 an IPv6 PMTU increase, so the PMTU must be updated here.
3547 Since RFC 1981 doesn't cover administrative MTU increases,
3548 updating the PMTU on increase is a MUST (e.g. jumbo frames).
3549 */
3550 if (rt->dst.dev == arg->dev &&
3551 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
3552 spin_lock_bh(&rt6_exception_lock);
3553 if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
3554 rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
3555 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
3556 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
3557 spin_unlock_bh(&rt6_exception_lock);
3558 }
3559 return 0;
3560 }
3561
3562 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
3563 {
3564 struct rt6_mtu_change_arg arg = {
3565 .dev = dev,
3566 .mtu = mtu,
3567 };
3568
3569 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
3570 }
3571
3572 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3573 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
3574 [RTA_OIF] = { .type = NLA_U32 },
3575 [RTA_IIF] = { .type = NLA_U32 },
3576 [RTA_PRIORITY] = { .type = NLA_U32 },
3577 [RTA_METRICS] = { .type = NLA_NESTED },
3578 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
3579 [RTA_PREF] = { .type = NLA_U8 },
3580 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
3581 [RTA_ENCAP] = { .type = NLA_NESTED },
3582 [RTA_EXPIRES] = { .type = NLA_U32 },
3583 [RTA_UID] = { .type = NLA_U32 },
3584 [RTA_MARK] = { .type = NLA_U32 },
3585 };
3586
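/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * fib6_config, validating the attributes against rtm_ipv6_policy.
 */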
3587 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
3588 struct fib6_config *cfg,
3589 struct netlink_ext_ack *extack)
3590 {
3591 struct rtmsg *rtm;
3592 struct nlattr *tb[RTA_MAX+1];
3593 unsigned int pref;
3594 int err;
3595
3596 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
3597 NULL);
3598 if (err < 0)
3599 goto errout;
3600
3601 err = -EINVAL;
3602 rtm = nlmsg_data(nlh);
3603 memset(cfg, 0, sizeof(*cfg));
3604
3605 cfg->fc_table = rtm->rtm_table;
3606 cfg->fc_dst_len = rtm->rtm_dst_len;
3607 cfg->fc_src_len = rtm->rtm_src_len;
3608 cfg->fc_flags = RTF_UP;
3609 cfg->fc_protocol = rtm->rtm_protocol;
3610 cfg->fc_type = rtm->rtm_type;
3611
3612 if (rtm->rtm_type == RTN_UNREACHABLE ||
3613 rtm->rtm_type == RTN_BLACKHOLE ||
3614 rtm->rtm_type == RTN_PROHIBIT ||
3615 rtm->rtm_type == RTN_THROW)
3616 cfg->fc_flags |= RTF_REJECT;
3617
3618 if (rtm->rtm_type == RTN_LOCAL)
3619 cfg->fc_flags |= RTF_LOCAL;
3620
3621 if (rtm->rtm_flags & RTM_F_CLONED)
3622 cfg->fc_flags |= RTF_CACHE;
3623
3624 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
3625 cfg->fc_nlinfo.nlh = nlh;
3626 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
3627
3628 if (tb[RTA_GATEWAY]) {
3629 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
3630 cfg->fc_flags |= RTF_GATEWAY;
3631 }
3632
3633 if (tb[RTA_DST]) {
3634 int plen = (rtm->rtm_dst_len + 7) >> 3;
3635
3636 if (nla_len(tb[RTA_DST]) < plen)
3637 goto errout;
3638
3639 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
3640 }
3641
3642 if (tb[RTA_SRC]) {
3643 int plen = (rtm->rtm_src_len + 7) >> 3;
3644
3645 if (nla_len(tb[RTA_SRC]) < plen)
3646 goto errout;
3647
3648 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
3649 }
3650
3651 if (tb[RTA_PREFSRC])
3652 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
3653
3654 if (tb[RTA_OIF])
3655 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
3656
3657 if (tb[RTA_PRIORITY])
3658 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
3659
3660 if (tb[RTA_METRICS]) {
3661 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
3662 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
3663 }
3664
3665 if (tb[RTA_TABLE])
3666 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
3667
3668 if (tb[RTA_MULTIPATH]) {
3669 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
3670 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
3671
3672 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
3673 cfg->fc_mp_len, extack);
3674 if (err < 0)
3675 goto errout;
3676 }
3677
3678 if (tb[RTA_PREF]) {
3679 pref = nla_get_u8(tb[RTA_PREF]);
3680 if (pref != ICMPV6_ROUTER_PREF_LOW &&
3681 pref != ICMPV6_ROUTER_PREF_HIGH)
3682 pref = ICMPV6_ROUTER_PREF_MEDIUM;
3683 cfg->fc_flags |= RTF_PREF(pref);
3684 }
3685
3686 if (tb[RTA_ENCAP])
3687 cfg->fc_encap = tb[RTA_ENCAP];
3688
3689 if (tb[RTA_ENCAP_TYPE]) {
3690 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
3691
3692 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
3693 if (err < 0)
3694 goto errout;
3695 }
3696
3697 if (tb[RTA_EXPIRES]) {
3698 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
3699
3700 if (addrconf_finite_timeout(timeout)) {
3701 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
3702 cfg->fc_flags |= RTF_EXPIRES;
3703 }
3704 }
3705
3706 err = 0;
3707 errout:
3708 return err;
3709 }
3710
3711 struct rt6_nh {
3712 struct rt6_info *rt6_info;
3713 struct fib6_config r_cfg;
3714 struct mx6_config mxc;
3715 struct list_head next;
3716 };
3717
3718 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
3719 {
3720 struct rt6_nh *nh;
3721
3722 list_for_each_entry(nh, rt6_nh_list, next) {
3723 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
3724 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
3725 nh->r_cfg.fc_ifindex);
3726 }
3727 }
3728
3729 static int ip6_route_info_append(struct list_head *rt6_nh_list,
3730 struct rt6_info *rt, struct fib6_config *r_cfg)
3731 {
3732 struct rt6_nh *nh;
3733 int err = -EEXIST;
3734
3735 list_for_each_entry(nh, rt6_nh_list, next) {
3736 /* check if rt6_info already exists */
3737 if (rt6_duplicate_nexthop(nh->rt6_info, rt))
3738 return err;
3739 }
3740
3741 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
3742 if (!nh)
3743 return -ENOMEM;
3744 nh->rt6_info = rt;
3745 err = ip6_convert_metrics(&nh->mxc, r_cfg);
3746 if (err) {
3747 kfree(nh);
3748 return err;
3749 }
3750 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
3751 list_add_tail(&nh->next, rt6_nh_list);
3752
3753 return 0;
3754 }
3755
3756 static void ip6_route_mpath_notify(struct rt6_info *rt,
3757 struct rt6_info *rt_last,
3758 struct nl_info *info,
3759 __u16 nlflags)
3760 {
3761 /* if this is an APPEND route, then rt points to the first route
3762 * inserted and rt_last points to the last route inserted. Userspace
3763 * wants a consistent dump of the route which starts at the first
3764 * nexthop. Since sibling routes are always added at the end of
3765 * the list, find the first sibling of the last route appended.
3766 */
3767 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
3768 rt = list_first_entry(&rt_last->rt6i_siblings,
3769 struct rt6_info,
3770 rt6i_siblings);
3771 }
3772
3773 if (rt)
3774 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
3775 }
3776
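/* Add a multipath route in two phases: first build an rt6_nh list with
 * one rt6_info per nexthop, then insert them one by one, rolling back
 * the already-inserted routes if any later insertion fails.
 */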
3777 static int ip6_route_multipath_add(struct fib6_config *cfg,
3778 struct netlink_ext_ack *extack)
3779 {
3780 struct rt6_info *rt_notif = NULL, *rt_last = NULL;
3781 struct nl_info *info = &cfg->fc_nlinfo;
3782 struct fib6_config r_cfg;
3783 struct rtnexthop *rtnh;
3784 struct rt6_info *rt;
3785 struct rt6_nh *err_nh;
3786 struct rt6_nh *nh, *nh_safe;
3787 __u16 nlflags;
3788 int remaining;
3789 int attrlen;
3790 int err = 1;
3791 int nhn = 0;
3792 int replace = (cfg->fc_nlinfo.nlh &&
3793 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
3794 LIST_HEAD(rt6_nh_list);
3795
3796 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
3797 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
3798 nlflags |= NLM_F_APPEND;
3799
3800 remaining = cfg->fc_mp_len;
3801 rtnh = (struct rtnexthop *)cfg->fc_mp;
3802
3803 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
3804 * rt6_info structs per nexthop
3805 */
3806 while (rtnh_ok(rtnh, remaining)) {
3807 memcpy(&r_cfg, cfg, sizeof(*cfg));
3808 if (rtnh->rtnh_ifindex)
3809 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3810
3811 attrlen = rtnh_attrlen(rtnh);
3812 if (attrlen > 0) {
3813 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3814
3815 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3816 if (nla) {
3817 r_cfg.fc_gateway = nla_get_in6_addr(nla);
3818 r_cfg.fc_flags |= RTF_GATEWAY;
3819 }
3820 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
3821 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
3822 if (nla)
3823 r_cfg.fc_encap_type = nla_get_u16(nla);
3824 }
3825
3826 rt = ip6_route_info_create(&r_cfg, extack);
3827 if (IS_ERR(rt)) {
3828 err = PTR_ERR(rt);
3829 rt = NULL;
3830 goto cleanup;
3831 }
3832
3833 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
3834 if (err) {
3835 dst_release_immediate(&rt->dst);
3836 goto cleanup;
3837 }
3838
3839 rtnh = rtnh_next(rtnh, &remaining);
3840 }
3841
3842 /* For add and replace, send one notification with all nexthops.
3843 * Skip the notification in fib6_add_rt2node and send one with
3844 * the full route when done.
3845 */
3846 info->skip_notify = 1;
3847
3848 err_nh = NULL;
3849 list_for_each_entry(nh, &rt6_nh_list, next) {
3850 rt_last = nh->rt6_info;
3851 err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
3852 /* save reference to first route for notification */
3853 if (!rt_notif && !err)
3854 rt_notif = nh->rt6_info;
3855
3856 /* nh->rt6_info is used or freed at this point, reset to NULL */
3857 nh->rt6_info = NULL;
3858 if (err) {
3859 if (replace && nhn)
3860 ip6_print_replace_route_err(&rt6_nh_list);
3861 err_nh = nh;
3862 goto add_errout;
3863 }
3864
3865 /* Because each route is added as a single route, we remove
3866 * these flags after the first nexthop: if there is a collision,
3867 * we have already failed to add the first nexthop
3868 * (fib6_add_rt2node() has rejected it); when replacing, the old
3869 * nexthops have been replaced by the first new one, and the rest
3870 * should be appended to it.
3871 */
3872 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3873 NLM_F_REPLACE);
3874 nhn++;
3875 }
3876
3877 /* success ... tell user about new route */
3878 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3879 goto cleanup;
3880
3881 add_errout:
3882 /* send notification for routes that were added so that
3883 * the delete notifications sent by ip6_route_del are
3884 * coherent
3885 */
3886 if (rt_notif)
3887 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3888
3889 /* Delete routes that were already added */
3890 list_for_each_entry(nh, &rt6_nh_list, next) {
3891 if (err_nh == nh)
3892 break;
3893 ip6_route_del(&nh->r_cfg, extack);
3894 }
3895
3896 cleanup:
3897 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
3898 if (nh->rt6_info)
3899 dst_release_immediate(&nh->rt6_info->dst);
3900 kfree(nh->mxc.mx);
3901 list_del(&nh->next);
3902 kfree(nh);
3903 }
3904
3905 return err;
3906 }
3907
3908 static int ip6_route_multipath_del(struct fib6_config *cfg,
3909 struct netlink_ext_ack *extack)
3910 {
3911 struct fib6_config r_cfg;
3912 struct rtnexthop *rtnh;
3913 int remaining;
3914 int attrlen;
3915 int err = 1, last_err = 0;
3916
3917 remaining = cfg->fc_mp_len;
3918 rtnh = (struct rtnexthop *)cfg->fc_mp;
3919
3920 /* Parse a Multipath Entry */
3921 while (rtnh_ok(rtnh, remaining)) {
3922 memcpy(&r_cfg, cfg, sizeof(*cfg));
3923 if (rtnh->rtnh_ifindex)
3924 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3925
3926 attrlen = rtnh_attrlen(rtnh);
3927 if (attrlen > 0) {
3928 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3929
3930 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3931 if (nla) {
3932 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
3933 r_cfg.fc_flags |= RTF_GATEWAY;
3934 }
3935 }
3936 err = ip6_route_del(&r_cfg, extack);
3937 if (err)
3938 last_err = err;
3939
3940 rtnh = rtnh_next(rtnh, &remaining);
3941 }
3942
3943 return last_err;
3944 }
3945
3946 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3947 struct netlink_ext_ack *extack)
3948 {
3949 struct fib6_config cfg;
3950 int err;
3951
3952 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3953 if (err < 0)
3954 return err;
3955
3956 if (cfg.fc_mp)
3957 return ip6_route_multipath_del(&cfg, extack);
3958 else {
3959 cfg.fc_delete_all_nh = 1;
3960 return ip6_route_del(&cfg, extack);
3961 }
3962 }
3963
3964 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3965 struct netlink_ext_ack *extack)
3966 {
3967 struct fib6_config cfg;
3968 int err;
3969
3970 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3971 if (err < 0)
3972 return err;
3973
3974 if (cfg.fc_mp)
3975 return ip6_route_multipath_add(&cfg, extack);
3976 else
3977 return ip6_route_add(&cfg, extack);
3978 }
3979
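/* Upper bound on the netlink message size needed to dump this route,
 * including one rtnexthop entry per sibling for multipath routes.
 */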
3980 static size_t rt6_nlmsg_size(struct rt6_info *rt)
3981 {
3982 int nexthop_len = 0;
3983
3984 if (rt->rt6i_nsiblings) {
3985 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
3986 + NLA_ALIGN(sizeof(struct rtnexthop))
3987 + nla_total_size(16) /* RTA_GATEWAY */
3988 + lwtunnel_get_encap_size(rt->dst.lwtstate);
3989
3990 nexthop_len *= rt->rt6i_nsiblings;
3991 }
3992
3993 return NLMSG_ALIGN(sizeof(struct rtmsg))
3994 + nla_total_size(16) /* RTA_SRC */
3995 + nla_total_size(16) /* RTA_DST */
3996 + nla_total_size(16) /* RTA_GATEWAY */
3997 + nla_total_size(16) /* RTA_PREFSRC */
3998 + nla_total_size(4) /* RTA_TABLE */
3999 + nla_total_size(4) /* RTA_IIF */
4000 + nla_total_size(4) /* RTA_OIF */
4001 + nla_total_size(4) /* RTA_PRIORITY */
4002 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
4003 + nla_total_size(sizeof(struct rta_cacheinfo))
4004 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
4005 + nla_total_size(1) /* RTA_PREF */
4006 + lwtunnel_get_encap_size(rt->dst.lwtstate)
4007 + nexthop_len;
4008 }
4009
4010 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
4011 unsigned int *flags, bool skip_oif)
4012 {
4013 if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
4014 *flags |= RTNH_F_LINKDOWN;
4015 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
4016 *flags |= RTNH_F_DEAD;
4017 }
4018
4019 if (rt->rt6i_flags & RTF_GATEWAY) {
4020 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
4021 goto nla_put_failure;
4022 }
4023
4024 if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD)
4025 *flags |= RTNH_F_OFFLOAD;
4026
4027 /* not needed for multipath encoding because it has an rtnexthop struct */
4028 if (!skip_oif && rt->dst.dev &&
4029 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
4030 goto nla_put_failure;
4031
4032 if (rt->dst.lwtstate &&
4033 lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
4034 goto nla_put_failure;
4035
4036 return 0;
4037
4038 nla_put_failure:
4039 return -EMSGSIZE;
4040 }
4041
4042 /* add multipath next hop */
4043 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
4044 {
4045 struct rtnexthop *rtnh;
4046 unsigned int flags = 0;
4047
4048 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
4049 if (!rtnh)
4050 goto nla_put_failure;
4051
4052 rtnh->rtnh_hops = 0;
4053 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
4054
4055 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
4056 goto nla_put_failure;
4057
4058 rtnh->rtnh_flags = flags;
4059
4060 /* length of rtnetlink header + attributes */
4061 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
4062
4063 return 0;
4064
4065 nla_put_failure:
4066 return -EMSGSIZE;
4067 }
4068
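/* Encode an rt6_info into an rtnetlink message: route type, addresses,
 * metrics and, for multipath routes, an RTA_MULTIPATH nest with one
 * entry per sibling.
 */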
4069 static int rt6_fill_node(struct net *net,
4070 struct sk_buff *skb, struct rt6_info *rt,
4071 struct in6_addr *dst, struct in6_addr *src,
4072 int iif, int type, u32 portid, u32 seq,
4073 unsigned int flags)
4074 {
4075 u32 metrics[RTAX_MAX];
4076 struct rtmsg *rtm;
4077 struct nlmsghdr *nlh;
4078 long expires;
4079 u32 table;
4080
4081 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4082 if (!nlh)
4083 return -EMSGSIZE;
4084
4085 rtm = nlmsg_data(nlh);
4086 rtm->rtm_family = AF_INET6;
4087 rtm->rtm_dst_len = rt->rt6i_dst.plen;
4088 rtm->rtm_src_len = rt->rt6i_src.plen;
4089 rtm->rtm_tos = 0;
4090 if (rt->rt6i_table)
4091 table = rt->rt6i_table->tb6_id;
4092 else
4093 table = RT6_TABLE_UNSPEC;
4094 rtm->rtm_table = table;
4095 if (nla_put_u32(skb, RTA_TABLE, table))
4096 goto nla_put_failure;
4097 if (rt->rt6i_flags & RTF_REJECT) {
4098 switch (rt->dst.error) {
4099 case -EINVAL:
4100 rtm->rtm_type = RTN_BLACKHOLE;
4101 break;
4102 case -EACCES:
4103 rtm->rtm_type = RTN_PROHIBIT;
4104 break;
4105 case -EAGAIN:
4106 rtm->rtm_type = RTN_THROW;
4107 break;
4108 default:
4109 rtm->rtm_type = RTN_UNREACHABLE;
4110 break;
4111 }
4112 } else if (rt->rt6i_flags & RTF_LOCAL)
4114 rtm->rtm_type = RTN_LOCAL;
4115 else if (rt->rt6i_flags & RTF_ANYCAST)
4116 rtm->rtm_type = RTN_ANYCAST;
4117 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
4118 rtm->rtm_type = RTN_LOCAL;
4119 else
4120 rtm->rtm_type = RTN_UNICAST;
4121 rtm->rtm_flags = 0;
4122 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4123 rtm->rtm_protocol = rt->rt6i_protocol;
4124
4125 if (rt->rt6i_flags & RTF_CACHE)
4126 rtm->rtm_flags |= RTM_F_CLONED;
4127
4128 if (dst) {
4129 if (nla_put_in6_addr(skb, RTA_DST, dst))
4130 goto nla_put_failure;
4131 rtm->rtm_dst_len = 128;
4132 } else if (rtm->rtm_dst_len)
4133 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
4134 goto nla_put_failure;
4135 #ifdef CONFIG_IPV6_SUBTREES
4136 if (src) {
4137 if (nla_put_in6_addr(skb, RTA_SRC, src))
4138 goto nla_put_failure;
4139 rtm->rtm_src_len = 128;
4140 } else if (rtm->rtm_src_len &&
4141 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
4142 goto nla_put_failure;
4143 #endif
4144 if (iif) {
4145 #ifdef CONFIG_IPV6_MROUTE
4146 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
4147 int err = ip6mr_get_route(net, skb, rtm, portid);
4148
4149 if (err == 0)
4150 return 0;
4151 if (err < 0)
4152 goto nla_put_failure;
4153 } else
4154 #endif
4155 if (nla_put_u32(skb, RTA_IIF, iif))
4156 goto nla_put_failure;
4157 } else if (dst) {
4158 struct in6_addr saddr_buf;
4159 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
4160 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4161 goto nla_put_failure;
4162 }
4163
4164 if (rt->rt6i_prefsrc.plen) {
4165 struct in6_addr saddr_buf;
4166 saddr_buf = rt->rt6i_prefsrc.addr;
4167 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4168 goto nla_put_failure;
4169 }
4170
4171 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
4172 if (rt->rt6i_pmtu)
4173 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
4174 if (rtnetlink_put_metrics(skb, metrics) < 0)
4175 goto nla_put_failure;
4176
4177 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
4178 goto nla_put_failure;
4179
4180 /* For multipath routes, walk the siblings list and add
4181 * each as a nexthop within RTA_MULTIPATH.
4182 */
4183 if (rt->rt6i_nsiblings) {
4184 struct rt6_info *sibling, *next_sibling;
4185 struct nlattr *mp;
4186
4187 mp = nla_nest_start(skb, RTA_MULTIPATH);
4188 if (!mp)
4189 goto nla_put_failure;
4190
4191 if (rt6_add_nexthop(skb, rt) < 0)
4192 goto nla_put_failure;
4193
4194 list_for_each_entry_safe(sibling, next_sibling,
4195 &rt->rt6i_siblings, rt6i_siblings) {
4196 if (rt6_add_nexthop(skb, sibling) < 0)
4197 goto nla_put_failure;
4198 }
4199
4200 nla_nest_end(skb, mp);
4201 } else {
4202 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
4203 goto nla_put_failure;
4204 }
4205
4206 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
4207
4208 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
4209 goto nla_put_failure;
4210
4211 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
4212 goto nla_put_failure;
4213
4215 nlmsg_end(skb, nlh);
4216 return 0;
4217
4218 nla_put_failure:
4219 nlmsg_cancel(skb, nlh);
4220 return -EMSGSIZE;
4221 }
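
/* Editor's note: an illustrative userspace sketch, not part of this file.
 * rtnl_put_cacheinfo() above exports the expiry as rta_expires inside
 * RTA_CACHEINFO, converted to user-visible clock ticks; expires_secs()
 * is a hypothetical helper decoding it.
 */
#include <unistd.h>
#include <linux/rtnetlink.h>

static long expires_secs(const struct rta_cacheinfo *ci)
{
	long hz = sysconf(_SC_CLK_TCK);	/* user-visible ticks per second */

	return ci->rta_expires / hz;	/* 0 means the route does not expire */
}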
4222
4223 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
4224 {
4225 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
4226 struct net *net = arg->net;
4227
4228 if (rt == net->ipv6.ip6_null_entry)
4229 return 0;
4230
4231 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
4232 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
4233
4234 /* user wants prefix routes only */
4235 if (rtm->rtm_flags & RTM_F_PREFIX &&
4236 !(rt->rt6i_flags & RTF_PREFIX_RT)) {
4237 /* skip this non-prefix route; a non-negative return keeps the dump walking */
4238 return 1;
4239 }
4240 }
4241
4242 return rt6_fill_node(net,
4243 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
4244 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
4245 NLM_F_MULTI);
4246 }
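
/* Editor's note: an illustrative userspace sketch, not part of this file,
 * of the dump request that ends up in rt6_dump_route() above. Setting
 * RTM_F_PREFIX in rtm_flags selects the "prefix routes only" filtering
 * handled there; request_v6_dump() is a hypothetical name.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int request_v6_dump(int fd, int prefix_only)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = AF_INET6;
	if (prefix_only)
		req.rtm.rtm_flags |= RTM_F_PREFIX;

	/* replies arrive as an NLM_F_MULTI stream ended by NLMSG_DONE */
	return send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
}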
4247
4248 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4249 struct netlink_ext_ack *extack)
4250 {
4251 struct net *net = sock_net(in_skb->sk);
4252 struct nlattr *tb[RTA_MAX+1];
4253 int err, iif = 0, oif = 0;
4254 struct dst_entry *dst;
4255 struct rt6_info *rt;
4256 struct sk_buff *skb;
4257 struct rtmsg *rtm;
4258 struct flowi6 fl6;
4259 bool fibmatch;
4260
4261 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
4262 extack);
4263 if (err < 0)
4264 goto errout;
4265
4266 err = -EINVAL;
4267 memset(&fl6, 0, sizeof(fl6));
4268 rtm = nlmsg_data(nlh);
4269 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
4270 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
4271
4272 if (tb[RTA_SRC]) {
4273 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
4274 goto errout;
4275
4276 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
4277 }
4278
4279 if (tb[RTA_DST]) {
4280 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
4281 goto errout;
4282
4283 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
4284 }
4285
4286 if (tb[RTA_IIF])
4287 iif = nla_get_u32(tb[RTA_IIF]);
4288
4289 if (tb[RTA_OIF])
4290 oif = nla_get_u32(tb[RTA_OIF]);
4291
4292 if (tb[RTA_MARK])
4293 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
4294
4295 if (tb[RTA_UID])
4296 fl6.flowi6_uid = make_kuid(current_user_ns(),
4297 nla_get_u32(tb[RTA_UID]));
4298 else
4299 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
4300
4301 if (iif) {
4302 struct net_device *dev;
4303 int flags = 0;
4304
4305 rcu_read_lock();
4306
4307 dev = dev_get_by_index_rcu(net, iif);
4308 if (!dev) {
4309 rcu_read_unlock();
4310 err = -ENODEV;
4311 goto errout;
4312 }
4313
4314 fl6.flowi6_iif = iif;
4315
4316 if (!ipv6_addr_any(&fl6.saddr))
4317 flags |= RT6_LOOKUP_F_HAS_SADDR;
4318
4319 dst = ip6_route_input_lookup(net, dev, &fl6, flags);
4320
4321 rcu_read_unlock();
4322 } else {
4323 fl6.flowi6_oif = oif;
4324
4325 dst = ip6_route_output(net, NULL, &fl6);
4326 }
4327
4329 rt = container_of(dst, struct rt6_info, dst);
4330 if (rt->dst.error) {
4331 err = rt->dst.error;
4332 ip6_rt_put(rt);
4333 goto errout;
4334 }
4335
4336 if (rt == net->ipv6.ip6_null_entry) {
4337 err = rt->dst.error;
4338 ip6_rt_put(rt);
4339 goto errout;
4340 }
4341
4342 if (fibmatch && rt->dst.from) {
4343 struct rt6_info *ort = container_of(rt->dst.from,
4344 struct rt6_info, dst);
4345
4346 dst_hold(&ort->dst);
4347 ip6_rt_put(rt);
4348 rt = ort;
4349 }
4350
4351 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
4352 if (!skb) {
4353 ip6_rt_put(rt);
4354 err = -ENOBUFS;
4355 goto errout;
4356 }
4357
4358 skb_dst_set(skb, &rt->dst);
4359 if (fibmatch)
4360 err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
4361 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4362 nlh->nlmsg_seq, 0);
4363 else
4364 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
4365 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4366 nlh->nlmsg_seq, 0);
4367 if (err < 0) {
4368 kfree_skb(skb);
4369 goto errout;
4370 }
4371
4372 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4373 errout:
4374 return err;
4375 }
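
/* Editor's note: an illustrative userspace sketch, not part of this file,
 * of a single-route lookup handled by inet6_rtm_getroute() above.
 * RTM_F_FIB_MATCH (what `ip -6 route get ... fibmatch` sets) requests the
 * matched FIB entry instead of the cloned dst; get_v6_route() is a
 * hypothetical name.
 */
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int get_v6_route(int fd, const struct in6_addr *dst, int fibmatch)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		struct rtattr rta;	/* RTA_DST header */
		struct in6_addr addr;	/* RTA_DST payload */
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_INET6;
	req.rtm.rtm_dst_len = 128;
	if (fibmatch)
		req.rtm.rtm_flags |= RTM_F_FIB_MATCH;
	req.rta.rta_type = RTA_DST;
	req.rta.rta_len = RTA_LENGTH(sizeof(req.addr));
	req.addr = *dst;

	return send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
}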
4376
4377 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
4378 unsigned int nlm_flags)
4379 {
4380 struct sk_buff *skb;
4381 struct net *net = info->nl_net;
4382 u32 seq;
4383 int err;
4384
4385 err = -ENOBUFS;
4386 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4387
4388 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4389 if (!skb)
4390 goto errout;
4391
4392 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
4393 event, info->portid, seq, nlm_flags);
4394 if (err < 0) {
4395 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
4396 WARN_ON(err == -EMSGSIZE);
4397 kfree_skb(skb);
4398 goto errout;
4399 }
4400 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4401 info->nlh, gfp_any());
4402 return;
4403 errout:
4404 if (err < 0)
4405 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
4406 }
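
/* Editor's note: an illustrative userspace sketch, not part of this file,
 * subscribing to the notifications inet6_rt_notify() above sends to
 * RTNLGRP_IPV6_ROUTE; open_v6_route_monitor() is a hypothetical name.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int open_v6_route_monitor(void)
{
	struct sockaddr_nl snl;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	snl.nl_groups = RTMGRP_IPV6_ROUTE;	/* legacy bitmask form of the group */
	if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0) {
		close(fd);
		return -1;
	}
	/* recv() now yields RTM_NEWROUTE/RTM_DELROUTE messages */
	return fd;
}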
4407
4408 static int ip6_route_dev_notify(struct notifier_block *this,
4409 unsigned long event, void *ptr)
4410 {
4411 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4412 struct net *net = dev_net(dev);
4413
4414 if (!(dev->flags & IFF_LOOPBACK))
4415 return NOTIFY_OK;
4416
4417 if (event == NETDEV_REGISTER) {
4418 net->ipv6.ip6_null_entry->dst.dev = dev;
4419 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
4420 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4421 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
4422 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
4423 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
4424 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4425 #endif
4426 } else if (event == NETDEV_UNREGISTER &&
4427 dev->reg_state != NETREG_UNREGISTERED) {
4428 /* NETDEV_UNREGISTER can be fired multiple times by
4429 * netdev_wait_allrefs(). Make sure we only do this once.
4430 */
4431 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
4432 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4433 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
4434 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
4435 #endif
4436 }
4437
4438 return NOTIFY_OK;
4439 }
4440
4441 /*
4442 * /proc
4443 */
4444
4445 #ifdef CONFIG_PROC_FS
4446
4447 static const struct file_operations ipv6_route_proc_fops = {
4448 .owner = THIS_MODULE,
4449 .open = ipv6_route_open,
4450 .read = seq_read,
4451 .llseek = seq_lseek,
4452 .release = seq_release_net,
4453 };
4454
4455 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
4456 {
4457 struct net *net = (struct net *)seq->private;

4458 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
4459 net->ipv6.rt6_stats->fib_nodes,
4460 net->ipv6.rt6_stats->fib_route_nodes,
4461 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
4462 net->ipv6.rt6_stats->fib_rt_entries,
4463 net->ipv6.rt6_stats->fib_rt_cache,
4464 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
4465 net->ipv6.rt6_stats->fib_discarded_routes);
4466
4467 return 0;
4468 }
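
/* Editor's note: an illustrative userspace sketch, not part of this file,
 * parsing the single line of seven hex fields that rt6_stats_seq_show()
 * above emits into /proc/net/rt6_stats; read_rt6_stats() is a
 * hypothetical name.
 */
#include <stdio.h>

static int read_rt6_stats(void)
{
	unsigned int nodes, route_nodes, rt_alloc, rt_entries,
		     rt_cache, dst_ops, discarded;
	FILE *f = fopen("/proc/net/rt6_stats", "r");

	if (!f)
		return -1;
	if (fscanf(f, "%x %x %x %x %x %x %x", &nodes, &route_nodes,
		   &rt_alloc, &rt_entries, &rt_cache, &dst_ops,
		   &discarded) != 7) {
		fclose(f);
		return -1;
	}
	fclose(f);
	printf("fib nodes %u, route entries %u, discarded %u\n",
	       nodes, rt_entries, discarded);
	return 0;
}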
4469
4470 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
4471 {
4472 return single_open_net(inode, file, rt6_stats_seq_show);
4473 }
4474
4475 static const struct file_operations rt6_stats_seq_fops = {
4476 .owner = THIS_MODULE,
4477 .open = rt6_stats_seq_open,
4478 .read = seq_read,
4479 .llseek = seq_lseek,
4480 .release = single_release_net,
4481 };
4482 #endif /* CONFIG_PROC_FS */
4483
4484 #ifdef CONFIG_SYSCTL
4485
4486 static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
4488 void __user *buffer, size_t *lenp, loff_t *ppos)
4489 {
4490 struct net *net;
4491 int delay;
int ret;

4492 if (!write)
4493 return -EINVAL;
4494
4495 net = (struct net *)ctl->extra1;
4496 delay = net->ipv6.sysctl.flush_delay;
4497 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (ret)
return ret;
4498 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
4499 return 0;
4500 }
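
/* Editor's note: an illustrative userspace sketch, not part of this file,
 * exercising the write-only (mode 0200) flush sysctl handled above;
 * flush_v6_routes() is a hypothetical name.
 */
#include <stdio.h>

static int flush_v6_routes(void)
{
	FILE *f = fopen("/proc/sys/net/ipv6/route/flush", "w");

	if (!f)
		return -1;
	fputs("1\n", f);	/* writing an integer triggers fib6_run_gc() */
	return fclose(f);
}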
4501
4502 struct ctl_table ipv6_route_table_template[] = {
4503 {
4504 .procname = "flush",
4505 .data = &init_net.ipv6.sysctl.flush_delay,
4506 .maxlen = sizeof(int),
4507 .mode = 0200,
4508 .proc_handler = ipv6_sysctl_rtcache_flush
4509 },
4510 {
4511 .procname = "gc_thresh",
4512 .data = &ip6_dst_ops_template.gc_thresh,
4513 .maxlen = sizeof(int),
4514 .mode = 0644,
4515 .proc_handler = proc_dointvec,
4516 },
4517 {
4518 .procname = "max_size",
4519 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
4520 .maxlen = sizeof(int),
4521 .mode = 0644,
4522 .proc_handler = proc_dointvec,
4523 },
4524 {
4525 .procname = "gc_min_interval",
4526 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
4527 .maxlen = sizeof(int),
4528 .mode = 0644,
4529 .proc_handler = proc_dointvec_jiffies,
4530 },
4531 {
4532 .procname = "gc_timeout",
4533 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
4534 .maxlen = sizeof(int),
4535 .mode = 0644,
4536 .proc_handler = proc_dointvec_jiffies,
4537 },
4538 {
4539 .procname = "gc_interval",
4540 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
4541 .maxlen = sizeof(int),
4542 .mode = 0644,
4543 .proc_handler = proc_dointvec_jiffies,
4544 },
4545 {
4546 .procname = "gc_elasticity",
4547 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
4548 .maxlen = sizeof(int),
4549 .mode = 0644,
4550 .proc_handler = proc_dointvec,
4551 },
4552 {
4553 .procname = "mtu_expires",
4554 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
4555 .maxlen = sizeof(int),
4556 .mode = 0644,
4557 .proc_handler = proc_dointvec_jiffies,
4558 },
4559 {
4560 .procname = "min_adv_mss",
4561 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
4562 .maxlen = sizeof(int),
4563 .mode = 0644,
4564 .proc_handler = proc_dointvec,
4565 },
4566 {
4567 .procname = "gc_min_interval_ms",
4568 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
4569 .maxlen = sizeof(int),
4570 .mode = 0644,
4571 .proc_handler = proc_dointvec_ms_jiffies,
4572 },
4573 { }
4574 };
4575
4576 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
4577 {
4578 struct ctl_table *table;
4579
4580 table = kmemdup(ipv6_route_table_template,
4581 sizeof(ipv6_route_table_template),
4582 GFP_KERNEL);
4583
4584 if (table) {
4585 table[0].data = &net->ipv6.sysctl.flush_delay;
4586 table[0].extra1 = net;
4587 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
4588 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
4589 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
4590 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
4591 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
4592 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
4593 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
4594 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
4595 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
4596
4597 /* Don't export sysctls to unprivileged users */
4598 if (net->user_ns != &init_user_ns)
4599 table[0].procname = NULL;
4600 }
4601
4602 return table;
4603 }
4604 #endif
4605
4606 static int __net_init ip6_route_net_init(struct net *net)
4607 {
4608 int ret = -ENOMEM;
4609
4610 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
4611 sizeof(net->ipv6.ip6_dst_ops));
4612
4613 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
4614 goto out_ip6_dst_ops;
4615
4616 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
4617 sizeof(*net->ipv6.ip6_null_entry),
4618 GFP_KERNEL);
4619 if (!net->ipv6.ip6_null_entry)
4620 goto out_ip6_dst_entries;
4621 net->ipv6.ip6_null_entry->dst.path =
4622 (struct dst_entry *)net->ipv6.ip6_null_entry;
4623 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4624 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
4625 ip6_template_metrics, true);
4626
4627 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4628 net->ipv6.fib6_has_custom_rules = false;
4629 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
4630 sizeof(*net->ipv6.ip6_prohibit_entry),
4631 GFP_KERNEL);
4632 if (!net->ipv6.ip6_prohibit_entry)
4633 goto out_ip6_null_entry;
4634 net->ipv6.ip6_prohibit_entry->dst.path =
4635 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
4636 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4637 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
4638 ip6_template_metrics, true);
4639
4640 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
4641 sizeof(*net->ipv6.ip6_blk_hole_entry),
4642 GFP_KERNEL);
4643 if (!net->ipv6.ip6_blk_hole_entry)
4644 goto out_ip6_prohibit_entry;
4645 net->ipv6.ip6_blk_hole_entry->dst.path =
4646 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
4647 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4648 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
4649 ip6_template_metrics, true);
4650 #endif
4651
4652 net->ipv6.sysctl.flush_delay = 0;
4653 net->ipv6.sysctl.ip6_rt_max_size = 4096;
4654 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
4655 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
4656 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
4657 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
4658 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
4659 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
4660
4661 net->ipv6.ip6_rt_gc_expire = 30*HZ;
4662
4663 ret = 0;
4664 out:
4665 return ret;
4666
4667 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4668 out_ip6_prohibit_entry:
4669 kfree(net->ipv6.ip6_prohibit_entry);
4670 out_ip6_null_entry:
4671 kfree(net->ipv6.ip6_null_entry);
4672 #endif
4673 out_ip6_dst_entries:
4674 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
4675 out_ip6_dst_ops:
4676 goto out;
4677 }
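
/* Editor's note: an illustrative sketch, not part of this file, of the
 * layered-goto unwind idiom used by ip6_route_net_init() above: each
 * allocation gets a cleanup label, and a failure jumps to the label that
 * releases everything acquired so far, in reverse order.
 */
#include <linux/slab.h>

static int example_layered_init(void **pa, void **pb)
{
	int ret = -ENOMEM;
	void *a, *b;

	a = kmalloc(16, GFP_KERNEL);
	if (!a)
		goto out;
	b = kmalloc(16, GFP_KERNEL);
	if (!b)
		goto out_free_a;

	*pa = a;
	*pb = b;
	return 0;

out_free_a:
	kfree(a);
out:
	return ret;
}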
4678
4679 static void __net_exit ip6_route_net_exit(struct net *net)
4680 {
4681 kfree(net->ipv6.ip6_null_entry);
4682 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4683 kfree(net->ipv6.ip6_prohibit_entry);
4684 kfree(net->ipv6.ip6_blk_hole_entry);
4685 #endif
4686 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
4687 }
4688
4689 static int __net_init ip6_route_net_init_late(struct net *net)
4690 {
4691 #ifdef CONFIG_PROC_FS
4692 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
4693 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
4694 #endif
4695 return 0;
4696 }
4697
4698 static void __net_exit ip6_route_net_exit_late(struct net *net)
4699 {
4700 #ifdef CONFIG_PROC_FS
4701 remove_proc_entry("ipv6_route", net->proc_net);
4702 remove_proc_entry("rt6_stats", net->proc_net);
4703 #endif
4704 }
4705
4706 static struct pernet_operations ip6_route_net_ops = {
4707 .init = ip6_route_net_init,
4708 .exit = ip6_route_net_exit,
4709 };
4710
4711 static int __net_init ipv6_inetpeer_init(struct net *net)
4712 {
4713 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
4714
4715 if (!bp)
4716 return -ENOMEM;
4717 inet_peer_base_init(bp);
4718 net->ipv6.peers = bp;
4719 return 0;
4720 }
4721
4722 static void __net_exit ipv6_inetpeer_exit(struct net *net)
4723 {
4724 struct inet_peer_base *bp = net->ipv6.peers;
4725
4726 net->ipv6.peers = NULL;
4727 inetpeer_invalidate_tree(bp);
4728 kfree(bp);
4729 }
4730
4731 static struct pernet_operations ipv6_inetpeer_ops = {
4732 .init = ipv6_inetpeer_init,
4733 .exit = ipv6_inetpeer_exit,
4734 };
4735
4736 static struct pernet_operations ip6_route_net_late_ops = {
4737 .init = ip6_route_net_init_late,
4738 .exit = ip6_route_net_exit_late,
4739 };
4740
4741 static struct notifier_block ip6_route_dev_notifier = {
4742 .notifier_call = ip6_route_dev_notify,
4743 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
4744 };
4745
4746 void __init ip6_route_init_special_entries(void)
4747 {
4748 /* The loopback device is registered before this code runs, so
4749 * the loopback reference in rt6_info is not taken automatically;
4750 * take it manually for init_net. */
4751 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
4752 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4753 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4754 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
4755 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4756 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
4757 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4758 #endif
4759 }
4760
4761 int __init ip6_route_init(void)
4762 {
4763 int ret;
4764 int cpu;
4765
4766 ret = -ENOMEM;
4767 ip6_dst_ops_template.kmem_cachep =
4768 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
4769 SLAB_HWCACHE_ALIGN, NULL);
4770 if (!ip6_dst_ops_template.kmem_cachep)
4771 goto out;
4772
4773 ret = dst_entries_init(&ip6_dst_blackhole_ops);
4774 if (ret)
4775 goto out_kmem_cache;
4776
4777 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
4778 if (ret)
4779 goto out_dst_entries;
4780
4781 ret = register_pernet_subsys(&ip6_route_net_ops);
4782 if (ret)
4783 goto out_register_inetpeer;
4784
4785 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
4786
4787 ret = fib6_init();
4788 if (ret)
4789 goto out_register_subsys;
4790
4791 ret = xfrm6_init();
4792 if (ret)
4793 goto out_fib6_init;
4794
4795 ret = fib6_rules_init();
4796 if (ret)
4797 goto xfrm6_init;
4798
4799 ret = register_pernet_subsys(&ip6_route_net_late_ops);
4800 if (ret)
4801 goto fib6_rules_init;
4802
4803 ret = -ENOBUFS;
4804 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
4805 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
4806 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
4807 RTNL_FLAG_DOIT_UNLOCKED))
4808 goto out_register_late_subsys;
4809
4810 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
4811 if (ret)
4812 goto out_register_late_subsys;
4813
4814 for_each_possible_cpu(cpu) {
4815 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
4816
4817 INIT_LIST_HEAD(&ul->head);
4818 spin_lock_init(&ul->lock);
4819 }
4820
4821 out:
4822 return ret;
4823
4824 out_register_late_subsys:
4825 unregister_pernet_subsys(&ip6_route_net_late_ops);
4826 fib6_rules_init:
4827 fib6_rules_cleanup();
4828 xfrm6_init:
4829 xfrm6_fini();
4830 out_fib6_init:
4831 fib6_gc_cleanup();
4832 out_register_subsys:
4833 unregister_pernet_subsys(&ip6_route_net_ops);
4834 out_register_inetpeer:
4835 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4836 out_dst_entries:
4837 dst_entries_destroy(&ip6_dst_blackhole_ops);
4838 out_kmem_cache:
4839 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4840 goto out;
4841 }
4842
4843 void ip6_route_cleanup(void)
4844 {
4845 unregister_netdevice_notifier(&ip6_route_dev_notifier);
4846 unregister_pernet_subsys(&ip6_route_net_late_ops);
4847 fib6_rules_cleanup();
4848 xfrm6_fini();
4849 fib6_gc_cleanup();
4850 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4851 unregister_pernet_subsys(&ip6_route_net_ops);
4852 dst_entries_destroy(&ip6_dst_blackhole_ops);
4853 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4854 }