/*
 *      Linux INET6 implementation
 *      FIB front-end.
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*      Changes:
 *
 *      YOSHIFUJI Hideaki @USAGI
 *              reworked default router selection.
 *              - respect outgoing interface
 *              - select from (probably) reachable routers (i.e.
 *                routers in REACHABLE, STALE, DELAY or PROBE states).
 *              - always select the same router if it is (probably)
 *                reachable.  otherwise, round-robin the list.
 *      Ville Nuorvala
 *              Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>

#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

enum rt6_nud_state {
        RT6_NUD_FAIL_HARD = -3,
        RT6_NUD_FAIL_PROBE = -2,
        RT6_NUD_FAIL_DO_RR = -1,
        RT6_NUD_SUCCEED = 1
};

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
                           struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct rt6_info *rt);
static int rt6_fill_node(struct net *net,
                         struct sk_buff *skb, struct rt6_info *rt,
                         struct in6_addr *dst, struct in6_addr *src,
                         int iif, int type, u32 portid, u32 seq,
                         unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
                                           const struct in6_addr *prefix, int prefixlen,
                                           const struct in6_addr *gwaddr,
                                           struct net_device *dev,
                                           unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
                                           const struct in6_addr *prefix, int prefixlen,
                                           const struct in6_addr *gwaddr,
                                           struct net_device *dev);
#endif

struct uncached_list {
        spinlock_t lock;
        struct list_head head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

void rt6_uncached_list_add(struct rt6_info *rt)
{
        struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

        rt->rt6i_uncached_list = ul;

        spin_lock_bh(&ul->lock);
        list_add_tail(&rt->rt6i_uncached, &ul->head);
        spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
        if (!list_empty(&rt->rt6i_uncached)) {
                struct uncached_list *ul = rt->rt6i_uncached_list;
                struct net *net = dev_net(rt->dst.dev);

                spin_lock_bh(&ul->lock);
                list_del(&rt->rt6i_uncached);
                atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
                spin_unlock_bh(&ul->lock);
        }
}

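/* Illustrative sketch (editor's addition, not part of the original file):
 * the uncached list is used in add/del pairs for dsts that never enter
 * the fib6 tree, so that rt6_uncached_list_flush_dev() below can find
 * them on device teardown. A hypothetical caller:
 *
 *      rt = ip6_dst_alloc(net, dev, 0);
 *      if (rt)
 *              rt6_uncached_list_add(rt);      // track for ifdown flushing
 *      ...
 *      rt6_uncached_list_del(rt);              // done from ip6_dst_destroy()
 */
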
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
        struct net_device *loopback_dev = net->loopback_dev;
        int cpu;

        if (dev == loopback_dev)
                return;

        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
                struct rt6_info *rt;

                spin_lock_bh(&ul->lock);
                list_for_each_entry(rt, &ul->head, rt6i_uncached) {
                        struct inet6_dev *rt_idev = rt->rt6i_idev;
                        struct net_device *rt_dev = rt->dst.dev;

                        if (rt_idev->dev == dev) {
                                rt->rt6i_idev = in6_dev_get(loopback_dev);
                                in6_dev_put(rt_idev);
                        }

                        if (rt_dev == dev) {
                                rt->dst.dev = loopback_dev;
                                dev_hold(rt->dst.dev);
                                dev_put(rt_dev);
                        }
                }
                spin_unlock_bh(&ul->lock);
        }
}

static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
        return dst_metrics_write_ptr(rt->dst.from);
}

static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
        struct rt6_info *rt = (struct rt6_info *)dst;

        if (rt->rt6i_flags & RTF_PCPU)
                return rt6_pcpu_cow_metrics(rt);
        else if (rt->rt6i_flags & RTF_CACHE)
                return NULL;
        else
                return dst_cow_metrics_generic(dst, old);
}

static inline const void *choose_neigh_daddr(struct rt6_info *rt,
                                             struct sk_buff *skb,
                                             const void *daddr)
{
        struct in6_addr *p = &rt->rt6i_gateway;

        if (!ipv6_addr_any(p))
                return (const void *) p;
        else if (skb)
                return &ipv6_hdr(skb)->daddr;
        return daddr;
}

static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
                                          struct sk_buff *skb,
                                          const void *daddr)
{
        struct rt6_info *rt = (struct rt6_info *) dst;
        struct neighbour *n;

        daddr = choose_neigh_daddr(rt, skb, daddr);
        n = __ipv6_neigh_lookup(dst->dev, daddr);
        if (n)
                return n;
        return neigh_create(&nd_tbl, daddr, dst->dev);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
        struct net_device *dev = dst->dev;
        struct rt6_info *rt = (struct rt6_info *)dst;

        daddr = choose_neigh_daddr(rt, NULL, daddr);
        if (!daddr)
                return;
        if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
                return;
        if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
                return;
        __ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
        .family                 = AF_INET6,
        .gc                     = ip6_dst_gc,
        .gc_thresh              = 1024,
        .check                  = ip6_dst_check,
        .default_advmss         = ip6_default_advmss,
        .mtu                    = ip6_mtu,
        .cow_metrics            = ipv6_cow_metrics,
        .destroy                = ip6_dst_destroy,
        .ifdown                 = ip6_dst_ifdown,
        .negative_advice        = ip6_negative_advice,
        .link_failure           = ip6_link_failure,
        .update_pmtu            = ip6_rt_update_pmtu,
        .redirect               = rt6_do_redirect,
        .local_out              = __ip6_local_out,
        .neigh_lookup           = ip6_neigh_lookup,
        .confirm_neigh          = ip6_confirm_neigh,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

        return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                         struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
                                      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 = AF_INET6,
        .destroy                = ip6_dst_destroy,
        .check                  = ip6_dst_check,
        .mtu                    = ip6_blackhole_mtu,
        .default_advmss         = ip6_default_advmss,
        .update_pmtu            = ip6_rt_blackhole_update_pmtu,
        .redirect               = ip6_rt_blackhole_redirect,
        .cow_metrics            = dst_cow_metrics_generic,
        .neigh_lookup           = ip6_neigh_lookup,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
        [RTAX_HOPLIMIT - 1] = 0,
};

static const struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -ENETUNREACH,
                .input          = ip6_pkt_discard,
                .output         = ip6_pkt_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol  = RTPROT_KERNEL,
        .rt6i_metric    = ~(u32) 0,
        .rt6i_ref       = ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EACCES,
                .input          = ip6_pkt_prohibit,
                .output         = ip6_pkt_prohibit_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol  = RTPROT_KERNEL,
        .rt6i_metric    = ~(u32) 0,
        .rt6i_ref       = ATOMIC_INIT(1),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EINVAL,
                .input          = dst_discard,
                .output         = dst_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol  = RTPROT_KERNEL,
        .rt6i_metric    = ~(u32) 0,
        .rt6i_ref       = ATOMIC_INIT(1),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
        struct dst_entry *dst = &rt->dst;

        memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
        INIT_LIST_HEAD(&rt->rt6i_siblings);
        INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
                                        int flags)
{
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        1, DST_OBSOLETE_FORCE_CHK, flags);

        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
        }

        return rt;
}

struct rt6_info *ip6_dst_alloc(struct net *net,
                               struct net_device *dev,
                               int flags)
{
        struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

        if (rt) {
                rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
                if (!rt->rt6i_pcpu) {
                        dst_release_immediate(&rt->dst);
                        return NULL;
                }
        }

        return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct rt6_exception_bucket *bucket;
        struct dst_entry *from = dst->from;
        struct inet6_dev *idev;

        dst_destroy_metrics_generic(dst);
        free_percpu(rt->rt6i_pcpu);
        rt6_uncached_list_del(rt);

        idev = rt->rt6i_idev;
        if (idev) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
        }
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1);
        if (bucket) {
                rt->rt6i_exception_bucket = NULL;
                kfree(bucket);
        }

        dst->from = NULL;
        dst_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                           int how)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct inet6_dev *idev = rt->rt6i_idev;
        struct net_device *loopback_dev =
                dev_net(dev)->loopback_dev;

        if (idev && idev->dev != loopback_dev) {
                struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);

                if (loopback_idev) {
                        rt->rt6i_idev = loopback_idev;
                        in6_dev_put(idev);
                }
        }
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
        if (rt->rt6i_flags & RTF_EXPIRES)
                return time_after(jiffies, rt->dst.expires);
        else
                return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
        if (rt->rt6i_flags & RTF_EXPIRES) {
                if (time_after(jiffies, rt->dst.expires))
                        return true;
        } else if (rt->dst.from) {
                return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
                       rt6_check_expired((struct rt6_info *)rt->dst.from);
        }
        return false;
}

static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
                                             struct flowi6 *fl6, int oif,
                                             int strict)
{
        struct rt6_info *sibling, *next_sibling;
        int route_chosen;

        /* We might have already computed the hash for ICMPv6 errors. In
         * such a case it will always be non-zero. Otherwise now is the
         * time to do it.
         */
        if (!fl6->mp_hash)
                fl6->mp_hash = rt6_multipath_hash(fl6, NULL);

        route_chosen = fl6->mp_hash % (match->rt6i_nsiblings + 1);
        /* Don't change the route if route_chosen == 0
         * (the siblings list does not include ourselves)
         */
        if (route_chosen)
                list_for_each_entry_safe(sibling, next_sibling,
                                         &match->rt6i_siblings, rt6i_siblings) {
                        route_chosen--;
                        if (route_chosen == 0) {
                                struct inet6_dev *idev = sibling->rt6i_idev;

                                if (!netif_carrier_ok(sibling->dst.dev) &&
                                    idev->cnf.ignore_routes_with_linkdown)
                                        break;
                                if (rt6_score_route(sibling, oif, strict) < 0)
                                        break;
                                match = sibling;
                                break;
                        }
                }
        return match;
}

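/* Illustrative sketch (editor's addition): rt6_multipath_select() is a
 * plain modulo draw over the nexthop set. With rt6i_nsiblings == 2
 * (three nexthops total) and mp_hash == 7:
 *
 *      route_chosen = 7 % (2 + 1);     // == 1, take the first sibling
 *
 * while route_chosen == 0 keeps 'match' itself, which is why the
 * sibling walk is skipped entirely in that case.
 */
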
/*
 *      Route lookup. rcu_read_lock() should be held.
 */

static inline struct rt6_info *rt6_device_match(struct net *net,
                                                struct rt6_info *rt,
                                                const struct in6_addr *saddr,
                                                int oif,
                                                int flags)
{
        struct rt6_info *local = NULL;
        struct rt6_info *sprt;

        if (!oif && ipv6_addr_any(saddr))
                goto out;

        for (sprt = rt; sprt; sprt = rcu_dereference(sprt->dst.rt6_next)) {
                struct net_device *dev = sprt->dst.dev;

                if (oif) {
                        if (dev->ifindex == oif)
                                return sprt;
                        if (dev->flags & IFF_LOOPBACK) {
                                if (!sprt->rt6i_idev ||
                                    sprt->rt6i_idev->dev->ifindex != oif) {
                                        if (flags & RT6_LOOKUP_F_IFACE)
                                                continue;
                                        if (local &&
                                            local->rt6i_idev->dev->ifindex == oif)
                                                continue;
                                }
                                local = sprt;
                        }
                } else {
                        if (ipv6_chk_addr(net, saddr, dev,
                                          flags & RT6_LOOKUP_F_IFACE))
                                return sprt;
                }
        }

        if (oif) {
                if (local)
                        return local;

                if (flags & RT6_LOOKUP_F_IFACE)
                        return net->ipv6.ip6_null_entry;
        }
out:
        return rt;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
        struct work_struct work;
        struct in6_addr target;
        struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
        struct in6_addr mcaddr;
        struct __rt6_probe_work *work =
                container_of(w, struct __rt6_probe_work, work);

        addrconf_addr_solict_mult(&work->target, &mcaddr);
        ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
        dev_put(work->dev);
        kfree(work);
}

static void rt6_probe(struct rt6_info *rt)
{
        struct __rt6_probe_work *work;
        struct neighbour *neigh;
        /*
         * Okay, this does not seem to be appropriate
         * for now; however, we need to check whether it
         * really is, aka Router Reachability Probing.
         *
         * Router Reachability Probing MUST be rate-limited
         * to no more than one probe per minute.
         */
        if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
                return;
        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
                if (neigh->nud_state & NUD_VALID)
                        goto out;

                work = NULL;
                write_lock(&neigh->lock);
                if (!(neigh->nud_state & NUD_VALID) &&
                    time_after(jiffies,
                               neigh->updated +
                               rt->rt6i_idev->cnf.rtr_probe_interval)) {
                        work = kmalloc(sizeof(*work), GFP_ATOMIC);
                        if (work)
                                __neigh_set_probe_once(neigh);
                }
                write_unlock(&neigh->lock);
        } else {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
        }

        if (work) {
                INIT_WORK(&work->work, rt6_probe_deferred);
                work->target = rt->rt6i_gateway;
                dev_hold(rt->dst.dev);
                work->dev = rt->dst.dev;
                schedule_work(&work->work);
        }

out:
        rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
        struct net_device *dev = rt->dst.dev;

        if (!oif || dev->ifindex == oif)
                return 2;
        if ((dev->flags & IFF_LOOPBACK) &&
            rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
                return 1;
        return 0;
}

static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
        struct neighbour *neigh;
        enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
                return RT6_NUD_SUCCEED;

        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
                read_lock(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
                        ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
                else if (!(neigh->nud_state & NUD_FAILED))
                        ret = RT6_NUD_SUCCEED;
                else
                        ret = RT6_NUD_FAIL_PROBE;
#endif
                read_unlock(&neigh->lock);
        } else {
                ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
                      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
        }
        rcu_read_unlock_bh();

        return ret;
}

static int rt6_score_route(struct rt6_info *rt, int oif, int strict)
{
        int m;

        m = rt6_check_dev(rt, oif);
        if (!m && (strict & RT6_LOOKUP_F_IFACE))
                return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
        m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
        if (strict & RT6_LOOKUP_F_REACHABLE) {
                int n = rt6_check_neigh(rt);

                if (n < 0)
                        return n;
        }
        return m;
}

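/* Illustrative sketch (editor's addition): how the score built by
 * rt6_score_route() decomposes, assuming CONFIG_IPV6_ROUTER_PREF:
 *
 *      m  = rt6_check_dev(rt, oif);    // 0..2: interface match quality
 *      m |= pref << 2;                 // decoded RFC 4191 preference
 *
 * so among reachable routes on an acceptable interface, a
 * high-preference router always outscores a low-preference one, and
 * negative rt6_nud_state values short-circuit the score entirely.
 */
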
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
                                   int *mpri, struct rt6_info *match,
                                   bool *do_rr)
{
        int m;
        bool match_do_rr = false;
        struct inet6_dev *idev = rt->rt6i_idev;
        struct net_device *dev = rt->dst.dev;

        if (dev && !netif_carrier_ok(dev) &&
            idev->cnf.ignore_routes_with_linkdown &&
            !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
                goto out;

        if (rt6_check_expired(rt))
                goto out;

        m = rt6_score_route(rt, oif, strict);
        if (m == RT6_NUD_FAIL_DO_RR) {
                match_do_rr = true;
                m = 0; /* lowest valid score */
        } else if (m == RT6_NUD_FAIL_HARD) {
                goto out;
        }

        if (strict & RT6_LOOKUP_F_REACHABLE)
                rt6_probe(rt);

        /* note that m can be RT6_NUD_FAIL_PROBE at this point */
        if (m > *mpri) {
                *do_rr = match_do_rr;
                *mpri = m;
                match = rt;
        }
out:
        return match;
}

static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
                                     struct rt6_info *leaf,
                                     struct rt6_info *rr_head,
                                     u32 metric, int oif, int strict,
                                     bool *do_rr)
{
        struct rt6_info *rt, *match, *cont;
        int mpri = -1;

        match = NULL;
        cont = NULL;
        for (rt = rr_head; rt; rt = rcu_dereference(rt->dst.rt6_next)) {
                if (rt->rt6i_metric != metric) {
                        cont = rt;
                        break;
                }

                match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }

        for (rt = leaf; rt && rt != rr_head;
             rt = rcu_dereference(rt->dst.rt6_next)) {
                if (rt->rt6i_metric != metric) {
                        cont = rt;
                        break;
                }

                match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }

        if (match || !cont)
                return match;

        for (rt = cont; rt; rt = rcu_dereference(rt->dst.rt6_next))
                match = find_match(rt, oif, strict, &mpri, match, do_rr);

        return match;
}

static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
                                   int oif, int strict)
{
        struct rt6_info *leaf = rcu_dereference(fn->leaf);
        struct rt6_info *match, *rt0;
        bool do_rr = false;
        int key_plen;

        if (!leaf || leaf == net->ipv6.ip6_null_entry)
                return net->ipv6.ip6_null_entry;

        rt0 = rcu_dereference(fn->rr_ptr);
        if (!rt0)
                rt0 = leaf;

        /* Double check to make sure fn is not an intermediate node
         * and fn->leaf does not point to its child's leaf
         * (this might happen if all routes under fn are deleted from
         * the tree and fib6_repair_tree() is called on the node).
         */
        key_plen = rt0->rt6i_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
        if (rt0->rt6i_src.plen)
                key_plen = rt0->rt6i_src.plen;
#endif
        if (fn->fn_bit != key_plen)
                return net->ipv6.ip6_null_entry;

        match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict,
                             &do_rr);

        if (do_rr) {
                struct rt6_info *next = rcu_dereference(rt0->dst.rt6_next);

                /* no entries matched; do round-robin */
                if (!next || next->rt6i_metric != rt0->rt6i_metric)
                        next = leaf;

                if (next != rt0) {
                        spin_lock_bh(&leaf->rt6i_table->tb6_lock);
                        /* make sure next is not being deleted from the tree */
                        if (next->rt6i_node)
                                rcu_assign_pointer(fn->rr_ptr, next);
                        spin_unlock_bh(&leaf->rt6i_table->tb6_lock);
                }
        }

        return match ? match : net->ipv6.ip6_null_entry;
}

static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
        return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
{
        struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
        unsigned long lifetime;
        struct rt6_info *rt;

        if (len < sizeof(struct route_info))
                return -EINVAL;

        /* Sanity check for prefix_len and length */
        if (rinfo->length > 3) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 128) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 64) {
                if (rinfo->length < 2)
                        return -EINVAL;
        } else if (rinfo->prefix_len > 0) {
                if (rinfo->length < 1)
                        return -EINVAL;
        }

        pref = rinfo->route_pref;
        if (pref == ICMPV6_ROUTER_PREF_INVALID)
                return -EINVAL;

        lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

        if (rinfo->length == 3)
                prefix = (struct in6_addr *)rinfo->prefix;
        else {
                /* this function is safe */
                ipv6_addr_prefix(&prefix_buf,
                                 (struct in6_addr *)rinfo->prefix,
                                 rinfo->prefix_len);
                prefix = &prefix_buf;
        }

        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(gwaddr, dev);
        else
                rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
                                        gwaddr, dev);

        if (rt && !lifetime) {
                ip6_del_rt(rt);
                rt = NULL;
        }

        if (!rt && lifetime)
                rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
                                        dev, pref);
        else if (rt)
                rt->rt6i_flags = RTF_ROUTEINFO |
                                 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

        if (rt) {
                if (!addrconf_finite_timeout(lifetime))
                        rt6_clean_expires(rt);
                else
                        rt6_set_expires(rt, jiffies + HZ * lifetime);

                ip6_rt_put(rt);
        }
        return 0;
}
#endif

static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
                                        struct in6_addr *saddr)
{
        struct fib6_node *pn, *sn;

        while (1) {
                if (fn->fn_flags & RTN_TL_ROOT)
                        return NULL;
                pn = rcu_dereference(fn->parent);
                sn = FIB6_SUBTREE(pn);
                if (sn && sn != fn)
                        fn = fib6_lookup(sn, NULL, saddr);
                else
                        fn = pn;
                if (fn->fn_flags & RTN_RTINFO)
                        return fn;
        }
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
                          bool null_fallback)
{
        struct rt6_info *rt = *prt;

        if (dst_hold_safe(&rt->dst))
                return true;
        if (null_fallback) {
                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
        } else {
                rt = NULL;
        }
        *prt = rt;
        return false;
}

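/* Illustrative sketch (editor's addition): the intended calling pattern
 * for ip6_hold_safe() under RCU, as used by the lookup paths below;
 * 'true' asks for the ip6_null_entry fallback on a dying dst:
 *
 *      rcu_read_lock();
 *      rt = <fib6 lookup>;
 *      if (ip6_hold_safe(net, &rt, true))
 *              dst_use_noref(&rt->dst, jiffies);   // ref taken; bump stats
 *      rcu_read_unlock();
 */
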
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6, int flags)
{
        struct rt6_info *rt, *rt_cache;
        struct fib6_node *fn;

        if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
                flags &= ~RT6_LOOKUP_F_IFACE;

        rcu_read_lock();
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
        rt = rcu_dereference(fn->leaf);
        if (!rt) {
                rt = net->ipv6.ip6_null_entry;
        } else {
                rt = rt6_device_match(net, rt, &fl6->saddr,
                                      fl6->flowi6_oif, flags);
                if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
                        rt = rt6_multipath_select(rt, fl6,
                                                  fl6->flowi6_oif, flags);
        }
        if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }
        /* Search through exception table */
        rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
        if (rt_cache)
                rt = rt_cache;

        if (ip6_hold_safe(net, &rt, true))
                dst_use_noref(&rt->dst, jiffies);

        rcu_read_unlock();

        trace_fib6_table_lookup(net, rt, table, fl6);

        return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags)
{
        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
                            const struct in6_addr *saddr, int oif, int strict)
{
        struct flowi6 fl6 = {
                .flowi6_oif = oif,
                .daddr = *daddr,
        };
        struct dst_entry *dst;
        int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

        if (saddr) {
                memcpy(&fl6.saddr, saddr, sizeof(*saddr));
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        }

        dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
        if (dst->error == 0)
                return (struct rt6_info *) dst;

        dst_release(dst);

        return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with the table->tb6_lock FREE (i.e. not held).
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
                        struct mx6_config *mxc,
                        struct netlink_ext_ack *extack)
{
        int err;
        struct fib6_table *table;

        table = rt->rt6i_table;
        spin_lock_bh(&table->tb6_lock);
        err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
        spin_unlock_bh(&table->tb6_lock);

        return err;
}

int ip6_ins_rt(struct rt6_info *rt)
{
        struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
        struct mx6_config mxc = { .mx = NULL, };

        /* Hold dst to account for the reference from the fib6 tree */
        dst_hold(&rt->dst);
        return __ip6_ins_rt(rt, &info, &mxc, NULL);
}

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
{
        struct net_device *dev = rt->dst.dev;

        if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
                /* For copies of local routes, dst->dev needs to be the
                 * device itself if it is a master device, the master
                 * device if the device is enslaved, and the loopback
                 * device as the default.
                 */
                if (netif_is_l3_slave(dev) &&
                    !rt6_need_strict(&rt->rt6i_dst.addr))
                        dev = l3mdev_master_dev_rcu(dev);
                else if (!netif_is_l3_master(dev))
                        dev = dev_net(dev)->loopback_dev;
                /* The last case is netif_is_l3_master(dev) being true,
                 * in which case we want the device itself returned.
                 */
        }

        return dev;
}

static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
                                           const struct in6_addr *daddr,
                                           const struct in6_addr *saddr)
{
        struct net_device *dev;
        struct rt6_info *rt;

        /*
         *      Clone the route.
         */

        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;

        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(ort);
        rt = __ip6_dst_alloc(dev_net(dev), dev, 0);
        rcu_read_unlock();
        if (!rt)
                return NULL;

        ip6_rt_copy_init(rt, ort);
        rt->rt6i_flags |= RTF_CACHE;
        rt->rt6i_metric = 0;
        rt->dst.flags |= DST_HOST;
        rt->rt6i_dst.addr = *daddr;
        rt->rt6i_dst.plen = 128;

        if (!rt6_is_gw_or_nonexthop(ort)) {
                if (ort->rt6i_dst.plen != 128 &&
                    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                        rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
                        rt->rt6i_src.addr = *saddr;
                        rt->rt6i_src.plen = 128;
                }
#endif
        }

        return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
        struct net_device *dev;
        struct rt6_info *pcpu_rt;

        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(rt);
        pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags);
        rcu_read_unlock();
        if (!pcpu_rt)
                return NULL;
        ip6_rt_copy_init(pcpu_rt, rt);
        pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
        pcpu_rt->rt6i_flags |= RTF_PCPU;
        return pcpu_rt;
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
        struct rt6_info *pcpu_rt, **p;

        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;

        if (pcpu_rt && ip6_hold_safe(NULL, &pcpu_rt, false))
                rt6_dst_from_metrics_check(pcpu_rt);

        return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
        struct rt6_info *pcpu_rt, *prev, **p;

        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                struct net *net = dev_net(rt->dst.dev);

                dst_hold(&net->ipv6.ip6_null_entry->dst);
                return net->ipv6.ip6_null_entry;
        }

        dst_hold(&pcpu_rt->dst);
        p = this_cpu_ptr(rt->rt6i_pcpu);
        prev = cmpxchg(p, NULL, pcpu_rt);
        BUG_ON(prev);

        rt6_dst_from_metrics_check(pcpu_rt);
        return pcpu_rt;
}

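/* Illustrative sketch (editor's addition): rt6_make_pcpu_route() above
 * publishes the clone with a lock-free cmpxchg():
 *
 *      p = this_cpu_ptr(rt->rt6i_pcpu);
 *      prev = cmpxchg(p, NULL, pcpu_rt);       // NULL -> pcpu_rt, atomically
 *      BUG_ON(prev);                           // slot was observed NULL
 *
 * The caller only reaches this path after rt6_get_pcpu_route() returned
 * NULL for the current cpu, and bottom halves stay disabled in between
 * (see ip6_pol_route() below), so the slot cannot have been filled
 * behind our back.
 */
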
/* exception hash table implementation */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
                                 struct rt6_exception *rt6_ex)
{
        struct net *net;

        if (!bucket || !rt6_ex)
                return;

        net = dev_net(rt6_ex->rt6i->dst.dev);
        rt6_ex->rt6i->rt6i_node = NULL;
        hlist_del_rcu(&rt6_ex->hlist);
        rt6_release(rt6_ex->rt6i);
        kfree_rcu(rt6_ex, rcu);
        WARN_ON_ONCE(!bucket->depth);
        bucket->depth--;
        net->ipv6.rt6_stats->fib_rt_cache--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
        struct rt6_exception *rt6_ex, *oldest = NULL;

        if (!bucket)
                return;

        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
                        oldest = rt6_ex;
        }
        rt6_remove_exception(bucket, oldest);
}

static u32 rt6_exception_hash(const struct in6_addr *dst,
                              const struct in6_addr *src)
{
        static u32 seed __read_mostly;
        u32 val;

        net_get_random_once(&seed, sizeof(seed));
        val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
        if (src)
                val = jhash(src, sizeof(*src), val);
#endif
        return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

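/* Illustrative sketch (editor's addition): rt6_exception_hash() returns
 * an index into an array of FIB6_EXCEPTION_BUCKET_SIZE buckets, so the
 * find helpers below simply offset the base pointer:
 *
 *      bucket  = rcu_dereference(rt->rt6i_exception_bucket);
 *      bucket += rt6_exception_hash(daddr, saddr);
 *      hlist_for_each_entry(rt6_ex, &bucket->chain, hlist)
 *              // compare rt6i_dst (and rt6i_src under CONFIG_IPV6_SUBTREES)
 */
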
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
                              const struct in6_addr *daddr,
                              const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}

static int rt6_insert_exception(struct rt6_info *nrt,
                                struct rt6_info *ort)
{
        struct net *net = dev_net(ort->dst.dev);
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        int err = 0;

        /* ort can't be a cache or pcpu route */
        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;
        WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));

        spin_lock_bh(&rt6_exception_lock);

        if (ort->exception_bucket_flushed) {
                err = -EINVAL;
                goto out;
        }

        bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
        if (!bucket) {
                bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
                                 GFP_ATOMIC);
                if (!bucket) {
                        err = -ENOMEM;
                        goto out;
                }
                rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
        }

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates ort is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (ort->rt6i_src.plen)
                src_key = &nrt->rt6i_src.addr;
#endif

        /* Update rt6i_prefsrc as it could be changed
         * in rt6_remove_prefsrc()
         */
        nrt->rt6i_prefsrc = ort->rt6i_prefsrc;
        /* rt6_mtu_change() might lower mtu on ort.
         * Only insert this exception route if its mtu
         * is less than ort's mtu value.
         */
        if (nrt->rt6i_pmtu >= dst_mtu(&ort->dst)) {
                err = -EINVAL;
                goto out;
        }

        rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex)
                rt6_remove_exception(bucket, rt6_ex);

        rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
        if (!rt6_ex) {
                err = -ENOMEM;
                goto out;
        }
        rt6_ex->rt6i = nrt;
        rt6_ex->stamp = jiffies;
        atomic_inc(&nrt->rt6i_ref);
        nrt->rt6i_node = ort->rt6i_node;
        hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
        bucket->depth++;
        net->ipv6.rt6_stats->fib_rt_cache++;

        if (bucket->depth > FIB6_MAX_DEPTH)
                rt6_exception_remove_oldest(bucket);

out:
        spin_unlock_bh(&rt6_exception_lock);

        /* Update fn->fn_sernum to invalidate all cached dst */
        if (!err) {
                fib6_update_sernum(ort);
                fib6_force_start_gc(net);
        }

        return err;
}

void rt6_flush_exceptions(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        spin_lock_bh(&rt6_exception_lock);
        /* Prevent rt6_insert_exception() from recreating the bucket list */
        rt->exception_bucket_flushed = 1;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
        if (!bucket)
                goto out;

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
                        rt6_remove_exception(bucket, rt6_ex);
                WARN_ON_ONCE(bucket->depth);
                bucket++;
        }

out:
        spin_unlock_bh(&rt6_exception_lock);
}

/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr)
{
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        struct rt6_info *res = NULL;

        bucket = rcu_dereference(rt->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates rt is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (rt->rt6i_src.plen)
                src_key = saddr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

        if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
                res = rt6_ex->rt6i;

        return res;
}

/* Remove the passed in cached rt from the hash table that contains it */
int rt6_remove_exception_rt(struct rt6_info *rt)
{
        struct rt6_info *from = (struct rt6_info *)rt->dst.from;
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        int err;

        if (!from ||
            !(rt->rt6i_flags & RTF_CACHE))
                return -EINVAL;

        if (!rcu_access_pointer(from->rt6i_exception_bucket))
                return -ENOENT;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (from->rt6i_src.plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_spinlock(&bucket,
                                               &rt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex) {
                rt6_remove_exception(bucket, rt6_ex);
                err = 0;
        } else {
                err = -ENOENT;
        }

        spin_unlock_bh(&rt6_exception_lock);
        return err;
}

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
        struct rt6_info *from = (struct rt6_info *)rt->dst.from;
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;

        if (!from ||
            !(rt->rt6i_flags & RTF_CACHE))
                return;

        rcu_read_lock();
        bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (from->rt6i_src.plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket,
                                          &rt->rt6i_dst.addr,
                                          src_key);
        if (rt6_ex)
                rt6_ex->stamp = jiffies;

        rcu_read_unlock();
}

static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int i;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                                rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
                        }
                        bucket++;
                }
        }
}

static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
                                         struct rt6_info *rt, int mtu)
{
        /* If the new MTU is lower than the route PMTU, this new MTU will be the
         * lowest MTU in the path: always allow updating the route PMTU to
         * reflect PMTU decreases.
         *
         * If the new MTU is higher, and the route PMTU is equal to the local
         * MTU, this means the old MTU is the lowest in the path, so allow
         * updating it: if other nodes now have lower MTUs, PMTU discovery will
         * handle this.
         */

        if (dst_mtu(&rt->dst) >= mtu)
                return true;

        if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
                return true;

        return false;
}

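/* Illustrative worked example (editor's addition) for
 * rt6_mtu_change_route_allowed(), assuming a route PMTU of 1400 and
 * that idev->cnf.mtu6 still holds the old link MTU of 1500:
 *
 *      new mtu 1280: 1400 >= 1280                -> allow (PMTU may shrink)
 *      new mtu 9000: 1400 <  9000, 1400 != 1500  -> reject; some other
 *                    hop, not this link, is the 1400-byte bottleneck
 *      route PMTU 1500, new mtu 9000: equal to the old link MTU -> allow
 */
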
static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
                                       struct rt6_info *rt, int mtu)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int i;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));

        if (!bucket)
                return;

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                        struct rt6_info *entry = rt6_ex->rt6i;

                        /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
                         * route), the metrics of its rt->dst.from have already
                         * been updated.
                         */
                        if (entry->rt6i_pmtu &&
                            rt6_mtu_change_route_allowed(idev, entry, mtu))
                                entry->rt6i_pmtu = mtu;
                }
                bucket++;
        }
}

#define RTF_CACHE_GATEWAY       (RTF_GATEWAY | RTF_CACHE)

static void rt6_exceptions_clean_tohost(struct rt6_info *rt,
                                        struct in6_addr *gateway)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(rt->rt6i_exception_bucket))
                return;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                struct rt6_info *entry = rt6_ex->rt6i;

                                if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
                                    RTF_CACHE_GATEWAY &&
                                    ipv6_addr_equal(gateway,
                                                    &entry->rt6i_gateway)) {
                                        rt6_remove_exception(bucket, rt6_ex);
                                }
                        }
                        bucket++;
                }
        }

        spin_unlock_bh(&rt6_exception_lock);
}

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
                                      struct rt6_exception *rt6_ex,
                                      struct fib6_gc_args *gc_args,
                                      unsigned long now)
{
        struct rt6_info *rt = rt6_ex->rt6i;

        /* We are pruning and obsoleting aged-out and non-gateway
         * exceptions even if others still have references to them, so
         * that on the next dst_check() such references can be dropped.
         * EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
         * expired, independently of their aging, as per RFC 8201 section 4.
         */
        if (!(rt->rt6i_flags & RTF_EXPIRES)) {
                if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
                        RT6_TRACE("aging clone %p\n", rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        } else if (time_after(jiffies, rt->dst.expires)) {
                RT6_TRACE("purging expired route %p\n", rt);
                rt6_remove_exception(bucket, rt6_ex);
                return;
        }

        if (rt->rt6i_flags & RTF_GATEWAY) {
                struct neighbour *neigh;
                __u8 neigh_flags = 0;

                neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
                if (neigh)
                        neigh_flags = neigh->flags;

                if (!(neigh_flags & NTF_ROUTER)) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        }

        gc_args->more++;
}

void rt6_age_exceptions(struct rt6_info *rt,
                        struct fib6_gc_args *gc_args,
                        unsigned long now)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(rt->rt6i_exception_bucket))
                return;

        rcu_read_lock_bh();
        spin_lock(&rt6_exception_lock);
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                rt6_age_examine_exception(bucket, rt6_ex,
                                                          gc_args, now);
                        }
                        bucket++;
                }
        }
        spin_unlock(&rt6_exception_lock);
        rcu_read_unlock_bh();
}

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                               int oif, struct flowi6 *fl6, int flags)
{
        struct fib6_node *fn, *saved_fn;
        struct rt6_info *rt, *rt_cache;
        int strict = 0;

        strict |= flags & RT6_LOOKUP_F_IFACE;
        strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
        if (net->ipv6.devconf_all->forwarding == 0)
                strict |= RT6_LOOKUP_F_REACHABLE;

        rcu_read_lock();

        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;

        if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
                oif = 0;

redo_rt6_select:
        rt = rt6_select(net, fn, oif, strict);
        if (rt->rt6i_nsiblings)
                rt = rt6_multipath_select(rt, fl6, oif, strict);
        if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto redo_rt6_select;
                else if (strict & RT6_LOOKUP_F_REACHABLE) {
                        /* also consider unreachable route */
                        strict &= ~RT6_LOOKUP_F_REACHABLE;
                        fn = saved_fn;
                        goto redo_rt6_select;
                }
        }

        /* Search through exception table */
        rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
        if (rt_cache)
                rt = rt_cache;

        if (rt == net->ipv6.ip6_null_entry) {
                rcu_read_unlock();
                dst_hold(&rt->dst);
                trace_fib6_table_lookup(net, rt, table, fl6);
                return rt;
        } else if (rt->rt6i_flags & RTF_CACHE) {
                if (ip6_hold_safe(net, &rt, true)) {
                        dst_use_noref(&rt->dst, jiffies);
                        rt6_dst_from_metrics_check(rt);
                }
                rcu_read_unlock();
                trace_fib6_table_lookup(net, rt, table, fl6);
                return rt;
        } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
                            !(rt->rt6i_flags & RTF_GATEWAY))) {
                /* Create a RTF_CACHE clone which will not be
                 * owned by the fib6 tree.  It is for the special case where
                 * the daddr in the skb during the neighbor look-up is different
                 * from the fl6->daddr used to look-up route here.
                 */

                struct rt6_info *uncached_rt;

                if (ip6_hold_safe(net, &rt, true)) {
                        dst_use_noref(&rt->dst, jiffies);
                } else {
                        rcu_read_unlock();
                        uncached_rt = rt;
                        goto uncached_rt_out;
                }
                rcu_read_unlock();

                uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
                dst_release(&rt->dst);

                if (uncached_rt) {
                        /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
                         * No need for another dst_hold()
                         */
                        rt6_uncached_list_add(uncached_rt);
                        atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
                } else {
                        uncached_rt = net->ipv6.ip6_null_entry;
                        dst_hold(&uncached_rt->dst);
                }

uncached_rt_out:
                trace_fib6_table_lookup(net, uncached_rt, table, fl6);
                return uncached_rt;

        } else {
                /* Get a percpu copy */

                struct rt6_info *pcpu_rt;

                dst_use_noref(&rt->dst, jiffies);
                local_bh_disable();
                pcpu_rt = rt6_get_pcpu_route(rt);

                if (!pcpu_rt) {
                        /* atomic_inc_not_zero() is needed when using rcu */
                        if (atomic_inc_not_zero(&rt->rt6i_ref)) {
                                /* No dst_hold() on rt is needed because grabbing
                                 * rt->rt6i_ref makes sure rt can't be released.
                                 */
                                pcpu_rt = rt6_make_pcpu_route(rt);
                                rt6_release(rt);
                        } else {
                                /* rt is already removed from tree */
                                pcpu_rt = net->ipv6.ip6_null_entry;
                                dst_hold(&pcpu_rt->dst);
                        }
                }
                local_bh_enable();
                rcu_read_unlock();
                trace_fib6_table_lookup(net, pcpu_rt, table, fl6);
                return pcpu_rt;
        }
}
EXPORT_SYMBOL_GPL(ip6_pol_route);

static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
                                            struct flowi6 *fl6, int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
                                         struct net_device *dev,
                                         struct flowi6 *fl6, int flags)
{
        if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
                flags |= RT6_LOOKUP_F_IFACE;

        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

static void ip6_multipath_l3_keys(const struct sk_buff *skb,
                                  struct flow_keys *keys)
{
        const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
        const struct ipv6hdr *key_iph = outer_iph;
        const struct ipv6hdr *inner_iph;
        const struct icmp6hdr *icmph;
        struct ipv6hdr _inner_iph;
        struct icmp6hdr _icmph;

        if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
                goto out;

        icmph = skb_header_pointer(skb, skb_transport_offset(skb),
                                   sizeof(_icmph), &_icmph);
        if (!icmph)
                goto out;

        if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
            icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
            icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
            icmph->icmp6_type != ICMPV6_PARAMPROB)
                goto out;

        inner_iph = skb_header_pointer(skb,
                                       skb_transport_offset(skb) + sizeof(*icmph),
                                       sizeof(_inner_iph), &_inner_iph);
        if (!inner_iph)
                goto out;

        key_iph = inner_iph;
out:
        memset(keys, 0, sizeof(*keys));
        keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        keys->addrs.v6addrs.src = key_iph->saddr;
        keys->addrs.v6addrs.dst = key_iph->daddr;
        keys->tags.flow_label = ip6_flowlabel(key_iph);
        keys->basic.ip_proto = key_iph->nexthdr;
}

/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb)
{
        struct flow_keys hash_keys;

        if (skb) {
                ip6_multipath_l3_keys(skb, &hash_keys);
                return flow_hash_from_keys(&hash_keys);
        }

        return get_hash_from_flowi6(fl6);
}

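/* Illustrative sketch (editor's addition): for the ICMPv6 error types
 * handled above, the flow keys come from the *inner* (offending)
 * header, so the error is hashed onto the same nexthop as the flow
 * that triggered it. E.g. a PKT_TOOBIG quoting an original
 * 2001:db8::1 -> 2001:db8::2 packet yields keys
 * {src 2001:db8::1, dst 2001:db8::2}, matching the
 * flow_hash_from_keys() value of the original forward flow.
 */
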
void ip6_route_input(struct sk_buff *skb)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowlabel = ip6_flowinfo(iph),
                .flowi6_mark = skb->mark,
                .flowi6_proto = iph->nexthdr,
        };

        tun_info = skb_tunnel_info(skb);
        if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
                fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
        if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
                fl6.mp_hash = rt6_multipath_hash(&fl6, skb);
        skb_dst_drop(skb);
        skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}

static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
                                             struct flowi6 *fl6, int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
                                         struct flowi6 *fl6, int flags)
{
        bool any_src;

        if (rt6_need_strict(&fl6->daddr)) {
                struct dst_entry *dst;

                dst = l3mdev_link_scope_lookup(net, fl6);
                if (dst)
                        return dst;
        }

        fl6->flowi6_iif = LOOPBACK_IFINDEX;

        any_src = ipv6_addr_any(&fl6->saddr);
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
            (fl6->flowi6_oif && any_src))
                flags |= RT6_LOOKUP_F_IFACE;

        if (!any_src)
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        else if (sk)
                flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);

struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
        struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
        struct net_device *loopback_dev = net->loopback_dev;
        struct dst_entry *new = NULL;

        rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
                       DST_OBSOLETE_DEAD, 0);
        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

                new = &rt->dst;
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard_out;

                dst_copy_metrics(new, &ort->dst);

                rt->rt6i_idev = in6_dev_get(loopback_dev);
                rt->rt6i_gateway = ort->rt6i_gateway;
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
                rt->rt6i_metric = 0;

                memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
                memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
        }

        dst_release(dst_orig);
        return new ? new : ERR_PTR(-ENOMEM);
}

/*
 *      Destination cache support functions
 */

static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
        if (rt->dst.from &&
            dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
                dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
}

static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
        u32 rt_cookie = 0;

        if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
                return NULL;

        if (rt6_check_expired(rt))
                return NULL;

        return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
        if (!__rt6_check_expired(rt) &&
            rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
            rt6_check((struct rt6_info *)(rt->dst.from), cookie))
                return &rt->dst;
        else
                return NULL;
}

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
        struct rt6_info *rt;

        rt = (struct rt6_info *) dst;

        /* All IPv6 dsts are created with ->obsolete set to the value
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         */

        rt6_dst_from_metrics_check(rt);

        if (rt->rt6i_flags & RTF_PCPU ||
            (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
                return rt6_dst_from_check(rt, cookie);
        else
                return rt6_check(rt, cookie);
}

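/* Illustrative sketch (editor's addition): a typical consumer of
 * ip6_dst_check() through the ops table, e.g. a socket-cached route
 * (compare ip6_sk_update_pmtu() at the end of this section):
 *
 *      dst = __sk_dst_get(sk);
 *      if (dst && !dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
 *              ;       // stale: cookie/sernum mismatch or expired clone
 *
 * Because every IPv6 dst is born DST_OBSOLETE_FORCE_CHK, dst_check()
 * never short-circuits and always lands here.
 */
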
2018 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2019 {
2020 struct rt6_info *rt = (struct rt6_info *) dst;
2021
2022 if (rt) {
2023 if (rt->rt6i_flags & RTF_CACHE) {
2024 if (rt6_check_expired(rt)) {
2025 ip6_del_rt(rt);
2026 dst = NULL;
2027 }
2028 } else {
2029 dst_release(dst);
2030 dst = NULL;
2031 }
2032 }
2033 return dst;
2034 }
2035
2036 static void ip6_link_failure(struct sk_buff *skb)
2037 {
2038 struct rt6_info *rt;
2039
2040 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2041
2042 rt = (struct rt6_info *) skb_dst(skb);
2043 if (rt) {
2044 if (rt->rt6i_flags & RTF_CACHE) {
2045 if (dst_hold_safe(&rt->dst))
2046 ip6_del_rt(rt);
2047 } else {
2048 struct fib6_node *fn;
2049
2050 rcu_read_lock();
2051 fn = rcu_dereference(rt->rt6i_node);
2052 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2053 fn->fn_sernum = -1;
2054 rcu_read_unlock();
2055 }
2056 }
2057 }
2058
2059 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2060 {
2061 struct net *net = dev_net(rt->dst.dev);
2062
2063 rt->rt6i_flags |= RTF_MODIFIED;
2064 rt->rt6i_pmtu = mtu;
2065 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2066 }
2067
2068 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2069 {
2070 return !(rt->rt6i_flags & RTF_CACHE) &&
2071 (rt->rt6i_flags & RTF_PCPU ||
2072 rcu_access_pointer(rt->rt6i_node));
2073 }
2074
2075 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2076 const struct ipv6hdr *iph, u32 mtu)
2077 {
2078 const struct in6_addr *daddr, *saddr;
2079 struct rt6_info *rt6 = (struct rt6_info *)dst;
2080
2081 if (rt6->rt6i_flags & RTF_LOCAL)
2082 return;
2083
2084 if (dst_metric_locked(dst, RTAX_MTU))
2085 return;
2086
2087 if (iph) {
2088 daddr = &iph->daddr;
2089 saddr = &iph->saddr;
2090 } else if (sk) {
2091 daddr = &sk->sk_v6_daddr;
2092 saddr = &inet6_sk(sk)->saddr;
2093 } else {
2094 daddr = NULL;
2095 saddr = NULL;
2096 }
2097 dst_confirm_neigh(dst, daddr);
2098 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2099 if (mtu >= dst_mtu(dst))
2100 return;
2101
2102 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2103 rt6_do_update_pmtu(rt6, mtu);
2104 /* update rt6_ex->stamp for cache */
2105 if (rt6->rt6i_flags & RTF_CACHE)
2106 rt6_update_exception_stamp_rt(rt6);
2107 } else if (daddr) {
2108 struct rt6_info *nrt6;
2109
2110 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
2111 if (nrt6) {
2112 rt6_do_update_pmtu(nrt6, mtu);
2113 if (rt6_insert_exception(nrt6, rt6))
2114 dst_release_immediate(&nrt6->dst);
2115 }
2116 }
2117 }
2118
2119 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2120 struct sk_buff *skb, u32 mtu)
2121 {
2122 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2123 }
2124
2125 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2126 int oif, u32 mark, kuid_t uid)
2127 {
2128 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2129 struct dst_entry *dst;
2130 struct flowi6 fl6;
2131
2132 memset(&fl6, 0, sizeof(fl6));
2133 fl6.flowi6_oif = oif;
2134 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
2135 fl6.daddr = iph->daddr;
2136 fl6.saddr = iph->saddr;
2137 fl6.flowlabel = ip6_flowinfo(iph);
2138 fl6.flowi6_uid = uid;
2139
2140 dst = ip6_route_output(net, NULL, &fl6);
2141 if (!dst->error)
2142 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
2143 dst_release(dst);
2144 }
2145 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
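#if 0
/* Sketch of a typical caller: a tunnel/protocol error handler reacting
 * to an ICMPV6_PKT_TOOBIG message. 'info' carries the reported MTU in
 * network byte order, matching the __be32 parameter above. The handler
 * name and its argument list are hypothetical.
 */
static void example_err_handler(struct sk_buff *skb, struct net *net,
				u8 type, __be32 info)
{
	if (type == ICMPV6_PKT_TOOBIG)
		ip6_update_pmtu(skb, net, info, 0, 0,
				sock_net_uid(net, NULL));
}
#endif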
2146
2147 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2148 {
2149 struct dst_entry *dst;
2150
2151 ip6_update_pmtu(skb, sock_net(sk), mtu,
2152 sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
2153
2154 dst = __sk_dst_get(sk);
2155 if (!dst || !dst->obsolete ||
2156 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2157 return;
2158
2159 bh_lock_sock(sk);
2160 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2161 ip6_datagram_dst_update(sk, false);
2162 bh_unlock_sock(sk);
2163 }
2164 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2165
2166 /* Handle redirects */
2167 struct ip6rd_flowi {
2168 struct flowi6 fl6;
2169 struct in6_addr gateway;
2170 };
2171
2172 static struct rt6_info *__ip6_route_redirect(struct net *net,
2173 struct fib6_table *table,
2174 struct flowi6 *fl6,
2175 int flags)
2176 {
2177 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2178 struct rt6_info *rt, *rt_cache;
2179 struct fib6_node *fn;
2180
2181 /* Get the "current" route for this destination and
2182 * check if the redirect has come from the appropriate router.
2183 *
2184 * RFC 4861 specifies that redirects should only be
2185 * accepted if they come from the nexthop to the target.
2186 * Due to the way the routes are chosen, this notion
2187 * is a bit fuzzy and one might need to check all possible
2188 * routes.
2189 */
2190
2191 rcu_read_lock();
2192 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2193 restart:
2194 for_each_fib6_node_rt_rcu(fn) {
2195 if (rt6_check_expired(rt))
2196 continue;
2197 if (rt->dst.error)
2198 break;
2199 if (!(rt->rt6i_flags & RTF_GATEWAY))
2200 continue;
2201 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
2202 continue;
2203 /* rt_cache's gateway might be different from its 'parent'
2204 * in the case of an ip redirect.
2205 * So we keep searching in the exception table if the gateway
2206 * is different.
2207 */
2208 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) {
2209 rt_cache = rt6_find_cached_rt(rt,
2210 &fl6->daddr,
2211 &fl6->saddr);
2212 if (rt_cache &&
2213 ipv6_addr_equal(&rdfl->gateway,
2214 &rt_cache->rt6i_gateway)) {
2215 rt = rt_cache;
2216 break;
2217 }
2218 continue;
2219 }
2220 break;
2221 }
2222
2223 if (!rt)
2224 rt = net->ipv6.ip6_null_entry;
2225 else if (rt->dst.error) {
2226 rt = net->ipv6.ip6_null_entry;
2227 goto out;
2228 }
2229
2230 if (rt == net->ipv6.ip6_null_entry) {
2231 fn = fib6_backtrack(fn, &fl6->saddr);
2232 if (fn)
2233 goto restart;
2234 }
2235
2236 out:
2237 ip6_hold_safe(net, &rt, true);
2238
2239 rcu_read_unlock();
2240
2241 trace_fib6_table_lookup(net, rt, table, fl6);
2242 return rt;
2243 }
2244
2245 static struct dst_entry *ip6_route_redirect(struct net *net,
2246 const struct flowi6 *fl6,
2247 const struct in6_addr *gateway)
2248 {
2249 int flags = RT6_LOOKUP_F_HAS_SADDR;
2250 struct ip6rd_flowi rdfl;
2251
2252 rdfl.fl6 = *fl6;
2253 rdfl.gateway = *gateway;
2254
2255 return fib6_rule_lookup(net, &rdfl.fl6,
2256 flags, __ip6_route_redirect);
2257 }
2258
2259 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2260 kuid_t uid)
2261 {
2262 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2263 struct dst_entry *dst;
2264 struct flowi6 fl6;
2265
2266 memset(&fl6, 0, sizeof(fl6));
2267 fl6.flowi6_iif = LOOPBACK_IFINDEX;
2268 fl6.flowi6_oif = oif;
2269 fl6.flowi6_mark = mark;
2270 fl6.daddr = iph->daddr;
2271 fl6.saddr = iph->saddr;
2272 fl6.flowlabel = ip6_flowinfo(iph);
2273 fl6.flowi6_uid = uid;
2274
2275 dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
2276 rt6_do_redirect(dst, NULL, skb);
2277 dst_release(dst);
2278 }
2279 EXPORT_SYMBOL_GPL(ip6_redirect);
2280
2281 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
2282 u32 mark)
2283 {
2284 const struct ipv6hdr *iph = ipv6_hdr(skb);
2285 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
2286 struct dst_entry *dst;
2287 struct flowi6 fl6;
2288
2289 memset(&fl6, 0, sizeof(fl6));
2290 fl6.flowi6_iif = LOOPBACK_IFINDEX;
2291 fl6.flowi6_oif = oif;
2292 fl6.flowi6_mark = mark;
2293 fl6.daddr = msg->dest;
2294 fl6.saddr = iph->daddr;
2295 fl6.flowi6_uid = sock_net_uid(net, NULL);
2296
2297 dst = ip6_route_redirect(net, &fl6, &iph->saddr);
2298 rt6_do_redirect(dst, NULL, skb);
2299 dst_release(dst);
2300 }
2301
2302 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2303 {
2304 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2305 sk->sk_uid);
2306 }
2307 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2308
2309 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
2310 {
2311 struct net_device *dev = dst->dev;
2312 unsigned int mtu = dst_mtu(dst);
2313 struct net *net = dev_net(dev);
2314
2315 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
2316
2317 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
2318 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
2319
2320 /*
2321 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
2322 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
2323 * IPV6_MAXPLEN is also valid and means: "any MSS,
2324 * rely only on pmtu discovery"
2325 */
2326 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
2327 mtu = IPV6_MAXPLEN;
2328 return mtu;
2329 }
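/* Worked example for the calculation above: with a 1500-byte link MTU,
 * advmss = 1500 - sizeof(struct ipv6hdr) - sizeof(struct tcphdr)
 *        = 1500 - 40 - 20 = 1440 bytes,
 * clamped from below by ip6_rt_min_advmss, and set to IPV6_MAXPLEN
 * ("any MSS, rely on PMTU discovery") when it would exceed
 * IPV6_MAXPLEN - sizeof(struct tcphdr).
 */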
2330
2331 static unsigned int ip6_mtu(const struct dst_entry *dst)
2332 {
2333 const struct rt6_info *rt = (const struct rt6_info *)dst;
2334 unsigned int mtu = rt->rt6i_pmtu;
2335 struct inet6_dev *idev;
2336
2337 if (mtu)
2338 goto out;
2339
2340 mtu = dst_metric_raw(dst, RTAX_MTU);
2341 if (mtu)
2342 goto out;
2343
2344 mtu = IPV6_MIN_MTU;
2345
2346 rcu_read_lock();
2347 idev = __in6_dev_get(dst->dev);
2348 if (idev)
2349 mtu = idev->cnf.mtu6;
2350 rcu_read_unlock();
2351
2352 out:
2353 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2354
2355 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
2356 }
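/* Resolution order in ip6_mtu() above: a learned PMTU (rt6i_pmtu) wins
 * over an explicit RTAX_MTU metric, which wins over the device's
 * cnf.mtu6 (IPV6_MIN_MTU if no inet6_dev is attached); the result is
 * capped at IP6_MAX_MTU and any lightweight-tunnel encapsulation
 * headroom is subtracted.
 */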
2357
2358 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
2359 struct flowi6 *fl6)
2360 {
2361 struct dst_entry *dst;
2362 struct rt6_info *rt;
2363 struct inet6_dev *idev = in6_dev_get(dev);
2364 struct net *net = dev_net(dev);
2365
2366 if (unlikely(!idev))
2367 return ERR_PTR(-ENODEV);
2368
2369 rt = ip6_dst_alloc(net, dev, 0);
2370 if (unlikely(!rt)) {
2371 in6_dev_put(idev);
2372 dst = ERR_PTR(-ENOMEM);
2373 goto out;
2374 }
2375
2376 rt->dst.flags |= DST_HOST;
2377 rt->dst.input = ip6_input;
2378 rt->dst.output = ip6_output;
2379 rt->rt6i_gateway = fl6->daddr;
2380 rt->rt6i_dst.addr = fl6->daddr;
2381 rt->rt6i_dst.plen = 128;
2382 rt->rt6i_idev = idev;
2383 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
2384
2385 /* Add this dst into uncached_list so that rt6_ifdown() can
2386 * properly release the net_device
2387 */
2388 rt6_uncached_list_add(rt);
2389 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2390
2391 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
2392
2393 out:
2394 return dst;
2395 }
2396
2397 static int ip6_dst_gc(struct dst_ops *ops)
2398 {
2399 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
2400 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
2401 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
2402 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2403 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2404 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
2405 int entries;
2406
2407 entries = dst_entries_get_fast(ops);
2408 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
2409 entries <= rt_max_size)
2410 goto out;
2411
2412 net->ipv6.ip6_rt_gc_expire++;
2413 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
2414 entries = dst_entries_get_slow(ops);
2415 if (entries < ops->gc_thresh)
2416 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
2417 out:
2418 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
2419 return entries > rt_max_size;
2420 }
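/* Sketch of the heuristic above: GC is skipped while the last run is
 * within ip6_rt_gc_min_interval and the entry count stays at or below
 * ip6_rt_max_size. When GC does run, ip6_rt_gc_expire is passed to
 * fib6_run_gc() as the idle timeout for cached clones, reset to half
 * of ip6_rt_gc_timeout once the count drops below gc_thresh, and
 * decayed geometrically (expire -= expire >> elasticity) on each call.
 */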
2421
2422 static int ip6_convert_metrics(struct mx6_config *mxc,
2423 const struct fib6_config *cfg)
2424 {
2425 struct net *net = cfg->fc_nlinfo.nl_net;
2426 bool ecn_ca = false;
2427 struct nlattr *nla;
2428 int remaining;
2429 u32 *mp;
2430
2431 if (!cfg->fc_mx)
2432 return 0;
2433
2434 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
2435 if (unlikely(!mp))
2436 return -ENOMEM;
2437
2438 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
2439 int type = nla_type(nla);
2440 u32 val;
2441
2442 if (!type)
2443 continue;
2444 if (unlikely(type > RTAX_MAX))
2445 goto err;
2446
2447 if (type == RTAX_CC_ALGO) {
2448 char tmp[TCP_CA_NAME_MAX];
2449
2450 nla_strlcpy(tmp, nla, sizeof(tmp));
2451 val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
2452 if (val == TCP_CA_UNSPEC)
2453 goto err;
2454 } else {
2455 val = nla_get_u32(nla);
2456 }
2457 if (type == RTAX_HOPLIMIT && val > 255)
2458 val = 255;
2459 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
2460 goto err;
2461
2462 mp[type - 1] = val;
2463 __set_bit(type - 1, mxc->mx_valid);
2464 }
2465
2466 if (ecn_ca) {
2467 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
2468 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
2469 }
2470
2471 mxc->mx = mp;
2472 return 0;
2473 err:
2474 kfree(mp);
2475 return -EINVAL;
2476 }
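/* Example of the input handled above: "ip -6 route add 2001:db8::/64
 * dev eth0 mtu 1280" arrives with an RTA_METRICS nest containing a
 * single RTAX_MTU attribute; the loop stores 1280 in mp[RTAX_MTU - 1]
 * and sets the matching bit in mxc->mx_valid.
 */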
2477
2478 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2479 struct fib6_config *cfg,
2480 const struct in6_addr *gw_addr)
2481 {
2482 struct flowi6 fl6 = {
2483 .flowi6_oif = cfg->fc_ifindex,
2484 .daddr = *gw_addr,
2485 .saddr = cfg->fc_prefsrc,
2486 };
2487 struct fib6_table *table;
2488 struct rt6_info *rt;
2489 int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
2490
2491 table = fib6_get_table(net, cfg->fc_table);
2492 if (!table)
2493 return NULL;
2494
2495 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2496 flags |= RT6_LOOKUP_F_HAS_SADDR;
2497
2498 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
2499
2500 /* if table lookup failed, fall back to full lookup */
2501 if (rt == net->ipv6.ip6_null_entry) {
2502 ip6_rt_put(rt);
2503 rt = NULL;
2504 }
2505
2506 return rt;
2507 }
2508
2509 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
2510 struct netlink_ext_ack *extack)
2511 {
2512 struct net *net = cfg->fc_nlinfo.nl_net;
2513 struct rt6_info *rt = NULL;
2514 struct net_device *dev = NULL;
2515 struct inet6_dev *idev = NULL;
2516 struct fib6_table *table;
2517 int addr_type;
2518 int err = -EINVAL;
2519
2520 /* RTF_PCPU is an internal flag; cannot be set by userspace */
2521 if (cfg->fc_flags & RTF_PCPU) {
2522 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
2523 goto out;
2524 }
2525
2526 /* RTF_CACHE is an internal flag; cannot be set by userspace */
2527 if (cfg->fc_flags & RTF_CACHE) {
2528 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
2529 goto out;
2530 }
2531
2532 if (cfg->fc_dst_len > 128) {
2533 NL_SET_ERR_MSG(extack, "Invalid prefix length");
2534 goto out;
2535 }
2536 if (cfg->fc_src_len > 128) {
2537 NL_SET_ERR_MSG(extack, "Invalid source address length");
2538 goto out;
2539 }
2540 #ifndef CONFIG_IPV6_SUBTREES
2541 if (cfg->fc_src_len) {
2542 NL_SET_ERR_MSG(extack,
2543 "Specifying source address requires IPV6_SUBTREES to be enabled");
2544 goto out;
2545 }
2546 #endif
2547 if (cfg->fc_ifindex) {
2548 err = -ENODEV;
2549 dev = dev_get_by_index(net, cfg->fc_ifindex);
2550 if (!dev)
2551 goto out;
2552 idev = in6_dev_get(dev);
2553 if (!idev)
2554 goto out;
2555 }
2556
2557 if (cfg->fc_metric == 0)
2558 cfg->fc_metric = IP6_RT_PRIO_USER;
2559
2560 err = -ENOBUFS;
2561 if (cfg->fc_nlinfo.nlh &&
2562 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
2563 table = fib6_get_table(net, cfg->fc_table);
2564 if (!table) {
2565 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
2566 table = fib6_new_table(net, cfg->fc_table);
2567 }
2568 } else {
2569 table = fib6_new_table(net, cfg->fc_table);
2570 }
2571
2572 if (!table)
2573 goto out;
2574
2575 rt = ip6_dst_alloc(net, NULL,
2576 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
2577
2578 if (!rt) {
2579 err = -ENOMEM;
2580 goto out;
2581 }
2582
2583 if (cfg->fc_flags & RTF_EXPIRES)
2584 rt6_set_expires(rt, jiffies +
2585 clock_t_to_jiffies(cfg->fc_expires));
2586 else
2587 rt6_clean_expires(rt);
2588
2589 if (cfg->fc_protocol == RTPROT_UNSPEC)
2590 cfg->fc_protocol = RTPROT_BOOT;
2591 rt->rt6i_protocol = cfg->fc_protocol;
2592
2593 addr_type = ipv6_addr_type(&cfg->fc_dst);
2594
2595 if (addr_type & IPV6_ADDR_MULTICAST)
2596 rt->dst.input = ip6_mc_input;
2597 else if (cfg->fc_flags & RTF_LOCAL)
2598 rt->dst.input = ip6_input;
2599 else
2600 rt->dst.input = ip6_forward;
2601
2602 rt->dst.output = ip6_output;
2603
2604 if (cfg->fc_encap) {
2605 struct lwtunnel_state *lwtstate;
2606
2607 err = lwtunnel_build_state(cfg->fc_encap_type,
2608 cfg->fc_encap, AF_INET6, cfg,
2609 &lwtstate, extack);
2610 if (err)
2611 goto out;
2612 rt->dst.lwtstate = lwtstate_get(lwtstate);
2613 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
2614 rt->dst.lwtstate->orig_output = rt->dst.output;
2615 rt->dst.output = lwtunnel_output;
2616 }
2617 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
2618 rt->dst.lwtstate->orig_input = rt->dst.input;
2619 rt->dst.input = lwtunnel_input;
2620 }
2621 }
2622
2623 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
2624 rt->rt6i_dst.plen = cfg->fc_dst_len;
2625 if (rt->rt6i_dst.plen == 128)
2626 rt->dst.flags |= DST_HOST;
2627
2628 #ifdef CONFIG_IPV6_SUBTREES
2629 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
2630 rt->rt6i_src.plen = cfg->fc_src_len;
2631 #endif
2632
2633 rt->rt6i_metric = cfg->fc_metric;
2634
2635 /* We cannot add true routes via loopback here,
2636 they would result in kernel looping; promote them to reject routes
2637 */
2638 if ((cfg->fc_flags & RTF_REJECT) ||
2639 (dev && (dev->flags & IFF_LOOPBACK) &&
2640 !(addr_type & IPV6_ADDR_LOOPBACK) &&
2641 !(cfg->fc_flags & RTF_LOCAL))) {
2642 /* hold loopback dev/idev if we haven't done so. */
2643 if (dev != net->loopback_dev) {
2644 if (dev) {
2645 dev_put(dev);
2646 in6_dev_put(idev);
2647 }
2648 dev = net->loopback_dev;
2649 dev_hold(dev);
2650 idev = in6_dev_get(dev);
2651 if (!idev) {
2652 err = -ENODEV;
2653 goto out;
2654 }
2655 }
2656 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
2657 switch (cfg->fc_type) {
2658 case RTN_BLACKHOLE:
2659 rt->dst.error = -EINVAL;
2660 rt->dst.output = dst_discard_out;
2661 rt->dst.input = dst_discard;
2662 break;
2663 case RTN_PROHIBIT:
2664 rt->dst.error = -EACCES;
2665 rt->dst.output = ip6_pkt_prohibit_out;
2666 rt->dst.input = ip6_pkt_prohibit;
2667 break;
2668 case RTN_THROW:
2669 case RTN_UNREACHABLE:
2670 default:
2671 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
2672 : (cfg->fc_type == RTN_UNREACHABLE)
2673 ? -EHOSTUNREACH : -ENETUNREACH;
2674 rt->dst.output = ip6_pkt_discard_out;
2675 rt->dst.input = ip6_pkt_discard;
2676 break;
2677 }
2678 goto install_route;
2679 }
2680
2681 if (cfg->fc_flags & RTF_GATEWAY) {
2682 const struct in6_addr *gw_addr;
2683 int gwa_type;
2684
2685 gw_addr = &cfg->fc_gateway;
2686 gwa_type = ipv6_addr_type(gw_addr);
2687
2688 /* if gw_addr is local we will fail to detect this in case the
2689 * address is still TENTATIVE (DAD in progress). rt6_lookup()
2690 * will return the already-added prefix route via the interface
2691 * that the prefix route was assigned to, which might be non-loopback.
2692 */
2693 err = -EINVAL;
2694 if (ipv6_chk_addr_and_flags(net, gw_addr,
2695 gwa_type & IPV6_ADDR_LINKLOCAL ?
2696 dev : NULL, 0, 0)) {
2697 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2698 goto out;
2699 }
2700 rt->rt6i_gateway = *gw_addr;
2701
2702 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
2703 struct rt6_info *grt = NULL;
2704
2705 /* IPv6 strictly forbids using non-link-local
2706 addresses as a nexthop address.
2707 Otherwise, the router will not be able to send redirects.
2708 That is generally good, but in some (rare!) circumstances
2709 (SIT, PtP, NBMA NOARP links) it is handy to allow
2710 some exceptions. --ANK
2711 We allow IPv4-mapped nexthops to support RFC 4798-style
2712 addressing.
2713 */
2714 if (!(gwa_type & (IPV6_ADDR_UNICAST |
2715 IPV6_ADDR_MAPPED))) {
2716 NL_SET_ERR_MSG(extack,
2717 "Invalid gateway address");
2718 goto out;
2719 }
2720
2721 if (cfg->fc_table) {
2722 grt = ip6_nh_lookup_table(net, cfg, gw_addr);
2723
2724 if (grt) {
2725 if (grt->rt6i_flags & RTF_GATEWAY ||
2726 (dev && dev != grt->dst.dev)) {
2727 ip6_rt_put(grt);
2728 grt = NULL;
2729 }
2730 }
2731 }
2732
2733 if (!grt)
2734 grt = rt6_lookup(net, gw_addr, NULL,
2735 cfg->fc_ifindex, 1);
2736
2737 err = -EHOSTUNREACH;
2738 if (!grt)
2739 goto out;
2740 if (dev) {
2741 if (dev != grt->dst.dev) {
2742 ip6_rt_put(grt);
2743 goto out;
2744 }
2745 } else {
2746 dev = grt->dst.dev;
2747 idev = grt->rt6i_idev;
2748 dev_hold(dev);
2749 in6_dev_hold(grt->rt6i_idev);
2750 }
2751 if (!(grt->rt6i_flags & RTF_GATEWAY))
2752 err = 0;
2753 ip6_rt_put(grt);
2754
2755 if (err)
2756 goto out;
2757 }
2758 err = -EINVAL;
2759 if (!dev) {
2760 NL_SET_ERR_MSG(extack, "Egress device not specified");
2761 goto out;
2762 } else if (dev->flags & IFF_LOOPBACK) {
2763 NL_SET_ERR_MSG(extack,
2764 "Egress device can not be loopback device for this route");
2765 goto out;
2766 }
2767 }
2768
2769 err = -ENODEV;
2770 if (!dev)
2771 goto out;
2772
2773 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
2774 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
2775 NL_SET_ERR_MSG(extack, "Invalid source address");
2776 err = -EINVAL;
2777 goto out;
2778 }
2779 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
2780 rt->rt6i_prefsrc.plen = 128;
2781 } else
2782 rt->rt6i_prefsrc.plen = 0;
2783
2784 rt->rt6i_flags = cfg->fc_flags;
2785
2786 install_route:
2787 rt->dst.dev = dev;
2788 rt->rt6i_idev = idev;
2789 rt->rt6i_table = table;
2790
2791 cfg->fc_nlinfo.nl_net = dev_net(dev);
2792
2793 return rt;
2794 out:
2795 if (dev)
2796 dev_put(dev);
2797 if (idev)
2798 in6_dev_put(idev);
2799 if (rt)
2800 dst_release_immediate(&rt->dst);
2801
2802 return ERR_PTR(err);
2803 }
2804
2805 int ip6_route_add(struct fib6_config *cfg,
2806 struct netlink_ext_ack *extack)
2807 {
2808 struct mx6_config mxc = { .mx = NULL, };
2809 struct rt6_info *rt;
2810 int err;
2811
2812 rt = ip6_route_info_create(cfg, extack);
2813 if (IS_ERR(rt)) {
2814 err = PTR_ERR(rt);
2815 rt = NULL;
2816 goto out;
2817 }
2818
2819 err = ip6_convert_metrics(&mxc, cfg);
2820 if (err)
2821 goto out;
2822
2823 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
2824
2825 kfree(mxc.mx);
2826
2827 return err;
2828 out:
2829 if (rt)
2830 dst_release_immediate(&rt->dst);
2831
2832 return err;
2833 }
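#if 0
/* Minimal sketch of an in-kernel caller adding a /64 route via the
 * function above. Mirrors what rt6_add_route_info() and
 * rt6_add_dflt_router() below do; all values are illustrative and
 * the helper name is hypothetical.
 */
static int example_add_route(struct net *net, struct net_device *dev,
			     const struct in6_addr *prefix)
{
	struct fib6_config cfg = {
		.fc_table	= RT6_TABLE_MAIN,
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= dev->ifindex,
		.fc_dst_len	= 64,
		.fc_flags	= RTF_UP,
		.fc_nlinfo.nl_net = net,
	};

	cfg.fc_dst = *prefix;
	return ip6_route_add(&cfg, NULL);
}
#endif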
2834
2835 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2836 {
2837 int err;
2838 struct fib6_table *table;
2839 struct net *net = dev_net(rt->dst.dev);
2840
2841 if (rt == net->ipv6.ip6_null_entry) {
2842 err = -ENOENT;
2843 goto out;
2844 }
2845
2846 table = rt->rt6i_table;
2847 spin_lock_bh(&table->tb6_lock);
2848 err = fib6_del(rt, info);
2849 spin_unlock_bh(&table->tb6_lock);
2850
2851 out:
2852 ip6_rt_put(rt);
2853 return err;
2854 }
2855
2856 int ip6_del_rt(struct rt6_info *rt)
2857 {
2858 struct nl_info info = {
2859 .nl_net = dev_net(rt->dst.dev),
2860 };
2861 return __ip6_del_rt(rt, &info);
2862 }
2863
2864 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2865 {
2866 struct nl_info *info = &cfg->fc_nlinfo;
2867 struct net *net = info->nl_net;
2868 struct sk_buff *skb = NULL;
2869 struct fib6_table *table;
2870 int err = -ENOENT;
2871
2872 if (rt == net->ipv6.ip6_null_entry)
2873 goto out_put;
2874 table = rt->rt6i_table;
2875 spin_lock_bh(&table->tb6_lock);
2876
2877 if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
2878 struct rt6_info *sibling, *next_sibling;
2879
2880 /* prefer to send a single notification with all hops */
2881 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
2882 if (skb) {
2883 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2884
2885 if (rt6_fill_node(net, skb, rt,
2886 NULL, NULL, 0, RTM_DELROUTE,
2887 info->portid, seq, 0) < 0) {
2888 kfree_skb(skb);
2889 skb = NULL;
2890 } else
2891 info->skip_notify = 1;
2892 }
2893
2894 list_for_each_entry_safe(sibling, next_sibling,
2895 &rt->rt6i_siblings,
2896 rt6i_siblings) {
2897 err = fib6_del(sibling, info);
2898 if (err)
2899 goto out_unlock;
2900 }
2901 }
2902
2903 err = fib6_del(rt, info);
2904 out_unlock:
2905 spin_unlock_bh(&table->tb6_lock);
2906 out_put:
2907 ip6_rt_put(rt);
2908
2909 if (skb) {
2910 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2911 info->nlh, gfp_any());
2912 }
2913 return err;
2914 }
2915
2916 static int ip6_route_del(struct fib6_config *cfg,
2917 struct netlink_ext_ack *extack)
2918 {
2919 struct rt6_info *rt, *rt_cache;
2920 struct fib6_table *table;
2921 struct fib6_node *fn;
2922 int err = -ESRCH;
2923
2924 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2925 if (!table) {
2926 NL_SET_ERR_MSG(extack, "FIB table does not exist");
2927 return err;
2928 }
2929
2930 rcu_read_lock();
2931
2932 fn = fib6_locate(&table->tb6_root,
2933 &cfg->fc_dst, cfg->fc_dst_len,
2934 &cfg->fc_src, cfg->fc_src_len,
2935 !(cfg->fc_flags & RTF_CACHE));
2936
2937 if (fn) {
2938 for_each_fib6_node_rt_rcu(fn) {
2939 if (cfg->fc_flags & RTF_CACHE) {
2940 rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
2941 &cfg->fc_src);
2942 if (!rt_cache)
2943 continue;
2944 rt = rt_cache;
2945 }
2946 if (cfg->fc_ifindex &&
2947 (!rt->dst.dev ||
2948 rt->dst.dev->ifindex != cfg->fc_ifindex))
2949 continue;
2950 if (cfg->fc_flags & RTF_GATEWAY &&
2951 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2952 continue;
2953 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2954 continue;
2955 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2956 continue;
2957 if (!dst_hold_safe(&rt->dst))
2958 break;
2959 rcu_read_unlock();
2960
2961 /* if a gateway was specified, only delete that one hop */
2962 if (cfg->fc_flags & RTF_GATEWAY)
2963 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2964
2965 return __ip6_del_rt_siblings(rt, cfg);
2966 }
2967 }
2968 rcu_read_unlock();
2969
2970 return err;
2971 }
2972
2973 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2974 {
2975 struct netevent_redirect netevent;
2976 struct rt6_info *rt, *nrt = NULL;
2977 struct ndisc_options ndopts;
2978 struct inet6_dev *in6_dev;
2979 struct neighbour *neigh;
2980 struct rd_msg *msg;
2981 int optlen, on_link;
2982 u8 *lladdr;
2983
2984 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2985 optlen -= sizeof(*msg);
2986
2987 if (optlen < 0) {
2988 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2989 return;
2990 }
2991
2992 msg = (struct rd_msg *)icmp6_hdr(skb);
2993
2994 if (ipv6_addr_is_multicast(&msg->dest)) {
2995 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2996 return;
2997 }
2998
2999 on_link = 0;
3000 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3001 on_link = 1;
3002 } else if (ipv6_addr_type(&msg->target) !=
3003 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3004 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3005 return;
3006 }
3007
3008 in6_dev = __in6_dev_get(skb->dev);
3009 if (!in6_dev)
3010 return;
3011 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
3012 return;
3013
3014 /* RFC2461 8.1:
3015 * The IP source address of the Redirect MUST be the same as the current
3016 * first-hop router for the specified ICMP Destination Address.
3017 */
3018
3019 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3020 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3021 return;
3022 }
3023
3024 lladdr = NULL;
3025 if (ndopts.nd_opts_tgt_lladdr) {
3026 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3027 skb->dev);
3028 if (!lladdr) {
3029 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3030 return;
3031 }
3032 }
3033
3034 rt = (struct rt6_info *) dst;
3035 if (rt->rt6i_flags & RTF_REJECT) {
3036 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
3037 return;
3038 }
3039
3040 /* Redirect received -> path was valid.
3041 * Look, redirects are sent only in response to data packets,
3042 * so this nexthop is apparently reachable. --ANK
3043 */
3044 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
3045
3046 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
3047 if (!neigh)
3048 return;
3049
3050 /*
3051 * We have finally decided to accept it.
3052 */
3053
3054 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
3055 NEIGH_UPDATE_F_WEAK_OVERRIDE|
3056 NEIGH_UPDATE_F_OVERRIDE|
3057 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
3058 NEIGH_UPDATE_F_ISROUTER)),
3059 NDISC_REDIRECT, &ndopts);
3060
3061 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
3062 if (!nrt)
3063 goto out;
3064
3065 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3066 if (on_link)
3067 nrt->rt6i_flags &= ~RTF_GATEWAY;
3068
3069 nrt->rt6i_protocol = RTPROT_REDIRECT;
3070 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
3071
3072 /* No need to remove rt from the exception table if rt is
3073 * a cached route because rt6_insert_exception() will
3074 * take care of it
3075 */
3076 if (rt6_insert_exception(nrt, rt)) {
3077 dst_release_immediate(&nrt->dst);
3078 goto out;
3079 }
3080
3081 netevent.old = &rt->dst;
3082 netevent.new = &nrt->dst;
3083 netevent.daddr = &msg->dest;
3084 netevent.neigh = neigh;
3085 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3086
3087 out:
3088 neigh_release(neigh);
3089 }
3090
3091 /*
3092 * Misc support functions
3093 */
3094
3095 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
3096 {
3097 BUG_ON(from->dst.from);
3098
3099 rt->rt6i_flags &= ~RTF_EXPIRES;
3100 dst_hold(&from->dst);
3101 rt->dst.from = &from->dst;
3102 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
3103 }
3104
3105 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
3106 {
3107 rt->dst.input = ort->dst.input;
3108 rt->dst.output = ort->dst.output;
3109 rt->rt6i_dst = ort->rt6i_dst;
3110 rt->dst.error = ort->dst.error;
3111 rt->rt6i_idev = ort->rt6i_idev;
3112 if (rt->rt6i_idev)
3113 in6_dev_hold(rt->rt6i_idev);
3114 rt->dst.lastuse = jiffies;
3115 rt->rt6i_gateway = ort->rt6i_gateway;
3116 rt->rt6i_flags = ort->rt6i_flags;
3117 rt6_set_from(rt, ort);
3118 rt->rt6i_metric = ort->rt6i_metric;
3119 #ifdef CONFIG_IPV6_SUBTREES
3120 rt->rt6i_src = ort->rt6i_src;
3121 #endif
3122 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
3123 rt->rt6i_table = ort->rt6i_table;
3124 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
3125 }
3126
3127 #ifdef CONFIG_IPV6_ROUTE_INFO
3128 static struct rt6_info *rt6_get_route_info(struct net *net,
3129 const struct in6_addr *prefix, int prefixlen,
3130 const struct in6_addr *gwaddr,
3131 struct net_device *dev)
3132 {
3133 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3134 int ifindex = dev->ifindex;
3135 struct fib6_node *fn;
3136 struct rt6_info *rt = NULL;
3137 struct fib6_table *table;
3138
3139 table = fib6_get_table(net, tb_id);
3140 if (!table)
3141 return NULL;
3142
3143 rcu_read_lock();
3144 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3145 if (!fn)
3146 goto out;
3147
3148 for_each_fib6_node_rt_rcu(fn) {
3149 if (rt->dst.dev->ifindex != ifindex)
3150 continue;
3151 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
3152 continue;
3153 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
3154 continue;
3155 ip6_hold_safe(NULL, &rt, false);
3156 break;
3157 }
3158 out:
3159 rcu_read_unlock();
3160 return rt;
3161 }
3162
3163 static struct rt6_info *rt6_add_route_info(struct net *net,
3164 const struct in6_addr *prefix, int prefixlen,
3165 const struct in6_addr *gwaddr,
3166 struct net_device *dev,
3167 unsigned int pref)
3168 {
3169 struct fib6_config cfg = {
3170 .fc_metric = IP6_RT_PRIO_USER,
3171 .fc_ifindex = dev->ifindex,
3172 .fc_dst_len = prefixlen,
3173 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3174 RTF_UP | RTF_PREF(pref),
3175 .fc_protocol = RTPROT_RA,
3176 .fc_nlinfo.portid = 0,
3177 .fc_nlinfo.nlh = NULL,
3178 .fc_nlinfo.nl_net = net,
3179 };
3180
3181 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
3182 cfg.fc_dst = *prefix;
3183 cfg.fc_gateway = *gwaddr;
3184
3185 /* We should treat it as a default route if prefix length is 0. */
3186 if (!prefixlen)
3187 cfg.fc_flags |= RTF_DEFAULT;
3188
3189 ip6_route_add(&cfg, NULL);
3190
3191 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3192 }
3193 #endif
3194
3195 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
3196 {
3197 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3198 struct rt6_info *rt;
3199 struct fib6_table *table;
3200
3201 table = fib6_get_table(dev_net(dev), tb_id);
3202 if (!table)
3203 return NULL;
3204
3205 rcu_read_lock();
3206 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3207 if (dev == rt->dst.dev &&
3208 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3209 ipv6_addr_equal(&rt->rt6i_gateway, addr))
3210 break;
3211 }
3212 if (rt)
3213 ip6_hold_safe(NULL, &rt, false);
3214 rcu_read_unlock();
3215 return rt;
3216 }
3217
3218 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
3219 struct net_device *dev,
3220 unsigned int pref)
3221 {
3222 struct fib6_config cfg = {
3223 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3224 .fc_metric = IP6_RT_PRIO_USER,
3225 .fc_ifindex = dev->ifindex,
3226 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3227 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3228 .fc_protocol = RTPROT_RA,
3229 .fc_nlinfo.portid = 0,
3230 .fc_nlinfo.nlh = NULL,
3231 .fc_nlinfo.nl_net = dev_net(dev),
3232 };
3233
3234 cfg.fc_gateway = *gwaddr;
3235
3236 if (!ip6_route_add(&cfg, NULL)) {
3237 struct fib6_table *table;
3238
3239 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3240 if (table)
3241 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3242 }
3243
3244 return rt6_get_dflt_router(gwaddr, dev);
3245 }
3246
3247 static void __rt6_purge_dflt_routers(struct fib6_table *table)
3248 {
3249 struct rt6_info *rt;
3250
3251 restart:
3252 rcu_read_lock();
3253 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3254 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3255 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
3256 if (dst_hold_safe(&rt->dst)) {
3257 rcu_read_unlock();
3258 ip6_del_rt(rt);
3259 } else {
3260 rcu_read_unlock();
3261 }
3262 goto restart;
3263 }
3264 }
3265 rcu_read_unlock();
3266
3267 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3268 }
3269
3270 void rt6_purge_dflt_routers(struct net *net)
3271 {
3272 struct fib6_table *table;
3273 struct hlist_head *head;
3274 unsigned int h;
3275
3276 rcu_read_lock();
3277
3278 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3279 head = &net->ipv6.fib_table_hash[h];
3280 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3281 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
3282 __rt6_purge_dflt_routers(table);
3283 }
3284 }
3285
3286 rcu_read_unlock();
3287 }
3288
3289 static void rtmsg_to_fib6_config(struct net *net,
3290 struct in6_rtmsg *rtmsg,
3291 struct fib6_config *cfg)
3292 {
3293 memset(cfg, 0, sizeof(*cfg));
3294
3295 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3296 : RT6_TABLE_MAIN;
3297 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
3298 cfg->fc_metric = rtmsg->rtmsg_metric;
3299 cfg->fc_expires = rtmsg->rtmsg_info;
3300 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
3301 cfg->fc_src_len = rtmsg->rtmsg_src_len;
3302 cfg->fc_flags = rtmsg->rtmsg_flags;
3303
3304 cfg->fc_nlinfo.nl_net = net;
3305
3306 cfg->fc_dst = rtmsg->rtmsg_dst;
3307 cfg->fc_src = rtmsg->rtmsg_src;
3308 cfg->fc_gateway = rtmsg->rtmsg_gateway;
3309 }
3310
3311 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3312 {
3313 struct fib6_config cfg;
3314 struct in6_rtmsg rtmsg;
3315 int err;
3316
3317 switch (cmd) {
3318 case SIOCADDRT: /* Add a route */
3319 case SIOCDELRT: /* Delete a route */
3320 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3321 return -EPERM;
3322 err = copy_from_user(&rtmsg, arg,
3323 sizeof(struct in6_rtmsg));
3324 if (err)
3325 return -EFAULT;
3326
3327 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
3328
3329 rtnl_lock();
3330 switch (cmd) {
3331 case SIOCADDRT:
3332 err = ip6_route_add(&cfg, NULL);
3333 break;
3334 case SIOCDELRT:
3335 err = ip6_route_del(&cfg, NULL);
3336 break;
3337 default:
3338 err = -EINVAL;
3339 }
3340 rtnl_unlock();
3341
3342 return err;
3343 }
3344
3345 return -EINVAL;
3346 }
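#if 0
/* Userspace view of the ioctl path above (illustrative only, error
 * handling kept minimal): fill a struct in6_rtmsg and issue SIOCADDRT
 * on an AF_INET6 socket; rtmsg_to_fib6_config() translates it into a
 * fib6_config for ip6_route_add(). Helper name is hypothetical.
 */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/ipv6_route.h>

static int example_siocaddrt(const struct in6_addr *dst, int plen,
			     int ifindex)
{
	struct in6_rtmsg rt = {
		.rtmsg_dst	= *dst,
		.rtmsg_dst_len	= plen,
		.rtmsg_ifindex	= ifindex,
		.rtmsg_flags	= RTF_UP,
		.rtmsg_metric	= 1,
	};
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, SIOCADDRT, &rt);
	close(fd);
	return err;
}
#endif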
3347
3348 /*
3349 * Drop the packet on the floor
3350 */
3351
3352 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
3353 {
3354 int type;
3355 struct dst_entry *dst = skb_dst(skb);
3356 switch (ipstats_mib_noroutes) {
3357 case IPSTATS_MIB_INNOROUTES:
3358 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
3359 if (type == IPV6_ADDR_ANY) {
3360 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3361 IPSTATS_MIB_INADDRERRORS);
3362 break;
3363 }
3364 /* FALLTHROUGH */
3365 case IPSTATS_MIB_OUTNOROUTES:
3366 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3367 ipstats_mib_noroutes);
3368 break;
3369 }
3370 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
3371 kfree_skb(skb);
3372 return 0;
3373 }
3374
3375 static int ip6_pkt_discard(struct sk_buff *skb)
3376 {
3377 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
3378 }
3379
3380 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3381 {
3382 skb->dev = skb_dst(skb)->dev;
3383 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
3384 }
3385
3386 static int ip6_pkt_prohibit(struct sk_buff *skb)
3387 {
3388 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
3389 }
3390
3391 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3392 {
3393 skb->dev = skb_dst(skb)->dev;
3394 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
3395 }
3396
3397 /*
3398 * Allocate a dst for local (unicast / anycast) address.
3399 */
3400
3401 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
3402 const struct in6_addr *addr,
3403 bool anycast)
3404 {
3405 u32 tb_id;
3406 struct net *net = dev_net(idev->dev);
3407 struct net_device *dev = idev->dev;
3408 struct rt6_info *rt;
3409
3410 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
3411 if (!rt)
3412 return ERR_PTR(-ENOMEM);
3413
3414 in6_dev_hold(idev);
3415
3416 rt->dst.flags |= DST_HOST;
3417 rt->dst.input = ip6_input;
3418 rt->dst.output = ip6_output;
3419 rt->rt6i_idev = idev;
3420
3421 rt->rt6i_protocol = RTPROT_KERNEL;
3422 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
3423 if (anycast)
3424 rt->rt6i_flags |= RTF_ANYCAST;
3425 else
3426 rt->rt6i_flags |= RTF_LOCAL;
3427
3428 rt->rt6i_gateway = *addr;
3429 rt->rt6i_dst.addr = *addr;
3430 rt->rt6i_dst.plen = 128;
3431 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
3432 rt->rt6i_table = fib6_get_table(net, tb_id);
3433
3434 return rt;
3435 }
3436
3437 /* remove deleted ip from prefsrc entries */
3438 struct arg_dev_net_ip {
3439 struct net_device *dev;
3440 struct net *net;
3441 struct in6_addr *addr;
3442 };
3443
3444 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
3445 {
3446 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3447 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3448 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3449
3450 if (((void *)rt->dst.dev == dev || !dev) &&
3451 rt != net->ipv6.ip6_null_entry &&
3452 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
3453 spin_lock_bh(&rt6_exception_lock);
3454 /* remove prefsrc entry */
3455 rt->rt6i_prefsrc.plen = 0;
3456 /* need to update cache as well */
3457 rt6_exceptions_remove_prefsrc(rt);
3458 spin_unlock_bh(&rt6_exception_lock);
3459 }
3460 return 0;
3461 }
3462
3463 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
3464 {
3465 struct net *net = dev_net(ifp->idev->dev);
3466 struct arg_dev_net_ip adni = {
3467 .dev = ifp->idev->dev,
3468 .net = net,
3469 .addr = &ifp->addr,
3470 };
3471 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
3472 }
3473
3474 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
3475
3476 /* Remove routers and update dst entries when a gateway turns into a host. */
3477 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
3478 {
3479 struct in6_addr *gateway = (struct in6_addr *)arg;
3480
3481 if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
3482 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
3483 return -1;
3484 }
3485
3486 /* Further clean up cached routes in the exception table.
3487 * This is needed because a cached route may have a different
3488 * gateway than its 'parent' in the case of an ip redirect.
3489 */
3490 rt6_exceptions_clean_tohost(rt, gateway);
3491
3492 return 0;
3493 }
3494
3495 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
3496 {
3497 fib6_clean_all(net, fib6_clean_tohost, gateway);
3498 }
3499
3500 struct arg_dev_net {
3501 struct net_device *dev;
3502 struct net *net;
3503 };
3504
3505 /* called with write lock held for table with rt */
3506 static int fib6_ifdown(struct rt6_info *rt, void *arg)
3507 {
3508 const struct arg_dev_net *adn = arg;
3509 const struct net_device *dev = adn->dev;
3510
3511 if ((rt->dst.dev == dev || !dev) &&
3512 rt != adn->net->ipv6.ip6_null_entry &&
3513 (rt->rt6i_nsiblings == 0 ||
3514 (dev && netdev_unregistering(dev)) ||
3515 !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
3516 return -1;
3517
3518 return 0;
3519 }
3520
3521 void rt6_ifdown(struct net *net, struct net_device *dev)
3522 {
3523 struct arg_dev_net adn = {
3524 .dev = dev,
3525 .net = net,
3526 };
3527
3528 fib6_clean_all(net, fib6_ifdown, &adn);
3529 if (dev)
3530 rt6_uncached_list_flush_dev(net, dev);
3531 }
3532
3533 struct rt6_mtu_change_arg {
3534 struct net_device *dev;
3535 unsigned int mtu;
3536 };
3537
3538 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
3539 {
3540 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
3541 struct inet6_dev *idev;
3542
3543 /* In IPv6, PMTU discovery is not optional,
3544 so an RTAX_MTU lock cannot disable it.
3545 We still use this lock to block changes
3546 caused by addrconf/ndisc.
3547 */
3548
3549 idev = __in6_dev_get(arg->dev);
3550 if (!idev)
3551 return 0;
3552
3553 /* For an administrative MTU increase there is no way to discover
3554 an IPv6 PMTU increase, so the PMTU must be updated here.
3555 Since RFC 1981 doesn't cover administrative MTU increases,
3556 updating the PMTU on increase is a MUST (e.g. for jumbo frames).
3557 */
3558 if (rt->dst.dev == arg->dev &&
3559 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
3560 spin_lock_bh(&rt6_exception_lock);
3561 if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
3562 rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
3563 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
3564 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
3565 spin_unlock_bh(&rt6_exception_lock);
3566 }
3567 return 0;
3568 }
3569
3570 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
3571 {
3572 struct rt6_mtu_change_arg arg = {
3573 .dev = dev,
3574 .mtu = mtu,
3575 };
3576
3577 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
3578 }
3579
3580 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3581 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
3582 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
3583 [RTA_OIF] = { .type = NLA_U32 },
3584 [RTA_IIF] = { .type = NLA_U32 },
3585 [RTA_PRIORITY] = { .type = NLA_U32 },
3586 [RTA_METRICS] = { .type = NLA_NESTED },
3587 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
3588 [RTA_PREF] = { .type = NLA_U8 },
3589 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
3590 [RTA_ENCAP] = { .type = NLA_NESTED },
3591 [RTA_EXPIRES] = { .type = NLA_U32 },
3592 [RTA_UID] = { .type = NLA_U32 },
3593 [RTA_MARK] = { .type = NLA_U32 },
3594 [RTA_TABLE] = { .type = NLA_U32 },
3595 };
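/* Example of a message parsed below: "ip -6 route add 2001:db8::/64
 * via fe80::1 dev eth0" arrives as RTM_NEWROUTE with rtm_dst_len = 64
 * plus RTA_DST, RTA_GATEWAY and RTA_OIF attributes, which
 * rtm_to_fib6_config() translates into a fib6_config.
 */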
3596
3597 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
3598 struct fib6_config *cfg,
3599 struct netlink_ext_ack *extack)
3600 {
3601 struct rtmsg *rtm;
3602 struct nlattr *tb[RTA_MAX+1];
3603 unsigned int pref;
3604 int err;
3605
3606 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
3607 NULL);
3608 if (err < 0)
3609 goto errout;
3610
3611 err = -EINVAL;
3612 rtm = nlmsg_data(nlh);
3613 memset(cfg, 0, sizeof(*cfg));
3614
3615 cfg->fc_table = rtm->rtm_table;
3616 cfg->fc_dst_len = rtm->rtm_dst_len;
3617 cfg->fc_src_len = rtm->rtm_src_len;
3618 cfg->fc_flags = RTF_UP;
3619 cfg->fc_protocol = rtm->rtm_protocol;
3620 cfg->fc_type = rtm->rtm_type;
3621
3622 if (rtm->rtm_type == RTN_UNREACHABLE ||
3623 rtm->rtm_type == RTN_BLACKHOLE ||
3624 rtm->rtm_type == RTN_PROHIBIT ||
3625 rtm->rtm_type == RTN_THROW)
3626 cfg->fc_flags |= RTF_REJECT;
3627
3628 if (rtm->rtm_type == RTN_LOCAL)
3629 cfg->fc_flags |= RTF_LOCAL;
3630
3631 if (rtm->rtm_flags & RTM_F_CLONED)
3632 cfg->fc_flags |= RTF_CACHE;
3633
3634 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
3635 cfg->fc_nlinfo.nlh = nlh;
3636 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
3637
3638 if (tb[RTA_GATEWAY]) {
3639 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
3640 cfg->fc_flags |= RTF_GATEWAY;
3641 }
3642
3643 if (tb[RTA_DST]) {
3644 int plen = (rtm->rtm_dst_len + 7) >> 3;
3645
3646 if (nla_len(tb[RTA_DST]) < plen)
3647 goto errout;
3648
3649 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
3650 }
3651
3652 if (tb[RTA_SRC]) {
3653 int plen = (rtm->rtm_src_len + 7) >> 3;
3654
3655 if (nla_len(tb[RTA_SRC]) < plen)
3656 goto errout;
3657
3658 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
3659 }
3660
3661 if (tb[RTA_PREFSRC])
3662 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
3663
3664 if (tb[RTA_OIF])
3665 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
3666
3667 if (tb[RTA_PRIORITY])
3668 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
3669
3670 if (tb[RTA_METRICS]) {
3671 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
3672 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
3673 }
3674
3675 if (tb[RTA_TABLE])
3676 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
3677
3678 if (tb[RTA_MULTIPATH]) {
3679 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
3680 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
3681
3682 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
3683 cfg->fc_mp_len, extack);
3684 if (err < 0)
3685 goto errout;
3686 }
3687
3688 if (tb[RTA_PREF]) {
3689 pref = nla_get_u8(tb[RTA_PREF]);
3690 if (pref != ICMPV6_ROUTER_PREF_LOW &&
3691 pref != ICMPV6_ROUTER_PREF_HIGH)
3692 pref = ICMPV6_ROUTER_PREF_MEDIUM;
3693 cfg->fc_flags |= RTF_PREF(pref);
3694 }
3695
3696 if (tb[RTA_ENCAP])
3697 cfg->fc_encap = tb[RTA_ENCAP];
3698
3699 if (tb[RTA_ENCAP_TYPE]) {
3700 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
3701
3702 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
3703 if (err < 0)
3704 goto errout;
3705 }
3706
3707 if (tb[RTA_EXPIRES]) {
3708 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
3709
3710 if (addrconf_finite_timeout(timeout)) {
3711 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
3712 cfg->fc_flags |= RTF_EXPIRES;
3713 }
3714 }
3715
3716 err = 0;
3717 errout:
3718 return err;
3719 }
3720
3721 struct rt6_nh {
3722 struct rt6_info *rt6_info;
3723 struct fib6_config r_cfg;
3724 struct mx6_config mxc;
3725 struct list_head next;
3726 };
3727
3728 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
3729 {
3730 struct rt6_nh *nh;
3731
3732 list_for_each_entry(nh, rt6_nh_list, next) {
3733 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
3734 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
3735 nh->r_cfg.fc_ifindex);
3736 }
3737 }
3738
3739 static int ip6_route_info_append(struct list_head *rt6_nh_list,
3740 struct rt6_info *rt, struct fib6_config *r_cfg)
3741 {
3742 struct rt6_nh *nh;
3743 int err = -EEXIST;
3744
3745 list_for_each_entry(nh, rt6_nh_list, next) {
3746 /* check if rt6_info already exists */
3747 if (rt6_duplicate_nexthop(nh->rt6_info, rt))
3748 return err;
3749 }
3750
3751 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
3752 if (!nh)
3753 return -ENOMEM;
3754 nh->rt6_info = rt;
3755 err = ip6_convert_metrics(&nh->mxc, r_cfg);
3756 if (err) {
3757 kfree(nh);
3758 return err;
3759 }
3760 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
3761 list_add_tail(&nh->next, rt6_nh_list);
3762
3763 return 0;
3764 }
3765
3766 static void ip6_route_mpath_notify(struct rt6_info *rt,
3767 struct rt6_info *rt_last,
3768 struct nl_info *info,
3769 __u16 nlflags)
3770 {
3771 /* if this is an APPEND route, then rt points to the first route
3772 * inserted and rt_last points to the last route inserted. Userspace
3773 * wants a consistent dump of the route which starts at the first
3774 * nexthop. Since sibling routes are always added at the end of
3775 * the list, find the first sibling of the last route appended
3776 */
3777 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
3778 rt = list_first_entry(&rt_last->rt6i_siblings,
3779 struct rt6_info,
3780 rt6i_siblings);
3781 }
3782
3783 if (rt)
3784 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
3785 }
3786
3787 static int ip6_route_multipath_add(struct fib6_config *cfg,
3788 struct netlink_ext_ack *extack)
3789 {
3790 struct rt6_info *rt_notif = NULL, *rt_last = NULL;
3791 struct nl_info *info = &cfg->fc_nlinfo;
3792 struct fib6_config r_cfg;
3793 struct rtnexthop *rtnh;
3794 struct rt6_info *rt;
3795 struct rt6_nh *err_nh;
3796 struct rt6_nh *nh, *nh_safe;
3797 __u16 nlflags;
3798 int remaining;
3799 int attrlen;
3800 int err = 1;
3801 int nhn = 0;
3802 int replace = (cfg->fc_nlinfo.nlh &&
3803 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
3804 LIST_HEAD(rt6_nh_list);
3805
3806 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
3807 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
3808 nlflags |= NLM_F_APPEND;
3809
3810 remaining = cfg->fc_mp_len;
3811 rtnh = (struct rtnexthop *)cfg->fc_mp;
3812
3813 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
3814 * rt6_info structs, one per nexthop
3815 */
3816 while (rtnh_ok(rtnh, remaining)) {
3817 memcpy(&r_cfg, cfg, sizeof(*cfg));
3818 if (rtnh->rtnh_ifindex)
3819 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3820
3821 attrlen = rtnh_attrlen(rtnh);
3822 if (attrlen > 0) {
3823 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3824
3825 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3826 if (nla) {
3827 r_cfg.fc_gateway = nla_get_in6_addr(nla);
3828 r_cfg.fc_flags |= RTF_GATEWAY;
3829 }
3830 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
3831 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
3832 if (nla)
3833 r_cfg.fc_encap_type = nla_get_u16(nla);
3834 }
3835
3836 rt = ip6_route_info_create(&r_cfg, extack);
3837 if (IS_ERR(rt)) {
3838 err = PTR_ERR(rt);
3839 rt = NULL;
3840 goto cleanup;
3841 }
3842
3843 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
3844 if (err) {
3845 dst_release_immediate(&rt->dst);
3846 goto cleanup;
3847 }
3848
3849 rtnh = rtnh_next(rtnh, &remaining);
3850 }
3851
3852 /* for add and replace, send one notification with all nexthops.
3853 * Skip the notification in fib6_add_rt2node and send one with
3854 * the full route when done
3855 */
3856 info->skip_notify = 1;
3857
3858 err_nh = NULL;
3859 list_for_each_entry(nh, &rt6_nh_list, next) {
3860 rt_last = nh->rt6_info;
3861 err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
3862 /* save reference to first route for notification */
3863 if (!rt_notif && !err)
3864 rt_notif = nh->rt6_info;
3865
3866 /* nh->rt6_info is used or freed at this point, reset to NULL*/
3867 nh->rt6_info = NULL;
3868 if (err) {
3869 if (replace && nhn)
3870 ip6_print_replace_route_err(&rt6_nh_list);
3871 err_nh = nh;
3872 goto add_errout;
3873 }
3874
3875 /* Because each route is added like a single route, we remove
3876 * these flags after the first nexthop: if there is a collision,
3877 * we have already failed to add the first nexthop
3878 * (fib6_add_rt2node() has rejected it); when replacing, the old
3879 * nexthops have been replaced by the first new one, and the rest
3880 * should be appended to it.
3881 */
3882 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3883 NLM_F_REPLACE);
3884 nhn++;
3885 }
3886
3887 /* success ... tell user about new route */
3888 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3889 goto cleanup;
3890
3891 add_errout:
3892 /* send notification for routes that were added so that
3893 * the delete notifications sent by ip6_route_del are
3894 * coherent
3895 */
3896 if (rt_notif)
3897 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3898
3899 /* Delete routes that were already added */
3900 list_for_each_entry(nh, &rt6_nh_list, next) {
3901 if (err_nh == nh)
3902 break;
3903 ip6_route_del(&nh->r_cfg, extack);
3904 }
3905
3906 cleanup:
3907 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
3908 if (nh->rt6_info)
3909 dst_release_immediate(&nh->rt6_info->dst);
3910 kfree(nh->mxc.mx);
3911 list_del(&nh->next);
3912 kfree(nh);
3913 }
3914
3915 return err;
3916 }
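/* Example: "ip -6 route add 2001:db8::/64 nexthop via fe80::1 dev eth0
 *           nexthop via fe80::2 dev eth1" arrives as one RTA_MULTIPATH
 * attribute holding two rtnexthop entries; the loop above creates one
 * rt6_info per entry and inserts them as siblings.
 */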
3917
3918 static int ip6_route_multipath_del(struct fib6_config *cfg,
3919 struct netlink_ext_ack *extack)
3920 {
3921 struct fib6_config r_cfg;
3922 struct rtnexthop *rtnh;
3923 int remaining;
3924 int attrlen;
3925 int err = 1, last_err = 0;
3926
3927 remaining = cfg->fc_mp_len;
3928 rtnh = (struct rtnexthop *)cfg->fc_mp;
3929
3930 /* Parse a Multipath Entry */
3931 while (rtnh_ok(rtnh, remaining)) {
3932 memcpy(&r_cfg, cfg, sizeof(*cfg));
3933 if (rtnh->rtnh_ifindex)
3934 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3935
3936 attrlen = rtnh_attrlen(rtnh);
3937 if (attrlen > 0) {
3938 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3939
3940 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3941 if (nla) {
3942 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
3943 r_cfg.fc_flags |= RTF_GATEWAY;
3944 }
3945 }
3946 err = ip6_route_del(&r_cfg, extack);
3947 if (err)
3948 last_err = err;
3949
3950 rtnh = rtnh_next(rtnh, &remaining);
3951 }
3952
3953 return last_err;
3954 }
3955
3956 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3957 struct netlink_ext_ack *extack)
3958 {
3959 struct fib6_config cfg;
3960 int err;
3961
3962 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3963 if (err < 0)
3964 return err;
3965
3966 if (cfg.fc_mp)
3967 return ip6_route_multipath_del(&cfg, extack);
3968 else {
3969 cfg.fc_delete_all_nh = 1;
3970 return ip6_route_del(&cfg, extack);
3971 }
3972 }
3973
3974 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3975 struct netlink_ext_ack *extack)
3976 {
3977 struct fib6_config cfg;
3978 int err;
3979
3980 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3981 if (err < 0)
3982 return err;
3983
3984 if (cfg.fc_mp)
3985 return ip6_route_multipath_add(&cfg, extack);
3986 else
3987 return ip6_route_add(&cfg, extack);
3988 }
3989
3990 static size_t rt6_nlmsg_size(struct rt6_info *rt)
3991 {
3992 int nexthop_len = 0;
3993
3994 if (rt->rt6i_nsiblings) {
3995 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
3996 + NLA_ALIGN(sizeof(struct rtnexthop))
3997 + nla_total_size(16) /* RTA_GATEWAY */
3998 + lwtunnel_get_encap_size(rt->dst.lwtstate);
3999
4000 nexthop_len *= rt->rt6i_nsiblings;
4001 }
4002
4003 return NLMSG_ALIGN(sizeof(struct rtmsg))
4004 + nla_total_size(16) /* RTA_SRC */
4005 + nla_total_size(16) /* RTA_DST */
4006 + nla_total_size(16) /* RTA_GATEWAY */
4007 + nla_total_size(16) /* RTA_PREFSRC */
4008 + nla_total_size(4) /* RTA_TABLE */
4009 + nla_total_size(4) /* RTA_IIF */
4010 + nla_total_size(4) /* RTA_OIF */
4011 + nla_total_size(4) /* RTA_PRIORITY */
4012 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
4013 + nla_total_size(sizeof(struct rta_cacheinfo))
4014 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
4015 + nla_total_size(1) /* RTA_PREF */
4016 + lwtunnel_get_encap_size(rt->dst.lwtstate)
4017 + nexthop_len;
4018 }
4019
4020 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
4021 unsigned int *flags, bool skip_oif)
4022 {
4023 if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
4024 *flags |= RTNH_F_LINKDOWN;
4025 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
4026 *flags |= RTNH_F_DEAD;
4027 }
4028
4029 if (rt->rt6i_flags & RTF_GATEWAY) {
4030 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
4031 goto nla_put_failure;
4032 }
4033
4034 if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD)
4035 *flags |= RTNH_F_OFFLOAD;
4036
4037 /* not needed for multipath encoding because it has a rtnexthop struct */
4038 if (!skip_oif && rt->dst.dev &&
4039 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
4040 goto nla_put_failure;
4041
4042 if (rt->dst.lwtstate &&
4043 lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
4044 goto nla_put_failure;
4045
4046 return 0;
4047
4048 nla_put_failure:
4049 return -EMSGSIZE;
4050 }
4051
4052 /* add multipath next hop */
4053 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
4054 {
4055 struct rtnexthop *rtnh;
4056 unsigned int flags = 0;
4057
4058 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
4059 if (!rtnh)
4060 goto nla_put_failure;
4061
4062 rtnh->rtnh_hops = 0;
4063 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
4064
4065 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
4066 goto nla_put_failure;
4067
4068 rtnh->rtnh_flags = flags;
4069
4070 /* length of rtnetlink header + attributes */
4071 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
4072
4073 return 0;
4074
4075 nla_put_failure:
4076 return -EMSGSIZE;
4077 }
4078
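/* Fill one route message for @rt into @skb; on failure the partially
 * built message is cancelled and -EMSGSIZE returned.
 */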
4079 static int rt6_fill_node(struct net *net,
4080 struct sk_buff *skb, struct rt6_info *rt,
4081 struct in6_addr *dst, struct in6_addr *src,
4082 int iif, int type, u32 portid, u32 seq,
4083 unsigned int flags)
4084 {
4085 u32 metrics[RTAX_MAX];
4086 struct rtmsg *rtm;
4087 struct nlmsghdr *nlh;
4088 long expires;
4089 u32 table;
4090
4091 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4092 if (!nlh)
4093 return -EMSGSIZE;
4094
4095 rtm = nlmsg_data(nlh);
4096 rtm->rtm_family = AF_INET6;
4097 rtm->rtm_dst_len = rt->rt6i_dst.plen;
4098 rtm->rtm_src_len = rt->rt6i_src.plen;
4099 rtm->rtm_tos = 0;
4100 if (rt->rt6i_table)
4101 table = rt->rt6i_table->tb6_id;
4102 else
4103 table = RT6_TABLE_UNSPEC;
4104 rtm->rtm_table = table;
4105 if (nla_put_u32(skb, RTA_TABLE, table))
4106 goto nla_put_failure;
4107 if (rt->rt6i_flags & RTF_REJECT) {
4108 switch (rt->dst.error) {
4109 case -EINVAL:
4110 rtm->rtm_type = RTN_BLACKHOLE;
4111 break;
4112 case -EACCES:
4113 rtm->rtm_type = RTN_PROHIBIT;
4114 break;
4115 case -EAGAIN:
4116 rtm->rtm_type = RTN_THROW;
4117 break;
4118 default:
4119 rtm->rtm_type = RTN_UNREACHABLE;
4120 break;
4121 }
4122 } else if (rt->rt6i_flags & RTF_LOCAL)
4124 rtm->rtm_type = RTN_LOCAL;
4125 else if (rt->rt6i_flags & RTF_ANYCAST)
4126 rtm->rtm_type = RTN_ANYCAST;
4127 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
4128 rtm->rtm_type = RTN_LOCAL;
4129 else
4130 rtm->rtm_type = RTN_UNICAST;
4131 rtm->rtm_flags = 0;
4132 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4133 rtm->rtm_protocol = rt->rt6i_protocol;
4134
4135 if (rt->rt6i_flags & RTF_CACHE)
4136 rtm->rtm_flags |= RTM_F_CLONED;
4137
4138 if (dst) {
4139 if (nla_put_in6_addr(skb, RTA_DST, dst))
4140 goto nla_put_failure;
4141 rtm->rtm_dst_len = 128;
4142 } else if (rtm->rtm_dst_len)
4143 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
4144 goto nla_put_failure;
4145 #ifdef CONFIG_IPV6_SUBTREES
4146 if (src) {
4147 if (nla_put_in6_addr(skb, RTA_SRC, src))
4148 goto nla_put_failure;
4149 rtm->rtm_src_len = 128;
4150 } else if (rtm->rtm_src_len &&
4151 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
4152 goto nla_put_failure;
4153 #endif
4154 if (iif) {
4155 #ifdef CONFIG_IPV6_MROUTE
4156 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
4157 int err = ip6mr_get_route(net, skb, rtm, portid);
4158
4159 if (err == 0)
4160 return 0;
4161 if (err < 0)
4162 goto nla_put_failure;
4163 } else
4164 #endif
4165 if (nla_put_u32(skb, RTA_IIF, iif))
4166 goto nla_put_failure;
4167 } else if (dst) {
4168 struct in6_addr saddr_buf;
4169 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
4170 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4171 goto nla_put_failure;
4172 }
4173
4174 if (rt->rt6i_prefsrc.plen) {
4175 struct in6_addr saddr_buf;
4176 saddr_buf = rt->rt6i_prefsrc.addr;
4177 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4178 goto nla_put_failure;
4179 }
4180
4181 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
4182 if (rt->rt6i_pmtu)
4183 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
4184 if (rtnetlink_put_metrics(skb, metrics) < 0)
4185 goto nla_put_failure;
4186
4187 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
4188 goto nla_put_failure;
4189
4190 /* For multipath routes, walk the siblings list and add
4191 * each as a nexthop within RTA_MULTIPATH.
4192 */
4193 if (rt->rt6i_nsiblings) {
4194 struct rt6_info *sibling, *next_sibling;
4195 struct nlattr *mp;
4196
4197 mp = nla_nest_start(skb, RTA_MULTIPATH);
4198 if (!mp)
4199 goto nla_put_failure;
4200
4201 if (rt6_add_nexthop(skb, rt) < 0)
4202 goto nla_put_failure;
4203
4204 list_for_each_entry_safe(sibling, next_sibling,
4205 &rt->rt6i_siblings, rt6i_siblings) {
4206 if (rt6_add_nexthop(skb, sibling) < 0)
4207 goto nla_put_failure;
4208 }
4209
4210 nla_nest_end(skb, mp);
4211 } else {
4212 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
4213 goto nla_put_failure;
4214 }
4215
4216 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
4217
4218 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
4219 goto nla_put_failure;
4220
4221 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
4222 goto nla_put_failure;
4223
4225 nlmsg_end(skb, nlh);
4226 return 0;
4227
4228 nla_put_failure:
4229 nlmsg_cancel(skb, nlh);
4230 return -EMSGSIZE;
4231 }
4232
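/* Per-route callback for netlink FIB dumps: skip the null entry and,
 * if the dump requested prefix routes only, any non-prefix route.
 */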
4233 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
4234 {
4235 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
4236 struct net *net = arg->net;
4237
4238 if (rt == net->ipv6.ip6_null_entry)
4239 return 0;
4240
4241 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
4242 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
4243
4244 /* user wants prefix routes only */
4245 if (rtm->rtm_flags & RTM_F_PREFIX &&
4246 !(rt->rt6i_flags & RTF_PREFIX_RT)) {
4247 /* success since this is not a prefix route */
4248 return 1;
4249 }
4250 }
4251
4252 return rt6_fill_node(net,
4253 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
4254 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
4255 NLM_F_MULTI);
4256 }
4257
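/* RTM_GETROUTE handler: resolve one route for the request, via an input
 * lookup when RTA_IIF is given or an output lookup otherwise, and reply
 * with an RTM_NEWROUTE message. With RTM_F_FIB_MATCH set, report the
 * FIB entry the dst was cloned from instead of the dst itself.
 */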
4258 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4259 struct netlink_ext_ack *extack)
4260 {
4261 struct net *net = sock_net(in_skb->sk);
4262 struct nlattr *tb[RTA_MAX+1];
4263 int err, iif = 0, oif = 0;
4264 struct dst_entry *dst;
4265 struct rt6_info *rt;
4266 struct sk_buff *skb;
4267 struct rtmsg *rtm;
4268 struct flowi6 fl6;
4269 bool fibmatch;
4270
4271 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
4272 extack);
4273 if (err < 0)
4274 goto errout;
4275
4276 err = -EINVAL;
4277 memset(&fl6, 0, sizeof(fl6));
4278 rtm = nlmsg_data(nlh);
4279 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
4280 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
4281
4282 if (tb[RTA_SRC]) {
4283 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
4284 goto errout;
4285
4286 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
4287 }
4288
4289 if (tb[RTA_DST]) {
4290 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
4291 goto errout;
4292
4293 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
4294 }
4295
4296 if (tb[RTA_IIF])
4297 iif = nla_get_u32(tb[RTA_IIF]);
4298
4299 if (tb[RTA_OIF])
4300 oif = nla_get_u32(tb[RTA_OIF]);
4301
4302 if (tb[RTA_MARK])
4303 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
4304
4305 if (tb[RTA_UID])
4306 fl6.flowi6_uid = make_kuid(current_user_ns(),
4307 nla_get_u32(tb[RTA_UID]));
4308 else
4309 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
4310
4311 if (iif) {
4312 struct net_device *dev;
4313 int flags = 0;
4314
4315 rcu_read_lock();
4316
4317 dev = dev_get_by_index_rcu(net, iif);
4318 if (!dev) {
4319 rcu_read_unlock();
4320 err = -ENODEV;
4321 goto errout;
4322 }
4323
4324 fl6.flowi6_iif = iif;
4325
4326 if (!ipv6_addr_any(&fl6.saddr))
4327 flags |= RT6_LOOKUP_F_HAS_SADDR;
4328
4329 dst = ip6_route_input_lookup(net, dev, &fl6, flags);
4330
4331 rcu_read_unlock();
4332 } else {
4333 fl6.flowi6_oif = oif;
4334
4335 dst = ip6_route_output(net, NULL, &fl6);
4336 }
4337
4339 rt = container_of(dst, struct rt6_info, dst);
4340 if (rt->dst.error) {
4341 err = rt->dst.error;
4342 ip6_rt_put(rt);
4343 goto errout;
4344 }
4345
4346 if (rt == net->ipv6.ip6_null_entry) {
4347 err = rt->dst.error;
4348 ip6_rt_put(rt);
4349 goto errout;
4350 }
4351
4352 if (fibmatch && rt->dst.from) {
4353 struct rt6_info *ort = container_of(rt->dst.from,
4354 struct rt6_info, dst);
4355
4356 dst_hold(&ort->dst);
4357 ip6_rt_put(rt);
4358 rt = ort;
4359 }
4360
4361 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
4362 if (!skb) {
4363 ip6_rt_put(rt);
4364 err = -ENOBUFS;
4365 goto errout;
4366 }
4367
4368 skb_dst_set(skb, &rt->dst);
4369 if (fibmatch)
4370 err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
4371 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4372 nlh->nlmsg_seq, 0);
4373 else
4374 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
4375 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4376 nlh->nlmsg_seq, 0);
4377 if (err < 0) {
4378 kfree_skb(skb);
4379 goto errout;
4380 }
4381
4382 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4383 errout:
4384 return err;
4385 }
4386
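/* Broadcast a route change for @rt to RTNLGRP_IPV6_ROUTE listeners. */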
4387 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
4388 unsigned int nlm_flags)
4389 {
4390 struct sk_buff *skb;
4391 struct net *net = info->nl_net;
4392 u32 seq;
4393 int err;
4394
4395 err = -ENOBUFS;
4396 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4397
4398 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4399 if (!skb)
4400 goto errout;
4401
4402 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
4403 event, info->portid, seq, nlm_flags);
4404 if (err < 0) {
4405 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
4406 WARN_ON(err == -EMSGSIZE);
4407 kfree_skb(skb);
4408 goto errout;
4409 }
4410 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4411 info->nlh, gfp_any());
4412 return;
4413 errout:
4414 if (err < 0)
4415 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
4416 }
4417
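/* Loopback device notifier: attach the special (null/prohibit/blackhole)
 * route entries to the loopback device on NETDEV_REGISTER and release
 * their idev references on NETDEV_UNREGISTER.
 */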
4418 static int ip6_route_dev_notify(struct notifier_block *this,
4419 unsigned long event, void *ptr)
4420 {
4421 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4422 struct net *net = dev_net(dev);
4423
4424 if (!(dev->flags & IFF_LOOPBACK))
4425 return NOTIFY_OK;
4426
4427 if (event == NETDEV_REGISTER) {
4428 net->ipv6.ip6_null_entry->dst.dev = dev;
4429 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
4430 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4431 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
4432 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
4433 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
4434 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4435 #endif
4436 } else if (event == NETDEV_UNREGISTER &&
4437 dev->reg_state != NETREG_UNREGISTERED) {
4438 /* NETDEV_UNREGISTER can be fired multiple times by
4439 * netdev_wait_allrefs(). Make sure we only call this once.
4440 */
4441 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
4442 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4443 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
4444 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
4445 #endif
4446 }
4447
4448 return NOTIFY_OK;
4449 }
4450
4451 /*
4452 * /proc
4453 */
4454
4455 #ifdef CONFIG_PROC_FS
4456
4457 static const struct file_operations ipv6_route_proc_fops = {
4458 .owner = THIS_MODULE,
4459 .open = ipv6_route_open,
4460 .read = seq_read,
4461 .llseek = seq_lseek,
4462 .release = seq_release_net,
4463 };
4464
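/* /proc/net/rt6_stats: FIB nodes, route nodes, route allocations,
 * route entries, cache entries, dst entries and discarded routes,
 * all in hex.
 */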
4465 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
4466 {
4467 struct net *net = (struct net *)seq->private;
4468 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
4469 net->ipv6.rt6_stats->fib_nodes,
4470 net->ipv6.rt6_stats->fib_route_nodes,
4471 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
4472 net->ipv6.rt6_stats->fib_rt_entries,
4473 net->ipv6.rt6_stats->fib_rt_cache,
4474 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
4475 net->ipv6.rt6_stats->fib_discarded_routes);
4476
4477 return 0;
4478 }
4479
4480 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
4481 {
4482 return single_open_net(inode, file, rt6_stats_seq_show);
4483 }
4484
4485 static const struct file_operations rt6_stats_seq_fops = {
4486 .owner = THIS_MODULE,
4487 .open = rt6_stats_seq_open,
4488 .read = seq_read,
4489 .llseek = seq_lseek,
4490 .release = single_release_net,
4491 };
4492 #endif /* CONFIG_PROC_FS */
4493
4494 #ifdef CONFIG_SYSCTL
4495
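/* Write-only sysctl (net.ipv6.route.flush): writing a value triggers an
 * immediate garbage-collection pass over the routing table.
 */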
4496 static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
4498 void __user *buffer, size_t *lenp, loff_t *ppos)
4499 {
4500 struct net *net;
4501 int delay;
4502 if (!write)
4503 return -EINVAL;
4504
4505 net = (struct net *)ctl->extra1;
4506 delay = net->ipv6.sysctl.flush_delay;
4507 proc_dointvec(ctl, write, buffer, lenp, ppos);
4508 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
4509 return 0;
4510 }
4511
4512 struct ctl_table ipv6_route_table_template[] = {
4513 {
4514 .procname = "flush",
4515 .data = &init_net.ipv6.sysctl.flush_delay,
4516 .maxlen = sizeof(int),
4517 .mode = 0200,
4518 .proc_handler = ipv6_sysctl_rtcache_flush
4519 },
4520 {
4521 .procname = "gc_thresh",
4522 .data = &ip6_dst_ops_template.gc_thresh,
4523 .maxlen = sizeof(int),
4524 .mode = 0644,
4525 .proc_handler = proc_dointvec,
4526 },
4527 {
4528 .procname = "max_size",
4529 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
4530 .maxlen = sizeof(int),
4531 .mode = 0644,
4532 .proc_handler = proc_dointvec,
4533 },
4534 {
4535 .procname = "gc_min_interval",
4536 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
4537 .maxlen = sizeof(int),
4538 .mode = 0644,
4539 .proc_handler = proc_dointvec_jiffies,
4540 },
4541 {
4542 .procname = "gc_timeout",
4543 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
4544 .maxlen = sizeof(int),
4545 .mode = 0644,
4546 .proc_handler = proc_dointvec_jiffies,
4547 },
4548 {
4549 .procname = "gc_interval",
4550 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
4551 .maxlen = sizeof(int),
4552 .mode = 0644,
4553 .proc_handler = proc_dointvec_jiffies,
4554 },
4555 {
4556 .procname = "gc_elasticity",
4557 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
4558 .maxlen = sizeof(int),
4559 .mode = 0644,
4560 .proc_handler = proc_dointvec,
4561 },
4562 {
4563 .procname = "mtu_expires",
4564 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
4565 .maxlen = sizeof(int),
4566 .mode = 0644,
4567 .proc_handler = proc_dointvec_jiffies,
4568 },
4569 {
4570 .procname = "min_adv_mss",
4571 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
4572 .maxlen = sizeof(int),
4573 .mode = 0644,
4574 .proc_handler = proc_dointvec,
4575 },
4576 {
4577 .procname = "gc_min_interval_ms",
4578 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
4579 .maxlen = sizeof(int),
4580 .mode = 0644,
4581 .proc_handler = proc_dointvec_ms_jiffies,
4582 },
4583 { }
4584 };
4585
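/* Duplicate the sysctl template for a new namespace and repoint each
 * entry at that namespace's own fields.
 */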
4586 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
4587 {
4588 struct ctl_table *table;
4589
4590 table = kmemdup(ipv6_route_table_template,
4591 sizeof(ipv6_route_table_template),
4592 GFP_KERNEL);
4593
4594 if (table) {
4595 table[0].data = &net->ipv6.sysctl.flush_delay;
4596 table[0].extra1 = net;
4597 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
4598 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
4599 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
4600 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
4601 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
4602 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
4603 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
4604 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
4605 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
4606
4607 /* Don't export sysctls to unprivileged users */
4608 if (net->user_ns != &init_user_ns)
4609 table[0].procname = NULL;
4610 }
4611
4612 return table;
4613 }
4614 #endif
4615
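/* Per-namespace init: set up dst ops, allocate the special route
 * entries and install the default sysctl values.
 */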
4616 static int __net_init ip6_route_net_init(struct net *net)
4617 {
4618 int ret = -ENOMEM;
4619
4620 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
4621 sizeof(net->ipv6.ip6_dst_ops));
4622
4623 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
4624 goto out_ip6_dst_ops;
4625
4626 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
4627 sizeof(*net->ipv6.ip6_null_entry),
4628 GFP_KERNEL);
4629 if (!net->ipv6.ip6_null_entry)
4630 goto out_ip6_dst_entries;
4631 net->ipv6.ip6_null_entry->dst.path =
4632 (struct dst_entry *)net->ipv6.ip6_null_entry;
4633 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4634 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
4635 ip6_template_metrics, true);
4636
4637 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4638 net->ipv6.fib6_has_custom_rules = false;
4639 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
4640 sizeof(*net->ipv6.ip6_prohibit_entry),
4641 GFP_KERNEL);
4642 if (!net->ipv6.ip6_prohibit_entry)
4643 goto out_ip6_null_entry;
4644 net->ipv6.ip6_prohibit_entry->dst.path =
4645 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
4646 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4647 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
4648 ip6_template_metrics, true);
4649
4650 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
4651 sizeof(*net->ipv6.ip6_blk_hole_entry),
4652 GFP_KERNEL);
4653 if (!net->ipv6.ip6_blk_hole_entry)
4654 goto out_ip6_prohibit_entry;
4655 net->ipv6.ip6_blk_hole_entry->dst.path =
4656 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
4657 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4658 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
4659 ip6_template_metrics, true);
4660 #endif
4661
4662 net->ipv6.sysctl.flush_delay = 0;
4663 net->ipv6.sysctl.ip6_rt_max_size = 4096;
4664 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
4665 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
4666 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
4667 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
4668 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
4669 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
4670
4671 net->ipv6.ip6_rt_gc_expire = 30*HZ;
4672
4673 ret = 0;
4674 out:
4675 return ret;
4676
4677 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4678 out_ip6_prohibit_entry:
4679 kfree(net->ipv6.ip6_prohibit_entry);
4680 out_ip6_null_entry:
4681 kfree(net->ipv6.ip6_null_entry);
4682 #endif
4683 out_ip6_dst_entries:
4684 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
4685 out_ip6_dst_ops:
4686 goto out;
4687 }
4688
4689 static void __net_exit ip6_route_net_exit(struct net *net)
4690 {
4691 kfree(net->ipv6.ip6_null_entry);
4692 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4693 kfree(net->ipv6.ip6_prohibit_entry);
4694 kfree(net->ipv6.ip6_blk_hole_entry);
4695 #endif
4696 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
4697 }
4698
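/* Late per-namespace init: create the /proc/net/ipv6_route and
 * /proc/net/rt6_stats entries.
 */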
4699 static int __net_init ip6_route_net_init_late(struct net *net)
4700 {
4701 #ifdef CONFIG_PROC_FS
4702 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
4703 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
4704 #endif
4705 return 0;
4706 }
4707
4708 static void __net_exit ip6_route_net_exit_late(struct net *net)
4709 {
4710 #ifdef CONFIG_PROC_FS
4711 remove_proc_entry("ipv6_route", net->proc_net);
4712 remove_proc_entry("rt6_stats", net->proc_net);
4713 #endif
4714 }
4715
4716 static struct pernet_operations ip6_route_net_ops = {
4717 .init = ip6_route_net_init,
4718 .exit = ip6_route_net_exit,
4719 };
4720
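/* Allocate the per-namespace inet_peer base (peer state used by IPv6,
 * e.g. for ICMPv6 rate limiting).
 */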
4721 static int __net_init ipv6_inetpeer_init(struct net *net)
4722 {
4723 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
4724
4725 if (!bp)
4726 return -ENOMEM;
4727 inet_peer_base_init(bp);
4728 net->ipv6.peers = bp;
4729 return 0;
4730 }
4731
4732 static void __net_exit ipv6_inetpeer_exit(struct net *net)
4733 {
4734 struct inet_peer_base *bp = net->ipv6.peers;
4735
4736 net->ipv6.peers = NULL;
4737 inetpeer_invalidate_tree(bp);
4738 kfree(bp);
4739 }
4740
4741 static struct pernet_operations ipv6_inetpeer_ops = {
4742 .init = ipv6_inetpeer_init,
4743 .exit = ipv6_inetpeer_exit,
4744 };
4745
4746 static struct pernet_operations ip6_route_net_late_ops = {
4747 .init = ip6_route_net_init_late,
4748 .exit = ip6_route_net_exit_late,
4749 };
4750
4751 static struct notifier_block ip6_route_dev_notifier = {
4752 .notifier_call = ip6_route_dev_notify,
4753 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
4754 };
4755
4756 void __init ip6_route_init_special_entries(void)
4757 {
4758 /* The loopback device is registered before this portion of code runs,
4759 * so the loopback reference in rt6_info will not have been taken; take
4760 * it manually for init_net. */
4761 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
4762 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4763 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4764 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
4765 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4766 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
4767 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4768 #endif
4769 }
4770
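/* Subsystem init: create the rt6_info slab cache, register the pernet
 * subsystems, fib6, xfrm6 and policy rules, hook up the RTM_*ROUTE
 * handlers and the netdevice notifier, and set up the per-cpu
 * uncached route lists.
 */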
4771 int __init ip6_route_init(void)
4772 {
4773 int ret;
4774 int cpu;
4775
4776 ret = -ENOMEM;
4777 ip6_dst_ops_template.kmem_cachep =
4778 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
4779 SLAB_HWCACHE_ALIGN, NULL);
4780 if (!ip6_dst_ops_template.kmem_cachep)
4781 goto out;
4782
4783 ret = dst_entries_init(&ip6_dst_blackhole_ops);
4784 if (ret)
4785 goto out_kmem_cache;
4786
4787 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
4788 if (ret)
4789 goto out_dst_entries;
4790
4791 ret = register_pernet_subsys(&ip6_route_net_ops);
4792 if (ret)
4793 goto out_register_inetpeer;
4794
4795 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
4796
4797 ret = fib6_init();
4798 if (ret)
4799 goto out_register_subsys;
4800
4801 ret = xfrm6_init();
4802 if (ret)
4803 goto out_fib6_init;
4804
4805 ret = fib6_rules_init();
4806 if (ret)
4807 goto xfrm6_init;
4808
4809 ret = register_pernet_subsys(&ip6_route_net_late_ops);
4810 if (ret)
4811 goto fib6_rules_init;
4812
4813 ret = -ENOBUFS;
4814 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
4815 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
4816 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
4817 RTNL_FLAG_DOIT_UNLOCKED))
4818 goto out_register_late_subsys;
4819
4820 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
4821 if (ret)
4822 goto out_register_late_subsys;
4823
4824 for_each_possible_cpu(cpu) {
4825 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
4826
4827 INIT_LIST_HEAD(&ul->head);
4828 spin_lock_init(&ul->lock);
4829 }
4830
4831 out:
4832 return ret;
4833
4834 out_register_late_subsys:
4835 unregister_pernet_subsys(&ip6_route_net_late_ops);
4836 fib6_rules_init:
4837 fib6_rules_cleanup();
4838 xfrm6_init:
4839 xfrm6_fini();
4840 out_fib6_init:
4841 fib6_gc_cleanup();
4842 out_register_subsys:
4843 unregister_pernet_subsys(&ip6_route_net_ops);
4844 out_register_inetpeer:
4845 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4846 out_dst_entries:
4847 dst_entries_destroy(&ip6_dst_blackhole_ops);
4848 out_kmem_cache:
4849 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4850 goto out;
4851 }
4852
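/* Undo ip6_route_init(), tearing the pieces down in reverse order. */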
4853 void ip6_route_cleanup(void)
4854 {
4855 unregister_netdevice_notifier(&ip6_route_dev_notifier);
4856 unregister_pernet_subsys(&ip6_route_net_late_ops);
4857 fib6_rules_cleanup();
4858 xfrm6_fini();
4859 fib6_gc_cleanup();
4860 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4861 unregister_pernet_subsys(&ip6_route_net_ops);
4862 dst_entries_destroy(&ip6_dst_blackhole_ops);
4863 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4864 }