net/ipv6/route.c (mirror_ubuntu-artful-kernel.git, at commit "xfrm: reuse uncached_list to track xdsts")
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 /* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27 #define pr_fmt(fmt) "IPv6: " fmt
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/dst_metadata.h>
58 #include <net/xfrm.h>
59 #include <net/netevent.h>
60 #include <net/netlink.h>
61 #include <net/nexthop.h>
62 #include <net/lwtunnel.h>
63 #include <net/ip_tunnels.h>
64 #include <net/l3mdev.h>
65 #include <trace/events/fib6.h>
66
67 #include <linux/uaccess.h>
68
69 #ifdef CONFIG_SYSCTL
70 #include <linux/sysctl.h>
71 #endif
72
73 enum rt6_nud_state {
74 RT6_NUD_FAIL_HARD = -3,
75 RT6_NUD_FAIL_PROBE = -2,
76 RT6_NUD_FAIL_DO_RR = -1,
77 RT6_NUD_SUCCEED = 1
78 };
79
80 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
81 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
82 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
83 static unsigned int ip6_mtu(const struct dst_entry *dst);
84 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
85 static void ip6_dst_destroy(struct dst_entry *);
86 static void ip6_dst_ifdown(struct dst_entry *,
87 struct net_device *dev, int how);
88 static int ip6_dst_gc(struct dst_ops *ops);
89
90 static int ip6_pkt_discard(struct sk_buff *skb);
91 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
92 static int ip6_pkt_prohibit(struct sk_buff *skb);
93 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
94 static void ip6_link_failure(struct sk_buff *skb);
95 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
96 struct sk_buff *skb, u32 mtu);
97 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
98 struct sk_buff *skb);
99 static void rt6_dst_from_metrics_check(struct rt6_info *rt);
100 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
101 static size_t rt6_nlmsg_size(struct rt6_info *rt);
102 static int rt6_fill_node(struct net *net,
103 struct sk_buff *skb, struct rt6_info *rt,
104 struct in6_addr *dst, struct in6_addr *src,
105 int iif, int type, u32 portid, u32 seq,
106 unsigned int flags);
107
108 #ifdef CONFIG_IPV6_ROUTE_INFO
109 static struct rt6_info *rt6_add_route_info(struct net *net,
110 const struct in6_addr *prefix, int prefixlen,
111 const struct in6_addr *gwaddr,
112 struct net_device *dev,
113 unsigned int pref);
114 static struct rt6_info *rt6_get_route_info(struct net *net,
115 const struct in6_addr *prefix, int prefixlen,
116 const struct in6_addr *gwaddr,
117 struct net_device *dev);
118 #endif
119
120 struct uncached_list {
121 spinlock_t lock;
122 struct list_head head;
123 };
124
125 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
126
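/* Routes that are not attached to the fib6 tree (e.g. RTF_CACHE
 * clones and the dsts allocated by icmp6_dst_alloc()) are kept on a
 * per-cpu uncached list so that rt6_uncached_list_flush_dev() below
 * can drop their device references when a net_device goes away.
 */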
127 void rt6_uncached_list_add(struct rt6_info *rt)
128 {
129 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
130
131 rt->rt6i_uncached_list = ul;
132
133 spin_lock_bh(&ul->lock);
134 list_add_tail(&rt->rt6i_uncached, &ul->head);
135 spin_unlock_bh(&ul->lock);
136 }
137
138 void rt6_uncached_list_del(struct rt6_info *rt)
139 {
140 if (!list_empty(&rt->rt6i_uncached)) {
141 struct uncached_list *ul = rt->rt6i_uncached_list;
142
143 spin_lock_bh(&ul->lock);
144 list_del(&rt->rt6i_uncached);
145 spin_unlock_bh(&ul->lock);
146 }
147 }
148
149 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
150 {
151 struct net_device *loopback_dev = net->loopback_dev;
152 int cpu;
153
154 if (dev == loopback_dev)
155 return;
156
157 for_each_possible_cpu(cpu) {
158 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
159 struct rt6_info *rt;
160
161 spin_lock_bh(&ul->lock);
162 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
163 struct inet6_dev *rt_idev = rt->rt6i_idev;
164 struct net_device *rt_dev = rt->dst.dev;
165
166 if (rt_idev->dev == dev) {
167 rt->rt6i_idev = in6_dev_get(loopback_dev);
168 in6_dev_put(rt_idev);
169 }
170
171 if (rt_dev == dev) {
172 rt->dst.dev = loopback_dev;
173 dev_hold(rt->dst.dev);
174 dev_put(rt_dev);
175 }
176 }
177 spin_unlock_bh(&ul->lock);
178 }
179 }
180
181 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
182 {
183 return dst_metrics_write_ptr(rt->dst.from);
184 }
185
186 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
187 {
188 struct rt6_info *rt = (struct rt6_info *)dst;
189
190 if (rt->rt6i_flags & RTF_PCPU)
191 return rt6_pcpu_cow_metrics(rt);
192 else if (rt->rt6i_flags & RTF_CACHE)
193 return NULL;
194 else
195 return dst_cow_metrics_generic(dst, old);
196 }
197
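/* Pick the address used for the neighbour lookup: the route's
 * gateway when one is set, otherwise the destination address from
 * the skb, falling back to the daddr passed in by the caller.
 */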
198 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
199 struct sk_buff *skb,
200 const void *daddr)
201 {
202 struct in6_addr *p = &rt->rt6i_gateway;
203
204 if (!ipv6_addr_any(p))
205 return (const void *) p;
206 else if (skb)
207 return &ipv6_hdr(skb)->daddr;
208 return daddr;
209 }
210
211 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
212 struct sk_buff *skb,
213 const void *daddr)
214 {
215 struct rt6_info *rt = (struct rt6_info *) dst;
216 struct neighbour *n;
217
218 daddr = choose_neigh_daddr(rt, skb, daddr);
219 n = __ipv6_neigh_lookup(dst->dev, daddr);
220 if (n)
221 return n;
222 return neigh_create(&nd_tbl, daddr, dst->dev);
223 }
224
225 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
226 {
227 struct net_device *dev = dst->dev;
228 struct rt6_info *rt = (struct rt6_info *)dst;
229
230 daddr = choose_neigh_daddr(rt, NULL, daddr);
231 if (!daddr)
232 return;
233 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
234 return;
235 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
236 return;
237 __ipv6_confirm_neigh(dev, daddr);
238 }
239
240 static struct dst_ops ip6_dst_ops_template = {
241 .family = AF_INET6,
242 .gc = ip6_dst_gc,
243 .gc_thresh = 1024,
244 .check = ip6_dst_check,
245 .default_advmss = ip6_default_advmss,
246 .mtu = ip6_mtu,
247 .cow_metrics = ipv6_cow_metrics,
248 .destroy = ip6_dst_destroy,
249 .ifdown = ip6_dst_ifdown,
250 .negative_advice = ip6_negative_advice,
251 .link_failure = ip6_link_failure,
252 .update_pmtu = ip6_rt_update_pmtu,
253 .redirect = rt6_do_redirect,
254 .local_out = __ip6_local_out,
255 .neigh_lookup = ip6_neigh_lookup,
256 .confirm_neigh = ip6_confirm_neigh,
257 };
258
259 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
260 {
261 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
262
263 return mtu ? : dst->dev->mtu;
264 }
265
266 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
267 struct sk_buff *skb, u32 mtu)
268 {
269 }
270
271 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
272 struct sk_buff *skb)
273 {
274 }
275
276 static struct dst_ops ip6_dst_blackhole_ops = {
277 .family = AF_INET6,
278 .destroy = ip6_dst_destroy,
279 .check = ip6_dst_check,
280 .mtu = ip6_blackhole_mtu,
281 .default_advmss = ip6_default_advmss,
282 .update_pmtu = ip6_rt_blackhole_update_pmtu,
283 .redirect = ip6_rt_blackhole_redirect,
284 .cow_metrics = dst_cow_metrics_generic,
285 .neigh_lookup = ip6_neigh_lookup,
286 };
287
288 static const u32 ip6_template_metrics[RTAX_MAX] = {
289 [RTAX_HOPLIMIT - 1] = 0,
290 };
291
292 static const struct rt6_info ip6_null_entry_template = {
293 .dst = {
294 .__refcnt = ATOMIC_INIT(1),
295 .__use = 1,
296 .obsolete = DST_OBSOLETE_FORCE_CHK,
297 .error = -ENETUNREACH,
298 .input = ip6_pkt_discard,
299 .output = ip6_pkt_discard_out,
300 },
301 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
302 .rt6i_protocol = RTPROT_KERNEL,
303 .rt6i_metric = ~(u32) 0,
304 .rt6i_ref = ATOMIC_INIT(1),
305 };
306
307 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
308
309 static const struct rt6_info ip6_prohibit_entry_template = {
310 .dst = {
311 .__refcnt = ATOMIC_INIT(1),
312 .__use = 1,
313 .obsolete = DST_OBSOLETE_FORCE_CHK,
314 .error = -EACCES,
315 .input = ip6_pkt_prohibit,
316 .output = ip6_pkt_prohibit_out,
317 },
318 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
319 .rt6i_protocol = RTPROT_KERNEL,
320 .rt6i_metric = ~(u32) 0,
321 .rt6i_ref = ATOMIC_INIT(1),
322 };
323
324 static const struct rt6_info ip6_blk_hole_entry_template = {
325 .dst = {
326 .__refcnt = ATOMIC_INIT(1),
327 .__use = 1,
328 .obsolete = DST_OBSOLETE_FORCE_CHK,
329 .error = -EINVAL,
330 .input = dst_discard,
331 .output = dst_discard_out,
332 },
333 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
334 .rt6i_protocol = RTPROT_KERNEL,
335 .rt6i_metric = ~(u32) 0,
336 .rt6i_ref = ATOMIC_INIT(1),
337 };
338
339 #endif
340
341 static void rt6_info_init(struct rt6_info *rt)
342 {
343 struct dst_entry *dst = &rt->dst;
344
345 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
346 INIT_LIST_HEAD(&rt->rt6i_siblings);
347 INIT_LIST_HEAD(&rt->rt6i_uncached);
348 }
349
350 /* allocate dst with ip6_dst_ops */
351 static struct rt6_info *__ip6_dst_alloc(struct net *net,
352 struct net_device *dev,
353 int flags)
354 {
355 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
356 1, DST_OBSOLETE_FORCE_CHK, flags);
357
358 if (rt)
359 rt6_info_init(rt);
360
361 return rt;
362 }
363
364 struct rt6_info *ip6_dst_alloc(struct net *net,
365 struct net_device *dev,
366 int flags)
367 {
368 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
369
370 if (rt) {
371 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
372 if (rt->rt6i_pcpu) {
373 int cpu;
374
375 for_each_possible_cpu(cpu) {
376 struct rt6_info **p;
377
378 p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
379 /* no one shares rt */
380 *p = NULL;
381 }
382 } else {
383 dst_release_immediate(&rt->dst);
384 return NULL;
385 }
386 }
387
388 return rt;
389 }
390 EXPORT_SYMBOL(ip6_dst_alloc);
391
392 static void ip6_dst_destroy(struct dst_entry *dst)
393 {
394 struct rt6_info *rt = (struct rt6_info *)dst;
395 struct dst_entry *from = dst->from;
396 struct inet6_dev *idev;
397
398 dst_destroy_metrics_generic(dst);
399 free_percpu(rt->rt6i_pcpu);
400 rt6_uncached_list_del(rt);
401
402 idev = rt->rt6i_idev;
403 if (idev) {
404 rt->rt6i_idev = NULL;
405 in6_dev_put(idev);
406 }
407
408 dst->from = NULL;
409 dst_release(from);
410 }
411
412 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
413 int how)
414 {
415 struct rt6_info *rt = (struct rt6_info *)dst;
416 struct inet6_dev *idev = rt->rt6i_idev;
417 struct net_device *loopback_dev =
418 dev_net(dev)->loopback_dev;
419
420 if (idev && idev->dev != loopback_dev) {
421 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
422 if (loopback_idev) {
423 rt->rt6i_idev = loopback_idev;
424 in6_dev_put(idev);
425 }
426 }
427 }
428
429 static bool __rt6_check_expired(const struct rt6_info *rt)
430 {
431 if (rt->rt6i_flags & RTF_EXPIRES)
432 return time_after(jiffies, rt->dst.expires);
433 else
434 return false;
435 }
436
437 static bool rt6_check_expired(const struct rt6_info *rt)
438 {
439 if (rt->rt6i_flags & RTF_EXPIRES) {
440 if (time_after(jiffies, rt->dst.expires))
441 return true;
442 } else if (rt->dst.from) {
443 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
444 rt6_check_expired((struct rt6_info *)rt->dst.from);
445 }
446 return false;
447 }
448
449 /* Multipath route selection:
450 * Hash-based function using packet header fields and the flow label.
451 * Adapted from fib_info_hashfn().
452 */
453 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
454 const struct flowi6 *fl6)
455 {
456 return get_hash_from_flowi6(fl6) % candidate_count;
457 }
458
459 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
460 struct flowi6 *fl6, int oif,
461 int strict)
462 {
463 struct rt6_info *sibling, *next_sibling;
464 int route_chosen;
465
466 route_chosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
467 /* Don't change the route if route_chosen == 0
468 * (the sibling list does not include ourselves)
469 */
470 if (route_chosen)
471 list_for_each_entry_safe(sibling, next_sibling,
472 &match->rt6i_siblings, rt6i_siblings) {
473 route_chosen--;
474 if (route_chosen == 0) {
475 struct inet6_dev *idev = sibling->rt6i_idev;
476
477 if (!netif_carrier_ok(sibling->dst.dev) &&
478 idev->cnf.ignore_routes_with_linkdown)
479 break;
480 if (rt6_score_route(sibling, oif, strict) < 0)
481 break;
482 match = sibling;
483 break;
484 }
485 }
486 return match;
487 }
488
489 /*
490 * Route lookup. Callers are expected to hold table->tb6_lock where required.
491 */
492
493 static inline struct rt6_info *rt6_device_match(struct net *net,
494 struct rt6_info *rt,
495 const struct in6_addr *saddr,
496 int oif,
497 int flags)
498 {
499 struct rt6_info *local = NULL;
500 struct rt6_info *sprt;
501
502 if (!oif && ipv6_addr_any(saddr))
503 goto out;
504
505 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
506 struct net_device *dev = sprt->dst.dev;
507
508 if (oif) {
509 if (dev->ifindex == oif)
510 return sprt;
511 if (dev->flags & IFF_LOOPBACK) {
512 if (!sprt->rt6i_idev ||
513 sprt->rt6i_idev->dev->ifindex != oif) {
514 if (flags & RT6_LOOKUP_F_IFACE)
515 continue;
516 if (local &&
517 local->rt6i_idev->dev->ifindex == oif)
518 continue;
519 }
520 local = sprt;
521 }
522 } else {
523 if (ipv6_chk_addr(net, saddr, dev,
524 flags & RT6_LOOKUP_F_IFACE))
525 return sprt;
526 }
527 }
528
529 if (oif) {
530 if (local)
531 return local;
532
533 if (flags & RT6_LOOKUP_F_IFACE)
534 return net->ipv6.ip6_null_entry;
535 }
536 out:
537 return rt;
538 }
539
540 #ifdef CONFIG_IPV6_ROUTER_PREF
541 struct __rt6_probe_work {
542 struct work_struct work;
543 struct in6_addr target;
544 struct net_device *dev;
545 };
546
547 static void rt6_probe_deferred(struct work_struct *w)
548 {
549 struct in6_addr mcaddr;
550 struct __rt6_probe_work *work =
551 container_of(w, struct __rt6_probe_work, work);
552
553 addrconf_addr_solict_mult(&work->target, &mcaddr);
554 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
555 dev_put(work->dev);
556 kfree(work);
557 }
558
559 static void rt6_probe(struct rt6_info *rt)
560 {
561 struct __rt6_probe_work *work;
562 struct neighbour *neigh;
563 /*
564 * Okay, this may not seem appropriate at first glance, but we
565 * need to check whether the router is actually reachable;
566 * aka Router Reachability Probing.
567 *
568 * A Router Reachability Probe MUST be rate-limited
569 * to no more than one per minute.
570 */
571 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
572 return;
573 rcu_read_lock_bh();
574 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
575 if (neigh) {
576 if (neigh->nud_state & NUD_VALID)
577 goto out;
578
579 work = NULL;
580 write_lock(&neigh->lock);
581 if (!(neigh->nud_state & NUD_VALID) &&
582 time_after(jiffies,
583 neigh->updated +
584 rt->rt6i_idev->cnf.rtr_probe_interval)) {
585 work = kmalloc(sizeof(*work), GFP_ATOMIC);
586 if (work)
587 __neigh_set_probe_once(neigh);
588 }
589 write_unlock(&neigh->lock);
590 } else {
591 work = kmalloc(sizeof(*work), GFP_ATOMIC);
592 }
593
594 if (work) {
595 INIT_WORK(&work->work, rt6_probe_deferred);
596 work->target = rt->rt6i_gateway;
597 dev_hold(rt->dst.dev);
598 work->dev = rt->dst.dev;
599 schedule_work(&work->work);
600 }
601
602 out:
603 rcu_read_unlock_bh();
604 }
605 #else
606 static inline void rt6_probe(struct rt6_info *rt)
607 {
608 }
609 #endif
610
611 /*
612 * Default Router Selection (RFC 2461 6.3.6)
613 */
614 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
615 {
616 struct net_device *dev = rt->dst.dev;
617 if (!oif || dev->ifindex == oif)
618 return 2;
619 if ((dev->flags & IFF_LOOPBACK) &&
620 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
621 return 1;
622 return 0;
623 }
624
625 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
626 {
627 struct neighbour *neigh;
628 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
629
630 if (rt->rt6i_flags & RTF_NONEXTHOP ||
631 !(rt->rt6i_flags & RTF_GATEWAY))
632 return RT6_NUD_SUCCEED;
633
634 rcu_read_lock_bh();
635 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
636 if (neigh) {
637 read_lock(&neigh->lock);
638 if (neigh->nud_state & NUD_VALID)
639 ret = RT6_NUD_SUCCEED;
640 #ifdef CONFIG_IPV6_ROUTER_PREF
641 else if (!(neigh->nud_state & NUD_FAILED))
642 ret = RT6_NUD_SUCCEED;
643 else
644 ret = RT6_NUD_FAIL_PROBE;
645 #endif
646 read_unlock(&neigh->lock);
647 } else {
648 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
649 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
650 }
651 rcu_read_unlock_bh();
652
653 return ret;
654 }
655
656 static int rt6_score_route(struct rt6_info *rt, int oif,
657 int strict)
658 {
659 int m;
660
661 m = rt6_check_dev(rt, oif);
662 if (!m && (strict & RT6_LOOKUP_F_IFACE))
663 return RT6_NUD_FAIL_HARD;
664 #ifdef CONFIG_IPV6_ROUTER_PREF
665 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
666 #endif
667 if (strict & RT6_LOOKUP_F_REACHABLE) {
668 int n = rt6_check_neigh(rt);
669 if (n < 0)
670 return n;
671 }
672 return m;
673 }
674
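/* Score rt and compare it against the best match found so far;
 * *mpri and the returned match are updated when rt scores higher.
 * Expired routes, and routes whose device is link-down when
 * ignore_routes_with_linkdown is set, are skipped.
 */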
675 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
676 int *mpri, struct rt6_info *match,
677 bool *do_rr)
678 {
679 int m;
680 bool match_do_rr = false;
681 struct inet6_dev *idev = rt->rt6i_idev;
682 struct net_device *dev = rt->dst.dev;
683
684 if (dev && !netif_carrier_ok(dev) &&
685 idev->cnf.ignore_routes_with_linkdown &&
686 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
687 goto out;
688
689 if (rt6_check_expired(rt))
690 goto out;
691
692 m = rt6_score_route(rt, oif, strict);
693 if (m == RT6_NUD_FAIL_DO_RR) {
694 match_do_rr = true;
695 m = 0; /* lowest valid score */
696 } else if (m == RT6_NUD_FAIL_HARD) {
697 goto out;
698 }
699
700 if (strict & RT6_LOOKUP_F_REACHABLE)
701 rt6_probe(rt);
702
703 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
704 if (m > *mpri) {
705 *do_rr = match_do_rr;
706 *mpri = m;
707 match = rt;
708 }
709 out:
710 return match;
711 }
712
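/* Walk the routes sharing rr_head's metric, starting at rr_head and
 * wrapping around from fn->leaf, and return the best-scoring one.
 * Routes at a different metric ("cont") are only considered when
 * nothing at rr_head's metric produced a match.
 */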
713 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
714 struct rt6_info *rr_head,
715 u32 metric, int oif, int strict,
716 bool *do_rr)
717 {
718 struct rt6_info *rt, *match, *cont;
719 int mpri = -1;
720
721 match = NULL;
722 cont = NULL;
723 for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
724 if (rt->rt6i_metric != metric) {
725 cont = rt;
726 break;
727 }
728
729 match = find_match(rt, oif, strict, &mpri, match, do_rr);
730 }
731
732 for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
733 if (rt->rt6i_metric != metric) {
734 cont = rt;
735 break;
736 }
737
738 match = find_match(rt, oif, strict, &mpri, match, do_rr);
739 }
740
741 if (match || !cont)
742 return match;
743
744 for (rt = cont; rt; rt = rt->dst.rt6_next)
745 match = find_match(rt, oif, strict, &mpri, match, do_rr);
746
747 return match;
748 }
749
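/* Round-robin default router selection (RFC 2461 6.3.6): pick the
 * best route among those sharing fn->rr_ptr's metric, and advance
 * rr_ptr to the next sibling when find_rr_leaf() requests it via
 * do_rr.
 */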
750 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
751 {
752 struct rt6_info *match, *rt0;
753 struct net *net;
754 bool do_rr = false;
755
756 rt0 = fn->rr_ptr;
757 if (!rt0)
758 fn->rr_ptr = rt0 = fn->leaf;
759
760 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
761 &do_rr);
762
763 if (do_rr) {
764 struct rt6_info *next = rt0->dst.rt6_next;
765
766 /* no entries matched; do round-robin */
767 if (!next || next->rt6i_metric != rt0->rt6i_metric)
768 next = fn->leaf;
769
770 if (next != rt0)
771 fn->rr_ptr = next;
772 }
773
774 net = dev_net(rt0->dst.dev);
775 return match ? match : net->ipv6.ip6_null_entry;
776 }
777
778 static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
779 {
780 return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
781 }
782
783 #ifdef CONFIG_IPV6_ROUTE_INFO
784 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
785 const struct in6_addr *gwaddr)
786 {
787 struct net *net = dev_net(dev);
788 struct route_info *rinfo = (struct route_info *) opt;
789 struct in6_addr prefix_buf, *prefix;
790 unsigned int pref;
791 unsigned long lifetime;
792 struct rt6_info *rt;
793
794 if (len < sizeof(struct route_info)) {
795 return -EINVAL;
796 }
797
798 /* Sanity check for prefix_len and length */
799 if (rinfo->length > 3) {
800 return -EINVAL;
801 } else if (rinfo->prefix_len > 128) {
802 return -EINVAL;
803 } else if (rinfo->prefix_len > 64) {
804 if (rinfo->length < 2) {
805 return -EINVAL;
806 }
807 } else if (rinfo->prefix_len > 0) {
808 if (rinfo->length < 1) {
809 return -EINVAL;
810 }
811 }
812
813 pref = rinfo->route_pref;
814 if (pref == ICMPV6_ROUTER_PREF_INVALID)
815 return -EINVAL;
816
817 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
818
819 if (rinfo->length == 3)
820 prefix = (struct in6_addr *)rinfo->prefix;
821 else {
822 /* ipv6_addr_prefix() copies at most prefix_len bits, so it cannot overflow prefix_buf */
823 ipv6_addr_prefix(&prefix_buf,
824 (struct in6_addr *)rinfo->prefix,
825 rinfo->prefix_len);
826 prefix = &prefix_buf;
827 }
828
829 if (rinfo->prefix_len == 0)
830 rt = rt6_get_dflt_router(gwaddr, dev);
831 else
832 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
833 gwaddr, dev);
834
835 if (rt && !lifetime) {
836 ip6_del_rt(rt);
837 rt = NULL;
838 }
839
840 if (!rt && lifetime)
841 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
842 dev, pref);
843 else if (rt)
844 rt->rt6i_flags = RTF_ROUTEINFO |
845 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
846
847 if (rt) {
848 if (!addrconf_finite_timeout(lifetime))
849 rt6_clean_expires(rt);
850 else
851 rt6_set_expires(rt, jiffies + HZ * lifetime);
852
853 ip6_rt_put(rt);
854 }
855 return 0;
856 }
857 #endif
858
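/* Walk back up the trie from fn, descending into source-address
 * subtrees where present, until a node carrying route info
 * (RTN_RTINFO) is found; returns NULL once the tree root is reached.
 */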
859 static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
860 struct in6_addr *saddr)
861 {
862 struct fib6_node *pn;
863 while (1) {
864 if (fn->fn_flags & RTN_TL_ROOT)
865 return NULL;
866 pn = fn->parent;
867 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
868 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
869 else
870 fn = pn;
871 if (fn->fn_flags & RTN_RTINFO)
872 return fn;
873 }
874 }
875
876 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
877 struct fib6_table *table,
878 struct flowi6 *fl6, int flags)
879 {
880 struct fib6_node *fn;
881 struct rt6_info *rt;
882
883 read_lock_bh(&table->tb6_lock);
884 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
885 restart:
886 rt = fn->leaf;
887 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
888 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
889 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
890 if (rt == net->ipv6.ip6_null_entry) {
891 fn = fib6_backtrack(fn, &fl6->saddr);
892 if (fn)
893 goto restart;
894 }
895 dst_use(&rt->dst, jiffies);
896 read_unlock_bh(&table->tb6_lock);
897
898 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
899
900 return rt;
901
902 }
903
904 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
905 int flags)
906 {
907 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
908 }
909 EXPORT_SYMBOL_GPL(ip6_route_lookup);
910
911 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
912 const struct in6_addr *saddr, int oif, int strict)
913 {
914 struct flowi6 fl6 = {
915 .flowi6_oif = oif,
916 .daddr = *daddr,
917 };
918 struct dst_entry *dst;
919 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
920
921 if (saddr) {
922 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
923 flags |= RT6_LOOKUP_F_HAS_SADDR;
924 }
925
926 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
927 if (dst->error == 0)
928 return (struct rt6_info *) dst;
929
930 dst_release(dst);
931
932 return NULL;
933 }
934 EXPORT_SYMBOL(rt6_lookup);
935
936 /* ip6_ins_rt is called with table->tb6_lock NOT held (i.e. free).
937 * It takes a new route entry; if the addition fails for any reason,
938 * the route is released.
939 * The caller must hold a dst reference before calling it.
940 */
941
942 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
943 struct mx6_config *mxc,
944 struct netlink_ext_ack *extack)
945 {
946 int err;
947 struct fib6_table *table;
948
949 table = rt->rt6i_table;
950 write_lock_bh(&table->tb6_lock);
951 err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
952 write_unlock_bh(&table->tb6_lock);
953
954 return err;
955 }
956
957 int ip6_ins_rt(struct rt6_info *rt)
958 {
959 struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
960 struct mx6_config mxc = { .mx = NULL, };
961
962 /* Hold dst to account for the reference from the fib6 tree */
963 dst_hold(&rt->dst);
964 return __ip6_ins_rt(rt, &info, &mxc, NULL);
965 }
966
967 static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
968 const struct in6_addr *daddr,
969 const struct in6_addr *saddr)
970 {
971 struct rt6_info *rt;
972
973 /*
974 * Clone the route.
975 */
976
977 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
978 ort = (struct rt6_info *)ort->dst.from;
979
980 rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
981
982 if (!rt)
983 return NULL;
984
985 ip6_rt_copy_init(rt, ort);
986 rt->rt6i_flags |= RTF_CACHE;
987 rt->rt6i_metric = 0;
988 rt->dst.flags |= DST_HOST;
989 rt->rt6i_dst.addr = *daddr;
990 rt->rt6i_dst.plen = 128;
991
992 if (!rt6_is_gw_or_nonexthop(ort)) {
993 if (ort->rt6i_dst.plen != 128 &&
994 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
995 rt->rt6i_flags |= RTF_ANYCAST;
996 #ifdef CONFIG_IPV6_SUBTREES
997 if (rt->rt6i_src.plen && saddr) {
998 rt->rt6i_src.addr = *saddr;
999 rt->rt6i_src.plen = 128;
1000 }
1001 #endif
1002 }
1003
1004 return rt;
1005 }
1006
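/* Allocate a per-cpu copy of rt. Per-cpu copies carry RTF_PCPU and
 * are never inserted into the fib6 tree themselves.
 */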
1007 static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
1008 {
1009 struct rt6_info *pcpu_rt;
1010
1011 pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
1012 rt->dst.dev, rt->dst.flags);
1013
1014 if (!pcpu_rt)
1015 return NULL;
1016 ip6_rt_copy_init(pcpu_rt, rt);
1017 pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
1018 pcpu_rt->rt6i_flags |= RTF_PCPU;
1019 return pcpu_rt;
1020 }
1021
1022 /* It should be called with read_lock_bh(&tb6_lock) acquired */
1023 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
1024 {
1025 struct rt6_info *pcpu_rt, **p;
1026
1027 p = this_cpu_ptr(rt->rt6i_pcpu);
1028 pcpu_rt = *p;
1029
1030 if (pcpu_rt) {
1031 dst_hold(&pcpu_rt->dst);
1032 rt6_dst_from_metrics_check(pcpu_rt);
1033 }
1034 return pcpu_rt;
1035 }
1036
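/* Install a freshly allocated pcpu copy of rt in rt->rt6i_pcpu.
 * cmpxchg() resolves the race with another CPU installing its own
 * copy first; if rt was removed from the fib6 tree before the
 * read_lock was re-taken, fall back to rt itself and let the next
 * dst_check() trigger a re-lookup.
 */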
1037 static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1038 {
1039 struct fib6_table *table = rt->rt6i_table;
1040 struct rt6_info *pcpu_rt, *prev, **p;
1041
1042 pcpu_rt = ip6_rt_pcpu_alloc(rt);
1043 if (!pcpu_rt) {
1044 struct net *net = dev_net(rt->dst.dev);
1045
1046 dst_hold(&net->ipv6.ip6_null_entry->dst);
1047 return net->ipv6.ip6_null_entry;
1048 }
1049
1050 read_lock_bh(&table->tb6_lock);
1051 if (rt->rt6i_pcpu) {
1052 p = this_cpu_ptr(rt->rt6i_pcpu);
1053 prev = cmpxchg(p, NULL, pcpu_rt);
1054 if (prev) {
1055 /* If someone did it before us, return prev instead */
1056 dst_release_immediate(&pcpu_rt->dst);
1057 pcpu_rt = prev;
1058 }
1059 } else {
1060 /* rt has been removed from the fib6 tree
1061 * before we have a chance to acquire the read_lock.
1062 * In this case, don't bother to create a pcpu rt
1063 * since rt is going away anyway. The next
1064 * dst_check() will trigger a re-lookup.
1065 */
1066 dst_release_immediate(&pcpu_rt->dst);
1067 pcpu_rt = rt;
1068 }
1069 dst_hold(&pcpu_rt->dst);
1070 rt6_dst_from_metrics_check(pcpu_rt);
1071 read_unlock_bh(&table->tb6_lock);
1072 return pcpu_rt;
1073 }
1074
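/* Core policy-routing lookup. Returns, with a reference held, one
 * of: the fib6 entry itself (null entry or RTF_CACHE routes), an
 * uncached RTF_CACHE clone (FLOWI_FLAG_KNOWN_NH without a gateway),
 * or a per-cpu copy of the matched route.
 */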
1075 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1076 int oif, struct flowi6 *fl6, int flags)
1077 {
1078 struct fib6_node *fn, *saved_fn;
1079 struct rt6_info *rt;
1080 int strict = 0;
1081
1082 strict |= flags & RT6_LOOKUP_F_IFACE;
1083 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
1084 if (net->ipv6.devconf_all->forwarding == 0)
1085 strict |= RT6_LOOKUP_F_REACHABLE;
1086
1087 read_lock_bh(&table->tb6_lock);
1088
1089 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1090 saved_fn = fn;
1091
1092 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1093 oif = 0;
1094
1095 redo_rt6_select:
1096 rt = rt6_select(fn, oif, strict);
1097 if (rt->rt6i_nsiblings)
1098 rt = rt6_multipath_select(rt, fl6, oif, strict);
1099 if (rt == net->ipv6.ip6_null_entry) {
1100 fn = fib6_backtrack(fn, &fl6->saddr);
1101 if (fn)
1102 goto redo_rt6_select;
1103 else if (strict & RT6_LOOKUP_F_REACHABLE) {
1104 /* also consider unreachable route */
1105 strict &= ~RT6_LOOKUP_F_REACHABLE;
1106 fn = saved_fn;
1107 goto redo_rt6_select;
1108 }
1109 }
1110
1111
1112 if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
1113 dst_use(&rt->dst, jiffies);
1114 read_unlock_bh(&table->tb6_lock);
1115
1116 rt6_dst_from_metrics_check(rt);
1117
1118 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1119 return rt;
1120 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1121 !(rt->rt6i_flags & RTF_GATEWAY))) {
1122 /* Create a RTF_CACHE clone which will not be
1123 * owned by the fib6 tree. It is for the special case where
1124 * the daddr in the skb during the neighbor look-up is different
1125 * from the fl6->daddr used to look up the route here.
1126 */
1127
1128 struct rt6_info *uncached_rt;
1129
1130 dst_use(&rt->dst, jiffies);
1131 read_unlock_bh(&table->tb6_lock);
1132
1133 uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
1134 dst_release(&rt->dst);
1135
1136 if (uncached_rt) {
1137 /* The uncached_rt's refcnt was taken during ip6_rt_cache_alloc();
1138 * no need for another dst_hold().
1139 */
1140 rt6_uncached_list_add(uncached_rt);
1141 } else {
1142 uncached_rt = net->ipv6.ip6_null_entry;
1143 dst_hold(&uncached_rt->dst);
1144 }
1145
1146 trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
1147 return uncached_rt;
1148
1149 } else {
1150 /* Get a percpu copy */
1151
1152 struct rt6_info *pcpu_rt;
1153
1154 rt->dst.lastuse = jiffies;
1155 rt->dst.__use++;
1156 pcpu_rt = rt6_get_pcpu_route(rt);
1157
1158 if (pcpu_rt) {
1159 read_unlock_bh(&table->tb6_lock);
1160 } else {
1161 /* We have to do the read_unlock first
1162 * because rt6_make_pcpu_route() may trigger
1163 * ip6_dst_gc() which will take the write_lock.
1164 */
1165 dst_hold(&rt->dst);
1166 read_unlock_bh(&table->tb6_lock);
1167 pcpu_rt = rt6_make_pcpu_route(rt);
1168 dst_release(&rt->dst);
1169 }
1170
1171 trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
1172 return pcpu_rt;
1173
1174 }
1175 }
1176 EXPORT_SYMBOL_GPL(ip6_pol_route);
1177
1178 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
1179 struct flowi6 *fl6, int flags)
1180 {
1181 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
1182 }
1183
1184 struct dst_entry *ip6_route_input_lookup(struct net *net,
1185 struct net_device *dev,
1186 struct flowi6 *fl6, int flags)
1187 {
1188 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1189 flags |= RT6_LOOKUP_F_IFACE;
1190
1191 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1192 }
1193 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
1194
1195 void ip6_route_input(struct sk_buff *skb)
1196 {
1197 const struct ipv6hdr *iph = ipv6_hdr(skb);
1198 struct net *net = dev_net(skb->dev);
1199 int flags = RT6_LOOKUP_F_HAS_SADDR;
1200 struct ip_tunnel_info *tun_info;
1201 struct flowi6 fl6 = {
1202 .flowi6_iif = skb->dev->ifindex,
1203 .daddr = iph->daddr,
1204 .saddr = iph->saddr,
1205 .flowlabel = ip6_flowinfo(iph),
1206 .flowi6_mark = skb->mark,
1207 .flowi6_proto = iph->nexthdr,
1208 };
1209
1210 tun_info = skb_tunnel_info(skb);
1211 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1212 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
1213 skb_dst_drop(skb);
1214 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1215 }
1216
1217 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1218 struct flowi6 *fl6, int flags)
1219 {
1220 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1221 }
1222
1223 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
1224 struct flowi6 *fl6, int flags)
1225 {
1226 bool any_src;
1227
1228 if (rt6_need_strict(&fl6->daddr)) {
1229 struct dst_entry *dst;
1230
1231 dst = l3mdev_link_scope_lookup(net, fl6);
1232 if (dst)
1233 return dst;
1234 }
1235
1236 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1237
1238 any_src = ipv6_addr_any(&fl6->saddr);
1239 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
1240 (fl6->flowi6_oif && any_src))
1241 flags |= RT6_LOOKUP_F_IFACE;
1242
1243 if (!any_src)
1244 flags |= RT6_LOOKUP_F_HAS_SADDR;
1245 else if (sk)
1246 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1247
1248 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1249 }
1250 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
1251
1252 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1253 {
1254 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1255 struct net_device *loopback_dev = net->loopback_dev;
1256 struct dst_entry *new = NULL;
1257
1258 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
1259 DST_OBSOLETE_DEAD, 0);
1260 if (rt) {
1261 rt6_info_init(rt);
1262
1263 new = &rt->dst;
1264 new->__use = 1;
1265 new->input = dst_discard;
1266 new->output = dst_discard_out;
1267
1268 dst_copy_metrics(new, &ort->dst);
1269
1270 rt->rt6i_idev = in6_dev_get(loopback_dev);
1271 rt->rt6i_gateway = ort->rt6i_gateway;
1272 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
1273 rt->rt6i_metric = 0;
1274
1275 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1276 #ifdef CONFIG_IPV6_SUBTREES
1277 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1278 #endif
1279 }
1280
1281 dst_release(dst_orig);
1282 return new ? new : ERR_PTR(-ENOMEM);
1283 }
1284
1285 /*
1286 * Destination cache support functions
1287 */
1288
1289 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
1290 {
1291 if (rt->dst.from &&
1292 dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
1293 dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
1294 }
1295
1296 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1297 {
1298 u32 rt_cookie = 0;
1299
1300 if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
1301 return NULL;
1302
1303 if (rt6_check_expired(rt))
1304 return NULL;
1305
1306 return &rt->dst;
1307 }
1308
1309 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1310 {
1311 if (!__rt6_check_expired(rt) &&
1312 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1313 rt6_check((struct rt6_info *)(rt->dst.from), cookie))
1314 return &rt->dst;
1315 else
1316 return NULL;
1317 }
1318
1319 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1320 {
1321 struct rt6_info *rt;
1322
1323 rt = (struct rt6_info *) dst;
1324
1325 /* All IPv6 dsts are created with ->obsolete set to
1326 * DST_OBSOLETE_FORCE_CHK, so validation calls are always
1327 * forced down into this function.
1328 */
1329
1330 rt6_dst_from_metrics_check(rt);
1331
1332 if (rt->rt6i_flags & RTF_PCPU ||
1333 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
1334 return rt6_dst_from_check(rt, cookie);
1335 else
1336 return rt6_check(rt, cookie);
1337 }
1338
1339 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1340 {
1341 struct rt6_info *rt = (struct rt6_info *) dst;
1342
1343 if (rt) {
1344 if (rt->rt6i_flags & RTF_CACHE) {
1345 if (rt6_check_expired(rt)) {
1346 ip6_del_rt(rt);
1347 dst = NULL;
1348 }
1349 } else {
1350 dst_release(dst);
1351 dst = NULL;
1352 }
1353 }
1354 return dst;
1355 }
1356
1357 static void ip6_link_failure(struct sk_buff *skb)
1358 {
1359 struct rt6_info *rt;
1360
1361 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1362
1363 rt = (struct rt6_info *) skb_dst(skb);
1364 if (rt) {
1365 if (rt->rt6i_flags & RTF_CACHE) {
1366 if (dst_hold_safe(&rt->dst))
1367 ip6_del_rt(rt);
1368 } else {
1369 struct fib6_node *fn;
1370
1371 rcu_read_lock();
1372 fn = rcu_dereference(rt->rt6i_node);
1373 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
1374 fn->fn_sernum = -1;
1375 rcu_read_unlock();
1376 }
1377 }
1378 }
1379
1380 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
1381 {
1382 struct net *net = dev_net(rt->dst.dev);
1383
1384 rt->rt6i_flags |= RTF_MODIFIED;
1385 rt->rt6i_pmtu = mtu;
1386 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1387 }
1388
1389 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
1390 {
1391 return !(rt->rt6i_flags & RTF_CACHE) &&
1392 (rt->rt6i_flags & RTF_PCPU ||
1393 rcu_access_pointer(rt->rt6i_node));
1394 }
1395
1396 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1397 const struct ipv6hdr *iph, u32 mtu)
1398 {
1399 const struct in6_addr *daddr, *saddr;
1400 struct rt6_info *rt6 = (struct rt6_info *)dst;
1401
1402 if (rt6->rt6i_flags & RTF_LOCAL)
1403 return;
1404
1405 if (dst_metric_locked(dst, RTAX_MTU))
1406 return;
1407
1408 if (iph) {
1409 daddr = &iph->daddr;
1410 saddr = &iph->saddr;
1411 } else if (sk) {
1412 daddr = &sk->sk_v6_daddr;
1413 saddr = &inet6_sk(sk)->saddr;
1414 } else {
1415 daddr = NULL;
1416 saddr = NULL;
1417 }
1418 dst_confirm_neigh(dst, daddr);
1419 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
1420 if (mtu >= dst_mtu(dst))
1421 return;
1422
1423 if (!rt6_cache_allowed_for_pmtu(rt6)) {
1424 rt6_do_update_pmtu(rt6, mtu);
1425 } else if (daddr) {
1426 struct rt6_info *nrt6;
1427
1428 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
1429 if (nrt6) {
1430 rt6_do_update_pmtu(nrt6, mtu);
1431
1432 /* ip6_ins_rt(nrt6) will bump the
1433 * rt6->rt6i_node->fn_sernum
1434 * which will fail the next rt6_check() and
1435 * invalidate the sk->sk_dst_cache.
1436 */
1437 ip6_ins_rt(nrt6);
1438 /* Release the reference taken in
1439 * ip6_rt_cache_alloc()
1440 */
1441 dst_release(&nrt6->dst);
1442 }
1443 }
1444 }
1445
1446 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1447 struct sk_buff *skb, u32 mtu)
1448 {
1449 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
1450 }
1451
1452 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1453 int oif, u32 mark, kuid_t uid)
1454 {
1455 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1456 struct dst_entry *dst;
1457 struct flowi6 fl6;
1458
1459 memset(&fl6, 0, sizeof(fl6));
1460 fl6.flowi6_oif = oif;
1461 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1462 fl6.daddr = iph->daddr;
1463 fl6.saddr = iph->saddr;
1464 fl6.flowlabel = ip6_flowinfo(iph);
1465 fl6.flowi6_uid = uid;
1466
1467 dst = ip6_route_output(net, NULL, &fl6);
1468 if (!dst->error)
1469 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
1470 dst_release(dst);
1471 }
1472 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1473
1474 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1475 {
1476 struct dst_entry *dst;
1477
1478 ip6_update_pmtu(skb, sock_net(sk), mtu,
1479 sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
1480
1481 dst = __sk_dst_get(sk);
1482 if (!dst || !dst->obsolete ||
1483 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
1484 return;
1485
1486 bh_lock_sock(sk);
1487 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
1488 ip6_datagram_dst_update(sk, false);
1489 bh_unlock_sock(sk);
1490 }
1491 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1492
1493 /* Handle redirects */
1494 struct ip6rd_flowi {
1495 struct flowi6 fl6;
1496 struct in6_addr gateway;
1497 };
1498
1499 static struct rt6_info *__ip6_route_redirect(struct net *net,
1500 struct fib6_table *table,
1501 struct flowi6 *fl6,
1502 int flags)
1503 {
1504 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1505 struct rt6_info *rt;
1506 struct fib6_node *fn;
1507
1508 /* Get the "current" route for this destination and
1509 * check if the redirect has come from the appropriate router.
1510 *
1511 * RFC 4861 specifies that redirects should only be
1512 * accepted if they come from the nexthop to the target.
1513 * Due to the way the routes are chosen, this notion
1514 * is a bit fuzzy and one might need to check all possible
1515 * routes.
1516 */
1517
1518 read_lock_bh(&table->tb6_lock);
1519 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1520 restart:
1521 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1522 if (rt6_check_expired(rt))
1523 continue;
1524 if (rt->dst.error)
1525 break;
1526 if (!(rt->rt6i_flags & RTF_GATEWAY))
1527 continue;
1528 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1529 continue;
1530 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1531 continue;
1532 break;
1533 }
1534
1535 if (!rt)
1536 rt = net->ipv6.ip6_null_entry;
1537 else if (rt->dst.error) {
1538 rt = net->ipv6.ip6_null_entry;
1539 goto out;
1540 }
1541
1542 if (rt == net->ipv6.ip6_null_entry) {
1543 fn = fib6_backtrack(fn, &fl6->saddr);
1544 if (fn)
1545 goto restart;
1546 }
1547
1548 out:
1549 dst_hold(&rt->dst);
1550
1551 read_unlock_bh(&table->tb6_lock);
1552
1553 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1554 return rt;
1555 }
1556
1557 static struct dst_entry *ip6_route_redirect(struct net *net,
1558 const struct flowi6 *fl6,
1559 const struct in6_addr *gateway)
1560 {
1561 int flags = RT6_LOOKUP_F_HAS_SADDR;
1562 struct ip6rd_flowi rdfl;
1563
1564 rdfl.fl6 = *fl6;
1565 rdfl.gateway = *gateway;
1566
1567 return fib6_rule_lookup(net, &rdfl.fl6,
1568 flags, __ip6_route_redirect);
1569 }
1570
1571 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
1572 kuid_t uid)
1573 {
1574 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1575 struct dst_entry *dst;
1576 struct flowi6 fl6;
1577
1578 memset(&fl6, 0, sizeof(fl6));
1579 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1580 fl6.flowi6_oif = oif;
1581 fl6.flowi6_mark = mark;
1582 fl6.daddr = iph->daddr;
1583 fl6.saddr = iph->saddr;
1584 fl6.flowlabel = ip6_flowinfo(iph);
1585 fl6.flowi6_uid = uid;
1586
1587 dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1588 rt6_do_redirect(dst, NULL, skb);
1589 dst_release(dst);
1590 }
1591 EXPORT_SYMBOL_GPL(ip6_redirect);
1592
1593 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1594 u32 mark)
1595 {
1596 const struct ipv6hdr *iph = ipv6_hdr(skb);
1597 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1598 struct dst_entry *dst;
1599 struct flowi6 fl6;
1600
1601 memset(&fl6, 0, sizeof(fl6));
1602 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1603 fl6.flowi6_oif = oif;
1604 fl6.flowi6_mark = mark;
1605 fl6.daddr = msg->dest;
1606 fl6.saddr = iph->daddr;
1607 fl6.flowi6_uid = sock_net_uid(net, NULL);
1608
1609 dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1610 rt6_do_redirect(dst, NULL, skb);
1611 dst_release(dst);
1612 }
1613
1614 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1615 {
1616 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
1617 sk->sk_uid);
1618 }
1619 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1620
1621 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1622 {
1623 struct net_device *dev = dst->dev;
1624 unsigned int mtu = dst_mtu(dst);
1625 struct net *net = dev_net(dev);
1626
1627 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1628
1629 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1630 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1631
1632 /*
1633 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1634 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1635 * IPV6_MAXPLEN is also valid and means: "any MSS,
1636 * rely only on pmtu discovery"
1637 */
1638 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1639 mtu = IPV6_MAXPLEN;
1640 return mtu;
1641 }
1642
1643 static unsigned int ip6_mtu(const struct dst_entry *dst)
1644 {
1645 const struct rt6_info *rt = (const struct rt6_info *)dst;
1646 unsigned int mtu = rt->rt6i_pmtu;
1647 struct inet6_dev *idev;
1648
1649 if (mtu)
1650 goto out;
1651
1652 mtu = dst_metric_raw(dst, RTAX_MTU);
1653 if (mtu)
1654 goto out;
1655
1656 mtu = IPV6_MIN_MTU;
1657
1658 rcu_read_lock();
1659 idev = __in6_dev_get(dst->dev);
1660 if (idev)
1661 mtu = idev->cnf.mtu6;
1662 rcu_read_unlock();
1663
1664 out:
1665 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1666
1667 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1668 }
1669
1670 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1671 struct flowi6 *fl6)
1672 {
1673 struct dst_entry *dst;
1674 struct rt6_info *rt;
1675 struct inet6_dev *idev = in6_dev_get(dev);
1676 struct net *net = dev_net(dev);
1677
1678 if (unlikely(!idev))
1679 return ERR_PTR(-ENODEV);
1680
1681 rt = ip6_dst_alloc(net, dev, 0);
1682 if (unlikely(!rt)) {
1683 in6_dev_put(idev);
1684 dst = ERR_PTR(-ENOMEM);
1685 goto out;
1686 }
1687
1688 rt->dst.flags |= DST_HOST;
1689 rt->dst.output = ip6_output;
1690 rt->rt6i_gateway = fl6->daddr;
1691 rt->rt6i_dst.addr = fl6->daddr;
1692 rt->rt6i_dst.plen = 128;
1693 rt->rt6i_idev = idev;
1694 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1695
1696 /* Add this dst into uncached_list so that rt6_ifdown() can
1697 * do proper release of the net_device
1698 */
1699 rt6_uncached_list_add(rt);
1700
1701 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1702
1703 out:
1704 return dst;
1705 }
1706
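/* dst garbage collection, invoked from the dst core when the number
 * of allocated entries passes gc_thresh. fib6_run_gc() is skipped
 * while we are within ip6_rt_gc_min_interval of the last run unless
 * the table has grown past ip6_rt_max_size; ip6_rt_gc_expire adapts
 * how aggressively entries are aged.
 */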
1707 static int ip6_dst_gc(struct dst_ops *ops)
1708 {
1709 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1710 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1711 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1712 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1713 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1714 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1715 int entries;
1716
1717 entries = dst_entries_get_fast(ops);
1718 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1719 entries <= rt_max_size)
1720 goto out;
1721
1722 net->ipv6.ip6_rt_gc_expire++;
1723 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1724 entries = dst_entries_get_slow(ops);
1725 if (entries < ops->gc_thresh)
1726 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1727 out:
1728 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1729 return entries > rt_max_size;
1730 }
1731
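/* Convert the RTAX_* netlink attributes in cfg->fc_mx into the
 * metrics array carried by mx6_config, validating attribute types,
 * resolving RTAX_CC_ALGO names and clamping RTAX_HOPLIMIT to 255.
 */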
1732 static int ip6_convert_metrics(struct mx6_config *mxc,
1733 const struct fib6_config *cfg)
1734 {
1735 bool ecn_ca = false;
1736 struct nlattr *nla;
1737 int remaining;
1738 u32 *mp;
1739
1740 if (!cfg->fc_mx)
1741 return 0;
1742
1743 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1744 if (unlikely(!mp))
1745 return -ENOMEM;
1746
1747 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1748 int type = nla_type(nla);
1749 u32 val;
1750
1751 if (!type)
1752 continue;
1753 if (unlikely(type > RTAX_MAX))
1754 goto err;
1755
1756 if (type == RTAX_CC_ALGO) {
1757 char tmp[TCP_CA_NAME_MAX];
1758
1759 nla_strlcpy(tmp, nla, sizeof(tmp));
1760 val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
1761 if (val == TCP_CA_UNSPEC)
1762 goto err;
1763 } else {
1764 val = nla_get_u32(nla);
1765 }
1766 if (type == RTAX_HOPLIMIT && val > 255)
1767 val = 255;
1768 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
1769 goto err;
1770
1771 mp[type - 1] = val;
1772 __set_bit(type - 1, mxc->mx_valid);
1773 }
1774
1775 if (ecn_ca) {
1776 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
1777 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
1778 }
1779
1780 mxc->mx = mp;
1781 return 0;
1782 err:
1783 kfree(mp);
1784 return -EINVAL;
1785 }
1786
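/* Resolve a nexthop gateway strictly within the table named by
 * cfg->fc_table; returns NULL when the lookup falls through to the
 * null entry, so that the caller can retry with a full rt6_lookup().
 */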
1787 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
1788 struct fib6_config *cfg,
1789 const struct in6_addr *gw_addr)
1790 {
1791 struct flowi6 fl6 = {
1792 .flowi6_oif = cfg->fc_ifindex,
1793 .daddr = *gw_addr,
1794 .saddr = cfg->fc_prefsrc,
1795 };
1796 struct fib6_table *table;
1797 struct rt6_info *rt;
1798 int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
1799
1800 table = fib6_get_table(net, cfg->fc_table);
1801 if (!table)
1802 return NULL;
1803
1804 if (!ipv6_addr_any(&cfg->fc_prefsrc))
1805 flags |= RT6_LOOKUP_F_HAS_SADDR;
1806
1807 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
1808
1809 /* if table lookup failed, fall back to full lookup */
1810 if (rt == net->ipv6.ip6_null_entry) {
1811 ip6_rt_put(rt);
1812 rt = NULL;
1813 }
1814
1815 return rt;
1816 }
1817
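/* Build and initialize an rt6_info from a fib6_config without
 * inserting it into the fib6 tree; ip6_route_add() below does the
 * insert via __ip6_ins_rt().
 */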
1818 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
1819 struct netlink_ext_ack *extack)
1820 {
1821 struct net *net = cfg->fc_nlinfo.nl_net;
1822 struct rt6_info *rt = NULL;
1823 struct net_device *dev = NULL;
1824 struct inet6_dev *idev = NULL;
1825 struct fib6_table *table;
1826 int addr_type;
1827 int err = -EINVAL;
1828
1829 /* RTF_PCPU is an internal flag; cannot be set by userspace */
1830 if (cfg->fc_flags & RTF_PCPU) {
1831 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
1832 goto out;
1833 }
1834
1835 if (cfg->fc_dst_len > 128) {
1836 NL_SET_ERR_MSG(extack, "Invalid prefix length");
1837 goto out;
1838 }
1839 if (cfg->fc_src_len > 128) {
1840 NL_SET_ERR_MSG(extack, "Invalid source address length");
1841 goto out;
1842 }
1843 #ifndef CONFIG_IPV6_SUBTREES
1844 if (cfg->fc_src_len) {
1845 NL_SET_ERR_MSG(extack,
1846 "Specifying source address requires IPV6_SUBTREES to be enabled");
1847 goto out;
1848 }
1849 #endif
1850 if (cfg->fc_ifindex) {
1851 err = -ENODEV;
1852 dev = dev_get_by_index(net, cfg->fc_ifindex);
1853 if (!dev)
1854 goto out;
1855 idev = in6_dev_get(dev);
1856 if (!idev)
1857 goto out;
1858 }
1859
1860 if (cfg->fc_metric == 0)
1861 cfg->fc_metric = IP6_RT_PRIO_USER;
1862
1863 err = -ENOBUFS;
1864 if (cfg->fc_nlinfo.nlh &&
1865 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1866 table = fib6_get_table(net, cfg->fc_table);
1867 if (!table) {
1868 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1869 table = fib6_new_table(net, cfg->fc_table);
1870 }
1871 } else {
1872 table = fib6_new_table(net, cfg->fc_table);
1873 }
1874
1875 if (!table)
1876 goto out;
1877
1878 rt = ip6_dst_alloc(net, NULL,
1879 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
1880
1881 if (!rt) {
1882 err = -ENOMEM;
1883 goto out;
1884 }
1885
1886 if (cfg->fc_flags & RTF_EXPIRES)
1887 rt6_set_expires(rt, jiffies +
1888 clock_t_to_jiffies(cfg->fc_expires));
1889 else
1890 rt6_clean_expires(rt);
1891
1892 if (cfg->fc_protocol == RTPROT_UNSPEC)
1893 cfg->fc_protocol = RTPROT_BOOT;
1894 rt->rt6i_protocol = cfg->fc_protocol;
1895
1896 addr_type = ipv6_addr_type(&cfg->fc_dst);
1897
1898 if (addr_type & IPV6_ADDR_MULTICAST)
1899 rt->dst.input = ip6_mc_input;
1900 else if (cfg->fc_flags & RTF_LOCAL)
1901 rt->dst.input = ip6_input;
1902 else
1903 rt->dst.input = ip6_forward;
1904
1905 rt->dst.output = ip6_output;
1906
1907 if (cfg->fc_encap) {
1908 struct lwtunnel_state *lwtstate;
1909
1910 err = lwtunnel_build_state(cfg->fc_encap_type,
1911 cfg->fc_encap, AF_INET6, cfg,
1912 &lwtstate, extack);
1913 if (err)
1914 goto out;
1915 rt->dst.lwtstate = lwtstate_get(lwtstate);
1916 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
1917 rt->dst.lwtstate->orig_output = rt->dst.output;
1918 rt->dst.output = lwtunnel_output;
1919 }
1920 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
1921 rt->dst.lwtstate->orig_input = rt->dst.input;
1922 rt->dst.input = lwtunnel_input;
1923 }
1924 }
1925
1926 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1927 rt->rt6i_dst.plen = cfg->fc_dst_len;
1928 if (rt->rt6i_dst.plen == 128)
1929 rt->dst.flags |= DST_HOST;
1930
1931 #ifdef CONFIG_IPV6_SUBTREES
1932 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1933 rt->rt6i_src.plen = cfg->fc_src_len;
1934 #endif
1935
1936 rt->rt6i_metric = cfg->fc_metric;
1937
1938 /* We cannot add true routes via loopback here;
1939 they would result in kernel looping, so promote them to reject routes
1940 */
1941 if ((cfg->fc_flags & RTF_REJECT) ||
1942 (dev && (dev->flags & IFF_LOOPBACK) &&
1943 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1944 !(cfg->fc_flags & RTF_LOCAL))) {
1945 /* hold loopback dev/idev if we haven't done so. */
1946 if (dev != net->loopback_dev) {
1947 if (dev) {
1948 dev_put(dev);
1949 in6_dev_put(idev);
1950 }
1951 dev = net->loopback_dev;
1952 dev_hold(dev);
1953 idev = in6_dev_get(dev);
1954 if (!idev) {
1955 err = -ENODEV;
1956 goto out;
1957 }
1958 }
1959 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1960 switch (cfg->fc_type) {
1961 case RTN_BLACKHOLE:
1962 rt->dst.error = -EINVAL;
1963 rt->dst.output = dst_discard_out;
1964 rt->dst.input = dst_discard;
1965 break;
1966 case RTN_PROHIBIT:
1967 rt->dst.error = -EACCES;
1968 rt->dst.output = ip6_pkt_prohibit_out;
1969 rt->dst.input = ip6_pkt_prohibit;
1970 break;
1971 case RTN_THROW:
1972 case RTN_UNREACHABLE:
1973 default:
1974 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1975 : (cfg->fc_type == RTN_UNREACHABLE)
1976 ? -EHOSTUNREACH : -ENETUNREACH;
1977 rt->dst.output = ip6_pkt_discard_out;
1978 rt->dst.input = ip6_pkt_discard;
1979 break;
1980 }
1981 goto install_route;
1982 }
1983
1984 if (cfg->fc_flags & RTF_GATEWAY) {
1985 const struct in6_addr *gw_addr;
1986 int gwa_type;
1987
1988 gw_addr = &cfg->fc_gateway;
1989 gwa_type = ipv6_addr_type(gw_addr);
1990
1991 /* If gw_addr is local we will fail to detect this in case the
1992 * address is still TENTATIVE (DAD in progress). rt6_lookup()
1993 * will return the already-added prefix route via the interface
1994 * that the prefix route was assigned to, which might be non-loopback.
1995 */
1996 err = -EINVAL;
1997 if (ipv6_chk_addr_and_flags(net, gw_addr,
1998 gwa_type & IPV6_ADDR_LINKLOCAL ?
1999 dev : NULL, 0, 0)) {
2000 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2001 goto out;
2002 }
2003 rt->rt6i_gateway = *gw_addr;
2004
2005 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
2006 struct rt6_info *grt = NULL;
2007
2008 /* IPv6 strictly inhibits using non-link-local
2009 addresses as nexthop addresses.
2010 Otherwise, the router will not be able to send redirects.
2011 That is very good, but in some (rare!) circumstances
2012 (SIT, PtP, NBMA NOARP links) it is handy to allow
2013 some exceptions. --ANK
2014 We allow IPv4-mapped nexthops to support RFC 4798-style
2015 addressing.
2016 */
2017 if (!(gwa_type & (IPV6_ADDR_UNICAST |
2018 IPV6_ADDR_MAPPED))) {
2019 NL_SET_ERR_MSG(extack,
2020 "Invalid gateway address");
2021 goto out;
2022 }
2023
2024 if (cfg->fc_table) {
2025 grt = ip6_nh_lookup_table(net, cfg, gw_addr);
2026
2027 if (grt) {
2028 if (grt->rt6i_flags & RTF_GATEWAY ||
2029 (dev && dev != grt->dst.dev)) {
2030 ip6_rt_put(grt);
2031 grt = NULL;
2032 }
2033 }
2034 }
2035
2036 if (!grt)
2037 grt = rt6_lookup(net, gw_addr, NULL,
2038 cfg->fc_ifindex, 1);
2039
2040 err = -EHOSTUNREACH;
2041 if (!grt)
2042 goto out;
2043 if (dev) {
2044 if (dev != grt->dst.dev) {
2045 ip6_rt_put(grt);
2046 goto out;
2047 }
2048 } else {
2049 dev = grt->dst.dev;
2050 idev = grt->rt6i_idev;
2051 dev_hold(dev);
2052 in6_dev_hold(grt->rt6i_idev);
2053 }
2054 if (!(grt->rt6i_flags & RTF_GATEWAY))
2055 err = 0;
2056 ip6_rt_put(grt);
2057
2058 if (err)
2059 goto out;
2060 }
2061 err = -EINVAL;
2062 if (!dev) {
2063 NL_SET_ERR_MSG(extack, "Egress device not specified");
2064 goto out;
2065 } else if (dev->flags & IFF_LOOPBACK) {
2066 NL_SET_ERR_MSG(extack,
2067 "Egress device can not be loopback device for this route");
2068 goto out;
2069 }
2070 }
2071
2072 err = -ENODEV;
2073 if (!dev)
2074 goto out;
2075
2076 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
2077 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
2078 NL_SET_ERR_MSG(extack, "Invalid source address");
2079 err = -EINVAL;
2080 goto out;
2081 }
2082 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
2083 rt->rt6i_prefsrc.plen = 128;
2084 } else
2085 rt->rt6i_prefsrc.plen = 0;
2086
2087 rt->rt6i_flags = cfg->fc_flags;
2088
2089 install_route:
2090 rt->dst.dev = dev;
2091 rt->rt6i_idev = idev;
2092 rt->rt6i_table = table;
2093
2094 cfg->fc_nlinfo.nl_net = dev_net(dev);
2095
2096 return rt;
2097 out:
2098 if (dev)
2099 dev_put(dev);
2100 if (idev)
2101 in6_dev_put(idev);
2102 if (rt)
2103 dst_release_immediate(&rt->dst);
2104
2105 return ERR_PTR(err);
2106 }
2107
2108 int ip6_route_add(struct fib6_config *cfg,
2109 struct netlink_ext_ack *extack)
2110 {
2111 struct mx6_config mxc = { .mx = NULL, };
2112 struct rt6_info *rt;
2113 int err;
2114
2115 rt = ip6_route_info_create(cfg, extack);
2116 if (IS_ERR(rt)) {
2117 err = PTR_ERR(rt);
2118 rt = NULL;
2119 goto out;
2120 }
2121
2122 err = ip6_convert_metrics(&mxc, cfg);
2123 if (err)
2124 goto out;
2125
2126 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
2127
2128 kfree(mxc.mx);
2129
2130 return err;
2131 out:
2132 if (rt)
2133 dst_release_immediate(&rt->dst);
2134
2135 return err;
2136 }
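/* Example (sketch, not part of the kernel flow): a minimal in-kernel
 * blackhole route, roughly what rtm_to_fib6_config() would hand us for
 * "ip -6 route add blackhole 2001:db8::/64":
 *
 *	struct fib6_config cfg = {
 *		.fc_table	= RT6_TABLE_MAIN,
 *		.fc_dst_len	= 64,
 *		.fc_type	= RTN_BLACKHOLE,
 *		.fc_flags	= RTF_UP | RTF_REJECT,
 *		.fc_nlinfo.nl_net = net,
 *	};
 *
 *	cfg.fc_dst = prefix;	(a struct in6_addr holding 2001:db8::)
 *	err = ip6_route_add(&cfg, NULL);
 *
 * RTF_REJECT steers ip6_route_info_create() into the reject branch
 * above, and fc_type selects the dst.error value and discard handlers.
 */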
2137
2138 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2139 {
2140 int err;
2141 struct fib6_table *table;
2142 struct net *net = dev_net(rt->dst.dev);
2143
2144 if (rt == net->ipv6.ip6_null_entry) {
2145 err = -ENOENT;
2146 goto out;
2147 }
2148
2149 table = rt->rt6i_table;
2150 write_lock_bh(&table->tb6_lock);
2151 err = fib6_del(rt, info);
2152 write_unlock_bh(&table->tb6_lock);
2153
2154 out:
2155 ip6_rt_put(rt);
2156 return err;
2157 }
2158
2159 int ip6_del_rt(struct rt6_info *rt)
2160 {
2161 struct nl_info info = {
2162 .nl_net = dev_net(rt->dst.dev),
2163 };
2164 return __ip6_del_rt(rt, &info);
2165 }
2166
2167 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2168 {
2169 struct nl_info *info = &cfg->fc_nlinfo;
2170 struct net *net = info->nl_net;
2171 struct sk_buff *skb = NULL;
2172 struct fib6_table *table;
2173 int err = -ENOENT;
2174
2175 if (rt == net->ipv6.ip6_null_entry)
2176 goto out_put;
2177 table = rt->rt6i_table;
2178 write_lock_bh(&table->tb6_lock);
2179
2180 if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
2181 struct rt6_info *sibling, *next_sibling;
2182
2183 /* prefer to send a single notification with all hops */
2184 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
2185 if (skb) {
2186 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2187
2188 if (rt6_fill_node(net, skb, rt,
2189 NULL, NULL, 0, RTM_DELROUTE,
2190 info->portid, seq, 0) < 0) {
2191 kfree_skb(skb);
2192 skb = NULL;
2193 } else
2194 info->skip_notify = 1;
2195 }
2196
2197 list_for_each_entry_safe(sibling, next_sibling,
2198 &rt->rt6i_siblings,
2199 rt6i_siblings) {
2200 err = fib6_del(sibling, info);
2201 if (err)
2202 goto out_unlock;
2203 }
2204 }
2205
2206 err = fib6_del(rt, info);
2207 out_unlock:
2208 write_unlock_bh(&table->tb6_lock);
2209 out_put:
2210 ip6_rt_put(rt);
2211
2212 if (skb) {
2213 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2214 info->nlh, gfp_any());
2215 }
2216 return err;
2217 }
2218
2219 static int ip6_route_del(struct fib6_config *cfg,
2220 struct netlink_ext_ack *extack)
2221 {
2222 struct fib6_table *table;
2223 struct fib6_node *fn;
2224 struct rt6_info *rt;
2225 int err = -ESRCH;
2226
2227 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2228 if (!table) {
2229 NL_SET_ERR_MSG(extack, "FIB table does not exist");
2230 return err;
2231 }
2232
2233 read_lock_bh(&table->tb6_lock);
2234
2235 fn = fib6_locate(&table->tb6_root,
2236 &cfg->fc_dst, cfg->fc_dst_len,
2237 &cfg->fc_src, cfg->fc_src_len);
2238
2239 if (fn) {
2240 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2241 if ((rt->rt6i_flags & RTF_CACHE) &&
2242 !(cfg->fc_flags & RTF_CACHE))
2243 continue;
2244 if (cfg->fc_ifindex &&
2245 (!rt->dst.dev ||
2246 rt->dst.dev->ifindex != cfg->fc_ifindex))
2247 continue;
2248 if (cfg->fc_flags & RTF_GATEWAY &&
2249 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2250 continue;
2251 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2252 continue;
2253 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2254 continue;
2255 dst_hold(&rt->dst);
2256 read_unlock_bh(&table->tb6_lock);
2257
2258 /* if a gateway was specified, only delete that one hop */
2259 if (cfg->fc_flags & RTF_GATEWAY)
2260 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2261
2262 return __ip6_del_rt_siblings(rt, cfg);
2263 }
2264 }
2265 read_unlock_bh(&table->tb6_lock);
2266
2267 return err;
2268 }
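/* Example (informal): the selectors above mirror the netlink request,
 * so "ip -6 route del 2001:db8::/64 via fe80::1 dev eth0" matches on
 * gateway and ifindex and, because RTF_GATEWAY is set, removes just
 * that one hop via __ip6_del_rt(); a bare
 * "ip -6 route del 2001:db8::/64" goes through __ip6_del_rt_siblings()
 * and removes the route together with all of its sibling nexthops.
 */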
2269
2270 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2271 {
2272 struct netevent_redirect netevent;
2273 struct rt6_info *rt, *nrt = NULL;
2274 struct ndisc_options ndopts;
2275 struct inet6_dev *in6_dev;
2276 struct neighbour *neigh;
2277 struct rd_msg *msg;
2278 int optlen, on_link;
2279 u8 *lladdr;
2280
2281 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2282 optlen -= sizeof(*msg);
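/* For reference (the layout from include/net/ndisc.h, shown here
 * informally):
 *
 *	struct rd_msg {
 *		struct icmp6hdr	icmph;
 *		struct in6_addr	target;
 *		struct in6_addr	dest;
 *		__u8		opt[];
 *	};
 *
 * so the optlen computation above measures just the ND options that
 * follow the fixed target/destination pair.
 */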
2283
2284 if (optlen < 0) {
2285 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2286 return;
2287 }
2288
2289 msg = (struct rd_msg *)icmp6_hdr(skb);
2290
2291 if (ipv6_addr_is_multicast(&msg->dest)) {
2292 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2293 return;
2294 }
2295
2296 on_link = 0;
2297 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2298 on_link = 1;
2299 } else if (ipv6_addr_type(&msg->target) !=
2300 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2301 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2302 return;
2303 }
2304
2305 in6_dev = __in6_dev_get(skb->dev);
2306 if (!in6_dev)
2307 return;
2308 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
2309 return;
2310
2311 /* RFC2461 8.1:
2312 * The IP source address of the Redirect MUST be the same as the current
2313 * first-hop router for the specified ICMP Destination Address.
2314 */
2315
2316 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
2317 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2318 return;
2319 }
2320
2321 lladdr = NULL;
2322 if (ndopts.nd_opts_tgt_lladdr) {
2323 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
2324 skb->dev);
2325 if (!lladdr) {
2326 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2327 return;
2328 }
2329 }
2330
2331 rt = (struct rt6_info *) dst;
2332 if (rt->rt6i_flags & RTF_REJECT) {
2333 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2334 return;
2335 }
2336
2337 /* Redirect received -> path was valid.
2338 * Redirects are sent only in response to data packets,
2339 * so this nexthop is apparently reachable. --ANK
2340 */
2341 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
2342
2343 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
2344 if (!neigh)
2345 return;
2346
2347 /*
2348 * We have finally decided to accept it.
2349 */
2350
2351 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
2352 NEIGH_UPDATE_F_WEAK_OVERRIDE|
2353 NEIGH_UPDATE_F_OVERRIDE|
2354 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2355 NEIGH_UPDATE_F_ISROUTER)),
2356 NDISC_REDIRECT, &ndopts);
2357
2358 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
2359 if (!nrt)
2360 goto out;
2361
2362 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
2363 if (on_link)
2364 nrt->rt6i_flags &= ~RTF_GATEWAY;
2365
2366 nrt->rt6i_protocol = RTPROT_REDIRECT;
2367 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
2368
2369 if (ip6_ins_rt(nrt))
2370 goto out_release;
2371
2372 netevent.old = &rt->dst;
2373 netevent.new = &nrt->dst;
2374 netevent.daddr = &msg->dest;
2375 netevent.neigh = neigh;
2376 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
2377
2378 if (rt->rt6i_flags & RTF_CACHE) {
2379 rt = (struct rt6_info *) dst_clone(&rt->dst);
2380 ip6_del_rt(rt);
2381 }
2382
2383 out_release:
2384 /* Release the reference taken in
2385 * ip6_rt_cache_alloc()
2386 */
2387 dst_release(&nrt->dst);
2388
2389 out:
2390 neigh_release(neigh);
2391 }
2392
2393 /*
2394 * Misc support functions
2395 */
2396
2397 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
2398 {
2399 BUG_ON(from->dst.from);
2400
2401 rt->rt6i_flags &= ~RTF_EXPIRES;
2402 dst_hold(&from->dst);
2403 rt->dst.from = &from->dst;
2404 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
2405 }
2406
2407 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2408 {
2409 rt->dst.input = ort->dst.input;
2410 rt->dst.output = ort->dst.output;
2411 rt->rt6i_dst = ort->rt6i_dst;
2412 rt->dst.error = ort->dst.error;
2413 rt->rt6i_idev = ort->rt6i_idev;
2414 if (rt->rt6i_idev)
2415 in6_dev_hold(rt->rt6i_idev);
2416 rt->dst.lastuse = jiffies;
2417 rt->rt6i_gateway = ort->rt6i_gateway;
2418 rt->rt6i_flags = ort->rt6i_flags;
2419 rt6_set_from(rt, ort);
2420 rt->rt6i_metric = ort->rt6i_metric;
2421 #ifdef CONFIG_IPV6_SUBTREES
2422 rt->rt6i_src = ort->rt6i_src;
2423 #endif
2424 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
2425 rt->rt6i_table = ort->rt6i_table;
2426 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
2427 }
2428
2429 #ifdef CONFIG_IPV6_ROUTE_INFO
2430 static struct rt6_info *rt6_get_route_info(struct net *net,
2431 const struct in6_addr *prefix, int prefixlen,
2432 const struct in6_addr *gwaddr,
2433 struct net_device *dev)
2434 {
2435 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2436 int ifindex = dev->ifindex;
2437 struct fib6_node *fn;
2438 struct rt6_info *rt = NULL;
2439 struct fib6_table *table;
2440
2441 table = fib6_get_table(net, tb_id);
2442 if (!table)
2443 return NULL;
2444
2445 read_lock_bh(&table->tb6_lock);
2446 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2447 if (!fn)
2448 goto out;
2449
2450 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2451 if (rt->dst.dev->ifindex != ifindex)
2452 continue;
2453 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2454 continue;
2455 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2456 continue;
2457 dst_hold(&rt->dst);
2458 break;
2459 }
2460 out:
2461 read_unlock_bh(&table->tb6_lock);
2462 return rt;
2463 }
2464
2465 static struct rt6_info *rt6_add_route_info(struct net *net,
2466 const struct in6_addr *prefix, int prefixlen,
2467 const struct in6_addr *gwaddr,
2468 struct net_device *dev,
2469 unsigned int pref)
2470 {
2471 struct fib6_config cfg = {
2472 .fc_metric = IP6_RT_PRIO_USER,
2473 .fc_ifindex = dev->ifindex,
2474 .fc_dst_len = prefixlen,
2475 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2476 RTF_UP | RTF_PREF(pref),
2477 .fc_protocol = RTPROT_RA,
2478 .fc_nlinfo.portid = 0,
2479 .fc_nlinfo.nlh = NULL,
2480 .fc_nlinfo.nl_net = net,
2481 };
2482
2483 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2484 cfg.fc_dst = *prefix;
2485 cfg.fc_gateway = *gwaddr;
2486
2487 /* We should treat it as a default route if prefix length is 0. */
2488 if (!prefixlen)
2489 cfg.fc_flags |= RTF_DEFAULT;
2490
2491 ip6_route_add(&cfg, NULL);
2492
2493 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
2494 }
2495 #endif
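/* Example (informal): rt6_add_route_info() backs RFC 4191 Route
 * Information options.  An RA advertising, say, 2001:db8:1::/48 with
 * preference high reaches this code from the ndisc RA parser and is
 * installed as an RTF_ROUTEINFO | RTF_GATEWAY route in RT6_TABLE_INFO,
 * with RTF_PREF(pref) recording the advertised router preference.
 */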
2496
2497 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2498 {
2499 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
2500 struct rt6_info *rt;
2501 struct fib6_table *table;
2502
2503 table = fib6_get_table(dev_net(dev), tb_id);
2504 if (!table)
2505 return NULL;
2506
2507 read_lock_bh(&table->tb6_lock);
2508 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2509 if (dev == rt->dst.dev &&
2510 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2511 ipv6_addr_equal(&rt->rt6i_gateway, addr))
2512 break;
2513 }
2514 if (rt)
2515 dst_hold(&rt->dst);
2516 read_unlock_bh(&table->tb6_lock);
2517 return rt;
2518 }
2519
2520 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2521 struct net_device *dev,
2522 unsigned int pref)
2523 {
2524 struct fib6_config cfg = {
2525 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
2526 .fc_metric = IP6_RT_PRIO_USER,
2527 .fc_ifindex = dev->ifindex,
2528 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2529 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2530 .fc_protocol = RTPROT_RA,
2531 .fc_nlinfo.portid = 0,
2532 .fc_nlinfo.nlh = NULL,
2533 .fc_nlinfo.nl_net = dev_net(dev),
2534 };
2535
2536 cfg.fc_gateway = *gwaddr;
2537
2538 if (!ip6_route_add(&cfg, NULL)) {
2539 struct fib6_table *table;
2540
2541 table = fib6_get_table(dev_net(dev), cfg.fc_table);
2542 if (table)
2543 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
2544 }
2545
2546 return rt6_get_dflt_router(gwaddr, dev);
2547 }
2548
2549 static void __rt6_purge_dflt_routers(struct fib6_table *table)
2550 {
2551 struct rt6_info *rt;
2552
2553 restart:
2554 read_lock_bh(&table->tb6_lock);
2555 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2556 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2557 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2558 dst_hold(&rt->dst);
2559 read_unlock_bh(&table->tb6_lock);
2560 ip6_del_rt(rt);
2561 goto restart;
2562 }
2563 }
2564 read_unlock_bh(&table->tb6_lock);
2565
2566 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
2567 }
2568
2569 void rt6_purge_dflt_routers(struct net *net)
2570 {
2571 struct fib6_table *table;
2572 struct hlist_head *head;
2573 unsigned int h;
2574
2575 rcu_read_lock();
2576
2577 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
2578 head = &net->ipv6.fib_table_hash[h];
2579 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
2580 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
2581 __rt6_purge_dflt_routers(table);
2582 }
2583 }
2584
2585 rcu_read_unlock();
2586 }
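/* Example (informal): the accept_ra != 2 check above exempts
 * interfaces configured with e.g.
 *
 *	sysctl -w net.ipv6.conf.eth0.accept_ra=2
 *
 * (accept RAs even when forwarding is enabled), so their RA-learned
 * default routes survive a purge triggered by enabling forwarding.
 */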
2587
2588 static void rtmsg_to_fib6_config(struct net *net,
2589 struct in6_rtmsg *rtmsg,
2590 struct fib6_config *cfg)
2591 {
2592 memset(cfg, 0, sizeof(*cfg));
2593
2594 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
2595 : RT6_TABLE_MAIN;
2596 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2597 cfg->fc_metric = rtmsg->rtmsg_metric;
2598 cfg->fc_expires = rtmsg->rtmsg_info;
2599 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2600 cfg->fc_src_len = rtmsg->rtmsg_src_len;
2601 cfg->fc_flags = rtmsg->rtmsg_flags;
2602
2603 cfg->fc_nlinfo.nl_net = net;
2604
2605 cfg->fc_dst = rtmsg->rtmsg_dst;
2606 cfg->fc_src = rtmsg->rtmsg_src;
2607 cfg->fc_gateway = rtmsg->rtmsg_gateway;
2608 }
2609
2610 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2611 {
2612 struct fib6_config cfg;
2613 struct in6_rtmsg rtmsg;
2614 int err;
2615
2616 switch (cmd) {
2617 case SIOCADDRT: /* Add a route */
2618 case SIOCDELRT: /* Delete a route */
2619 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2620 return -EPERM;
2621 err = copy_from_user(&rtmsg, arg,
2622 sizeof(struct in6_rtmsg));
2623 if (err)
2624 return -EFAULT;
2625
2626 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2627
2628 rtnl_lock();
2629 switch (cmd) {
2630 case SIOCADDRT:
2631 err = ip6_route_add(&cfg, NULL);
2632 break;
2633 case SIOCDELRT:
2634 err = ip6_route_del(&cfg, NULL);
2635 break;
2636 default:
2637 err = -EINVAL;
2638 }
2639 rtnl_unlock();
2640
2641 return err;
2642 }
2643
2644 return -EINVAL;
2645 }
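/* Example (sketch of the legacy userspace side, not kernel code):
 *
 *	struct in6_rtmsg rtmsg;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	memset(&rtmsg, 0, sizeof(rtmsg));
 *	inet_pton(AF_INET6, "2001:db8::", &rtmsg.rtmsg_dst);
 *	rtmsg.rtmsg_dst_len = 64;
 *	inet_pton(AF_INET6, "fe80::1", &rtmsg.rtmsg_gateway);
 *	rtmsg.rtmsg_flags   = RTF_UP | RTF_GATEWAY;
 *	rtmsg.rtmsg_ifindex = if_nametoindex("eth0");
 *	ioctl(fd, SIOCADDRT, &rtmsg);
 *
 * This lands in the SIOCADDRT case above after the CAP_NET_ADMIN check
 * and the rtmsg_to_fib6_config() translation.
 */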
2646
2647 /*
2648 * Drop the packet on the floor
2649 */
2650
2651 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2652 {
2653 int type;
2654 struct dst_entry *dst = skb_dst(skb);
2655 switch (ipstats_mib_noroutes) {
2656 case IPSTATS_MIB_INNOROUTES:
2657 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2658 if (type == IPV6_ADDR_ANY) {
2659 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2660 IPSTATS_MIB_INADDRERRORS);
2661 break;
2662 }
2663 /* FALLTHROUGH */
2664 case IPSTATS_MIB_OUTNOROUTES:
2665 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2666 ipstats_mib_noroutes);
2667 break;
2668 }
2669 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2670 kfree_skb(skb);
2671 return 0;
2672 }
2673
2674 static int ip6_pkt_discard(struct sk_buff *skb)
2675 {
2676 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2677 }
2678
2679 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2680 {
2681 skb->dev = skb_dst(skb)->dev;
2682 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2683 }
2684
2685 static int ip6_pkt_prohibit(struct sk_buff *skb)
2686 {
2687 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2688 }
2689
2690 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2691 {
2692 skb->dev = skb_dst(skb)->dev;
2693 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2694 }
2695
2696 /*
2697 * Allocate a dst for a local (unicast / anycast) address.
2698 */
2699
2700 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2701 const struct in6_addr *addr,
2702 bool anycast)
2703 {
2704 u32 tb_id;
2705 struct net *net = dev_net(idev->dev);
2706 struct net_device *dev = net->loopback_dev;
2707 struct rt6_info *rt;
2708
2709 /* use the L3 master device as the loopback for host routes if the
2710 * device is enslaved and the address is not link-local or multicast
2711 */
2712 if (!rt6_need_strict(addr))
2713 dev = l3mdev_master_dev_rcu(idev->dev) ? : dev;
2714
2715 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
2716 if (!rt)
2717 return ERR_PTR(-ENOMEM);
2718
2719 in6_dev_hold(idev);
2720
2721 rt->dst.flags |= DST_HOST;
2722 rt->dst.input = ip6_input;
2723 rt->dst.output = ip6_output;
2724 rt->rt6i_idev = idev;
2725
2726 rt->rt6i_protocol = RTPROT_KERNEL;
2727 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2728 if (anycast)
2729 rt->rt6i_flags |= RTF_ANYCAST;
2730 else
2731 rt->rt6i_flags |= RTF_LOCAL;
2732
2733 rt->rt6i_gateway = *addr;
2734 rt->rt6i_dst.addr = *addr;
2735 rt->rt6i_dst.plen = 128;
2736 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
2737 rt->rt6i_table = fib6_get_table(net, tb_id);
2738
2739 return rt;
2740 }
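/* Example (informal): this is why every address added to an interface
 * also appears as a host route in the local table, e.g. after
 * "ip -6 addr add 2001:db8::5/64 dev eth0",
 * "ip -6 route show table local" lists a "local 2001:db8::5" /128
 * entry (or an "anycast" entry when the anycast flag is set).
 */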
2741
2742 /* remove a deleted IP from prefsrc entries */
2743 struct arg_dev_net_ip {
2744 struct net_device *dev;
2745 struct net *net;
2746 struct in6_addr *addr;
2747 };
2748
2749 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2750 {
2751 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2752 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2753 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2754
2755 if (((void *)rt->dst.dev == dev || !dev) &&
2756 rt != net->ipv6.ip6_null_entry &&
2757 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2758 /* remove prefsrc entry */
2759 rt->rt6i_prefsrc.plen = 0;
2760 }
2761 return 0;
2762 }
2763
2764 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2765 {
2766 struct net *net = dev_net(ifp->idev->dev);
2767 struct arg_dev_net_ip adni = {
2768 .dev = ifp->idev->dev,
2769 .net = net,
2770 .addr = &ifp->addr,
2771 };
2772 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2773 }
2774
2775 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2776 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2777
2778 /* Remove router entries and update dst entries when a gateway turns into a host. */
2779 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2780 {
2781 struct in6_addr *gateway = (struct in6_addr *)arg;
2782
2783 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2784 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2785 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2786 return -1;
2787 }
2788 return 0;
2789 }
2790
2791 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2792 {
2793 fib6_clean_all(net, fib6_clean_tohost, gateway);
2794 }
2795
2796 struct arg_dev_net {
2797 struct net_device *dev;
2798 struct net *net;
2799 };
2800
2801 /* called with the write lock held for the table containing rt */
2802 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2803 {
2804 const struct arg_dev_net *adn = arg;
2805 const struct net_device *dev = adn->dev;
2806
2807 if ((rt->dst.dev == dev || !dev) &&
2808 rt != adn->net->ipv6.ip6_null_entry &&
2809 (rt->rt6i_nsiblings == 0 ||
2810 (dev && netdev_unregistering(dev)) ||
2811 !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
2812 return -1;
2813
2814 return 0;
2815 }
2816
2817 void rt6_ifdown(struct net *net, struct net_device *dev)
2818 {
2819 struct arg_dev_net adn = {
2820 .dev = dev,
2821 .net = net,
2822 };
2823
2824 fib6_clean_all(net, fib6_ifdown, &adn);
2825 if (dev)
2826 rt6_uncached_list_flush_dev(net, dev);
2827 }
2828
2829 struct rt6_mtu_change_arg {
2830 struct net_device *dev;
2831 unsigned int mtu;
2832 };
2833
2834 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2835 {
2836 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2837 struct inet6_dev *idev;
2838
2839 /* In IPv6, PMTU discovery is not optional,
2840 so the RTAX_MTU lock cannot disable it.
2841 We still use this lock to block changes
2842 caused by addrconf/ndisc.
2843 */
2844
2845 idev = __in6_dev_get(arg->dev);
2846 if (!idev)
2847 return 0;
2848
2849 /* For an administrative MTU increase, there is no way to discover
2850 an IPv6 PMTU increase, so the PMTU should be updated here.
2851 Since RFC 1981 doesn't cover administrative MTU increases,
2852 updating the PMTU on an increase is a MUST (e.g. jumbo frames).
2853 */
2854 /*
2855 If the new MTU is less than the route PMTU, the new MTU will be
2856 the lowest MTU in the path; update the route PMTU to reflect the
2857 decrease. If the new MTU is greater than the route PMTU, and the
2858 old MTU was the lowest MTU in the path, update the route PMTU
2859 to reflect the increase. In that case, if another node's MTU is
2860 now the lowest in the path, a Packet Too Big message will drive
2861 PMTU discovery.
2862 */
2863 if (rt->dst.dev == arg->dev &&
2864 dst_metric_raw(&rt->dst, RTAX_MTU) &&
2865 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2866 if (rt->rt6i_flags & RTF_CACHE) {
2867 /* For RTF_CACHE with rt6i_pmtu == 0
2868 * (i.e. a redirected route),
2869 * the metrics of its rt->dst.from have already
2870 * been updated.
2871 */
2872 if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
2873 rt->rt6i_pmtu = arg->mtu;
2874 } else if (dst_mtu(&rt->dst) >= arg->mtu ||
2875 (dst_mtu(&rt->dst) < arg->mtu &&
2876 dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
2877 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2878 }
2879 }
2880 return 0;
2881 }
2882
2883 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2884 {
2885 struct rt6_mtu_change_arg arg = {
2886 .dev = dev,
2887 .mtu = mtu,
2888 };
2889
2890 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2891 }
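/* Worked example (informal): lowering eth0's MTU from 1500 to 1280
 * clamps affected routes with an effective MTU >= 1280 down to 1280
 * and caps larger cached PMTUs; raising it back to 1500 only lifts
 * routes still sitting at the old device MTU, since a smaller recorded
 * PMTU may have been learned from a node further along the path.
 */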
2892
2893 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2894 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2895 [RTA_OIF] = { .type = NLA_U32 },
2896 [RTA_IIF] = { .type = NLA_U32 },
2897 [RTA_PRIORITY] = { .type = NLA_U32 },
2898 [RTA_METRICS] = { .type = NLA_NESTED },
2899 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2900 [RTA_PREF] = { .type = NLA_U8 },
2901 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
2902 [RTA_ENCAP] = { .type = NLA_NESTED },
2903 [RTA_EXPIRES] = { .type = NLA_U32 },
2904 [RTA_UID] = { .type = NLA_U32 },
2905 [RTA_MARK] = { .type = NLA_U32 },
2906 };
2907
2908 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2909 struct fib6_config *cfg,
2910 struct netlink_ext_ack *extack)
2911 {
2912 struct rtmsg *rtm;
2913 struct nlattr *tb[RTA_MAX+1];
2914 unsigned int pref;
2915 int err;
2916
2917 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
2918 NULL);
2919 if (err < 0)
2920 goto errout;
2921
2922 err = -EINVAL;
2923 rtm = nlmsg_data(nlh);
2924 memset(cfg, 0, sizeof(*cfg));
2925
2926 cfg->fc_table = rtm->rtm_table;
2927 cfg->fc_dst_len = rtm->rtm_dst_len;
2928 cfg->fc_src_len = rtm->rtm_src_len;
2929 cfg->fc_flags = RTF_UP;
2930 cfg->fc_protocol = rtm->rtm_protocol;
2931 cfg->fc_type = rtm->rtm_type;
2932
2933 if (rtm->rtm_type == RTN_UNREACHABLE ||
2934 rtm->rtm_type == RTN_BLACKHOLE ||
2935 rtm->rtm_type == RTN_PROHIBIT ||
2936 rtm->rtm_type == RTN_THROW)
2937 cfg->fc_flags |= RTF_REJECT;
2938
2939 if (rtm->rtm_type == RTN_LOCAL)
2940 cfg->fc_flags |= RTF_LOCAL;
2941
2942 if (rtm->rtm_flags & RTM_F_CLONED)
2943 cfg->fc_flags |= RTF_CACHE;
2944
2945 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2946 cfg->fc_nlinfo.nlh = nlh;
2947 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2948
2949 if (tb[RTA_GATEWAY]) {
2950 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
2951 cfg->fc_flags |= RTF_GATEWAY;
2952 }
2953
2954 if (tb[RTA_DST]) {
2955 int plen = (rtm->rtm_dst_len + 7) >> 3;
2956
2957 if (nla_len(tb[RTA_DST]) < plen)
2958 goto errout;
2959
2960 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2961 }
2962
2963 if (tb[RTA_SRC]) {
2964 int plen = (rtm->rtm_src_len + 7) >> 3;
2965
2966 if (nla_len(tb[RTA_SRC]) < plen)
2967 goto errout;
2968
2969 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2970 }
2971
2972 if (tb[RTA_PREFSRC])
2973 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
2974
2975 if (tb[RTA_OIF])
2976 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2977
2978 if (tb[RTA_PRIORITY])
2979 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2980
2981 if (tb[RTA_METRICS]) {
2982 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2983 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2984 }
2985
2986 if (tb[RTA_TABLE])
2987 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2988
2989 if (tb[RTA_MULTIPATH]) {
2990 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2991 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2992
2993 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
2994 cfg->fc_mp_len, extack);
2995 if (err < 0)
2996 goto errout;
2997 }
2998
2999 if (tb[RTA_PREF]) {
3000 pref = nla_get_u8(tb[RTA_PREF]);
3001 if (pref != ICMPV6_ROUTER_PREF_LOW &&
3002 pref != ICMPV6_ROUTER_PREF_HIGH)
3003 pref = ICMPV6_ROUTER_PREF_MEDIUM;
3004 cfg->fc_flags |= RTF_PREF(pref);
3005 }
3006
3007 if (tb[RTA_ENCAP])
3008 cfg->fc_encap = tb[RTA_ENCAP];
3009
3010 if (tb[RTA_ENCAP_TYPE]) {
3011 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
3012
3013 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
3014 if (err < 0)
3015 goto errout;
3016 }
3017
3018 if (tb[RTA_EXPIRES]) {
3019 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
3020
3021 if (addrconf_finite_timeout(timeout)) {
3022 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
3023 cfg->fc_flags |= RTF_EXPIRES;
3024 }
3025 }
3026
3027 err = 0;
3028 errout:
3029 return err;
3030 }
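/* Example (sketch): the message this parser expects is the standard
 * RTM_NEWROUTE layout, e.g. for
 * "ip -6 route add 2001:db8::/64 via fe80::1 dev eth0":
 *
 *	struct rtmsg:	rtm_family AF_INET6, rtm_dst_len 64,
 *			rtm_type RTN_UNICAST, rtm_table RT_TABLE_MAIN
 *	RTA_DST		2001:db8::	(16 bytes -> cfg->fc_dst)
 *	RTA_GATEWAY	fe80::1		(16 bytes, sets RTF_GATEWAY)
 *	RTA_OIF		eth0's ifindex	(u32 -> cfg->fc_ifindex)
 */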
3031
3032 struct rt6_nh {
3033 struct rt6_info *rt6_info;
3034 struct fib6_config r_cfg;
3035 struct mx6_config mxc;
3036 struct list_head next;
3037 };
3038
3039 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
3040 {
3041 struct rt6_nh *nh;
3042
3043 list_for_each_entry(nh, rt6_nh_list, next) {
3044 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
3045 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
3046 nh->r_cfg.fc_ifindex);
3047 }
3048 }
3049
3050 static int ip6_route_info_append(struct list_head *rt6_nh_list,
3051 struct rt6_info *rt, struct fib6_config *r_cfg)
3052 {
3053 struct rt6_nh *nh;
3054 int err = -EEXIST;
3055
3056 list_for_each_entry(nh, rt6_nh_list, next) {
3057 /* check if rt6_info already exists */
3058 if (rt6_duplicate_nexthop(nh->rt6_info, rt))
3059 return err;
3060 }
3061
3062 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
3063 if (!nh)
3064 return -ENOMEM;
3065 nh->rt6_info = rt;
3066 err = ip6_convert_metrics(&nh->mxc, r_cfg);
3067 if (err) {
3068 kfree(nh);
3069 return err;
3070 }
3071 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
3072 list_add_tail(&nh->next, rt6_nh_list);
3073
3074 return 0;
3075 }
3076
3077 static void ip6_route_mpath_notify(struct rt6_info *rt,
3078 struct rt6_info *rt_last,
3079 struct nl_info *info,
3080 __u16 nlflags)
3081 {
3082 /* if this is an APPEND route, then rt points to the first route
3083 * inserted and rt_last points to the last route inserted. Userspace
3084 * wants a consistent dump of the route which starts at the first
3085 * nexthop. Since sibling routes are always added at the end of
3086 * the list, find the first sibling of the last route appended.
3087 */
3088 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
3089 rt = list_first_entry(&rt_last->rt6i_siblings,
3090 struct rt6_info,
3091 rt6i_siblings);
3092 }
3093
3094 if (rt)
3095 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
3096 }
3097
3098 static int ip6_route_multipath_add(struct fib6_config *cfg,
3099 struct netlink_ext_ack *extack)
3100 {
3101 struct rt6_info *rt_notif = NULL, *rt_last = NULL;
3102 struct nl_info *info = &cfg->fc_nlinfo;
3103 struct fib6_config r_cfg;
3104 struct rtnexthop *rtnh;
3105 struct rt6_info *rt;
3106 struct rt6_nh *err_nh;
3107 struct rt6_nh *nh, *nh_safe;
3108 __u16 nlflags;
3109 int remaining;
3110 int attrlen;
3111 int err = 1;
3112 int nhn = 0;
3113 int replace = (cfg->fc_nlinfo.nlh &&
3114 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
3115 LIST_HEAD(rt6_nh_list);
3116
3117 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
3118 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
3119 nlflags |= NLM_F_APPEND;
3120
3121 remaining = cfg->fc_mp_len;
3122 rtnh = (struct rtnexthop *)cfg->fc_mp;
3123
3124 /* Parse a Multipath Entry and build a list (rt6_nh_list) with
3125 * one rt6_info struct per nexthop
3126 */
3127 while (rtnh_ok(rtnh, remaining)) {
3128 memcpy(&r_cfg, cfg, sizeof(*cfg));
3129 if (rtnh->rtnh_ifindex)
3130 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3131
3132 attrlen = rtnh_attrlen(rtnh);
3133 if (attrlen > 0) {
3134 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3135
3136 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3137 if (nla) {
3138 r_cfg.fc_gateway = nla_get_in6_addr(nla);
3139 r_cfg.fc_flags |= RTF_GATEWAY;
3140 }
3141 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
3142 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
3143 if (nla)
3144 r_cfg.fc_encap_type = nla_get_u16(nla);
3145 }
3146
3147 rt = ip6_route_info_create(&r_cfg, extack);
3148 if (IS_ERR(rt)) {
3149 err = PTR_ERR(rt);
3150 rt = NULL;
3151 goto cleanup;
3152 }
3153
3154 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
3155 if (err) {
3156 dst_release_immediate(&rt->dst);
3157 goto cleanup;
3158 }
3159
3160 rtnh = rtnh_next(rtnh, &remaining);
3161 }
3162
3163 /* For add and replace, send one notification with all nexthops:
3164 * skip the per-route notification in fib6_add_rt2node and send
3165 * one with the full route when done
3166 */
3167 info->skip_notify = 1;
3168
3169 err_nh = NULL;
3170 list_for_each_entry(nh, &rt6_nh_list, next) {
3171 rt_last = nh->rt6_info;
3172 err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
3173 /* save reference to first route for notification */
3174 if (!rt_notif && !err)
3175 rt_notif = nh->rt6_info;
3176
3177 /* nh->rt6_info is used or freed at this point, reset to NULL */
3178 nh->rt6_info = NULL;
3179 if (err) {
3180 if (replace && nhn)
3181 ip6_print_replace_route_err(&rt6_nh_list);
3182 err_nh = nh;
3183 goto add_errout;
3184 }
3185
3186 /* Because each route is added as if it were a single route, we
3187 * clear these flags after the first nexthop: if there is a
3188 * collision, we have already failed to add the first nexthop, as
3189 * fib6_add_rt2node() has rejected it; when replacing, the old
3190 * nexthops have been replaced by the first new one, and the rest
3191 * should be appended to it.
3192 */
3193 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3194 NLM_F_REPLACE);
3195 nhn++;
3196 }
3197
3198 /* success ... tell user about new route */
3199 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3200 goto cleanup;
3201
3202 add_errout:
3203 /* send a notification for the routes that were added, so that
3204 * the delete notifications sent by ip6_route_del() are
3205 * coherent
3206 */
3207 if (rt_notif)
3208 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3209
3210 /* Delete routes that were already added */
3211 list_for_each_entry(nh, &rt6_nh_list, next) {
3212 if (err_nh == nh)
3213 break;
3214 ip6_route_del(&nh->r_cfg, extack);
3215 }
3216
3217 cleanup:
3218 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
3219 if (nh->rt6_info)
3220 dst_release_immediate(&nh->rt6_info->dst);
3221 kfree(nh->mxc.mx);
3222 list_del(&nh->next);
3223 kfree(nh);
3224 }
3225
3226 return err;
3227 }
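/* Example (informal): this is the path taken by
 *
 *	ip -6 route add 2001:db8::/64 \
 *		nexthop via fe80::1 dev eth0 \
 *		nexthop via fe80::2 dev eth0
 *
 * Each rtnexthop in RTA_MULTIPATH becomes its own rt6_info; the
 * entries are inserted one by one and linked as siblings inside
 * fib6_add_rt2node(), with a single RTM_NEWROUTE notification
 * covering all nexthops.
 */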
3228
3229 static int ip6_route_multipath_del(struct fib6_config *cfg,
3230 struct netlink_ext_ack *extack)
3231 {
3232 struct fib6_config r_cfg;
3233 struct rtnexthop *rtnh;
3234 int remaining;
3235 int attrlen;
3236 int err = 1, last_err = 0;
3237
3238 remaining = cfg->fc_mp_len;
3239 rtnh = (struct rtnexthop *)cfg->fc_mp;
3240
3241 /* Parse a Multipath Entry */
3242 while (rtnh_ok(rtnh, remaining)) {
3243 memcpy(&r_cfg, cfg, sizeof(*cfg));
3244 if (rtnh->rtnh_ifindex)
3245 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3246
3247 attrlen = rtnh_attrlen(rtnh);
3248 if (attrlen > 0) {
3249 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3250
3251 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3252 if (nla) {
3253 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
3254 r_cfg.fc_flags |= RTF_GATEWAY;
3255 }
3256 }
3257 err = ip6_route_del(&r_cfg, extack);
3258 if (err)
3259 last_err = err;
3260
3261 rtnh = rtnh_next(rtnh, &remaining);
3262 }
3263
3264 return last_err;
3265 }
3266
3267 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3268 struct netlink_ext_ack *extack)
3269 {
3270 struct fib6_config cfg;
3271 int err;
3272
3273 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3274 if (err < 0)
3275 return err;
3276
3277 if (cfg.fc_mp)
3278 return ip6_route_multipath_del(&cfg, extack);
3279 else {
3280 cfg.fc_delete_all_nh = 1;
3281 return ip6_route_del(&cfg, extack);
3282 }
3283 }
3284
3285 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3286 struct netlink_ext_ack *extack)
3287 {
3288 struct fib6_config cfg;
3289 int err;
3290
3291 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3292 if (err < 0)
3293 return err;
3294
3295 if (cfg.fc_mp)
3296 return ip6_route_multipath_add(&cfg, extack);
3297 else
3298 return ip6_route_add(&cfg, extack);
3299 }
3300
3301 static size_t rt6_nlmsg_size(struct rt6_info *rt)
3302 {
3303 int nexthop_len = 0;
3304
3305 if (rt->rt6i_nsiblings) {
3306 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
3307 + NLA_ALIGN(sizeof(struct rtnexthop))
3308 + nla_total_size(16) /* RTA_GATEWAY */
3309 + lwtunnel_get_encap_size(rt->dst.lwtstate);
3310
3311 nexthop_len *= rt->rt6i_nsiblings;
3312 }
3313
3314 return NLMSG_ALIGN(sizeof(struct rtmsg))
3315 + nla_total_size(16) /* RTA_SRC */
3316 + nla_total_size(16) /* RTA_DST */
3317 + nla_total_size(16) /* RTA_GATEWAY */
3318 + nla_total_size(16) /* RTA_PREFSRC */
3319 + nla_total_size(4) /* RTA_TABLE */
3320 + nla_total_size(4) /* RTA_IIF */
3321 + nla_total_size(4) /* RTA_OIF */
3322 + nla_total_size(4) /* RTA_PRIORITY */
3323 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
3324 + nla_total_size(sizeof(struct rta_cacheinfo))
3325 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
3326 + nla_total_size(1) /* RTA_PREF */
3327 + lwtunnel_get_encap_size(rt->dst.lwtstate)
3328 + nexthop_len;
3329 }
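/* Worked example (informal): with no lwtunnel encap, each nexthop in
 * the RTA_MULTIPATH estimate above costs nla_total_size(0) +
 * NLA_ALIGN(sizeof(struct rtnexthop)) + nla_total_size(16)
 * = 4 + 8 + 20 = 32 bytes.  The result is only an upper bound on what
 * rt6_fill_node() will emit, which is all nlmsg_new() needs.
 */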
3330
3331 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
3332 unsigned int *flags, bool skip_oif)
3333 {
3334 if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
3335 *flags |= RTNH_F_LINKDOWN;
3336 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
3337 *flags |= RTNH_F_DEAD;
3338 }
3339
3340 if (rt->rt6i_flags & RTF_GATEWAY) {
3341 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
3342 goto nla_put_failure;
3343 }
3344
3345 /* not needed for multipath encoding because it has a rtnexthop struct */
3346 if (!skip_oif && rt->dst.dev &&
3347 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
3348 goto nla_put_failure;
3349
3350 if (rt->dst.lwtstate &&
3351 lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
3352 goto nla_put_failure;
3353
3354 return 0;
3355
3356 nla_put_failure:
3357 return -EMSGSIZE;
3358 }
3359
3360 /* add multipath next hop */
3361 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
3362 {
3363 struct rtnexthop *rtnh;
3364 unsigned int flags = 0;
3365
3366 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
3367 if (!rtnh)
3368 goto nla_put_failure;
3369
3370 rtnh->rtnh_hops = 0;
3371 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
3372
3373 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
3374 goto nla_put_failure;
3375
3376 rtnh->rtnh_flags = flags;
3377
3378 /* length of rtnetlink header + attributes */
3379 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
3380
3381 return 0;
3382
3383 nla_put_failure:
3384 return -EMSGSIZE;
3385 }
3386
3387 static int rt6_fill_node(struct net *net,
3388 struct sk_buff *skb, struct rt6_info *rt,
3389 struct in6_addr *dst, struct in6_addr *src,
3390 int iif, int type, u32 portid, u32 seq,
3391 unsigned int flags)
3392 {
3393 u32 metrics[RTAX_MAX];
3394 struct rtmsg *rtm;
3395 struct nlmsghdr *nlh;
3396 long expires;
3397 u32 table;
3398
3399 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
3400 if (!nlh)
3401 return -EMSGSIZE;
3402
3403 rtm = nlmsg_data(nlh);
3404 rtm->rtm_family = AF_INET6;
3405 rtm->rtm_dst_len = rt->rt6i_dst.plen;
3406 rtm->rtm_src_len = rt->rt6i_src.plen;
3407 rtm->rtm_tos = 0;
3408 if (rt->rt6i_table)
3409 table = rt->rt6i_table->tb6_id;
3410 else
3411 table = RT6_TABLE_UNSPEC;
3412 rtm->rtm_table = table;
3413 if (nla_put_u32(skb, RTA_TABLE, table))
3414 goto nla_put_failure;
3415 if (rt->rt6i_flags & RTF_REJECT) {
3416 switch (rt->dst.error) {
3417 case -EINVAL:
3418 rtm->rtm_type = RTN_BLACKHOLE;
3419 break;
3420 case -EACCES:
3421 rtm->rtm_type = RTN_PROHIBIT;
3422 break;
3423 case -EAGAIN:
3424 rtm->rtm_type = RTN_THROW;
3425 break;
3426 default:
3427 rtm->rtm_type = RTN_UNREACHABLE;
3428 break;
3429 }
3430 } else if (rt->rt6i_flags & RTF_LOCAL)
3432 rtm->rtm_type = RTN_LOCAL;
3433 else if (rt->rt6i_flags & RTF_ANYCAST)
3434 rtm->rtm_type = RTN_ANYCAST;
3435 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
3436 rtm->rtm_type = RTN_LOCAL;
3437 else
3438 rtm->rtm_type = RTN_UNICAST;
3439 rtm->rtm_flags = 0;
3440 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
3441 rtm->rtm_protocol = rt->rt6i_protocol;
3442
3443 if (rt->rt6i_flags & RTF_CACHE)
3444 rtm->rtm_flags |= RTM_F_CLONED;
3445
3446 if (dst) {
3447 if (nla_put_in6_addr(skb, RTA_DST, dst))
3448 goto nla_put_failure;
3449 rtm->rtm_dst_len = 128;
3450 } else if (rtm->rtm_dst_len)
3451 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
3452 goto nla_put_failure;
3453 #ifdef CONFIG_IPV6_SUBTREES
3454 if (src) {
3455 if (nla_put_in6_addr(skb, RTA_SRC, src))
3456 goto nla_put_failure;
3457 rtm->rtm_src_len = 128;
3458 } else if (rtm->rtm_src_len &&
3459 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
3460 goto nla_put_failure;
3461 #endif
3462 if (iif) {
3463 #ifdef CONFIG_IPV6_MROUTE
3464 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
3465 int err = ip6mr_get_route(net, skb, rtm, portid);
3466
3467 if (err == 0)
3468 return 0;
3469 if (err < 0)
3470 goto nla_put_failure;
3471 } else
3472 #endif
3473 if (nla_put_u32(skb, RTA_IIF, iif))
3474 goto nla_put_failure;
3475 } else if (dst) {
3476 struct in6_addr saddr_buf;
3477 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
3478 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3479 goto nla_put_failure;
3480 }
3481
3482 if (rt->rt6i_prefsrc.plen) {
3483 struct in6_addr saddr_buf;
3484 saddr_buf = rt->rt6i_prefsrc.addr;
3485 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3486 goto nla_put_failure;
3487 }
3488
3489 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3490 if (rt->rt6i_pmtu)
3491 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
3492 if (rtnetlink_put_metrics(skb, metrics) < 0)
3493 goto nla_put_failure;
3494
3495 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
3496 goto nla_put_failure;
3497
3498 /* For multipath routes, walk the siblings list and add
3499 * each as a nexthop within RTA_MULTIPATH.
3500 */
3501 if (rt->rt6i_nsiblings) {
3502 struct rt6_info *sibling, *next_sibling;
3503 struct nlattr *mp;
3504
3505 mp = nla_nest_start(skb, RTA_MULTIPATH);
3506 if (!mp)
3507 goto nla_put_failure;
3508
3509 if (rt6_add_nexthop(skb, rt) < 0)
3510 goto nla_put_failure;
3511
3512 list_for_each_entry_safe(sibling, next_sibling,
3513 &rt->rt6i_siblings, rt6i_siblings) {
3514 if (rt6_add_nexthop(skb, sibling) < 0)
3515 goto nla_put_failure;
3516 }
3517
3518 nla_nest_end(skb, mp);
3519 } else {
3520 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
3521 goto nla_put_failure;
3522 }
3523
3524 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
3525
3526 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
3527 goto nla_put_failure;
3528
3529 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3530 goto nla_put_failure;
3531
3533 nlmsg_end(skb, nlh);
3534 return 0;
3535
3536 nla_put_failure:
3537 nlmsg_cancel(skb, nlh);
3538 return -EMSGSIZE;
3539 }
3540
3541 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
3542 {
3543 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
3544 struct net *net = arg->net;
3545
3546 if (rt == net->ipv6.ip6_null_entry)
3547 return 0;
3548
3549 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
3550 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
3551
3552 /* user wants prefix routes only */
3553 if (rtm->rtm_flags & RTM_F_PREFIX &&
3554 !(rt->rt6i_flags & RTF_PREFIX_RT)) {
3555 /* success since this is not a prefix route */
3556 return 1;
3557 }
3558 }
3559
3560 return rt6_fill_node(net,
3561 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
3562 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
3563 NLM_F_MULTI);
3564 }
3565
3566 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3567 struct netlink_ext_ack *extack)
3568 {
3569 struct net *net = sock_net(in_skb->sk);
3570 struct nlattr *tb[RTA_MAX+1];
3571 int err, iif = 0, oif = 0;
3572 struct dst_entry *dst;
3573 struct rt6_info *rt;
3574 struct sk_buff *skb;
3575 struct rtmsg *rtm;
3576 struct flowi6 fl6;
3577 bool fibmatch;
3578
3579 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
3580 extack);
3581 if (err < 0)
3582 goto errout;
3583
3584 err = -EINVAL;
3585 memset(&fl6, 0, sizeof(fl6));
3586 rtm = nlmsg_data(nlh);
3587 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
3588 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
3589
3590 if (tb[RTA_SRC]) {
3591 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
3592 goto errout;
3593
3594 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
3595 }
3596
3597 if (tb[RTA_DST]) {
3598 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
3599 goto errout;
3600
3601 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
3602 }
3603
3604 if (tb[RTA_IIF])
3605 iif = nla_get_u32(tb[RTA_IIF]);
3606
3607 if (tb[RTA_OIF])
3608 oif = nla_get_u32(tb[RTA_OIF]);
3609
3610 if (tb[RTA_MARK])
3611 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
3612
3613 if (tb[RTA_UID])
3614 fl6.flowi6_uid = make_kuid(current_user_ns(),
3615 nla_get_u32(tb[RTA_UID]));
3616 else
3617 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
3618
3619 if (iif) {
3620 struct net_device *dev;
3621 int flags = 0;
3622
3623 dev = __dev_get_by_index(net, iif);
3624 if (!dev) {
3625 err = -ENODEV;
3626 goto errout;
3627 }
3628
3629 fl6.flowi6_iif = iif;
3630
3631 if (!ipv6_addr_any(&fl6.saddr))
3632 flags |= RT6_LOOKUP_F_HAS_SADDR;
3633
3634 if (!fibmatch)
3635 dst = ip6_route_input_lookup(net, dev, &fl6, flags);
3636 } else {
3637 fl6.flowi6_oif = oif;
3638
3639 if (!fibmatch)
3640 dst = ip6_route_output(net, NULL, &fl6);
3641 }
3642
3643 if (fibmatch)
3644 dst = ip6_route_lookup(net, &fl6, 0);
3645
3646 rt = container_of(dst, struct rt6_info, dst);
3647 if (rt->dst.error) {
3648 err = rt->dst.error;
3649 ip6_rt_put(rt);
3650 goto errout;
3651 }
3652
3653 if (rt == net->ipv6.ip6_null_entry) {
3654 err = rt->dst.error;
3655 ip6_rt_put(rt);
3656 goto errout;
3657 }
3658
3659 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3660 if (!skb) {
3661 ip6_rt_put(rt);
3662 err = -ENOBUFS;
3663 goto errout;
3664 }
3665
3666 skb_dst_set(skb, &rt->dst);
3667 if (fibmatch)
3668 err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
3669 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3670 nlh->nlmsg_seq, 0);
3671 else
3672 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
3673 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3674 nlh->nlmsg_seq, 0);
3675 if (err < 0) {
3676 kfree_skb(skb);
3677 goto errout;
3678 }
3679
3680 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3681 errout:
3682 return err;
3683 }
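/* Example (informal): "ip -6 route get 2001:db8::1" exercises the
 * RTA_DST + ip6_route_output() path above and reports the dst that
 * would actually be used; adding iproute2's fibmatch keyword sets
 * RTM_F_FIB_MATCH, so the matching FIB entry from ip6_route_lookup()
 * is returned instead of any cached clone.
 */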
3684
3685 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
3686 unsigned int nlm_flags)
3687 {
3688 struct sk_buff *skb;
3689 struct net *net = info->nl_net;
3690 u32 seq;
3691 int err;
3692
3693 err = -ENOBUFS;
3694 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3695
3696 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3697 if (!skb)
3698 goto errout;
3699
3700 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3701 event, info->portid, seq, nlm_flags);
3702 if (err < 0) {
3703 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3704 WARN_ON(err == -EMSGSIZE);
3705 kfree_skb(skb);
3706 goto errout;
3707 }
3708 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3709 info->nlh, gfp_any());
3710 return;
3711 errout:
3712 if (err < 0)
3713 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
3714 }
3715
3716 static int ip6_route_dev_notify(struct notifier_block *this,
3717 unsigned long event, void *ptr)
3718 {
3719 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3720 struct net *net = dev_net(dev);
3721
3722 if (!(dev->flags & IFF_LOOPBACK))
3723 return NOTIFY_OK;
3724
3725 if (event == NETDEV_REGISTER) {
3726 net->ipv6.ip6_null_entry->dst.dev = dev;
3727 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
3728 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3729 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
3730 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
3731 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
3732 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
3733 #endif
3734 } else if (event == NETDEV_UNREGISTER &&
3735 dev->reg_state != NETREG_UNREGISTERED) {
3736 /* NETDEV_UNREGISTER can be fired multiple times by
3737 * netdev_wait_allrefs(). Make sure we only do this once.
3738 */
3739 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
3740 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3741 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
3742 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
3743 #endif
3744 }
3745
3746 return NOTIFY_OK;
3747 }
3748
3749 /*
3750 * /proc
3751 */
3752
3753 #ifdef CONFIG_PROC_FS
3754
3755 static const struct file_operations ipv6_route_proc_fops = {
3756 .owner = THIS_MODULE,
3757 .open = ipv6_route_open,
3758 .read = seq_read,
3759 .llseek = seq_lseek,
3760 .release = seq_release_net,
3761 };
3762
3763 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
3764 {
3765 struct net *net = (struct net *)seq->private;
3766 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
3767 net->ipv6.rt6_stats->fib_nodes,
3768 net->ipv6.rt6_stats->fib_route_nodes,
3769 net->ipv6.rt6_stats->fib_rt_alloc,
3770 net->ipv6.rt6_stats->fib_rt_entries,
3771 net->ipv6.rt6_stats->fib_rt_cache,
3772 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
3773 net->ipv6.rt6_stats->fib_discarded_routes);
3774
3775 return 0;
3776 }
3777
3778 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
3779 {
3780 return single_open_net(inode, file, rt6_stats_seq_show);
3781 }
3782
3783 static const struct file_operations rt6_stats_seq_fops = {
3784 .owner = THIS_MODULE,
3785 .open = rt6_stats_seq_open,
3786 .read = seq_read,
3787 .llseek = seq_lseek,
3788 .release = single_release_net,
3789 };
3790 #endif /* CONFIG_PROC_FS */
3791
3792 #ifdef CONFIG_SYSCTL
3793
3794 static
3795 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
3796 void __user *buffer, size_t *lenp, loff_t *ppos)
3797 {
3798 struct net *net;
3799 int delay;
3800 if (!write)
3801 return -EINVAL;
3802
3803 net = (struct net *)ctl->extra1;
3804 delay = net->ipv6.sysctl.flush_delay;
3805 proc_dointvec(ctl, write, buffer, lenp, ppos);
3806 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
3807 return 0;
3808 }
3809
3810 struct ctl_table ipv6_route_table_template[] = {
3811 {
3812 .procname = "flush",
3813 .data = &init_net.ipv6.sysctl.flush_delay,
3814 .maxlen = sizeof(int),
3815 .mode = 0200,
3816 .proc_handler = ipv6_sysctl_rtcache_flush
3817 },
3818 {
3819 .procname = "gc_thresh",
3820 .data = &ip6_dst_ops_template.gc_thresh,
3821 .maxlen = sizeof(int),
3822 .mode = 0644,
3823 .proc_handler = proc_dointvec,
3824 },
3825 {
3826 .procname = "max_size",
3827 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
3828 .maxlen = sizeof(int),
3829 .mode = 0644,
3830 .proc_handler = proc_dointvec,
3831 },
3832 {
3833 .procname = "gc_min_interval",
3834 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3835 .maxlen = sizeof(int),
3836 .mode = 0644,
3837 .proc_handler = proc_dointvec_jiffies,
3838 },
3839 {
3840 .procname = "gc_timeout",
3841 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
3842 .maxlen = sizeof(int),
3843 .mode = 0644,
3844 .proc_handler = proc_dointvec_jiffies,
3845 },
3846 {
3847 .procname = "gc_interval",
3848 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
3849 .maxlen = sizeof(int),
3850 .mode = 0644,
3851 .proc_handler = proc_dointvec_jiffies,
3852 },
3853 {
3854 .procname = "gc_elasticity",
3855 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
3856 .maxlen = sizeof(int),
3857 .mode = 0644,
3858 .proc_handler = proc_dointvec,
3859 },
3860 {
3861 .procname = "mtu_expires",
3862 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
3863 .maxlen = sizeof(int),
3864 .mode = 0644,
3865 .proc_handler = proc_dointvec_jiffies,
3866 },
3867 {
3868 .procname = "min_adv_mss",
3869 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
3870 .maxlen = sizeof(int),
3871 .mode = 0644,
3872 .proc_handler = proc_dointvec,
3873 },
3874 {
3875 .procname = "gc_min_interval_ms",
3876 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3877 .maxlen = sizeof(int),
3878 .mode = 0644,
3879 .proc_handler = proc_dointvec_ms_jiffies,
3880 },
3881 { }
3882 };
3883
3884 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
3885 {
3886 struct ctl_table *table;
3887
3888 table = kmemdup(ipv6_route_table_template,
3889 sizeof(ipv6_route_table_template),
3890 GFP_KERNEL);
3891
3892 if (table) {
3893 table[0].data = &net->ipv6.sysctl.flush_delay;
3894 table[0].extra1 = net;
3895 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
3896 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
3897 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3898 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
3899 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
3900 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
3901 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
3902 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
3903 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3904
3905 /* Don't export sysctls to unprivileged users */
3906 if (net->user_ns != &init_user_ns)
3907 table[0].procname = NULL;
3908 }
3909
3910 return table;
3911 }
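/* Example (informal): the template above surfaces as
 * /proc/sys/net/ipv6/route/*.  Writing the flush entry, e.g.
 *
 *	echo 1 > /proc/sys/net/ipv6/route/flush
 *
 * runs ipv6_sysctl_rtcache_flush() and forces a fib6_run_gc() pass;
 * the other entries tune the dst garbage collector and PMTU expiry.
 */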
3912 #endif
3913
3914 static int __net_init ip6_route_net_init(struct net *net)
3915 {
3916 int ret = -ENOMEM;
3917
3918 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3919 sizeof(net->ipv6.ip6_dst_ops));
3920
3921 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3922 goto out_ip6_dst_ops;
3923
3924 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3925 sizeof(*net->ipv6.ip6_null_entry),
3926 GFP_KERNEL);
3927 if (!net->ipv6.ip6_null_entry)
3928 goto out_ip6_dst_entries;
3929 net->ipv6.ip6_null_entry->dst.path =
3930 (struct dst_entry *)net->ipv6.ip6_null_entry;
3931 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3932 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3933 ip6_template_metrics, true);
3934
3935 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3936 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3937 sizeof(*net->ipv6.ip6_prohibit_entry),
3938 GFP_KERNEL);
3939 if (!net->ipv6.ip6_prohibit_entry)
3940 goto out_ip6_null_entry;
3941 net->ipv6.ip6_prohibit_entry->dst.path =
3942 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3943 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3944 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3945 ip6_template_metrics, true);
3946
3947 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3948 sizeof(*net->ipv6.ip6_blk_hole_entry),
3949 GFP_KERNEL);
3950 if (!net->ipv6.ip6_blk_hole_entry)
3951 goto out_ip6_prohibit_entry;
3952 net->ipv6.ip6_blk_hole_entry->dst.path =
3953 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3954 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3955 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3956 ip6_template_metrics, true);
3957 #endif
3958
3959 net->ipv6.sysctl.flush_delay = 0;
3960 net->ipv6.sysctl.ip6_rt_max_size = 4096;
3961 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3962 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3963 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3964 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3965 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3966 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3967
3968 net->ipv6.ip6_rt_gc_expire = 30*HZ;
3969
3970 ret = 0;
3971 out:
3972 return ret;
3973
3974 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3975 out_ip6_prohibit_entry:
3976 kfree(net->ipv6.ip6_prohibit_entry);
3977 out_ip6_null_entry:
3978 kfree(net->ipv6.ip6_null_entry);
3979 #endif
3980 out_ip6_dst_entries:
3981 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3982 out_ip6_dst_ops:
3983 goto out;
3984 }
3985
3986 static void __net_exit ip6_route_net_exit(struct net *net)
3987 {
3988 kfree(net->ipv6.ip6_null_entry);
3989 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3990 kfree(net->ipv6.ip6_prohibit_entry);
3991 kfree(net->ipv6.ip6_blk_hole_entry);
3992 #endif
3993 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3994 }
3995
3996 static int __net_init ip6_route_net_init_late(struct net *net)
3997 {
3998 #ifdef CONFIG_PROC_FS
3999 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
4000 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
4001 #endif
4002 return 0;
4003 }
4004
4005 static void __net_exit ip6_route_net_exit_late(struct net *net)
4006 {
4007 #ifdef CONFIG_PROC_FS
4008 remove_proc_entry("ipv6_route", net->proc_net);
4009 remove_proc_entry("rt6_stats", net->proc_net);
4010 #endif
4011 }
4012
4013 static struct pernet_operations ip6_route_net_ops = {
4014 .init = ip6_route_net_init,
4015 .exit = ip6_route_net_exit,
4016 };
4017
4018 static int __net_init ipv6_inetpeer_init(struct net *net)
4019 {
4020 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
4021
4022 if (!bp)
4023 return -ENOMEM;
4024 inet_peer_base_init(bp);
4025 net->ipv6.peers = bp;
4026 return 0;
4027 }
4028
4029 static void __net_exit ipv6_inetpeer_exit(struct net *net)
4030 {
4031 struct inet_peer_base *bp = net->ipv6.peers;
4032
4033 net->ipv6.peers = NULL;
4034 inetpeer_invalidate_tree(bp);
4035 kfree(bp);
4036 }
4037
4038 static struct pernet_operations ipv6_inetpeer_ops = {
4039 .init = ipv6_inetpeer_init,
4040 .exit = ipv6_inetpeer_exit,
4041 };
4042
4043 static struct pernet_operations ip6_route_net_late_ops = {
4044 .init = ip6_route_net_init_late,
4045 .exit = ip6_route_net_exit_late,
4046 };
4047
4048 static struct notifier_block ip6_route_dev_notifier = {
4049 .notifier_call = ip6_route_dev_notify,
4050 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
4051 };
4052
4053 void __init ip6_route_init_special_entries(void)
4054 {
4055 /* Registration of the loopback device happens before this code runs,
4056 * so the loopback reference in rt6_info will not be taken; do it
4057 * manually for init_net */
4058 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
4059 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4060 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4061 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
4062 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4063 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
4064 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4065 #endif
4066 }
4067
4068 int __init ip6_route_init(void)
4069 {
4070 int ret;
4071 int cpu;
4072
4073 ret = -ENOMEM;
4074 ip6_dst_ops_template.kmem_cachep =
4075 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
4076 SLAB_HWCACHE_ALIGN, NULL);
4077 if (!ip6_dst_ops_template.kmem_cachep)
4078 goto out;
4079
4080 ret = dst_entries_init(&ip6_dst_blackhole_ops);
4081 if (ret)
4082 goto out_kmem_cache;
4083
4084 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
4085 if (ret)
4086 goto out_dst_entries;
4087
4088 ret = register_pernet_subsys(&ip6_route_net_ops);
4089 if (ret)
4090 goto out_register_inetpeer;
4091
4092 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
4093
4094 ret = fib6_init();
4095 if (ret)
4096 goto out_register_subsys;
4097
4098 ret = xfrm6_init();
4099 if (ret)
4100 goto out_fib6_init;
4101
4102 ret = fib6_rules_init();
4103 if (ret)
4104 goto xfrm6_init;
4105
4106 ret = register_pernet_subsys(&ip6_route_net_late_ops);
4107 if (ret)
4108 goto fib6_rules_init;
4109
4110 ret = -ENOBUFS;
4111 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
4112 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
4113 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
4114 goto out_register_late_subsys;
4115
4116 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
4117 if (ret)
4118 goto out_register_late_subsys;
4119
4120 for_each_possible_cpu(cpu) {
4121 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
4122
4123 INIT_LIST_HEAD(&ul->head);
4124 spin_lock_init(&ul->lock);
4125 }
4126
4127 out:
4128 return ret;
4129
4130 out_register_late_subsys:
4131 unregister_pernet_subsys(&ip6_route_net_late_ops);
4132 fib6_rules_init:
4133 fib6_rules_cleanup();
4134 xfrm6_init:
4135 xfrm6_fini();
4136 out_fib6_init:
4137 fib6_gc_cleanup();
4138 out_register_subsys:
4139 unregister_pernet_subsys(&ip6_route_net_ops);
4140 out_register_inetpeer:
4141 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4142 out_dst_entries:
4143 dst_entries_destroy(&ip6_dst_blackhole_ops);
4144 out_kmem_cache:
4145 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4146 goto out;
4147 }
4148
4149 void ip6_route_cleanup(void)
4150 {
4151 unregister_netdevice_notifier(&ip6_route_dev_notifier);
4152 unregister_pernet_subsys(&ip6_route_net_late_ops);
4153 fib6_rules_cleanup();
4154 xfrm6_fini();
4155 fib6_gc_cleanup();
4156 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4157 unregister_pernet_subsys(&ip6_route_net_ops);
4158 dst_entries_destroy(&ip6_dst_blackhole_ops);
4159 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4160 }