net/ipv6/route.c
/*
 * Linux INET6 implementation
 * FIB front-end.
 *
 * Authors:
 * Pedro Roque <roque@di.fc.ul.pt>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* Changes:
 *
 * YOSHIFUJI Hideaki @USAGI
 * reworked default router selection.
 * - respect outgoing interface
 * - select from (probably) reachable routers (i.e.
 *   routers in REACHABLE, STALE, DELAY or PROBE states).
 * - always select the same router if it is (probably)
 *   reachable. otherwise, round-robin the list.
 * Ville Nuorvala
 * Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>

#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t lock;
	struct list_head head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
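/* Uncached (DST_NOCACHE) routes are kept on per-cpu lists so that
 * rt6_uncached_list_flush_dev() can find them and re-point them at
 * the loopback device when the device they reference goes away.
 */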
static void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->dst.flags |= DST_NOCACHE;
	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

static void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
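/* Walk every CPU's uncached list and migrate any route still
 * referencing @dev (via rt6i_idev or dst.dev) over to the
 * namespace's loopback device.
 */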
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
	return dst_metrics_write_ptr(rt->dst.from);
}
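/* Pick the copy-on-write strategy for a route's metrics: per-cpu
 * copies (RTF_PCPU) write through to the entry they were cloned from,
 * RTF_CACHE clones are never COWed here, and everything else takes
 * the generic COW path.
 */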
static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt->rt6i_flags & RTF_PCPU)
		return rt6_pcpu_cow_metrics(rt);
	else if (rt->rt6i_flags & RTF_CACHE)
		return NULL;
	else
		return dst_cow_metrics_generic(dst, old);
}

static inline const void *choose_neigh_daddr(struct rt6_info *rt,
					     struct sk_buff *skb,
					     const void *daddr)
{
	struct in6_addr *p = &rt->rt6i_gateway;

	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
					  struct sk_buff *skb,
					  const void *daddr)
{
	struct rt6_info *rt = (struct rt6_info *) dst;
	struct neighbour *n;

	daddr = choose_neigh_daddr(rt, skb, daddr);
	n = __ipv6_neigh_lookup(dst->dev, daddr);
	if (n)
		return n;
	return neigh_create(&nd_tbl, daddr, dst->dev);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
	.family			= AF_INET6,
	.gc			= ip6_dst_gc,
	.gc_thresh		= 1024,
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.mtu			= ip6_mtu,
	.cow_metrics		= ipv6_cow_metrics,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.destroy		= ip6_dst_destroy,
	.check			= ip6_dst_check,
	.mtu			= ip6_blackhole_mtu,
	.default_advmss		= ip6_default_advmss,
	.update_pmtu		= ip6_rt_blackhole_update_pmtu,
	.redirect		= ip6_rt_blackhole_redirect,
	.cow_metrics		= dst_cow_metrics_generic,
	.neigh_lookup		= ip6_neigh_lookup,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_siblings);
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
					struct net_device *dev,
					int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					0, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt)
		rt6_info_init(rt);

	return rt;
}

struct rt6_info *ip6_dst_alloc(struct net *net,
			       struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

	if (rt) {
		rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
		if (rt->rt6i_pcpu) {
			int cpu;

			for_each_possible_cpu(cpu) {
				struct rt6_info **p;

				p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
				/* no one shares rt */
				*p = NULL;
			}
		} else {
			dst_destroy((struct dst_entry *)rt);
			return NULL;
		}
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct dst_entry *from = dst->from;
	struct inet6_dev *idev;

	dst_destroy_metrics_generic(dst);
	free_percpu(rt->rt6i_pcpu);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	dst->from = NULL;
	dst_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (dev != loopback_dev) {
		if (idev && idev->dev == dev) {
			struct inet6_dev *loopback_idev =
				in6_dev_get(loopback_dev);
			if (loopback_idev) {
				rt->rt6i_idev = loopback_idev;
				in6_dev_put(idev);
			}
		}
	}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (rt->dst.from) {
		return rt6_check_expired((struct rt6_info *) rt->dst.from);
	}
	return false;
}

/* Multipath route selection:
 *   Hash based function using packet header and flowlabel.
 * Adapted from fib_info_hashfn()
 */
static int rt6_info_hash_nhsfn(unsigned int candidate_count,
			       const struct flowi6 *fl6)
{
	return get_hash_from_flowi6(fl6) % candidate_count;
}
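/* Walk route_chosen steps along the siblings list and switch to that
 * sibling, unless it scores negatively under the current strictness,
 * in which case the original match is kept.
 */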
static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
					     struct flowi6 *fl6, int oif,
					     int strict)
{
	struct rt6_info *sibling, *next_sibling;
	int route_chosen;

	route_chosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
	/* Don't change the route if route_chosen == 0
	 * (the siblings list does not include this route itself)
	 */
	if (route_chosen)
		list_for_each_entry_safe(sibling, next_sibling,
				&match->rt6i_siblings, rt6i_siblings) {
			route_chosen--;
			if (route_chosen == 0) {
				if (rt6_score_route(sibling, oif, strict) < 0)
					break;
				match = sibling;
				break;
			}
		}
	return match;
}
/*
 * Route lookup. Any table->tb6_lock is implied.
 */

static inline struct rt6_info *rt6_device_match(struct net *net,
						struct rt6_info *rt,
						const struct in6_addr *saddr,
						int oif,
						int flags)
{
	struct rt6_info *local = NULL;
	struct rt6_info *sprt;

	if (!oif && ipv6_addr_any(saddr))
		goto out;

	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
		struct net_device *dev = sprt->dst.dev;

		if (oif) {
			if (dev->ifindex == oif)
				return sprt;
			if (dev->flags & IFF_LOOPBACK) {
				if (!sprt->rt6i_idev ||
				    sprt->rt6i_idev->dev->ifindex != oif) {
					if (flags & RT6_LOOKUP_F_IFACE)
						continue;
					if (local &&
					    local->rt6i_idev->dev->ifindex == oif)
						continue;
				}
				local = sprt;
			}
		} else {
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))
				return sprt;
		}
	}

	if (oif) {
		if (local)
			return local;

		if (flags & RT6_LOOKUP_F_IFACE)
			return net->ipv6.ip6_null_entry;
	}
out:
	return rt;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}

static void rt6_probe(struct rt6_info *rt)
{
	struct __rt6_probe_work *work;
	struct neighbour *neigh;
	/*
	 * Okay, this does not seem to be appropriate for now;
	 * however, we need to check if it is really so,
	 * i.e. Router Reachability Probing.
	 *
	 * Router Reachability Probes MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
		return;
	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		work = NULL;
		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated +
			       rt->rt6i_idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = rt->rt6i_gateway;
		dev_hold(rt->dst.dev);
		work->dev = rt->dst.dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
	struct net_device *dev = rt->dst.dev;
	if (!oif || dev->ifindex == oif)
		return 2;
	if ((dev->flags & IFF_LOOPBACK) &&
	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
		return 1;
	return 0;
}
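/* Grade the nexthop's neighbour entry for rt6_score_route(): routes
 * without a gateway nexthop always succeed; otherwise NUD_VALID
 * succeeds, and the RT6_NUD_FAIL_* codes ask the caller to probe or
 * to fall back to round-robin.
 */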
static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
	struct neighbour *neigh;
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

	if (rt->rt6i_flags & RTF_NONEXTHOP ||
	    !(rt->rt6i_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

static int rt6_score_route(struct rt6_info *rt, int oif,
			   int strict)
{
	int m;

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
		if (n < 0)
			return n;
	}
	return m;
}

static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
				   int *mpri, struct rt6_info *match,
				   bool *do_rr)
{
	int m;
	bool match_do_rr = false;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *dev = rt->dst.dev;

	if (dev && !netif_carrier_ok(dev) &&
	    idev->cnf.ignore_routes_with_linkdown &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	if (rt6_check_expired(rt))
		goto out;

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		match = rt;
	}
out:
	return match;
}
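/* Scan the routes that share @metric, starting at @rr_head and
 * wrapping around to fn->leaf, and keep the best-scoring one; routes
 * with a different metric (the "cont" list) are only considered when
 * nothing else matched.
 */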
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
				     struct rt6_info *rr_head,
				     u32 metric, int oif, int strict,
				     bool *do_rr)
{
	struct rt6_info *rt, *match, *cont;
	int mpri = -1;

	match = NULL;
	cont = NULL;
	for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	if (match || !cont)
		return match;

	for (rt = cont; rt; rt = rt->dst.rt6_next)
		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	return match;
}
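/* Pick the route to use from this fib6 node, advancing the node's
 * round-robin pointer (fn->rr_ptr) when find_rr_leaf() requests it.
 */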
static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
{
	struct rt6_info *match, *rt0;
	struct net *net;
	bool do_rr = false;

	rt0 = fn->rr_ptr;
	if (!rt0)
		fn->rr_ptr = rt0 = fn->leaf;

	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
			     &do_rr);

	if (do_rr) {
		struct rt6_info *next = rt0->dst.rt6_next;

		/* no entries matched; do round-robin */
		if (!next || next->rt6i_metric != rt0->rt6i_metric)
			next = fn->leaf;

		if (next != rt0)
			fn->rr_ptr = next;
	}

	net = dev_net(rt0->dst.dev);
	return match ? match : net->ipv6.ip6_null_entry;
}

static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct rt6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->rt6i_flags = RTF_ROUTEINFO |
				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			rt6_clean_expires(rt);
		else
			rt6_set_expires(rt, jiffies + HZ * lifetime);

		ip6_rt_put(rt);
	}
	return 0;
}
#endif
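/* Walk back up the fib6 tree, descending into source-routing subtrees
 * where present, until a node carrying route info (RTN_RTINFO) is
 * found; returns NULL once the tree root is reached.
 */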
static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = fn->parent;
		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn;
	struct rt6_info *rt;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	rt = fn->leaf;
	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}
	dst_use(&rt->dst, jiffies);
	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   int flags)
{
	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with FREE table->tb6_lock.
   It takes a new route entry; if the addition fails for any reason,
   the route is freed. In any case, if the caller does not hold a
   reference, the route may be destroyed.
 */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
			struct mx6_config *mxc)
{
	int err;
	struct fib6_table *table;

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, mxc);
	write_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct rt6_info *rt)
{
	struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
	struct mx6_config mxc = { .mx = NULL, };

	return __ip6_ins_rt(rt, &info, &mxc);
}

static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct rt6_info *rt;

	/*
	 * Clone the route.
	 */

	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = (struct rt6_info *)ort->dst.from;

	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);

	if (!rt)
		return NULL;

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_metric = 0;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->rt6i_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt;

	pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
				  rt->dst.dev, rt->dst.flags);

	if (!pcpu_rt)
		return NULL;
	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

/* It should be called with read_lock_bh(&tb6_lock) acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

	if (pcpu_rt) {
		dst_hold(&pcpu_rt->dst);
		rt6_dst_from_metrics_check(pcpu_rt);
	}
	return pcpu_rt;
}
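/* Allocate a per-cpu copy of @rt and install it in this CPU's slot
 * with cmpxchg(); if a copy was installed in the meantime, that one
 * is returned instead. If @rt was removed from the tree before the
 * lock was re-taken, @rt itself is handed back and the next
 * dst_check() forces a re-lookup.
 */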
static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
	struct fib6_table *table = rt->rt6i_table;
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		struct net *net = dev_net(rt->dst.dev);

		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	read_lock_bh(&table->tb6_lock);
	if (rt->rt6i_pcpu) {
		p = this_cpu_ptr(rt->rt6i_pcpu);
		prev = cmpxchg(p, NULL, pcpu_rt);
		if (prev) {
			/* If someone did it before us, return prev instead */
			dst_destroy(&pcpu_rt->dst);
			pcpu_rt = prev;
		}
	} else {
		/* rt has been removed from the fib6 tree
		 * before we had a chance to acquire the read_lock.
		 * In this case, don't bother to create a pcpu rt
		 * since rt is going away anyway. The next
		 * dst_check() will trigger a re-lookup.
		 */
		dst_destroy(&pcpu_rt->dst);
		pcpu_rt = rt;
	}
	dst_hold(&pcpu_rt->dst);
	rt6_dst_from_metrics_check(pcpu_rt);
	read_unlock_bh(&table->tb6_lock);
	return pcpu_rt;
}
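/* Core policy-routing lookup. Depending on the matched entry, this
 * returns the null/RTF_CACHE entry directly, an uncached RTF_CACHE
 * clone (for FLOWI_FLAG_KNOWN_NH without a gateway), or a per-cpu
 * copy of the tree entry.
 */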
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	read_lock_bh(&table->tb6_lock);

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt = rt6_select(fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(rt, fl6, oif, strict);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		rt6_dst_from_metrics_check(rt);

		trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */

		struct rt6_info *uncached_rt;

		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

		if (uncached_rt)
			rt6_uncached_list_add(uncached_rt);
		else
			uncached_rt = net->ipv6.ip6_null_entry;

		dst_hold(&uncached_rt->dst);

		trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
		return uncached_rt;

	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		rt->dst.lastuse = jiffies;
		rt->dst.__use++;
		pcpu_rt = rt6_get_pcpu_route(rt);

		if (pcpu_rt) {
			read_unlock_bh(&table->tb6_lock);
		} else {
			/* We have to do the read_unlock first
			 * because rt6_make_pcpu_route() may trigger
			 * ip6_dst_gc() which will take the write_lock.
			 */
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);
			pcpu_rt = rt6_make_pcpu_route(rt);
			dst_release(&rt->dst);
		}

		trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);

static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
					    struct flowi6 *fl6, int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6, int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
	skb_dst_drop(skb);
	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}

static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);

struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		rt6_info_init(rt);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);
		rt->rt6i_idev = ort->rt6i_idev;
		if (rt->rt6i_idev)
			in6_dev_hold(rt->rt6i_idev);

		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif

		dst_free(new);
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

/*
 * Destination cache support functions
 */

static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
	if (rt->dst.from &&
	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
		dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
}

static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
	if (!__rt6_check_expired(rt) &&
	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
		return &rt->dst;
	else
		return NULL;
}

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rt6_info *rt;

	rt = (struct rt6_info *) dst;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	rt6_dst_from_metrics_check(rt);

	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
		return rt6_dst_from_check(rt, cookie);
	else
		return rt6_check(rt, cookie);
}

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (rt6_check_expired(rt)) {
				ip6_del_rt(rt);
				dst = NULL;
			}
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			dst_hold(&rt->dst);
			ip6_del_rt(rt);
		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
			rt->rt6i_node->fn_sernum = -1;
		}
	}
}

static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	rt->rt6i_flags |= RTF_MODIFIED;
	rt->rt6i_pmtu = mtu;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
	       (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
}
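/* Apply a PMTU update to @dst: RTF_CACHE clones (and entries with no
 * tree node) are updated in place; for per-cpu and tree entries a new
 * RTF_CACHE clone carrying the reduced MTU is created and inserted.
 */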
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)
		return;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	dst_confirm(dst);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
	} else {
		const struct in6_addr *daddr, *saddr;
		struct rt6_info *nrt6;

		if (iph) {
			daddr = &iph->daddr;
			saddr = &iph->saddr;
		} else if (sk) {
			daddr = &sk->sk_v6_daddr;
			saddr = &inet6_sk(sk)->saddr;
		} else {
			return;
		}
		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);

			/* ip6_ins_rt(nrt6) will bump the
			 * rt6->rt6i_node->fn_sernum
			 * which will fail the next rt6_check() and
			 * invalidate the sk->sk_dst_cache.
			 */
			ip6_ins_rt(nrt6);
		}
	}
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	struct dst_entry *dst;

	ip6_update_pmtu(skb, sock_net(sk), mtu,
			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};

static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt;
	struct fib6_node *fn;
	/* Get the "current" route for this destination and
	 * check if the redirect has come from the appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */
	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt6_check_expired(rt))
			continue;
		if (rt->dst.error)
			break;
		if (!(rt->rt6i_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
			continue;
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
			continue;
		break;
	}

	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	else if (rt->dst.error) {
		rt = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	dst_hold(&rt->dst);

	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
	return rt;
}

static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6,
				flags, __ip6_route_redirect);
}

void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;
	fl6.flowi6_uid = sock_net_uid(net, NULL);

	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);

static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}

static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	const struct rt6_info *rt = (const struct rt6_info *)dst;
	unsigned int mtu = rt->rt6i_pmtu;
	struct inet6_dev *idev;

	if (mtu)
		goto out;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static struct dst_entry *icmp6_dst_gc_list;
static DEFINE_SPINLOCK(icmp6_dst_lock);

struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.output = ip6_output;
	atomic_set(&rt->dst.__refcnt, 1);
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	spin_lock_bh(&icmp6_dst_lock);
	rt->dst.next = icmp6_dst_gc_list;
	icmp6_dst_gc_list = &rt->dst;
	spin_unlock_bh(&icmp6_dst_lock);

	fib6_force_start_gc(net);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

int icmp6_dst_gc(void)
{
	struct dst_entry *dst, **pprev;
	int more = 0;

	spin_lock_bh(&icmp6_dst_lock);
	pprev = &icmp6_dst_gc_list;

	while ((dst = *pprev) != NULL) {
		if (!atomic_read(&dst->__refcnt)) {
			*pprev = dst->next;
			dst_free(dst);
		} else {
			pprev = &dst->next;
			++more;
		}
	}

	spin_unlock_bh(&icmp6_dst_lock);

	return more;
}

static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
			    void *arg)
{
	struct dst_entry *dst, **pprev;

	spin_lock_bh(&icmp6_dst_lock);
	pprev = &icmp6_dst_gc_list;
	while ((dst = *pprev) != NULL) {
		struct rt6_info *rt = (struct rt6_info *) dst;
		if (func(rt, arg)) {
			*pprev = dst->next;
			dst_free(dst);
		} else {
			pprev = &dst->next;
		}
	}
	spin_unlock_bh(&icmp6_dst_lock);
}
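/* dst cache garbage collection: skip the run if the minimum interval
 * has not elapsed and we are still under ip6_rt_max_size; otherwise
 * run fib6_run_gc() with a gc-expire value that grows on every pass
 * and is damped again by ip6_rt_gc_elasticity.
 */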
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
	return entries > rt_max_size;
}
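/* Parse the RTA_METRICS attributes carried in @cfg into an mx6_config
 * array: congestion-control algorithm names are translated to keys,
 * RTAX_HOPLIMIT is clamped to 255, and unknown feature bits are
 * rejected.
 */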
static int ip6_convert_metrics(struct mx6_config *mxc,
			       const struct fib6_config *cfg)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;
	u32 *mp;

	if (!cfg->fc_mx)
		return 0;

	mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
	if (unlikely(!mp))
		return -ENOMEM;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (unlikely(type > RTAX_MAX))
			goto err;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				goto err;
		} else {
			val = nla_get_u32(nla);
		}
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			goto err;

		mp[type - 1] = val;
		__set_bit(type - 1, mxc->mx_valid);
	}

	if (ecn_ca) {
		__set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
		mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
	}

	mxc->mx = mp;
	return 0;
err:
	kfree(mp);
	return -EINVAL;
}

static struct rt6_info *ip6_nh_lookup_table(struct net *net,
					    struct fib6_config *cfg,
					    const struct in6_addr *gw_addr)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	struct rt6_info *rt;
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;

	table = fib6_get_table(net, cfg->fc_table);
	if (!table)
		return NULL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);

	/* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}

static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	int addr_type;
	int err = -EINVAL;
	/* RTF_PCPU is an internal flag; it cannot be set by userspace */
	if (cfg->fc_flags & RTF_PCPU)
		goto out;

	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
		goto out;
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len)
		goto out;
#endif
	if (cfg->fc_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	err = -ENOBUFS;
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);

	if (!rt) {
		err = -ENOMEM;
		goto out;
	}

	if (cfg->fc_flags & RTF_EXPIRES)
		rt6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		rt6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;
	else
		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(dev, cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate);
		if (err)
			goto out;
		rt->dst.lwtstate = lwtstate_get(lwtstate);
		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_output = rt->dst.output;
			rt->dst.output = lwtunnel_output;
		}
		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_input = rt->dst.input;
			rt->dst.input = lwtunnel_input;
		}
	}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;

	/* We cannot add true routes via loopback here,
	   they would result in kernel looping; promote them to reject routes
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {
		case RTN_BLACKHOLE:
			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_out;
			rt->dst.input = dst_discard;
			break;
		case RTN_PROHIBIT:
			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;
			break;
		case RTN_THROW:
		case RTN_UNREACHABLE:
		default:
			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: (cfg->fc_type == RTN_UNREACHABLE)
					? -EHOSTUNREACH : -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;
			break;
		}
		goto install_route;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;
		int gwa_type;

		gw_addr = &cfg->fc_gateway;
		gwa_type = ipv6_addr_type(gw_addr);

		/* if gw_addr is local we will fail to detect this in case
		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
		 * will return already-added prefix route via interface that
		 * prefix route was assigned to, which might be non-loopback.
		 */
		err = -EINVAL;
		if (ipv6_chk_addr_and_flags(net, gw_addr,
					    gwa_type & IPV6_ADDR_LINKLOCAL ?
					    dev : NULL, 0, 0))
			goto out;

		rt->rt6i_gateway = *gw_addr;

		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt = NULL;
			/* IPv6 strictly forbids using non-link-local
			   addresses as the nexthop address; otherwise,
			   the router will not be able to send redirects.
			   That is very good, but in some (rare!)
			   circumstances (SIT, PtP, NBMA NOARP links) it is
			   handy to allow some exceptions. --ANK
			   We allow IPv4-mapped nexthops to support
			   RFC4798-type addressing.
			 */
2026 if (!(gwa_type & (IPV6_ADDR_UNICAST |
2027 IPV6_ADDR_MAPPED)))
2028 goto out;
2029
2030 if (cfg->fc_table) {
2031 grt = ip6_nh_lookup_table(net, cfg, gw_addr);
2032
2033 if (grt) {
2034 if (grt->rt6i_flags & RTF_GATEWAY ||
2035 (dev && dev != grt->dst.dev)) {
2036 ip6_rt_put(grt);
2037 grt = NULL;
2038 }
2039 }
2040 }
2041
2042 if (!grt)
2043 grt = rt6_lookup(net, gw_addr, NULL,
2044 cfg->fc_ifindex, 1);
2045
2046 err = -EHOSTUNREACH;
2047 if (!grt)
2048 goto out;
2049 if (dev) {
2050 if (dev != grt->dst.dev) {
2051 ip6_rt_put(grt);
2052 goto out;
2053 }
2054 } else {
2055 dev = grt->dst.dev;
2056 idev = grt->rt6i_idev;
2057 dev_hold(dev);
2058 in6_dev_hold(grt->rt6i_idev);
2059 }
2060 if (!(grt->rt6i_flags & RTF_GATEWAY))
2061 err = 0;
2062 ip6_rt_put(grt);
2063
2064 if (err)
2065 goto out;
2066 }
2067 err = -EINVAL;
2068 if (!dev || (dev->flags & IFF_LOOPBACK))
2069 goto out;
2070 }
2071
2072 err = -ENODEV;
2073 if (!dev)
2074 goto out;
2075
2076 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
2077 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
2078 err = -EINVAL;
2079 goto out;
2080 }
2081 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
2082 rt->rt6i_prefsrc.plen = 128;
2083 } else
2084 rt->rt6i_prefsrc.plen = 0;
2085
2086 rt->rt6i_flags = cfg->fc_flags;
2087
2088 install_route:
2089 rt->dst.dev = dev;
2090 rt->rt6i_idev = idev;
2091 rt->rt6i_table = table;
2092
2093 cfg->fc_nlinfo.nl_net = dev_net(dev);
2094
2095 return rt;
2096 out:
2097 if (dev)
2098 dev_put(dev);
2099 if (idev)
2100 in6_dev_put(idev);
2101 if (rt)
2102 dst_free(&rt->dst);
2103
2104 return ERR_PTR(err);
2105 }
2106
2107 int ip6_route_add(struct fib6_config *cfg)
2108 {
2109 struct mx6_config mxc = { .mx = NULL, };
2110 struct rt6_info *rt;
2111 int err;
2112
2113 rt = ip6_route_info_create(cfg);
2114 if (IS_ERR(rt)) {
2115 err = PTR_ERR(rt);
2116 rt = NULL;
2117 goto out;
2118 }
2119
2120 err = ip6_convert_metrics(&mxc, cfg);
2121 if (err)
2122 goto out;
2123
2124 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
2125
2126 kfree(mxc.mx);
2127
2128 return err;
2129 out:
2130 if (rt)
2131 dst_free(&rt->dst);
2132
2133 return err;
2134 }
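/* A minimal sketch of how a caller might feed ip6_route_add(), using a
 * made-up /64 blackhole route (illustrative only; the field values here
 * are hypothetical, but real callers build fib6_config the same way):
 *
 *	struct fib6_config cfg = {
 *		.fc_table	= RT6_TABLE_MAIN,
 *		.fc_metric	= IP6_RT_PRIO_USER,
 *		.fc_dst_len	= 64,
 *		.fc_flags	= RTF_UP | RTF_REJECT,
 *		.fc_type	= RTN_BLACKHOLE,
 *		.fc_nlinfo.nl_net = &init_net,
 *	};
 *	int err;
 *
 *	// cfg.fc_dst = <destination prefix>;
 *	err = ip6_route_add(&cfg);
 */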
2135
2136 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2137 {
2138 int err;
2139 struct fib6_table *table;
2140 struct net *net = dev_net(rt->dst.dev);
2141
2142 if (rt == net->ipv6.ip6_null_entry ||
2143 rt->dst.flags & DST_NOCACHE) {
2144 err = -ENOENT;
2145 goto out;
2146 }
2147
2148 table = rt->rt6i_table;
2149 write_lock_bh(&table->tb6_lock);
2150 err = fib6_del(rt, info);
2151 write_unlock_bh(&table->tb6_lock);
2152
2153 out:
2154 ip6_rt_put(rt);
2155 return err;
2156 }
2157
2158 int ip6_del_rt(struct rt6_info *rt)
2159 {
2160 struct nl_info info = {
2161 .nl_net = dev_net(rt->dst.dev),
2162 };
2163 return __ip6_del_rt(rt, &info);
2164 }
2165
2166 static int ip6_route_del(struct fib6_config *cfg)
2167 {
2168 struct fib6_table *table;
2169 struct fib6_node *fn;
2170 struct rt6_info *rt;
2171 int err = -ESRCH;
2172
2173 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2174 if (!table)
2175 return err;
2176
2177 read_lock_bh(&table->tb6_lock);
2178
2179 fn = fib6_locate(&table->tb6_root,
2180 &cfg->fc_dst, cfg->fc_dst_len,
2181 &cfg->fc_src, cfg->fc_src_len);
2182
2183 if (fn) {
2184 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2185 if ((rt->rt6i_flags & RTF_CACHE) &&
2186 !(cfg->fc_flags & RTF_CACHE))
2187 continue;
2188 if (cfg->fc_ifindex &&
2189 (!rt->dst.dev ||
2190 rt->dst.dev->ifindex != cfg->fc_ifindex))
2191 continue;
2192 if (cfg->fc_flags & RTF_GATEWAY &&
2193 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2194 continue;
2195 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2196 continue;
2197 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2198 continue;
2199 dst_hold(&rt->dst);
2200 read_unlock_bh(&table->tb6_lock);
2201
2202 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2203 }
2204 }
2205 read_unlock_bh(&table->tb6_lock);
2206
2207 return err;
2208 }
2209
2210 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2211 {
2212 struct netevent_redirect netevent;
2213 struct rt6_info *rt, *nrt = NULL;
2214 struct ndisc_options ndopts;
2215 struct inet6_dev *in6_dev;
2216 struct neighbour *neigh;
2217 struct rd_msg *msg;
2218 int optlen, on_link;
2219 u8 *lladdr;
2220
2221 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2222 optlen -= sizeof(*msg);
2223
2224 if (optlen < 0) {
2225 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2226 return;
2227 }
2228
2229 msg = (struct rd_msg *)icmp6_hdr(skb);
2230
2231 if (ipv6_addr_is_multicast(&msg->dest)) {
2232 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2233 return;
2234 }
2235
2236 on_link = 0;
2237 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2238 on_link = 1;
2239 } else if (ipv6_addr_type(&msg->target) !=
2240 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2241 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2242 return;
2243 }
2244
2245 in6_dev = __in6_dev_get(skb->dev);
2246 if (!in6_dev)
2247 return;
2248 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
2249 return;
2250
2251 /* RFC2461 8.1:
2252 * The IP source address of the Redirect MUST be the same as the current
2253 * first-hop router for the specified ICMP Destination Address.
2254 */
2255
2256 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
2257 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2258 return;
2259 }
2260
2261 lladdr = NULL;
2262 if (ndopts.nd_opts_tgt_lladdr) {
2263 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
2264 skb->dev);
2265 if (!lladdr) {
2266 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2267 return;
2268 }
2269 }
2270
2271 rt = (struct rt6_info *) dst;
2272 if (rt->rt6i_flags & RTF_REJECT) {
2273 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2274 return;
2275 }
2276
2277 /* Redirect received -> path was valid.
2278 * Look, redirects are sent only in response to data packets,
2279 * so this nexthop is apparently reachable. --ANK
2280 */
2281 dst_confirm(&rt->dst);
2282
2283 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
2284 if (!neigh)
2285 return;
2286
2287 /*
2288 * We have finally decided to accept it.
2289 */
2290
2291 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
2292 NEIGH_UPDATE_F_WEAK_OVERRIDE|
2293 NEIGH_UPDATE_F_OVERRIDE|
2294 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2295 NEIGH_UPDATE_F_ISROUTER)),
2296 NDISC_REDIRECT, &ndopts);
2297
2298 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
2299 if (!nrt)
2300 goto out;
2301
2302 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
2303 if (on_link)
2304 nrt->rt6i_flags &= ~RTF_GATEWAY;
2305
2306 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
2307
2308 if (ip6_ins_rt(nrt))
2309 goto out;
2310
2311 netevent.old = &rt->dst;
2312 netevent.new = &nrt->dst;
2313 netevent.daddr = &msg->dest;
2314 netevent.neigh = neigh;
2315 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
2316
2317 if (rt->rt6i_flags & RTF_CACHE) {
2318 rt = (struct rt6_info *) dst_clone(&rt->dst);
2319 ip6_del_rt(rt);
2320 }
2321
2322 out:
2323 neigh_release(neigh);
2324 }
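/* Illustrative restatement of the redirect sanity rules enforced above
 * (a sketch, not a helper in this file): a redirect is on-link when
 * dest == target; otherwise the target must be a link-local unicast
 * address (cf. RFC 2461, section 8):
 *
 *	static int redirect_target_ok(const struct in6_addr *dest,
 *				      const struct in6_addr *target,
 *				      int *on_link)
 *	{
 *		*on_link = ipv6_addr_equal(dest, target);
 *		if (*on_link)
 *			return 1;
 *		return ipv6_addr_type(target) ==
 *		       (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL);
 *	}
 */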
2325
2326 /*
2327 * Misc support functions
2328 */
2329
2330 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
2331 {
2332 BUG_ON(from->dst.from);
2333
2334 rt->rt6i_flags &= ~RTF_EXPIRES;
2335 dst_hold(&from->dst);
2336 rt->dst.from = &from->dst;
2337 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
2338 }
2339
2340 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2341 {
2342 rt->dst.input = ort->dst.input;
2343 rt->dst.output = ort->dst.output;
2344 rt->rt6i_dst = ort->rt6i_dst;
2345 rt->dst.error = ort->dst.error;
2346 rt->rt6i_idev = ort->rt6i_idev;
2347 if (rt->rt6i_idev)
2348 in6_dev_hold(rt->rt6i_idev);
2349 rt->dst.lastuse = jiffies;
2350 rt->rt6i_gateway = ort->rt6i_gateway;
2351 rt->rt6i_flags = ort->rt6i_flags;
2352 rt6_set_from(rt, ort);
2353 rt->rt6i_metric = ort->rt6i_metric;
2354 #ifdef CONFIG_IPV6_SUBTREES
2355 rt->rt6i_src = ort->rt6i_src;
2356 #endif
2357 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
2358 rt->rt6i_table = ort->rt6i_table;
2359 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
2360 }
2361
2362 #ifdef CONFIG_IPV6_ROUTE_INFO
2363 static struct rt6_info *rt6_get_route_info(struct net *net,
2364 const struct in6_addr *prefix, int prefixlen,
2365 const struct in6_addr *gwaddr,
2366 struct net_device *dev)
2367 {
2368 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2369 int ifindex = dev->ifindex;
2370 struct fib6_node *fn;
2371 struct rt6_info *rt = NULL;
2372 struct fib6_table *table;
2373
2374 table = fib6_get_table(net, tb_id);
2375 if (!table)
2376 return NULL;
2377
2378 read_lock_bh(&table->tb6_lock);
2379 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2380 if (!fn)
2381 goto out;
2382
2383 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2384 if (rt->dst.dev->ifindex != ifindex)
2385 continue;
2386 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2387 continue;
2388 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2389 continue;
2390 dst_hold(&rt->dst);
2391 break;
2392 }
2393 out:
2394 read_unlock_bh(&table->tb6_lock);
2395 return rt;
2396 }
2397
2398 static struct rt6_info *rt6_add_route_info(struct net *net,
2399 const struct in6_addr *prefix, int prefixlen,
2400 const struct in6_addr *gwaddr,
2401 struct net_device *dev,
2402 unsigned int pref)
2403 {
2404 struct fib6_config cfg = {
2405 .fc_metric = IP6_RT_PRIO_USER,
2406 .fc_ifindex = dev->ifindex,
2407 .fc_dst_len = prefixlen,
2408 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2409 RTF_UP | RTF_PREF(pref),
2410 .fc_nlinfo.portid = 0,
2411 .fc_nlinfo.nlh = NULL,
2412 .fc_nlinfo.nl_net = net,
2413 };
2414
2415 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2416 cfg.fc_dst = *prefix;
2417 cfg.fc_gateway = *gwaddr;
2418
2419 /* We should treat it as a default route if prefix length is 0. */
2420 if (!prefixlen)
2421 cfg.fc_flags |= RTF_DEFAULT;
2422
2423 ip6_route_add(&cfg);
2424
2425 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
2426 }
2427 #endif
2428
2429 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2430 {
2431 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
2432 struct rt6_info *rt;
2433 struct fib6_table *table;
2434
2435 table = fib6_get_table(dev_net(dev), tb_id);
2436 if (!table)
2437 return NULL;
2438
2439 read_lock_bh(&table->tb6_lock);
2440 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2441 if (dev == rt->dst.dev &&
2442 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2443 ipv6_addr_equal(&rt->rt6i_gateway, addr))
2444 break;
2445 }
2446 if (rt)
2447 dst_hold(&rt->dst);
2448 read_unlock_bh(&table->tb6_lock);
2449 return rt;
2450 }
2451
2452 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2453 struct net_device *dev,
2454 unsigned int pref)
2455 {
2456 struct fib6_config cfg = {
2457 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
2458 .fc_metric = IP6_RT_PRIO_USER,
2459 .fc_ifindex = dev->ifindex,
2460 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2461 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2462 .fc_nlinfo.portid = 0,
2463 .fc_nlinfo.nlh = NULL,
2464 .fc_nlinfo.nl_net = dev_net(dev),
2465 };
2466
2467 cfg.fc_gateway = *gwaddr;
2468
2469 if (!ip6_route_add(&cfg)) {
2470 struct fib6_table *table;
2471
2472 table = fib6_get_table(dev_net(dev), cfg.fc_table);
2473 if (table)
2474 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
2475 }
2476
2477 return rt6_get_dflt_router(gwaddr, dev);
2478 }
2479
2480 static void __rt6_purge_dflt_routers(struct fib6_table *table)
2481 {
2482 struct rt6_info *rt;
2483
2484 restart:
2485 read_lock_bh(&table->tb6_lock);
2486 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2487 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2488 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2489 dst_hold(&rt->dst);
2490 read_unlock_bh(&table->tb6_lock);
2491 ip6_del_rt(rt);
2492 goto restart;
2493 }
2494 }
2495 read_unlock_bh(&table->tb6_lock);
2496
2497 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
2498 }
2499
2500 void rt6_purge_dflt_routers(struct net *net)
2501 {
2502 struct fib6_table *table;
2503 struct hlist_head *head;
2504 unsigned int h;
2505
2506 rcu_read_lock();
2507
2508 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
2509 head = &net->ipv6.fib_table_hash[h];
2510 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
2511 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
2512 __rt6_purge_dflt_routers(table);
2513 }
2514 }
2515
2516 rcu_read_unlock();
2517 }
2518
2519 static void rtmsg_to_fib6_config(struct net *net,
2520 struct in6_rtmsg *rtmsg,
2521 struct fib6_config *cfg)
2522 {
2523 memset(cfg, 0, sizeof(*cfg));
2524
2525 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
2526 : RT6_TABLE_MAIN;
2527 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2528 cfg->fc_metric = rtmsg->rtmsg_metric;
2529 cfg->fc_expires = rtmsg->rtmsg_info;
2530 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2531 cfg->fc_src_len = rtmsg->rtmsg_src_len;
2532 cfg->fc_flags = rtmsg->rtmsg_flags;
2533
2534 cfg->fc_nlinfo.nl_net = net;
2535
2536 cfg->fc_dst = rtmsg->rtmsg_dst;
2537 cfg->fc_src = rtmsg->rtmsg_src;
2538 cfg->fc_gateway = rtmsg->rtmsg_gateway;
2539 }
2540
2541 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2542 {
2543 struct fib6_config cfg;
2544 struct in6_rtmsg rtmsg;
2545 int err;
2546
2547 switch (cmd) {
2548 case SIOCADDRT: /* Add a route */
2549 case SIOCDELRT: /* Delete a route */
2550 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2551 return -EPERM;
2552 err = copy_from_user(&rtmsg, arg,
2553 sizeof(struct in6_rtmsg));
2554 if (err)
2555 return -EFAULT;
2556
2557 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2558
2559 rtnl_lock();
2560 switch (cmd) {
2561 case SIOCADDRT:
2562 err = ip6_route_add(&cfg);
2563 break;
2564 case SIOCDELRT:
2565 err = ip6_route_del(&cfg);
2566 break;
2567 default:
2568 err = -EINVAL;
2569 }
2570 rtnl_unlock();
2571
2572 return err;
2573 }
2574
2575 return -EINVAL;
2576 }
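/* Hedged userspace sketch of the ioctl path above: adding a route with
 * SIOCADDRT and struct in6_rtmsg (assumes an interface named "eth0" and
 * CAP_NET_ADMIN; illustrative only):
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <net/route.h>		// struct in6_rtmsg, RTF_UP
 *
 *	int main(void)
 *	{
 *		struct in6_rtmsg rt = { 0 };
 *		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *		inet_pton(AF_INET6, "2001:db8::", &rt.rtmsg_dst);
 *		rt.rtmsg_dst_len = 64;
 *		rt.rtmsg_metric	 = 1024;
 *		rt.rtmsg_flags	 = RTF_UP;
 *		rt.rtmsg_ifindex = if_nametoindex("eth0");
 *
 *		if (ioctl(fd, SIOCADDRT, &rt) < 0)
 *			perror("SIOCADDRT");
 *		close(fd);
 *		return 0;
 *	}
 */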
2577
2578 /*
2579 * Drop the packet on the floor
2580 */
2581
2582 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2583 {
2584 int type;
2585 struct dst_entry *dst = skb_dst(skb);
2586 switch (ipstats_mib_noroutes) {
2587 case IPSTATS_MIB_INNOROUTES:
2588 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2589 if (type == IPV6_ADDR_ANY) {
2590 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2591 IPSTATS_MIB_INADDRERRORS);
2592 break;
2593 }
2594 /* FALLTHROUGH */
2595 case IPSTATS_MIB_OUTNOROUTES:
2596 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2597 ipstats_mib_noroutes);
2598 break;
2599 }
2600 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2601 kfree_skb(skb);
2602 return 0;
2603 }
2604
2605 static int ip6_pkt_discard(struct sk_buff *skb)
2606 {
2607 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2608 }
2609
2610 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2611 {
2612 skb->dev = skb_dst(skb)->dev;
2613 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2614 }
2615
2616 static int ip6_pkt_prohibit(struct sk_buff *skb)
2617 {
2618 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2619 }
2620
2621 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2622 {
2623 skb->dev = skb_dst(skb)->dev;
2624 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2625 }
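/* Summary of the reject-route datapaths wired up in ip6_route_info_create():
 *
 *	RTN_BLACKHOLE	-> dst_discard{,_out}	drop silently (-EINVAL)
 *	RTN_PROHIBIT	-> ip6_pkt_prohibit*	ICMPv6 adm-prohibited (-EACCES)
 *	RTN_THROW,
 *	RTN_UNREACHABLE	-> ip6_pkt_discard*	ICMPv6 no-route
 */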
2626
2627 /*
2628 * Allocate a dst for local (unicast / anycast) address.
2629 */
2630
2631 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2632 const struct in6_addr *addr,
2633 bool anycast)
2634 {
2635 u32 tb_id;
2636 struct net *net = dev_net(idev->dev);
2637 struct net_device *dev = net->loopback_dev;
2638 struct rt6_info *rt;
2639
2640 /* use the L3 master device as loopback for host routes if the
2641 * device is enslaved and the address is not link-local or multicast
2642 */
2643 if (!rt6_need_strict(addr))
2644 dev = l3mdev_master_dev_rcu(idev->dev) ? : dev;
2645
2646 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
2647 if (!rt)
2648 return ERR_PTR(-ENOMEM);
2649
2650 in6_dev_hold(idev);
2651
2652 rt->dst.flags |= DST_HOST;
2653 rt->dst.input = ip6_input;
2654 rt->dst.output = ip6_output;
2655 rt->rt6i_idev = idev;
2656
2657 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2658 if (anycast)
2659 rt->rt6i_flags |= RTF_ANYCAST;
2660 else
2661 rt->rt6i_flags |= RTF_LOCAL;
2662
2663 rt->rt6i_gateway = *addr;
2664 rt->rt6i_dst.addr = *addr;
2665 rt->rt6i_dst.plen = 128;
2666 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
2667 rt->rt6i_table = fib6_get_table(net, tb_id);
2668 rt->dst.flags |= DST_NOCACHE;
2669
2670 atomic_set(&rt->dst.__refcnt, 1);
2671
2672 return rt;
2673 }
2674
2675 /* remove a deleted IP from prefsrc entries */
2676 struct arg_dev_net_ip {
2677 struct net_device *dev;
2678 struct net *net;
2679 struct in6_addr *addr;
2680 };
2681
2682 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2683 {
2684 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2685 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2686 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2687
2688 if (((void *)rt->dst.dev == dev || !dev) &&
2689 rt != net->ipv6.ip6_null_entry &&
2690 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2691 /* remove prefsrc entry */
2692 rt->rt6i_prefsrc.plen = 0;
2693 }
2694 return 0;
2695 }
2696
2697 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2698 {
2699 struct net *net = dev_net(ifp->idev->dev);
2700 struct arg_dev_net_ip adni = {
2701 .dev = ifp->idev->dev,
2702 .net = net,
2703 .addr = &ifp->addr,
2704 };
2705 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2706 }
2707
2708 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2709 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2710
2711 /* Remove routers and update dst entries when a gateway turns into a host. */
2712 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2713 {
2714 struct in6_addr *gateway = (struct in6_addr *)arg;
2715
2716 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2717 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2718 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2719 return -1;
2720 }
2721 return 0;
2722 }
2723
2724 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2725 {
2726 fib6_clean_all(net, fib6_clean_tohost, gateway);
2727 }
2728
2729 struct arg_dev_net {
2730 struct net_device *dev;
2731 struct net *net;
2732 };
2733
2734 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2735 {
2736 const struct arg_dev_net *adn = arg;
2737 const struct net_device *dev = adn->dev;
2738
2739 if ((rt->dst.dev == dev || !dev) &&
2740 rt != adn->net->ipv6.ip6_null_entry)
2741 return -1;
2742
2743 return 0;
2744 }
2745
2746 void rt6_ifdown(struct net *net, struct net_device *dev)
2747 {
2748 struct arg_dev_net adn = {
2749 .dev = dev,
2750 .net = net,
2751 };
2752
2753 fib6_clean_all(net, fib6_ifdown, &adn);
2754 icmp6_clean_all(fib6_ifdown, &adn);
2755 if (dev)
2756 rt6_uncached_list_flush_dev(net, dev);
2757 }
2758
2759 struct rt6_mtu_change_arg {
2760 struct net_device *dev;
2761 unsigned int mtu;
2762 };
2763
2764 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2765 {
2766 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2767 struct inet6_dev *idev;
2768
2769 /* In IPv6, PMTU discovery is not optional,
2770 so the RTAX_MTU lock cannot disable it.
2771 We still use this lock to block changes
2772 caused by addrconf/ndisc.
2773 */
2774
2775 idev = __in6_dev_get(arg->dev);
2776 if (!idev)
2777 return 0;
2778
2779 /* After an administrative MTU increase, there is no way to
2780 discover the IPv6 PMTU increase, so the PMTU must be updated
2781 here. Since RFC 1981 doesn't cover administrative MTU
2782 increases, updating the PMTU here is a MUST (e.g. jumbo frames).
2783 */
2784 /*
2785 If the new MTU is less than the route PMTU, the new MTU will be
2786 the lowest MTU in the path; update the route PMTU to reflect the
2787 decrease. If the new MTU is greater than the route PMTU, and the
2788 old MTU was the lowest MTU in the path, update the route PMTU
2789 to reflect the increase. In that case, if another node's MTU is
2790 also the lowest in the path, a Packet Too Big message will
2791 trigger PMTU discovery.
2792 */
2793 if (rt->dst.dev == arg->dev &&
2794 dst_metric_raw(&rt->dst, RTAX_MTU) &&
2795 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2796 if (rt->rt6i_flags & RTF_CACHE) {
2797 /* For RTF_CACHE with rt6i_pmtu == 0
2798 * (i.e. a redirected route),
2799 * the metrics of its rt->dst.from have already
2800 * been updated.
2801 */
2802 if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
2803 rt->rt6i_pmtu = arg->mtu;
2804 } else if (dst_mtu(&rt->dst) >= arg->mtu ||
2805 (dst_mtu(&rt->dst) < arg->mtu &&
2806 dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
2807 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2808 }
2809 }
2810 return 0;
2811 }
2812
2813 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2814 {
2815 struct rt6_mtu_change_arg arg = {
2816 .dev = dev,
2817 .mtu = mtu,
2818 };
2819
2820 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2821 }
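/* The per-route MTU update rule above, restated as a pure function
 * (illustrative sketch, not a helper in this file):
 *
 *	static unsigned int rt6_updated_mtu(unsigned int route_mtu,
 *					    unsigned int dev_mtu6,
 *					    unsigned int new_mtu)
 *	{
 *		if (route_mtu >= new_mtu)	// PMTU decrease: always track
 *			return new_mtu;
 *		if (route_mtu == dev_mtu6)	// route held the old link MTU,
 *			return new_mtu;		// so the PMTU may increase
 *		return route_mtu;		// unrelated lower PMTU: keep it
 *	}
 */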
2822
2823 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2824 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2825 [RTA_OIF] = { .type = NLA_U32 },
2826 [RTA_IIF] = { .type = NLA_U32 },
2827 [RTA_PRIORITY] = { .type = NLA_U32 },
2828 [RTA_METRICS] = { .type = NLA_NESTED },
2829 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2830 [RTA_PREF] = { .type = NLA_U8 },
2831 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
2832 [RTA_ENCAP] = { .type = NLA_NESTED },
2833 [RTA_EXPIRES] = { .type = NLA_U32 },
2834 [RTA_UID] = { .type = NLA_U32 },
2835 };
2836
2837 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2838 struct fib6_config *cfg)
2839 {
2840 struct rtmsg *rtm;
2841 struct nlattr *tb[RTA_MAX+1];
2842 unsigned int pref;
2843 int err;
2844
2845 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2846 if (err < 0)
2847 goto errout;
2848
2849 err = -EINVAL;
2850 rtm = nlmsg_data(nlh);
2851 memset(cfg, 0, sizeof(*cfg));
2852
2853 cfg->fc_table = rtm->rtm_table;
2854 cfg->fc_dst_len = rtm->rtm_dst_len;
2855 cfg->fc_src_len = rtm->rtm_src_len;
2856 cfg->fc_flags = RTF_UP;
2857 cfg->fc_protocol = rtm->rtm_protocol;
2858 cfg->fc_type = rtm->rtm_type;
2859
2860 if (rtm->rtm_type == RTN_UNREACHABLE ||
2861 rtm->rtm_type == RTN_BLACKHOLE ||
2862 rtm->rtm_type == RTN_PROHIBIT ||
2863 rtm->rtm_type == RTN_THROW)
2864 cfg->fc_flags |= RTF_REJECT;
2865
2866 if (rtm->rtm_type == RTN_LOCAL)
2867 cfg->fc_flags |= RTF_LOCAL;
2868
2869 if (rtm->rtm_flags & RTM_F_CLONED)
2870 cfg->fc_flags |= RTF_CACHE;
2871
2872 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2873 cfg->fc_nlinfo.nlh = nlh;
2874 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2875
2876 if (tb[RTA_GATEWAY]) {
2877 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
2878 cfg->fc_flags |= RTF_GATEWAY;
2879 }
2880
2881 if (tb[RTA_DST]) {
2882 int plen = (rtm->rtm_dst_len + 7) >> 3;
2883
2884 if (nla_len(tb[RTA_DST]) < plen)
2885 goto errout;
2886
2887 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2888 }
2889
2890 if (tb[RTA_SRC]) {
2891 int plen = (rtm->rtm_src_len + 7) >> 3;
2892
2893 if (nla_len(tb[RTA_SRC]) < plen)
2894 goto errout;
2895
2896 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2897 }
2898
2899 if (tb[RTA_PREFSRC])
2900 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
2901
2902 if (tb[RTA_OIF])
2903 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2904
2905 if (tb[RTA_PRIORITY])
2906 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2907
2908 if (tb[RTA_METRICS]) {
2909 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2910 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2911 }
2912
2913 if (tb[RTA_TABLE])
2914 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2915
2916 if (tb[RTA_MULTIPATH]) {
2917 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2918 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2919
2920 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
2921 cfg->fc_mp_len);
2922 if (err < 0)
2923 goto errout;
2924 }
2925
2926 if (tb[RTA_PREF]) {
2927 pref = nla_get_u8(tb[RTA_PREF]);
2928 if (pref != ICMPV6_ROUTER_PREF_LOW &&
2929 pref != ICMPV6_ROUTER_PREF_HIGH)
2930 pref = ICMPV6_ROUTER_PREF_MEDIUM;
2931 cfg->fc_flags |= RTF_PREF(pref);
2932 }
2933
2934 if (tb[RTA_ENCAP])
2935 cfg->fc_encap = tb[RTA_ENCAP];
2936
2937 if (tb[RTA_ENCAP_TYPE]) {
2938 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
2939
2940 err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
2941 if (err < 0)
2942 goto errout;
2943 }
2944
2945 if (tb[RTA_EXPIRES]) {
2946 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
2947
2948 if (addrconf_finite_timeout(timeout)) {
2949 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
2950 cfg->fc_flags |= RTF_EXPIRES;
2951 }
2952 }
2953
2954 err = 0;
2955 errout:
2956 return err;
2957 }
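/* Hedged userspace sketch of the netlink path parsed above: building a
 * minimal RTM_NEWROUTE request carrying an RTA_DST attribute (illustrative
 * only; fd is an AF_NETLINK/NETLINK_ROUTE socket, error handling and
 * RTA_OIF are omitted):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *
 *	static int add_route_nl(int fd, const struct in6_addr *dst, int plen)
 *	{
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct rtmsg	rtm;
 *			char		buf[64];
 *		} req;
 *		struct rtattr *rta;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.nlh.nlmsg_len    = NLMSG_LENGTH(sizeof(struct rtmsg));
 *		req.nlh.nlmsg_type   = RTM_NEWROUTE;
 *		req.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
 *		req.rtm.rtm_family   = AF_INET6;
 *		req.rtm.rtm_dst_len  = plen;
 *		req.rtm.rtm_table    = RT_TABLE_MAIN;
 *		req.rtm.rtm_protocol = RTPROT_STATIC;
 *		req.rtm.rtm_scope    = RT_SCOPE_UNIVERSE;
 *		req.rtm.rtm_type     = RTN_UNICAST;
 *
 *		rta = (struct rtattr *)((char *)&req +
 *					NLMSG_ALIGN(req.nlh.nlmsg_len));
 *		rta->rta_type = RTA_DST;
 *		rta->rta_len  = RTA_LENGTH(sizeof(*dst));
 *		memcpy(RTA_DATA(rta), dst, sizeof(*dst));
 *		req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;
 *
 *		return send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
 *	}
 */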
2958
2959 struct rt6_nh {
2960 struct rt6_info *rt6_info;
2961 struct fib6_config r_cfg;
2962 struct mx6_config mxc;
2963 struct list_head next;
2964 };
2965
2966 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
2967 {
2968 struct rt6_nh *nh;
2969
2970 list_for_each_entry(nh, rt6_nh_list, next) {
2971 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
2972 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
2973 nh->r_cfg.fc_ifindex);
2974 }
2975 }
2976
2977 static int ip6_route_info_append(struct list_head *rt6_nh_list,
2978 struct rt6_info *rt, struct fib6_config *r_cfg)
2979 {
2980 struct rt6_nh *nh;
2981 struct rt6_info *rtnh;
2982 int err = -EEXIST;
2983
2984 list_for_each_entry(nh, rt6_nh_list, next) {
2985 /* check if rt6_info already exists */
2986 rtnh = nh->rt6_info;
2987
2988 if (rtnh->dst.dev == rt->dst.dev &&
2989 rtnh->rt6i_idev == rt->rt6i_idev &&
2990 ipv6_addr_equal(&rtnh->rt6i_gateway,
2991 &rt->rt6i_gateway))
2992 return err;
2993 }
2994
2995 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
2996 if (!nh)
2997 return -ENOMEM;
2998 nh->rt6_info = rt;
2999 err = ip6_convert_metrics(&nh->mxc, r_cfg);
3000 if (err) {
3001 kfree(nh);
3002 return err;
3003 }
3004 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
3005 list_add_tail(&nh->next, rt6_nh_list);
3006
3007 return 0;
3008 }
3009
3010 static int ip6_route_multipath_add(struct fib6_config *cfg)
3011 {
3012 struct fib6_config r_cfg;
3013 struct rtnexthop *rtnh;
3014 struct rt6_info *rt;
3015 struct rt6_nh *err_nh;
3016 struct rt6_nh *nh, *nh_safe;
3017 int remaining;
3018 int attrlen;
3019 int err = 1;
3020 int nhn = 0;
3021 int replace = (cfg->fc_nlinfo.nlh &&
3022 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
3023 LIST_HEAD(rt6_nh_list);
3024
3025 remaining = cfg->fc_mp_len;
3026 rtnh = (struct rtnexthop *)cfg->fc_mp;
3027
3028 /* Parse a multipath entry and build a list (rt6_nh_list) with
3029 * one rt6_info struct per nexthop
3030 */
3031 while (rtnh_ok(rtnh, remaining)) {
3032 memcpy(&r_cfg, cfg, sizeof(*cfg));
3033 if (rtnh->rtnh_ifindex)
3034 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3035
3036 attrlen = rtnh_attrlen(rtnh);
3037 if (attrlen > 0) {
3038 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3039
3040 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3041 if (nla) {
3042 r_cfg.fc_gateway = nla_get_in6_addr(nla);
3043 r_cfg.fc_flags |= RTF_GATEWAY;
3044 }
3045 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
3046 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
3047 if (nla)
3048 r_cfg.fc_encap_type = nla_get_u16(nla);
3049 }
3050
3051 rt = ip6_route_info_create(&r_cfg);
3052 if (IS_ERR(rt)) {
3053 err = PTR_ERR(rt);
3054 rt = NULL;
3055 goto cleanup;
3056 }
3057
3058 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
3059 if (err) {
3060 dst_free(&rt->dst);
3061 goto cleanup;
3062 }
3063
3064 rtnh = rtnh_next(rtnh, &remaining);
3065 }
3066
3067 err_nh = NULL;
3068 list_for_each_entry(nh, &rt6_nh_list, next) {
3069 err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
3070 /* nh->rt6_info is used or freed at this point, reset to NULL */
3071 nh->rt6_info = NULL;
3072 if (err) {
3073 if (replace && nhn)
3074 ip6_print_replace_route_err(&rt6_nh_list);
3075 err_nh = nh;
3076 goto add_errout;
3077 }
3078
3079 /* Because each route is added like a single route, we clear
3080 * these flags after the first nexthop: if there is a collision,
3081 * adding the first nexthop has already failed
3082 * (fib6_add_rt2node() rejected it); when replacing, the old
3083 * nexthops have been replaced by the first new one, and the
3084 * rest should be appended to it.
3085 */
3086 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3087 NLM_F_REPLACE);
3088 nhn++;
3089 }
3090
3091 goto cleanup;
3092
3093 add_errout:
3094 /* Delete routes that were already added */
3095 list_for_each_entry(nh, &rt6_nh_list, next) {
3096 if (err_nh == nh)
3097 break;
3098 ip6_route_del(&nh->r_cfg);
3099 }
3100
3101 cleanup:
3102 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
3103 if (nh->rt6_info)
3104 dst_free(&nh->rt6_info->dst);
3105 kfree(nh->mxc.mx);
3106 list_del(&nh->next);
3107 kfree(nh);
3108 }
3109
3110 return err;
3111 }
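/* Hedged userspace sketch of the nexthop walk above: RTA_MULTIPATH carries
 * a sequence of struct rtnexthop entries, each optionally followed by
 * nested attributes such as RTA_GATEWAY (illustrative only; attr is the
 * RTA_MULTIPATH attribute):
 *
 *	struct rtnexthop *rtnh = RTA_DATA(attr);
 *	int remaining = RTA_PAYLOAD(attr);
 *
 *	while (RTNH_OK(rtnh, remaining)) {
 *		// rtnh->rtnh_ifindex is the nexthop device;
 *		// RTNH_DATA(rtnh) points at the nested attributes
 *		remaining -= RTNH_ALIGN(rtnh->rtnh_len);
 *		rtnh = RTNH_NEXT(rtnh);
 *	}
 */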
3112
3113 static int ip6_route_multipath_del(struct fib6_config *cfg)
3114 {
3115 struct fib6_config r_cfg;
3116 struct rtnexthop *rtnh;
3117 int remaining;
3118 int attrlen;
3119 int err = 1, last_err = 0;
3120
3121 remaining = cfg->fc_mp_len;
3122 rtnh = (struct rtnexthop *)cfg->fc_mp;
3123
3124 /* Parse a Multipath Entry */
3125 while (rtnh_ok(rtnh, remaining)) {
3126 memcpy(&r_cfg, cfg, sizeof(*cfg));
3127 if (rtnh->rtnh_ifindex)
3128 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3129
3130 attrlen = rtnh_attrlen(rtnh);
3131 if (attrlen > 0) {
3132 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3133
3134 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3135 if (nla) {
3136 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
3137 r_cfg.fc_flags |= RTF_GATEWAY;
3138 }
3139 }
3140 err = ip6_route_del(&r_cfg);
3141 if (err)
3142 last_err = err;
3143
3144 rtnh = rtnh_next(rtnh, &remaining);
3145 }
3146
3147 return last_err;
3148 }
3149
3150 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
3151 {
3152 struct fib6_config cfg;
3153 int err;
3154
3155 err = rtm_to_fib6_config(skb, nlh, &cfg);
3156 if (err < 0)
3157 return err;
3158
3159 if (cfg.fc_mp)
3160 return ip6_route_multipath_del(&cfg);
3161 else
3162 return ip6_route_del(&cfg);
3163 }
3164
3165 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
3166 {
3167 struct fib6_config cfg;
3168 int err;
3169
3170 err = rtm_to_fib6_config(skb, nlh, &cfg);
3171 if (err < 0)
3172 return err;
3173
3174 if (cfg.fc_mp)
3175 return ip6_route_multipath_add(&cfg);
3176 else
3177 return ip6_route_add(&cfg);
3178 }
3179
3180 static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
3181 {
3182 return NLMSG_ALIGN(sizeof(struct rtmsg))
3183 + nla_total_size(16) /* RTA_SRC */
3184 + nla_total_size(16) /* RTA_DST */
3185 + nla_total_size(16) /* RTA_GATEWAY */
3186 + nla_total_size(16) /* RTA_PREFSRC */
3187 + nla_total_size(4) /* RTA_TABLE */
3188 + nla_total_size(4) /* RTA_IIF */
3189 + nla_total_size(4) /* RTA_OIF */
3190 + nla_total_size(4) /* RTA_PRIORITY */
3191 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
3192 + nla_total_size(sizeof(struct rta_cacheinfo))
3193 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
3194 + nla_total_size(1) /* RTA_PREF */
3195 + lwtunnel_get_encap_size(rt->dst.lwtstate);
3196 }
3197
3198 static int rt6_fill_node(struct net *net,
3199 struct sk_buff *skb, struct rt6_info *rt,
3200 struct in6_addr *dst, struct in6_addr *src,
3201 int iif, int type, u32 portid, u32 seq,
3202 int prefix, int nowait, unsigned int flags)
3203 {
3204 u32 metrics[RTAX_MAX];
3205 struct rtmsg *rtm;
3206 struct nlmsghdr *nlh;
3207 long expires;
3208 u32 table;
3209
3210 if (prefix) { /* user wants prefix routes only */
3211 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
3212 /* success since this is not a prefix route */
3213 return 1;
3214 }
3215 }
3216
3217 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
3218 if (!nlh)
3219 return -EMSGSIZE;
3220
3221 rtm = nlmsg_data(nlh);
3222 rtm->rtm_family = AF_INET6;
3223 rtm->rtm_dst_len = rt->rt6i_dst.plen;
3224 rtm->rtm_src_len = rt->rt6i_src.plen;
3225 rtm->rtm_tos = 0;
3226 if (rt->rt6i_table)
3227 table = rt->rt6i_table->tb6_id;
3228 else
3229 table = RT6_TABLE_UNSPEC;
3230 rtm->rtm_table = table;
3231 if (nla_put_u32(skb, RTA_TABLE, table))
3232 goto nla_put_failure;
3233 if (rt->rt6i_flags & RTF_REJECT) {
3234 switch (rt->dst.error) {
3235 case -EINVAL:
3236 rtm->rtm_type = RTN_BLACKHOLE;
3237 break;
3238 case -EACCES:
3239 rtm->rtm_type = RTN_PROHIBIT;
3240 break;
3241 case -EAGAIN:
3242 rtm->rtm_type = RTN_THROW;
3243 break;
3244 default:
3245 rtm->rtm_type = RTN_UNREACHABLE;
3246 break;
3247 }
3248 }
3249 else if (rt->rt6i_flags & RTF_LOCAL)
3250 rtm->rtm_type = RTN_LOCAL;
3251 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
3252 rtm->rtm_type = RTN_LOCAL;
3253 else
3254 rtm->rtm_type = RTN_UNICAST;
3255 rtm->rtm_flags = 0;
3256 if (!netif_carrier_ok(rt->dst.dev)) {
3257 rtm->rtm_flags |= RTNH_F_LINKDOWN;
3258 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
3259 rtm->rtm_flags |= RTNH_F_DEAD;
3260 }
3261 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
3262 rtm->rtm_protocol = rt->rt6i_protocol;
3263 if (rt->rt6i_flags & RTF_DYNAMIC)
3264 rtm->rtm_protocol = RTPROT_REDIRECT;
3265 else if (rt->rt6i_flags & RTF_ADDRCONF) {
3266 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
3267 rtm->rtm_protocol = RTPROT_RA;
3268 else
3269 rtm->rtm_protocol = RTPROT_KERNEL;
3270 }
3271
3272 if (rt->rt6i_flags & RTF_CACHE)
3273 rtm->rtm_flags |= RTM_F_CLONED;
3274
3275 if (dst) {
3276 if (nla_put_in6_addr(skb, RTA_DST, dst))
3277 goto nla_put_failure;
3278 rtm->rtm_dst_len = 128;
3279 } else if (rtm->rtm_dst_len)
3280 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
3281 goto nla_put_failure;
3282 #ifdef CONFIG_IPV6_SUBTREES
3283 if (src) {
3284 if (nla_put_in6_addr(skb, RTA_SRC, src))
3285 goto nla_put_failure;
3286 rtm->rtm_src_len = 128;
3287 } else if (rtm->rtm_src_len &&
3288 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
3289 goto nla_put_failure;
3290 #endif
3291 if (iif) {
3292 #ifdef CONFIG_IPV6_MROUTE
3293 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
3294 int err = ip6mr_get_route(net, skb, rtm, nowait,
3295 portid);
3296
3297 if (err <= 0) {
3298 if (!nowait) {
3299 if (err == 0)
3300 return 0;
3301 goto nla_put_failure;
3302 } else {
3303 if (err == -EMSGSIZE)
3304 goto nla_put_failure;
3305 }
3306 }
3307 } else
3308 #endif
3309 if (nla_put_u32(skb, RTA_IIF, iif))
3310 goto nla_put_failure;
3311 } else if (dst) {
3312 struct in6_addr saddr_buf;
3313 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
3314 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3315 goto nla_put_failure;
3316 }
3317
3318 if (rt->rt6i_prefsrc.plen) {
3319 struct in6_addr saddr_buf;
3320 saddr_buf = rt->rt6i_prefsrc.addr;
3321 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3322 goto nla_put_failure;
3323 }
3324
3325 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3326 if (rt->rt6i_pmtu)
3327 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
3328 if (rtnetlink_put_metrics(skb, metrics) < 0)
3329 goto nla_put_failure;
3330
3331 if (rt->rt6i_flags & RTF_GATEWAY) {
3332 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
3333 goto nla_put_failure;
3334 }
3335
3336 if (rt->dst.dev &&
3337 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
3338 goto nla_put_failure;
3339 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
3340 goto nla_put_failure;
3341
3342 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
3343
3344 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
3345 goto nla_put_failure;
3346
3347 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3348 goto nla_put_failure;
3349
3350 if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
3351 goto nla_put_failure;
3352
3353 nlmsg_end(skb, nlh);
3354 return 0;
3355
3356 nla_put_failure:
3357 nlmsg_cancel(skb, nlh);
3358 return -EMSGSIZE;
3359 }
3360
3361 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
3362 {
3363 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
3364 int prefix;
3365
3366 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
3367 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
3368 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
3369 } else
3370 prefix = 0;
3371
3372 return rt6_fill_node(arg->net,
3373 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
3374 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
3375 prefix, 0, NLM_F_MULTI);
3376 }
3377
3378 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
3379 {
3380 struct net *net = sock_net(in_skb->sk);
3381 struct nlattr *tb[RTA_MAX+1];
3382 struct rt6_info *rt;
3383 struct sk_buff *skb;
3384 struct rtmsg *rtm;
3385 struct flowi6 fl6;
3386 int err, iif = 0, oif = 0;
3387
3388 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
3389 if (err < 0)
3390 goto errout;
3391
3392 err = -EINVAL;
3393 memset(&fl6, 0, sizeof(fl6));
3394 rtm = nlmsg_data(nlh);
3395 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
3396
3397 if (tb[RTA_SRC]) {
3398 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
3399 goto errout;
3400
3401 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
3402 }
3403
3404 if (tb[RTA_DST]) {
3405 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
3406 goto errout;
3407
3408 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
3409 }
3410
3411 if (tb[RTA_IIF])
3412 iif = nla_get_u32(tb[RTA_IIF]);
3413
3414 if (tb[RTA_OIF])
3415 oif = nla_get_u32(tb[RTA_OIF]);
3416
3417 if (tb[RTA_MARK])
3418 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
3419
3420 if (tb[RTA_UID])
3421 fl6.flowi6_uid = make_kuid(current_user_ns(),
3422 nla_get_u32(tb[RTA_UID]));
3423 else
3424 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
3425
3426 if (iif) {
3427 struct net_device *dev;
3428 int flags = 0;
3429
3430 dev = __dev_get_by_index(net, iif);
3431 if (!dev) {
3432 err = -ENODEV;
3433 goto errout;
3434 }
3435
3436 fl6.flowi6_iif = iif;
3437
3438 if (!ipv6_addr_any(&fl6.saddr))
3439 flags |= RT6_LOOKUP_F_HAS_SADDR;
3440
3441 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
3442 flags);
3443 } else {
3444 fl6.flowi6_oif = oif;
3445
3446 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
3447 }
3448
3449 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3450 if (!skb) {
3451 ip6_rt_put(rt);
3452 err = -ENOBUFS;
3453 goto errout;
3454 }
3455
3456 /* Reserve room for dummy headers; this skb can pass
3457 through a good chunk of the routing engine.
3458 */
3459 skb_reset_mac_header(skb);
3460 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
3461
3462 skb_dst_set(skb, &rt->dst);
3463
3464 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
3465 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3466 nlh->nlmsg_seq, 0, 0, 0);
3467 if (err < 0) {
3468 kfree_skb(skb);
3469 goto errout;
3470 }
3471
3472 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3473 errout:
3474 return err;
3475 }
3476
3477 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
3478 unsigned int nlm_flags)
3479 {
3480 struct sk_buff *skb;
3481 struct net *net = info->nl_net;
3482 u32 seq;
3483 int err;
3484
3485 err = -ENOBUFS;
3486 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3487
3488 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3489 if (!skb)
3490 goto errout;
3491
3492 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3493 event, info->portid, seq, 0, 0, nlm_flags);
3494 if (err < 0) {
3495 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3496 WARN_ON(err == -EMSGSIZE);
3497 kfree_skb(skb);
3498 goto errout;
3499 }
3500 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3501 info->nlh, gfp_any());
3502 return;
3503 errout:
3504 if (err < 0)
3505 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
3506 }
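/* Hedged userspace sketch: receiving the notifications sent above means
 * binding an AF_NETLINK socket to the IPv6 route multicast group (legacy
 * bitmask name RTMGRP_IPV6_ROUTE; illustrative only):
 *
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_IPV6_ROUTE,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *	// recv() now yields the RTM_NEWROUTE/RTM_DELROUTE messages
 *	// built by rt6_fill_node() above.
 */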
3507
3508 static int ip6_route_dev_notify(struct notifier_block *this,
3509 unsigned long event, void *ptr)
3510 {
3511 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3512 struct net *net = dev_net(dev);
3513
3514 if (!(dev->flags & IFF_LOOPBACK))
3515 return NOTIFY_OK;
3516
3517 if (event == NETDEV_REGISTER) {
3518 net->ipv6.ip6_null_entry->dst.dev = dev;
3519 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
3520 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3521 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
3522 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
3523 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
3524 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
3525 #endif
3526 } else if (event == NETDEV_UNREGISTER) {
3527 in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
3528 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3529 in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
3530 in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
3531 #endif
3532 }
3533
3534 return NOTIFY_OK;
3535 }
3536
3537 /*
3538 * /proc
3539 */
3540
3541 #ifdef CONFIG_PROC_FS
3542
3543 static const struct file_operations ipv6_route_proc_fops = {
3544 .owner = THIS_MODULE,
3545 .open = ipv6_route_open,
3546 .read = seq_read,
3547 .llseek = seq_lseek,
3548 .release = seq_release_net,
3549 };
3550
3551 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
3552 {
3553 struct net *net = (struct net *)seq->private;
3554 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
3555 net->ipv6.rt6_stats->fib_nodes,
3556 net->ipv6.rt6_stats->fib_route_nodes,
3557 net->ipv6.rt6_stats->fib_rt_alloc,
3558 net->ipv6.rt6_stats->fib_rt_entries,
3559 net->ipv6.rt6_stats->fib_rt_cache,
3560 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
3561 net->ipv6.rt6_stats->fib_discarded_routes);
3562
3563 return 0;
3564 }
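/* Hedged userspace sketch: the seq_printf() above emits seven hex fields,
 * so /proc/net/rt6_stats can be parsed like this (illustrative only):
 *
 *	#include <stdio.h>
 *
 *	unsigned int v[7];
 *	FILE *f = fopen("/proc/net/rt6_stats", "r");
 *
 *	if (f && fscanf(f, "%x %x %x %x %x %x %x",
 *			&v[0], &v[1], &v[2], &v[3],
 *			&v[4], &v[5], &v[6]) == 7)
 *		printf("fib_nodes=%u fib_rt_entries=%u\n", v[0], v[3]);
 */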
3565
3566 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
3567 {
3568 return single_open_net(inode, file, rt6_stats_seq_show);
3569 }
3570
3571 static const struct file_operations rt6_stats_seq_fops = {
3572 .owner = THIS_MODULE,
3573 .open = rt6_stats_seq_open,
3574 .read = seq_read,
3575 .llseek = seq_lseek,
3576 .release = single_release_net,
3577 };
3578 #endif /* CONFIG_PROC_FS */
3579
3580 #ifdef CONFIG_SYSCTL
3581
3582 static
3583 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
3584 void __user *buffer, size_t *lenp, loff_t *ppos)
3585 {
3586 struct net *net;
3587 int delay;
3588 if (!write)
3589 return -EINVAL;
3590
3591 net = (struct net *)ctl->extra1;
3592 delay = net->ipv6.sysctl.flush_delay;
3593 proc_dointvec(ctl, write, buffer, lenp, ppos);
3594 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
3595 return 0;
3596 }
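/* Hedged userspace sketch: writing a delay to the "flush" sysctl defined
 * below triggers the fib6_run_gc() call above (illustrative only; requires
 * privilege, since the file is mode 0200):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/sys/net/ipv6/route/flush", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */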
3597
3598 struct ctl_table ipv6_route_table_template[] = {
3599 {
3600 .procname = "flush",
3601 .data = &init_net.ipv6.sysctl.flush_delay,
3602 .maxlen = sizeof(int),
3603 .mode = 0200,
3604 .proc_handler = ipv6_sysctl_rtcache_flush
3605 },
3606 {
3607 .procname = "gc_thresh",
3608 .data = &ip6_dst_ops_template.gc_thresh,
3609 .maxlen = sizeof(int),
3610 .mode = 0644,
3611 .proc_handler = proc_dointvec,
3612 },
3613 {
3614 .procname = "max_size",
3615 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
3616 .maxlen = sizeof(int),
3617 .mode = 0644,
3618 .proc_handler = proc_dointvec,
3619 },
3620 {
3621 .procname = "gc_min_interval",
3622 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3623 .maxlen = sizeof(int),
3624 .mode = 0644,
3625 .proc_handler = proc_dointvec_jiffies,
3626 },
3627 {
3628 .procname = "gc_timeout",
3629 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
3630 .maxlen = sizeof(int),
3631 .mode = 0644,
3632 .proc_handler = proc_dointvec_jiffies,
3633 },
3634 {
3635 .procname = "gc_interval",
3636 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
3637 .maxlen = sizeof(int),
3638 .mode = 0644,
3639 .proc_handler = proc_dointvec_jiffies,
3640 },
3641 {
3642 .procname = "gc_elasticity",
3643 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
3644 .maxlen = sizeof(int),
3645 .mode = 0644,
3646 .proc_handler = proc_dointvec,
3647 },
3648 {
3649 .procname = "mtu_expires",
3650 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
3651 .maxlen = sizeof(int),
3652 .mode = 0644,
3653 .proc_handler = proc_dointvec_jiffies,
3654 },
3655 {
3656 .procname = "min_adv_mss",
3657 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
3658 .maxlen = sizeof(int),
3659 .mode = 0644,
3660 .proc_handler = proc_dointvec,
3661 },
3662 {
3663 .procname = "gc_min_interval_ms",
3664 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3665 .maxlen = sizeof(int),
3666 .mode = 0644,
3667 .proc_handler = proc_dointvec_ms_jiffies,
3668 },
3669 { }
3670 };
3671
3672 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
3673 {
3674 struct ctl_table *table;
3675
3676 table = kmemdup(ipv6_route_table_template,
3677 sizeof(ipv6_route_table_template),
3678 GFP_KERNEL);
3679
3680 if (table) {
3681 table[0].data = &net->ipv6.sysctl.flush_delay;
3682 table[0].extra1 = net;
3683 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
3684 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
3685 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3686 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
3687 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
3688 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
3689 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
3690 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
3691 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3692
3693 /* Don't export sysctls to unprivileged users */
3694 if (net->user_ns != &init_user_ns)
3695 table[0].procname = NULL;
3696 }
3697
3698 return table;
3699 }
3700 #endif
3701
3702 static int __net_init ip6_route_net_init(struct net *net)
3703 {
3704 int ret = -ENOMEM;
3705
3706 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3707 sizeof(net->ipv6.ip6_dst_ops));
3708
3709 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3710 goto out_ip6_dst_ops;
3711
3712 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3713 sizeof(*net->ipv6.ip6_null_entry),
3714 GFP_KERNEL);
3715 if (!net->ipv6.ip6_null_entry)
3716 goto out_ip6_dst_entries;
3717 net->ipv6.ip6_null_entry->dst.path =
3718 (struct dst_entry *)net->ipv6.ip6_null_entry;
3719 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3720 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3721 ip6_template_metrics, true);
3722
3723 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3724 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3725 sizeof(*net->ipv6.ip6_prohibit_entry),
3726 GFP_KERNEL);
3727 if (!net->ipv6.ip6_prohibit_entry)
3728 goto out_ip6_null_entry;
3729 net->ipv6.ip6_prohibit_entry->dst.path =
3730 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3731 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3732 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3733 ip6_template_metrics, true);
3734
3735 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3736 sizeof(*net->ipv6.ip6_blk_hole_entry),
3737 GFP_KERNEL);
3738 if (!net->ipv6.ip6_blk_hole_entry)
3739 goto out_ip6_prohibit_entry;
3740 net->ipv6.ip6_blk_hole_entry->dst.path =
3741 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3742 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3743 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3744 ip6_template_metrics, true);
3745 #endif
3746
3747 net->ipv6.sysctl.flush_delay = 0;
3748 net->ipv6.sysctl.ip6_rt_max_size = 4096;
3749 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3750 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3751 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3752 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3753 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3754 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3755
3756 net->ipv6.ip6_rt_gc_expire = 30*HZ;
3757
3758 ret = 0;
3759 out:
3760 return ret;
3761
3762 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3763 out_ip6_prohibit_entry:
3764 kfree(net->ipv6.ip6_prohibit_entry);
3765 out_ip6_null_entry:
3766 kfree(net->ipv6.ip6_null_entry);
3767 #endif
3768 out_ip6_dst_entries:
3769 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3770 out_ip6_dst_ops:
3771 goto out;
3772 }
3773
3774 static void __net_exit ip6_route_net_exit(struct net *net)
3775 {
3776 kfree(net->ipv6.ip6_null_entry);
3777 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3778 kfree(net->ipv6.ip6_prohibit_entry);
3779 kfree(net->ipv6.ip6_blk_hole_entry);
3780 #endif
3781 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3782 }
3783
3784 static int __net_init ip6_route_net_init_late(struct net *net)
3785 {
3786 #ifdef CONFIG_PROC_FS
3787 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3788 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3789 #endif
3790 return 0;
3791 }
3792
3793 static void __net_exit ip6_route_net_exit_late(struct net *net)
3794 {
3795 #ifdef CONFIG_PROC_FS
3796 remove_proc_entry("ipv6_route", net->proc_net);
3797 remove_proc_entry("rt6_stats", net->proc_net);
3798 #endif
3799 }
3800
3801 static struct pernet_operations ip6_route_net_ops = {
3802 .init = ip6_route_net_init,
3803 .exit = ip6_route_net_exit,
3804 };
3805
3806 static int __net_init ipv6_inetpeer_init(struct net *net)
3807 {
3808 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3809
3810 if (!bp)
3811 return -ENOMEM;
3812 inet_peer_base_init(bp);
3813 net->ipv6.peers = bp;
3814 return 0;
3815 }
3816
3817 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3818 {
3819 struct inet_peer_base *bp = net->ipv6.peers;
3820
3821 net->ipv6.peers = NULL;
3822 inetpeer_invalidate_tree(bp);
3823 kfree(bp);
3824 }
3825
3826 static struct pernet_operations ipv6_inetpeer_ops = {
3827 .init = ipv6_inetpeer_init,
3828 .exit = ipv6_inetpeer_exit,
3829 };
3830
3831 static struct pernet_operations ip6_route_net_late_ops = {
3832 .init = ip6_route_net_init_late,
3833 .exit = ip6_route_net_exit_late,
3834 };
3835
3836 static struct notifier_block ip6_route_dev_notifier = {
3837 .notifier_call = ip6_route_dev_notify,
3838 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
3839 };
3840
3841 void __init ip6_route_init_special_entries(void)
3842 {
3843 /* The loopback device is registered before this code runs, so
3844 * the loopback reference in rt6_info is not taken automatically;
3845 * do it manually for init_net */
3846 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3847 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3848 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3849 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3850 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3851 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3852 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3853 #endif
3854 }
3855
3856 int __init ip6_route_init(void)
3857 {
3858 int ret;
3859 int cpu;
3860
3861 ret = -ENOMEM;
3862 ip6_dst_ops_template.kmem_cachep =
3863 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3864 SLAB_HWCACHE_ALIGN, NULL);
3865 if (!ip6_dst_ops_template.kmem_cachep)
3866 goto out;
3867
3868 ret = dst_entries_init(&ip6_dst_blackhole_ops);
3869 if (ret)
3870 goto out_kmem_cache;
3871
3872 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3873 if (ret)
3874 goto out_dst_entries;
3875
3876 ret = register_pernet_subsys(&ip6_route_net_ops);
3877 if (ret)
3878 goto out_register_inetpeer;
3879
3880 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3881
3882 ret = fib6_init();
3883 if (ret)
3884 goto out_register_subsys;
3885
3886 ret = xfrm6_init();
3887 if (ret)
3888 goto out_fib6_init;
3889
3890 ret = fib6_rules_init();
3891 if (ret)
3892 goto xfrm6_init;
3893
3894 ret = register_pernet_subsys(&ip6_route_net_late_ops);
3895 if (ret)
3896 goto fib6_rules_init;
3897
3898 ret = -ENOBUFS;
3899 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3900 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3901 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3902 goto out_register_late_subsys;
3903
3904 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3905 if (ret)
3906 goto out_register_late_subsys;
3907
3908 for_each_possible_cpu(cpu) {
3909 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
3910
3911 INIT_LIST_HEAD(&ul->head);
3912 spin_lock_init(&ul->lock);
3913 }
3914
3915 out:
3916 return ret;
3917
3918 out_register_late_subsys:
3919 unregister_pernet_subsys(&ip6_route_net_late_ops);
3920 fib6_rules_init:
3921 fib6_rules_cleanup();
3922 xfrm6_init:
3923 xfrm6_fini();
3924 out_fib6_init:
3925 fib6_gc_cleanup();
3926 out_register_subsys:
3927 unregister_pernet_subsys(&ip6_route_net_ops);
3928 out_register_inetpeer:
3929 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3930 out_dst_entries:
3931 dst_entries_destroy(&ip6_dst_blackhole_ops);
3932 out_kmem_cache:
3933 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3934 goto out;
3935 }
3936
3937 void ip6_route_cleanup(void)
3938 {
3939 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3940 unregister_pernet_subsys(&ip6_route_net_late_ops);
3941 fib6_rules_cleanup();
3942 xfrm6_fini();
3943 fib6_gc_cleanup();
3944 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3945 unregister_pernet_subsys(&ip6_route_net_ops);
3946 dst_entries_destroy(&ip6_dst_blackhole_ops);
3947 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3948 }