net/ipv6/route.c
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 /* Changes:
17 *
18 * YOSHIFUJI Hideaki @USAGI
19 * reworked default router selection.
20 * - respect outgoing interface
21 * - select from (probably) reachable routers (i.e.
22 * routers in REACHABLE, STALE, DELAY or PROBE states).
23 * - always select the same router if it is (probably)
24 * reachable. otherwise, round-robin the list.
25 * Ville Nuorvala
26 * Fixed routing subtrees.
27 */
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/times.h>
33 #include <linux/socket.h>
34 #include <linux/sockios.h>
35 #include <linux/net.h>
36 #include <linux/route.h>
37 #include <linux/netdevice.h>
38 #include <linux/in6.h>
39 #include <linux/init.h>
40 #include <linux/if_arp.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <linux/nsproxy.h>
44 #include <net/net_namespace.h>
45 #include <net/snmp.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
51 #include <net/tcp.h>
52 #include <linux/rtnetlink.h>
53 #include <net/dst.h>
54 #include <net/xfrm.h>
55 #include <net/netevent.h>
56 #include <net/netlink.h>
57
58 #include <asm/uaccess.h>
59
60 #ifdef CONFIG_SYSCTL
61 #include <linux/sysctl.h>
62 #endif
63
64 /* Set to 3 to get tracing. */
65 #define RT6_DEBUG 2
66
67 #if RT6_DEBUG >= 3
68 #define RDBG(x) printk x
69 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
70 #else
71 #define RDBG(x)
72 #define RT6_TRACE(x...) do { ; } while (0)
73 #endif
74
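/*
 * With CLONE_OFFLINK_ROUTE set to 0, off-link (gatewayed/NONEXTHOP) routes
 * are not cloned into per-destination RTF_CACHE entries by ip6_pol_route();
 * the original network route is used directly (see the #if further down).
 */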
75 #define CLONE_OFFLINK_ROUTE 0
76
77 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
78 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void ip6_dst_destroy(struct dst_entry *);
81 static void ip6_dst_ifdown(struct dst_entry *,
82 struct net_device *dev, int how);
83 static int ip6_dst_gc(struct dst_ops *ops);
84
85 static int ip6_pkt_discard(struct sk_buff *skb);
86 static int ip6_pkt_discard_out(struct sk_buff *skb);
87 static void ip6_link_failure(struct sk_buff *skb);
88 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
89
90 #ifdef CONFIG_IPV6_ROUTE_INFO
91 static struct rt6_info *rt6_add_route_info(struct net *net,
92 struct in6_addr *prefix, int prefixlen,
93 struct in6_addr *gwaddr, int ifindex,
94 unsigned pref);
95 static struct rt6_info *rt6_get_route_info(struct net *net,
96 struct in6_addr *prefix, int prefixlen,
97 struct in6_addr *gwaddr, int ifindex);
98 #endif
99
100 static struct dst_ops ip6_dst_ops_template = {
101 .family = AF_INET6,
102 .protocol = __constant_htons(ETH_P_IPV6),
103 .gc = ip6_dst_gc,
104 .gc_thresh = 1024,
105 .check = ip6_dst_check,
106 .destroy = ip6_dst_destroy,
107 .ifdown = ip6_dst_ifdown,
108 .negative_advice = ip6_negative_advice,
109 .link_failure = ip6_link_failure,
110 .update_pmtu = ip6_rt_update_pmtu,
111 .local_out = ip6_local_out,
112 .entry_size = sizeof(struct rt6_info),
113 .entries = ATOMIC_INIT(0),
114 };
115
116 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
117 {
118 }
119
120 static struct dst_ops ip6_dst_blackhole_ops = {
121 .family = AF_INET6,
122 .protocol = __constant_htons(ETH_P_IPV6),
123 .destroy = ip6_dst_destroy,
124 .check = ip6_dst_check,
125 .update_pmtu = ip6_rt_blackhole_update_pmtu,
126 .entry_size = sizeof(struct rt6_info),
127 .entries = ATOMIC_INIT(0),
128 };
129
130 static struct rt6_info ip6_null_entry_template = {
131 .u = {
132 .dst = {
133 .__refcnt = ATOMIC_INIT(1),
134 .__use = 1,
135 .obsolete = -1,
136 .error = -ENETUNREACH,
137 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
138 .input = ip6_pkt_discard,
139 .output = ip6_pkt_discard_out,
140 }
141 },
142 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
143 .rt6i_metric = ~(u32) 0,
144 .rt6i_ref = ATOMIC_INIT(1),
145 };
146
147 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
148
149 static int ip6_pkt_prohibit(struct sk_buff *skb);
150 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
151
152 struct rt6_info ip6_prohibit_entry_template = {
153 .u = {
154 .dst = {
155 .__refcnt = ATOMIC_INIT(1),
156 .__use = 1,
157 .obsolete = -1,
158 .error = -EACCES,
159 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
160 .input = ip6_pkt_prohibit,
161 .output = ip6_pkt_prohibit_out,
162 }
163 },
164 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
165 .rt6i_metric = ~(u32) 0,
166 .rt6i_ref = ATOMIC_INIT(1),
167 };
168
169 static struct rt6_info ip6_blk_hole_entry_template = {
170 .u = {
171 .dst = {
172 .__refcnt = ATOMIC_INIT(1),
173 .__use = 1,
174 .obsolete = -1,
175 .error = -EINVAL,
176 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
177 .input = dst_discard,
178 .output = dst_discard,
179 }
180 },
181 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
182 .rt6i_metric = ~(u32) 0,
183 .rt6i_ref = ATOMIC_INIT(1),
184 };
185
186 #endif
187
188 /* allocate dst with ip6_dst_ops */
189 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
190 {
191 return (struct rt6_info *)dst_alloc(ops);
192 }
193
194 static void ip6_dst_destroy(struct dst_entry *dst)
195 {
196 struct rt6_info *rt = (struct rt6_info *)dst;
197 struct inet6_dev *idev = rt->rt6i_idev;
198
199 if (idev != NULL) {
200 rt->rt6i_idev = NULL;
201 in6_dev_put(idev);
202 }
203 }
204
205 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
206 int how)
207 {
208 struct rt6_info *rt = (struct rt6_info *)dst;
209 struct inet6_dev *idev = rt->rt6i_idev;
210 struct net_device *loopback_dev =
211 dev_net(dev)->loopback_dev;
212
213 if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
214 struct inet6_dev *loopback_idev =
215 in6_dev_get(loopback_dev);
216 if (loopback_idev != NULL) {
217 rt->rt6i_idev = loopback_idev;
218 in6_dev_put(idev);
219 }
220 }
221 }
222
223 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
224 {
225 return (rt->rt6i_flags & RTF_EXPIRES &&
226 time_after(jiffies, rt->rt6i_expires));
227 }
228
229 static inline int rt6_need_strict(struct in6_addr *daddr)
230 {
231 return (ipv6_addr_type(daddr) &
232 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
233 }
234
235 /*
236  *	Route lookup helpers. The relevant table->tb6_lock is assumed to be held by the caller.
237 */
238
239 static inline struct rt6_info *rt6_device_match(struct net *net,
240 struct rt6_info *rt,
241 int oif,
242 int strict)
243 {
244 struct rt6_info *local = NULL;
245 struct rt6_info *sprt;
246
247 if (oif) {
248 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
249 struct net_device *dev = sprt->rt6i_dev;
250 if (dev->ifindex == oif)
251 return sprt;
252 if (dev->flags & IFF_LOOPBACK) {
253 if (sprt->rt6i_idev == NULL ||
254 sprt->rt6i_idev->dev->ifindex != oif) {
255 if (strict && oif)
256 continue;
257 if (local && (!oif ||
258 local->rt6i_idev->dev->ifindex == oif))
259 continue;
260 }
261 local = sprt;
262 }
263 }
264
265 if (local)
266 return local;
267
268 if (strict)
269 return net->ipv6.ip6_null_entry;
270 }
271 return rt;
272 }
273
274 #ifdef CONFIG_IPV6_ROUTER_PREF
275 static void rt6_probe(struct rt6_info *rt)
276 {
277 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
278 /*
279 * Okay, this does not seem to be appropriate
280 * for now, however, we need to check if it
281 * is really so; aka Router Reachability Probing.
282 *
283 * Router Reachability Probe MUST be rate-limited
284 * to no more than one per minute.
285 */
286 if (!neigh || (neigh->nud_state & NUD_VALID))
287 return;
288 read_lock_bh(&neigh->lock);
289 if (!(neigh->nud_state & NUD_VALID) &&
290 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
291 struct in6_addr mcaddr;
292 struct in6_addr *target;
293
294 neigh->updated = jiffies;
295 read_unlock_bh(&neigh->lock);
296
297 target = (struct in6_addr *)&neigh->primary_key;
298 addrconf_addr_solict_mult(target, &mcaddr);
299 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
300 } else
301 read_unlock_bh(&neigh->lock);
302 }
303 #else
304 static inline void rt6_probe(struct rt6_info *rt)
305 {
306 return;
307 }
308 #endif
309
310 /*
311 * Default Router Selection (RFC 2461 6.3.6)
312 */
313 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
314 {
315 struct net_device *dev = rt->rt6i_dev;
316 if (!oif || dev->ifindex == oif)
317 return 2;
318 if ((dev->flags & IFF_LOOPBACK) &&
319 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
320 return 1;
321 return 0;
322 }
323
324 static inline int rt6_check_neigh(struct rt6_info *rt)
325 {
326 struct neighbour *neigh = rt->rt6i_nexthop;
327 int m;
328 if (rt->rt6i_flags & RTF_NONEXTHOP ||
329 !(rt->rt6i_flags & RTF_GATEWAY))
330 m = 1;
331 else if (neigh) {
332 read_lock_bh(&neigh->lock);
333 if (neigh->nud_state & NUD_VALID)
334 m = 2;
335 #ifdef CONFIG_IPV6_ROUTER_PREF
336 else if (neigh->nud_state & NUD_FAILED)
337 m = 0;
338 #endif
339 else
340 m = 1;
341 read_unlock_bh(&neigh->lock);
342 } else
343 m = 0;
344 return m;
345 }
346
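/*
 * Combine the outgoing-interface match (rt6_check_dev) with the router
 * preference bits, where configured, into a single score; a route whose
 * next hop fails rt6_check_neigh() is rejected outright when
 * RT6_LOOKUP_F_REACHABLE is requested.
 */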
347 static int rt6_score_route(struct rt6_info *rt, int oif,
348 int strict)
349 {
350 int m, n;
351
352 m = rt6_check_dev(rt, oif);
353 if (!m && (strict & RT6_LOOKUP_F_IFACE))
354 return -1;
355 #ifdef CONFIG_IPV6_ROUTER_PREF
356 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
357 #endif
358 n = rt6_check_neigh(rt);
359 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
360 return -1;
361 return m;
362 }
363
364 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
365 int *mpri, struct rt6_info *match)
366 {
367 int m;
368
369 if (rt6_check_expired(rt))
370 goto out;
371
372 m = rt6_score_route(rt, oif, strict);
373 if (m < 0)
374 goto out;
375
376 if (m > *mpri) {
377 if (strict & RT6_LOOKUP_F_REACHABLE)
378 rt6_probe(match);
379 *mpri = m;
380 match = rt;
381 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
382 rt6_probe(rt);
383 }
384
385 out:
386 return match;
387 }
388
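/*
 * Walk every route in this fib6_node that shares the given metric,
 * starting at rr_head and wrapping around via fn->leaf, and return the
 * best-scoring candidate according to rt6_score_route().
 */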
389 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
390 struct rt6_info *rr_head,
391 u32 metric, int oif, int strict)
392 {
393 struct rt6_info *rt, *match;
394 int mpri = -1;
395
396 match = NULL;
397 for (rt = rr_head; rt && rt->rt6i_metric == metric;
398 rt = rt->u.dst.rt6_next)
399 match = find_match(rt, oif, strict, &mpri, match);
400 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
401 rt = rt->u.dst.rt6_next)
402 match = find_match(rt, oif, strict, &mpri, match);
403
404 return match;
405 }
406
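/*
 * Pick the route to use from fn, honouring the round-robin pointer
 * fn->rr_ptr.  If nothing matched while reachability was required,
 * advance rr_ptr to the next route of the same metric so that later
 * lookups try a different router.
 */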
407 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
408 {
409 struct rt6_info *match, *rt0;
410 struct net *net;
411
412 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
413 __func__, fn->leaf, oif);
414
415 rt0 = fn->rr_ptr;
416 if (!rt0)
417 fn->rr_ptr = rt0 = fn->leaf;
418
419 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
420
421 if (!match &&
422 (strict & RT6_LOOKUP_F_REACHABLE)) {
423 struct rt6_info *next = rt0->u.dst.rt6_next;
424
425 /* no entries matched; do round-robin */
426 if (!next || next->rt6i_metric != rt0->rt6i_metric)
427 next = fn->leaf;
428
429 if (next != rt0)
430 fn->rr_ptr = next;
431 }
432
433 RT6_TRACE("%s() => %p\n",
434 __func__, match);
435
436 net = dev_net(rt0->rt6i_dev);
437 return (match ? match : net->ipv6.ip6_null_entry);
438 }
439
440 #ifdef CONFIG_IPV6_ROUTE_INFO
441 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
442 struct in6_addr *gwaddr)
443 {
444 struct net *net = dev_net(dev);
445 struct route_info *rinfo = (struct route_info *) opt;
446 struct in6_addr prefix_buf, *prefix;
447 unsigned int pref;
448 u32 lifetime;
449 struct rt6_info *rt;
450
451 if (len < sizeof(struct route_info)) {
452 return -EINVAL;
453 }
454
455 /* Sanity check for prefix_len and length */
456 if (rinfo->length > 3) {
457 return -EINVAL;
458 } else if (rinfo->prefix_len > 128) {
459 return -EINVAL;
460 } else if (rinfo->prefix_len > 64) {
461 if (rinfo->length < 2) {
462 return -EINVAL;
463 }
464 } else if (rinfo->prefix_len > 0) {
465 if (rinfo->length < 1) {
466 return -EINVAL;
467 }
468 }
469
470 pref = rinfo->route_pref;
471 if (pref == ICMPV6_ROUTER_PREF_INVALID)
472 pref = ICMPV6_ROUTER_PREF_MEDIUM;
473
474 lifetime = ntohl(rinfo->lifetime);
475 if (lifetime == 0xffffffff) {
476 /* infinity */
477 } else if (lifetime > 0x7fffffff/HZ) {
478 /* Avoid arithmetic overflow */
479 lifetime = 0x7fffffff/HZ - 1;
480 }
481
482 if (rinfo->length == 3)
483 prefix = (struct in6_addr *)rinfo->prefix;
484 else {
485 /* this function is safe */
486 ipv6_addr_prefix(&prefix_buf,
487 (struct in6_addr *)rinfo->prefix,
488 rinfo->prefix_len);
489 prefix = &prefix_buf;
490 }
491
492 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
493 dev->ifindex);
494
495 if (rt && !lifetime) {
496 ip6_del_rt(rt);
497 rt = NULL;
498 }
499
500 if (!rt && lifetime)
501 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
502 pref);
503 else if (rt)
504 rt->rt6i_flags = RTF_ROUTEINFO |
505 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
506
507 if (rt) {
508 if (lifetime == 0xffffffff) {
509 rt->rt6i_flags &= ~RTF_EXPIRES;
510 } else {
511 rt->rt6i_expires = jiffies + HZ * lifetime;
512 rt->rt6i_flags |= RTF_EXPIRES;
513 }
514 dst_release(&rt->u.dst);
515 }
516 return 0;
517 }
518 #endif
519
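/*
 * BACKTRACK: if the lookup landed on the null entry, walk back up the
 * fib6 tree (consulting source-routing subtrees where present) until a
 * node carrying route information (RTN_RTINFO) is found, then jump back
 * to the caller's "restart" label; reaching the tree root bails out
 * through the caller's "out" label instead.
 */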
520 #define BACKTRACK(__net, saddr) \
521 do { \
522 if (rt == __net->ipv6.ip6_null_entry) { \
523 struct fib6_node *pn; \
524 while (1) { \
525 if (fn->fn_flags & RTN_TL_ROOT) \
526 goto out; \
527 pn = fn->parent; \
528 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
529 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
530 else \
531 fn = pn; \
532 if (fn->fn_flags & RTN_RTINFO) \
533 goto restart; \
534 } \
535 } \
536 } while(0)
537
538 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
539 struct fib6_table *table,
540 struct flowi *fl, int flags)
541 {
542 struct fib6_node *fn;
543 struct rt6_info *rt;
544
545 read_lock_bh(&table->tb6_lock);
546 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
547 restart:
548 rt = fn->leaf;
549 rt = rt6_device_match(net, rt, fl->oif, flags);
550 BACKTRACK(net, &fl->fl6_src);
551 out:
552 dst_use(&rt->u.dst, jiffies);
553 read_unlock_bh(&table->tb6_lock);
554 return rt;
555
556 }
557
558 struct rt6_info *rt6_lookup(struct net *net, struct in6_addr *daddr,
559 struct in6_addr *saddr, int oif, int strict)
560 {
561 struct flowi fl = {
562 .oif = oif,
563 .nl_u = {
564 .ip6_u = {
565 .daddr = *daddr,
566 },
567 },
568 };
569 struct dst_entry *dst;
570 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
571
572 if (saddr) {
573 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
574 flags |= RT6_LOOKUP_F_HAS_SADDR;
575 }
576
577 dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup);
578 if (dst->error == 0)
579 return (struct rt6_info *) dst;
580
581 dst_release(dst);
582
583 return NULL;
584 }
585
586 EXPORT_SYMBOL(rt6_lookup);
587
588 /*	ip6_ins_rt is called with table->tb6_lock NOT held.
589	It takes a new route entry; if the addition fails for any reason,
590	the route is freed. In any case, if the caller does not hold a
591	reference to it, the route may be destroyed.
592  */
593
594 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
595 {
596 int err;
597 struct fib6_table *table;
598
599 table = rt->rt6i_table;
600 write_lock_bh(&table->tb6_lock);
601 err = fib6_add(&table->tb6_root, rt, info);
602 write_unlock_bh(&table->tb6_lock);
603
604 return err;
605 }
606
607 int ip6_ins_rt(struct rt6_info *rt)
608 {
609 struct nl_info info = {
610 .nl_net = dev_net(rt->rt6i_dev),
611 };
612 return __ip6_ins_rt(rt, &info);
613 }
614
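/*
 * Clone a network route into a per-destination RTF_CACHE host route
 * (plen 128).  For non-gateway originals the destination itself is
 * recorded as the gateway, and a neighbour entry is attached via
 * ndisc_get_neigh().
 */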
615 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
616 struct in6_addr *saddr)
617 {
618 struct rt6_info *rt;
619
620 /*
621 * Clone the route.
622 */
623
624 rt = ip6_rt_copy(ort);
625
626 if (rt) {
627 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
628 if (rt->rt6i_dst.plen != 128 &&
629 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
630 rt->rt6i_flags |= RTF_ANYCAST;
631 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
632 }
633
634 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
635 rt->rt6i_dst.plen = 128;
636 rt->rt6i_flags |= RTF_CACHE;
637 rt->u.dst.flags |= DST_HOST;
638
639 #ifdef CONFIG_IPV6_SUBTREES
640 if (rt->rt6i_src.plen && saddr) {
641 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
642 rt->rt6i_src.plen = 128;
643 }
644 #endif
645
646 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
647
648 }
649
650 return rt;
651 }
652
653 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
654 {
655 struct rt6_info *rt = ip6_rt_copy(ort);
656 if (rt) {
657 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
658 rt->rt6i_dst.plen = 128;
659 rt->rt6i_flags |= RTF_CACHE;
660 rt->u.dst.flags |= DST_HOST;
661 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
662 }
663 return rt;
664 }
665
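/*
 * Common resolver for the input and output paths.  A connected route that
 * is not yet an RTF_CACHE entry is copied-on-write into a per-destination
 * clone and inserted into the table (off-link routes are only cloned when
 * CLONE_OFFLINK_ROUTE is enabled); the insertion is retried a few times
 * in case a concurrent insert raced us while the table lock was dropped.
 */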
666 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
667 struct flowi *fl, int flags)
668 {
669 struct fib6_node *fn;
670 struct rt6_info *rt, *nrt;
671 int strict = 0;
672 int attempts = 3;
673 int err;
674 int reachable = ipv6_devconf.forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
675
676 strict |= flags & RT6_LOOKUP_F_IFACE;
677
678 relookup:
679 read_lock_bh(&table->tb6_lock);
680
681 restart_2:
682 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
683
684 restart:
685 rt = rt6_select(fn, oif, strict | reachable);
686
687 BACKTRACK(net, &fl->fl6_src);
688 if (rt == net->ipv6.ip6_null_entry ||
689 rt->rt6i_flags & RTF_CACHE)
690 goto out;
691
692 dst_hold(&rt->u.dst);
693 read_unlock_bh(&table->tb6_lock);
694
695 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
696 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
697 else {
698 #if CLONE_OFFLINK_ROUTE
699 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
700 #else
701 goto out2;
702 #endif
703 }
704
705 dst_release(&rt->u.dst);
706 rt = nrt ? : net->ipv6.ip6_null_entry;
707
708 dst_hold(&rt->u.dst);
709 if (nrt) {
710 err = ip6_ins_rt(nrt);
711 if (!err)
712 goto out2;
713 }
714
715 if (--attempts <= 0)
716 goto out2;
717
718 /*
719 * Race condition! In the gap, when table->tb6_lock was
720	 * released, someone could have inserted this route. Relookup.
721 */
722 dst_release(&rt->u.dst);
723 goto relookup;
724
725 out:
726 if (reachable) {
727 reachable = 0;
728 goto restart_2;
729 }
730 dst_hold(&rt->u.dst);
731 read_unlock_bh(&table->tb6_lock);
732 out2:
733 rt->u.dst.lastuse = jiffies;
734 rt->u.dst.__use++;
735
736 return rt;
737 }
738
739 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
740 struct flowi *fl, int flags)
741 {
742 return ip6_pol_route(net, table, fl->iif, fl, flags);
743 }
744
745 void ip6_route_input(struct sk_buff *skb)
746 {
747 struct ipv6hdr *iph = ipv6_hdr(skb);
748 struct net *net = dev_net(skb->dev);
749 int flags = RT6_LOOKUP_F_HAS_SADDR;
750 struct flowi fl = {
751 .iif = skb->dev->ifindex,
752 .nl_u = {
753 .ip6_u = {
754 .daddr = iph->daddr,
755 .saddr = iph->saddr,
756 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
757 },
758 },
759 .mark = skb->mark,
760 .proto = iph->nexthdr,
761 };
762
763 if (rt6_need_strict(&iph->daddr))
764 flags |= RT6_LOOKUP_F_IFACE;
765
766 skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
767 }
768
769 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
770 struct flowi *fl, int flags)
771 {
772 return ip6_pol_route(net, table, fl->oif, fl, flags);
773 }
774
775 struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
776 struct flowi *fl)
777 {
778 int flags = 0;
779
780 if (rt6_need_strict(&fl->fl6_dst))
781 flags |= RT6_LOOKUP_F_IFACE;
782
783 if (!ipv6_addr_any(&fl->fl6_src))
784 flags |= RT6_LOOKUP_F_HAS_SADDR;
785 else if (sk) {
786 unsigned int prefs = inet6_sk(sk)->srcprefs;
787 if (prefs & IPV6_PREFER_SRC_TMP)
788 flags |= RT6_LOOKUP_F_SRCPREF_TMP;
789 if (prefs & IPV6_PREFER_SRC_PUBLIC)
790 flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
791 if (prefs & IPV6_PREFER_SRC_COA)
792 flags |= RT6_LOOKUP_F_SRCPREF_COA;
793 }
794
795 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
796 }
797
798 EXPORT_SYMBOL(ip6_route_output);
799
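/*
 * Replace *dstp with a standalone copy backed by ip6_dst_blackhole_ops
 * that silently discards everything sent through it.  The copy is never
 * inserted into the FIB and is freed once its last reference is dropped.
 */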
800 int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
801 {
802 struct rt6_info *ort = (struct rt6_info *) *dstp;
803 struct rt6_info *rt = (struct rt6_info *)
804 dst_alloc(&ip6_dst_blackhole_ops);
805 struct dst_entry *new = NULL;
806
807 if (rt) {
808 new = &rt->u.dst;
809
810 atomic_set(&new->__refcnt, 1);
811 new->__use = 1;
812 new->input = dst_discard;
813 new->output = dst_discard;
814
815 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
816 new->dev = ort->u.dst.dev;
817 if (new->dev)
818 dev_hold(new->dev);
819 rt->rt6i_idev = ort->rt6i_idev;
820 if (rt->rt6i_idev)
821 in6_dev_hold(rt->rt6i_idev);
822 rt->rt6i_expires = 0;
823
824 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
825 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
826 rt->rt6i_metric = 0;
827
828 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
829 #ifdef CONFIG_IPV6_SUBTREES
830 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
831 #endif
832
833 dst_free(new);
834 }
835
836 dst_release(*dstp);
837 *dstp = new;
838 return (new ? 0 : -ENOMEM);
839 }
840 EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
841
842 /*
843 * Destination cache support functions
844 */
845
846 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
847 {
848 struct rt6_info *rt;
849
850 rt = (struct rt6_info *) dst;
851
852 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
853 return dst;
854
855 return NULL;
856 }
857
858 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
859 {
860 struct rt6_info *rt = (struct rt6_info *) dst;
861
862 if (rt) {
863 if (rt->rt6i_flags & RTF_CACHE)
864 ip6_del_rt(rt);
865 else
866 dst_release(dst);
867 }
868 return NULL;
869 }
870
871 static void ip6_link_failure(struct sk_buff *skb)
872 {
873 struct rt6_info *rt;
874
875 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
876
877 rt = (struct rt6_info *) skb->dst;
878 if (rt) {
879 if (rt->rt6i_flags&RTF_CACHE) {
880 dst_set_expires(&rt->u.dst, 0);
881 rt->rt6i_flags |= RTF_EXPIRES;
882 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
883 rt->rt6i_node->fn_sernum = -1;
884 }
885 }
886
887 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
888 {
889 struct rt6_info *rt6 = (struct rt6_info*)dst;
890
891 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
892 rt6->rt6i_flags |= RTF_MODIFIED;
893 if (mtu < IPV6_MIN_MTU) {
894 mtu = IPV6_MIN_MTU;
895 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
896 }
897 dst->metrics[RTAX_MTU-1] = mtu;
898 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
899 }
900 }
901
902 static int ipv6_get_mtu(struct net_device *dev);
903
904 static inline unsigned int ipv6_advmss(struct net *net, unsigned int mtu)
905 {
906 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
907
908 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
909 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
910
911 /*
912 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
913 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
914 * IPV6_MAXPLEN is also valid and means: "any MSS,
915 * rely only on pmtu discovery"
916 */
917 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
918 mtu = IPV6_MAXPLEN;
919 return mtu;
920 }
921
922 static struct dst_entry *icmp6_dst_gc_list;
923 static DEFINE_SPINLOCK(icmp6_dst_lock);
924
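/*
 * Allocate a dst for an ndisc/ICMPv6 message.  These entries never enter
 * the FIB; they are chained on icmp6_dst_gc_list above and reclaimed by
 * icmp6_dst_gc() once their reference count reaches zero.
 */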
925 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
926 struct neighbour *neigh,
927 struct in6_addr *addr)
928 {
929 struct rt6_info *rt;
930 struct inet6_dev *idev = in6_dev_get(dev);
931 struct net *net = dev_net(dev);
932
933 if (unlikely(idev == NULL))
934 return NULL;
935
936 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
937 if (unlikely(rt == NULL)) {
938 in6_dev_put(idev);
939 goto out;
940 }
941
942 dev_hold(dev);
943 if (neigh)
944 neigh_hold(neigh);
945 else
946 neigh = ndisc_get_neigh(dev, addr);
947
948 rt->rt6i_dev = dev;
949 rt->rt6i_idev = idev;
950 rt->rt6i_nexthop = neigh;
951 atomic_set(&rt->u.dst.__refcnt, 1);
952 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
953 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
954 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
955 rt->u.dst.output = ip6_output;
956
957 #if 0 /* there's no chance to use these for ndisc */
958 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
959 ? DST_HOST
960 : 0;
961 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
962 rt->rt6i_dst.plen = 128;
963 #endif
964
965 spin_lock_bh(&icmp6_dst_lock);
966 rt->u.dst.next = icmp6_dst_gc_list;
967 icmp6_dst_gc_list = &rt->u.dst;
968 spin_unlock_bh(&icmp6_dst_lock);
969
970 fib6_force_start_gc(net);
971
972 out:
973 return &rt->u.dst;
974 }
975
976 int icmp6_dst_gc(int *more)
977 {
978 struct dst_entry *dst, *next, **pprev;
979 int freed;
980
981 next = NULL;
982 freed = 0;
983
984 spin_lock_bh(&icmp6_dst_lock);
985 pprev = &icmp6_dst_gc_list;
986
987 while ((dst = *pprev) != NULL) {
988 if (!atomic_read(&dst->__refcnt)) {
989 *pprev = dst->next;
990 dst_free(dst);
991 freed++;
992 } else {
993 pprev = &dst->next;
994 (*more)++;
995 }
996 }
997
998 spin_unlock_bh(&icmp6_dst_lock);
999
1000 return freed;
1001 }
1002
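/*
 * dst_ops garbage collector: runs fib6_run_gc() when the minimum GC
 * interval has elapsed or the number of entries exceeds ip6_rt_max_size.
 * The ip6_rt_gc_expire threshold adapts between calls so that repeated
 * runs under pressure reclaim cached routes more aggressively.
 */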
1003 static int ip6_dst_gc(struct dst_ops *ops)
1004 {
1005 unsigned long now = jiffies;
1006 struct net *net = ops->dst_net;
1007 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1008 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1009 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1010 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1011 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1012
1013 if (time_after(rt_last_gc + rt_min_interval, now) &&
1014 atomic_read(&ops->entries) <= rt_max_size)
1015 goto out;
1016
1017 net->ipv6.ip6_rt_gc_expire++;
1018 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1019 net->ipv6.ip6_rt_last_gc = now;
1020 if (atomic_read(&ops->entries) < ops->gc_thresh)
1021 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1022 out:
1023 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1024 return (atomic_read(&ops->entries) > rt_max_size);
1025 }
1026
1027 /*	Clean the host part of a prefix. Not necessary in a radix tree,
1028	but it results in cleaner routing tables.
1029
1030	Remove this only when everything is known to work!
1031 */
1032
1033 static int ipv6_get_mtu(struct net_device *dev)
1034 {
1035 int mtu = IPV6_MIN_MTU;
1036 struct inet6_dev *idev;
1037
1038 idev = in6_dev_get(dev);
1039 if (idev) {
1040 mtu = idev->cnf.mtu6;
1041 in6_dev_put(idev);
1042 }
1043 return mtu;
1044 }
1045
1046 int ip6_dst_hoplimit(struct dst_entry *dst)
1047 {
1048 int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
1049 if (hoplimit < 0) {
1050 struct net_device *dev = dst->dev;
1051 struct inet6_dev *idev = in6_dev_get(dev);
1052 if (idev) {
1053 hoplimit = idev->cnf.hop_limit;
1054 in6_dev_put(idev);
1055 } else
1056 hoplimit = ipv6_devconf.hop_limit;
1057 }
1058 return hoplimit;
1059 }
1060
1061 /*
1062  *	ip6_route_add: build a rt6_info from a fib6_config and insert it into the FIB.
1063  */
1064
1065 int ip6_route_add(struct fib6_config *cfg)
1066 {
1067 int err;
1068 struct net *net = cfg->fc_nlinfo.nl_net;
1069 struct rt6_info *rt = NULL;
1070 struct net_device *dev = NULL;
1071 struct inet6_dev *idev = NULL;
1072 struct fib6_table *table;
1073 int addr_type;
1074
1075 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1076 return -EINVAL;
1077 #ifndef CONFIG_IPV6_SUBTREES
1078 if (cfg->fc_src_len)
1079 return -EINVAL;
1080 #endif
1081 if (cfg->fc_ifindex) {
1082 err = -ENODEV;
1083 dev = dev_get_by_index(net, cfg->fc_ifindex);
1084 if (!dev)
1085 goto out;
1086 idev = in6_dev_get(dev);
1087 if (!idev)
1088 goto out;
1089 }
1090
1091 if (cfg->fc_metric == 0)
1092 cfg->fc_metric = IP6_RT_PRIO_USER;
1093
1094 table = fib6_new_table(net, cfg->fc_table);
1095 if (table == NULL) {
1096 err = -ENOBUFS;
1097 goto out;
1098 }
1099
1100 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1101
1102 if (rt == NULL) {
1103 err = -ENOMEM;
1104 goto out;
1105 }
1106
1107 rt->u.dst.obsolete = -1;
1108 rt->rt6i_expires = jiffies + clock_t_to_jiffies(cfg->fc_expires);
1109
1110 if (cfg->fc_protocol == RTPROT_UNSPEC)
1111 cfg->fc_protocol = RTPROT_BOOT;
1112 rt->rt6i_protocol = cfg->fc_protocol;
1113
1114 addr_type = ipv6_addr_type(&cfg->fc_dst);
1115
1116 if (addr_type & IPV6_ADDR_MULTICAST)
1117 rt->u.dst.input = ip6_mc_input;
1118 else
1119 rt->u.dst.input = ip6_forward;
1120
1121 rt->u.dst.output = ip6_output;
1122
1123 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1124 rt->rt6i_dst.plen = cfg->fc_dst_len;
1125 if (rt->rt6i_dst.plen == 128)
1126 rt->u.dst.flags = DST_HOST;
1127
1128 #ifdef CONFIG_IPV6_SUBTREES
1129 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1130 rt->rt6i_src.plen = cfg->fc_src_len;
1131 #endif
1132
1133 rt->rt6i_metric = cfg->fc_metric;
1134
1135 	/* We cannot add true routes via loopback here;
1136 	   they would result in kernel looping. Promote them to reject routes.
1137 */
1138 if ((cfg->fc_flags & RTF_REJECT) ||
1139 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1140 /* hold loopback dev/idev if we haven't done so. */
1141 if (dev != net->loopback_dev) {
1142 if (dev) {
1143 dev_put(dev);
1144 in6_dev_put(idev);
1145 }
1146 dev = net->loopback_dev;
1147 dev_hold(dev);
1148 idev = in6_dev_get(dev);
1149 if (!idev) {
1150 err = -ENODEV;
1151 goto out;
1152 }
1153 }
1154 rt->u.dst.output = ip6_pkt_discard_out;
1155 rt->u.dst.input = ip6_pkt_discard;
1156 rt->u.dst.error = -ENETUNREACH;
1157 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1158 goto install_route;
1159 }
1160
1161 if (cfg->fc_flags & RTF_GATEWAY) {
1162 struct in6_addr *gw_addr;
1163 int gwa_type;
1164
1165 gw_addr = &cfg->fc_gateway;
1166 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1167 gwa_type = ipv6_addr_type(gw_addr);
1168
1169 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1170 struct rt6_info *grt;
1171
1172 			/* IPv6 strictly inhibits using non-link-local
1173 			   addresses as the nexthop address.
1174 			   Otherwise, a router will not be able to send redirects.
1175 It is very good, but in some (rare!) circumstances
1176 (SIT, PtP, NBMA NOARP links) it is handy to allow
1177 some exceptions. --ANK
1178 */
1179 err = -EINVAL;
1180 if (!(gwa_type&IPV6_ADDR_UNICAST))
1181 goto out;
1182
1183 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1184
1185 err = -EHOSTUNREACH;
1186 if (grt == NULL)
1187 goto out;
1188 if (dev) {
1189 if (dev != grt->rt6i_dev) {
1190 dst_release(&grt->u.dst);
1191 goto out;
1192 }
1193 } else {
1194 dev = grt->rt6i_dev;
1195 idev = grt->rt6i_idev;
1196 dev_hold(dev);
1197 in6_dev_hold(grt->rt6i_idev);
1198 }
1199 if (!(grt->rt6i_flags&RTF_GATEWAY))
1200 err = 0;
1201 dst_release(&grt->u.dst);
1202
1203 if (err)
1204 goto out;
1205 }
1206 err = -EINVAL;
1207 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1208 goto out;
1209 }
1210
1211 err = -ENODEV;
1212 if (dev == NULL)
1213 goto out;
1214
1215 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1216 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1217 if (IS_ERR(rt->rt6i_nexthop)) {
1218 err = PTR_ERR(rt->rt6i_nexthop);
1219 rt->rt6i_nexthop = NULL;
1220 goto out;
1221 }
1222 }
1223
1224 rt->rt6i_flags = cfg->fc_flags;
1225
1226 install_route:
1227 if (cfg->fc_mx) {
1228 struct nlattr *nla;
1229 int remaining;
1230
1231 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1232 int type = nla_type(nla);
1233
1234 if (type) {
1235 if (type > RTAX_MAX) {
1236 err = -EINVAL;
1237 goto out;
1238 }
1239
1240 rt->u.dst.metrics[type - 1] = nla_get_u32(nla);
1241 }
1242 }
1243 }
1244
1245 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1246 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1247 if (!rt->u.dst.metrics[RTAX_MTU-1])
1248 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1249 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1250 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1251 rt->u.dst.dev = dev;
1252 rt->rt6i_idev = idev;
1253 rt->rt6i_table = table;
1254
1255 cfg->fc_nlinfo.nl_net = dev_net(dev);
1256
1257 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1258
1259 out:
1260 if (dev)
1261 dev_put(dev);
1262 if (idev)
1263 in6_dev_put(idev);
1264 if (rt)
1265 dst_free(&rt->u.dst);
1266 return err;
1267 }
1268
1269 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1270 {
1271 int err;
1272 struct fib6_table *table;
1273 struct net *net = dev_net(rt->rt6i_dev);
1274
1275 if (rt == net->ipv6.ip6_null_entry)
1276 return -ENOENT;
1277
1278 table = rt->rt6i_table;
1279 write_lock_bh(&table->tb6_lock);
1280
1281 err = fib6_del(rt, info);
1282 dst_release(&rt->u.dst);
1283
1284 write_unlock_bh(&table->tb6_lock);
1285
1286 return err;
1287 }
1288
1289 int ip6_del_rt(struct rt6_info *rt)
1290 {
1291 struct nl_info info = {
1292 .nl_net = dev_net(rt->rt6i_dev),
1293 };
1294 return __ip6_del_rt(rt, &info);
1295 }
1296
1297 static int ip6_route_del(struct fib6_config *cfg)
1298 {
1299 struct fib6_table *table;
1300 struct fib6_node *fn;
1301 struct rt6_info *rt;
1302 int err = -ESRCH;
1303
1304 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1305 if (table == NULL)
1306 return err;
1307
1308 read_lock_bh(&table->tb6_lock);
1309
1310 fn = fib6_locate(&table->tb6_root,
1311 &cfg->fc_dst, cfg->fc_dst_len,
1312 &cfg->fc_src, cfg->fc_src_len);
1313
1314 if (fn) {
1315 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1316 if (cfg->fc_ifindex &&
1317 (rt->rt6i_dev == NULL ||
1318 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1319 continue;
1320 if (cfg->fc_flags & RTF_GATEWAY &&
1321 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1322 continue;
1323 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1324 continue;
1325 dst_hold(&rt->u.dst);
1326 read_unlock_bh(&table->tb6_lock);
1327
1328 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1329 }
1330 }
1331 read_unlock_bh(&table->tb6_lock);
1332
1333 return err;
1334 }
1335
1336 /*
1337 * Handle redirects
1338 */
1339 struct ip6rd_flowi {
1340 struct flowi fl;
1341 struct in6_addr gateway;
1342 };
1343
1344 static struct rt6_info *__ip6_route_redirect(struct net *net,
1345 struct fib6_table *table,
1346 struct flowi *fl,
1347 int flags)
1348 {
1349 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
1350 struct rt6_info *rt;
1351 struct fib6_node *fn;
1352
1353 /*
1354 * Get the "current" route for this destination and
1355 	 * check if the redirect has come from the appropriate router.
1356 *
1357 * RFC 2461 specifies that redirects should only be
1358 * accepted if they come from the nexthop to the target.
1359 * Due to the way the routes are chosen, this notion
1360 * is a bit fuzzy and one might need to check all possible
1361 * routes.
1362 */
1363
1364 read_lock_bh(&table->tb6_lock);
1365 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1366 restart:
1367 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1368 /*
1369 * Current route is on-link; redirect is always invalid.
1370 *
1371 		 *	That statement is not always true, though: another node
1372 		 *	may regard us as on-link (e.g. proxy ndisc), and the
1373 		 *	router serving it might decide that we should know
1374 		 *	the truth 8)8) --ANK (980726).
1375 */
1376 if (rt6_check_expired(rt))
1377 continue;
1378 if (!(rt->rt6i_flags & RTF_GATEWAY))
1379 continue;
1380 if (fl->oif != rt->rt6i_dev->ifindex)
1381 continue;
1382 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1383 continue;
1384 break;
1385 }
1386
1387 if (!rt)
1388 rt = net->ipv6.ip6_null_entry;
1389 BACKTRACK(net, &fl->fl6_src);
1390 out:
1391 dst_hold(&rt->u.dst);
1392
1393 read_unlock_bh(&table->tb6_lock);
1394
1395 return rt;
1396 };
1397
1398 static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1399 struct in6_addr *src,
1400 struct in6_addr *gateway,
1401 struct net_device *dev)
1402 {
1403 int flags = RT6_LOOKUP_F_HAS_SADDR;
1404 struct net *net = dev_net(dev);
1405 struct ip6rd_flowi rdfl = {
1406 .fl = {
1407 .oif = dev->ifindex,
1408 .nl_u = {
1409 .ip6_u = {
1410 .daddr = *dest,
1411 .saddr = *src,
1412 },
1413 },
1414 },
1415 .gateway = *gateway,
1416 };
1417
1418 if (rt6_need_strict(dest))
1419 flags |= RT6_LOOKUP_F_IFACE;
1420
1421 return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl,
1422 flags, __ip6_route_redirect);
1423 }
1424
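/*
 * Act on a validated ndisc redirect: update the neighbour cache entry for
 * the new next hop and install an RTF_CACHE host route towards dest via
 * that neighbour; if the route being replaced was itself an RTF_CACHE
 * entry, it is deleted.
 */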
1425 void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1426 struct in6_addr *saddr,
1427 struct neighbour *neigh, u8 *lladdr, int on_link)
1428 {
1429 struct rt6_info *rt, *nrt = NULL;
1430 struct netevent_redirect netevent;
1431 struct net *net = dev_net(neigh->dev);
1432
1433 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1434
1435 if (rt == net->ipv6.ip6_null_entry) {
1436 if (net_ratelimit())
1437 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1438 "for redirect target\n");
1439 goto out;
1440 }
1441
1442 /*
1443 * We have finally decided to accept it.
1444 */
1445
1446 neigh_update(neigh, lladdr, NUD_STALE,
1447 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1448 NEIGH_UPDATE_F_OVERRIDE|
1449 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1450 NEIGH_UPDATE_F_ISROUTER))
1451 );
1452
1453 /*
1454 * Redirect received -> path was valid.
1455 * Look, redirects are sent only in response to data packets,
1456 * so that this nexthop apparently is reachable. --ANK
1457 */
1458 dst_confirm(&rt->u.dst);
1459
1460 /* Duplicate redirect: silently ignore. */
1461 if (neigh == rt->u.dst.neighbour)
1462 goto out;
1463
1464 nrt = ip6_rt_copy(rt);
1465 if (nrt == NULL)
1466 goto out;
1467
1468 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1469 if (on_link)
1470 nrt->rt6i_flags &= ~RTF_GATEWAY;
1471
1472 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1473 nrt->rt6i_dst.plen = 128;
1474 nrt->u.dst.flags |= DST_HOST;
1475
1476 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1477 nrt->rt6i_nexthop = neigh_clone(neigh);
1478 /* Reset pmtu, it may be better */
1479 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1480 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
1481 dst_mtu(&nrt->u.dst));
1482
1483 if (ip6_ins_rt(nrt))
1484 goto out;
1485
1486 netevent.old = &rt->u.dst;
1487 netevent.new = &nrt->u.dst;
1488 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1489
1490 if (rt->rt6i_flags&RTF_CACHE) {
1491 ip6_del_rt(rt);
1492 return;
1493 }
1494
1495 out:
1496 dst_release(&rt->u.dst);
1497 return;
1498 }
1499
1500 /*
1501 * Handle ICMP "packet too big" messages
1502 * i.e. Path MTU discovery
1503 */
1504
1505 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1506 struct net_device *dev, u32 pmtu)
1507 {
1508 struct rt6_info *rt, *nrt;
1509 struct net *net = dev_net(dev);
1510 int allfrag = 0;
1511
1512 rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
1513 if (rt == NULL)
1514 return;
1515
1516 if (pmtu >= dst_mtu(&rt->u.dst))
1517 goto out;
1518
1519 if (pmtu < IPV6_MIN_MTU) {
1520 		 * According to RFC 2460, when a node receives a Packet Too Big
1521 		 * message reporting a PMTU below the IPv6 Minimum Link MTU,
1522 		 * the PMTU is set to that minimum (1280) and a Fragment header
1523 		 * must be included in all subsequent packets on the path.
1524 * less than the IPv6 Minimum Link MTU.
1525 */
1526 pmtu = IPV6_MIN_MTU;
1527 allfrag = 1;
1528 }
1529
1530 /* New mtu received -> path was valid.
1531 They are sent only in response to data packets,
1532 so that this nexthop apparently is reachable. --ANK
1533 */
1534 dst_confirm(&rt->u.dst);
1535
1536 /* Host route. If it is static, it would be better
1537 	   not to override it but to add a new one, so that
1538 	   when the cache entry expires the old pmtu
1539 	   is restored automatically.
1540 */
1541 if (rt->rt6i_flags & RTF_CACHE) {
1542 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1543 if (allfrag)
1544 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1545 dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1546 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1547 goto out;
1548 }
1549
1550 /* Network route.
1551 Two cases are possible:
1552 	   1. It is a connected route. Action: COW.
1553 	   2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1554 */
1555 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1556 nrt = rt6_alloc_cow(rt, daddr, saddr);
1557 else
1558 nrt = rt6_alloc_clone(rt, daddr);
1559
1560 if (nrt) {
1561 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1562 if (allfrag)
1563 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1564
1565 		/* According to RFC 1981, a PMTU increase should not be probed
1566 		 * for within 5 minutes; the recommended timer is 10 minutes.
1567 		 * Here the route expiration time is set to ip6_rt_mtu_expires,
1568 		 * which defaults to 10 minutes. After that the decreased pmtu
1569 		 * expires and PMTU increase detection happens automatically.
1570 */
1571 dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1572 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1573
1574 ip6_ins_rt(nrt);
1575 }
1576 out:
1577 dst_release(&rt->u.dst);
1578 }
1579
1580 /*
1581 * Misc support functions
1582 */
1583
1584 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1585 {
1586 struct net *net = dev_net(ort->rt6i_dev);
1587 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1588
1589 if (rt) {
1590 rt->u.dst.input = ort->u.dst.input;
1591 rt->u.dst.output = ort->u.dst.output;
1592
1593 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1594 rt->u.dst.error = ort->u.dst.error;
1595 rt->u.dst.dev = ort->u.dst.dev;
1596 if (rt->u.dst.dev)
1597 dev_hold(rt->u.dst.dev);
1598 rt->rt6i_idev = ort->rt6i_idev;
1599 if (rt->rt6i_idev)
1600 in6_dev_hold(rt->rt6i_idev);
1601 rt->u.dst.lastuse = jiffies;
1602 rt->rt6i_expires = 0;
1603
1604 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1605 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1606 rt->rt6i_metric = 0;
1607
1608 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1609 #ifdef CONFIG_IPV6_SUBTREES
1610 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1611 #endif
1612 rt->rt6i_table = ort->rt6i_table;
1613 }
1614 return rt;
1615 }
1616
1617 #ifdef CONFIG_IPV6_ROUTE_INFO
1618 static struct rt6_info *rt6_get_route_info(struct net *net,
1619 struct in6_addr *prefix, int prefixlen,
1620 struct in6_addr *gwaddr, int ifindex)
1621 {
1622 struct fib6_node *fn;
1623 struct rt6_info *rt = NULL;
1624 struct fib6_table *table;
1625
1626 table = fib6_get_table(net, RT6_TABLE_INFO);
1627 if (table == NULL)
1628 return NULL;
1629
1630 write_lock_bh(&table->tb6_lock);
1631 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1632 if (!fn)
1633 goto out;
1634
1635 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1636 if (rt->rt6i_dev->ifindex != ifindex)
1637 continue;
1638 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1639 continue;
1640 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1641 continue;
1642 dst_hold(&rt->u.dst);
1643 break;
1644 }
1645 out:
1646 write_unlock_bh(&table->tb6_lock);
1647 return rt;
1648 }
1649
1650 static struct rt6_info *rt6_add_route_info(struct net *net,
1651 struct in6_addr *prefix, int prefixlen,
1652 struct in6_addr *gwaddr, int ifindex,
1653 unsigned pref)
1654 {
1655 struct fib6_config cfg = {
1656 .fc_table = RT6_TABLE_INFO,
1657 .fc_metric = IP6_RT_PRIO_USER,
1658 .fc_ifindex = ifindex,
1659 .fc_dst_len = prefixlen,
1660 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1661 RTF_UP | RTF_PREF(pref),
1662 .fc_nlinfo.pid = 0,
1663 .fc_nlinfo.nlh = NULL,
1664 .fc_nlinfo.nl_net = net,
1665 };
1666
1667 ipv6_addr_copy(&cfg.fc_dst, prefix);
1668 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1669
1670 /* We should treat it as a default route if prefix length is 0. */
1671 if (!prefixlen)
1672 cfg.fc_flags |= RTF_DEFAULT;
1673
1674 ip6_route_add(&cfg);
1675
1676 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1677 }
1678 #endif
1679
1680 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1681 {
1682 struct rt6_info *rt;
1683 struct fib6_table *table;
1684
1685 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1686 if (table == NULL)
1687 return NULL;
1688
1689 write_lock_bh(&table->tb6_lock);
1690 for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
1691 if (dev == rt->rt6i_dev &&
1692 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1693 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1694 break;
1695 }
1696 if (rt)
1697 dst_hold(&rt->u.dst);
1698 write_unlock_bh(&table->tb6_lock);
1699 return rt;
1700 }
1701
1702 EXPORT_SYMBOL(rt6_get_dflt_router);
1703
1704 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1705 struct net_device *dev,
1706 unsigned int pref)
1707 {
1708 struct fib6_config cfg = {
1709 .fc_table = RT6_TABLE_DFLT,
1710 .fc_metric = IP6_RT_PRIO_USER,
1711 .fc_ifindex = dev->ifindex,
1712 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1713 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1714 .fc_nlinfo.pid = 0,
1715 .fc_nlinfo.nlh = NULL,
1716 .fc_nlinfo.nl_net = dev_net(dev),
1717 };
1718
1719 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1720
1721 ip6_route_add(&cfg);
1722
1723 return rt6_get_dflt_router(gwaddr, dev);
1724 }
1725
1726 void rt6_purge_dflt_routers(struct net *net)
1727 {
1728 struct rt6_info *rt;
1729 struct fib6_table *table;
1730
1731 /* NOTE: Keep consistent with rt6_get_dflt_router */
1732 table = fib6_get_table(net, RT6_TABLE_DFLT);
1733 if (table == NULL)
1734 return;
1735
1736 restart:
1737 read_lock_bh(&table->tb6_lock);
1738 for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
1739 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1740 dst_hold(&rt->u.dst);
1741 read_unlock_bh(&table->tb6_lock);
1742 ip6_del_rt(rt);
1743 goto restart;
1744 }
1745 }
1746 read_unlock_bh(&table->tb6_lock);
1747 }
1748
1749 static void rtmsg_to_fib6_config(struct net *net,
1750 struct in6_rtmsg *rtmsg,
1751 struct fib6_config *cfg)
1752 {
1753 memset(cfg, 0, sizeof(*cfg));
1754
1755 cfg->fc_table = RT6_TABLE_MAIN;
1756 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1757 cfg->fc_metric = rtmsg->rtmsg_metric;
1758 cfg->fc_expires = rtmsg->rtmsg_info;
1759 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1760 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1761 cfg->fc_flags = rtmsg->rtmsg_flags;
1762
1763 cfg->fc_nlinfo.nl_net = net;
1764
1765 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1766 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1767 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1768 }
1769
1770 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1771 {
1772 struct fib6_config cfg;
1773 struct in6_rtmsg rtmsg;
1774 int err;
1775
1776 switch(cmd) {
1777 case SIOCADDRT: /* Add a route */
1778 case SIOCDELRT: /* Delete a route */
1779 if (!capable(CAP_NET_ADMIN))
1780 return -EPERM;
1781 err = copy_from_user(&rtmsg, arg,
1782 sizeof(struct in6_rtmsg));
1783 if (err)
1784 return -EFAULT;
1785
1786 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1787
1788 rtnl_lock();
1789 switch (cmd) {
1790 case SIOCADDRT:
1791 err = ip6_route_add(&cfg);
1792 break;
1793 case SIOCDELRT:
1794 err = ip6_route_del(&cfg);
1795 break;
1796 default:
1797 err = -EINVAL;
1798 }
1799 rtnl_unlock();
1800
1801 return err;
1802 }
1803
1804 return -EINVAL;
1805 }
1806
1807 /*
1808 * Drop the packet on the floor
1809 */
1810
1811 static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes)
1812 {
1813 int type;
1814 switch (ipstats_mib_noroutes) {
1815 case IPSTATS_MIB_INNOROUTES:
1816 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1817 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) {
1818 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS);
1819 break;
1820 }
1821 /* FALLTHROUGH */
1822 case IPSTATS_MIB_OUTNOROUTES:
1823 IP6_INC_STATS(ip6_dst_idev(skb->dst), ipstats_mib_noroutes);
1824 break;
1825 }
1826 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
1827 kfree_skb(skb);
1828 return 0;
1829 }
1830
1831 static int ip6_pkt_discard(struct sk_buff *skb)
1832 {
1833 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
1834 }
1835
1836 static int ip6_pkt_discard_out(struct sk_buff *skb)
1837 {
1838 skb->dev = skb->dst->dev;
1839 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1840 }
1841
1842 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1843
1844 static int ip6_pkt_prohibit(struct sk_buff *skb)
1845 {
1846 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1847 }
1848
1849 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1850 {
1851 skb->dev = skb->dst->dev;
1852 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1853 }
1854
1855 #endif
1856
1857 /*
1858 * Allocate a dst for local (unicast / anycast) address.
1859 */
1860
1861 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1862 const struct in6_addr *addr,
1863 int anycast)
1864 {
1865 struct net *net = dev_net(idev->dev);
1866 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1867
1868 if (rt == NULL)
1869 return ERR_PTR(-ENOMEM);
1870
1871 dev_hold(net->loopback_dev);
1872 in6_dev_hold(idev);
1873
1874 rt->u.dst.flags = DST_HOST;
1875 rt->u.dst.input = ip6_input;
1876 rt->u.dst.output = ip6_output;
1877 rt->rt6i_dev = net->loopback_dev;
1878 rt->rt6i_idev = idev;
1879 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1880 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1881 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1882 rt->u.dst.obsolete = -1;
1883
1884 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1885 if (anycast)
1886 rt->rt6i_flags |= RTF_ANYCAST;
1887 else
1888 rt->rt6i_flags |= RTF_LOCAL;
1889 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1890 if (rt->rt6i_nexthop == NULL) {
1891 dst_free(&rt->u.dst);
1892 return ERR_PTR(-ENOMEM);
1893 }
1894
1895 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1896 rt->rt6i_dst.plen = 128;
1897 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
1898
1899 atomic_set(&rt->u.dst.__refcnt, 1);
1900
1901 return rt;
1902 }
1903
1904 struct arg_dev_net {
1905 struct net_device *dev;
1906 struct net *net;
1907 };
1908
1909 static int fib6_ifdown(struct rt6_info *rt, void *arg)
1910 {
1911 struct net_device *dev = ((struct arg_dev_net *)arg)->dev;
1912 struct net *net = ((struct arg_dev_net *)arg)->net;
1913
1914 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
1915 rt != net->ipv6.ip6_null_entry) {
1916 RT6_TRACE("deleted by ifdown %p\n", rt);
1917 return -1;
1918 }
1919 return 0;
1920 }
1921
1922 void rt6_ifdown(struct net *net, struct net_device *dev)
1923 {
1924 struct arg_dev_net adn = {
1925 .dev = dev,
1926 .net = net,
1927 };
1928
1929 fib6_clean_all(net, fib6_ifdown, 0, &adn);
1930 }
1931
1932 struct rt6_mtu_change_arg
1933 {
1934 struct net_device *dev;
1935 unsigned mtu;
1936 };
1937
1938 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1939 {
1940 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1941 struct inet6_dev *idev;
1942 struct net *net = dev_net(arg->dev);
1943
1944 /* In IPv6 pmtu discovery is not optional,
1945 	   so an RTAX_MTU lock cannot disable it.
1946 We still use this lock to block changes
1947 caused by addrconf/ndisc.
1948 */
1949
1950 idev = __in6_dev_get(arg->dev);
1951 if (idev == NULL)
1952 return 0;
1953
1954 	/* After an administrative MTU increase there is no way to discover
1955 	   an IPv6 PMTU increase, so the PMTU must be updated here.
1956 	   Since RFC 1981 does not cover administrative MTU increases,
1957 	   updating the PMTU on an increase is a MUST (e.g. for jumbo frames).
1958 */
1959 /*
1960 	   If the new MTU is less than the route PMTU, the new MTU will be
1961 	   the lowest MTU in the path, so update the route PMTU to reflect
1962 	   the decrease; if the new MTU is greater than the route PMTU, and
1963 	   the old MTU was the lowest MTU in the path, update the route PMTU
1964 	   to reflect the increase. In that case, if another node along the
1965 	   path still has a lower MTU, a Packet Too Big message will lead to
1966 	   PMTU discovery again.
1967 */
1968 if (rt->rt6i_dev == arg->dev &&
1969 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1970 (dst_mtu(&rt->u.dst) >= arg->mtu ||
1971 (dst_mtu(&rt->u.dst) < arg->mtu &&
1972 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
1973 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1974 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
1975 }
1976 return 0;
1977 }
1978
1979 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1980 {
1981 struct rt6_mtu_change_arg arg = {
1982 .dev = dev,
1983 .mtu = mtu,
1984 };
1985
1986 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
1987 }
1988
1989 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
1990 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
1991 [RTA_OIF] = { .type = NLA_U32 },
1992 [RTA_IIF] = { .type = NLA_U32 },
1993 [RTA_PRIORITY] = { .type = NLA_U32 },
1994 [RTA_METRICS] = { .type = NLA_NESTED },
1995 };
1996
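/*
 * Translate an RTM_NEWROUTE/RTM_DELROUTE request into a fib6_config,
 * extracting the destination/source prefixes, gateway, interface,
 * priority, metrics and target table from the netlink attributes.
 */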
1997 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1998 struct fib6_config *cfg)
1999 {
2000 struct rtmsg *rtm;
2001 struct nlattr *tb[RTA_MAX+1];
2002 int err;
2003
2004 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2005 if (err < 0)
2006 goto errout;
2007
2008 err = -EINVAL;
2009 rtm = nlmsg_data(nlh);
2010 memset(cfg, 0, sizeof(*cfg));
2011
2012 cfg->fc_table = rtm->rtm_table;
2013 cfg->fc_dst_len = rtm->rtm_dst_len;
2014 cfg->fc_src_len = rtm->rtm_src_len;
2015 cfg->fc_flags = RTF_UP;
2016 cfg->fc_protocol = rtm->rtm_protocol;
2017
2018 if (rtm->rtm_type == RTN_UNREACHABLE)
2019 cfg->fc_flags |= RTF_REJECT;
2020
2021 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
2022 cfg->fc_nlinfo.nlh = nlh;
2023 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2024
2025 if (tb[RTA_GATEWAY]) {
2026 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2027 cfg->fc_flags |= RTF_GATEWAY;
2028 }
2029
2030 if (tb[RTA_DST]) {
2031 int plen = (rtm->rtm_dst_len + 7) >> 3;
2032
2033 if (nla_len(tb[RTA_DST]) < plen)
2034 goto errout;
2035
2036 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2037 }
2038
2039 if (tb[RTA_SRC]) {
2040 int plen = (rtm->rtm_src_len + 7) >> 3;
2041
2042 if (nla_len(tb[RTA_SRC]) < plen)
2043 goto errout;
2044
2045 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2046 }
2047
2048 if (tb[RTA_OIF])
2049 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2050
2051 if (tb[RTA_PRIORITY])
2052 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2053
2054 if (tb[RTA_METRICS]) {
2055 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2056 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2057 }
2058
2059 if (tb[RTA_TABLE])
2060 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2061
2062 err = 0;
2063 errout:
2064 return err;
2065 }
2066
2067 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2068 {
2069 struct fib6_config cfg;
2070 int err;
2071
2072 err = rtm_to_fib6_config(skb, nlh, &cfg);
2073 if (err < 0)
2074 return err;
2075
2076 return ip6_route_del(&cfg);
2077 }
2078
2079 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2080 {
2081 struct fib6_config cfg;
2082 int err;
2083
2084 err = rtm_to_fib6_config(skb, nlh, &cfg);
2085 if (err < 0)
2086 return err;
2087
2088 return ip6_route_add(&cfg);
2089 }
2090
2091 static inline size_t rt6_nlmsg_size(void)
2092 {
2093 return NLMSG_ALIGN(sizeof(struct rtmsg))
2094 + nla_total_size(16) /* RTA_SRC */
2095 + nla_total_size(16) /* RTA_DST */
2096 + nla_total_size(16) /* RTA_GATEWAY */
2097 + nla_total_size(16) /* RTA_PREFSRC */
2098 + nla_total_size(4) /* RTA_TABLE */
2099 + nla_total_size(4) /* RTA_IIF */
2100 + nla_total_size(4) /* RTA_OIF */
2101 + nla_total_size(4) /* RTA_PRIORITY */
2102 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2103 + nla_total_size(sizeof(struct rta_cacheinfo));
2104 }
2105
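/*
 * Build an rtnetlink message describing one rt6_info.  When the caller
 * asked for prefix routes only, non-prefix routes are skipped by
 * returning 1, which the dump path treats as success.
 */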
2106 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2107 struct in6_addr *dst, struct in6_addr *src,
2108 int iif, int type, u32 pid, u32 seq,
2109 int prefix, unsigned int flags)
2110 {
2111 struct rtmsg *rtm;
2112 struct nlmsghdr *nlh;
2113 long expires;
2114 u32 table;
2115
2116 if (prefix) { /* user wants prefix routes only */
2117 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2118 /* success since this is not a prefix route */
2119 return 1;
2120 }
2121 }
2122
2123 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2124 if (nlh == NULL)
2125 return -EMSGSIZE;
2126
2127 rtm = nlmsg_data(nlh);
2128 rtm->rtm_family = AF_INET6;
2129 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2130 rtm->rtm_src_len = rt->rt6i_src.plen;
2131 rtm->rtm_tos = 0;
2132 if (rt->rt6i_table)
2133 table = rt->rt6i_table->tb6_id;
2134 else
2135 table = RT6_TABLE_UNSPEC;
2136 rtm->rtm_table = table;
2137 NLA_PUT_U32(skb, RTA_TABLE, table);
2138 if (rt->rt6i_flags&RTF_REJECT)
2139 rtm->rtm_type = RTN_UNREACHABLE;
2140 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2141 rtm->rtm_type = RTN_LOCAL;
2142 else
2143 rtm->rtm_type = RTN_UNICAST;
2144 rtm->rtm_flags = 0;
2145 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2146 rtm->rtm_protocol = rt->rt6i_protocol;
2147 if (rt->rt6i_flags&RTF_DYNAMIC)
2148 rtm->rtm_protocol = RTPROT_REDIRECT;
2149 else if (rt->rt6i_flags & RTF_ADDRCONF)
2150 rtm->rtm_protocol = RTPROT_KERNEL;
2151 else if (rt->rt6i_flags&RTF_DEFAULT)
2152 rtm->rtm_protocol = RTPROT_RA;
2153
2154 if (rt->rt6i_flags&RTF_CACHE)
2155 rtm->rtm_flags |= RTM_F_CLONED;
2156
2157 if (dst) {
2158 NLA_PUT(skb, RTA_DST, 16, dst);
2159 rtm->rtm_dst_len = 128;
2160 } else if (rtm->rtm_dst_len)
2161 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2162 #ifdef CONFIG_IPV6_SUBTREES
2163 if (src) {
2164 NLA_PUT(skb, RTA_SRC, 16, src);
2165 rtm->rtm_src_len = 128;
2166 } else if (rtm->rtm_src_len)
2167 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2168 #endif
2169 if (iif)
2170 NLA_PUT_U32(skb, RTA_IIF, iif);
2171 else if (dst) {
2172 struct in6_addr saddr_buf;
2173 if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
2174 dst, 0, &saddr_buf) == 0)
2175 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2176 }
2177
2178 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2179 goto nla_put_failure;
2180
2181 if (rt->u.dst.neighbour)
2182 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
2183
2184 if (rt->u.dst.dev)
2185 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2186
2187 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2188
2189 expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0;
2190 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
2191 expires, rt->u.dst.error) < 0)
2192 goto nla_put_failure;
2193
2194 return nlmsg_end(skb, nlh);
2195
2196 nla_put_failure:
2197 nlmsg_cancel(skb, nlh);
2198 return -EMSGSIZE;
2199 }
2200
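/*
 * Per-route callback used while dumping the FIB over rtnetlink;
 * honours the RTM_F_PREFIX filter carried in the dump request.
 */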
2201 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2202 {
2203 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2204 int prefix;
2205
2206 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2207 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2208 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2209 } else
2210 prefix = 0;
2211
2212 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2213 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2214 prefix, NLM_F_MULTI);
2215 }
2216
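/*
 * RTM_GETROUTE: resolve a single destination via ip6_route_output()
 * and unicast the resulting route back to the requesting socket.
 */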
2217 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2218 {
2219 struct net *net = sock_net(in_skb->sk);
2220 struct nlattr *tb[RTA_MAX+1];
2221 struct rt6_info *rt;
2222 struct sk_buff *skb;
2223 struct rtmsg *rtm;
2224 struct flowi fl;
2225 int err, iif = 0;
2226
2227 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2228 if (err < 0)
2229 goto errout;
2230
2231 err = -EINVAL;
2232 memset(&fl, 0, sizeof(fl));
2233
2234 if (tb[RTA_SRC]) {
2235 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2236 goto errout;
2237
2238 ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
2239 }
2240
2241 if (tb[RTA_DST]) {
2242 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2243 goto errout;
2244
2245 ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
2246 }
2247
2248 if (tb[RTA_IIF])
2249 iif = nla_get_u32(tb[RTA_IIF]);
2250
2251 if (tb[RTA_OIF])
2252 fl.oif = nla_get_u32(tb[RTA_OIF]);
2253
2254 if (iif) {
2255 struct net_device *dev;
2256 dev = __dev_get_by_index(net, iif);
2257 if (!dev) {
2258 err = -ENODEV;
2259 goto errout;
2260 }
2261 }
2262
2263 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2264 if (skb == NULL) {
2265 err = -ENOBUFS;
2266 goto errout;
2267 }
2268
2269 /* Reserve room for dummy headers; this skb can pass
2270 through a good chunk of the routing engine.
2271 */
2272 skb_reset_mac_header(skb);
2273 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2274
2275 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
2276 skb->dst = &rt->u.dst;
2277
2278 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2279 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2280 nlh->nlmsg_seq, 0, 0);
2281 if (err < 0) {
2282 kfree_skb(skb);
2283 goto errout;
2284 }
2285
2286 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2287 errout:
2288 return err;
2289 }
2290
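/*
 * Notify RTNLGRP_IPV6_ROUTE listeners of a route change.  The skb is
 * sized with rt6_nlmsg_size(), so -EMSGSIZE from rt6_fill_node() here
 * would indicate a bug in that estimate.
 */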
2291 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2292 {
2293 struct sk_buff *skb;
2294 struct net *net = info->nl_net;
2295 u32 seq;
2296 int err;
2297
2298 err = -ENOBUFS;
2299 seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
2300
2301 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2302 if (skb == NULL)
2303 goto errout;
2304
2305 err = rt6_fill_node(skb, rt, NULL, NULL, 0,
2306 event, info->pid, seq, 0, 0);
2307 if (err < 0) {
2308 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2309 WARN_ON(err == -EMSGSIZE);
2310 kfree_skb(skb);
2311 goto errout;
2312 }
2313 err = rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2314 info->nlh, gfp_any());
2315 errout:
2316 if (err < 0)
2317 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2318 }
2319
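/*
 * When a namespace's loopback device is registered, attach the special
 * null (and, with multiple tables, prohibit/blackhole) routes to it.
 */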
2320 static int ip6_route_dev_notify(struct notifier_block *this,
2321 unsigned long event, void *data)
2322 {
2323 struct net_device *dev = (struct net_device *)data;
2324 struct net *net = dev_net(dev);
2325
2326 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2327 net->ipv6.ip6_null_entry->u.dst.dev = dev;
2328 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2329 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2330 net->ipv6.ip6_prohibit_entry->u.dst.dev = dev;
2331 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2332 net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev;
2333 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2334 #endif
2335 }
2336
2337 return NOTIFY_OK;
2338 }
2339
2340 /*
2341 * /proc
2342 */
2343
2344 #ifdef CONFIG_PROC_FS
2345
2346 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2347
2348 struct rt6_proc_arg
2349 {
2350 char *buffer;
2351 int offset;
2352 int length;
2353 int skip;
2354 int len;
2355 };
2356
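/*
 * Emit one /proc/net/ipv6_route line: destination and source prefixes,
 * next hop, metric, refcount, use count and flags in hex, followed by
 * the device name.
 */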
2357 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2358 {
2359 struct seq_file *m = p_arg;
2360
2361 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_dst.addr),
2362 rt->rt6i_dst.plen);
2363
2364 #ifdef CONFIG_IPV6_SUBTREES
2365 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_src.addr),
2366 rt->rt6i_src.plen);
2367 #else
2368 seq_puts(m, "00000000000000000000000000000000 00 ");
2369 #endif
2370
2371 if (rt->rt6i_nexthop) {
2372 seq_printf(m, NIP6_SEQFMT,
2373 NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
2374 } else {
2375 seq_puts(m, "00000000000000000000000000000000");
2376 }
2377 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2378 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2379 rt->u.dst.__use, rt->rt6i_flags,
2380 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2381 return 0;
2382 }
2383
2384 static int ipv6_route_show(struct seq_file *m, void *v)
2385 {
2386 struct net *net = (struct net *)m->private;
2387 fib6_clean_all(net, rt6_info_route, 0, m);
2388 return 0;
2389 }
2390
2391 static int ipv6_route_open(struct inode *inode, struct file *file)
2392 {
2393 struct net *net = get_proc_net(inode);
2394 if (!net)
2395 return -ENXIO;
2396 return single_open(file, ipv6_route_show, net);
2397 }
2398
2399 static int ipv6_route_release(struct inode *inode, struct file *file)
2400 {
2401 struct seq_file *seq = file->private_data;
2402 struct net *net = seq->private;
2403 put_net(net);
2404 return single_release(inode, file);
2405 }
2406
2407 static const struct file_operations ipv6_route_proc_fops = {
2408 .owner = THIS_MODULE,
2409 .open = ipv6_route_open,
2410 .read = seq_read,
2411 .llseek = seq_lseek,
2412 .release = ipv6_route_release,
2413 };
2414
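/* /proc/net/rt6_stats: FIB node, route and cache counters for this namespace. */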
2415 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2416 {
2417 struct net *net = (struct net *)seq->private;
2418 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2419 net->ipv6.rt6_stats->fib_nodes,
2420 net->ipv6.rt6_stats->fib_route_nodes,
2421 net->ipv6.rt6_stats->fib_rt_alloc,
2422 net->ipv6.rt6_stats->fib_rt_entries,
2423 net->ipv6.rt6_stats->fib_rt_cache,
2424 atomic_read(&net->ipv6.ip6_dst_ops->entries),
2425 net->ipv6.rt6_stats->fib_discarded_routes);
2426
2427 return 0;
2428 }
2429
2430 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2431 {
2432 struct net *net = get_proc_net(inode);
 if (!net)
 return -ENXIO;
2433 return single_open(file, rt6_stats_seq_show, net);
2434 }
2435
2436 static int rt6_stats_seq_release(struct inode *inode, struct file *file)
2437 {
2438 struct seq_file *seq = file->private_data;
2439 struct net *net = (struct net *)seq->private;
2440 put_net(net);
2441 return single_release(inode, file);
2442 }
2443
2444 static const struct file_operations rt6_stats_seq_fops = {
2445 .owner = THIS_MODULE,
2446 .open = rt6_stats_seq_open,
2447 .read = seq_read,
2448 .llseek = seq_lseek,
2449 .release = rt6_stats_seq_release,
2450 };
2451 #endif /* CONFIG_PROC_FS */
2452
2453 #ifdef CONFIG_SYSCTL
2454
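/*
 * Handler for the write-only "flush" sysctl: a write stores the new
 * flush_delay and kicks off a garbage-collection pass via
 * fib6_run_gc(); reads are rejected with -EINVAL.
 */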
2455 static
2456 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2457 void __user *buffer, size_t *lenp, loff_t *ppos)
2458 {
2459 struct net *net = current->nsproxy->net_ns;
2460 int delay;
2461 if (write) {
2462 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
 delay = net->ipv6.sysctl.flush_delay;
2463 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2464 return 0;
2465 } else
2466 return -EINVAL;
2467 }
2468
2469 ctl_table ipv6_route_table_template[] = {
2470 {
2471 .procname = "flush",
2472 .data = &init_net.ipv6.sysctl.flush_delay,
2473 .maxlen = sizeof(int),
2474 .mode = 0200,
2475 .proc_handler = &ipv6_sysctl_rtcache_flush
2476 },
2477 {
2478 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2479 .procname = "gc_thresh",
2480 .data = &ip6_dst_ops_template.gc_thresh,
2481 .maxlen = sizeof(int),
2482 .mode = 0644,
2483 .proc_handler = &proc_dointvec,
2484 },
2485 {
2486 .ctl_name = NET_IPV6_ROUTE_MAX_SIZE,
2487 .procname = "max_size",
2488 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2489 .maxlen = sizeof(int),
2490 .mode = 0644,
2491 .proc_handler = &proc_dointvec,
2492 },
2493 {
2494 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL,
2495 .procname = "gc_min_interval",
2496 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2497 .maxlen = sizeof(int),
2498 .mode = 0644,
2499 .proc_handler = &proc_dointvec_jiffies,
2500 .strategy = &sysctl_jiffies,
2501 },
2502 {
2503 .ctl_name = NET_IPV6_ROUTE_GC_TIMEOUT,
2504 .procname = "gc_timeout",
2505 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2506 .maxlen = sizeof(int),
2507 .mode = 0644,
2508 .proc_handler = &proc_dointvec_jiffies,
2509 .strategy = &sysctl_jiffies,
2510 },
2511 {
2512 .ctl_name = NET_IPV6_ROUTE_GC_INTERVAL,
2513 .procname = "gc_interval",
2514 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2515 .maxlen = sizeof(int),
2516 .mode = 0644,
2517 .proc_handler = &proc_dointvec_jiffies,
2518 .strategy = &sysctl_jiffies,
2519 },
2520 {
2521 .ctl_name = NET_IPV6_ROUTE_GC_ELASTICITY,
2522 .procname = "gc_elasticity",
2523 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2524 .maxlen = sizeof(int),
2525 .mode = 0644,
2526 .proc_handler = &proc_dointvec_jiffies,
2527 .strategy = &sysctl_jiffies,
2528 },
2529 {
2530 .ctl_name = NET_IPV6_ROUTE_MTU_EXPIRES,
2531 .procname = "mtu_expires",
2532 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2533 .maxlen = sizeof(int),
2534 .mode = 0644,
2535 .proc_handler = &proc_dointvec_jiffies,
2536 .strategy = &sysctl_jiffies,
2537 },
2538 {
2539 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS,
2540 .procname = "min_adv_mss",
2541 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2542 .maxlen = sizeof(int),
2543 .mode = 0644,
2544 .proc_handler = &proc_dointvec_jiffies,
2545 .strategy = &sysctl_jiffies,
2546 },
2547 {
2548 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
2549 .procname = "gc_min_interval_ms",
2550 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2551 .maxlen = sizeof(int),
2552 .mode = 0644,
2553 .proc_handler = &proc_dointvec_ms_jiffies,
2554 .strategy = &sysctl_ms_jiffies,
2555 },
2556 { .ctl_name = 0 }
2557 };
2558
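/*
 * Duplicate the sysctl template for a namespace and repoint each
 * entry's .data at that namespace's copy of the variable; the indices
 * below must stay in step with ipv6_route_table_template[].
 */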
2559 struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2560 {
2561 struct ctl_table *table;
2562
2563 table = kmemdup(ipv6_route_table_template,
2564 sizeof(ipv6_route_table_template),
2565 GFP_KERNEL);
2566
2567 if (table) {
2568 table[0].data = &net->ipv6.sysctl.flush_delay;
2569 table[1].data = &net->ipv6.ip6_dst_ops->gc_thresh;
2570 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2571 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2572 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2573 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2574 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2575 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2576 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2577 }
2578
2579 return table;
2580 }
2581 #endif
2582
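/*
 * Per-namespace initialisation: clone the dst_ops template and the
 * special null/prohibit/blackhole route templates, then create the
 * /proc entries.  ip6_route_net_exit() undoes all of this.
 */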
2583 static int ip6_route_net_init(struct net *net)
2584 {
2585 int ret = 0;
2586
2587 ret = -ENOMEM;
2588 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template,
2589 sizeof(*net->ipv6.ip6_dst_ops),
2590 GFP_KERNEL);
2591 if (!net->ipv6.ip6_dst_ops)
2592 goto out;
2593 net->ipv6.ip6_dst_ops->dst_net = net;
2594
2595 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2596 sizeof(*net->ipv6.ip6_null_entry),
2597 GFP_KERNEL);
2598 if (!net->ipv6.ip6_null_entry)
2599 goto out_ip6_dst_ops;
2600 net->ipv6.ip6_null_entry->u.dst.path =
2601 (struct dst_entry *)net->ipv6.ip6_null_entry;
2602 net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2603
2604 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2605 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2606 sizeof(*net->ipv6.ip6_prohibit_entry),
2607 GFP_KERNEL);
2608 if (!net->ipv6.ip6_prohibit_entry) {
2609 kfree(net->ipv6.ip6_null_entry);
2610 goto out_ip6_dst_ops;
2611 }
2612 net->ipv6.ip6_prohibit_entry->u.dst.path =
2613 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2614 net->ipv6.ip6_prohibit_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2615
2616 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2617 sizeof(*net->ipv6.ip6_blk_hole_entry),
2618 GFP_KERNEL);
2619 if (!net->ipv6.ip6_blk_hole_entry) {
2620 kfree(net->ipv6.ip6_null_entry);
2621 kfree(net->ipv6.ip6_prohibit_entry);
2622 goto out_ip6_dst_ops;
2623 }
2624 net->ipv6.ip6_blk_hole_entry->u.dst.path =
2625 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2626 net->ipv6.ip6_blk_hole_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2627 #endif
2628
2629 #ifdef CONFIG_PROC_FS
2630 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2631 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2632 #endif
2633 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2634
2635 ret = 0;
2636 out:
2637 return ret;
2638
2639 out_ip6_dst_ops:
2640 kfree(net->ipv6.ip6_dst_ops);
2641 goto out;
2642 }
2643
2644 static void ip6_route_net_exit(struct net *net)
2645 {
2646 #ifdef CONFIG_PROC_FS
2647 proc_net_remove(net, "ipv6_route");
2648 proc_net_remove(net, "rt6_stats");
2649 #endif
2650 kfree(net->ipv6.ip6_null_entry);
2651 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2652 kfree(net->ipv6.ip6_prohibit_entry);
2653 kfree(net->ipv6.ip6_blk_hole_entry);
2654 #endif
2655 kfree(net->ipv6.ip6_dst_ops);
2656 }
2657
2658 static struct pernet_operations ip6_route_net_ops = {
2659 .init = ip6_route_net_init,
2660 .exit = ip6_route_net_exit,
2661 };
2662
2663 static struct notifier_block ip6_route_dev_notifier = {
2664 .notifier_call = ip6_route_dev_notify,
2665 .priority = 0,
2666 };
2667
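/*
 * Subsystem init: the dst kmem cache and per-namespace state must be
 * in place before fib6_init(), xfrm6_init() and fib6_rules_init() run
 * and before the rtnetlink handlers and netdevice notifier register;
 * failures unwind in reverse order below.
 */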
2668 int __init ip6_route_init(void)
2669 {
2670 int ret;
2671
2672 ret = -ENOMEM;
2673 ip6_dst_ops_template.kmem_cachep =
2674 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2675 SLAB_HWCACHE_ALIGN, NULL);
2676 if (!ip6_dst_ops_template.kmem_cachep)
2677 goto out;
2678
2679 ret = register_pernet_subsys(&ip6_route_net_ops);
2680 if (ret)
2681 goto out_kmem_cache;
2682
2683 /* The loopback device is registered before this code runs, so the
2684 * loopback reference in rt6_info has not yet been taken; take it
2685 * manually for init_net. */
2686 init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev;
2687 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2688 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2689 init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev;
2690 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2691 init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev;
2692 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2693 #endif
2694 ret = fib6_init();
2695 if (ret)
2696 goto out_register_subsys;
2697
2698 ret = xfrm6_init();
2699 if (ret)
2700 goto out_fib6_init;
2701
2702 ret = fib6_rules_init();
2703 if (ret)
2704 goto out_xfrm6_init;
2705
2706 ret = -ENOBUFS;
2707 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
2708 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
2709 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2710 goto out_fib6_rules_init;
2711
2712 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2713 if (ret)
2714 goto out_fib6_rules_init;
2715
2716 out:
2717 return ret;
2718
2719 out_fib6_rules_init:
2720 fib6_rules_cleanup();
2721 out_xfrm6_init:
2722 xfrm6_fini();
2723 out_fib6_init:
2724 fib6_gc_cleanup();
2725 out_register_subsys:
2726 unregister_pernet_subsys(&ip6_route_net_ops);
2727 out_kmem_cache:
2728 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2729 goto out;
2730 }
2731
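/* Undo ip6_route_init() in reverse order. */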
2732 void ip6_route_cleanup(void)
2733 {
2734 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2735 fib6_rules_cleanup();
2736 xfrm6_fini();
2737 fib6_gc_cleanup();
2738 unregister_pernet_subsys(&ip6_route_net_ops);
2739 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2740 }