net/ipv6/route.c
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 /* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. Otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/times.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/route.h>
35 #include <linux/netdevice.h>
36 #include <linux/in6.h>
37 #include <linux/mroute6.h>
38 #include <linux/init.h>
39 #include <linux/if_arp.h>
40 #include <linux/proc_fs.h>
41 #include <linux/seq_file.h>
42 #include <linux/nsproxy.h>
43 #include <linux/slab.h>
44 #include <net/net_namespace.h>
45 #include <net/snmp.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
51 #include <net/tcp.h>
52 #include <linux/rtnetlink.h>
53 #include <net/dst.h>
54 #include <net/xfrm.h>
55 #include <net/netevent.h>
56 #include <net/netlink.h>
57
58 #include <asm/uaccess.h>
59
60 #ifdef CONFIG_SYSCTL
61 #include <linux/sysctl.h>
62 #endif
63
64 /* Set to 3 to get tracing. */
65 #define RT6_DEBUG 2
66
67 #if RT6_DEBUG >= 3
68 #define RDBG(x) printk x
69 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
70 #else
71 #define RDBG(x)
72 #define RT6_TRACE(x...) do { ; } while (0)
73 #endif
74
75 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
76 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
77 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
78 static unsigned int ip6_default_mtu(const struct dst_entry *dst);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void ip6_dst_destroy(struct dst_entry *);
81 static void ip6_dst_ifdown(struct dst_entry *,
82 struct net_device *dev, int how);
83 static int ip6_dst_gc(struct dst_ops *ops);
84
85 static int ip6_pkt_discard(struct sk_buff *skb);
86 static int ip6_pkt_discard_out(struct sk_buff *skb);
87 static void ip6_link_failure(struct sk_buff *skb);
88 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
89
90 #ifdef CONFIG_IPV6_ROUTE_INFO
91 static struct rt6_info *rt6_add_route_info(struct net *net,
92 struct in6_addr *prefix, int prefixlen,
93 struct in6_addr *gwaddr, int ifindex,
94 unsigned pref);
95 static struct rt6_info *rt6_get_route_info(struct net *net,
96 struct in6_addr *prefix, int prefixlen,
97 struct in6_addr *gwaddr, int ifindex);
98 #endif
99
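/*
 * Copy-on-write route metrics: the first time a writable metric is needed,
 * the values are copied from the (read-only) template into the inet_peer
 * entry bound to this route, and dst->_metrics is switched over with
 * cmpxchg() so a concurrent writer cannot install a second copy.
 */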
100 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
101 {
102 struct rt6_info *rt = (struct rt6_info *) dst;
103 struct inet_peer *peer;
104 u32 *p = NULL;
105
106 if (!rt->rt6i_peer)
107 rt6_bind_peer(rt, 1);
108
109 peer = rt->rt6i_peer;
110 if (peer) {
111 u32 *old_p = __DST_METRICS_PTR(old);
112 unsigned long prev, new;
113
114 p = peer->metrics;
115 if (inet_metrics_new(peer))
116 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
117
118 new = (unsigned long) p;
119 prev = cmpxchg(&dst->_metrics, old, new);
120
121 if (prev != old) {
122 p = __DST_METRICS_PTR(prev);
123 if (prev & DST_METRICS_READ_ONLY)
124 p = NULL;
125 }
126 }
127 return p;
128 }
129
130 static struct dst_ops ip6_dst_ops_template = {
131 .family = AF_INET6,
132 .protocol = cpu_to_be16(ETH_P_IPV6),
133 .gc = ip6_dst_gc,
134 .gc_thresh = 1024,
135 .check = ip6_dst_check,
136 .default_advmss = ip6_default_advmss,
137 .default_mtu = ip6_default_mtu,
138 .cow_metrics = ipv6_cow_metrics,
139 .destroy = ip6_dst_destroy,
140 .ifdown = ip6_dst_ifdown,
141 .negative_advice = ip6_negative_advice,
142 .link_failure = ip6_link_failure,
143 .update_pmtu = ip6_rt_update_pmtu,
144 .local_out = __ip6_local_out,
145 };
146
147 static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
148 {
149 return 0;
150 }
151
152 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
153 {
154 }
155
156 static struct dst_ops ip6_dst_blackhole_ops = {
157 .family = AF_INET6,
158 .protocol = cpu_to_be16(ETH_P_IPV6),
159 .destroy = ip6_dst_destroy,
160 .check = ip6_dst_check,
161 .default_mtu = ip6_blackhole_default_mtu,
162 .default_advmss = ip6_default_advmss,
163 .update_pmtu = ip6_rt_blackhole_update_pmtu,
164 };
165
166 static const u32 ip6_template_metrics[RTAX_MAX] = {
167 [RTAX_HOPLIMIT - 1] = 255,
168 };
169
170 static struct rt6_info ip6_null_entry_template = {
171 .dst = {
172 .__refcnt = ATOMIC_INIT(1),
173 .__use = 1,
174 .obsolete = -1,
175 .error = -ENETUNREACH,
176 .input = ip6_pkt_discard,
177 .output = ip6_pkt_discard_out,
178 },
179 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
180 .rt6i_protocol = RTPROT_KERNEL,
181 .rt6i_metric = ~(u32) 0,
182 .rt6i_ref = ATOMIC_INIT(1),
183 };
184
185 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
186
187 static int ip6_pkt_prohibit(struct sk_buff *skb);
188 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
189
190 static struct rt6_info ip6_prohibit_entry_template = {
191 .dst = {
192 .__refcnt = ATOMIC_INIT(1),
193 .__use = 1,
194 .obsolete = -1,
195 .error = -EACCES,
196 .input = ip6_pkt_prohibit,
197 .output = ip6_pkt_prohibit_out,
198 },
199 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
200 .rt6i_protocol = RTPROT_KERNEL,
201 .rt6i_metric = ~(u32) 0,
202 .rt6i_ref = ATOMIC_INIT(1),
203 };
204
205 static struct rt6_info ip6_blk_hole_entry_template = {
206 .dst = {
207 .__refcnt = ATOMIC_INIT(1),
208 .__use = 1,
209 .obsolete = -1,
210 .error = -EINVAL,
211 .input = dst_discard,
212 .output = dst_discard,
213 },
214 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
215 .rt6i_protocol = RTPROT_KERNEL,
216 .rt6i_metric = ~(u32) 0,
217 .rt6i_ref = ATOMIC_INIT(1),
218 };
219
220 #endif
221
222 /* allocate dst with ip6_dst_ops */
223 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
224 {
225 return (struct rt6_info *)dst_alloc(ops, 0);
226 }
227
228 static void ip6_dst_destroy(struct dst_entry *dst)
229 {
230 struct rt6_info *rt = (struct rt6_info *)dst;
231 struct inet6_dev *idev = rt->rt6i_idev;
232 struct inet_peer *peer = rt->rt6i_peer;
233
234 if (idev != NULL) {
235 rt->rt6i_idev = NULL;
236 in6_dev_put(idev);
237 }
238 if (peer) {
239 rt->rt6i_peer = NULL;
240 inet_putpeer(peer);
241 }
242 }
243
244 static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
245
246 static u32 rt6_peer_genid(void)
247 {
248 return atomic_read(&__rt6_peer_genid);
249 }
250
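/*
 * Bind the route to the inet_peer entry for its destination address and
 * record the current peer generation; ip6_dst_check() compares that
 * generation later to decide whether the binding must be refreshed.
 * If another CPU won the race and already bound a peer, drop ours.
 */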
251 void rt6_bind_peer(struct rt6_info *rt, int create)
252 {
253 struct inet_peer *peer;
254
255 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
256 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
257 inet_putpeer(peer);
258 else
259 rt->rt6i_peer_genid = rt6_peer_genid();
260 }
261
262 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
263 int how)
264 {
265 struct rt6_info *rt = (struct rt6_info *)dst;
266 struct inet6_dev *idev = rt->rt6i_idev;
267 struct net_device *loopback_dev =
268 dev_net(dev)->loopback_dev;
269
270 if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
271 struct inet6_dev *loopback_idev =
272 in6_dev_get(loopback_dev);
273 if (loopback_idev != NULL) {
274 rt->rt6i_idev = loopback_idev;
275 in6_dev_put(idev);
276 }
277 }
278 }
279
280 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
281 {
282 return (rt->rt6i_flags & RTF_EXPIRES) &&
283 time_after(jiffies, rt->rt6i_expires);
284 }
285
286 static inline int rt6_need_strict(struct in6_addr *daddr)
287 {
288 return ipv6_addr_type(daddr) &
289 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
290 }
291
292 /*
293 * Route lookup. The caller is assumed to hold the relevant table->tb6_lock.
294 */
295
296 static inline struct rt6_info *rt6_device_match(struct net *net,
297 struct rt6_info *rt,
298 struct in6_addr *saddr,
299 int oif,
300 int flags)
301 {
302 struct rt6_info *local = NULL;
303 struct rt6_info *sprt;
304
305 if (!oif && ipv6_addr_any(saddr))
306 goto out;
307
308 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
309 struct net_device *dev = sprt->rt6i_dev;
310
311 if (oif) {
312 if (dev->ifindex == oif)
313 return sprt;
314 if (dev->flags & IFF_LOOPBACK) {
315 if (sprt->rt6i_idev == NULL ||
316 sprt->rt6i_idev->dev->ifindex != oif) {
317 if (flags & RT6_LOOKUP_F_IFACE && oif)
318 continue;
319 if (local && (!oif ||
320 local->rt6i_idev->dev->ifindex == oif))
321 continue;
322 }
323 local = sprt;
324 }
325 } else {
326 if (ipv6_chk_addr(net, saddr, dev,
327 flags & RT6_LOOKUP_F_IFACE))
328 return sprt;
329 }
330 }
331
332 if (oif) {
333 if (local)
334 return local;
335
336 if (flags & RT6_LOOKUP_F_IFACE)
337 return net->ipv6.ip6_null_entry;
338 }
339 out:
340 return rt;
341 }
342
343 #ifdef CONFIG_IPV6_ROUTER_PREF
344 static void rt6_probe(struct rt6_info *rt)
345 {
346 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
347 /*
348 * Okay, this does not seem to be appropriate
349 * for now, however, we need to check if it
350 * is really so; aka Router Reachability Probing.
351 *
352 * Router Reachability Probe MUST be rate-limited
353 * to no more than one per minute.
354 */
355 if (!neigh || (neigh->nud_state & NUD_VALID))
356 return;
357 read_lock_bh(&neigh->lock);
358 if (!(neigh->nud_state & NUD_VALID) &&
359 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
360 struct in6_addr mcaddr;
361 struct in6_addr *target;
362
363 neigh->updated = jiffies;
364 read_unlock_bh(&neigh->lock);
365
366 target = (struct in6_addr *)&neigh->primary_key;
367 addrconf_addr_solict_mult(target, &mcaddr);
368 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
369 } else
370 read_unlock_bh(&neigh->lock);
371 }
372 #else
373 static inline void rt6_probe(struct rt6_info *rt)
374 {
375 }
376 #endif
377
378 /*
379 * Default Router Selection (RFC 2461 6.3.6)
380 */
381 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
382 {
383 struct net_device *dev = rt->rt6i_dev;
384 if (!oif || dev->ifindex == oif)
385 return 2;
386 if ((dev->flags & IFF_LOOPBACK) &&
387 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
388 return 1;
389 return 0;
390 }
391
392 static inline int rt6_check_neigh(struct rt6_info *rt)
393 {
394 struct neighbour *neigh = rt->rt6i_nexthop;
395 int m;
396 if (rt->rt6i_flags & RTF_NONEXTHOP ||
397 !(rt->rt6i_flags & RTF_GATEWAY))
398 m = 1;
399 else if (neigh) {
400 read_lock_bh(&neigh->lock);
401 if (neigh->nud_state & NUD_VALID)
402 m = 2;
403 #ifdef CONFIG_IPV6_ROUTER_PREF
404 else if (neigh->nud_state & NUD_FAILED)
405 m = 0;
406 #endif
407 else
408 m = 1;
409 read_unlock_bh(&neigh->lock);
410 } else
411 m = 0;
412 return m;
413 }
414
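/*
 * Score a candidate route: interface match from rt6_check_dev() in the low
 * bits, with the RFC 4191 route preference (when CONFIG_IPV6_ROUTER_PREF is
 * set) placed above them.  Returns -1 when a strict requirement (matching
 * interface or reachable neighbour) is not met.
 */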
415 static int rt6_score_route(struct rt6_info *rt, int oif,
416 int strict)
417 {
418 int m, n;
419
420 m = rt6_check_dev(rt, oif);
421 if (!m && (strict & RT6_LOOKUP_F_IFACE))
422 return -1;
423 #ifdef CONFIG_IPV6_ROUTER_PREF
424 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
425 #endif
426 n = rt6_check_neigh(rt);
427 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
428 return -1;
429 return m;
430 }
431
432 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
433 int *mpri, struct rt6_info *match)
434 {
435 int m;
436
437 if (rt6_check_expired(rt))
438 goto out;
439
440 m = rt6_score_route(rt, oif, strict);
441 if (m < 0)
442 goto out;
443
444 if (m > *mpri) {
445 if (strict & RT6_LOOKUP_F_REACHABLE)
446 rt6_probe(match);
447 *mpri = m;
448 match = rt;
449 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
450 rt6_probe(rt);
451 }
452
453 out:
454 return match;
455 }
456
457 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
458 struct rt6_info *rr_head,
459 u32 metric, int oif, int strict)
460 {
461 struct rt6_info *rt, *match;
462 int mpri = -1;
463
464 match = NULL;
465 for (rt = rr_head; rt && rt->rt6i_metric == metric;
466 rt = rt->dst.rt6_next)
467 match = find_match(rt, oif, strict, &mpri, match);
468 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
469 rt = rt->dst.rt6_next)
470 match = find_match(rt, oif, strict, &mpri, match);
471
472 return match;
473 }
474
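/*
 * Round-robin default router selection: score every sibling route that
 * shares rt0's metric, starting at fn->rr_ptr.  If nothing reachable was
 * found under RT6_LOOKUP_F_REACHABLE, advance rr_ptr so the next lookup
 * starts from a different candidate.
 */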
475 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
476 {
477 struct rt6_info *match, *rt0;
478 struct net *net;
479
480 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
481 __func__, fn->leaf, oif);
482
483 rt0 = fn->rr_ptr;
484 if (!rt0)
485 fn->rr_ptr = rt0 = fn->leaf;
486
487 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
488
489 if (!match &&
490 (strict & RT6_LOOKUP_F_REACHABLE)) {
491 struct rt6_info *next = rt0->dst.rt6_next;
492
493 /* no entries matched; do round-robin */
494 if (!next || next->rt6i_metric != rt0->rt6i_metric)
495 next = fn->leaf;
496
497 if (next != rt0)
498 fn->rr_ptr = next;
499 }
500
501 RT6_TRACE("%s() => %p\n",
502 __func__, match);
503
504 net = dev_net(rt0->rt6i_dev);
505 return match ? match : net->ipv6.ip6_null_entry;
506 }
507
508 #ifdef CONFIG_IPV6_ROUTE_INFO
509 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
510 struct in6_addr *gwaddr)
511 {
512 struct net *net = dev_net(dev);
513 struct route_info *rinfo = (struct route_info *) opt;
514 struct in6_addr prefix_buf, *prefix;
515 unsigned int pref;
516 unsigned long lifetime;
517 struct rt6_info *rt;
518
519 if (len < sizeof(struct route_info)) {
520 return -EINVAL;
521 }
522
523 /* Sanity check for prefix_len and length */
524 if (rinfo->length > 3) {
525 return -EINVAL;
526 } else if (rinfo->prefix_len > 128) {
527 return -EINVAL;
528 } else if (rinfo->prefix_len > 64) {
529 if (rinfo->length < 2) {
530 return -EINVAL;
531 }
532 } else if (rinfo->prefix_len > 0) {
533 if (rinfo->length < 1) {
534 return -EINVAL;
535 }
536 }
537
538 pref = rinfo->route_pref;
539 if (pref == ICMPV6_ROUTER_PREF_INVALID)
540 return -EINVAL;
541
542 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
543
544 if (rinfo->length == 3)
545 prefix = (struct in6_addr *)rinfo->prefix;
546 else {
547 /* this function is safe */
548 ipv6_addr_prefix(&prefix_buf,
549 (struct in6_addr *)rinfo->prefix,
550 rinfo->prefix_len);
551 prefix = &prefix_buf;
552 }
553
554 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
555 dev->ifindex);
556
557 if (rt && !lifetime) {
558 ip6_del_rt(rt);
559 rt = NULL;
560 }
561
562 if (!rt && lifetime)
563 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
564 pref);
565 else if (rt)
566 rt->rt6i_flags = RTF_ROUTEINFO |
567 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
568
569 if (rt) {
570 if (!addrconf_finite_timeout(lifetime)) {
571 rt->rt6i_flags &= ~RTF_EXPIRES;
572 } else {
573 rt->rt6i_expires = jiffies + HZ * lifetime;
574 rt->rt6i_flags |= RTF_EXPIRES;
575 }
576 dst_release(&rt->dst);
577 }
578 return 0;
579 }
580 #endif
581
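/*
 * BACKTRACK: when the lookup resolved to the null entry, walk back up the
 * fib6 tree, descending into a parent's source-address subtree where one
 * exists, until a node carrying route info (RTN_RTINFO) is found and the
 * lookup is restarted there; hitting the tree root gives up via "out".
 */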
582 #define BACKTRACK(__net, saddr) \
583 do { \
584 if (rt == __net->ipv6.ip6_null_entry) { \
585 struct fib6_node *pn; \
586 while (1) { \
587 if (fn->fn_flags & RTN_TL_ROOT) \
588 goto out; \
589 pn = fn->parent; \
590 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
591 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
592 else \
593 fn = pn; \
594 if (fn->fn_flags & RTN_RTINFO) \
595 goto restart; \
596 } \
597 } \
598 } while(0)
599
600 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
601 struct fib6_table *table,
602 struct flowi *fl, int flags)
603 {
604 struct fib6_node *fn;
605 struct rt6_info *rt;
606
607 read_lock_bh(&table->tb6_lock);
608 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
609 restart:
610 rt = fn->leaf;
611 rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
612 BACKTRACK(net, &fl->fl6_src);
613 out:
614 dst_use(&rt->dst, jiffies);
615 read_unlock_bh(&table->tb6_lock);
616 return rt;
617
618 }
619
620 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
621 const struct in6_addr *saddr, int oif, int strict)
622 {
623 struct flowi fl = {
624 .oif = oif,
625 .fl6_dst = *daddr,
626 };
627 struct dst_entry *dst;
628 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
629
630 if (saddr) {
631 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
632 flags |= RT6_LOOKUP_F_HAS_SADDR;
633 }
634
635 dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup);
636 if (dst->error == 0)
637 return (struct rt6_info *) dst;
638
639 dst_release(dst);
640
641 return NULL;
642 }
643
644 EXPORT_SYMBOL(rt6_lookup);
645
646 /* ip6_ins_rt is called with the table->tb6_lock NOT held.
647    It takes a new route entry; if the addition fails for any reason,
648    the route is freed. In any case, if the caller does not hold a
649    reference, the route may be destroyed.
650 */
651
652 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
653 {
654 int err;
655 struct fib6_table *table;
656
657 table = rt->rt6i_table;
658 write_lock_bh(&table->tb6_lock);
659 err = fib6_add(&table->tb6_root, rt, info);
660 write_unlock_bh(&table->tb6_lock);
661
662 return err;
663 }
664
665 int ip6_ins_rt(struct rt6_info *rt)
666 {
667 struct nl_info info = {
668 .nl_net = dev_net(rt->rt6i_dev),
669 };
670 return __ip6_ins_rt(rt, &info);
671 }
672
673 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
674 struct in6_addr *saddr)
675 {
676 struct rt6_info *rt;
677
678 /*
679 * Clone the route.
680 */
681
682 rt = ip6_rt_copy(ort);
683
684 if (rt) {
685 struct neighbour *neigh;
686 int attempts = !in_softirq();
687
688 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
689 if (rt->rt6i_dst.plen != 128 &&
690 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
691 rt->rt6i_flags |= RTF_ANYCAST;
692 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
693 }
694
695 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
696 rt->rt6i_dst.plen = 128;
697 rt->rt6i_flags |= RTF_CACHE;
698 rt->dst.flags |= DST_HOST;
699
700 #ifdef CONFIG_IPV6_SUBTREES
701 if (rt->rt6i_src.plen && saddr) {
702 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
703 rt->rt6i_src.plen = 128;
704 }
705 #endif
706
707 retry:
708 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
709 if (IS_ERR(neigh)) {
710 struct net *net = dev_net(rt->rt6i_dev);
711 int saved_rt_min_interval =
712 net->ipv6.sysctl.ip6_rt_gc_min_interval;
713 int saved_rt_elasticity =
714 net->ipv6.sysctl.ip6_rt_gc_elasticity;
715
716 if (attempts-- > 0) {
717 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
718 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
719
720 ip6_dst_gc(&net->ipv6.ip6_dst_ops);
721
722 net->ipv6.sysctl.ip6_rt_gc_elasticity =
723 saved_rt_elasticity;
724 net->ipv6.sysctl.ip6_rt_gc_min_interval =
725 saved_rt_min_interval;
726 goto retry;
727 }
728
729 if (net_ratelimit())
730 printk(KERN_WARNING
731 "ipv6: Neighbour table overflow.\n");
732 dst_free(&rt->dst);
733 return NULL;
734 }
735 rt->rt6i_nexthop = neigh;
736
737 }
738
739 return rt;
740 }
741
742 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
743 {
744 struct rt6_info *rt = ip6_rt_copy(ort);
745 if (rt) {
746 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
747 rt->rt6i_dst.plen = 128;
748 rt->rt6i_flags |= RTF_CACHE;
749 rt->dst.flags |= DST_HOST;
750 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
751 }
752 return rt;
753 }
754
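/*
 * Core lookup for input and output routing.  When the host is not
 * forwarding, the first pass insists on a (probably) reachable router
 * (RT6_LOOKUP_F_REACHABLE) and restart_2 retries without that restriction
 * if nothing qualified.  Unless the selected route is already a cached
 * host route, a per-destination copy is created (rt6_alloc_cow() when the
 * neighbour still needs to be resolved, rt6_alloc_clone() otherwise) and
 * inserted, retrying a few times if a concurrent insert wins the race.
 */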
755 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
756 struct flowi *fl, int flags)
757 {
758 struct fib6_node *fn;
759 struct rt6_info *rt, *nrt;
760 int strict = 0;
761 int attempts = 3;
762 int err;
763 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
764
765 strict |= flags & RT6_LOOKUP_F_IFACE;
766
767 relookup:
768 read_lock_bh(&table->tb6_lock);
769
770 restart_2:
771 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
772
773 restart:
774 rt = rt6_select(fn, oif, strict | reachable);
775
776 BACKTRACK(net, &fl->fl6_src);
777 if (rt == net->ipv6.ip6_null_entry ||
778 rt->rt6i_flags & RTF_CACHE)
779 goto out;
780
781 dst_hold(&rt->dst);
782 read_unlock_bh(&table->tb6_lock);
783
784 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
785 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
786 else
787 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
788
789 dst_release(&rt->dst);
790 rt = nrt ? : net->ipv6.ip6_null_entry;
791
792 dst_hold(&rt->dst);
793 if (nrt) {
794 err = ip6_ins_rt(nrt);
795 if (!err)
796 goto out2;
797 }
798
799 if (--attempts <= 0)
800 goto out2;
801
802 /*
803 * Race condition! In the window while table->tb6_lock was
804 * released, someone else could have inserted this route. Relookup.
805 */
806 dst_release(&rt->dst);
807 goto relookup;
808
809 out:
810 if (reachable) {
811 reachable = 0;
812 goto restart_2;
813 }
814 dst_hold(&rt->dst);
815 read_unlock_bh(&table->tb6_lock);
816 out2:
817 rt->dst.lastuse = jiffies;
818 rt->dst.__use++;
819
820 return rt;
821 }
822
823 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
824 struct flowi *fl, int flags)
825 {
826 return ip6_pol_route(net, table, fl->iif, fl, flags);
827 }
828
829 void ip6_route_input(struct sk_buff *skb)
830 {
831 struct ipv6hdr *iph = ipv6_hdr(skb);
832 struct net *net = dev_net(skb->dev);
833 int flags = RT6_LOOKUP_F_HAS_SADDR;
834 struct flowi fl = {
835 .iif = skb->dev->ifindex,
836 .fl6_dst = iph->daddr,
837 .fl6_src = iph->saddr,
838 .fl6_flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
839 .mark = skb->mark,
840 .proto = iph->nexthdr,
841 };
842
843 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
844 flags |= RT6_LOOKUP_F_IFACE;
845
846 skb_dst_set(skb, fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input));
847 }
848
849 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
850 struct flowi *fl, int flags)
851 {
852 return ip6_pol_route(net, table, fl->oif, fl, flags);
853 }
854
855 struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
856 struct flowi *fl)
857 {
858 int flags = 0;
859
860 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
861 flags |= RT6_LOOKUP_F_IFACE;
862
863 if (!ipv6_addr_any(&fl->fl6_src))
864 flags |= RT6_LOOKUP_F_HAS_SADDR;
865 else if (sk)
866 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
867
868 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
869 }
870
871 EXPORT_SYMBOL(ip6_route_output);
872
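/*
 * Produce a standalone copy of @dst_orig whose input and output handlers
 * discard every packet, while keeping the original's metrics, device,
 * gateway and addresses.  The reference on the original dst is released.
 */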
873 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
874 {
875 struct rt6_info *rt = dst_alloc(&ip6_dst_blackhole_ops, 1);
876 struct rt6_info *ort = (struct rt6_info *) dst_orig;
877 struct dst_entry *new = NULL;
878
879 if (rt) {
880 new = &rt->dst;
881
882 new->__use = 1;
883 new->input = dst_discard;
884 new->output = dst_discard;
885
886 dst_copy_metrics(new, &ort->dst);
887 new->dev = ort->dst.dev;
888 if (new->dev)
889 dev_hold(new->dev);
890 rt->rt6i_idev = ort->rt6i_idev;
891 if (rt->rt6i_idev)
892 in6_dev_hold(rt->rt6i_idev);
893 rt->rt6i_expires = 0;
894
895 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
896 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
897 rt->rt6i_metric = 0;
898
899 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
900 #ifdef CONFIG_IPV6_SUBTREES
901 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
902 #endif
903
904 dst_free(new);
905 }
906
907 dst_release(dst_orig);
908 return new ? new : ERR_PTR(-ENOMEM);
909 }
910
911 /*
912 * Destination cache support functions
913 */
914
915 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
916 {
917 struct rt6_info *rt;
918
919 rt = (struct rt6_info *) dst;
920
921 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
922 if (rt->rt6i_peer_genid != rt6_peer_genid()) {
923 if (!rt->rt6i_peer)
924 rt6_bind_peer(rt, 0);
925 rt->rt6i_peer_genid = rt6_peer_genid();
926 }
927 return dst;
928 }
929 return NULL;
930 }
931
932 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
933 {
934 struct rt6_info *rt = (struct rt6_info *) dst;
935
936 if (rt) {
937 if (rt->rt6i_flags & RTF_CACHE) {
938 if (rt6_check_expired(rt)) {
939 ip6_del_rt(rt);
940 dst = NULL;
941 }
942 } else {
943 dst_release(dst);
944 dst = NULL;
945 }
946 }
947 return dst;
948 }
949
950 static void ip6_link_failure(struct sk_buff *skb)
951 {
952 struct rt6_info *rt;
953
954 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
955
956 rt = (struct rt6_info *) skb_dst(skb);
957 if (rt) {
958 if (rt->rt6i_flags&RTF_CACHE) {
959 dst_set_expires(&rt->dst, 0);
960 rt->rt6i_flags |= RTF_EXPIRES;
961 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
962 rt->rt6i_node->fn_sernum = -1;
963 }
964 }
965
966 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
967 {
968 struct rt6_info *rt6 = (struct rt6_info*)dst;
969
970 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
971 rt6->rt6i_flags |= RTF_MODIFIED;
972 if (mtu < IPV6_MIN_MTU) {
973 u32 features = dst_metric(dst, RTAX_FEATURES);
974 mtu = IPV6_MIN_MTU;
975 features |= RTAX_FEATURE_ALLFRAG;
976 dst_metric_set(dst, RTAX_FEATURES, features);
977 }
978 dst_metric_set(dst, RTAX_MTU, mtu);
979 }
980 }
981
982 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
983 {
984 struct net_device *dev = dst->dev;
985 unsigned int mtu = dst_mtu(dst);
986 struct net *net = dev_net(dev);
987
988 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
989
990 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
991 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
992
993 /*
994 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
995 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
996 * IPV6_MAXPLEN is also valid and means: "any MSS,
997 * rely only on pmtu discovery"
998 */
999 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1000 mtu = IPV6_MAXPLEN;
1001 return mtu;
1002 }
1003
1004 static unsigned int ip6_default_mtu(const struct dst_entry *dst)
1005 {
1006 unsigned int mtu = IPV6_MIN_MTU;
1007 struct inet6_dev *idev;
1008
1009 rcu_read_lock();
1010 idev = __in6_dev_get(dst->dev);
1011 if (idev)
1012 mtu = idev->cnf.mtu6;
1013 rcu_read_unlock();
1014
1015 return mtu;
1016 }
1017
1018 static struct dst_entry *icmp6_dst_gc_list;
1019 static DEFINE_SPINLOCK(icmp6_dst_lock);
1020
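/*
 * dst entries allocated for ICMPv6/ndisc replies are never inserted into
 * the FIB; they live on a private list guarded by icmp6_dst_lock and are
 * reclaimed by icmp6_dst_gc() once their refcount drops to zero.
 */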
1021 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1022 struct neighbour *neigh,
1023 const struct in6_addr *addr)
1024 {
1025 struct rt6_info *rt;
1026 struct inet6_dev *idev = in6_dev_get(dev);
1027 struct net *net = dev_net(dev);
1028
1029 if (unlikely(idev == NULL))
1030 return NULL;
1031
1032 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1033 if (unlikely(rt == NULL)) {
1034 in6_dev_put(idev);
1035 goto out;
1036 }
1037
1038 dev_hold(dev);
1039 if (neigh)
1040 neigh_hold(neigh);
1041 else {
1042 neigh = ndisc_get_neigh(dev, addr);
1043 if (IS_ERR(neigh))
1044 neigh = NULL;
1045 }
1046
1047 rt->rt6i_dev = dev;
1048 rt->rt6i_idev = idev;
1049 rt->rt6i_nexthop = neigh;
1050 atomic_set(&rt->dst.__refcnt, 1);
1051 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
1052 rt->dst.output = ip6_output;
1053
1054 #if 0 /* there's no chance to use these for ndisc */
1055 rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
1056 ? DST_HOST
1057 : 0;
1058 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1059 rt->rt6i_dst.plen = 128;
1060 #endif
1061
1062 spin_lock_bh(&icmp6_dst_lock);
1063 rt->dst.next = icmp6_dst_gc_list;
1064 icmp6_dst_gc_list = &rt->dst;
1065 spin_unlock_bh(&icmp6_dst_lock);
1066
1067 fib6_force_start_gc(net);
1068
1069 out:
1070 return &rt->dst;
1071 }
1072
1073 int icmp6_dst_gc(void)
1074 {
1075 struct dst_entry *dst, **pprev;
1076 int more = 0;
1077
1078 spin_lock_bh(&icmp6_dst_lock);
1079 pprev = &icmp6_dst_gc_list;
1080
1081 while ((dst = *pprev) != NULL) {
1082 if (!atomic_read(&dst->__refcnt)) {
1083 *pprev = dst->next;
1084 dst_free(dst);
1085 } else {
1086 pprev = &dst->next;
1087 ++more;
1088 }
1089 }
1090
1091 spin_unlock_bh(&icmp6_dst_lock);
1092
1093 return more;
1094 }
1095
1096 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1097 void *arg)
1098 {
1099 struct dst_entry *dst, **pprev;
1100
1101 spin_lock_bh(&icmp6_dst_lock);
1102 pprev = &icmp6_dst_gc_list;
1103 while ((dst = *pprev) != NULL) {
1104 struct rt6_info *rt = (struct rt6_info *) dst;
1105 if (func(rt, arg)) {
1106 *pprev = dst->next;
1107 dst_free(dst);
1108 } else {
1109 pprev = &dst->next;
1110 }
1111 }
1112 spin_unlock_bh(&icmp6_dst_lock);
1113 }
1114
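/*
 * Route cache garbage collection.  GC is skipped while we are within
 * ip6_rt_gc_min_interval of the last run and below ip6_rt_max_size;
 * otherwise fib6_run_gc() is invoked with an expiry interval
 * (ip6_rt_gc_expire) that grows on every call and decays again by
 * a 1/2^ip6_rt_gc_elasticity fraction per invocation.
 */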
1115 static int ip6_dst_gc(struct dst_ops *ops)
1116 {
1117 unsigned long now = jiffies;
1118 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1119 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1120 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1121 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1122 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1123 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1124 int entries;
1125
1126 entries = dst_entries_get_fast(ops);
1127 if (time_after(rt_last_gc + rt_min_interval, now) &&
1128 entries <= rt_max_size)
1129 goto out;
1130
1131 net->ipv6.ip6_rt_gc_expire++;
1132 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1133 net->ipv6.ip6_rt_last_gc = now;
1134 entries = dst_entries_get_slow(ops);
1135 if (entries < ops->gc_thresh)
1136 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1137 out:
1138 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1139 return entries > rt_max_size;
1140 }
1141
1142 /* Clean host part of a prefix. Not necessary in radix tree,
1143 but results in cleaner routing tables.
1144
1145    Remove it only when everything else works!
1146 */
1147
1148 int ip6_dst_hoplimit(struct dst_entry *dst)
1149 {
1150 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
1151 if (hoplimit == 0) {
1152 struct net_device *dev = dst->dev;
1153 struct inet6_dev *idev;
1154
1155 rcu_read_lock();
1156 idev = __in6_dev_get(dev);
1157 if (idev)
1158 hoplimit = idev->cnf.hop_limit;
1159 else
1160 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1161 rcu_read_unlock();
1162 }
1163 return hoplimit;
1164 }
1165 EXPORT_SYMBOL(ip6_dst_hoplimit);
1166
1167 /*
1168 *
1169 */
1170
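/*
 * Add a route described by a fib6_config (filled in from netlink, ioctl
 * or router discovery).  The device, destination, metrics and gateway are
 * validated here; reject routes and true routes via loopback become
 * discard entries, and the finished rt6_info is handed to __ip6_ins_rt().
 */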
1171 int ip6_route_add(struct fib6_config *cfg)
1172 {
1173 int err;
1174 struct net *net = cfg->fc_nlinfo.nl_net;
1175 struct rt6_info *rt = NULL;
1176 struct net_device *dev = NULL;
1177 struct inet6_dev *idev = NULL;
1178 struct fib6_table *table;
1179 int addr_type;
1180
1181 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1182 return -EINVAL;
1183 #ifndef CONFIG_IPV6_SUBTREES
1184 if (cfg->fc_src_len)
1185 return -EINVAL;
1186 #endif
1187 if (cfg->fc_ifindex) {
1188 err = -ENODEV;
1189 dev = dev_get_by_index(net, cfg->fc_ifindex);
1190 if (!dev)
1191 goto out;
1192 idev = in6_dev_get(dev);
1193 if (!idev)
1194 goto out;
1195 }
1196
1197 if (cfg->fc_metric == 0)
1198 cfg->fc_metric = IP6_RT_PRIO_USER;
1199
1200 table = fib6_new_table(net, cfg->fc_table);
1201 if (table == NULL) {
1202 err = -ENOBUFS;
1203 goto out;
1204 }
1205
1206 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1207
1208 if (rt == NULL) {
1209 err = -ENOMEM;
1210 goto out;
1211 }
1212
1213 rt->dst.obsolete = -1;
1214 rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
1215 jiffies + clock_t_to_jiffies(cfg->fc_expires) :
1216 0;
1217
1218 if (cfg->fc_protocol == RTPROT_UNSPEC)
1219 cfg->fc_protocol = RTPROT_BOOT;
1220 rt->rt6i_protocol = cfg->fc_protocol;
1221
1222 addr_type = ipv6_addr_type(&cfg->fc_dst);
1223
1224 if (addr_type & IPV6_ADDR_MULTICAST)
1225 rt->dst.input = ip6_mc_input;
1226 else if (cfg->fc_flags & RTF_LOCAL)
1227 rt->dst.input = ip6_input;
1228 else
1229 rt->dst.input = ip6_forward;
1230
1231 rt->dst.output = ip6_output;
1232
1233 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1234 rt->rt6i_dst.plen = cfg->fc_dst_len;
1235 if (rt->rt6i_dst.plen == 128)
1236 rt->dst.flags = DST_HOST;
1237
1238 #ifdef CONFIG_IPV6_SUBTREES
1239 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1240 rt->rt6i_src.plen = cfg->fc_src_len;
1241 #endif
1242
1243 rt->rt6i_metric = cfg->fc_metric;
1244
1245 /* We cannot add true routes via loopback here, as
1246    they would result in kernel looping; promote them to reject routes.
1247 */
1248 if ((cfg->fc_flags & RTF_REJECT) ||
1249 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
1250 && !(cfg->fc_flags&RTF_LOCAL))) {
1251 /* hold loopback dev/idev if we haven't done so. */
1252 if (dev != net->loopback_dev) {
1253 if (dev) {
1254 dev_put(dev);
1255 in6_dev_put(idev);
1256 }
1257 dev = net->loopback_dev;
1258 dev_hold(dev);
1259 idev = in6_dev_get(dev);
1260 if (!idev) {
1261 err = -ENODEV;
1262 goto out;
1263 }
1264 }
1265 rt->dst.output = ip6_pkt_discard_out;
1266 rt->dst.input = ip6_pkt_discard;
1267 rt->dst.error = -ENETUNREACH;
1268 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1269 goto install_route;
1270 }
1271
1272 if (cfg->fc_flags & RTF_GATEWAY) {
1273 struct in6_addr *gw_addr;
1274 int gwa_type;
1275
1276 gw_addr = &cfg->fc_gateway;
1277 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1278 gwa_type = ipv6_addr_type(gw_addr);
1279
1280 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1281 struct rt6_info *grt;
1282
1283 /* IPv6 strictly prohibits using non-link-local
1284    addresses as nexthop addresses.
1285    Otherwise, the router will not be able to send redirects.
1286    That is usually for the best, but in some (rare!) circumstances
1287    (SIT, PtP, NBMA NOARP links) it is handy to allow
1288    some exceptions. --ANK
1289 */
1290 err = -EINVAL;
1291 if (!(gwa_type&IPV6_ADDR_UNICAST))
1292 goto out;
1293
1294 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1295
1296 err = -EHOSTUNREACH;
1297 if (grt == NULL)
1298 goto out;
1299 if (dev) {
1300 if (dev != grt->rt6i_dev) {
1301 dst_release(&grt->dst);
1302 goto out;
1303 }
1304 } else {
1305 dev = grt->rt6i_dev;
1306 idev = grt->rt6i_idev;
1307 dev_hold(dev);
1308 in6_dev_hold(grt->rt6i_idev);
1309 }
1310 if (!(grt->rt6i_flags&RTF_GATEWAY))
1311 err = 0;
1312 dst_release(&grt->dst);
1313
1314 if (err)
1315 goto out;
1316 }
1317 err = -EINVAL;
1318 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1319 goto out;
1320 }
1321
1322 err = -ENODEV;
1323 if (dev == NULL)
1324 goto out;
1325
1326 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1327 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1328 if (IS_ERR(rt->rt6i_nexthop)) {
1329 err = PTR_ERR(rt->rt6i_nexthop);
1330 rt->rt6i_nexthop = NULL;
1331 goto out;
1332 }
1333 }
1334
1335 rt->rt6i_flags = cfg->fc_flags;
1336
1337 install_route:
1338 if (cfg->fc_mx) {
1339 struct nlattr *nla;
1340 int remaining;
1341
1342 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1343 int type = nla_type(nla);
1344
1345 if (type) {
1346 if (type > RTAX_MAX) {
1347 err = -EINVAL;
1348 goto out;
1349 }
1350
1351 dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1352 }
1353 }
1354 }
1355
1356 rt->dst.dev = dev;
1357 rt->rt6i_idev = idev;
1358 rt->rt6i_table = table;
1359
1360 cfg->fc_nlinfo.nl_net = dev_net(dev);
1361
1362 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1363
1364 out:
1365 if (dev)
1366 dev_put(dev);
1367 if (idev)
1368 in6_dev_put(idev);
1369 if (rt)
1370 dst_free(&rt->dst);
1371 return err;
1372 }
1373
1374 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1375 {
1376 int err;
1377 struct fib6_table *table;
1378 struct net *net = dev_net(rt->rt6i_dev);
1379
1380 if (rt == net->ipv6.ip6_null_entry)
1381 return -ENOENT;
1382
1383 table = rt->rt6i_table;
1384 write_lock_bh(&table->tb6_lock);
1385
1386 err = fib6_del(rt, info);
1387 dst_release(&rt->dst);
1388
1389 write_unlock_bh(&table->tb6_lock);
1390
1391 return err;
1392 }
1393
1394 int ip6_del_rt(struct rt6_info *rt)
1395 {
1396 struct nl_info info = {
1397 .nl_net = dev_net(rt->rt6i_dev),
1398 };
1399 return __ip6_del_rt(rt, &info);
1400 }
1401
1402 static int ip6_route_del(struct fib6_config *cfg)
1403 {
1404 struct fib6_table *table;
1405 struct fib6_node *fn;
1406 struct rt6_info *rt;
1407 int err = -ESRCH;
1408
1409 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1410 if (table == NULL)
1411 return err;
1412
1413 read_lock_bh(&table->tb6_lock);
1414
1415 fn = fib6_locate(&table->tb6_root,
1416 &cfg->fc_dst, cfg->fc_dst_len,
1417 &cfg->fc_src, cfg->fc_src_len);
1418
1419 if (fn) {
1420 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1421 if (cfg->fc_ifindex &&
1422 (rt->rt6i_dev == NULL ||
1423 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1424 continue;
1425 if (cfg->fc_flags & RTF_GATEWAY &&
1426 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1427 continue;
1428 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1429 continue;
1430 dst_hold(&rt->dst);
1431 read_unlock_bh(&table->tb6_lock);
1432
1433 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1434 }
1435 }
1436 read_unlock_bh(&table->tb6_lock);
1437
1438 return err;
1439 }
1440
1441 /*
1442 * Handle redirects
1443 */
1444 struct ip6rd_flowi {
1445 struct flowi fl;
1446 struct in6_addr gateway;
1447 };
1448
1449 static struct rt6_info *__ip6_route_redirect(struct net *net,
1450 struct fib6_table *table,
1451 struct flowi *fl,
1452 int flags)
1453 {
1454 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
1455 struct rt6_info *rt;
1456 struct fib6_node *fn;
1457
1458 /*
1459 * Get the "current" route for this destination and
1460 * check if the redirect has come from the appropriate router.
1461 *
1462 * RFC 2461 specifies that redirects should only be
1463 * accepted if they come from the nexthop to the target.
1464 * Due to the way the routes are chosen, this notion
1465 * is a bit fuzzy and one might need to check all possible
1466 * routes.
1467 */
1468
1469 read_lock_bh(&table->tb6_lock);
1470 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1471 restart:
1472 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1473 /*
1474 * Current route is on-link; redirect is always invalid.
1475 *
1476 * It seems the previous statement is not true. It could
1477 * be a node which regards us as on-link (e.g. proxy ndisc),
1478 * but then the router serving it might decide that we should
1479 * know the truth. 8)8) --ANK (980726).
1480 */
1481 if (rt6_check_expired(rt))
1482 continue;
1483 if (!(rt->rt6i_flags & RTF_GATEWAY))
1484 continue;
1485 if (fl->oif != rt->rt6i_dev->ifindex)
1486 continue;
1487 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1488 continue;
1489 break;
1490 }
1491
1492 if (!rt)
1493 rt = net->ipv6.ip6_null_entry;
1494 BACKTRACK(net, &fl->fl6_src);
1495 out:
1496 dst_hold(&rt->dst);
1497
1498 read_unlock_bh(&table->tb6_lock);
1499
1500 return rt;
1501 };
1502
1503 static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1504 struct in6_addr *src,
1505 struct in6_addr *gateway,
1506 struct net_device *dev)
1507 {
1508 int flags = RT6_LOOKUP_F_HAS_SADDR;
1509 struct net *net = dev_net(dev);
1510 struct ip6rd_flowi rdfl = {
1511 .fl = {
1512 .oif = dev->ifindex,
1513 .fl6_dst = *dest,
1514 .fl6_src = *src,
1515 },
1516 };
1517
1518 ipv6_addr_copy(&rdfl.gateway, gateway);
1519
1520 if (rt6_need_strict(dest))
1521 flags |= RT6_LOOKUP_F_IFACE;
1522
1523 return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl,
1524 flags, __ip6_route_redirect);
1525 }
1526
1527 void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1528 struct in6_addr *saddr,
1529 struct neighbour *neigh, u8 *lladdr, int on_link)
1530 {
1531 struct rt6_info *rt, *nrt = NULL;
1532 struct netevent_redirect netevent;
1533 struct net *net = dev_net(neigh->dev);
1534
1535 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1536
1537 if (rt == net->ipv6.ip6_null_entry) {
1538 if (net_ratelimit())
1539 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1540 "for redirect target\n");
1541 goto out;
1542 }
1543
1544 /*
1545 * We have finally decided to accept it.
1546 */
1547
1548 neigh_update(neigh, lladdr, NUD_STALE,
1549 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1550 NEIGH_UPDATE_F_OVERRIDE|
1551 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1552 NEIGH_UPDATE_F_ISROUTER))
1553 );
1554
1555 /*
1556 * Redirect received -> path was valid.
1557 * Look, redirects are sent only in response to data packets,
1558 * so that this nexthop apparently is reachable. --ANK
1559 */
1560 dst_confirm(&rt->dst);
1561
1562 /* Duplicate redirect: silently ignore. */
1563 if (neigh == rt->dst.neighbour)
1564 goto out;
1565
1566 nrt = ip6_rt_copy(rt);
1567 if (nrt == NULL)
1568 goto out;
1569
1570 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1571 if (on_link)
1572 nrt->rt6i_flags &= ~RTF_GATEWAY;
1573
1574 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1575 nrt->rt6i_dst.plen = 128;
1576 nrt->dst.flags |= DST_HOST;
1577
1578 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1579 nrt->rt6i_nexthop = neigh_clone(neigh);
1580
1581 if (ip6_ins_rt(nrt))
1582 goto out;
1583
1584 netevent.old = &rt->dst;
1585 netevent.new = &nrt->dst;
1586 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1587
1588 if (rt->rt6i_flags&RTF_CACHE) {
1589 ip6_del_rt(rt);
1590 return;
1591 }
1592
1593 out:
1594 dst_release(&rt->dst);
1595 }
1596
1597 /*
1598 * Handle ICMP "packet too big" messages
1599 * i.e. Path MTU discovery
1600 */
1601
1602 static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
1603 struct net *net, u32 pmtu, int ifindex)
1604 {
1605 struct rt6_info *rt, *nrt;
1606 int allfrag = 0;
1607 again:
1608 rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1609 if (rt == NULL)
1610 return;
1611
1612 if (rt6_check_expired(rt)) {
1613 ip6_del_rt(rt);
1614 goto again;
1615 }
1616
1617 if (pmtu >= dst_mtu(&rt->dst))
1618 goto out;
1619
1620 if (pmtu < IPV6_MIN_MTU) {
1621 /*
1622 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
1623 * MTU (1280) and a fragment header should always be included
1624 * after a node receiving Too Big message reporting PMTU is
1625 * less than the IPv6 Minimum Link MTU.
1626 */
1627 pmtu = IPV6_MIN_MTU;
1628 allfrag = 1;
1629 }
1630
1631 /* New mtu received -> path was valid.
1632 They are sent only in response to data packets,
1633 so that this nexthop apparently is reachable. --ANK
1634 */
1635 dst_confirm(&rt->dst);
1636
1637 /* Host route. If it is static, it would be better
1638    not to override it but to add a new one, so that
1639    when the cache entry expires the old pmtu
1640    is restored automatically.
1641 */
1642 if (rt->rt6i_flags & RTF_CACHE) {
1643 dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
1644 if (allfrag) {
1645 u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
1646 features |= RTAX_FEATURE_ALLFRAG;
1647 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1648 }
1649 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1650 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1651 goto out;
1652 }
1653
1654 /* Network route.
1655 Two cases are possible:
1656    1. It is a connected route. Action: COW (copy-on-write).
1657    2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1658 */
1659 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1660 nrt = rt6_alloc_cow(rt, daddr, saddr);
1661 else
1662 nrt = rt6_alloc_clone(rt, daddr);
1663
1664 if (nrt) {
1665 dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
1666 if (allfrag) {
1667 u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
1668 features |= RTAX_FEATURE_ALLFRAG;
1669 dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
1670 }
1671
1672 /* According to RFC 1981, detection of a PMTU increase shouldn't
1673  * happen within 5 minutes; the recommended timer is 10 minutes.
1674  * Here the route expiration time is set to ip6_rt_mtu_expires,
1675  * which is 10 minutes. After 10 minutes the decreased pmtu expires
1676  * and detection of a PMTU increase happens automatically.
1677 */
1678 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1679 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1680
1681 ip6_ins_rt(nrt);
1682 }
1683 out:
1684 dst_release(&rt->dst);
1685 }
1686
1687 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1688 struct net_device *dev, u32 pmtu)
1689 {
1690 struct net *net = dev_net(dev);
1691
1692 /*
1693 * RFC 1981 states that a node "MUST reduce the size of the packets it
1694 * is sending along the path" that caused the Packet Too Big message.
1695 * Since it's not possible in the general case to determine which
1696 * interface was used to send the original packet, we update the MTU
1697 * on the interface that will be used to send future packets. We also
1698 * update the MTU on the interface that received the Packet Too Big in
1699 * case the original packet was forced out that interface with
1700 * SO_BINDTODEVICE or similar. This is the next best thing to the
1701 * correct behaviour, which would be to update the MTU on all
1702 * interfaces.
1703 */
1704 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
1705 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
1706 }
1707
1708 /*
1709 * Misc support functions
1710 */
1711
1712 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1713 {
1714 struct net *net = dev_net(ort->rt6i_dev);
1715 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1716
1717 if (rt) {
1718 rt->dst.input = ort->dst.input;
1719 rt->dst.output = ort->dst.output;
1720
1721 dst_copy_metrics(&rt->dst, &ort->dst);
1722 rt->dst.error = ort->dst.error;
1723 rt->dst.dev = ort->dst.dev;
1724 if (rt->dst.dev)
1725 dev_hold(rt->dst.dev);
1726 rt->rt6i_idev = ort->rt6i_idev;
1727 if (rt->rt6i_idev)
1728 in6_dev_hold(rt->rt6i_idev);
1729 rt->dst.lastuse = jiffies;
1730 rt->rt6i_expires = 0;
1731
1732 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1733 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1734 rt->rt6i_metric = 0;
1735
1736 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1737 #ifdef CONFIG_IPV6_SUBTREES
1738 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1739 #endif
1740 rt->rt6i_table = ort->rt6i_table;
1741 }
1742 return rt;
1743 }
1744
1745 #ifdef CONFIG_IPV6_ROUTE_INFO
1746 static struct rt6_info *rt6_get_route_info(struct net *net,
1747 struct in6_addr *prefix, int prefixlen,
1748 struct in6_addr *gwaddr, int ifindex)
1749 {
1750 struct fib6_node *fn;
1751 struct rt6_info *rt = NULL;
1752 struct fib6_table *table;
1753
1754 table = fib6_get_table(net, RT6_TABLE_INFO);
1755 if (table == NULL)
1756 return NULL;
1757
1758 write_lock_bh(&table->tb6_lock);
1759 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1760 if (!fn)
1761 goto out;
1762
1763 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1764 if (rt->rt6i_dev->ifindex != ifindex)
1765 continue;
1766 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1767 continue;
1768 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1769 continue;
1770 dst_hold(&rt->dst);
1771 break;
1772 }
1773 out:
1774 write_unlock_bh(&table->tb6_lock);
1775 return rt;
1776 }
1777
1778 static struct rt6_info *rt6_add_route_info(struct net *net,
1779 struct in6_addr *prefix, int prefixlen,
1780 struct in6_addr *gwaddr, int ifindex,
1781 unsigned pref)
1782 {
1783 struct fib6_config cfg = {
1784 .fc_table = RT6_TABLE_INFO,
1785 .fc_metric = IP6_RT_PRIO_USER,
1786 .fc_ifindex = ifindex,
1787 .fc_dst_len = prefixlen,
1788 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1789 RTF_UP | RTF_PREF(pref),
1790 .fc_nlinfo.pid = 0,
1791 .fc_nlinfo.nlh = NULL,
1792 .fc_nlinfo.nl_net = net,
1793 };
1794
1795 ipv6_addr_copy(&cfg.fc_dst, prefix);
1796 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1797
1798 /* We should treat it as a default route if prefix length is 0. */
1799 if (!prefixlen)
1800 cfg.fc_flags |= RTF_DEFAULT;
1801
1802 ip6_route_add(&cfg);
1803
1804 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1805 }
1806 #endif
1807
1808 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1809 {
1810 struct rt6_info *rt;
1811 struct fib6_table *table;
1812
1813 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1814 if (table == NULL)
1815 return NULL;
1816
1817 write_lock_bh(&table->tb6_lock);
1818 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1819 if (dev == rt->rt6i_dev &&
1820 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1821 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1822 break;
1823 }
1824 if (rt)
1825 dst_hold(&rt->dst);
1826 write_unlock_bh(&table->tb6_lock);
1827 return rt;
1828 }
1829
1830 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1831 struct net_device *dev,
1832 unsigned int pref)
1833 {
1834 struct fib6_config cfg = {
1835 .fc_table = RT6_TABLE_DFLT,
1836 .fc_metric = IP6_RT_PRIO_USER,
1837 .fc_ifindex = dev->ifindex,
1838 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1839 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1840 .fc_nlinfo.pid = 0,
1841 .fc_nlinfo.nlh = NULL,
1842 .fc_nlinfo.nl_net = dev_net(dev),
1843 };
1844
1845 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1846
1847 ip6_route_add(&cfg);
1848
1849 return rt6_get_dflt_router(gwaddr, dev);
1850 }
1851
1852 void rt6_purge_dflt_routers(struct net *net)
1853 {
1854 struct rt6_info *rt;
1855 struct fib6_table *table;
1856
1857 /* NOTE: Keep consistent with rt6_get_dflt_router */
1858 table = fib6_get_table(net, RT6_TABLE_DFLT);
1859 if (table == NULL)
1860 return;
1861
1862 restart:
1863 read_lock_bh(&table->tb6_lock);
1864 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1865 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1866 dst_hold(&rt->dst);
1867 read_unlock_bh(&table->tb6_lock);
1868 ip6_del_rt(rt);
1869 goto restart;
1870 }
1871 }
1872 read_unlock_bh(&table->tb6_lock);
1873 }
1874
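/*
 * Convert the legacy in6_rtmsg layout used by SIOCADDRT/SIOCDELRT into a
 * fib6_config, so the ioctl path below shares ip6_route_add() and
 * ip6_route_del() with the netlink handlers.
 */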
1875 static void rtmsg_to_fib6_config(struct net *net,
1876 struct in6_rtmsg *rtmsg,
1877 struct fib6_config *cfg)
1878 {
1879 memset(cfg, 0, sizeof(*cfg));
1880
1881 cfg->fc_table = RT6_TABLE_MAIN;
1882 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1883 cfg->fc_metric = rtmsg->rtmsg_metric;
1884 cfg->fc_expires = rtmsg->rtmsg_info;
1885 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1886 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1887 cfg->fc_flags = rtmsg->rtmsg_flags;
1888
1889 cfg->fc_nlinfo.nl_net = net;
1890
1891 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1892 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1893 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1894 }
1895
1896 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1897 {
1898 struct fib6_config cfg;
1899 struct in6_rtmsg rtmsg;
1900 int err;
1901
1902 switch(cmd) {
1903 case SIOCADDRT: /* Add a route */
1904 case SIOCDELRT: /* Delete a route */
1905 if (!capable(CAP_NET_ADMIN))
1906 return -EPERM;
1907 err = copy_from_user(&rtmsg, arg,
1908 sizeof(struct in6_rtmsg));
1909 if (err)
1910 return -EFAULT;
1911
1912 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1913
1914 rtnl_lock();
1915 switch (cmd) {
1916 case SIOCADDRT:
1917 err = ip6_route_add(&cfg);
1918 break;
1919 case SIOCDELRT:
1920 err = ip6_route_del(&cfg);
1921 break;
1922 default:
1923 err = -EINVAL;
1924 }
1925 rtnl_unlock();
1926
1927 return err;
1928 }
1929
1930 return -EINVAL;
1931 }
1932
1933 /*
1934 * Drop the packet on the floor
1935 */
1936
1937 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1938 {
1939 int type;
1940 struct dst_entry *dst = skb_dst(skb);
1941 switch (ipstats_mib_noroutes) {
1942 case IPSTATS_MIB_INNOROUTES:
1943 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1944 if (type == IPV6_ADDR_ANY) {
1945 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1946 IPSTATS_MIB_INADDRERRORS);
1947 break;
1948 }
1949 /* FALLTHROUGH */
1950 case IPSTATS_MIB_OUTNOROUTES:
1951 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1952 ipstats_mib_noroutes);
1953 break;
1954 }
1955 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
1956 kfree_skb(skb);
1957 return 0;
1958 }
1959
1960 static int ip6_pkt_discard(struct sk_buff *skb)
1961 {
1962 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
1963 }
1964
1965 static int ip6_pkt_discard_out(struct sk_buff *skb)
1966 {
1967 skb->dev = skb_dst(skb)->dev;
1968 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1969 }
1970
1971 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1972
1973 static int ip6_pkt_prohibit(struct sk_buff *skb)
1974 {
1975 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1976 }
1977
1978 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1979 {
1980 skb->dev = skb_dst(skb)->dev;
1981 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1982 }
1983
1984 #endif
1985
1986 /*
1987 * Allocate a dst for local (unicast / anycast) address.
1988 */
1989
1990 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1991 const struct in6_addr *addr,
1992 int anycast)
1993 {
1994 struct net *net = dev_net(idev->dev);
1995 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1996 struct neighbour *neigh;
1997
1998 if (rt == NULL) {
1999 if (net_ratelimit())
2000 pr_warning("IPv6: Maximum number of routes reached,"
2001 " consider increasing route/max_size.\n");
2002 return ERR_PTR(-ENOMEM);
2003 }
2004
2005 dev_hold(net->loopback_dev);
2006 in6_dev_hold(idev);
2007
2008 rt->dst.flags = DST_HOST;
2009 rt->dst.input = ip6_input;
2010 rt->dst.output = ip6_output;
2011 rt->rt6i_dev = net->loopback_dev;
2012 rt->rt6i_idev = idev;
2013 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
2014 rt->dst.obsolete = -1;
2015
2016 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2017 if (anycast)
2018 rt->rt6i_flags |= RTF_ANYCAST;
2019 else
2020 rt->rt6i_flags |= RTF_LOCAL;
2021 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
2022 if (IS_ERR(neigh)) {
2023 dst_free(&rt->dst);
2024
2025 return ERR_CAST(neigh);
2026 }
2027 rt->rt6i_nexthop = neigh;
2028
2029 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
2030 rt->rt6i_dst.plen = 128;
2031 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2032
2033 atomic_set(&rt->dst.__refcnt, 1);
2034
2035 return rt;
2036 }
2037
2038 struct arg_dev_net {
2039 struct net_device *dev;
2040 struct net *net;
2041 };
2042
2043 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2044 {
2045 const struct arg_dev_net *adn = arg;
2046 const struct net_device *dev = adn->dev;
2047
2048 if ((rt->rt6i_dev == dev || dev == NULL) &&
2049 rt != adn->net->ipv6.ip6_null_entry) {
2050 RT6_TRACE("deleted by ifdown %p\n", rt);
2051 return -1;
2052 }
2053 return 0;
2054 }
2055
2056 void rt6_ifdown(struct net *net, struct net_device *dev)
2057 {
2058 struct arg_dev_net adn = {
2059 .dev = dev,
2060 .net = net,
2061 };
2062
2063 fib6_clean_all(net, fib6_ifdown, 0, &adn);
2064 icmp6_clean_all(fib6_ifdown, &adn);
2065 }
2066
2067 struct rt6_mtu_change_arg
2068 {
2069 struct net_device *dev;
2070 unsigned mtu;
2071 };
2072
2073 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2074 {
2075 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2076 struct inet6_dev *idev;
2077
2078 /* In IPv6 pmtu discovery is not optional,
2079    so the RTAX_MTU lock cannot disable it.
2080 We still use this lock to block changes
2081 caused by addrconf/ndisc.
2082 */
2083
2084 idev = __in6_dev_get(arg->dev);
2085 if (idev == NULL)
2086 return 0;
2087
2088 /* For an administrative MTU increase, there is no way to discover
2089    an IPv6 PMTU increase, so the PMTU increase should be updated here.
2090    Since RFC 1981 doesn't cover administrative MTU increases,
2091    updating the PMTU on increase is a MUST (e.g. jumbo frames).
2092 */
2093 /*
2094    If the new MTU is less than the route PMTU, the new MTU will be the
2095    lowest MTU in the path; update the route PMTU to reflect the PMTU
2096    decrease. If the new MTU is greater than the route PMTU, and the
2097    old MTU was the lowest MTU in the path, update the route PMTU
2098    to reflect the increase. In this case, if another node's MTU
2099    is now the lowest in the path, a Packet Too Big message will
2100    lead to PMTU discovery again.
2101 */
2102 if (rt->rt6i_dev == arg->dev &&
2103 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2104 (dst_mtu(&rt->dst) >= arg->mtu ||
2105 (dst_mtu(&rt->dst) < arg->mtu &&
2106 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2107 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2108 }
2109 return 0;
2110 }
2111
2112 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
2113 {
2114 struct rt6_mtu_change_arg arg = {
2115 .dev = dev,
2116 .mtu = mtu,
2117 };
2118
2119 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2120 }
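/*
 * Usage sketch: rt6_mtu_change() is meant to be called when a device MTU
 * changes, so that route MTUs tracking the old link MTU are adjusted. A
 * netdevice notifier would typically invoke it along these lines
 * (illustrative, assuming a NETDEV_CHANGEMTU event):
 *
 *	if (event == NETDEV_CHANGEMTU)
 *		rt6_mtu_change(dev, dev->mtu);
 */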
2121
2122 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2123 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2124 [RTA_OIF] = { .type = NLA_U32 },
2125 [RTA_IIF] = { .type = NLA_U32 },
2126 [RTA_PRIORITY] = { .type = NLA_U32 },
2127 [RTA_METRICS] = { .type = NLA_NESTED },
2128 };
2129
2130 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2131 struct fib6_config *cfg)
2132 {
2133 struct rtmsg *rtm;
2134 struct nlattr *tb[RTA_MAX+1];
2135 int err;
2136
2137 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2138 if (err < 0)
2139 goto errout;
2140
2141 err = -EINVAL;
2142 rtm = nlmsg_data(nlh);
2143 memset(cfg, 0, sizeof(*cfg));
2144
2145 cfg->fc_table = rtm->rtm_table;
2146 cfg->fc_dst_len = rtm->rtm_dst_len;
2147 cfg->fc_src_len = rtm->rtm_src_len;
2148 cfg->fc_flags = RTF_UP;
2149 cfg->fc_protocol = rtm->rtm_protocol;
2150
2151 if (rtm->rtm_type == RTN_UNREACHABLE)
2152 cfg->fc_flags |= RTF_REJECT;
2153
2154 if (rtm->rtm_type == RTN_LOCAL)
2155 cfg->fc_flags |= RTF_LOCAL;
2156
2157 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
2158 cfg->fc_nlinfo.nlh = nlh;
2159 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2160
2161 if (tb[RTA_GATEWAY]) {
2162 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2163 cfg->fc_flags |= RTF_GATEWAY;
2164 }
2165
2166 if (tb[RTA_DST]) {
2167 int plen = (rtm->rtm_dst_len + 7) >> 3;
2168
2169 if (nla_len(tb[RTA_DST]) < plen)
2170 goto errout;
2171
2172 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2173 }
2174
2175 if (tb[RTA_SRC]) {
2176 int plen = (rtm->rtm_src_len + 7) >> 3;
2177
2178 if (nla_len(tb[RTA_SRC]) < plen)
2179 goto errout;
2180
2181 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2182 }
2183
2184 if (tb[RTA_OIF])
2185 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2186
2187 if (tb[RTA_PRIORITY])
2188 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2189
2190 if (tb[RTA_METRICS]) {
2191 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2192 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2193 }
2194
2195 if (tb[RTA_TABLE])
2196 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2197
2198 err = 0;
2199 errout:
2200 return err;
2201 }
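/*
 * Example: a request such as (addresses and names illustrative)
 *
 *	ip -6 route add 2001:db8::/64 via fe80::1 dev eth0 metric 1024
 *
 * is expected to arrive as an RTM_NEWROUTE message with rtm_dst_len = 64
 * and RTA_DST, RTA_GATEWAY, RTA_OIF and RTA_PRIORITY attributes, which
 * rtm_to_fib6_config() maps onto fc_dst/fc_dst_len, fc_gateway,
 * fc_ifindex and fc_metric respectively.
 */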
2202
2203 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2204 {
2205 struct fib6_config cfg;
2206 int err;
2207
2208 err = rtm_to_fib6_config(skb, nlh, &cfg);
2209 if (err < 0)
2210 return err;
2211
2212 return ip6_route_del(&cfg);
2213 }
2214
2215 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2216 {
2217 struct fib6_config cfg;
2218 int err;
2219
2220 err = rtm_to_fib6_config(skb, nlh, &cfg);
2221 if (err < 0)
2222 return err;
2223
2224 return ip6_route_add(&cfg);
2225 }
2226
2227 static inline size_t rt6_nlmsg_size(void)
2228 {
2229 return NLMSG_ALIGN(sizeof(struct rtmsg))
2230 + nla_total_size(16) /* RTA_SRC */
2231 + nla_total_size(16) /* RTA_DST */
2232 + nla_total_size(16) /* RTA_GATEWAY */
2233 + nla_total_size(16) /* RTA_PREFSRC */
2234 + nla_total_size(4) /* RTA_TABLE */
2235 + nla_total_size(4) /* RTA_IIF */
2236 + nla_total_size(4) /* RTA_OIF */
2237 + nla_total_size(4) /* RTA_PRIORITY */
2238 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2239 + nla_total_size(sizeof(struct rta_cacheinfo));
2240 }
2241
2242 static int rt6_fill_node(struct net *net,
2243 struct sk_buff *skb, struct rt6_info *rt,
2244 struct in6_addr *dst, struct in6_addr *src,
2245 int iif, int type, u32 pid, u32 seq,
2246 int prefix, int nowait, unsigned int flags)
2247 {
2248 struct rtmsg *rtm;
2249 struct nlmsghdr *nlh;
2250 long expires;
2251 u32 table;
2252
2253 if (prefix) { /* user wants prefix routes only */
2254 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2255 /* success since this is not a prefix route */
2256 return 1;
2257 }
2258 }
2259
2260 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2261 if (nlh == NULL)
2262 return -EMSGSIZE;
2263
2264 rtm = nlmsg_data(nlh);
2265 rtm->rtm_family = AF_INET6;
2266 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2267 rtm->rtm_src_len = rt->rt6i_src.plen;
2268 rtm->rtm_tos = 0;
2269 if (rt->rt6i_table)
2270 table = rt->rt6i_table->tb6_id;
2271 else
2272 table = RT6_TABLE_UNSPEC;
2273 rtm->rtm_table = table;
2274 NLA_PUT_U32(skb, RTA_TABLE, table);
2275 if (rt->rt6i_flags&RTF_REJECT)
2276 rtm->rtm_type = RTN_UNREACHABLE;
2277 else if (rt->rt6i_flags&RTF_LOCAL)
2278 rtm->rtm_type = RTN_LOCAL;
2279 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2280 rtm->rtm_type = RTN_LOCAL;
2281 else
2282 rtm->rtm_type = RTN_UNICAST;
2283 rtm->rtm_flags = 0;
2284 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2285 rtm->rtm_protocol = rt->rt6i_protocol;
2286 if (rt->rt6i_flags&RTF_DYNAMIC)
2287 rtm->rtm_protocol = RTPROT_REDIRECT;
2288 else if (rt->rt6i_flags & RTF_ADDRCONF)
2289 rtm->rtm_protocol = RTPROT_KERNEL;
2290 else if (rt->rt6i_flags&RTF_DEFAULT)
2291 rtm->rtm_protocol = RTPROT_RA;
2292
2293 if (rt->rt6i_flags&RTF_CACHE)
2294 rtm->rtm_flags |= RTM_F_CLONED;
2295
2296 if (dst) {
2297 NLA_PUT(skb, RTA_DST, 16, dst);
2298 rtm->rtm_dst_len = 128;
2299 } else if (rtm->rtm_dst_len)
2300 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2301 #ifdef CONFIG_IPV6_SUBTREES
2302 if (src) {
2303 NLA_PUT(skb, RTA_SRC, 16, src);
2304 rtm->rtm_src_len = 128;
2305 } else if (rtm->rtm_src_len)
2306 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2307 #endif
2308 if (iif) {
2309 #ifdef CONFIG_IPV6_MROUTE
2310 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2311 int err = ip6mr_get_route(net, skb, rtm, nowait);
2312 if (err <= 0) {
2313 if (!nowait) {
2314 if (err == 0)
2315 return 0;
2316 goto nla_put_failure;
2317 } else {
2318 if (err == -EMSGSIZE)
2319 goto nla_put_failure;
2320 }
2321 }
2322 } else
2323 #endif
2324 NLA_PUT_U32(skb, RTA_IIF, iif);
2325 } else if (dst) {
2326 struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
2327 struct in6_addr saddr_buf;
2328 if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2329 dst, 0, &saddr_buf) == 0)
2330 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2331 }
2332
2333 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2334 goto nla_put_failure;
2335
2336 if (rt->dst.neighbour)
2337 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key);
2338
2339 if (rt->dst.dev)
2340 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2341
2342 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2343
2344 if (!(rt->rt6i_flags & RTF_EXPIRES))
2345 expires = 0;
2346 else if (rt->rt6i_expires - jiffies < INT_MAX)
2347 expires = rt->rt6i_expires - jiffies;
2348 else
2349 expires = INT_MAX;
2350
2351 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
2352 expires, rt->dst.error) < 0)
2353 goto nla_put_failure;
2354
2355 return nlmsg_end(skb, nlh);
2356
2357 nla_put_failure:
2358 nlmsg_cancel(skb, nlh);
2359 return -EMSGSIZE;
2360 }
2361
2362 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2363 {
2364 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2365 int prefix;
2366
2367 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2368 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2369 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2370 } else
2371 prefix = 0;
2372
2373 return rt6_fill_node(arg->net,
2374 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2375 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2376 prefix, 0, NLM_F_MULTI);
2377 }
2378
2379 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2380 {
2381 struct net *net = sock_net(in_skb->sk);
2382 struct nlattr *tb[RTA_MAX+1];
2383 struct rt6_info *rt;
2384 struct sk_buff *skb;
2385 struct rtmsg *rtm;
2386 struct flowi fl;
2387 int err, iif = 0;
2388
2389 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2390 if (err < 0)
2391 goto errout;
2392
2393 err = -EINVAL;
2394 memset(&fl, 0, sizeof(fl));
2395
2396 if (tb[RTA_SRC]) {
2397 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2398 goto errout;
2399
2400 ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
2401 }
2402
2403 if (tb[RTA_DST]) {
2404 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2405 goto errout;
2406
2407 ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
2408 }
2409
2410 if (tb[RTA_IIF])
2411 iif = nla_get_u32(tb[RTA_IIF]);
2412
2413 if (tb[RTA_OIF])
2414 fl.oif = nla_get_u32(tb[RTA_OIF]);
2415
2416 if (iif) {
2417 struct net_device *dev;
2418 dev = __dev_get_by_index(net, iif);
2419 if (!dev) {
2420 err = -ENODEV;
2421 goto errout;
2422 }
2423 }
2424
2425 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2426 if (skb == NULL) {
2427 err = -ENOBUFS;
2428 goto errout;
2429 }
2430
2431 /* Reserve room for dummy headers; this skb can pass
2432 through a good chunk of the routing engine.
2433 */
2434 skb_reset_mac_header(skb);
2435 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2436
2437 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
2438 skb_dst_set(skb, &rt->dst);
2439
2440 err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2441 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2442 nlh->nlmsg_seq, 0, 0, 0);
2443 if (err < 0) {
2444 kfree_skb(skb);
2445 goto errout;
2446 }
2447
2448 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2449 errout:
2450 return err;
2451 }
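/*
 * Example: this handler serves RTM_GETROUTE requests, i.e. the query
 * behind a command like (illustrative address)
 *
 *	ip -6 route get 2001:db8::1
 *
 * It resolves the flow with ip6_route_output() and unicasts a single
 * RTM_NEWROUTE message built by rt6_fill_node() back to the requester.
 */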
2452
2453 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2454 {
2455 struct sk_buff *skb;
2456 struct net *net = info->nl_net;
2457 u32 seq;
2458 int err;
2459
2460 err = -ENOBUFS;
2461 seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
2462
2463 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2464 if (skb == NULL)
2465 goto errout;
2466
2467 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2468 event, info->pid, seq, 0, 0, 0);
2469 if (err < 0) {
2470 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2471 WARN_ON(err == -EMSGSIZE);
2472 kfree_skb(skb);
2473 goto errout;
2474 }
2475 rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2476 info->nlh, gfp_any());
2477 return;
2478 errout:
2479 if (err < 0)
2480 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2481 }
2482
2483 static int ip6_route_dev_notify(struct notifier_block *this,
2484 unsigned long event, void *data)
2485 {
2486 struct net_device *dev = (struct net_device *)data;
2487 struct net *net = dev_net(dev);
2488
2489 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2490 net->ipv6.ip6_null_entry->dst.dev = dev;
2491 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2492 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2493 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2494 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2495 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2496 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2497 #endif
2498 }
2499
2500 return NOTIFY_OK;
2501 }
2502
2503 /*
2504 * /proc
2505 */
2506
2507 #ifdef CONFIG_PROC_FS
2508
2509 struct rt6_proc_arg
2510 {
2511 char *buffer;
2512 int offset;
2513 int length;
2514 int skip;
2515 int len;
2516 };
2517
2518 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2519 {
2520 struct seq_file *m = p_arg;
2521
2522 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2523
2524 #ifdef CONFIG_IPV6_SUBTREES
2525 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2526 #else
2527 seq_puts(m, "00000000000000000000000000000000 00 ");
2528 #endif
2529
2530 if (rt->rt6i_nexthop) {
2531 seq_printf(m, "%pi6", rt->rt6i_nexthop->primary_key);
2532 } else {
2533 seq_puts(m, "00000000000000000000000000000000");
2534 }
2535 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2536 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2537 rt->dst.__use, rt->rt6i_flags,
2538 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2539 return 0;
2540 }
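/*
 * Each /proc/net/ipv6_route line emitted above contains, in order:
 * destination address and prefix length, source address and prefix length
 * (all zero unless subtrees are configured), next-hop address, metric,
 * reference count, use count, flags and device name. An illustrative line:
 *
 * fe800000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000100 00000001 00000000 00000001 eth0
 */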
2541
2542 static int ipv6_route_show(struct seq_file *m, void *v)
2543 {
2544 struct net *net = (struct net *)m->private;
2545 fib6_clean_all(net, rt6_info_route, 0, m);
2546 return 0;
2547 }
2548
2549 static int ipv6_route_open(struct inode *inode, struct file *file)
2550 {
2551 return single_open_net(inode, file, ipv6_route_show);
2552 }
2553
2554 static const struct file_operations ipv6_route_proc_fops = {
2555 .owner = THIS_MODULE,
2556 .open = ipv6_route_open,
2557 .read = seq_read,
2558 .llseek = seq_lseek,
2559 .release = single_release_net,
2560 };
2561
2562 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2563 {
2564 struct net *net = (struct net *)seq->private;
2565 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2566 net->ipv6.rt6_stats->fib_nodes,
2567 net->ipv6.rt6_stats->fib_route_nodes,
2568 net->ipv6.rt6_stats->fib_rt_alloc,
2569 net->ipv6.rt6_stats->fib_rt_entries,
2570 net->ipv6.rt6_stats->fib_rt_cache,
2571 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2572 net->ipv6.rt6_stats->fib_discarded_routes);
2573
2574 return 0;
2575 }
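/*
 * The single /proc/net/rt6_stats line printed above contains, in order:
 * fib_nodes, fib_route_nodes, fib_rt_alloc, fib_rt_entries, fib_rt_cache,
 * the current number of dst entries and fib_discarded_routes, all in hex.
 */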
2576
2577 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2578 {
2579 return single_open_net(inode, file, rt6_stats_seq_show);
2580 }
2581
2582 static const struct file_operations rt6_stats_seq_fops = {
2583 .owner = THIS_MODULE,
2584 .open = rt6_stats_seq_open,
2585 .read = seq_read,
2586 .llseek = seq_lseek,
2587 .release = single_release_net,
2588 };
2589 #endif /* CONFIG_PROC_FS */
2590
2591 #ifdef CONFIG_SYSCTL
2592
2593 static
2594 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2595 void __user *buffer, size_t *lenp, loff_t *ppos)
2596 {
2597 struct net *net;
2598 int delay;
2599 if (!write)
2600 return -EINVAL;
2601
2602 net = (struct net *)ctl->extra1;
2603 delay = net->ipv6.sysctl.flush_delay;
2604 proc_dointvec(ctl, write, buffer, lenp, ppos);
2605 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2606 return 0;
2607 }
2608
2609 ctl_table ipv6_route_table_template[] = {
2610 {
2611 .procname = "flush",
2612 .data = &init_net.ipv6.sysctl.flush_delay,
2613 .maxlen = sizeof(int),
2614 .mode = 0200,
2615 .proc_handler = ipv6_sysctl_rtcache_flush
2616 },
2617 {
2618 .procname = "gc_thresh",
2619 .data = &ip6_dst_ops_template.gc_thresh,
2620 .maxlen = sizeof(int),
2621 .mode = 0644,
2622 .proc_handler = proc_dointvec,
2623 },
2624 {
2625 .procname = "max_size",
2626 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2627 .maxlen = sizeof(int),
2628 .mode = 0644,
2629 .proc_handler = proc_dointvec,
2630 },
2631 {
2632 .procname = "gc_min_interval",
2633 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2634 .maxlen = sizeof(int),
2635 .mode = 0644,
2636 .proc_handler = proc_dointvec_jiffies,
2637 },
2638 {
2639 .procname = "gc_timeout",
2640 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2641 .maxlen = sizeof(int),
2642 .mode = 0644,
2643 .proc_handler = proc_dointvec_jiffies,
2644 },
2645 {
2646 .procname = "gc_interval",
2647 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2648 .maxlen = sizeof(int),
2649 .mode = 0644,
2650 .proc_handler = proc_dointvec_jiffies,
2651 },
2652 {
2653 .procname = "gc_elasticity",
2654 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2655 .maxlen = sizeof(int),
2656 .mode = 0644,
2657 .proc_handler = proc_dointvec,
2658 },
2659 {
2660 .procname = "mtu_expires",
2661 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2662 .maxlen = sizeof(int),
2663 .mode = 0644,
2664 .proc_handler = proc_dointvec_jiffies,
2665 },
2666 {
2667 .procname = "min_adv_mss",
2668 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2669 .maxlen = sizeof(int),
2670 .mode = 0644,
2671 .proc_handler = proc_dointvec,
2672 },
2673 {
2674 .procname = "gc_min_interval_ms",
2675 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2676 .maxlen = sizeof(int),
2677 .mode = 0644,
2678 .proc_handler = proc_dointvec_ms_jiffies,
2679 },
2680 { }
2681 };
2682
2683 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2684 {
2685 struct ctl_table *table;
2686
2687 table = kmemdup(ipv6_route_table_template,
2688 sizeof(ipv6_route_table_template),
2689 GFP_KERNEL);
2690
2691 if (table) {
2692 table[0].data = &net->ipv6.sysctl.flush_delay;
2693 table[0].extra1 = net;
2694 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2695 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2696 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2697 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2698 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2699 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2700 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2701 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2702 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2703 }
2704
2705 return table;
2706 }
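/*
 * The template above backs the per-namespace tunables under
 * /proc/sys/net/ipv6/route/ (flush, gc_thresh, max_size, gc_min_interval,
 * gc_timeout, gc_interval, gc_elasticity, mtu_expires, min_adv_mss and
 * gc_min_interval_ms); ipv6_route_sysctl_init() clones the table and
 * points each entry's .data at the given namespace, so that e.g.
 *
 *	echo 8192 > /proc/sys/net/ipv6/route/max_size
 *
 * adjusts only that namespace's ip6_rt_max_size (path and value are
 * illustrative of the expected layout).
 */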
2707 #endif
2708
2709 static int __net_init ip6_route_net_init(struct net *net)
2710 {
2711 int ret = -ENOMEM;
2712
2713 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2714 sizeof(net->ipv6.ip6_dst_ops));
2715
2716 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
2717 goto out_ip6_dst_ops;
2718
2719 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2720 sizeof(*net->ipv6.ip6_null_entry),
2721 GFP_KERNEL);
2722 if (!net->ipv6.ip6_null_entry)
2723 goto out_ip6_dst_entries;
2724 net->ipv6.ip6_null_entry->dst.path =
2725 (struct dst_entry *)net->ipv6.ip6_null_entry;
2726 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2727 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
2728 ip6_template_metrics, true);
2729
2730 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2731 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2732 sizeof(*net->ipv6.ip6_prohibit_entry),
2733 GFP_KERNEL);
2734 if (!net->ipv6.ip6_prohibit_entry)
2735 goto out_ip6_null_entry;
2736 net->ipv6.ip6_prohibit_entry->dst.path =
2737 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2738 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2739 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
2740 ip6_template_metrics, true);
2741
2742 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2743 sizeof(*net->ipv6.ip6_blk_hole_entry),
2744 GFP_KERNEL);
2745 if (!net->ipv6.ip6_blk_hole_entry)
2746 goto out_ip6_prohibit_entry;
2747 net->ipv6.ip6_blk_hole_entry->dst.path =
2748 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2749 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2750 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
2751 ip6_template_metrics, true);
2752 #endif
2753
2754 net->ipv6.sysctl.flush_delay = 0;
2755 net->ipv6.sysctl.ip6_rt_max_size = 4096;
2756 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
2757 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
2758 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
2759 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
2760 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2761 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2762
2763 #ifdef CONFIG_PROC_FS
2764 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2765 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2766 #endif
2767 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2768
2769 ret = 0;
2770 out:
2771 return ret;
2772
2773 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2774 out_ip6_prohibit_entry:
2775 kfree(net->ipv6.ip6_prohibit_entry);
2776 out_ip6_null_entry:
2777 kfree(net->ipv6.ip6_null_entry);
2778 #endif
2779 out_ip6_dst_entries:
2780 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2781 out_ip6_dst_ops:
2782 goto out;
2783 }
2784
2785 static void __net_exit ip6_route_net_exit(struct net *net)
2786 {
2787 #ifdef CONFIG_PROC_FS
2788 proc_net_remove(net, "ipv6_route");
2789 proc_net_remove(net, "rt6_stats");
2790 #endif
2791 kfree(net->ipv6.ip6_null_entry);
2792 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2793 kfree(net->ipv6.ip6_prohibit_entry);
2794 kfree(net->ipv6.ip6_blk_hole_entry);
2795 #endif
2796 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2797 }
2798
2799 static struct pernet_operations ip6_route_net_ops = {
2800 .init = ip6_route_net_init,
2801 .exit = ip6_route_net_exit,
2802 };
2803
2804 static struct notifier_block ip6_route_dev_notifier = {
2805 .notifier_call = ip6_route_dev_notify,
2806 .priority = 0,
2807 };
2808
2809 int __init ip6_route_init(void)
2810 {
2811 int ret;
2812
2813 ret = -ENOMEM;
2814 ip6_dst_ops_template.kmem_cachep =
2815 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2816 SLAB_HWCACHE_ALIGN, NULL);
2817 if (!ip6_dst_ops_template.kmem_cachep)
2818 goto out;
2819
2820 ret = dst_entries_init(&ip6_dst_blackhole_ops);
2821 if (ret)
2822 goto out_kmem_cache;
2823
2824 ret = register_pernet_subsys(&ip6_route_net_ops);
2825 if (ret)
2826 goto out_dst_entries;
2827
2828 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
2829
2830 /* Registration of the loopback device is done before this portion of code,
2831 * so the loopback reference in rt6_info will not have been taken; do it
2832 * manually for init_net */
2833 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
2834 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2835 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2836 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
2837 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2838 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
2839 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2840 #endif
2841 ret = fib6_init();
2842 if (ret)
2843 goto out_register_subsys;
2844
2845 ret = xfrm6_init();
2846 if (ret)
2847 goto out_fib6_init;
2848
2849 ret = fib6_rules_init();
2850 if (ret)
2851 goto xfrm6_init;
2852
2853 ret = -ENOBUFS;
2854 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
2855 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
2856 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2857 goto fib6_rules_init;
2858
2859 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2860 if (ret)
2861 goto fib6_rules_init;
2862
2863 out:
2864 return ret;
2865
2866 fib6_rules_init:
2867 fib6_rules_cleanup();
2868 xfrm6_init:
2869 xfrm6_fini();
2870 out_fib6_init:
2871 fib6_gc_cleanup();
2872 out_register_subsys:
2873 unregister_pernet_subsys(&ip6_route_net_ops);
2874 out_dst_entries:
2875 dst_entries_destroy(&ip6_dst_blackhole_ops);
2876 out_kmem_cache:
2877 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2878 goto out;
2879 }
2880
2881 void ip6_route_cleanup(void)
2882 {
2883 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2884 fib6_rules_cleanup();
2885 xfrm6_fini();
2886 fib6_gc_cleanup();
2887 unregister_pernet_subsys(&ip6_route_net_ops);
2888 dst_entries_destroy(&ip6_dst_blackhole_ops);
2889 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2890 }