1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 /* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/export.h>
30 #include <linux/types.h>
31 #include <linux/times.h>
32 #include <linux/socket.h>
33 #include <linux/sockios.h>
34 #include <linux/net.h>
35 #include <linux/route.h>
36 #include <linux/netdevice.h>
37 #include <linux/in6.h>
38 #include <linux/mroute6.h>
39 #include <linux/init.h>
40 #include <linux/if_arp.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <linux/nsproxy.h>
44 #include <linux/slab.h>
45 #include <net/net_namespace.h>
46 #include <net/snmp.h>
47 #include <net/ipv6.h>
48 #include <net/ip6_fib.h>
49 #include <net/ip6_route.h>
50 #include <net/ndisc.h>
51 #include <net/addrconf.h>
52 #include <net/tcp.h>
53 #include <linux/rtnetlink.h>
54 #include <net/dst.h>
55 #include <net/xfrm.h>
56 #include <net/netevent.h>
57 #include <net/netlink.h>
58
59 #include <asm/uaccess.h>
60
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64
65 /* Set to 3 to get tracing. */
66 #define RT6_DEBUG 2
67
68 #if RT6_DEBUG >= 3
69 #define RDBG(x) printk x
70 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
71 #else
72 #define RDBG(x)
73 #define RT6_TRACE(x...) do { ; } while (0)
74 #endif
75
76 static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
77 const struct in6_addr *dest);
78 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
79 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
80 static unsigned int ip6_default_mtu(const struct dst_entry *dst);
81 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
82 static void ip6_dst_destroy(struct dst_entry *);
83 static void ip6_dst_ifdown(struct dst_entry *,
84 struct net_device *dev, int how);
85 static int ip6_dst_gc(struct dst_ops *ops);
86
87 static int ip6_pkt_discard(struct sk_buff *skb);
88 static int ip6_pkt_discard_out(struct sk_buff *skb);
89 static void ip6_link_failure(struct sk_buff *skb);
90 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
91
92 #ifdef CONFIG_IPV6_ROUTE_INFO
93 static struct rt6_info *rt6_add_route_info(struct net *net,
94 const struct in6_addr *prefix, int prefixlen,
95 const struct in6_addr *gwaddr, int ifindex,
96 unsigned pref);
97 static struct rt6_info *rt6_get_route_info(struct net *net,
98 const struct in6_addr *prefix, int prefixlen,
99 const struct in6_addr *gwaddr, int ifindex);
100 #endif
101
102 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
103 {
104 struct rt6_info *rt = (struct rt6_info *) dst;
105 struct inet_peer *peer;
106 u32 *p = NULL;
107
108 if (!(rt->dst.flags & DST_HOST))
109 return NULL;
110
111 if (!rt->rt6i_peer)
112 rt6_bind_peer(rt, 1);
113
114 peer = rt->rt6i_peer;
115 if (peer) {
116 u32 *old_p = __DST_METRICS_PTR(old);
117 unsigned long prev, new;
118
119 p = peer->metrics;
120 if (inet_metrics_new(peer))
121 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
122
123 new = (unsigned long) p;
124 prev = cmpxchg(&dst->_metrics, old, new);
125
126 if (prev != old) {
127 p = __DST_METRICS_PTR(prev);
128 if (prev & DST_METRICS_READ_ONLY)
129 p = NULL;
130 }
131 }
132 return p;
133 }
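/*
 * Summary of the copy-on-write above: host routes start out sharing a
 * read-only metrics array.  On the first write the route is bound to an
 * inet_peer, the old metrics are copied into the peer's array if it has
 * not been initialised yet, and dst->_metrics is switched over with
 * cmpxchg().  If another CPU wins that race we use its pointer instead,
 * unless it is still marked DST_METRICS_READ_ONLY, in which case NULL
 * is returned.
 */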
134
135 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
136 {
137 return __neigh_lookup_errno(&nd_tbl, daddr, dst->dev);
138 }
139
140 static struct dst_ops ip6_dst_ops_template = {
141 .family = AF_INET6,
142 .protocol = cpu_to_be16(ETH_P_IPV6),
143 .gc = ip6_dst_gc,
144 .gc_thresh = 1024,
145 .check = ip6_dst_check,
146 .default_advmss = ip6_default_advmss,
147 .default_mtu = ip6_default_mtu,
148 .cow_metrics = ipv6_cow_metrics,
149 .destroy = ip6_dst_destroy,
150 .ifdown = ip6_dst_ifdown,
151 .negative_advice = ip6_negative_advice,
152 .link_failure = ip6_link_failure,
153 .update_pmtu = ip6_rt_update_pmtu,
154 .local_out = __ip6_local_out,
155 .neigh_lookup = ip6_neigh_lookup,
156 };
157
158 static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
159 {
160 return 0;
161 }
162
163 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
164 {
165 }
166
167 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
168 unsigned long old)
169 {
170 return NULL;
171 }
172
173 static struct dst_ops ip6_dst_blackhole_ops = {
174 .family = AF_INET6,
175 .protocol = cpu_to_be16(ETH_P_IPV6),
176 .destroy = ip6_dst_destroy,
177 .check = ip6_dst_check,
178 .default_mtu = ip6_blackhole_default_mtu,
179 .default_advmss = ip6_default_advmss,
180 .update_pmtu = ip6_rt_blackhole_update_pmtu,
181 .cow_metrics = ip6_rt_blackhole_cow_metrics,
182 .neigh_lookup = ip6_neigh_lookup,
183 };
184
185 static const u32 ip6_template_metrics[RTAX_MAX] = {
186 [RTAX_HOPLIMIT - 1] = 255,
187 };
188
189 static struct rt6_info ip6_null_entry_template = {
190 .dst = {
191 .__refcnt = ATOMIC_INIT(1),
192 .__use = 1,
193 .obsolete = -1,
194 .error = -ENETUNREACH,
195 .input = ip6_pkt_discard,
196 .output = ip6_pkt_discard_out,
197 },
198 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
199 .rt6i_protocol = RTPROT_KERNEL,
200 .rt6i_metric = ~(u32) 0,
201 .rt6i_ref = ATOMIC_INIT(1),
202 };
203
204 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
205
206 static int ip6_pkt_prohibit(struct sk_buff *skb);
207 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
208
209 static struct rt6_info ip6_prohibit_entry_template = {
210 .dst = {
211 .__refcnt = ATOMIC_INIT(1),
212 .__use = 1,
213 .obsolete = -1,
214 .error = -EACCES,
215 .input = ip6_pkt_prohibit,
216 .output = ip6_pkt_prohibit_out,
217 },
218 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
219 .rt6i_protocol = RTPROT_KERNEL,
220 .rt6i_metric = ~(u32) 0,
221 .rt6i_ref = ATOMIC_INIT(1),
222 };
223
224 static struct rt6_info ip6_blk_hole_entry_template = {
225 .dst = {
226 .__refcnt = ATOMIC_INIT(1),
227 .__use = 1,
228 .obsolete = -1,
229 .error = -EINVAL,
230 .input = dst_discard,
231 .output = dst_discard,
232 },
233 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
234 .rt6i_protocol = RTPROT_KERNEL,
235 .rt6i_metric = ~(u32) 0,
236 .rt6i_ref = ATOMIC_INIT(1),
237 };
238
239 #endif
240
241 /* allocate dst with ip6_dst_ops */
242 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
243 struct net_device *dev,
244 int flags)
245 {
246 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
247
248 if (rt != NULL)
249 memset(&rt->rt6i_table, 0,
250 sizeof(*rt) - sizeof(struct dst_entry));
251
252 return rt;
253 }
254
255 static void ip6_dst_destroy(struct dst_entry *dst)
256 {
257 struct rt6_info *rt = (struct rt6_info *)dst;
258 struct inet6_dev *idev = rt->rt6i_idev;
259 struct inet_peer *peer = rt->rt6i_peer;
260
261 if (!(rt->dst.flags & DST_HOST))
262 dst_destroy_metrics_generic(dst);
263
264 if (idev != NULL) {
265 rt->rt6i_idev = NULL;
266 in6_dev_put(idev);
267 }
268 if (peer) {
269 rt->rt6i_peer = NULL;
270 inet_putpeer(peer);
271 }
272 }
273
274 static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
275
276 static u32 rt6_peer_genid(void)
277 {
278 return atomic_read(&__rt6_peer_genid);
279 }
280
281 void rt6_bind_peer(struct rt6_info *rt, int create)
282 {
283 struct inet_peer *peer;
284
285 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
286 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
287 inet_putpeer(peer);
288 else
289 rt->rt6i_peer_genid = rt6_peer_genid();
290 }
291
292 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
293 int how)
294 {
295 struct rt6_info *rt = (struct rt6_info *)dst;
296 struct inet6_dev *idev = rt->rt6i_idev;
297 struct net_device *loopback_dev =
298 dev_net(dev)->loopback_dev;
299
300 if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
301 struct inet6_dev *loopback_idev =
302 in6_dev_get(loopback_dev);
303 if (loopback_idev != NULL) {
304 rt->rt6i_idev = loopback_idev;
305 in6_dev_put(idev);
306 }
307 }
308 }
309
310 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
311 {
312 return (rt->rt6i_flags & RTF_EXPIRES) &&
313 time_after(jiffies, rt->rt6i_expires);
314 }
315
316 static inline int rt6_need_strict(const struct in6_addr *daddr)
317 {
318 return ipv6_addr_type(daddr) &
319 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
320 }
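/*
 * rt6_need_strict() matches destinations with limited scope:
 * multicast (ff00::/8), link-local (fe80::/10) and loopback (::1).
 * Callers use it to set RT6_LOOKUP_F_IFACE so that the lookup is
 * restricted to the interface at hand for such addresses.
 */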
321
322 /*
323 * Route lookup. Any table->tb6_lock is implied.
324 */
325
326 static inline struct rt6_info *rt6_device_match(struct net *net,
327 struct rt6_info *rt,
328 const struct in6_addr *saddr,
329 int oif,
330 int flags)
331 {
332 struct rt6_info *local = NULL;
333 struct rt6_info *sprt;
334
335 if (!oif && ipv6_addr_any(saddr))
336 goto out;
337
338 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
339 struct net_device *dev = sprt->rt6i_dev;
340
341 if (oif) {
342 if (dev->ifindex == oif)
343 return sprt;
344 if (dev->flags & IFF_LOOPBACK) {
345 if (sprt->rt6i_idev == NULL ||
346 sprt->rt6i_idev->dev->ifindex != oif) {
347 if (flags & RT6_LOOKUP_F_IFACE && oif)
348 continue;
349 if (local && (!oif ||
350 local->rt6i_idev->dev->ifindex == oif))
351 continue;
352 }
353 local = sprt;
354 }
355 } else {
356 if (ipv6_chk_addr(net, saddr, dev,
357 flags & RT6_LOOKUP_F_IFACE))
358 return sprt;
359 }
360 }
361
362 if (oif) {
363 if (local)
364 return local;
365
366 if (flags & RT6_LOOKUP_F_IFACE)
367 return net->ipv6.ip6_null_entry;
368 }
369 out:
370 return rt;
371 }
372
373 #ifdef CONFIG_IPV6_ROUTER_PREF
374 static void rt6_probe(struct rt6_info *rt)
375 {
376 struct neighbour *neigh;
377 /*
378 * Okay, this does not seem to be appropriate
379 * for now, however, we need to check if it
380 * is really so; aka Router Reachability Probing.
381 *
382 * Router Reachability Probe MUST be rate-limited
383 * to no more than one per minute.
384 */
385 rcu_read_lock();
386 neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
387 if (!neigh || (neigh->nud_state & NUD_VALID))
388 goto out;
389 read_lock_bh(&neigh->lock);
390 if (!(neigh->nud_state & NUD_VALID) &&
391 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
392 struct in6_addr mcaddr;
393 struct in6_addr *target;
394
395 neigh->updated = jiffies;
396 read_unlock_bh(&neigh->lock);
397
398 target = (struct in6_addr *)&neigh->primary_key;
399 addrconf_addr_solict_mult(target, &mcaddr);
400 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
401 } else {
402 read_unlock_bh(&neigh->lock);
403 }
404 out:
405 rcu_read_unlock();
406 }
407 #else
408 static inline void rt6_probe(struct rt6_info *rt)
409 {
410 }
411 #endif
412
413 /*
414 * Default Router Selection (RFC 2461 6.3.6)
415 */
416 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
417 {
418 struct net_device *dev = rt->rt6i_dev;
419 if (!oif || dev->ifindex == oif)
420 return 2;
421 if ((dev->flags & IFF_LOOPBACK) &&
422 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
423 return 1;
424 return 0;
425 }
426
427 static inline int rt6_check_neigh(struct rt6_info *rt)
428 {
429 struct neighbour *neigh;
430 int m;
431
432 rcu_read_lock();
433 neigh = dst_get_neighbour(&rt->dst);
434 if (rt->rt6i_flags & RTF_NONEXTHOP ||
435 !(rt->rt6i_flags & RTF_GATEWAY))
436 m = 1;
437 else if (neigh) {
438 read_lock_bh(&neigh->lock);
439 if (neigh->nud_state & NUD_VALID)
440 m = 2;
441 #ifdef CONFIG_IPV6_ROUTER_PREF
442 else if (neigh->nud_state & NUD_FAILED)
443 m = 0;
444 #endif
445 else
446 m = 1;
447 read_unlock_bh(&neigh->lock);
448 } else
449 m = 0;
450 rcu_read_unlock();
451 return m;
452 }
453
454 static int rt6_score_route(struct rt6_info *rt, int oif,
455 int strict)
456 {
457 int m, n;
458
459 m = rt6_check_dev(rt, oif);
460 if (!m && (strict & RT6_LOOKUP_F_IFACE))
461 return -1;
462 #ifdef CONFIG_IPV6_ROUTER_PREF
463 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
464 #endif
465 n = rt6_check_neigh(rt);
466 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
467 return -1;
468 return m;
469 }
470
471 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
472 int *mpri, struct rt6_info *match)
473 {
474 int m;
475
476 if (rt6_check_expired(rt))
477 goto out;
478
479 m = rt6_score_route(rt, oif, strict);
480 if (m < 0)
481 goto out;
482
483 if (m > *mpri) {
484 if (strict & RT6_LOOKUP_F_REACHABLE)
485 rt6_probe(match);
486 *mpri = m;
487 match = rt;
488 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
489 rt6_probe(rt);
490 }
491
492 out:
493 return match;
494 }
495
496 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
497 struct rt6_info *rr_head,
498 u32 metric, int oif, int strict)
499 {
500 struct rt6_info *rt, *match;
501 int mpri = -1;
502
503 match = NULL;
504 for (rt = rr_head; rt && rt->rt6i_metric == metric;
505 rt = rt->dst.rt6_next)
506 match = find_match(rt, oif, strict, &mpri, match);
507 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
508 rt = rt->dst.rt6_next)
509 match = find_match(rt, oif, strict, &mpri, match);
510
511 return match;
512 }
513
514 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
515 {
516 struct rt6_info *match, *rt0;
517 struct net *net;
518
519 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
520 __func__, fn->leaf, oif);
521
522 rt0 = fn->rr_ptr;
523 if (!rt0)
524 fn->rr_ptr = rt0 = fn->leaf;
525
526 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
527
528 if (!match &&
529 (strict & RT6_LOOKUP_F_REACHABLE)) {
530 struct rt6_info *next = rt0->dst.rt6_next;
531
532 /* no entries matched; do round-robin */
533 if (!next || next->rt6i_metric != rt0->rt6i_metric)
534 next = fn->leaf;
535
536 if (next != rt0)
537 fn->rr_ptr = next;
538 }
539
540 RT6_TRACE("%s() => %p\n",
541 __func__, match);
542
543 net = dev_net(rt0->rt6i_dev);
544 return match ? match : net->ipv6.ip6_null_entry;
545 }
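/*
 * Default router selection in a nutshell: find_rr_leaf() scores the
 * routes that share rt0's metric, starting at fn->rr_ptr and wrapping
 * around from fn->leaf, so successive lookups can start from different
 * entries.  When RT6_LOOKUP_F_REACHABLE is set and nothing reachable
 * matched, rr_ptr is advanced to the next candidate and candidates are
 * probed via rt6_probe(), giving the round-robin behaviour described
 * in the header comment.
 */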
546
547 #ifdef CONFIG_IPV6_ROUTE_INFO
548 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
549 const struct in6_addr *gwaddr)
550 {
551 struct net *net = dev_net(dev);
552 struct route_info *rinfo = (struct route_info *) opt;
553 struct in6_addr prefix_buf, *prefix;
554 unsigned int pref;
555 unsigned long lifetime;
556 struct rt6_info *rt;
557
558 if (len < sizeof(struct route_info)) {
559 return -EINVAL;
560 }
561
562 /* Sanity check for prefix_len and length */
563 if (rinfo->length > 3) {
564 return -EINVAL;
565 } else if (rinfo->prefix_len > 128) {
566 return -EINVAL;
567 } else if (rinfo->prefix_len > 64) {
568 if (rinfo->length < 2) {
569 return -EINVAL;
570 }
571 } else if (rinfo->prefix_len > 0) {
572 if (rinfo->length < 1) {
573 return -EINVAL;
574 }
575 }
576
577 pref = rinfo->route_pref;
578 if (pref == ICMPV6_ROUTER_PREF_INVALID)
579 return -EINVAL;
580
581 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
582
583 if (rinfo->length == 3)
584 prefix = (struct in6_addr *)rinfo->prefix;
585 else {
586 /* this function is safe */
587 ipv6_addr_prefix(&prefix_buf,
588 (struct in6_addr *)rinfo->prefix,
589 rinfo->prefix_len);
590 prefix = &prefix_buf;
591 }
592
593 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
594 dev->ifindex);
595
596 if (rt && !lifetime) {
597 ip6_del_rt(rt);
598 rt = NULL;
599 }
600
601 if (!rt && lifetime)
602 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
603 pref);
604 else if (rt)
605 rt->rt6i_flags = RTF_ROUTEINFO |
606 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
607
608 if (rt) {
609 if (!addrconf_finite_timeout(lifetime)) {
610 rt->rt6i_flags &= ~RTF_EXPIRES;
611 } else {
612 rt->rt6i_expires = jiffies + HZ * lifetime;
613 rt->rt6i_flags |= RTF_EXPIRES;
614 }
615 dst_release(&rt->dst);
616 }
617 return 0;
618 }
619 #endif
620
621 #define BACKTRACK(__net, saddr) \
622 do { \
623 if (rt == __net->ipv6.ip6_null_entry) { \
624 struct fib6_node *pn; \
625 while (1) { \
626 if (fn->fn_flags & RTN_TL_ROOT) \
627 goto out; \
628 pn = fn->parent; \
629 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
630 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
631 else \
632 fn = pn; \
633 if (fn->fn_flags & RTN_RTINFO) \
634 goto restart; \
635 } \
636 } \
637 } while(0)
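/*
 * BACKTRACK() only acts when the chosen route is the null entry: it
 * walks from fn back toward the tree root, descending into a parent's
 * source-address subtree (FIB6_SUBTREE) when one exists, and jumps to
 * the caller's "restart" label as soon as it reaches a node that
 * carries routes (RTN_RTINFO).  Hitting the tree root (RTN_TL_ROOT)
 * jumps to the caller's "out" label instead.
 */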
638
639 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
640 struct fib6_table *table,
641 struct flowi6 *fl6, int flags)
642 {
643 struct fib6_node *fn;
644 struct rt6_info *rt;
645
646 read_lock_bh(&table->tb6_lock);
647 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
648 restart:
649 rt = fn->leaf;
650 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
651 BACKTRACK(net, &fl6->saddr);
652 out:
653 dst_use(&rt->dst, jiffies);
654 read_unlock_bh(&table->tb6_lock);
655 return rt;
656
657 }
658
659 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
660 const struct in6_addr *saddr, int oif, int strict)
661 {
662 struct flowi6 fl6 = {
663 .flowi6_oif = oif,
664 .daddr = *daddr,
665 };
666 struct dst_entry *dst;
667 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
668
669 if (saddr) {
670 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
671 flags |= RT6_LOOKUP_F_HAS_SADDR;
672 }
673
674 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
675 if (dst->error == 0)
676 return (struct rt6_info *) dst;
677
678 dst_release(dst);
679
680 return NULL;
681 }
682
683 EXPORT_SYMBOL(rt6_lookup);
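#if 0
/*
 * Illustrative sketch only, not part of the original file: a
 * hypothetical in-kernel caller of rt6_lookup().  The function returns
 * a route with a reference held (or NULL on error / no route), so the
 * caller must drop it with dst_release().
 */
static bool example_daddr_has_route(struct net *net,
				    const struct in6_addr *daddr)
{
	struct rt6_info *rt = rt6_lookup(net, daddr, NULL, 0, 0);

	if (!rt)
		return false;
	dst_release(&rt->dst);
	return true;
}
#endif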
684
685 /* ip6_ins_rt is called with FREE table->tb6_lock.
686 It takes a new route entry; if the addition fails for any reason
687 the route is freed. In any case, if the caller does not hold a
688 reference, it may be destroyed.
689 */
690
691 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
692 {
693 int err;
694 struct fib6_table *table;
695
696 table = rt->rt6i_table;
697 write_lock_bh(&table->tb6_lock);
698 err = fib6_add(&table->tb6_root, rt, info);
699 write_unlock_bh(&table->tb6_lock);
700
701 return err;
702 }
703
704 int ip6_ins_rt(struct rt6_info *rt)
705 {
706 struct nl_info info = {
707 .nl_net = dev_net(rt->rt6i_dev),
708 };
709 return __ip6_ins_rt(rt, &info);
710 }
711
712 static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
713 const struct in6_addr *daddr,
714 const struct in6_addr *saddr)
715 {
716 struct rt6_info *rt;
717
718 /*
719 * Clone the route.
720 */
721
722 rt = ip6_rt_copy(ort, daddr);
723
724 if (rt) {
725 struct neighbour *neigh;
726 int attempts = !in_softirq();
727
728 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
729 if (rt->rt6i_dst.plen != 128 &&
730 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
731 rt->rt6i_flags |= RTF_ANYCAST;
732 rt->rt6i_gateway = *daddr;
733 }
734
735 rt->rt6i_flags |= RTF_CACHE;
736
737 #ifdef CONFIG_IPV6_SUBTREES
738 if (rt->rt6i_src.plen && saddr) {
739 rt->rt6i_src.addr = *saddr;
740 rt->rt6i_src.plen = 128;
741 }
742 #endif
743
744 retry:
745 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
746 if (IS_ERR(neigh)) {
747 struct net *net = dev_net(rt->rt6i_dev);
748 int saved_rt_min_interval =
749 net->ipv6.sysctl.ip6_rt_gc_min_interval;
750 int saved_rt_elasticity =
751 net->ipv6.sysctl.ip6_rt_gc_elasticity;
752
753 if (attempts-- > 0) {
754 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
755 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
756
757 ip6_dst_gc(&net->ipv6.ip6_dst_ops);
758
759 net->ipv6.sysctl.ip6_rt_gc_elasticity =
760 saved_rt_elasticity;
761 net->ipv6.sysctl.ip6_rt_gc_min_interval =
762 saved_rt_min_interval;
763 goto retry;
764 }
765
766 if (net_ratelimit())
767 printk(KERN_WARNING
768 "ipv6: Neighbour table overflow.\n");
769 dst_free(&rt->dst);
770 return NULL;
771 }
772 dst_set_neighbour(&rt->dst, neigh);
773
774 }
775
776 return rt;
777 }
778
779 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
780 const struct in6_addr *daddr)
781 {
782 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
783
784 if (rt) {
785 rt->rt6i_flags |= RTF_CACHE;
786 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
787 }
788 return rt;
789 }
790
791 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
792 struct flowi6 *fl6, int flags)
793 {
794 struct fib6_node *fn;
795 struct rt6_info *rt, *nrt;
796 int strict = 0;
797 int attempts = 3;
798 int err;
799 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
800
801 strict |= flags & RT6_LOOKUP_F_IFACE;
802
803 relookup:
804 read_lock_bh(&table->tb6_lock);
805
806 restart_2:
807 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
808
809 restart:
810 rt = rt6_select(fn, oif, strict | reachable);
811
812 BACKTRACK(net, &fl6->saddr);
813 if (rt == net->ipv6.ip6_null_entry ||
814 rt->rt6i_flags & RTF_CACHE)
815 goto out;
816
817 dst_hold(&rt->dst);
818 read_unlock_bh(&table->tb6_lock);
819
820 if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
821 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
822 else if (!(rt->dst.flags & DST_HOST))
823 nrt = rt6_alloc_clone(rt, &fl6->daddr);
824 else
825 goto out2;
826
827 dst_release(&rt->dst);
828 rt = nrt ? : net->ipv6.ip6_null_entry;
829
830 dst_hold(&rt->dst);
831 if (nrt) {
832 err = ip6_ins_rt(nrt);
833 if (!err)
834 goto out2;
835 }
836
837 if (--attempts <= 0)
838 goto out2;
839
840 /*
841 * Race condition! In the gap while table->tb6_lock was
842 * released, someone could have inserted this route. Relookup.
843 */
844 dst_release(&rt->dst);
845 goto relookup;
846
847 out:
848 if (reachable) {
849 reachable = 0;
850 goto restart_2;
851 }
852 dst_hold(&rt->dst);
853 read_unlock_bh(&table->tb6_lock);
854 out2:
855 rt->dst.lastuse = jiffies;
856 rt->dst.__use++;
857
858 return rt;
859 }
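/*
 * ip6_pol_route() is the common input/output lookup path: rt6_select()
 * picks the best route under the requested strictness, BACKTRACK()
 * climbs the tree if nothing matched, and any route that is not
 * already an RTF_CACHE clone gets a per-destination copy:
 * rt6_alloc_cow() when a neighbour still has to be bound (and
 * RTF_NONEXTHOP is not set), or rt6_alloc_clone() for non-host routes
 * that already have one.  The clone is inserted into the table; losing
 * that race triggers a relookup (at most three attempts), and if
 * nothing reachable was found the whole lookup is redone without
 * RT6_LOOKUP_F_REACHABLE.
 */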
860
861 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
862 struct flowi6 *fl6, int flags)
863 {
864 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
865 }
866
867 void ip6_route_input(struct sk_buff *skb)
868 {
869 const struct ipv6hdr *iph = ipv6_hdr(skb);
870 struct net *net = dev_net(skb->dev);
871 int flags = RT6_LOOKUP_F_HAS_SADDR;
872 struct flowi6 fl6 = {
873 .flowi6_iif = skb->dev->ifindex,
874 .daddr = iph->daddr,
875 .saddr = iph->saddr,
876 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
877 .flowi6_mark = skb->mark,
878 .flowi6_proto = iph->nexthdr,
879 };
880
881 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
882 flags |= RT6_LOOKUP_F_IFACE;
883
884 skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input));
885 }
886
887 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
888 struct flowi6 *fl6, int flags)
889 {
890 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
891 }
892
893 struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
894 struct flowi6 *fl6)
895 {
896 int flags = 0;
897
898 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
899 flags |= RT6_LOOKUP_F_IFACE;
900
901 if (!ipv6_addr_any(&fl6->saddr))
902 flags |= RT6_LOOKUP_F_HAS_SADDR;
903 else if (sk)
904 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
905
906 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
907 }
908
909 EXPORT_SYMBOL(ip6_route_output);
910
911 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
912 {
913 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
914 struct dst_entry *new = NULL;
915
916 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
917 if (rt) {
918 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
919
920 new = &rt->dst;
921
922 new->__use = 1;
923 new->input = dst_discard;
924 new->output = dst_discard;
925
926 if (dst_metrics_read_only(&ort->dst))
927 new->_metrics = ort->dst._metrics;
928 else
929 dst_copy_metrics(new, &ort->dst);
930 rt->rt6i_idev = ort->rt6i_idev;
931 if (rt->rt6i_idev)
932 in6_dev_hold(rt->rt6i_idev);
933 rt->rt6i_expires = 0;
934
935 rt->rt6i_gateway = ort->rt6i_gateway;
936 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
937 rt->rt6i_metric = 0;
938
939 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
940 #ifdef CONFIG_IPV6_SUBTREES
941 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
942 #endif
943
944 dst_free(new);
945 }
946
947 dst_release(dst_orig);
948 return new ? new : ERR_PTR(-ENOMEM);
949 }
950
951 /*
952 * Destination cache support functions
953 */
954
955 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
956 {
957 struct rt6_info *rt;
958
959 rt = (struct rt6_info *) dst;
960
961 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
962 if (rt->rt6i_peer_genid != rt6_peer_genid()) {
963 if (!rt->rt6i_peer)
964 rt6_bind_peer(rt, 0);
965 rt->rt6i_peer_genid = rt6_peer_genid();
966 }
967 return dst;
968 }
969 return NULL;
970 }
971
972 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
973 {
974 struct rt6_info *rt = (struct rt6_info *) dst;
975
976 if (rt) {
977 if (rt->rt6i_flags & RTF_CACHE) {
978 if (rt6_check_expired(rt)) {
979 ip6_del_rt(rt);
980 dst = NULL;
981 }
982 } else {
983 dst_release(dst);
984 dst = NULL;
985 }
986 }
987 return dst;
988 }
989
990 static void ip6_link_failure(struct sk_buff *skb)
991 {
992 struct rt6_info *rt;
993
994 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
995
996 rt = (struct rt6_info *) skb_dst(skb);
997 if (rt) {
998 if (rt->rt6i_flags&RTF_CACHE) {
999 dst_set_expires(&rt->dst, 0);
1000 rt->rt6i_flags |= RTF_EXPIRES;
1001 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1002 rt->rt6i_node->fn_sernum = -1;
1003 }
1004 }
1005
1006 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1007 {
1008 struct rt6_info *rt6 = (struct rt6_info*)dst;
1009
1010 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1011 rt6->rt6i_flags |= RTF_MODIFIED;
1012 if (mtu < IPV6_MIN_MTU) {
1013 u32 features = dst_metric(dst, RTAX_FEATURES);
1014 mtu = IPV6_MIN_MTU;
1015 features |= RTAX_FEATURE_ALLFRAG;
1016 dst_metric_set(dst, RTAX_FEATURES, features);
1017 }
1018 dst_metric_set(dst, RTAX_MTU, mtu);
1019 }
1020 }
1021
1022 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1023 {
1024 struct net_device *dev = dst->dev;
1025 unsigned int mtu = dst_mtu(dst);
1026 struct net *net = dev_net(dev);
1027
1028 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1029
1030 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1031 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1032
1033 /*
1034 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1035 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1036 * IPV6_MAXPLEN is also valid and means: "any MSS,
1037 * rely only on pmtu discovery"
1038 */
1039 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1040 mtu = IPV6_MAXPLEN;
1041 return mtu;
1042 }
1043
1044 static unsigned int ip6_default_mtu(const struct dst_entry *dst)
1045 {
1046 unsigned int mtu = IPV6_MIN_MTU;
1047 struct inet6_dev *idev;
1048
1049 rcu_read_lock();
1050 idev = __in6_dev_get(dst->dev);
1051 if (idev)
1052 mtu = idev->cnf.mtu6;
1053 rcu_read_unlock();
1054
1055 return mtu;
1056 }
1057
1058 static struct dst_entry *icmp6_dst_gc_list;
1059 static DEFINE_SPINLOCK(icmp6_dst_lock);
1060
1061 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1062 struct neighbour *neigh,
1063 const struct in6_addr *addr)
1064 {
1065 struct rt6_info *rt;
1066 struct inet6_dev *idev = in6_dev_get(dev);
1067 struct net *net = dev_net(dev);
1068
1069 if (unlikely(idev == NULL))
1070 return NULL;
1071
1072 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
1073 if (unlikely(rt == NULL)) {
1074 in6_dev_put(idev);
1075 goto out;
1076 }
1077
1078 if (neigh)
1079 neigh_hold(neigh);
1080 else {
1081 neigh = ndisc_get_neigh(dev, addr);
1082 if (IS_ERR(neigh))
1083 neigh = NULL;
1084 }
1085
1086 rt->dst.flags |= DST_HOST;
1087 rt->dst.output = ip6_output;
1088 dst_set_neighbour(&rt->dst, neigh);
1089 atomic_set(&rt->dst.__refcnt, 1);
1090 rt->rt6i_dst.addr = *addr;
1091 rt->rt6i_dst.plen = 128;
1092 rt->rt6i_idev = idev;
1093 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
1094
1095 spin_lock_bh(&icmp6_dst_lock);
1096 rt->dst.next = icmp6_dst_gc_list;
1097 icmp6_dst_gc_list = &rt->dst;
1098 spin_unlock_bh(&icmp6_dst_lock);
1099
1100 fib6_force_start_gc(net);
1101
1102 out:
1103 return &rt->dst;
1104 }
1105
1106 int icmp6_dst_gc(void)
1107 {
1108 struct dst_entry *dst, **pprev;
1109 int more = 0;
1110
1111 spin_lock_bh(&icmp6_dst_lock);
1112 pprev = &icmp6_dst_gc_list;
1113
1114 while ((dst = *pprev) != NULL) {
1115 if (!atomic_read(&dst->__refcnt)) {
1116 *pprev = dst->next;
1117 dst_free(dst);
1118 } else {
1119 pprev = &dst->next;
1120 ++more;
1121 }
1122 }
1123
1124 spin_unlock_bh(&icmp6_dst_lock);
1125
1126 return more;
1127 }
1128
1129 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1130 void *arg)
1131 {
1132 struct dst_entry *dst, **pprev;
1133
1134 spin_lock_bh(&icmp6_dst_lock);
1135 pprev = &icmp6_dst_gc_list;
1136 while ((dst = *pprev) != NULL) {
1137 struct rt6_info *rt = (struct rt6_info *) dst;
1138 if (func(rt, arg)) {
1139 *pprev = dst->next;
1140 dst_free(dst);
1141 } else {
1142 pprev = &dst->next;
1143 }
1144 }
1145 spin_unlock_bh(&icmp6_dst_lock);
1146 }
1147
1148 static int ip6_dst_gc(struct dst_ops *ops)
1149 {
1150 unsigned long now = jiffies;
1151 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1152 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1153 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1154 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1155 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1156 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1157 int entries;
1158
1159 entries = dst_entries_get_fast(ops);
1160 if (time_after(rt_last_gc + rt_min_interval, now) &&
1161 entries <= rt_max_size)
1162 goto out;
1163
1164 net->ipv6.ip6_rt_gc_expire++;
1165 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1166 net->ipv6.ip6_rt_last_gc = now;
1167 entries = dst_entries_get_slow(ops);
1168 if (entries < ops->gc_thresh)
1169 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1170 out:
1171 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1172 return entries > rt_max_size;
1173 }
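/*
 * GC pacing: a run is skipped while fewer than ip6_rt_max_size entries
 * are cached and less than ip6_rt_gc_min_interval has passed since the
 * last run.  Otherwise fib6_run_gc() is invoked with the adaptive
 * ip6_rt_gc_expire value, which is bumped before every run, reset to
 * half of ip6_rt_gc_timeout once the table drops below gc_thresh, and
 * decayed by a 1/2^ip6_rt_gc_elasticity fraction on every call.  The
 * return value is non-zero while the table still exceeds
 * ip6_rt_max_size.
 */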
1174
1175 /* Clean host part of a prefix. Not necessary in radix tree,
1176 but results in cleaner routing tables.
1177
1178 Remove it only when everything is known to work!
1179 */
1180
1181 int ip6_dst_hoplimit(struct dst_entry *dst)
1182 {
1183 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
1184 if (hoplimit == 0) {
1185 struct net_device *dev = dst->dev;
1186 struct inet6_dev *idev;
1187
1188 rcu_read_lock();
1189 idev = __in6_dev_get(dev);
1190 if (idev)
1191 hoplimit = idev->cnf.hop_limit;
1192 else
1193 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1194 rcu_read_unlock();
1195 }
1196 return hoplimit;
1197 }
1198 EXPORT_SYMBOL(ip6_dst_hoplimit);
1199
1200 /*
1201 *
1202 */
1203
1204 int ip6_route_add(struct fib6_config *cfg)
1205 {
1206 int err;
1207 struct net *net = cfg->fc_nlinfo.nl_net;
1208 struct rt6_info *rt = NULL;
1209 struct net_device *dev = NULL;
1210 struct inet6_dev *idev = NULL;
1211 struct fib6_table *table;
1212 int addr_type;
1213
1214 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1215 return -EINVAL;
1216 #ifndef CONFIG_IPV6_SUBTREES
1217 if (cfg->fc_src_len)
1218 return -EINVAL;
1219 #endif
1220 if (cfg->fc_ifindex) {
1221 err = -ENODEV;
1222 dev = dev_get_by_index(net, cfg->fc_ifindex);
1223 if (!dev)
1224 goto out;
1225 idev = in6_dev_get(dev);
1226 if (!idev)
1227 goto out;
1228 }
1229
1230 if (cfg->fc_metric == 0)
1231 cfg->fc_metric = IP6_RT_PRIO_USER;
1232
1233 err = -ENOBUFS;
1234 if (NULL != cfg->fc_nlinfo.nlh &&
1235 !(cfg->fc_nlinfo.nlh->nlmsg_flags&NLM_F_CREATE)) {
1236 table = fib6_get_table(net, cfg->fc_table);
1237 if (table == NULL) {
1238 printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
1239 table = fib6_new_table(net, cfg->fc_table);
1240 }
1241 } else {
1242 table = fib6_new_table(net, cfg->fc_table);
1243 }
1244 if (table == NULL) {
1245 goto out;
1246 }
1247
1248 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
1249
1250 if (rt == NULL) {
1251 err = -ENOMEM;
1252 goto out;
1253 }
1254
1255 rt->dst.obsolete = -1;
1256 rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
1257 jiffies + clock_t_to_jiffies(cfg->fc_expires) :
1258 0;
1259
1260 if (cfg->fc_protocol == RTPROT_UNSPEC)
1261 cfg->fc_protocol = RTPROT_BOOT;
1262 rt->rt6i_protocol = cfg->fc_protocol;
1263
1264 addr_type = ipv6_addr_type(&cfg->fc_dst);
1265
1266 if (addr_type & IPV6_ADDR_MULTICAST)
1267 rt->dst.input = ip6_mc_input;
1268 else if (cfg->fc_flags & RTF_LOCAL)
1269 rt->dst.input = ip6_input;
1270 else
1271 rt->dst.input = ip6_forward;
1272
1273 rt->dst.output = ip6_output;
1274
1275 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1276 rt->rt6i_dst.plen = cfg->fc_dst_len;
1277 if (rt->rt6i_dst.plen == 128)
1278 rt->dst.flags |= DST_HOST;
1279
1280 if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1281 u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1282 if (!metrics) {
1283 err = -ENOMEM;
1284 goto out;
1285 }
1286 dst_init_metrics(&rt->dst, metrics, 0);
1287 }
1288 #ifdef CONFIG_IPV6_SUBTREES
1289 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1290 rt->rt6i_src.plen = cfg->fc_src_len;
1291 #endif
1292
1293 rt->rt6i_metric = cfg->fc_metric;
1294
1295 /* We cannot add true routes via loopback here,
1296 they would result in kernel looping; promote them to reject routes
1297 */
1298 if ((cfg->fc_flags & RTF_REJECT) ||
1299 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
1300 && !(cfg->fc_flags&RTF_LOCAL))) {
1301 /* hold loopback dev/idev if we haven't done so. */
1302 if (dev != net->loopback_dev) {
1303 if (dev) {
1304 dev_put(dev);
1305 in6_dev_put(idev);
1306 }
1307 dev = net->loopback_dev;
1308 dev_hold(dev);
1309 idev = in6_dev_get(dev);
1310 if (!idev) {
1311 err = -ENODEV;
1312 goto out;
1313 }
1314 }
1315 rt->dst.output = ip6_pkt_discard_out;
1316 rt->dst.input = ip6_pkt_discard;
1317 rt->dst.error = -ENETUNREACH;
1318 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1319 goto install_route;
1320 }
1321
1322 if (cfg->fc_flags & RTF_GATEWAY) {
1323 const struct in6_addr *gw_addr;
1324 int gwa_type;
1325
1326 gw_addr = &cfg->fc_gateway;
1327 rt->rt6i_gateway = *gw_addr;
1328 gwa_type = ipv6_addr_type(gw_addr);
1329
1330 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1331 struct rt6_info *grt;
1332
1333 /* IPv6 strictly prohibits using non-link-local
1334 addresses as a nexthop address.
1335 Otherwise, the router will not be able to send redirects.
1336 It is very good, but in some (rare!) circumstances
1337 (SIT, PtP, NBMA NOARP links) it is handy to allow
1338 some exceptions. --ANK
1339 */
1340 err = -EINVAL;
1341 if (!(gwa_type&IPV6_ADDR_UNICAST))
1342 goto out;
1343
1344 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1345
1346 err = -EHOSTUNREACH;
1347 if (grt == NULL)
1348 goto out;
1349 if (dev) {
1350 if (dev != grt->rt6i_dev) {
1351 dst_release(&grt->dst);
1352 goto out;
1353 }
1354 } else {
1355 dev = grt->rt6i_dev;
1356 idev = grt->rt6i_idev;
1357 dev_hold(dev);
1358 in6_dev_hold(grt->rt6i_idev);
1359 }
1360 if (!(grt->rt6i_flags&RTF_GATEWAY))
1361 err = 0;
1362 dst_release(&grt->dst);
1363
1364 if (err)
1365 goto out;
1366 }
1367 err = -EINVAL;
1368 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1369 goto out;
1370 }
1371
1372 err = -ENODEV;
1373 if (dev == NULL)
1374 goto out;
1375
1376 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1377 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1378 err = -EINVAL;
1379 goto out;
1380 }
1381 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1382 rt->rt6i_prefsrc.plen = 128;
1383 } else
1384 rt->rt6i_prefsrc.plen = 0;
1385
1386 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1387 struct neighbour *n = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1388 if (IS_ERR(n)) {
1389 err = PTR_ERR(n);
1390 goto out;
1391 }
1392 dst_set_neighbour(&rt->dst, n);
1393 }
1394
1395 rt->rt6i_flags = cfg->fc_flags;
1396
1397 install_route:
1398 if (cfg->fc_mx) {
1399 struct nlattr *nla;
1400 int remaining;
1401
1402 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1403 int type = nla_type(nla);
1404
1405 if (type) {
1406 if (type > RTAX_MAX) {
1407 err = -EINVAL;
1408 goto out;
1409 }
1410
1411 dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1412 }
1413 }
1414 }
1415
1416 rt->dst.dev = dev;
1417 rt->rt6i_idev = idev;
1418 rt->rt6i_table = table;
1419
1420 cfg->fc_nlinfo.nl_net = dev_net(dev);
1421
1422 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1423
1424 out:
1425 if (dev)
1426 dev_put(dev);
1427 if (idev)
1428 in6_dev_put(idev);
1429 if (rt)
1430 dst_free(&rt->dst);
1431 return err;
1432 }
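/*
 * ip6_route_add() in brief: validate the prefix lengths, resolve the
 * output device and inet6_dev (via a lookup of the gateway when only a
 * gateway was supplied), promote loopback-device routes to reject
 * routes, apply any RTAX_* metrics from cfg->fc_mx, and insert the new
 * entry with __ip6_ins_rt().  On error every reference taken so far
 * (device, idev, dst) is dropped before returning.
 */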
1433
1434 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1435 {
1436 int err;
1437 struct fib6_table *table;
1438 struct net *net = dev_net(rt->rt6i_dev);
1439
1440 if (rt == net->ipv6.ip6_null_entry)
1441 return -ENOENT;
1442
1443 table = rt->rt6i_table;
1444 write_lock_bh(&table->tb6_lock);
1445
1446 err = fib6_del(rt, info);
1447 dst_release(&rt->dst);
1448
1449 write_unlock_bh(&table->tb6_lock);
1450
1451 return err;
1452 }
1453
1454 int ip6_del_rt(struct rt6_info *rt)
1455 {
1456 struct nl_info info = {
1457 .nl_net = dev_net(rt->rt6i_dev),
1458 };
1459 return __ip6_del_rt(rt, &info);
1460 }
1461
1462 static int ip6_route_del(struct fib6_config *cfg)
1463 {
1464 struct fib6_table *table;
1465 struct fib6_node *fn;
1466 struct rt6_info *rt;
1467 int err = -ESRCH;
1468
1469 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1470 if (table == NULL)
1471 return err;
1472
1473 read_lock_bh(&table->tb6_lock);
1474
1475 fn = fib6_locate(&table->tb6_root,
1476 &cfg->fc_dst, cfg->fc_dst_len,
1477 &cfg->fc_src, cfg->fc_src_len);
1478
1479 if (fn) {
1480 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1481 if (cfg->fc_ifindex &&
1482 (rt->rt6i_dev == NULL ||
1483 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1484 continue;
1485 if (cfg->fc_flags & RTF_GATEWAY &&
1486 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1487 continue;
1488 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1489 continue;
1490 dst_hold(&rt->dst);
1491 read_unlock_bh(&table->tb6_lock);
1492
1493 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1494 }
1495 }
1496 read_unlock_bh(&table->tb6_lock);
1497
1498 return err;
1499 }
1500
1501 /*
1502 * Handle redirects
1503 */
1504 struct ip6rd_flowi {
1505 struct flowi6 fl6;
1506 struct in6_addr gateway;
1507 };
1508
1509 static struct rt6_info *__ip6_route_redirect(struct net *net,
1510 struct fib6_table *table,
1511 struct flowi6 *fl6,
1512 int flags)
1513 {
1514 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1515 struct rt6_info *rt;
1516 struct fib6_node *fn;
1517
1518 /*
1519 * Get the "current" route for this destination and
1520 * check if the redirect has come from the appropriate router.
1521 *
1522 * RFC 2461 specifies that redirects should only be
1523 * accepted if they come from the nexthop to the target.
1524 * Due to the way the routes are chosen, this notion
1525 * is a bit fuzzy and one might need to check all possible
1526 * routes.
1527 */
1528
1529 read_lock_bh(&table->tb6_lock);
1530 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1531 restart:
1532 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1533 /*
1534 * Current route is on-link; redirect is always invalid.
1535 *
1536 * It seems the previous statement is not true. It could
1537 * be a node which regards us as on-link (e.g. proxy ndisc),
1538 * but then the router serving it might decide that we should
1539 * know the truth 8)8) --ANK (980726).
1540 */
1541 if (rt6_check_expired(rt))
1542 continue;
1543 if (!(rt->rt6i_flags & RTF_GATEWAY))
1544 continue;
1545 if (fl6->flowi6_oif != rt->rt6i_dev->ifindex)
1546 continue;
1547 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1548 continue;
1549 break;
1550 }
1551
1552 if (!rt)
1553 rt = net->ipv6.ip6_null_entry;
1554 BACKTRACK(net, &fl6->saddr);
1555 out:
1556 dst_hold(&rt->dst);
1557
1558 read_unlock_bh(&table->tb6_lock);
1559
1560 return rt;
1561 };
1562
1563 static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
1564 const struct in6_addr *src,
1565 const struct in6_addr *gateway,
1566 struct net_device *dev)
1567 {
1568 int flags = RT6_LOOKUP_F_HAS_SADDR;
1569 struct net *net = dev_net(dev);
1570 struct ip6rd_flowi rdfl = {
1571 .fl6 = {
1572 .flowi6_oif = dev->ifindex,
1573 .daddr = *dest,
1574 .saddr = *src,
1575 },
1576 };
1577
1578 rdfl.gateway = *gateway;
1579
1580 if (rt6_need_strict(dest))
1581 flags |= RT6_LOOKUP_F_IFACE;
1582
1583 return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6,
1584 flags, __ip6_route_redirect);
1585 }
1586
1587 void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1588 const struct in6_addr *saddr,
1589 struct neighbour *neigh, u8 *lladdr, int on_link)
1590 {
1591 struct rt6_info *rt, *nrt = NULL;
1592 struct netevent_redirect netevent;
1593 struct net *net = dev_net(neigh->dev);
1594
1595 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1596
1597 if (rt == net->ipv6.ip6_null_entry) {
1598 if (net_ratelimit())
1599 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1600 "for redirect target\n");
1601 goto out;
1602 }
1603
1604 /*
1605 * We have finally decided to accept it.
1606 */
1607
1608 neigh_update(neigh, lladdr, NUD_STALE,
1609 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1610 NEIGH_UPDATE_F_OVERRIDE|
1611 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1612 NEIGH_UPDATE_F_ISROUTER))
1613 );
1614
1615 /*
1616 * Redirect received -> path was valid.
1617 * Look, redirects are sent only in response to data packets,
1618 * so that this nexthop apparently is reachable. --ANK
1619 */
1620 dst_confirm(&rt->dst);
1621
1622 /* Duplicate redirect: silently ignore. */
1623 if (neigh == dst_get_neighbour_raw(&rt->dst))
1624 goto out;
1625
1626 nrt = ip6_rt_copy(rt, dest);
1627 if (nrt == NULL)
1628 goto out;
1629
1630 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1631 if (on_link)
1632 nrt->rt6i_flags &= ~RTF_GATEWAY;
1633
1634 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1635 dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
1636
1637 if (ip6_ins_rt(nrt))
1638 goto out;
1639
1640 netevent.old = &rt->dst;
1641 netevent.new = &nrt->dst;
1642 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1643
1644 if (rt->rt6i_flags&RTF_CACHE) {
1645 ip6_del_rt(rt);
1646 return;
1647 }
1648
1649 out:
1650 dst_release(&rt->dst);
1651 }
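/*
 * rt6_redirect() first checks, via ip6_route_redirect(), that the
 * redirect really came from the current nexthop for the destination,
 * then updates the neighbour entry with the advertised link-layer
 * address and installs an RTF_DYNAMIC|RTF_CACHE host clone pointing at
 * the new gateway.  The change is announced through the
 * NETEVENT_REDIRECT notifier, and an old RTF_CACHE entry for the
 * destination is removed.
 */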
1652
1653 /*
1654 * Handle ICMP "packet too big" messages
1655 * i.e. Path MTU discovery
1656 */
1657
1658 static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
1659 struct net *net, u32 pmtu, int ifindex)
1660 {
1661 struct rt6_info *rt, *nrt;
1662 int allfrag = 0;
1663 again:
1664 rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1665 if (rt == NULL)
1666 return;
1667
1668 if (rt6_check_expired(rt)) {
1669 ip6_del_rt(rt);
1670 goto again;
1671 }
1672
1673 if (pmtu >= dst_mtu(&rt->dst))
1674 goto out;
1675
1676 if (pmtu < IPV6_MIN_MTU) {
1677 /*
1678 * According to RFC 2460, the PMTU is set to the IPv6 Minimum Link
1679 * MTU (1280) and a fragment header should always be included
1680 * after a node receives a Too Big message reporting a PMTU
1681 * less than the IPv6 Minimum Link MTU.
1682 */
1683 pmtu = IPV6_MIN_MTU;
1684 allfrag = 1;
1685 }
1686
1687 /* New mtu received -> path was valid.
1688 Too Big messages are sent only in response to data packets,
1689 so this nexthop apparently is reachable. --ANK
1690 */
1691 dst_confirm(&rt->dst);
1692
1693 /* Host route. If it is static, it would be better
1694 not to override it, but to add a new one, so that
1695 when the cache entry expires the old pmtu
1696 is restored automatically.
1697 */
1698 if (rt->rt6i_flags & RTF_CACHE) {
1699 dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
1700 if (allfrag) {
1701 u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
1702 features |= RTAX_FEATURE_ALLFRAG;
1703 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1704 }
1705 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1706 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1707 goto out;
1708 }
1709
1710 /* Network route.
1711 Two cases are possible:
1712 1. It is a connected route. Action: COW
1713 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1714 */
1715 if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
1716 nrt = rt6_alloc_cow(rt, daddr, saddr);
1717 else
1718 nrt = rt6_alloc_clone(rt, daddr);
1719
1720 if (nrt) {
1721 dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
1722 if (allfrag) {
1723 u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
1724 features |= RTAX_FEATURE_ALLFRAG;
1725 dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
1726 }
1727
1728 /* According to RFC 1981, a PMTU increase should not be
1729 * detected within 5 minutes; the recommended timer is 10 minutes.
1730 * Here this route's expiration time is set to ip6_rt_mtu_expires,
1731 * which is 10 minutes. After 10 minutes the decreased pmtu expires
1732 * and detection of a PMTU increase happens automatically.
1733 */
1734 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1735 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1736
1737 ip6_ins_rt(nrt);
1738 }
1739 out:
1740 dst_release(&rt->dst);
1741 }
1742
1743 void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
1744 struct net_device *dev, u32 pmtu)
1745 {
1746 struct net *net = dev_net(dev);
1747
1748 /*
1749 * RFC 1981 states that a node "MUST reduce the size of the packets it
1750 * is sending along the path" that caused the Packet Too Big message.
1751 * Since it's not possible in the general case to determine which
1752 * interface was used to send the original packet, we update the MTU
1753 * on the interface that will be used to send future packets. We also
1754 * update the MTU on the interface that received the Packet Too Big in
1755 * case the original packet was forced out that interface with
1756 * SO_BINDTODEVICE or similar. This is the next best thing to the
1757 * correct behaviour, which would be to update the MTU on all
1758 * interfaces.
1759 */
1760 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
1761 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
1762 }
1763
1764 /*
1765 * Misc support functions
1766 */
1767
1768 static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
1769 const struct in6_addr *dest)
1770 {
1771 struct net *net = dev_net(ort->rt6i_dev);
1772 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
1773 ort->dst.dev, 0);
1774
1775 if (rt) {
1776 rt->dst.input = ort->dst.input;
1777 rt->dst.output = ort->dst.output;
1778 rt->dst.flags |= DST_HOST;
1779
1780 rt->rt6i_dst.addr = *dest;
1781 rt->rt6i_dst.plen = 128;
1782 dst_copy_metrics(&rt->dst, &ort->dst);
1783 rt->dst.error = ort->dst.error;
1784 rt->rt6i_idev = ort->rt6i_idev;
1785 if (rt->rt6i_idev)
1786 in6_dev_hold(rt->rt6i_idev);
1787 rt->dst.lastuse = jiffies;
1788 rt->rt6i_expires = 0;
1789
1790 rt->rt6i_gateway = ort->rt6i_gateway;
1791 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1792 rt->rt6i_metric = 0;
1793
1794 #ifdef CONFIG_IPV6_SUBTREES
1795 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1796 #endif
1797 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1798 rt->rt6i_table = ort->rt6i_table;
1799 }
1800 return rt;
1801 }
1802
1803 #ifdef CONFIG_IPV6_ROUTE_INFO
1804 static struct rt6_info *rt6_get_route_info(struct net *net,
1805 const struct in6_addr *prefix, int prefixlen,
1806 const struct in6_addr *gwaddr, int ifindex)
1807 {
1808 struct fib6_node *fn;
1809 struct rt6_info *rt = NULL;
1810 struct fib6_table *table;
1811
1812 table = fib6_get_table(net, RT6_TABLE_INFO);
1813 if (table == NULL)
1814 return NULL;
1815
1816 write_lock_bh(&table->tb6_lock);
1817 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1818 if (!fn)
1819 goto out;
1820
1821 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1822 if (rt->rt6i_dev->ifindex != ifindex)
1823 continue;
1824 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1825 continue;
1826 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1827 continue;
1828 dst_hold(&rt->dst);
1829 break;
1830 }
1831 out:
1832 write_unlock_bh(&table->tb6_lock);
1833 return rt;
1834 }
1835
1836 static struct rt6_info *rt6_add_route_info(struct net *net,
1837 const struct in6_addr *prefix, int prefixlen,
1838 const struct in6_addr *gwaddr, int ifindex,
1839 unsigned pref)
1840 {
1841 struct fib6_config cfg = {
1842 .fc_table = RT6_TABLE_INFO,
1843 .fc_metric = IP6_RT_PRIO_USER,
1844 .fc_ifindex = ifindex,
1845 .fc_dst_len = prefixlen,
1846 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1847 RTF_UP | RTF_PREF(pref),
1848 .fc_nlinfo.pid = 0,
1849 .fc_nlinfo.nlh = NULL,
1850 .fc_nlinfo.nl_net = net,
1851 };
1852
1853 cfg.fc_dst = *prefix;
1854 cfg.fc_gateway = *gwaddr;
1855
1856 /* We should treat it as a default route if prefix length is 0. */
1857 if (!prefixlen)
1858 cfg.fc_flags |= RTF_DEFAULT;
1859
1860 ip6_route_add(&cfg);
1861
1862 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1863 }
1864 #endif
1865
1866 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1867 {
1868 struct rt6_info *rt;
1869 struct fib6_table *table;
1870
1871 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1872 if (table == NULL)
1873 return NULL;
1874
1875 write_lock_bh(&table->tb6_lock);
1876 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1877 if (dev == rt->rt6i_dev &&
1878 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1879 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1880 break;
1881 }
1882 if (rt)
1883 dst_hold(&rt->dst);
1884 write_unlock_bh(&table->tb6_lock);
1885 return rt;
1886 }
1887
1888 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1889 struct net_device *dev,
1890 unsigned int pref)
1891 {
1892 struct fib6_config cfg = {
1893 .fc_table = RT6_TABLE_DFLT,
1894 .fc_metric = IP6_RT_PRIO_USER,
1895 .fc_ifindex = dev->ifindex,
1896 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1897 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1898 .fc_nlinfo.pid = 0,
1899 .fc_nlinfo.nlh = NULL,
1900 .fc_nlinfo.nl_net = dev_net(dev),
1901 };
1902
1903 cfg.fc_gateway = *gwaddr;
1904
1905 ip6_route_add(&cfg);
1906
1907 return rt6_get_dflt_router(gwaddr, dev);
1908 }
1909
1910 void rt6_purge_dflt_routers(struct net *net)
1911 {
1912 struct rt6_info *rt;
1913 struct fib6_table *table;
1914
1915 /* NOTE: Keep consistent with rt6_get_dflt_router */
1916 table = fib6_get_table(net, RT6_TABLE_DFLT);
1917 if (table == NULL)
1918 return;
1919
1920 restart:
1921 read_lock_bh(&table->tb6_lock);
1922 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1923 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1924 dst_hold(&rt->dst);
1925 read_unlock_bh(&table->tb6_lock);
1926 ip6_del_rt(rt);
1927 goto restart;
1928 }
1929 }
1930 read_unlock_bh(&table->tb6_lock);
1931 }
1932
1933 static void rtmsg_to_fib6_config(struct net *net,
1934 struct in6_rtmsg *rtmsg,
1935 struct fib6_config *cfg)
1936 {
1937 memset(cfg, 0, sizeof(*cfg));
1938
1939 cfg->fc_table = RT6_TABLE_MAIN;
1940 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1941 cfg->fc_metric = rtmsg->rtmsg_metric;
1942 cfg->fc_expires = rtmsg->rtmsg_info;
1943 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1944 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1945 cfg->fc_flags = rtmsg->rtmsg_flags;
1946
1947 cfg->fc_nlinfo.nl_net = net;
1948
1949 cfg->fc_dst = rtmsg->rtmsg_dst;
1950 cfg->fc_src = rtmsg->rtmsg_src;
1951 cfg->fc_gateway = rtmsg->rtmsg_gateway;
1952 }
1953
1954 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1955 {
1956 struct fib6_config cfg;
1957 struct in6_rtmsg rtmsg;
1958 int err;
1959
1960 switch(cmd) {
1961 case SIOCADDRT: /* Add a route */
1962 case SIOCDELRT: /* Delete a route */
1963 if (!capable(CAP_NET_ADMIN))
1964 return -EPERM;
1965 err = copy_from_user(&rtmsg, arg,
1966 sizeof(struct in6_rtmsg));
1967 if (err)
1968 return -EFAULT;
1969
1970 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1971
1972 rtnl_lock();
1973 switch (cmd) {
1974 case SIOCADDRT:
1975 err = ip6_route_add(&cfg);
1976 break;
1977 case SIOCDELRT:
1978 err = ip6_route_del(&cfg);
1979 break;
1980 default:
1981 err = -EINVAL;
1982 }
1983 rtnl_unlock();
1984
1985 return err;
1986 }
1987
1988 return -EINVAL;
1989 }
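/*
 * Illustrative sketch, hypothetical userspace caller (not part of this
 * file): the legacy ioctl interface handled above, as used by the old
 * route(8)-era tools.  SIOCADDRT/SIOCDELRT take a struct in6_rtmsg on
 * an AF_INET6 socket and require CAP_NET_ADMIN; modern tools use
 * rtnetlink instead.
 *
 *	struct in6_rtmsg rtmsg;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	memset(&rtmsg, 0, sizeof(rtmsg));
 *	inet_pton(AF_INET6, "2001:db8::", &rtmsg.rtmsg_dst);
 *	rtmsg.rtmsg_dst_len = 32;
 *	inet_pton(AF_INET6, "fe80::1", &rtmsg.rtmsg_gateway);
 *	rtmsg.rtmsg_flags = RTF_UP | RTF_GATEWAY;
 *	rtmsg.rtmsg_metric = 1;
 *	rtmsg.rtmsg_ifindex = if_nametoindex("eth0");	// "eth0" is a placeholder
 *	if (ioctl(fd, SIOCADDRT, &rtmsg) < 0)
 *		perror("SIOCADDRT");
 */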
1990
1991 /*
1992 * Drop the packet on the floor
1993 */
1994
1995 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1996 {
1997 int type;
1998 struct dst_entry *dst = skb_dst(skb);
1999 switch (ipstats_mib_noroutes) {
2000 case IPSTATS_MIB_INNOROUTES:
2001 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2002 if (type == IPV6_ADDR_ANY) {
2003 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2004 IPSTATS_MIB_INADDRERRORS);
2005 break;
2006 }
2007 /* FALLTHROUGH */
2008 case IPSTATS_MIB_OUTNOROUTES:
2009 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2010 ipstats_mib_noroutes);
2011 break;
2012 }
2013 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2014 kfree_skb(skb);
2015 return 0;
2016 }
2017
2018 static int ip6_pkt_discard(struct sk_buff *skb)
2019 {
2020 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2021 }
2022
2023 static int ip6_pkt_discard_out(struct sk_buff *skb)
2024 {
2025 skb->dev = skb_dst(skb)->dev;
2026 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2027 }
2028
2029 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2030
2031 static int ip6_pkt_prohibit(struct sk_buff *skb)
2032 {
2033 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2034 }
2035
2036 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2037 {
2038 skb->dev = skb_dst(skb)->dev;
2039 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2040 }
2041
2042 #endif
2043
2044 /*
2045 * Allocate a dst for local (unicast / anycast) address.
2046 */
2047
2048 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2049 const struct in6_addr *addr,
2050 int anycast)
2051 {
2052 struct net *net = dev_net(idev->dev);
2053 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
2054 net->loopback_dev, 0);
2055 struct neighbour *neigh;
2056
2057 if (rt == NULL) {
2058 if (net_ratelimit())
2059 pr_warning("IPv6: Maximum number of routes reached,"
2060 " consider increasing route/max_size.\n");
2061 return ERR_PTR(-ENOMEM);
2062 }
2063
2064 in6_dev_hold(idev);
2065
2066 rt->dst.flags |= DST_HOST;
2067 rt->dst.input = ip6_input;
2068 rt->dst.output = ip6_output;
2069 rt->rt6i_idev = idev;
2070 rt->dst.obsolete = -1;
2071
2072 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2073 if (anycast)
2074 rt->rt6i_flags |= RTF_ANYCAST;
2075 else
2076 rt->rt6i_flags |= RTF_LOCAL;
2077 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
2078 if (IS_ERR(neigh)) {
2079 dst_free(&rt->dst);
2080
2081 return ERR_CAST(neigh);
2082 }
2083 dst_set_neighbour(&rt->dst, neigh);
2084
2085 rt->rt6i_dst.addr = *addr;
2086 rt->rt6i_dst.plen = 128;
2087 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2088
2089 atomic_set(&rt->dst.__refcnt, 1);
2090
2091 return rt;
2092 }
2093
2094 int ip6_route_get_saddr(struct net *net,
2095 struct rt6_info *rt,
2096 const struct in6_addr *daddr,
2097 unsigned int prefs,
2098 struct in6_addr *saddr)
2099 {
2100 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2101 int err = 0;
2102 if (rt->rt6i_prefsrc.plen)
2103 *saddr = rt->rt6i_prefsrc.addr;
2104 else
2105 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2106 daddr, prefs, saddr);
2107 return err;
2108 }
2109
2110 /* remove deleted ip from prefsrc entries */
2111 struct arg_dev_net_ip {
2112 struct net_device *dev;
2113 struct net *net;
2114 struct in6_addr *addr;
2115 };
2116
2117 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2118 {
2119 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2120 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2121 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2122
2123 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
2124 rt != net->ipv6.ip6_null_entry &&
2125 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2126 /* remove prefsrc entry */
2127 rt->rt6i_prefsrc.plen = 0;
2128 }
2129 return 0;
2130 }
2131
2132 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2133 {
2134 struct net *net = dev_net(ifp->idev->dev);
2135 struct arg_dev_net_ip adni = {
2136 .dev = ifp->idev->dev,
2137 .net = net,
2138 .addr = &ifp->addr,
2139 };
2140 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2141 }
2142
2143 struct arg_dev_net {
2144 struct net_device *dev;
2145 struct net *net;
2146 };
2147
2148 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2149 {
2150 const struct arg_dev_net *adn = arg;
2151 const struct net_device *dev = adn->dev;
2152
2153 if ((rt->rt6i_dev == dev || dev == NULL) &&
2154 rt != adn->net->ipv6.ip6_null_entry) {
2155 RT6_TRACE("deleted by ifdown %p\n", rt);
2156 return -1;
2157 }
2158 return 0;
2159 }
2160
2161 void rt6_ifdown(struct net *net, struct net_device *dev)
2162 {
2163 struct arg_dev_net adn = {
2164 .dev = dev,
2165 .net = net,
2166 };
2167
2168 fib6_clean_all(net, fib6_ifdown, 0, &adn);
2169 icmp6_clean_all(fib6_ifdown, &adn);
2170 }
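/*
 * Both prefsrc removal and interface shutdown above rely on the same
 * pattern: fib6_clean_all() walks every routing table in the namespace
 * and invokes the callback on each route.  fib6_remove_prefsrc() only
 * edits matching routes in place and always returns 0, whereas
 * fib6_ifdown() returns -1 for routes on the departing device, which
 * asks the walker to delete them (the null entry is explicitly spared).
 */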
2171
2172 struct rt6_mtu_change_arg
2173 {
2174 struct net_device *dev;
2175 unsigned mtu;
2176 };
2177
2178 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2179 {
2180 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2181 struct inet6_dev *idev;
2182
2183 /* In IPv6, PMTU discovery is not optional,
2184 so an RTAX_MTU lock cannot disable it.
2185 We still use this lock to block changes
2186 caused by addrconf/ndisc.
2187 */
2188
2189 idev = __in6_dev_get(arg->dev);
2190 if (idev == NULL)
2191 return 0;
2192
2193 /* There is no way to discover an IPv6 PMTU increase that results
2194 from an administrative MTU increase, so the increase must be
2195 applied here. RFC 1981 does not cover administrative MTU
2196 increases (e.g. jumbo frames), so updating the PMTU is a MUST.
2197 */
2198 /*
2199 If the new MTU is less than the route PMTU, the new MTU will be
2200 the lowest MTU in the path; update the route PMTU to reflect the
2201 decrease. If the new MTU is greater than the route PMTU, and the
2202 old MTU was the lowest MTU in the path, update the route PMTU to
2203 reflect the increase. If another node in the path still has the
2204 lowest MTU, a Packet Too Big message will trigger PMTU discovery
2205 again.
2206 */
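/*
 * In other words, the test below updates RTAX_MTU to the new device MTU
 * when the route uses the device whose MTU changed, RTAX_MTU is not
 * administratively locked, and either the route's current MTU is at
 * least the new MTU (a decrease) or the route's MTU equals
 * idev->cnf.mtu6, i.e. it was simply inherited from the link and may
 * grow along with it.
 */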
2207 if (rt->rt6i_dev == arg->dev &&
2208 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2209 (dst_mtu(&rt->dst) >= arg->mtu ||
2210 (dst_mtu(&rt->dst) < arg->mtu &&
2211 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2212 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2213 }
2214 return 0;
2215 }
2216
2217 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
2218 {
2219 struct rt6_mtu_change_arg arg = {
2220 .dev = dev,
2221 .mtu = mtu,
2222 };
2223
2224 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2225 }
2226
2227 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2228 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2229 [RTA_OIF] = { .type = NLA_U32 },
2230 [RTA_IIF] = { .type = NLA_U32 },
2231 [RTA_PRIORITY] = { .type = NLA_U32 },
2232 [RTA_METRICS] = { .type = NLA_NESTED },
2233 };
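/*
 * rtm_ipv6_policy validates the attributes of incoming RTM_*ROUTE
 * netlink requests before rtm_to_fib6_config() copies them into a
 * struct fib6_config.  RTA_DST and RTA_SRC are not listed here because
 * they are length-checked by hand below against the prefix lengths in
 * the rtmsg header.  For instance, a request like
 * "ip -6 route add 2001:db8::/64 via fe80::1 dev eth0" arrives as
 * RTM_NEWROUTE carrying RTA_DST, RTA_GATEWAY and RTA_OIF (the exact
 * attribute set depends on the userspace tool).
 */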
2234
2235 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2236 struct fib6_config *cfg)
2237 {
2238 struct rtmsg *rtm;
2239 struct nlattr *tb[RTA_MAX+1];
2240 int err;
2241
2242 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2243 if (err < 0)
2244 goto errout;
2245
2246 err = -EINVAL;
2247 rtm = nlmsg_data(nlh);
2248 memset(cfg, 0, sizeof(*cfg));
2249
2250 cfg->fc_table = rtm->rtm_table;
2251 cfg->fc_dst_len = rtm->rtm_dst_len;
2252 cfg->fc_src_len = rtm->rtm_src_len;
2253 cfg->fc_flags = RTF_UP;
2254 cfg->fc_protocol = rtm->rtm_protocol;
2255
2256 if (rtm->rtm_type == RTN_UNREACHABLE)
2257 cfg->fc_flags |= RTF_REJECT;
2258
2259 if (rtm->rtm_type == RTN_LOCAL)
2260 cfg->fc_flags |= RTF_LOCAL;
2261
2262 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
2263 cfg->fc_nlinfo.nlh = nlh;
2264 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2265
2266 if (tb[RTA_GATEWAY]) {
2267 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2268 cfg->fc_flags |= RTF_GATEWAY;
2269 }
2270
2271 if (tb[RTA_DST]) {
2272 int plen = (rtm->rtm_dst_len + 7) >> 3;
2273
2274 if (nla_len(tb[RTA_DST]) < plen)
2275 goto errout;
2276
2277 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2278 }
2279
2280 if (tb[RTA_SRC]) {
2281 int plen = (rtm->rtm_src_len + 7) >> 3;
2282
2283 if (nla_len(tb[RTA_SRC]) < plen)
2284 goto errout;
2285
2286 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2287 }
2288
2289 if (tb[RTA_PREFSRC])
2290 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2291
2292 if (tb[RTA_OIF])
2293 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2294
2295 if (tb[RTA_PRIORITY])
2296 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2297
2298 if (tb[RTA_METRICS]) {
2299 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2300 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2301 }
2302
2303 if (tb[RTA_TABLE])
2304 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2305
2306 err = 0;
2307 errout:
2308 return err;
2309 }
2310
2311 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2312 {
2313 struct fib6_config cfg;
2314 int err;
2315
2316 err = rtm_to_fib6_config(skb, nlh, &cfg);
2317 if (err < 0)
2318 return err;
2319
2320 return ip6_route_del(&cfg);
2321 }
2322
2323 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2324 {
2325 struct fib6_config cfg;
2326 int err;
2327
2328 err = rtm_to_fib6_config(skb, nlh, &cfg);
2329 if (err < 0)
2330 return err;
2331
2332 return ip6_route_add(&cfg);
2333 }
2334
2335 static inline size_t rt6_nlmsg_size(void)
2336 {
2337 return NLMSG_ALIGN(sizeof(struct rtmsg))
2338 + nla_total_size(16) /* RTA_SRC */
2339 + nla_total_size(16) /* RTA_DST */
2340 + nla_total_size(16) /* RTA_GATEWAY */
2341 + nla_total_size(16) /* RTA_PREFSRC */
2342 + nla_total_size(4) /* RTA_TABLE */
2343 + nla_total_size(4) /* RTA_IIF */
2344 + nla_total_size(4) /* RTA_OIF */
2345 + nla_total_size(4) /* RTA_PRIORITY */
2346 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2347 + nla_total_size(sizeof(struct rta_cacheinfo));
2348 }
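/*
 * rt6_nlmsg_size() is the worst-case size of one RTM_NEWROUTE message:
 * the rtmsg header plus every attribute rt6_fill_node() may emit.
 * inet6_rt_notify() uses it below to size the notification skb, which
 * is why an -EMSGSIZE from rt6_fill_node() is treated there as a bug in
 * this estimate rather than as a recoverable error.
 */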
2349
2350 static int rt6_fill_node(struct net *net,
2351 struct sk_buff *skb, struct rt6_info *rt,
2352 struct in6_addr *dst, struct in6_addr *src,
2353 int iif, int type, u32 pid, u32 seq,
2354 int prefix, int nowait, unsigned int flags)
2355 {
2356 struct rtmsg *rtm;
2357 struct nlmsghdr *nlh;
2358 long expires;
2359 u32 table;
2360 struct neighbour *n;
2361
2362 if (prefix) { /* user wants prefix routes only */
2363 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2364 /* success since this is not a prefix route */
2365 return 1;
2366 }
2367 }
2368
2369 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2370 if (nlh == NULL)
2371 return -EMSGSIZE;
2372
2373 rtm = nlmsg_data(nlh);
2374 rtm->rtm_family = AF_INET6;
2375 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2376 rtm->rtm_src_len = rt->rt6i_src.plen;
2377 rtm->rtm_tos = 0;
2378 if (rt->rt6i_table)
2379 table = rt->rt6i_table->tb6_id;
2380 else
2381 table = RT6_TABLE_UNSPEC;
2382 rtm->rtm_table = table;
2383 NLA_PUT_U32(skb, RTA_TABLE, table);
2384 if (rt->rt6i_flags&RTF_REJECT)
2385 rtm->rtm_type = RTN_UNREACHABLE;
2386 else if (rt->rt6i_flags&RTF_LOCAL)
2387 rtm->rtm_type = RTN_LOCAL;
2388 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2389 rtm->rtm_type = RTN_LOCAL;
2390 else
2391 rtm->rtm_type = RTN_UNICAST;
2392 rtm->rtm_flags = 0;
2393 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2394 rtm->rtm_protocol = rt->rt6i_protocol;
2395 if (rt->rt6i_flags&RTF_DYNAMIC)
2396 rtm->rtm_protocol = RTPROT_REDIRECT;
2397 else if (rt->rt6i_flags & RTF_ADDRCONF)
2398 rtm->rtm_protocol = RTPROT_KERNEL;
2399 else if (rt->rt6i_flags&RTF_DEFAULT)
2400 rtm->rtm_protocol = RTPROT_RA;
2401
2402 if (rt->rt6i_flags&RTF_CACHE)
2403 rtm->rtm_flags |= RTM_F_CLONED;
2404
2405 if (dst) {
2406 NLA_PUT(skb, RTA_DST, 16, dst);
2407 rtm->rtm_dst_len = 128;
2408 } else if (rtm->rtm_dst_len)
2409 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2410 #ifdef CONFIG_IPV6_SUBTREES
2411 if (src) {
2412 NLA_PUT(skb, RTA_SRC, 16, src);
2413 rtm->rtm_src_len = 128;
2414 } else if (rtm->rtm_src_len)
2415 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2416 #endif
2417 if (iif) {
2418 #ifdef CONFIG_IPV6_MROUTE
2419 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2420 int err = ip6mr_get_route(net, skb, rtm, nowait);
2421 if (err <= 0) {
2422 if (!nowait) {
2423 if (err == 0)
2424 return 0;
2425 goto nla_put_failure;
2426 } else {
2427 if (err == -EMSGSIZE)
2428 goto nla_put_failure;
2429 }
2430 }
2431 } else
2432 #endif
2433 NLA_PUT_U32(skb, RTA_IIF, iif);
2434 } else if (dst) {
2435 struct in6_addr saddr_buf;
2436 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
2437 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2438 }
2439
2440 if (rt->rt6i_prefsrc.plen) {
2441 struct in6_addr saddr_buf;
2442 saddr_buf = rt->rt6i_prefsrc.addr;
2443 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2444 }
2445
2446 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2447 goto nla_put_failure;
2448
2449 rcu_read_lock();
2450 n = dst_get_neighbour(&rt->dst);
2451 if (n)
2452 NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
2453 rcu_read_unlock();
2454
2455 if (rt->dst.dev)
2456 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2457
2458 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2459
2460 if (!(rt->rt6i_flags & RTF_EXPIRES))
2461 expires = 0;
2462 else if (rt->rt6i_expires - jiffies < INT_MAX)
2463 expires = rt->rt6i_expires - jiffies;
2464 else
2465 expires = INT_MAX;
2466
2467 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
2468 expires, rt->dst.error) < 0)
2469 goto nla_put_failure;
2470
2471 return nlmsg_end(skb, nlh);
2472
2473 nla_put_failure:
2474 nlmsg_cancel(skb, nlh);
2475 return -EMSGSIZE;
2476 }
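/*
 * Note on the error handling in rt6_fill_node(): the NLA_PUT*() macros
 * used above jump to the nla_put_failure label when the skb runs out of
 * tailroom; nlmsg_cancel() then trims the partially built message and
 * -EMSGSIZE is returned so the caller can retry with a larger buffer
 * (or, for notifications, report a sizing bug).
 */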
2477
2478 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2479 {
2480 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2481 int prefix;
2482
2483 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2484 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2485 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2486 } else
2487 prefix = 0;
2488
2489 return rt6_fill_node(arg->net,
2490 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2491 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2492 prefix, 0, NLM_F_MULTI);
2493 }
2494
2495 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2496 {
2497 struct net *net = sock_net(in_skb->sk);
2498 struct nlattr *tb[RTA_MAX+1];
2499 struct rt6_info *rt;
2500 struct sk_buff *skb;
2501 struct rtmsg *rtm;
2502 struct flowi6 fl6;
2503 int err, iif = 0;
2504
2505 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2506 if (err < 0)
2507 goto errout;
2508
2509 err = -EINVAL;
2510 memset(&fl6, 0, sizeof(fl6));
2511
2512 if (tb[RTA_SRC]) {
2513 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2514 goto errout;
2515
2516 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2517 }
2518
2519 if (tb[RTA_DST]) {
2520 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2521 goto errout;
2522
2523 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2524 }
2525
2526 if (tb[RTA_IIF])
2527 iif = nla_get_u32(tb[RTA_IIF]);
2528
2529 if (tb[RTA_OIF])
2530 fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]);
2531
2532 if (iif) {
2533 struct net_device *dev;
2534 dev = __dev_get_by_index(net, iif);
2535 if (!dev) {
2536 err = -ENODEV;
2537 goto errout;
2538 }
2539 }
2540
2541 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2542 if (skb == NULL) {
2543 err = -ENOBUFS;
2544 goto errout;
2545 }
2546
2547 /* Reserve room for dummy headers; this skb can pass
2548 through a good chunk of the routing engine.
2549 */
2550 skb_reset_mac_header(skb);
2551 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2552
2553 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl6);
2554 skb_dst_set(skb, &rt->dst);
2555
2556 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2557 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2558 nlh->nlmsg_seq, 0, 0, 0);
2559 if (err < 0) {
2560 kfree_skb(skb);
2561 goto errout;
2562 }
2563
2564 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2565 errout:
2566 return err;
2567 }
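/*
 * inet6_rtm_getroute() services route queries such as
 * "ip -6 route get 2001:db8::1": it builds a flowi6 from the RTA_SRC,
 * RTA_DST, RTA_IIF and RTA_OIF attributes, resolves it through
 * ip6_route_output(), and unicasts a single RTM_NEWROUTE answer back to
 * the requesting socket.  The incoming interface is only validated for
 * existence here; the lookup itself is still an output-route lookup.
 */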
2568
2569 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2570 {
2571 struct sk_buff *skb;
2572 struct net *net = info->nl_net;
2573 u32 seq;
2574 int err;
2575
2576 err = -ENOBUFS;
2577 seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
2578
2579 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2580 if (skb == NULL)
2581 goto errout;
2582
2583 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2584 event, info->pid, seq, 0, 0, 0);
2585 if (err < 0) {
2586 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2587 WARN_ON(err == -EMSGSIZE);
2588 kfree_skb(skb);
2589 goto errout;
2590 }
2591 rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2592 info->nlh, gfp_any());
2593 return;
2594 errout:
2595 if (err < 0)
2596 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2597 }
2598
2599 static int ip6_route_dev_notify(struct notifier_block *this,
2600 unsigned long event, void *data)
2601 {
2602 struct net_device *dev = (struct net_device *)data;
2603 struct net *net = dev_net(dev);
2604
2605 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2606 net->ipv6.ip6_null_entry->dst.dev = dev;
2607 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2608 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2609 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2610 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2611 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2612 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2613 #endif
2614 }
2615
2616 return NOTIFY_OK;
2617 }
2618
2619 /*
2620 * /proc
2621 */
2622
2623 #ifdef CONFIG_PROC_FS
2624
2625 struct rt6_proc_arg
2626 {
2627 char *buffer;
2628 int offset;
2629 int length;
2630 int skip;
2631 int len;
2632 };
2633
2634 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2635 {
2636 struct seq_file *m = p_arg;
2637 struct neighbour *n;
2638
2639 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2640
2641 #ifdef CONFIG_IPV6_SUBTREES
2642 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2643 #else
2644 seq_puts(m, "00000000000000000000000000000000 00 ");
2645 #endif
2646 rcu_read_lock();
2647 n = dst_get_neighbour(&rt->dst);
2648 if (n) {
2649 seq_printf(m, "%pi6", n->primary_key);
2650 } else {
2651 seq_puts(m, "00000000000000000000000000000000");
2652 }
2653 rcu_read_unlock();
2654 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2655 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2656 rt->dst.__use, rt->rt6i_flags,
2657 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2658 return 0;
2659 }
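/*
 * rt6_info_route() emits one line of /proc/net/ipv6_route per route:
 * destination prefix and length, source prefix and length (all zeros
 * unless CONFIG_IPV6_SUBTREES is enabled), next-hop address, then the
 * metric, reference count, use count and flags in hex, followed by the
 * device name.  Addresses are printed as 32 hex digits without colons.
 */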
2660
2661 static int ipv6_route_show(struct seq_file *m, void *v)
2662 {
2663 struct net *net = (struct net *)m->private;
2664 fib6_clean_all(net, rt6_info_route, 0, m);
2665 return 0;
2666 }
2667
2668 static int ipv6_route_open(struct inode *inode, struct file *file)
2669 {
2670 return single_open_net(inode, file, ipv6_route_show);
2671 }
2672
2673 static const struct file_operations ipv6_route_proc_fops = {
2674 .owner = THIS_MODULE,
2675 .open = ipv6_route_open,
2676 .read = seq_read,
2677 .llseek = seq_lseek,
2678 .release = single_release_net,
2679 };
2680
2681 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2682 {
2683 struct net *net = (struct net *)seq->private;
2684 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2685 net->ipv6.rt6_stats->fib_nodes,
2686 net->ipv6.rt6_stats->fib_route_nodes,
2687 net->ipv6.rt6_stats->fib_rt_alloc,
2688 net->ipv6.rt6_stats->fib_rt_entries,
2689 net->ipv6.rt6_stats->fib_rt_cache,
2690 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2691 net->ipv6.rt6_stats->fib_discarded_routes);
2692
2693 return 0;
2694 }
2695
2696 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2697 {
2698 return single_open_net(inode, file, rt6_stats_seq_show);
2699 }
2700
2701 static const struct file_operations rt6_stats_seq_fops = {
2702 .owner = THIS_MODULE,
2703 .open = rt6_stats_seq_open,
2704 .read = seq_read,
2705 .llseek = seq_lseek,
2706 .release = single_release_net,
2707 };
2708 #endif /* CONFIG_PROC_FS */
2709
2710 #ifdef CONFIG_SYSCTL
2711
2712 static
2713 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2714 void __user *buffer, size_t *lenp, loff_t *ppos)
2715 {
2716 struct net *net;
2717 int delay;
2718 if (!write)
2719 return -EINVAL;
2720
2721 net = (struct net *)ctl->extra1;
2722 delay = net->ipv6.sysctl.flush_delay;
2723 proc_dointvec(ctl, write, buffer, lenp, ppos);
2724 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2725 return 0;
2726 }
2727
2728 ctl_table ipv6_route_table_template[] = {
2729 {
2730 .procname = "flush",
2731 .data = &init_net.ipv6.sysctl.flush_delay,
2732 .maxlen = sizeof(int),
2733 .mode = 0200,
2734 .proc_handler = ipv6_sysctl_rtcache_flush
2735 },
2736 {
2737 .procname = "gc_thresh",
2738 .data = &ip6_dst_ops_template.gc_thresh,
2739 .maxlen = sizeof(int),
2740 .mode = 0644,
2741 .proc_handler = proc_dointvec,
2742 },
2743 {
2744 .procname = "max_size",
2745 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2746 .maxlen = sizeof(int),
2747 .mode = 0644,
2748 .proc_handler = proc_dointvec,
2749 },
2750 {
2751 .procname = "gc_min_interval",
2752 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2753 .maxlen = sizeof(int),
2754 .mode = 0644,
2755 .proc_handler = proc_dointvec_jiffies,
2756 },
2757 {
2758 .procname = "gc_timeout",
2759 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2760 .maxlen = sizeof(int),
2761 .mode = 0644,
2762 .proc_handler = proc_dointvec_jiffies,
2763 },
2764 {
2765 .procname = "gc_interval",
2766 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2767 .maxlen = sizeof(int),
2768 .mode = 0644,
2769 .proc_handler = proc_dointvec_jiffies,
2770 },
2771 {
2772 .procname = "gc_elasticity",
2773 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2774 .maxlen = sizeof(int),
2775 .mode = 0644,
2776 .proc_handler = proc_dointvec,
2777 },
2778 {
2779 .procname = "mtu_expires",
2780 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2781 .maxlen = sizeof(int),
2782 .mode = 0644,
2783 .proc_handler = proc_dointvec_jiffies,
2784 },
2785 {
2786 .procname = "min_adv_mss",
2787 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2788 .maxlen = sizeof(int),
2789 .mode = 0644,
2790 .proc_handler = proc_dointvec,
2791 },
2792 {
2793 .procname = "gc_min_interval_ms",
2794 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2795 .maxlen = sizeof(int),
2796 .mode = 0644,
2797 .proc_handler = proc_dointvec_ms_jiffies,
2798 },
2799 { }
2800 };
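/*
 * These entries appear under /proc/sys/net/ipv6/route/ (the
 * net.ipv6.route.* sysctls), one copy per network namespace:
 * ipv6_route_sysctl_init() below duplicates the template and repoints
 * each .data field at the per-namespace value, so the table indices
 * used there must stay in sync with the order of the entries above.
 * "flush" is write-only; for example,
 *   echo 1 > /proc/sys/net/ipv6/route/flush
 * kicks off a garbage-collection pass via ipv6_sysctl_rtcache_flush().
 */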
2801
2802 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2803 {
2804 struct ctl_table *table;
2805
2806 table = kmemdup(ipv6_route_table_template,
2807 sizeof(ipv6_route_table_template),
2808 GFP_KERNEL);
2809
2810 if (table) {
2811 table[0].data = &net->ipv6.sysctl.flush_delay;
2812 table[0].extra1 = net;
2813 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2814 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2815 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2816 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2817 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2818 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2819 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2820 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2821 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2822 }
2823
2824 return table;
2825 }
2826 #endif
2827
2828 static int __net_init ip6_route_net_init(struct net *net)
2829 {
2830 int ret = -ENOMEM;
2831
2832 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2833 sizeof(net->ipv6.ip6_dst_ops));
2834
2835 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
2836 goto out_ip6_dst_ops;
2837
2838 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2839 sizeof(*net->ipv6.ip6_null_entry),
2840 GFP_KERNEL);
2841 if (!net->ipv6.ip6_null_entry)
2842 goto out_ip6_dst_entries;
2843 net->ipv6.ip6_null_entry->dst.path =
2844 (struct dst_entry *)net->ipv6.ip6_null_entry;
2845 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2846 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
2847 ip6_template_metrics, true);
2848
2849 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2850 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2851 sizeof(*net->ipv6.ip6_prohibit_entry),
2852 GFP_KERNEL);
2853 if (!net->ipv6.ip6_prohibit_entry)
2854 goto out_ip6_null_entry;
2855 net->ipv6.ip6_prohibit_entry->dst.path =
2856 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2857 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2858 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
2859 ip6_template_metrics, true);
2860
2861 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2862 sizeof(*net->ipv6.ip6_blk_hole_entry),
2863 GFP_KERNEL);
2864 if (!net->ipv6.ip6_blk_hole_entry)
2865 goto out_ip6_prohibit_entry;
2866 net->ipv6.ip6_blk_hole_entry->dst.path =
2867 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2868 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2869 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
2870 ip6_template_metrics, true);
2871 #endif
2872
2873 net->ipv6.sysctl.flush_delay = 0;
2874 net->ipv6.sysctl.ip6_rt_max_size = 4096;
2875 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
2876 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
2877 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
2878 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
2879 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2880 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2881
2882 #ifdef CONFIG_PROC_FS
2883 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2884 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2885 #endif
2886 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2887
2888 ret = 0;
2889 out:
2890 return ret;
2891
2892 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2893 out_ip6_prohibit_entry:
2894 kfree(net->ipv6.ip6_prohibit_entry);
2895 out_ip6_null_entry:
2896 kfree(net->ipv6.ip6_null_entry);
2897 #endif
2898 out_ip6_dst_entries:
2899 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2900 out_ip6_dst_ops:
2901 goto out;
2902 }
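/*
 * ip6_route_net_init() gives every network namespace its own copy of
 * the dst_ops, the always-present null route and (with multiple tables)
 * the prohibit and blackhole routes, plus default sysctl values and its
 * own /proc/net files.  The error path unwinds the setup in reverse
 * order through the goto labels at the bottom of the function.
 */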
2903
2904 static void __net_exit ip6_route_net_exit(struct net *net)
2905 {
2906 #ifdef CONFIG_PROC_FS
2907 proc_net_remove(net, "ipv6_route");
2908 proc_net_remove(net, "rt6_stats");
2909 #endif
2910 kfree(net->ipv6.ip6_null_entry);
2911 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2912 kfree(net->ipv6.ip6_prohibit_entry);
2913 kfree(net->ipv6.ip6_blk_hole_entry);
2914 #endif
2915 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2916 }
2917
2918 static struct pernet_operations ip6_route_net_ops = {
2919 .init = ip6_route_net_init,
2920 .exit = ip6_route_net_exit,
2921 };
2922
2923 static struct notifier_block ip6_route_dev_notifier = {
2924 .notifier_call = ip6_route_dev_notify,
2925 .priority = 0,
2926 };
2927
2928 int __init ip6_route_init(void)
2929 {
2930 int ret;
2931
2932 ret = -ENOMEM;
2933 ip6_dst_ops_template.kmem_cachep =
2934 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2935 SLAB_HWCACHE_ALIGN, NULL);
2936 if (!ip6_dst_ops_template.kmem_cachep)
2937 goto out;
2938
2939 ret = dst_entries_init(&ip6_dst_blackhole_ops);
2940 if (ret)
2941 goto out_kmem_cache;
2942
2943 ret = register_pernet_subsys(&ip6_route_net_ops);
2944 if (ret)
2945 goto out_dst_entries;
2946
2947 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
2948
2949 /* The loopback device is registered before this code runs, so the
2950 * loopback reference in rt6_info is not taken automatically; do it
2951 * manually for init_net. */
2952 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
2953 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2954 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2955 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
2956 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2957 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
2958 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2959 #endif
2960 ret = fib6_init();
2961 if (ret)
2962 goto out_register_subsys;
2963
2964 ret = xfrm6_init();
2965 if (ret)
2966 goto out_fib6_init;
2967
2968 ret = fib6_rules_init();
2969 if (ret)
2970 goto xfrm6_init;
2971
2972 ret = -ENOBUFS;
2973 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
2974 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
2975 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
2976 goto fib6_rules_init;
2977
2978 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2979 if (ret)
2980 goto fib6_rules_init;
2981
2982 out:
2983 return ret;
2984
2985 fib6_rules_init:
2986 fib6_rules_cleanup();
2987 xfrm6_init:
2988 xfrm6_fini();
2989 out_fib6_init:
2990 fib6_gc_cleanup();
2991 out_register_subsys:
2992 unregister_pernet_subsys(&ip6_route_net_ops);
2993 out_dst_entries:
2994 dst_entries_destroy(&ip6_dst_blackhole_ops);
2995 out_kmem_cache:
2996 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2997 goto out;
2998 }
2999
3000 void ip6_route_cleanup(void)
3001 {
3002 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3003 fib6_rules_cleanup();
3004 xfrm6_fini();
3005 fib6_gc_cleanup();
3006 unregister_pernet_subsys(&ip6_route_net_ops);
3007 dst_entries_destroy(&ip6_dst_blackhole_ops);
3008 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3009 }