2 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
18 * YOSHIFUJI Hideaki @USAGI
19 * reworked default router selection.
20 * - respect outgoing interface
21 * - select from (probably) reachable routers (i.e.
22 * routers in REACHABLE, STALE, DELAY or PROBE states).
23 * - always select the same router if it is (probably)
24 * reachable. otherwise, round-robin the list.
26 * Fixed routing subtrees.
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/times.h>
33 #include <linux/socket.h>
34 #include <linux/sockios.h>
35 #include <linux/net.h>
36 #include <linux/route.h>
37 #include <linux/netdevice.h>
38 #include <linux/in6.h>
39 #include <linux/init.h>
40 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
49 #include <net/ip6_fib.h>
50 #include <net/ip6_route.h>
51 #include <net/ndisc.h>
52 #include <net/addrconf.h>
54 #include <linux/rtnetlink.h>
57 #include <net/netevent.h>
58 #include <net/netlink.h>
60 #include <asm/uaccess.h>
63 #include <linux/sysctl.h>
66 /* Set to 3 to get tracing. */
70 #define RDBG(x) printk x
71 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
74 #define RT6_TRACE(x...) do { ; } while (0)
77 #define CLONE_OFFLINK_ROUTE 0
79 static int ip6_rt_max_size
= 4096;
80 static int ip6_rt_gc_min_interval
= HZ
/ 2;
81 static int ip6_rt_gc_timeout
= 60*HZ
;
82 int ip6_rt_gc_interval
= 30*HZ
;
83 static int ip6_rt_gc_elasticity
= 9;
84 static int ip6_rt_mtu_expires
= 10*60*HZ
;
85 static int ip6_rt_min_advmss
= IPV6_MIN_MTU
- 20 - 40;
87 static struct rt6_info
* ip6_rt_copy(struct rt6_info
*ort
);
88 static struct dst_entry
*ip6_dst_check(struct dst_entry
*dst
, u32 cookie
);
89 static struct dst_entry
*ip6_negative_advice(struct dst_entry
*);
90 static void ip6_dst_destroy(struct dst_entry
*);
91 static void ip6_dst_ifdown(struct dst_entry
*,
92 struct net_device
*dev
, int how
);
93 static int ip6_dst_gc(void);
95 static int ip6_pkt_discard(struct sk_buff
*skb
);
96 static int ip6_pkt_discard_out(struct sk_buff
*skb
);
97 static void ip6_link_failure(struct sk_buff
*skb
);
98 static void ip6_rt_update_pmtu(struct dst_entry
*dst
, u32 mtu
);
100 #ifdef CONFIG_IPV6_ROUTE_INFO
101 static struct rt6_info
*rt6_add_route_info(struct in6_addr
*prefix
, int prefixlen
,
102 struct in6_addr
*gwaddr
, int ifindex
,
104 static struct rt6_info
*rt6_get_route_info(struct in6_addr
*prefix
, int prefixlen
,
105 struct in6_addr
*gwaddr
, int ifindex
);
108 static struct dst_ops ip6_dst_ops
= {
110 .protocol
= __constant_htons(ETH_P_IPV6
),
113 .check
= ip6_dst_check
,
114 .destroy
= ip6_dst_destroy
,
115 .ifdown
= ip6_dst_ifdown
,
116 .negative_advice
= ip6_negative_advice
,
117 .link_failure
= ip6_link_failure
,
118 .update_pmtu
= ip6_rt_update_pmtu
,
119 .entry_size
= sizeof(struct rt6_info
),
122 struct rt6_info ip6_null_entry
= {
125 .__refcnt
= ATOMIC_INIT(1),
127 .dev
= &loopback_dev
,
129 .error
= -ENETUNREACH
,
130 .metrics
= { [RTAX_HOPLIMIT
- 1] = 255, },
131 .input
= ip6_pkt_discard
,
132 .output
= ip6_pkt_discard_out
,
134 .path
= (struct dst_entry
*)&ip6_null_entry
,
137 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
138 .rt6i_metric
= ~(u32
) 0,
139 .rt6i_ref
= ATOMIC_INIT(1),
142 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
144 static int ip6_pkt_prohibit(struct sk_buff
*skb
);
145 static int ip6_pkt_prohibit_out(struct sk_buff
*skb
);
146 static int ip6_pkt_blk_hole(struct sk_buff
*skb
);
148 struct rt6_info ip6_prohibit_entry
= {
151 .__refcnt
= ATOMIC_INIT(1),
153 .dev
= &loopback_dev
,
156 .metrics
= { [RTAX_HOPLIMIT
- 1] = 255, },
157 .input
= ip6_pkt_prohibit
,
158 .output
= ip6_pkt_prohibit_out
,
160 .path
= (struct dst_entry
*)&ip6_prohibit_entry
,
163 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
164 .rt6i_metric
= ~(u32
) 0,
165 .rt6i_ref
= ATOMIC_INIT(1),
168 struct rt6_info ip6_blk_hole_entry
= {
171 .__refcnt
= ATOMIC_INIT(1),
173 .dev
= &loopback_dev
,
176 .metrics
= { [RTAX_HOPLIMIT
- 1] = 255, },
177 .input
= ip6_pkt_blk_hole
,
178 .output
= ip6_pkt_blk_hole
,
180 .path
= (struct dst_entry
*)&ip6_blk_hole_entry
,
183 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
184 .rt6i_metric
= ~(u32
) 0,
185 .rt6i_ref
= ATOMIC_INIT(1),
190 /* allocate dst with ip6_dst_ops */
191 static __inline__
struct rt6_info
*ip6_dst_alloc(void)
193 return (struct rt6_info
*)dst_alloc(&ip6_dst_ops
);
196 static void ip6_dst_destroy(struct dst_entry
*dst
)
198 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
199 struct inet6_dev
*idev
= rt
->rt6i_idev
;
202 rt
->rt6i_idev
= NULL
;
207 static void ip6_dst_ifdown(struct dst_entry
*dst
, struct net_device
*dev
,
210 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
211 struct inet6_dev
*idev
= rt
->rt6i_idev
;
213 if (dev
!= &loopback_dev
&& idev
!= NULL
&& idev
->dev
== dev
) {
214 struct inet6_dev
*loopback_idev
= in6_dev_get(&loopback_dev
);
215 if (loopback_idev
!= NULL
) {
216 rt
->rt6i_idev
= loopback_idev
;
222 static __inline__
int rt6_check_expired(const struct rt6_info
*rt
)
224 return (rt
->rt6i_flags
& RTF_EXPIRES
&&
225 time_after(jiffies
, rt
->rt6i_expires
));
228 static inline int rt6_need_strict(struct in6_addr
*daddr
)
230 return (ipv6_addr_type(daddr
) &
231 (IPV6_ADDR_MULTICAST
| IPV6_ADDR_LINKLOCAL
));
235 * Route lookup. Any table->tb6_lock is implied.
238 static __inline__
struct rt6_info
*rt6_device_match(struct rt6_info
*rt
,
242 struct rt6_info
*local
= NULL
;
243 struct rt6_info
*sprt
;
246 for (sprt
= rt
; sprt
; sprt
= sprt
->u
.next
) {
247 struct net_device
*dev
= sprt
->rt6i_dev
;
248 if (dev
->ifindex
== oif
)
250 if (dev
->flags
& IFF_LOOPBACK
) {
251 if (sprt
->rt6i_idev
== NULL
||
252 sprt
->rt6i_idev
->dev
->ifindex
!= oif
) {
255 if (local
&& (!oif
||
256 local
->rt6i_idev
->dev
->ifindex
== oif
))
267 return &ip6_null_entry
;
272 #ifdef CONFIG_IPV6_ROUTER_PREF
273 static void rt6_probe(struct rt6_info
*rt
)
275 struct neighbour
*neigh
= rt
? rt
->rt6i_nexthop
: NULL
;
277 * Okay, this does not seem to be appropriate
278 * for now, however, we need to check if it
279 * is really so; aka Router Reachability Probing.
281 * Router Reachability Probe MUST be rate-limited
282 * to no more than one per minute.
284 if (!neigh
|| (neigh
->nud_state
& NUD_VALID
))
286 read_lock_bh(&neigh
->lock
);
287 if (!(neigh
->nud_state
& NUD_VALID
) &&
288 time_after(jiffies
, neigh
->updated
+ rt
->rt6i_idev
->cnf
.rtr_probe_interval
)) {
289 struct in6_addr mcaddr
;
290 struct in6_addr
*target
;
292 neigh
->updated
= jiffies
;
293 read_unlock_bh(&neigh
->lock
);
295 target
= (struct in6_addr
*)&neigh
->primary_key
;
296 addrconf_addr_solict_mult(target
, &mcaddr
);
297 ndisc_send_ns(rt
->rt6i_dev
, NULL
, target
, &mcaddr
, NULL
);
299 read_unlock_bh(&neigh
->lock
);
302 static inline void rt6_probe(struct rt6_info
*rt
)
309 * Default Router Selection (RFC 2461 6.3.6)
311 static int inline rt6_check_dev(struct rt6_info
*rt
, int oif
)
313 struct net_device
*dev
= rt
->rt6i_dev
;
314 if (!oif
|| dev
->ifindex
== oif
)
316 if ((dev
->flags
& IFF_LOOPBACK
) &&
317 rt
->rt6i_idev
&& rt
->rt6i_idev
->dev
->ifindex
== oif
)
322 static int inline rt6_check_neigh(struct rt6_info
*rt
)
324 struct neighbour
*neigh
= rt
->rt6i_nexthop
;
326 if (rt
->rt6i_flags
& RTF_NONEXTHOP
||
327 !(rt
->rt6i_flags
& RTF_GATEWAY
))
330 read_lock_bh(&neigh
->lock
);
331 if (neigh
->nud_state
& NUD_VALID
)
333 else if (!(neigh
->nud_state
& NUD_FAILED
))
335 read_unlock_bh(&neigh
->lock
);
340 static int rt6_score_route(struct rt6_info
*rt
, int oif
,
345 m
= rt6_check_dev(rt
, oif
);
346 if (!m
&& (strict
& RT6_LOOKUP_F_IFACE
))
348 #ifdef CONFIG_IPV6_ROUTER_PREF
349 m
|= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt
->rt6i_flags
)) << 2;
351 n
= rt6_check_neigh(rt
);
354 else if (!n
&& strict
& RT6_LOOKUP_F_REACHABLE
)
359 static struct rt6_info
*rt6_select(struct rt6_info
**head
, int oif
,
362 struct rt6_info
*match
= NULL
, *last
= NULL
;
363 struct rt6_info
*rt
, *rt0
= *head
;
367 RT6_TRACE("%s(head=%p(*head=%p), oif=%d)\n",
368 __FUNCTION__
, head
, head
? *head
: NULL
, oif
);
370 for (rt
= rt0
, metric
= rt0
->rt6i_metric
;
371 rt
&& rt
->rt6i_metric
== metric
&& (!last
|| rt
!= rt0
);
375 if (rt6_check_expired(rt
))
380 m
= rt6_score_route(rt
, oif
, strict
);
394 (strict
& RT6_LOOKUP_F_REACHABLE
) &&
395 last
&& last
!= rt0
) {
396 /* no entries matched; do round-robin */
397 static DEFINE_SPINLOCK(lock
);
400 rt0
->u
.next
= last
->u
.next
;
405 RT6_TRACE("%s() => %p, score=%d\n",
406 __FUNCTION__
, match
, mpri
);
408 return (match
? match
: &ip6_null_entry
);
411 #ifdef CONFIG_IPV6_ROUTE_INFO
412 int rt6_route_rcv(struct net_device
*dev
, u8
*opt
, int len
,
413 struct in6_addr
*gwaddr
)
415 struct route_info
*rinfo
= (struct route_info
*) opt
;
416 struct in6_addr prefix_buf
, *prefix
;
421 if (len
< sizeof(struct route_info
)) {
425 /* Sanity check for prefix_len and length */
426 if (rinfo
->length
> 3) {
428 } else if (rinfo
->prefix_len
> 128) {
430 } else if (rinfo
->prefix_len
> 64) {
431 if (rinfo
->length
< 2) {
434 } else if (rinfo
->prefix_len
> 0) {
435 if (rinfo
->length
< 1) {
440 pref
= rinfo
->route_pref
;
441 if (pref
== ICMPV6_ROUTER_PREF_INVALID
)
442 pref
= ICMPV6_ROUTER_PREF_MEDIUM
;
444 lifetime
= htonl(rinfo
->lifetime
);
445 if (lifetime
== 0xffffffff) {
447 } else if (lifetime
> 0x7fffffff/HZ
) {
448 /* Avoid arithmetic overflow */
449 lifetime
= 0x7fffffff/HZ
- 1;
452 if (rinfo
->length
== 3)
453 prefix
= (struct in6_addr
*)rinfo
->prefix
;
455 /* this function is safe */
456 ipv6_addr_prefix(&prefix_buf
,
457 (struct in6_addr
*)rinfo
->prefix
,
459 prefix
= &prefix_buf
;
462 rt
= rt6_get_route_info(prefix
, rinfo
->prefix_len
, gwaddr
, dev
->ifindex
);
464 if (rt
&& !lifetime
) {
470 rt
= rt6_add_route_info(prefix
, rinfo
->prefix_len
, gwaddr
, dev
->ifindex
,
473 rt
->rt6i_flags
= RTF_ROUTEINFO
|
474 (rt
->rt6i_flags
& ~RTF_PREF_MASK
) | RTF_PREF(pref
);
477 if (lifetime
== 0xffffffff) {
478 rt
->rt6i_flags
&= ~RTF_EXPIRES
;
480 rt
->rt6i_expires
= jiffies
+ HZ
* lifetime
;
481 rt
->rt6i_flags
|= RTF_EXPIRES
;
483 dst_release(&rt
->u
.dst
);
489 #define BACKTRACK(saddr) \
491 if (rt == &ip6_null_entry) { \
492 struct fib6_node *pn; \
494 if (fn->fn_flags & RTN_TL_ROOT) \
497 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
498 fn = fib6_lookup(pn->subtree, NULL, saddr); \
501 if (fn->fn_flags & RTN_RTINFO) \
507 static struct rt6_info
*ip6_pol_route_lookup(struct fib6_table
*table
,
508 struct flowi
*fl
, int flags
)
510 struct fib6_node
*fn
;
513 read_lock_bh(&table
->tb6_lock
);
514 fn
= fib6_lookup(&table
->tb6_root
, &fl
->fl6_dst
, &fl
->fl6_src
);
517 rt
= rt6_device_match(rt
, fl
->oif
, flags
);
518 BACKTRACK(&fl
->fl6_src
);
520 dst_hold(&rt
->u
.dst
);
521 read_unlock_bh(&table
->tb6_lock
);
523 rt
->u
.dst
.lastuse
= jiffies
;
530 struct rt6_info
*rt6_lookup(struct in6_addr
*daddr
, struct in6_addr
*saddr
,
541 struct dst_entry
*dst
;
542 int flags
= strict
? RT6_LOOKUP_F_IFACE
: 0;
545 memcpy(&fl
.fl6_src
, saddr
, sizeof(*saddr
));
546 flags
|= RT6_LOOKUP_F_HAS_SADDR
;
549 dst
= fib6_rule_lookup(&fl
, flags
, ip6_pol_route_lookup
);
551 return (struct rt6_info
*) dst
;
558 /* ip6_ins_rt is called with FREE table->tb6_lock.
559 It takes new route entry, the addition fails by any reason the
560 route is freed. In any case, if caller does not hold it, it may
564 static int __ip6_ins_rt(struct rt6_info
*rt
, struct nl_info
*info
)
567 struct fib6_table
*table
;
569 table
= rt
->rt6i_table
;
570 write_lock_bh(&table
->tb6_lock
);
571 err
= fib6_add(&table
->tb6_root
, rt
, info
);
572 write_unlock_bh(&table
->tb6_lock
);
577 int ip6_ins_rt(struct rt6_info
*rt
)
579 return __ip6_ins_rt(rt
, NULL
);
582 static struct rt6_info
*rt6_alloc_cow(struct rt6_info
*ort
, struct in6_addr
*daddr
,
583 struct in6_addr
*saddr
)
591 rt
= ip6_rt_copy(ort
);
594 if (!(rt
->rt6i_flags
&RTF_GATEWAY
)) {
595 if (rt
->rt6i_dst
.plen
!= 128 &&
596 ipv6_addr_equal(&rt
->rt6i_dst
.addr
, daddr
))
597 rt
->rt6i_flags
|= RTF_ANYCAST
;
598 ipv6_addr_copy(&rt
->rt6i_gateway
, daddr
);
601 ipv6_addr_copy(&rt
->rt6i_dst
.addr
, daddr
);
602 rt
->rt6i_dst
.plen
= 128;
603 rt
->rt6i_flags
|= RTF_CACHE
;
604 rt
->u
.dst
.flags
|= DST_HOST
;
606 #ifdef CONFIG_IPV6_SUBTREES
607 if (rt
->rt6i_src
.plen
&& saddr
) {
608 ipv6_addr_copy(&rt
->rt6i_src
.addr
, saddr
);
609 rt
->rt6i_src
.plen
= 128;
613 rt
->rt6i_nexthop
= ndisc_get_neigh(rt
->rt6i_dev
, &rt
->rt6i_gateway
);
620 static struct rt6_info
*rt6_alloc_clone(struct rt6_info
*ort
, struct in6_addr
*daddr
)
622 struct rt6_info
*rt
= ip6_rt_copy(ort
);
624 ipv6_addr_copy(&rt
->rt6i_dst
.addr
, daddr
);
625 rt
->rt6i_dst
.plen
= 128;
626 rt
->rt6i_flags
|= RTF_CACHE
;
627 rt
->u
.dst
.flags
|= DST_HOST
;
628 rt
->rt6i_nexthop
= neigh_clone(ort
->rt6i_nexthop
);
633 static struct rt6_info
*ip6_pol_route_input(struct fib6_table
*table
,
634 struct flowi
*fl
, int flags
)
636 struct fib6_node
*fn
;
637 struct rt6_info
*rt
, *nrt
;
641 int reachable
= RT6_LOOKUP_F_REACHABLE
;
643 strict
|= flags
& RT6_LOOKUP_F_IFACE
;
646 read_lock_bh(&table
->tb6_lock
);
649 fn
= fib6_lookup(&table
->tb6_root
, &fl
->fl6_dst
, &fl
->fl6_src
);
652 rt
= rt6_select(&fn
->leaf
, fl
->iif
, strict
| reachable
);
653 BACKTRACK(&fl
->fl6_src
);
654 if (rt
== &ip6_null_entry
||
655 rt
->rt6i_flags
& RTF_CACHE
)
658 dst_hold(&rt
->u
.dst
);
659 read_unlock_bh(&table
->tb6_lock
);
661 if (!rt
->rt6i_nexthop
&& !(rt
->rt6i_flags
& RTF_NONEXTHOP
))
662 nrt
= rt6_alloc_cow(rt
, &fl
->fl6_dst
, &fl
->fl6_src
);
664 #if CLONE_OFFLINK_ROUTE
665 nrt
= rt6_alloc_clone(rt
, &fl
->fl6_dst
);
671 dst_release(&rt
->u
.dst
);
672 rt
= nrt
? : &ip6_null_entry
;
674 dst_hold(&rt
->u
.dst
);
676 err
= ip6_ins_rt(nrt
);
685 * Race condition! In the gap, when table->tb6_lock was
686 * released someone could insert this route. Relookup.
688 dst_release(&rt
->u
.dst
);
696 dst_hold(&rt
->u
.dst
);
697 read_unlock_bh(&table
->tb6_lock
);
699 rt
->u
.dst
.lastuse
= jiffies
;
705 void ip6_route_input(struct sk_buff
*skb
)
707 struct ipv6hdr
*iph
= skb
->nh
.ipv6h
;
708 int flags
= RT6_LOOKUP_F_HAS_SADDR
;
710 .iif
= skb
->dev
->ifindex
,
715 #ifdef CONFIG_IPV6_ROUTE_FWMARK
716 .fwmark
= skb
->nfmark
,
718 .flowlabel
= (* (u32
*) iph
)&IPV6_FLOWINFO_MASK
,
721 .proto
= iph
->nexthdr
,
724 if (rt6_need_strict(&iph
->daddr
))
725 flags
|= RT6_LOOKUP_F_IFACE
;
727 skb
->dst
= fib6_rule_lookup(&fl
, flags
, ip6_pol_route_input
);
730 static struct rt6_info
*ip6_pol_route_output(struct fib6_table
*table
,
731 struct flowi
*fl
, int flags
)
733 struct fib6_node
*fn
;
734 struct rt6_info
*rt
, *nrt
;
738 int reachable
= RT6_LOOKUP_F_REACHABLE
;
740 strict
|= flags
& RT6_LOOKUP_F_IFACE
;
743 read_lock_bh(&table
->tb6_lock
);
746 fn
= fib6_lookup(&table
->tb6_root
, &fl
->fl6_dst
, &fl
->fl6_src
);
749 rt
= rt6_select(&fn
->leaf
, fl
->oif
, strict
| reachable
);
750 BACKTRACK(&fl
->fl6_src
);
751 if (rt
== &ip6_null_entry
||
752 rt
->rt6i_flags
& RTF_CACHE
)
755 dst_hold(&rt
->u
.dst
);
756 read_unlock_bh(&table
->tb6_lock
);
758 if (!rt
->rt6i_nexthop
&& !(rt
->rt6i_flags
& RTF_NONEXTHOP
))
759 nrt
= rt6_alloc_cow(rt
, &fl
->fl6_dst
, &fl
->fl6_src
);
761 #if CLONE_OFFLINK_ROUTE
762 nrt
= rt6_alloc_clone(rt
, &fl
->fl6_dst
);
768 dst_release(&rt
->u
.dst
);
769 rt
= nrt
? : &ip6_null_entry
;
771 dst_hold(&rt
->u
.dst
);
773 err
= ip6_ins_rt(nrt
);
782 * Race condition! In the gap, when table->tb6_lock was
783 * released someone could insert this route. Relookup.
785 dst_release(&rt
->u
.dst
);
793 dst_hold(&rt
->u
.dst
);
794 read_unlock_bh(&table
->tb6_lock
);
796 rt
->u
.dst
.lastuse
= jiffies
;
801 struct dst_entry
* ip6_route_output(struct sock
*sk
, struct flowi
*fl
)
805 if (rt6_need_strict(&fl
->fl6_dst
))
806 flags
|= RT6_LOOKUP_F_IFACE
;
808 if (!ipv6_addr_any(&fl
->fl6_src
))
809 flags
|= RT6_LOOKUP_F_HAS_SADDR
;
811 return fib6_rule_lookup(fl
, flags
, ip6_pol_route_output
);
816 * Destination cache support functions
819 static struct dst_entry
*ip6_dst_check(struct dst_entry
*dst
, u32 cookie
)
823 rt
= (struct rt6_info
*) dst
;
825 if (rt
&& rt
->rt6i_node
&& (rt
->rt6i_node
->fn_sernum
== cookie
))
831 static struct dst_entry
*ip6_negative_advice(struct dst_entry
*dst
)
833 struct rt6_info
*rt
= (struct rt6_info
*) dst
;
836 if (rt
->rt6i_flags
& RTF_CACHE
)
844 static void ip6_link_failure(struct sk_buff
*skb
)
848 icmpv6_send(skb
, ICMPV6_DEST_UNREACH
, ICMPV6_ADDR_UNREACH
, 0, skb
->dev
);
850 rt
= (struct rt6_info
*) skb
->dst
;
852 if (rt
->rt6i_flags
&RTF_CACHE
) {
853 dst_set_expires(&rt
->u
.dst
, 0);
854 rt
->rt6i_flags
|= RTF_EXPIRES
;
855 } else if (rt
->rt6i_node
&& (rt
->rt6i_flags
& RTF_DEFAULT
))
856 rt
->rt6i_node
->fn_sernum
= -1;
860 static void ip6_rt_update_pmtu(struct dst_entry
*dst
, u32 mtu
)
862 struct rt6_info
*rt6
= (struct rt6_info
*)dst
;
864 if (mtu
< dst_mtu(dst
) && rt6
->rt6i_dst
.plen
== 128) {
865 rt6
->rt6i_flags
|= RTF_MODIFIED
;
866 if (mtu
< IPV6_MIN_MTU
) {
868 dst
->metrics
[RTAX_FEATURES
-1] |= RTAX_FEATURE_ALLFRAG
;
870 dst
->metrics
[RTAX_MTU
-1] = mtu
;
871 call_netevent_notifiers(NETEVENT_PMTU_UPDATE
, dst
);
875 static int ipv6_get_mtu(struct net_device
*dev
);
877 static inline unsigned int ipv6_advmss(unsigned int mtu
)
879 mtu
-= sizeof(struct ipv6hdr
) + sizeof(struct tcphdr
);
881 if (mtu
< ip6_rt_min_advmss
)
882 mtu
= ip6_rt_min_advmss
;
885 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
886 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
887 * IPV6_MAXPLEN is also valid and means: "any MSS,
888 * rely only on pmtu discovery"
890 if (mtu
> IPV6_MAXPLEN
- sizeof(struct tcphdr
))
895 static struct dst_entry
*ndisc_dst_gc_list
;
896 static DEFINE_SPINLOCK(ndisc_lock
);
898 struct dst_entry
*ndisc_dst_alloc(struct net_device
*dev
,
899 struct neighbour
*neigh
,
900 struct in6_addr
*addr
,
901 int (*output
)(struct sk_buff
*))
904 struct inet6_dev
*idev
= in6_dev_get(dev
);
906 if (unlikely(idev
== NULL
))
909 rt
= ip6_dst_alloc();
910 if (unlikely(rt
== NULL
)) {
919 neigh
= ndisc_get_neigh(dev
, addr
);
922 rt
->rt6i_idev
= idev
;
923 rt
->rt6i_nexthop
= neigh
;
924 atomic_set(&rt
->u
.dst
.__refcnt
, 1);
925 rt
->u
.dst
.metrics
[RTAX_HOPLIMIT
-1] = 255;
926 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = ipv6_get_mtu(rt
->rt6i_dev
);
927 rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = ipv6_advmss(dst_mtu(&rt
->u
.dst
));
928 rt
->u
.dst
.output
= output
;
930 #if 0 /* there's no chance to use these for ndisc */
931 rt
->u
.dst
.flags
= ipv6_addr_type(addr
) & IPV6_ADDR_UNICAST
934 ipv6_addr_copy(&rt
->rt6i_dst
.addr
, addr
);
935 rt
->rt6i_dst
.plen
= 128;
938 spin_lock_bh(&ndisc_lock
);
939 rt
->u
.dst
.next
= ndisc_dst_gc_list
;
940 ndisc_dst_gc_list
= &rt
->u
.dst
;
941 spin_unlock_bh(&ndisc_lock
);
943 fib6_force_start_gc();
946 return (struct dst_entry
*)rt
;
949 int ndisc_dst_gc(int *more
)
951 struct dst_entry
*dst
, *next
, **pprev
;
957 spin_lock_bh(&ndisc_lock
);
958 pprev
= &ndisc_dst_gc_list
;
960 while ((dst
= *pprev
) != NULL
) {
961 if (!atomic_read(&dst
->__refcnt
)) {
971 spin_unlock_bh(&ndisc_lock
);
976 static int ip6_dst_gc(void)
978 static unsigned expire
= 30*HZ
;
979 static unsigned long last_gc
;
980 unsigned long now
= jiffies
;
982 if (time_after(last_gc
+ ip6_rt_gc_min_interval
, now
) &&
983 atomic_read(&ip6_dst_ops
.entries
) <= ip6_rt_max_size
)
989 if (atomic_read(&ip6_dst_ops
.entries
) < ip6_dst_ops
.gc_thresh
)
990 expire
= ip6_rt_gc_timeout
>>1;
993 expire
-= expire
>>ip6_rt_gc_elasticity
;
994 return (atomic_read(&ip6_dst_ops
.entries
) > ip6_rt_max_size
);
997 /* Clean host part of a prefix. Not necessary in radix tree,
998 but results in cleaner routing tables.
1000 Remove it only when all the things will work!
1003 static int ipv6_get_mtu(struct net_device
*dev
)
1005 int mtu
= IPV6_MIN_MTU
;
1006 struct inet6_dev
*idev
;
1008 idev
= in6_dev_get(dev
);
1010 mtu
= idev
->cnf
.mtu6
;
1016 int ipv6_get_hoplimit(struct net_device
*dev
)
1018 int hoplimit
= ipv6_devconf
.hop_limit
;
1019 struct inet6_dev
*idev
;
1021 idev
= in6_dev_get(dev
);
1023 hoplimit
= idev
->cnf
.hop_limit
;
1033 int ip6_route_add(struct fib6_config
*cfg
)
1036 struct rt6_info
*rt
= NULL
;
1037 struct net_device
*dev
= NULL
;
1038 struct inet6_dev
*idev
= NULL
;
1039 struct fib6_table
*table
;
1042 if (cfg
->fc_dst_len
> 128 || cfg
->fc_src_len
> 128)
1044 #ifndef CONFIG_IPV6_SUBTREES
1045 if (cfg
->fc_src_len
)
1048 if (cfg
->fc_ifindex
) {
1050 dev
= dev_get_by_index(cfg
->fc_ifindex
);
1053 idev
= in6_dev_get(dev
);
1058 if (cfg
->fc_metric
== 0)
1059 cfg
->fc_metric
= IP6_RT_PRIO_USER
;
1061 table
= fib6_new_table(cfg
->fc_table
);
1062 if (table
== NULL
) {
1067 rt
= ip6_dst_alloc();
1074 rt
->u
.dst
.obsolete
= -1;
1075 rt
->rt6i_expires
= jiffies
+ clock_t_to_jiffies(cfg
->fc_expires
);
1077 if (cfg
->fc_protocol
== RTPROT_UNSPEC
)
1078 cfg
->fc_protocol
= RTPROT_BOOT
;
1079 rt
->rt6i_protocol
= cfg
->fc_protocol
;
1081 addr_type
= ipv6_addr_type(&cfg
->fc_dst
);
1083 if (addr_type
& IPV6_ADDR_MULTICAST
)
1084 rt
->u
.dst
.input
= ip6_mc_input
;
1086 rt
->u
.dst
.input
= ip6_forward
;
1088 rt
->u
.dst
.output
= ip6_output
;
1090 ipv6_addr_prefix(&rt
->rt6i_dst
.addr
, &cfg
->fc_dst
, cfg
->fc_dst_len
);
1091 rt
->rt6i_dst
.plen
= cfg
->fc_dst_len
;
1092 if (rt
->rt6i_dst
.plen
== 128)
1093 rt
->u
.dst
.flags
= DST_HOST
;
1095 #ifdef CONFIG_IPV6_SUBTREES
1096 ipv6_addr_prefix(&rt
->rt6i_src
.addr
, &cfg
->fc_src
, cfg
->fc_src_len
);
1097 rt
->rt6i_src
.plen
= cfg
->fc_src_len
;
1100 rt
->rt6i_metric
= cfg
->fc_metric
;
1102 /* We cannot add true routes via loopback here,
1103 they would result in kernel looping; promote them to reject routes
1105 if ((cfg
->fc_flags
& RTF_REJECT
) ||
1106 (dev
&& (dev
->flags
&IFF_LOOPBACK
) && !(addr_type
&IPV6_ADDR_LOOPBACK
))) {
1107 /* hold loopback dev/idev if we haven't done so. */
1108 if (dev
!= &loopback_dev
) {
1113 dev
= &loopback_dev
;
1115 idev
= in6_dev_get(dev
);
1121 rt
->u
.dst
.output
= ip6_pkt_discard_out
;
1122 rt
->u
.dst
.input
= ip6_pkt_discard
;
1123 rt
->u
.dst
.error
= -ENETUNREACH
;
1124 rt
->rt6i_flags
= RTF_REJECT
|RTF_NONEXTHOP
;
1128 if (cfg
->fc_flags
& RTF_GATEWAY
) {
1129 struct in6_addr
*gw_addr
;
1132 gw_addr
= &cfg
->fc_gateway
;
1133 ipv6_addr_copy(&rt
->rt6i_gateway
, gw_addr
);
1134 gwa_type
= ipv6_addr_type(gw_addr
);
1136 if (gwa_type
!= (IPV6_ADDR_LINKLOCAL
|IPV6_ADDR_UNICAST
)) {
1137 struct rt6_info
*grt
;
1139 /* IPv6 strictly inhibits using not link-local
1140 addresses as nexthop address.
1141 Otherwise, router will not able to send redirects.
1142 It is very good, but in some (rare!) circumstances
1143 (SIT, PtP, NBMA NOARP links) it is handy to allow
1144 some exceptions. --ANK
1147 if (!(gwa_type
&IPV6_ADDR_UNICAST
))
1150 grt
= rt6_lookup(gw_addr
, NULL
, cfg
->fc_ifindex
, 1);
1152 err
= -EHOSTUNREACH
;
1156 if (dev
!= grt
->rt6i_dev
) {
1157 dst_release(&grt
->u
.dst
);
1161 dev
= grt
->rt6i_dev
;
1162 idev
= grt
->rt6i_idev
;
1164 in6_dev_hold(grt
->rt6i_idev
);
1166 if (!(grt
->rt6i_flags
&RTF_GATEWAY
))
1168 dst_release(&grt
->u
.dst
);
1174 if (dev
== NULL
|| (dev
->flags
&IFF_LOOPBACK
))
1182 if (cfg
->fc_flags
& (RTF_GATEWAY
| RTF_NONEXTHOP
)) {
1183 rt
->rt6i_nexthop
= __neigh_lookup_errno(&nd_tbl
, &rt
->rt6i_gateway
, dev
);
1184 if (IS_ERR(rt
->rt6i_nexthop
)) {
1185 err
= PTR_ERR(rt
->rt6i_nexthop
);
1186 rt
->rt6i_nexthop
= NULL
;
1191 rt
->rt6i_flags
= cfg
->fc_flags
;
1198 nla_for_each_attr(nla
, cfg
->fc_mx
, cfg
->fc_mx_len
, remaining
) {
1199 int type
= nla
->nla_type
;
1202 if (type
> RTAX_MAX
) {
1207 rt
->u
.dst
.metrics
[type
- 1] = nla_get_u32(nla
);
1212 if (rt
->u
.dst
.metrics
[RTAX_HOPLIMIT
-1] == 0)
1213 rt
->u
.dst
.metrics
[RTAX_HOPLIMIT
-1] = -1;
1214 if (!rt
->u
.dst
.metrics
[RTAX_MTU
-1])
1215 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = ipv6_get_mtu(dev
);
1216 if (!rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1])
1217 rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = ipv6_advmss(dst_mtu(&rt
->u
.dst
));
1218 rt
->u
.dst
.dev
= dev
;
1219 rt
->rt6i_idev
= idev
;
1220 rt
->rt6i_table
= table
;
1221 return __ip6_ins_rt(rt
, &cfg
->fc_nlinfo
);
1229 dst_free((struct dst_entry
*) rt
);
1233 static int __ip6_del_rt(struct rt6_info
*rt
, struct nl_info
*info
)
1236 struct fib6_table
*table
;
1238 if (rt
== &ip6_null_entry
)
1241 table
= rt
->rt6i_table
;
1242 write_lock_bh(&table
->tb6_lock
);
1244 err
= fib6_del(rt
, info
);
1245 dst_release(&rt
->u
.dst
);
1247 write_unlock_bh(&table
->tb6_lock
);
1252 int ip6_del_rt(struct rt6_info
*rt
)
1254 return __ip6_del_rt(rt
, NULL
);
1257 static int ip6_route_del(struct fib6_config
*cfg
)
1259 struct fib6_table
*table
;
1260 struct fib6_node
*fn
;
1261 struct rt6_info
*rt
;
1264 table
= fib6_get_table(cfg
->fc_table
);
1268 read_lock_bh(&table
->tb6_lock
);
1270 fn
= fib6_locate(&table
->tb6_root
,
1271 &cfg
->fc_dst
, cfg
->fc_dst_len
,
1272 &cfg
->fc_src
, cfg
->fc_src_len
);
1275 for (rt
= fn
->leaf
; rt
; rt
= rt
->u
.next
) {
1276 if (cfg
->fc_ifindex
&&
1277 (rt
->rt6i_dev
== NULL
||
1278 rt
->rt6i_dev
->ifindex
!= cfg
->fc_ifindex
))
1280 if (cfg
->fc_flags
& RTF_GATEWAY
&&
1281 !ipv6_addr_equal(&cfg
->fc_gateway
, &rt
->rt6i_gateway
))
1283 if (cfg
->fc_metric
&& cfg
->fc_metric
!= rt
->rt6i_metric
)
1285 dst_hold(&rt
->u
.dst
);
1286 read_unlock_bh(&table
->tb6_lock
);
1288 return __ip6_del_rt(rt
, &cfg
->fc_nlinfo
);
1291 read_unlock_bh(&table
->tb6_lock
);
1299 struct ip6rd_flowi
{
1301 struct in6_addr gateway
;
1304 static struct rt6_info
*__ip6_route_redirect(struct fib6_table
*table
,
1308 struct ip6rd_flowi
*rdfl
= (struct ip6rd_flowi
*)fl
;
1309 struct rt6_info
*rt
;
1310 struct fib6_node
*fn
;
1313 * Get the "current" route for this destination and
1314 * check if the redirect has come from approriate router.
1316 * RFC 2461 specifies that redirects should only be
1317 * accepted if they come from the nexthop to the target.
1318 * Due to the way the routes are chosen, this notion
1319 * is a bit fuzzy and one might need to check all possible
1323 read_lock_bh(&table
->tb6_lock
);
1324 fn
= fib6_lookup(&table
->tb6_root
, &fl
->fl6_dst
, &fl
->fl6_src
);
1326 for (rt
= fn
->leaf
; rt
; rt
= rt
->u
.next
) {
1328 * Current route is on-link; redirect is always invalid.
1330 * Seems, previous statement is not true. It could
1331 * be node, which looks for us as on-link (f.e. proxy ndisc)
1332 * But then router serving it might decide, that we should
1333 * know truth 8)8) --ANK (980726).
1335 if (rt6_check_expired(rt
))
1337 if (!(rt
->rt6i_flags
& RTF_GATEWAY
))
1339 if (fl
->oif
!= rt
->rt6i_dev
->ifindex
)
1341 if (!ipv6_addr_equal(&rdfl
->gateway
, &rt
->rt6i_gateway
))
1347 rt
= &ip6_null_entry
;
1348 BACKTRACK(&fl
->fl6_src
);
1350 dst_hold(&rt
->u
.dst
);
1352 read_unlock_bh(&table
->tb6_lock
);
1357 static struct rt6_info
*ip6_route_redirect(struct in6_addr
*dest
,
1358 struct in6_addr
*src
,
1359 struct in6_addr
*gateway
,
1360 struct net_device
*dev
)
1362 int flags
= RT6_LOOKUP_F_HAS_SADDR
;
1363 struct ip6rd_flowi rdfl
= {
1365 .oif
= dev
->ifindex
,
1373 .gateway
= *gateway
,
1376 if (rt6_need_strict(dest
))
1377 flags
|= RT6_LOOKUP_F_IFACE
;
1379 return (struct rt6_info
*)fib6_rule_lookup((struct flowi
*)&rdfl
, flags
, __ip6_route_redirect
);
1382 void rt6_redirect(struct in6_addr
*dest
, struct in6_addr
*src
,
1383 struct in6_addr
*saddr
,
1384 struct neighbour
*neigh
, u8
*lladdr
, int on_link
)
1386 struct rt6_info
*rt
, *nrt
= NULL
;
1387 struct netevent_redirect netevent
;
1389 rt
= ip6_route_redirect(dest
, src
, saddr
, neigh
->dev
);
1391 if (rt
== &ip6_null_entry
) {
1392 if (net_ratelimit())
1393 printk(KERN_DEBUG
"rt6_redirect: source isn't a valid nexthop "
1394 "for redirect target\n");
1399 * We have finally decided to accept it.
1402 neigh_update(neigh
, lladdr
, NUD_STALE
,
1403 NEIGH_UPDATE_F_WEAK_OVERRIDE
|
1404 NEIGH_UPDATE_F_OVERRIDE
|
1405 (on_link
? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER
|
1406 NEIGH_UPDATE_F_ISROUTER
))
1410 * Redirect received -> path was valid.
1411 * Look, redirects are sent only in response to data packets,
1412 * so that this nexthop apparently is reachable. --ANK
1414 dst_confirm(&rt
->u
.dst
);
1416 /* Duplicate redirect: silently ignore. */
1417 if (neigh
== rt
->u
.dst
.neighbour
)
1420 nrt
= ip6_rt_copy(rt
);
1424 nrt
->rt6i_flags
= RTF_GATEWAY
|RTF_UP
|RTF_DYNAMIC
|RTF_CACHE
;
1426 nrt
->rt6i_flags
&= ~RTF_GATEWAY
;
1428 ipv6_addr_copy(&nrt
->rt6i_dst
.addr
, dest
);
1429 nrt
->rt6i_dst
.plen
= 128;
1430 nrt
->u
.dst
.flags
|= DST_HOST
;
1432 ipv6_addr_copy(&nrt
->rt6i_gateway
, (struct in6_addr
*)neigh
->primary_key
);
1433 nrt
->rt6i_nexthop
= neigh_clone(neigh
);
1434 /* Reset pmtu, it may be better */
1435 nrt
->u
.dst
.metrics
[RTAX_MTU
-1] = ipv6_get_mtu(neigh
->dev
);
1436 nrt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = ipv6_advmss(dst_mtu(&nrt
->u
.dst
));
1438 if (ip6_ins_rt(nrt
))
1441 netevent
.old
= &rt
->u
.dst
;
1442 netevent
.new = &nrt
->u
.dst
;
1443 call_netevent_notifiers(NETEVENT_REDIRECT
, &netevent
);
1445 if (rt
->rt6i_flags
&RTF_CACHE
) {
1451 dst_release(&rt
->u
.dst
);
1456 * Handle ICMP "packet too big" messages
1457 * i.e. Path MTU discovery
1460 void rt6_pmtu_discovery(struct in6_addr
*daddr
, struct in6_addr
*saddr
,
1461 struct net_device
*dev
, u32 pmtu
)
1463 struct rt6_info
*rt
, *nrt
;
1466 rt
= rt6_lookup(daddr
, saddr
, dev
->ifindex
, 0);
1470 if (pmtu
>= dst_mtu(&rt
->u
.dst
))
1473 if (pmtu
< IPV6_MIN_MTU
) {
1475 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
1476 * MTU (1280) and a fragment header should always be included
1477 * after a node receiving Too Big message reporting PMTU is
1478 * less than the IPv6 Minimum Link MTU.
1480 pmtu
= IPV6_MIN_MTU
;
1484 /* New mtu received -> path was valid.
1485 They are sent only in response to data packets,
1486 so that this nexthop apparently is reachable. --ANK
1488 dst_confirm(&rt
->u
.dst
);
1490 /* Host route. If it is static, it would be better
1491 not to override it, but add new one, so that
1492 when cache entry will expire old pmtu
1493 would return automatically.
1495 if (rt
->rt6i_flags
& RTF_CACHE
) {
1496 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = pmtu
;
1498 rt
->u
.dst
.metrics
[RTAX_FEATURES
-1] |= RTAX_FEATURE_ALLFRAG
;
1499 dst_set_expires(&rt
->u
.dst
, ip6_rt_mtu_expires
);
1500 rt
->rt6i_flags
|= RTF_MODIFIED
|RTF_EXPIRES
;
1505 Two cases are possible:
1506 1. It is connected route. Action: COW
1507 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
1509 if (!rt
->rt6i_nexthop
&& !(rt
->rt6i_flags
& RTF_NONEXTHOP
))
1510 nrt
= rt6_alloc_cow(rt
, daddr
, saddr
);
1512 nrt
= rt6_alloc_clone(rt
, daddr
);
1515 nrt
->u
.dst
.metrics
[RTAX_MTU
-1] = pmtu
;
1517 nrt
->u
.dst
.metrics
[RTAX_FEATURES
-1] |= RTAX_FEATURE_ALLFRAG
;
1519 /* According to RFC 1981, detecting PMTU increase shouldn't be
1520 * happened within 5 mins, the recommended timer is 10 mins.
1521 * Here this route expiration time is set to ip6_rt_mtu_expires
1522 * which is 10 mins. After 10 mins the decreased pmtu is expired
1523 * and detecting PMTU increase will be automatically happened.
1525 dst_set_expires(&nrt
->u
.dst
, ip6_rt_mtu_expires
);
1526 nrt
->rt6i_flags
|= RTF_DYNAMIC
|RTF_EXPIRES
;
1531 dst_release(&rt
->u
.dst
);
1535 * Misc support functions
1538 static struct rt6_info
* ip6_rt_copy(struct rt6_info
*ort
)
1540 struct rt6_info
*rt
= ip6_dst_alloc();
1543 rt
->u
.dst
.input
= ort
->u
.dst
.input
;
1544 rt
->u
.dst
.output
= ort
->u
.dst
.output
;
1546 memcpy(rt
->u
.dst
.metrics
, ort
->u
.dst
.metrics
, RTAX_MAX
*sizeof(u32
));
1547 rt
->u
.dst
.error
= ort
->u
.dst
.error
;
1548 rt
->u
.dst
.dev
= ort
->u
.dst
.dev
;
1550 dev_hold(rt
->u
.dst
.dev
);
1551 rt
->rt6i_idev
= ort
->rt6i_idev
;
1553 in6_dev_hold(rt
->rt6i_idev
);
1554 rt
->u
.dst
.lastuse
= jiffies
;
1555 rt
->rt6i_expires
= 0;
1557 ipv6_addr_copy(&rt
->rt6i_gateway
, &ort
->rt6i_gateway
);
1558 rt
->rt6i_flags
= ort
->rt6i_flags
& ~RTF_EXPIRES
;
1559 rt
->rt6i_metric
= 0;
1561 memcpy(&rt
->rt6i_dst
, &ort
->rt6i_dst
, sizeof(struct rt6key
));
1562 #ifdef CONFIG_IPV6_SUBTREES
1563 memcpy(&rt
->rt6i_src
, &ort
->rt6i_src
, sizeof(struct rt6key
));
1565 rt
->rt6i_table
= ort
->rt6i_table
;
1570 #ifdef CONFIG_IPV6_ROUTE_INFO
1571 static struct rt6_info
*rt6_get_route_info(struct in6_addr
*prefix
, int prefixlen
,
1572 struct in6_addr
*gwaddr
, int ifindex
)
1574 struct fib6_node
*fn
;
1575 struct rt6_info
*rt
= NULL
;
1576 struct fib6_table
*table
;
1578 table
= fib6_get_table(RT6_TABLE_INFO
);
1582 write_lock_bh(&table
->tb6_lock
);
1583 fn
= fib6_locate(&table
->tb6_root
, prefix
,prefixlen
, NULL
, 0);
1587 for (rt
= fn
->leaf
; rt
; rt
= rt
->u
.next
) {
1588 if (rt
->rt6i_dev
->ifindex
!= ifindex
)
1590 if ((rt
->rt6i_flags
& (RTF_ROUTEINFO
|RTF_GATEWAY
)) != (RTF_ROUTEINFO
|RTF_GATEWAY
))
1592 if (!ipv6_addr_equal(&rt
->rt6i_gateway
, gwaddr
))
1594 dst_hold(&rt
->u
.dst
);
1598 write_unlock_bh(&table
->tb6_lock
);
1602 static struct rt6_info
*rt6_add_route_info(struct in6_addr
*prefix
, int prefixlen
,
1603 struct in6_addr
*gwaddr
, int ifindex
,
1606 struct fib6_config cfg
= {
1607 .fc_table
= RT6_TABLE_INFO
,
1609 .fc_ifindex
= ifindex
,
1610 .fc_dst_len
= prefixlen
,
1611 .fc_flags
= RTF_GATEWAY
| RTF_ADDRCONF
| RTF_ROUTEINFO
|
1612 RTF_UP
| RTF_PREF(pref
),
1615 ipv6_addr_copy(&cfg
.fc_dst
, prefix
);
1616 ipv6_addr_copy(&cfg
.fc_gateway
, gwaddr
);
1618 /* We should treat it as a default route if prefix length is 0. */
1620 cfg
.fc_flags
|= RTF_DEFAULT
;
1622 ip6_route_add(&cfg
);
1624 return rt6_get_route_info(prefix
, prefixlen
, gwaddr
, ifindex
);
1628 struct rt6_info
*rt6_get_dflt_router(struct in6_addr
*addr
, struct net_device
*dev
)
1630 struct rt6_info
*rt
;
1631 struct fib6_table
*table
;
1633 table
= fib6_get_table(RT6_TABLE_DFLT
);
1637 write_lock_bh(&table
->tb6_lock
);
1638 for (rt
= table
->tb6_root
.leaf
; rt
; rt
=rt
->u
.next
) {
1639 if (dev
== rt
->rt6i_dev
&&
1640 ((rt
->rt6i_flags
& (RTF_ADDRCONF
| RTF_DEFAULT
)) == (RTF_ADDRCONF
| RTF_DEFAULT
)) &&
1641 ipv6_addr_equal(&rt
->rt6i_gateway
, addr
))
1645 dst_hold(&rt
->u
.dst
);
1646 write_unlock_bh(&table
->tb6_lock
);
1650 struct rt6_info
*rt6_add_dflt_router(struct in6_addr
*gwaddr
,
1651 struct net_device
*dev
,
1654 struct fib6_config cfg
= {
1655 .fc_table
= RT6_TABLE_DFLT
,
1657 .fc_ifindex
= dev
->ifindex
,
1658 .fc_flags
= RTF_GATEWAY
| RTF_ADDRCONF
| RTF_DEFAULT
|
1659 RTF_UP
| RTF_EXPIRES
| RTF_PREF(pref
),
1662 ipv6_addr_copy(&cfg
.fc_gateway
, gwaddr
);
1664 ip6_route_add(&cfg
);
1666 return rt6_get_dflt_router(gwaddr
, dev
);
1669 void rt6_purge_dflt_routers(void)
1671 struct rt6_info
*rt
;
1672 struct fib6_table
*table
;
1674 /* NOTE: Keep consistent with rt6_get_dflt_router */
1675 table
= fib6_get_table(RT6_TABLE_DFLT
);
1680 read_lock_bh(&table
->tb6_lock
);
1681 for (rt
= table
->tb6_root
.leaf
; rt
; rt
= rt
->u
.next
) {
1682 if (rt
->rt6i_flags
& (RTF_DEFAULT
| RTF_ADDRCONF
)) {
1683 dst_hold(&rt
->u
.dst
);
1684 read_unlock_bh(&table
->tb6_lock
);
1689 read_unlock_bh(&table
->tb6_lock
);
1692 static void rtmsg_to_fib6_config(struct in6_rtmsg
*rtmsg
,
1693 struct fib6_config
*cfg
)
1695 memset(cfg
, 0, sizeof(*cfg
));
1697 cfg
->fc_table
= RT6_TABLE_MAIN
;
1698 cfg
->fc_ifindex
= rtmsg
->rtmsg_ifindex
;
1699 cfg
->fc_metric
= rtmsg
->rtmsg_metric
;
1700 cfg
->fc_expires
= rtmsg
->rtmsg_info
;
1701 cfg
->fc_dst_len
= rtmsg
->rtmsg_dst_len
;
1702 cfg
->fc_src_len
= rtmsg
->rtmsg_src_len
;
1703 cfg
->fc_flags
= rtmsg
->rtmsg_flags
;
1705 ipv6_addr_copy(&cfg
->fc_dst
, &rtmsg
->rtmsg_dst
);
1706 ipv6_addr_copy(&cfg
->fc_src
, &rtmsg
->rtmsg_src
);
1707 ipv6_addr_copy(&cfg
->fc_gateway
, &rtmsg
->rtmsg_gateway
);
1710 int ipv6_route_ioctl(unsigned int cmd
, void __user
*arg
)
1712 struct fib6_config cfg
;
1713 struct in6_rtmsg rtmsg
;
1717 case SIOCADDRT
: /* Add a route */
1718 case SIOCDELRT
: /* Delete a route */
1719 if (!capable(CAP_NET_ADMIN
))
1721 err
= copy_from_user(&rtmsg
, arg
,
1722 sizeof(struct in6_rtmsg
));
1726 rtmsg_to_fib6_config(&rtmsg
, &cfg
);
1731 err
= ip6_route_add(&cfg
);
1734 err
= ip6_route_del(&cfg
);
1748 * Drop the packet on the floor
1751 static inline int ip6_pkt_drop(struct sk_buff
*skb
, int code
)
1753 int type
= ipv6_addr_type(&skb
->nh
.ipv6h
->daddr
);
1754 if (type
== IPV6_ADDR_ANY
|| type
== IPV6_ADDR_RESERVED
)
1755 IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS
);
1757 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES
);
1758 icmpv6_send(skb
, ICMPV6_DEST_UNREACH
, code
, 0, skb
->dev
);
1763 static int ip6_pkt_discard(struct sk_buff
*skb
)
1765 return ip6_pkt_drop(skb
, ICMPV6_NOROUTE
);
1768 static int ip6_pkt_discard_out(struct sk_buff
*skb
)
1770 skb
->dev
= skb
->dst
->dev
;
1771 return ip6_pkt_discard(skb
);
1774 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1776 static int ip6_pkt_prohibit(struct sk_buff
*skb
)
1778 return ip6_pkt_drop(skb
, ICMPV6_ADM_PROHIBITED
);
1781 static int ip6_pkt_prohibit_out(struct sk_buff
*skb
)
1783 skb
->dev
= skb
->dst
->dev
;
1784 return ip6_pkt_prohibit(skb
);
1787 static int ip6_pkt_blk_hole(struct sk_buff
*skb
)
1796 * Allocate a dst for local (unicast / anycast) address.
1799 struct rt6_info
*addrconf_dst_alloc(struct inet6_dev
*idev
,
1800 const struct in6_addr
*addr
,
1803 struct rt6_info
*rt
= ip6_dst_alloc();
1806 return ERR_PTR(-ENOMEM
);
1808 dev_hold(&loopback_dev
);
1811 rt
->u
.dst
.flags
= DST_HOST
;
1812 rt
->u
.dst
.input
= ip6_input
;
1813 rt
->u
.dst
.output
= ip6_output
;
1814 rt
->rt6i_dev
= &loopback_dev
;
1815 rt
->rt6i_idev
= idev
;
1816 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = ipv6_get_mtu(rt
->rt6i_dev
);
1817 rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = ipv6_advmss(dst_mtu(&rt
->u
.dst
));
1818 rt
->u
.dst
.metrics
[RTAX_HOPLIMIT
-1] = -1;
1819 rt
->u
.dst
.obsolete
= -1;
1821 rt
->rt6i_flags
= RTF_UP
| RTF_NONEXTHOP
;
1823 rt
->rt6i_flags
|= RTF_ANYCAST
;
1825 rt
->rt6i_flags
|= RTF_LOCAL
;
1826 rt
->rt6i_nexthop
= ndisc_get_neigh(rt
->rt6i_dev
, &rt
->rt6i_gateway
);
1827 if (rt
->rt6i_nexthop
== NULL
) {
1828 dst_free((struct dst_entry
*) rt
);
1829 return ERR_PTR(-ENOMEM
);
1832 ipv6_addr_copy(&rt
->rt6i_dst
.addr
, addr
);
1833 rt
->rt6i_dst
.plen
= 128;
1834 rt
->rt6i_table
= fib6_get_table(RT6_TABLE_LOCAL
);
1836 atomic_set(&rt
->u
.dst
.__refcnt
, 1);
1841 static int fib6_ifdown(struct rt6_info
*rt
, void *arg
)
1843 if (((void*)rt
->rt6i_dev
== arg
|| arg
== NULL
) &&
1844 rt
!= &ip6_null_entry
) {
1845 RT6_TRACE("deleted by ifdown %p\n", rt
);
1851 void rt6_ifdown(struct net_device
*dev
)
1853 fib6_clean_all(fib6_ifdown
, 0, dev
);
/* Argument bundle for rt6_mtu_change_route(): the device whose MTU
 * changed and its new value.
 */
struct rt6_mtu_change_arg
{
	struct net_device *dev;
	unsigned mtu;
};
1862 static int rt6_mtu_change_route(struct rt6_info
*rt
, void *p_arg
)
1864 struct rt6_mtu_change_arg
*arg
= (struct rt6_mtu_change_arg
*) p_arg
;
1865 struct inet6_dev
*idev
;
1867 /* In IPv6 pmtu discovery is not optional,
1868 so that RTAX_MTU lock cannot disable it.
1869 We still use this lock to block changes
1870 caused by addrconf/ndisc.
1873 idev
= __in6_dev_get(arg
->dev
);
1877 /* For administrative MTU increase, there is no way to discover
1878 IPv6 PMTU increase, so PMTU increase should be updated here.
1879 Since RFC 1981 doesn't include administrative MTU increase
1880 update PMTU increase is a MUST. (i.e. jumbo frame)
1883 If new MTU is less than route PMTU, this new MTU will be the
1884 lowest MTU in the path, update the route PMTU to reflect PMTU
1885 decreases; if new MTU is greater than route PMTU, and the
1886 old MTU is the lowest MTU in the path, update the route PMTU
1887 to reflect the increase. In this case if the other nodes' MTU
1888 also have the lowest MTU, TOO BIG MESSAGE will be lead to
1891 if (rt
->rt6i_dev
== arg
->dev
&&
1892 !dst_metric_locked(&rt
->u
.dst
, RTAX_MTU
) &&
1893 (dst_mtu(&rt
->u
.dst
) > arg
->mtu
||
1894 (dst_mtu(&rt
->u
.dst
) < arg
->mtu
&&
1895 dst_mtu(&rt
->u
.dst
) == idev
->cnf
.mtu6
)))
1896 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = arg
->mtu
;
1897 rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = ipv6_advmss(arg
->mtu
);
1901 void rt6_mtu_change(struct net_device
*dev
, unsigned mtu
)
1903 struct rt6_mtu_change_arg arg
= {
1908 fib6_clean_all(rt6_mtu_change_route
, 0, &arg
);
1911 static struct nla_policy rtm_ipv6_policy
[RTA_MAX
+1] __read_mostly
= {
1912 [RTA_GATEWAY
] = { .len
= sizeof(struct in6_addr
) },
1913 [RTA_OIF
] = { .type
= NLA_U32
},
1914 [RTA_IIF
] = { .type
= NLA_U32
},
1915 [RTA_PRIORITY
] = { .type
= NLA_U32
},
1916 [RTA_METRICS
] = { .type
= NLA_NESTED
},
1919 static int rtm_to_fib6_config(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
1920 struct fib6_config
*cfg
)
1923 struct nlattr
*tb
[RTA_MAX
+1];
1926 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
, rtm_ipv6_policy
);
1931 rtm
= nlmsg_data(nlh
);
1932 memset(cfg
, 0, sizeof(*cfg
));
1934 cfg
->fc_table
= rtm
->rtm_table
;
1935 cfg
->fc_dst_len
= rtm
->rtm_dst_len
;
1936 cfg
->fc_src_len
= rtm
->rtm_src_len
;
1937 cfg
->fc_flags
= RTF_UP
;
1938 cfg
->fc_protocol
= rtm
->rtm_protocol
;
1940 if (rtm
->rtm_type
== RTN_UNREACHABLE
)
1941 cfg
->fc_flags
|= RTF_REJECT
;
1943 cfg
->fc_nlinfo
.pid
= NETLINK_CB(skb
).pid
;
1944 cfg
->fc_nlinfo
.nlh
= nlh
;
1946 if (tb
[RTA_GATEWAY
]) {
1947 nla_memcpy(&cfg
->fc_gateway
, tb
[RTA_GATEWAY
], 16);
1948 cfg
->fc_flags
|= RTF_GATEWAY
;
1952 int plen
= (rtm
->rtm_dst_len
+ 7) >> 3;
1954 if (nla_len(tb
[RTA_DST
]) < plen
)
1957 nla_memcpy(&cfg
->fc_dst
, tb
[RTA_DST
], plen
);
1961 int plen
= (rtm
->rtm_src_len
+ 7) >> 3;
1963 if (nla_len(tb
[RTA_SRC
]) < plen
)
1966 nla_memcpy(&cfg
->fc_src
, tb
[RTA_SRC
], plen
);
1970 cfg
->fc_ifindex
= nla_get_u32(tb
[RTA_OIF
]);
1972 if (tb
[RTA_PRIORITY
])
1973 cfg
->fc_metric
= nla_get_u32(tb
[RTA_PRIORITY
]);
1975 if (tb
[RTA_METRICS
]) {
1976 cfg
->fc_mx
= nla_data(tb
[RTA_METRICS
]);
1977 cfg
->fc_mx_len
= nla_len(tb
[RTA_METRICS
]);
1981 cfg
->fc_table
= nla_get_u32(tb
[RTA_TABLE
]);
1988 int inet6_rtm_delroute(struct sk_buff
*skb
, struct nlmsghdr
* nlh
, void *arg
)
1990 struct fib6_config cfg
;
1993 err
= rtm_to_fib6_config(skb
, nlh
, &cfg
);
1997 return ip6_route_del(&cfg
);
2000 int inet6_rtm_newroute(struct sk_buff
*skb
, struct nlmsghdr
* nlh
, void *arg
)
2002 struct fib6_config cfg
;
2005 err
= rtm_to_fib6_config(skb
, nlh
, &cfg
);
2009 return ip6_route_add(&cfg
);
2012 static int rt6_fill_node(struct sk_buff
*skb
, struct rt6_info
*rt
,
2013 struct in6_addr
*dst
, struct in6_addr
*src
,
2014 int iif
, int type
, u32 pid
, u32 seq
,
2015 int prefix
, unsigned int flags
)
2018 struct nlmsghdr
*nlh
;
2019 struct rta_cacheinfo ci
;
2022 if (prefix
) { /* user wants prefix routes only */
2023 if (!(rt
->rt6i_flags
& RTF_PREFIX_RT
)) {
2024 /* success since this is not a prefix route */
2029 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*rtm
), flags
);
2033 rtm
= nlmsg_data(nlh
);
2034 rtm
->rtm_family
= AF_INET6
;
2035 rtm
->rtm_dst_len
= rt
->rt6i_dst
.plen
;
2036 rtm
->rtm_src_len
= rt
->rt6i_src
.plen
;
2039 table
= rt
->rt6i_table
->tb6_id
;
2041 table
= RT6_TABLE_UNSPEC
;
2042 rtm
->rtm_table
= table
;
2043 NLA_PUT_U32(skb
, RTA_TABLE
, table
);
2044 if (rt
->rt6i_flags
&RTF_REJECT
)
2045 rtm
->rtm_type
= RTN_UNREACHABLE
;
2046 else if (rt
->rt6i_dev
&& (rt
->rt6i_dev
->flags
&IFF_LOOPBACK
))
2047 rtm
->rtm_type
= RTN_LOCAL
;
2049 rtm
->rtm_type
= RTN_UNICAST
;
2051 rtm
->rtm_scope
= RT_SCOPE_UNIVERSE
;
2052 rtm
->rtm_protocol
= rt
->rt6i_protocol
;
2053 if (rt
->rt6i_flags
&RTF_DYNAMIC
)
2054 rtm
->rtm_protocol
= RTPROT_REDIRECT
;
2055 else if (rt
->rt6i_flags
& RTF_ADDRCONF
)
2056 rtm
->rtm_protocol
= RTPROT_KERNEL
;
2057 else if (rt
->rt6i_flags
&RTF_DEFAULT
)
2058 rtm
->rtm_protocol
= RTPROT_RA
;
2060 if (rt
->rt6i_flags
&RTF_CACHE
)
2061 rtm
->rtm_flags
|= RTM_F_CLONED
;
2064 NLA_PUT(skb
, RTA_DST
, 16, dst
);
2065 rtm
->rtm_dst_len
= 128;
2066 } else if (rtm
->rtm_dst_len
)
2067 NLA_PUT(skb
, RTA_DST
, 16, &rt
->rt6i_dst
.addr
);
2068 #ifdef CONFIG_IPV6_SUBTREES
2070 NLA_PUT(skb
, RTA_SRC
, 16, src
);
2071 rtm
->rtm_src_len
= 128;
2072 } else if (rtm
->rtm_src_len
)
2073 NLA_PUT(skb
, RTA_SRC
, 16, &rt
->rt6i_src
.addr
);
2076 NLA_PUT_U32(skb
, RTA_IIF
, iif
);
2078 struct in6_addr saddr_buf
;
2079 if (ipv6_get_saddr(&rt
->u
.dst
, dst
, &saddr_buf
) == 0)
2080 NLA_PUT(skb
, RTA_PREFSRC
, 16, &saddr_buf
);
2083 if (rtnetlink_put_metrics(skb
, rt
->u
.dst
.metrics
) < 0)
2084 goto nla_put_failure
;
2086 if (rt
->u
.dst
.neighbour
)
2087 NLA_PUT(skb
, RTA_GATEWAY
, 16, &rt
->u
.dst
.neighbour
->primary_key
);
2090 NLA_PUT_U32(skb
, RTA_OIF
, rt
->rt6i_dev
->ifindex
);
2092 NLA_PUT_U32(skb
, RTA_PRIORITY
, rt
->rt6i_metric
);
2093 ci
.rta_lastuse
= jiffies_to_clock_t(jiffies
- rt
->u
.dst
.lastuse
);
2094 if (rt
->rt6i_expires
)
2095 ci
.rta_expires
= jiffies_to_clock_t(rt
->rt6i_expires
- jiffies
);
2098 ci
.rta_used
= rt
->u
.dst
.__use
;
2099 ci
.rta_clntref
= atomic_read(&rt
->u
.dst
.__refcnt
);
2100 ci
.rta_error
= rt
->u
.dst
.error
;
2104 NLA_PUT(skb
, RTA_CACHEINFO
, sizeof(ci
), &ci
);
2106 return nlmsg_end(skb
, nlh
);
2109 return nlmsg_cancel(skb
, nlh
);
2112 int rt6_dump_route(struct rt6_info
*rt
, void *p_arg
)
2114 struct rt6_rtnl_dump_arg
*arg
= (struct rt6_rtnl_dump_arg
*) p_arg
;
2117 if (nlmsg_len(arg
->cb
->nlh
) >= sizeof(struct rtmsg
)) {
2118 struct rtmsg
*rtm
= nlmsg_data(arg
->cb
->nlh
);
2119 prefix
= (rtm
->rtm_flags
& RTM_F_PREFIX
) != 0;
2123 return rt6_fill_node(arg
->skb
, rt
, NULL
, NULL
, 0, RTM_NEWROUTE
,
2124 NETLINK_CB(arg
->cb
->skb
).pid
, arg
->cb
->nlh
->nlmsg_seq
,
2125 prefix
, NLM_F_MULTI
);
2128 int inet6_rtm_getroute(struct sk_buff
*in_skb
, struct nlmsghdr
* nlh
, void *arg
)
2130 struct nlattr
*tb
[RTA_MAX
+1];
2131 struct rt6_info
*rt
;
2132 struct sk_buff
*skb
;
2137 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
, rtm_ipv6_policy
);
2142 memset(&fl
, 0, sizeof(fl
));
2145 if (nla_len(tb
[RTA_SRC
]) < sizeof(struct in6_addr
))
2148 ipv6_addr_copy(&fl
.fl6_src
, nla_data(tb
[RTA_SRC
]));
2152 if (nla_len(tb
[RTA_DST
]) < sizeof(struct in6_addr
))
2155 ipv6_addr_copy(&fl
.fl6_dst
, nla_data(tb
[RTA_DST
]));
2159 iif
= nla_get_u32(tb
[RTA_IIF
]);
2162 fl
.oif
= nla_get_u32(tb
[RTA_OIF
]);
2165 struct net_device
*dev
;
2166 dev
= __dev_get_by_index(iif
);
2173 skb
= alloc_skb(NLMSG_GOODSIZE
, GFP_KERNEL
);
2179 /* Reserve room for dummy headers, this skb can pass
2180 through good chunk of routing engine.
2182 skb
->mac
.raw
= skb
->data
;
2183 skb_reserve(skb
, MAX_HEADER
+ sizeof(struct ipv6hdr
));
2185 rt
= (struct rt6_info
*) ip6_route_output(NULL
, &fl
);
2186 skb
->dst
= &rt
->u
.dst
;
2188 err
= rt6_fill_node(skb
, rt
, &fl
.fl6_dst
, &fl
.fl6_src
, iif
,
2189 RTM_NEWROUTE
, NETLINK_CB(in_skb
).pid
,
2190 nlh
->nlmsg_seq
, 0, 0);
2196 err
= rtnl_unicast(skb
, NETLINK_CB(in_skb
).pid
);
2201 void inet6_rt_notify(int event
, struct rt6_info
*rt
, struct nl_info
*info
)
2203 struct sk_buff
*skb
;
2204 u32 pid
= 0, seq
= 0;
2205 struct nlmsghdr
*nlh
= NULL
;
2206 int payload
= sizeof(struct rtmsg
) + 256;
2213 seq
= nlh
->nlmsg_seq
;
2216 skb
= nlmsg_new(nlmsg_total_size(payload
), gfp_any());
2220 err
= rt6_fill_node(skb
, rt
, NULL
, NULL
, 0, event
, pid
, seq
, 0, 0);
2226 err
= rtnl_notify(skb
, pid
, RTNLGRP_IPV6_ROUTE
, nlh
, gfp_any());
2229 rtnl_set_sk_err(RTNLGRP_IPV6_ROUTE
, err
);
2236 #ifdef CONFIG_PROC_FS
2238 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2249 static int rt6_info_route(struct rt6_info
*rt
, void *p_arg
)
2251 struct rt6_proc_arg
*arg
= (struct rt6_proc_arg
*) p_arg
;
2254 if (arg
->skip
< arg
->offset
/ RT6_INFO_LEN
) {
2259 if (arg
->len
>= arg
->length
)
2262 for (i
=0; i
<16; i
++) {
2263 sprintf(arg
->buffer
+ arg
->len
, "%02x",
2264 rt
->rt6i_dst
.addr
.s6_addr
[i
]);
2267 arg
->len
+= sprintf(arg
->buffer
+ arg
->len
, " %02x ",
2270 #ifdef CONFIG_IPV6_SUBTREES
2271 for (i
=0; i
<16; i
++) {
2272 sprintf(arg
->buffer
+ arg
->len
, "%02x",
2273 rt
->rt6i_src
.addr
.s6_addr
[i
]);
2276 arg
->len
+= sprintf(arg
->buffer
+ arg
->len
, " %02x ",
2279 sprintf(arg
->buffer
+ arg
->len
,
2280 "00000000000000000000000000000000 00 ");
2284 if (rt
->rt6i_nexthop
) {
2285 for (i
=0; i
<16; i
++) {
2286 sprintf(arg
->buffer
+ arg
->len
, "%02x",
2287 rt
->rt6i_nexthop
->primary_key
[i
]);
2291 sprintf(arg
->buffer
+ arg
->len
,
2292 "00000000000000000000000000000000");
2295 arg
->len
+= sprintf(arg
->buffer
+ arg
->len
,
2296 " %08x %08x %08x %08x %8s\n",
2297 rt
->rt6i_metric
, atomic_read(&rt
->u
.dst
.__refcnt
),
2298 rt
->u
.dst
.__use
, rt
->rt6i_flags
,
2299 rt
->rt6i_dev
? rt
->rt6i_dev
->name
: "");
2303 static int rt6_proc_info(char *buffer
, char **start
, off_t offset
, int length
)
2305 struct rt6_proc_arg arg
= {
2311 fib6_clean_all(rt6_info_route
, 0, &arg
);
2315 *start
+= offset
% RT6_INFO_LEN
;
2317 arg
.len
-= offset
% RT6_INFO_LEN
;
2319 if (arg
.len
> length
)
2327 static int rt6_stats_seq_show(struct seq_file
*seq
, void *v
)
2329 seq_printf(seq
, "%04x %04x %04x %04x %04x %04x %04x\n",
2330 rt6_stats
.fib_nodes
, rt6_stats
.fib_route_nodes
,
2331 rt6_stats
.fib_rt_alloc
, rt6_stats
.fib_rt_entries
,
2332 rt6_stats
.fib_rt_cache
,
2333 atomic_read(&ip6_dst_ops
.entries
),
2334 rt6_stats
.fib_discarded_routes
);
2339 static int rt6_stats_seq_open(struct inode
*inode
, struct file
*file
)
2341 return single_open(file
, rt6_stats_seq_show
, NULL
);
2344 static struct file_operations rt6_stats_seq_fops
= {
2345 .owner
= THIS_MODULE
,
2346 .open
= rt6_stats_seq_open
,
2348 .llseek
= seq_lseek
,
2349 .release
= single_release
,
2351 #endif /* CONFIG_PROC_FS */
2353 #ifdef CONFIG_SYSCTL
2355 static int flush_delay
;
2358 int ipv6_sysctl_rtcache_flush(ctl_table
*ctl
, int write
, struct file
* filp
,
2359 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
2362 proc_dointvec(ctl
, write
, filp
, buffer
, lenp
, ppos
);
2363 fib6_run_gc(flush_delay
<= 0 ? ~0UL : (unsigned long)flush_delay
);
2369 ctl_table ipv6_route_table
[] = {
2371 .ctl_name
= NET_IPV6_ROUTE_FLUSH
,
2372 .procname
= "flush",
2373 .data
= &flush_delay
,
2374 .maxlen
= sizeof(int),
2376 .proc_handler
= &ipv6_sysctl_rtcache_flush
2379 .ctl_name
= NET_IPV6_ROUTE_GC_THRESH
,
2380 .procname
= "gc_thresh",
2381 .data
= &ip6_dst_ops
.gc_thresh
,
2382 .maxlen
= sizeof(int),
2384 .proc_handler
= &proc_dointvec
,
2387 .ctl_name
= NET_IPV6_ROUTE_MAX_SIZE
,
2388 .procname
= "max_size",
2389 .data
= &ip6_rt_max_size
,
2390 .maxlen
= sizeof(int),
2392 .proc_handler
= &proc_dointvec
,
2395 .ctl_name
= NET_IPV6_ROUTE_GC_MIN_INTERVAL
,
2396 .procname
= "gc_min_interval",
2397 .data
= &ip6_rt_gc_min_interval
,
2398 .maxlen
= sizeof(int),
2400 .proc_handler
= &proc_dointvec_jiffies
,
2401 .strategy
= &sysctl_jiffies
,
2404 .ctl_name
= NET_IPV6_ROUTE_GC_TIMEOUT
,
2405 .procname
= "gc_timeout",
2406 .data
= &ip6_rt_gc_timeout
,
2407 .maxlen
= sizeof(int),
2409 .proc_handler
= &proc_dointvec_jiffies
,
2410 .strategy
= &sysctl_jiffies
,
2413 .ctl_name
= NET_IPV6_ROUTE_GC_INTERVAL
,
2414 .procname
= "gc_interval",
2415 .data
= &ip6_rt_gc_interval
,
2416 .maxlen
= sizeof(int),
2418 .proc_handler
= &proc_dointvec_jiffies
,
2419 .strategy
= &sysctl_jiffies
,
2422 .ctl_name
= NET_IPV6_ROUTE_GC_ELASTICITY
,
2423 .procname
= "gc_elasticity",
2424 .data
= &ip6_rt_gc_elasticity
,
2425 .maxlen
= sizeof(int),
2427 .proc_handler
= &proc_dointvec_jiffies
,
2428 .strategy
= &sysctl_jiffies
,
2431 .ctl_name
= NET_IPV6_ROUTE_MTU_EXPIRES
,
2432 .procname
= "mtu_expires",
2433 .data
= &ip6_rt_mtu_expires
,
2434 .maxlen
= sizeof(int),
2436 .proc_handler
= &proc_dointvec_jiffies
,
2437 .strategy
= &sysctl_jiffies
,
2440 .ctl_name
= NET_IPV6_ROUTE_MIN_ADVMSS
,
2441 .procname
= "min_adv_mss",
2442 .data
= &ip6_rt_min_advmss
,
2443 .maxlen
= sizeof(int),
2445 .proc_handler
= &proc_dointvec_jiffies
,
2446 .strategy
= &sysctl_jiffies
,
2449 .ctl_name
= NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS
,
2450 .procname
= "gc_min_interval_ms",
2451 .data
= &ip6_rt_gc_min_interval
,
2452 .maxlen
= sizeof(int),
2454 .proc_handler
= &proc_dointvec_ms_jiffies
,
2455 .strategy
= &sysctl_ms_jiffies
,
2462 void __init
ip6_route_init(void)
2464 struct proc_dir_entry
*p
;
2466 ip6_dst_ops
.kmem_cachep
=
2467 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info
), 0,
2468 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
, NULL
, NULL
);
2470 #ifdef CONFIG_PROC_FS
2471 p
= proc_net_create("ipv6_route", 0, rt6_proc_info
);
2473 p
->owner
= THIS_MODULE
;
2475 proc_net_fops_create("rt6_stats", S_IRUGO
, &rt6_stats_seq_fops
);
2480 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2485 void ip6_route_cleanup(void)
2487 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2488 fib6_rules_cleanup();
2490 #ifdef CONFIG_PROC_FS
2491 proc_net_remove("ipv6_route");
2492 proc_net_remove("rt6_stats");
2499 kmem_cache_destroy(ip6_dst_ops
.kmem_cachep
);