/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Version:	$Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window clamping.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *		Miquel van Smoorenburg	:	BSD API fixes.
 *		Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *		Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *		Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *		Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *		Robert Olsson	:	Added rt_cache statistics
 *		Arnaldo C. Melo	:	Convert proc stuff to seq_file
 *		Eric Dumazet	:	hashed spinlocks and rt_check_expire() fixes.
 *		Ilia Sotnikov	:	Ignore TOS on PMTUD and Redirect
 *		Ilia Sotnikov	:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>

#define RT_FL_TOS(oldflp) \
    ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)
static int ip_rt_max_size;
static int ip_rt_gc_timeout		= RT_GC_TIMEOUT;
static int ip_rt_gc_interval		= 60 * HZ;
static int ip_rt_gc_min_interval	= HZ / 2;
static int ip_rt_redirect_number	= 9;
static int ip_rt_redirect_load		= HZ / 50;
static int ip_rt_redirect_silence	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost		= HZ;
static int ip_rt_error_burst		= 5 * HZ;
static int ip_rt_gc_elasticity		= 8;
static int ip_rt_mtu_expires		= 10 * 60 * HZ;
static int ip_rt_min_pmtu		= 512 + 20 + 20;
static int ip_rt_min_advmss		= 256;
static int ip_rt_secret_interval	= 10 * 60 * HZ;

#define RTprint(a...)	printk(KERN_DEBUG a)
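/*
 * Worked example for the redirect tunables above (illustrative sketch,
 * assuming an HZ=1000 build; HZ=100/250 scale accordingly):
 * ip_rt_redirect_load = HZ/50 = 20 jiffies (20 ms), and
 * ip_rt_redirect_silence = (HZ/50) << (9 + 1) = 20 << 10 = 20480 jiffies
 * (~20.5 s).  The shift count follows from ip_rt_redirect_number = 9:
 * nine redirects sent with the load interval doubling each time take
 * about 20 ms * (2^9 - 1) ~= 10 s in total, so the silence window
 * comfortably spans the whole exponential backoff sequence before the
 * token counter is reset.
 */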
static void rt_worker_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
static struct timer_list rt_secret_timer;
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static void		 ipv4_dst_ifdown(struct dst_entry *dst,
					 struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		__constant_htons(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		ip_local_out,
	.entry_size =		sizeof(struct rtable),
	.entries =		ATOMIC_INIT(0),
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable	*chain;
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
 * The size of this table is a power of two and depends on the number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif
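/*
 * Example of the lock striping above (illustrative sketch): hash slots
 * that differ by a multiple of RT_HASH_LOCK_SZ share one spinlock.  With
 * RT_HASH_LOCK_SZ == 256, slots 5, 261 and 517 all map to
 * rt_hash_locks[5], because rt_hash_lock_addr() masks the slot with
 * (RT_HASH_LOCK_SZ - 1).  Lock memory thus stays bounded regardless of
 * table size, while contention is still spread over many locks.
 */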
static struct rt_hash_bucket 	*rt_hash_table;
static unsigned			rt_hash_mask;
static unsigned int		rt_hash_log;
static atomic_t			rt_genid;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) \
	(__raw_get_cpu_var(rt_cache_stat).field++)

static unsigned int rt_hash_code(u32 daddr, u32 saddr)
{
	return jhash_2words(daddr, saddr, atomic_read(&rt_genid))
		& rt_hash_mask;
}

#define rt_hash(daddr, saddr, idx) \
	rt_hash_code((__force u32)(__be32)(daddr),\
		(__force u32)(__be32)(saddr) ^ ((idx) << 5))
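/*
 * Note on the hash above (illustrative): mixing atomic_read(&rt_genid)
 * into jhash_2words() means a single atomic_add() to rt_genid re-keys
 * the entire table at once.  Every cached entry was hashed and stamped
 * under the old genid, so after a bump it can no longer match a lookup
 * and is lazily reaped.  E.g. a lookup of daddr/saddr on ifindex 2
 * computes
 *	rt_hash_code(daddr, saddr ^ (2 << 5))
 * whose result changes whenever rt_genid does.
 */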
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
{
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		rcu_read_lock_bh();
		r = rcu_dereference(rt_hash_table[st->bucket].chain);
		while (r) {
			if (r->u.dst.dev->nd_net == st->p.net &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference(r->u.dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct rt_cache_iter_state *st,
					  struct rtable *r)
{
	r = r->u.dst.rt_next;
	while (!r) {
		rcu_read_unlock_bh();
		if (--st->bucket < 0)
			break;
		rcu_read_lock_bh();
		r = rt_hash_table[st->bucket].chain;
	}
	return rcu_dereference(r);
}

static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st,
					struct rtable *r)
{
	while ((r = __rt_cache_get_next(st, r)) != NULL) {
		if (r->u.dst.dev->nd_net != st->p.net)
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(st);

	if (r)
		while (pos && (r = rt_cache_get_next(st, r)))
			--pos;
	return pos ? NULL : r;
}
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;

	if (*pos)
		return rt_cache_get_idx(st, *pos - 1);
	st->genid = atomic_read(&rt_genid);
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;
	struct rt_cache_iter_state *st = seq->private;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(st);
	else
		r = rt_cache_get_next(st, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		char temp[256];

		sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
			      "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
			r->u.dst.dev ? r->u.dst.dev->name : "*",
			(unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
			r->rt_flags, atomic_read(&r->u.dst.__refcnt),
			r->u.dst.__use, 0, (unsigned long)r->rt_src,
			(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
			     (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
			dst_metric(&r->u.dst, RTAX_WINDOW),
			(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
			      dst_metric(&r->u.dst, RTAX_RTTVAR)),
			r->fl.fl4_tos,
			r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
			r->u.dst.hh ? (r->u.dst.hh->hh_output ==
				       dev_queue_xmit) : 0,
			r->rt_spec_dst);
		seq_printf(seq, "%-127s\n", temp);
	}
	return 0;
}
static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   atomic_read(&ipv4_dst_ops.entries),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,
		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}
static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_NET_CLS_ROUTE
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
			   int length, int *eof, void *data)
{
	unsigned int i;

	if ((offset & 3) || (length & 3))
		return -EIO;

	if (offset >= sizeof(struct ip_rt_acct) * 256) {
		*eof = 1;
		return 0;
	}

	if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
		length = sizeof(struct ip_rt_acct) * 256 - offset;
		*eof = 1;
	}

	offset /= sizeof(u32);

	if (length > 0) {
		u32 *dst = (u32 *) buffer;

		*start = buffer;
		memset(dst, 0, length);

		for_each_possible_cpu(i) {
			unsigned int j;
			u32 *src;

			src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
			for (j = 0; j < length/4; j++)
				dst[j] += src[j];
		}
	}
	return length;
}
#endif
static __init int ip_rt_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_NET_CLS_ROUTE
	pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
			ip_rt_acct_read, NULL);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_NET_CLS_ROUTE
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}
#else
static inline int ip_rt_proc_init(struct net *net)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static __inline__ void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static __inline__ void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
static __inline__ int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rth->fl.iif && rth->u.dst.rt_next;
}
static __inline__ int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->u.dst.expires;
}
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->u.dst.__refcnt))
		goto out;

	ret = 1;
	if (rth->u.dst.expires &&
	    time_after_eq(jiffies, rth->u.dst.expires))
		goto out;

	age = jiffies - rth->u.dst.lastuse;
	ret = 0;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}
/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->u.dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (!rt->fl.iif ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
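/*
 * Worked example for rt_score() (illustrative): an output-route entry
 * (fl.iif == 0) idle for 100 jiffies gets bit 30 set on top of its
 * inverted-age counter, so it outranks an input broadcast entry idle for
 * only 10 jiffies that earns neither high bit.  rt_intern_hash() tracks
 * the minimum score per chain and evicts that entry first, i.e. the
 * least valuable, longest-idle one.
 */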
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
		(fl1->mark ^ fl2->mark) |
		(*(u16 *)&fl1->nl_u.ip4_u.tos ^
		 *(u16 *)&fl2->nl_u.ip4_u.tos) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}
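/*
 * Design note (illustrative): compare_keys() is deliberately branch-free.
 * The XOR of each field pair is zero iff the fields are equal, so OR-ing
 * all the XORs yields zero iff every field matches:
 *
 *	(a ^ b) | (c ^ d) == 0   <=>   a == b && c == d
 *
 * A single compare against zero then replaces a chain of conditional
 * jumps on the hottest lookup path.
 */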
static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return rt1->u.dst.dev->nd_net == rt2->u.dst.dev->nd_net;
}
/*
 * Perform a full scan of the hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;

	for (i = 0; i <= rt_hash_mask; i++) {
		if (process_context && need_resched())
			cond_resched();
		rth = rt_hash_table[i].chain;
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));
		rth = rt_hash_table[i].chain;
		rt_hash_table[i].chain = NULL;
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth; rth = next) {
			next = rth->u.dst.rt_next;
			rt_free(rth);
		}
	}
}
static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth, **rthp;
	u64 mult;

	mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (need_resched())
			cond_resched();

		if (*rthp == NULL)
			continue;
		spin_lock_bh(rt_hash_lock_addr(i));
		while ((rth = *rthp) != NULL) {
			if (rth->rt_genid != atomic_read(&rt_genid)) {
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				continue;
			}
			if (rth->u.dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(jiffies, rth->u.dst.expires)) {
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
				tmo >>= 1;
				rthp = &rth->u.dst.rt_next;
				continue;
			}

			/* Cleanup aged off entries. */
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
		}
		spin_unlock_bh(rt_hash_lock_addr(i));
	}
	rover = i;
}
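/*
 * Worked example for the scan goal above (illustrative, with default
 * tunables): goal = (buckets * ip_rt_gc_interval) / ip_rt_gc_timeout
 * = (2^rt_hash_log * 60*HZ) / (300*HZ), i.e. one fifth of the table per
 * invocation.  Since the worker fires every ip_rt_gc_interval (60 s),
 * each bucket is visited about once per ip_rt_gc_timeout (300 s).
 */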
/*
 * rt_worker_func() is run in process context.
 * we call rt_check_expire() to scan part of the hash table
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}
/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without giving a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(void)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &rt_genid);
}
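/*
 * Usage note (illustrative): an invalidation therefore costs one
 * get_random_bytes() and one atomic_add(); no cache entry is touched.
 * Old entries simply stop matching lookups (their stamped rt_genid is
 * stale) and are freed later by rt_check_expire() and
 * rt_garbage_collect(), both of which treat a stale genid as
 * immediately reapable.
 */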
/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(int delay)
{
	rt_cache_invalidate();
	if (delay >= 0)
		rt_do_flush(!in_softirq());
}
/*
 * We change rt_genid and let gc do the cleanup
 */
static void rt_secret_rebuild(unsigned long dummy)
{
	rt_cache_invalidate();
	mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval);
}
/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to newly generated ones.

   Current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expire is large enough to keep enough warm entries,
   and when load increases it reduces to limit cache size.
 */
static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth, **rthp;
	unsigned long now = jiffies;
	int goal;

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	/* Calculate number of entries, which we want to expire now. */
	goal = atomic_read(&ipv4_dst_ops.entries) -
		(ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = *rthp) != NULL) {
				if (rth->rt_genid == atomic_read(&rt_genid) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					continue;
				}
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop process if:

		   - expire is reduced to zero. Otherwise, expire is halved.
		   - table is not full.
		   - we are called from interrupt.
		   - jiffies check is just a fallback/debug loop breaker.
		     We will not spin here for a long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				atomic_read(&ipv4_dst_ops.entries), goal, i);
#endif

		if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			atomic_read(&ipv4_dst_ops.entries), goal, rover);
#endif
out:	return 0;
}
static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
{
	struct rtable	*rth, **rthp;
	unsigned long	now;
	struct rtable *cand, **candp;
	u32 		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = *rthp) != NULL) {
		if (rth->rt_genid != atomic_read(&rt_genid)) {
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->u.dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->u.dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->u.dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			*rp = rth;
			return 0;
		}

		if (!atomic_read(&rth->u.dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->u.dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->u.dst.rt_next;
			rt_free(cand);
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
		int err = arp_bind_neighbour(&rt->u.dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->u.dst.rt_next = rt_hash_table[hash].chain;
#if RT_CACHE_DEBUG >= 2
	if (rt->u.dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
		       NIPQUAD(rt->rt_dst));
		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
			printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
		printk("\n");
	}
#endif
	rt_hash_table[hash].chain = rt;
	spin_unlock_bh(rt_hash_lock_addr(hash));
	*rp = rt;
	return 0;
}
void rt_bind_peer(struct rtable *rt, int create)
{
	static DEFINE_SPINLOCK(rt_peer_lock);
	struct inet_peer *peer;

	peer = inet_getpeer(rt->rt_dst, create);

	spin_lock_bh(&rt_peer_lock);
	if (rt->peer == NULL) {
		rt->peer = peer;
		peer = NULL;
	}
	spin_unlock_bh(&rt_peer_lock);
	if (peer)
		inet_putpeer(peer);
}
/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If a peer is attached to the destination, it is never
		 * detached, so we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable **rthp, *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = *rthp) != NULL) {
		if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) {
			*rthp = aux->u.dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->u.dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = in_dev_get(dev);
	struct rtable *rth, **rthp;
	__be32  skeys[2] = { saddr, 0 };
	int  ikeys[2] = { dev->ifindex, 0 };
	struct netevent_redirect netevent;
	struct net *net;

	if (!in_dev)
		return;

	net = dev->nd_net;
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
	    || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
	    || ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i = 0; i < 2; i++) {
		for (k = 0; k < 2; k++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);

			rthp = &rt_hash_table[hash].chain;

			rcu_read_lock();
			while ((rth = rcu_dereference(*rthp)) != NULL) {
				struct rtable *rt;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->fl.oif != ikeys[k] ||
				    rth->fl.iif != 0 ||
				    rth->rt_genid != atomic_read(&rt_genid) ||
				    rth->u.dst.dev->nd_net != net) {
					rthp = &rth->u.dst.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->u.dst.error ||
				    rth->rt_gateway != old_gw ||
				    rth->u.dst.dev != dev)
					break;

				dst_hold(&rth->u.dst);
				rcu_read_unlock();

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					in_dev_put(in_dev);
					return;
				}

				/* Copy all the information. */
				*rt = *rth;
				INIT_RCU_HEAD(&rt->u.dst.rcu_head);
				rt->u.dst.__use		= 1;
				atomic_set(&rt->u.dst.__refcnt, 1);
				rt->u.dst.child		= NULL;
				if (rt->u.dst.dev)
					dev_hold(rt->u.dst.dev);
				if (rt->idev)
					in_dev_hold(rt->idev);
				rt->u.dst.obsolete	= 0;
				rt->u.dst.lastuse	= jiffies;
				rt->u.dst.path		= &rt->u.dst;
				rt->u.dst.neighbour	= NULL;
				rt->u.dst.hh		= NULL;
				rt->u.dst.xfrm		= NULL;
				rt->rt_genid		= atomic_read(&rt_genid);
				rt->rt_flags		|= RTCF_REDIRECTED;

				/* Gateway is different ... */
				rt->rt_gateway		= new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->u.dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->u.dst) ||
				    !(rt->u.dst.neighbour->nud_state &
					    NUD_VALID)) {
					if (rt->u.dst.neighbour)
						neigh_event_send(rt->u.dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				netevent.old = &rth->u.dst;
				netevent.new = &rt->u.dst;
				call_netevent_notifiers(NETEVENT_REDIRECT,
							&netevent);

				rt_del(hash, rth);
				if (!rt_intern_hash(hash, rt, &rt))
					ip_rt_put(rt);
				goto do_next;
			}
			rcu_read_unlock();
		do_next:
			;
		}
	}
	in_dev_put(in_dev);
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
			"%u.%u.%u.%u ignored.\n"
			"  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
		       NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
		       NIPQUAD(saddr), NIPQUAD(daddr));
#endif
	in_dev_put(in_dev);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->u.dst.expires) {
			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
						rt->fl.oif);
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
					  "%u.%u.%u.%u/%02x dropped\n",
				NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);

	if (!in_dev)
		return;

	if (!IN_DEV_TX_REDIRECTS(in_dev))
		goto out;

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
		rt->u.dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set u.dst.rate_last to the last seen redirected packet.
	 */
	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
		rt->u.dst.rate_last = jiffies;
		goto out;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (rt->u.dst.rate_tokens == 0 ||
	    time_after(jiffies,
		       (rt->u.dst.rate_last +
			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->u.dst.rate_last = jiffies;
		++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (IN_DEV_LOG_MARTIANS(in_dev) &&
		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
				"redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
				NIPQUAD(rt->rt_src), rt->rt_iif,
				NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
#endif
	}
out:
	in_dev_put(in_dev);
}
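/*
 * Worked example (illustrative, assuming HZ=1000): after k redirects the
 * next one is allowed only once jiffies pass
 * rate_last + (ip_rt_redirect_load << k), i.e. 20 ms, 40 ms, 80 ms, ...
 * 5.12 s for k = 0..8.  After ip_rt_redirect_number (9) unanswered
 * redirects we go silent until the host stays quiet for
 * ip_rt_redirect_silence (~20 s), which resets rate_tokens above.
 */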
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	unsigned long now;
	int code;

	switch (rt->u.dst.error) {
		case EINVAL:
		default:
			goto out;
		case EHOSTUNREACH:
			code = ICMP_HOST_UNREACH;
			break;
		case ENETUNREACH:
			code = ICMP_NET_UNREACH;
			IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES);
			break;
		case EACCES:
			code = ICMP_PKT_FILTERED;
			break;
	}

	now = jiffies;
	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
		rt->u.dst.rate_tokens = ip_rt_error_burst;
	rt->u.dst.rate_last = now;
	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
		rt->u.dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

out:	kfree_skb(skb);
	return 0;
}
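/*
 * Token-bucket sketch for the rate limit above (illustrative): tokens
 * accumulate at one per jiffy up to ip_rt_error_burst (5*HZ), and each
 * ICMP_DEST_UNREACH costs ip_rt_error_cost (HZ) tokens, so the steady
 * state is one error per second with bursts of up to five back to back.
 */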
/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}
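/*
 * Worked example (illustrative): an ICMP "fragmentation needed" that
 * carries no next-hop MTU forces a guess.  For old_mtu = 1500,
 * guess_mtu() returns the first plateau strictly below it, 1492 (the
 * PPPoE value); a retry at 1492 would step down to 576.  This is the
 * classic RFC 1191 plateau-table strategy.
 */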
unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
				 unsigned short new_mtu)
{
	int i;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	__be32  skeys[2] = { iph->saddr, 0, };
	__be32  daddr = iph->daddr;
	unsigned short est_mtu = 0;

	if (ipv4_config.no_pmtu_disc)
		return 0;

	for (i = 0; i < 2; i++) {
		unsigned hash = rt_hash(daddr, skeys[i], 0);

		rcu_read_lock();
		for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
		     rth = rcu_dereference(rth->u.dst.rt_next)) {
			if (rth->fl.fl4_dst == daddr &&
			    rth->fl.fl4_src == skeys[i] &&
			    rth->rt_dst  == daddr &&
			    rth->rt_src  == iph->saddr &&
			    rth->fl.iif == 0 &&
			    !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
			    rth->u.dst.dev->nd_net == net &&
			    rth->rt_genid == atomic_read(&rt_genid)) {
				unsigned short mtu = new_mtu;

				if (new_mtu < 68 || new_mtu >= old_mtu) {

					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 &&
					    old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
					    old_mtu >= 68 + (iph->ihl << 2))
						old_mtu -= iph->ihl << 2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
					if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
						dst_confirm(&rth->u.dst);
						if (mtu < ip_rt_min_pmtu) {
							mtu = ip_rt_min_pmtu;
							rth->u.dst.metrics[RTAX_LOCK-1] |=
								(1 << RTAX_MTU);
						}
						rth->u.dst.metrics[RTAX_MTU-1] = mtu;
						dst_set_expires(&rth->u.dst,
							ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
		}
		rcu_read_unlock();
	}
	return est_mtu ? : new_mtu;
}
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
	    !(dst_metric_locked(dst, RTAX_MTU))) {
		if (mtu < ip_rt_min_pmtu) {
			mtu = ip_rt_min_pmtu;
			dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
		}
		dst->metrics[RTAX_MTU-1] = mtu;
		dst_set_expires(dst, ip_rt_mtu_expires);
		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
	}
}
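/*
 * Example of the clamp above (illustrative): a forged ICMP advertising
 * mtu = 100 is raised to ip_rt_min_pmtu (512 + 20 + 20 = 552) and the
 * MTU metric is locked, so later bogus updates cannot shrink it further;
 * the learned value then ages out after ip_rt_mtu_expires (10 minutes).
 */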
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;
	struct in_device *idev = rt->idev;

	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}

	if (idev) {
		rt->idev = NULL;
		in_dev_put(idev);
	}
}
static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
	struct rtable *rt = (struct rtable *) dst;
	struct in_device *idev = rt->idev;
	if (dev != dev->nd_net->loopback_dev && idev && idev->dev == dev) {
		struct in_device *loopback_idev =
			in_dev_get(dev->nd_net->loopback_dev);
		if (loopback_idev) {
			rt->idev = loopback_idev;
			in_dev_put(idev);
		}
	}
}
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = (struct rtable *) skb->dst;
	if (rt)
		dst_set_expires(&rt->u.dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
		NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}
/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	__be32 src;
	struct fib_result res;

	if (rt->fl.iif == 0)
		src = rt->rt_src;
	else if (fib_lookup(rt->u.dst.dev->nd_net, &rt->fl, &res) == 0) {
		src = FIB_RES_PREFSRC(res);
		fib_res_put(&res);
	} else
		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->u.dst.tclassid & 0xFFFF))
		rt->u.dst.tclassid |= tag & 0xFFFF;
	if (!(rt->u.dst.tclassid & 0xFFFF0000))
		rt->u.dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		memcpy(rt->u.dst.metrics, fi->fib_metrics,
		       sizeof(rt->u.dst.metrics));
		if (fi->fib_mtu == 0) {
			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
			if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    rt->u.dst.dev->mtu > 576)
				rt->u.dst.metrics[RTAX_MTU-1] = 576;
		}
#ifdef CONFIG_NET_CLS_ROUTE
		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else
		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;

	if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
	if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
				       ip_rt_min_advmss);
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
				u8 tos, struct net_device *dev, int our)
{
	unsigned hash;
	struct rtable *rth;
	__be32 spec_dst;
	struct in_device *in_dev = in_dev_get(dev);
	u32 itag = 0;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else if (fib_validate_source(saddr, 0, tos, 0,
					dev, &spec_dst, &itag) < 0)
		goto e_inval;

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output = ip_rt_bug;

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= init_net.loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->fl.oif	= 0;
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->rt_genid	= atomic_read(&rt_genid);
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	if (our) {
		rth->u.dst.input= ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->u.dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	in_dev_put(in_dev);
	hash = rt_hash(daddr, saddr, dev->ifindex);
	return rt_intern_hash(hash, rth, (struct rtable **) &skb->dst);

e_nobufs:
	in_dev_put(in_dev);
	return -ENOBUFS;

e_inval:
	in_dev_put(in_dev);
	return -EINVAL;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		printk(KERN_WARNING "martian source %u.%u.%u.%u from "
			"%u.%u.%u.%u, on dev %s\n",
			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			int i;
			const unsigned char *p = skb_mac_header(skb);
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}
static inline int __mkroute_input(struct sk_buff *skb,
				  struct fib_result* res,
				  struct in_device *in_dev,
				  __be32 daddr, __be32 saddr, u32 tos,
				  struct rtable **result)
{
	struct rtable* rth;
	int err;
	struct in_device *out_dev;
	unsigned flags = 0;
	__be32 spec_dst;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = in_dev_get(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input" \
			       "_slow(). Please, report\n");
		return -EINVAL;
	}

	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		err = -EINVAL;
		goto cleanup;
	}

	if (err)
		flags |= RTCF_DIRECTSRC;

	if (out_dev == in_dev && err && !(flags & RTCF_MASQ) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 */
		if (out_dev == in_dev) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
		rth->u.dst.flags |= DST_NOXFRM;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
	rth->rt_gateway	= daddr;
	rth->rt_iif 	=
		rth->fl.iif	= in_dev->dev->ifindex;
	rth->u.dst.dev	= (out_dev)->dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->fl.oif 	= 0;
	rth->rt_spec_dst= spec_dst;

	rth->u.dst.input = ip_forward;
	rth->u.dst.output = ip_output;
	rth->rt_genid = atomic_read(&rt_genid);

	rt_set_nexthop(rth, res, itag);

	rth->rt_flags = flags;

	*result = rth;
	err = 0;
 cleanup:
	/* release the working reference to the output device */
	in_dev_put(out_dev);
	return err;
}
static inline int ip_mkroute_input(struct sk_buff *skb,
				   struct fib_result* res,
				   const struct flowi *fl,
				   struct in_device *in_dev,
				   __be32 daddr, __be32 saddr, u32 tos)
{
	struct rtable* rth = NULL;
	int err;
	unsigned hash;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
		fib_select_multipath(fl, res);
#endif

	/* create a routing cache entry */
	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
	if (err)
		return err;

	/* put it into the cache */
	hash = rt_hash(daddr, saddr, fl->iif);
	return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
}
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must have the correct destination already attached by the output
 *	routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% guarantee.
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = in_dev_get(dev);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = daddr,
					.saddr = saddr,
					.tos = tos,
					.scope = RT_SCOPE_UNIVERSE,
				      } },
			    .mark = skb->mark,
			    .iif = dev->ifindex };
	unsigned	flags = 0;
	u32		itag = 0;
	struct rtable * rth;
	unsigned	hash;
	__be32		spec_dst;
	int		err = -EINVAL;
	int		free_res = 0;
	struct net    * net = dev->nd_net;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	   by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr))
		goto martian_source;

	if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I even do not know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
	    ipv4_is_loopback(daddr))
		goto martian_destination;

	/*
	 *	Now we are ready to route packet.
	 */
	if ((err = fib_lookup(net, &fl, &res)) != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			goto e_hostunreach;
		goto no_route;
	}
	free_res = 1;

	RT_CACHE_STAT_INC(in_slow_tot);

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		int result;
		result = fib_validate_source(saddr, daddr, tos,
					     net->loopback_dev->ifindex,
					     dev, &spec_dst, &itag);
		if (result < 0)
			goto martian_source;
		if (result)
			flags |= RTCF_DIRECTSRC;
		spec_dst = daddr;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto e_hostunreach;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
done:
	in_dev_put(in_dev);
	if (free_res)
		fib_res_put(&res);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr))
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	else {
		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
					  &itag);
		if (err < 0)
			goto martian_source;
		if (err)
			flags |= RTCF_DIRECTSRC;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output= ip_rt_bug;
	rth->rt_genid = atomic_read(&rt_genid);

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= net->loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->u.dst.input= ip_local_deliver;
	rth->rt_flags 	= flags|RTCF_LOCAL;
	if (res.type == RTN_UNREACHABLE) {
		rth->u.dst.input= ip_error;
		rth->u.dst.error= -err;
		rth->rt_flags 	&= ~RTCF_LOCAL;
	}
	rth->rt_type	= res.type;
	hash = rt_hash(daddr, saddr, fl.iif);
	err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
	goto done;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	res.type = RTN_UNREACHABLE;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
			"%u.%u.%u.%u, dev %s\n",
			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
#endif

e_hostunreach:
	err = -EHOSTUNREACH;
	goto done;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto e_inval;
}
int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		   u8 tos, struct net_device *dev)
{
	struct rtable * rth;
	unsigned	hash;
	int iif = dev->ifindex;
	struct net *net;

	net = dev->nd_net;
	tos &= IPTOS_RT_MASK;
	hash = rt_hash(daddr, saddr, iif);

	rcu_read_lock();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == daddr &&
		    rth->fl.fl4_src == saddr &&
		    rth->fl.iif == iif &&
		    rth->fl.mark == skb->mark &&
		    rth->fl.fl4_tos == tos &&
		    rth->u.dst.dev->nd_net == net &&
		    rth->rt_genid == atomic_read(&rt_genid)) {
			dst_use(&rth->u.dst, jiffies);
			RT_CACHE_STAT_INC(in_hit);
			rcu_read_unlock();
			skb->dst = (struct dst_entry*)rth;
			return 0;
		}
		RT_CACHE_STAT_INC(in_hlist_search);
	}
	rcu_read_unlock();

	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result a host on a multicast
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note, that multicast routers are not affected, because
	   the route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev;

		rcu_read_lock();
		if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
			int our = ip_check_mc(in_dev, daddr, saddr,
				ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
			    || (!ipv4_is_local_multicast(daddr) &&
				IN_DEV_MFORWARD(in_dev))
#endif
			    ) {
				rcu_read_unlock();
				return ip_route_input_mc(skb, daddr, saddr,
							 tos, dev, our);
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	return ip_route_input_slow(skb, daddr, saddr, tos, dev);
}
static inline int __mkroute_output(struct rtable **result,
				   struct fib_result* res,
				   const struct flowi *fl,
				   const struct flowi *oldflp,
				   struct net_device *dev_out,
				   unsigned flags)
{
	struct rtable *rth;
	struct in_device *in_dev;
	u32 tos = RT_FL_TOS(oldflp);
	int err = 0;

	if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
		return -EINVAL;

	if (fl->fl4_dst == htonl(0xFFFFFFFF))
		res->type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl->fl4_dst))
		res->type = RTN_MULTICAST;
	else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
		return -EINVAL;

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	/* get work reference to inet device */
	in_dev = in_dev_get(dev_out);
	if (!in_dev)
		return -EINVAL;

	if (res->type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		if (res->fi) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	} else if (res->type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST|RTCF_LOCAL;
		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
				 oldflp->proto))
			flags &= ~RTCF_LOCAL;
		/* If multicast route do not exist use
		   default one, but do not gateway in this case.
		   Yes, it is hack.
		 */
		if (res->fi && res->prefixlen < 4) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
		rth->u.dst.flags |= DST_NOXFRM;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;

	rth->fl.fl4_dst	= oldflp->fl4_dst;
	rth->fl.fl4_tos	= tos;
	rth->fl.fl4_src	= oldflp->fl4_src;
	rth->fl.oif	= oldflp->oif;
	rth->fl.mark    = oldflp->mark;
	rth->rt_dst	= fl->fl4_dst;
	rth->rt_src	= fl->fl4_src;
	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
	/* get references to the devices that are to be held by the routing
	   cache entry */
	rth->u.dst.dev	= dev_out;
	dev_hold(dev_out);
	rth->idev	= in_dev_get(dev_out);
	rth->rt_gateway = fl->fl4_dst;
	rth->rt_spec_dst= fl->fl4_src;

	rth->u.dst.output=ip_output;
	rth->rt_genid = atomic_read(&rt_genid);

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL) {
		rth->u.dst.input = ip_local_deliver;
		rth->rt_spec_dst = fl->fl4_dst;
	}
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		rth->rt_spec_dst = fl->fl4_src;
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->u.dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (res->type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(oldflp->fl4_dst)) {
				rth->u.dst.input = ip_mr_input;
				rth->u.dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, res, 0);

	rth->rt_flags = flags;

	*result = rth;
 cleanup:
	/* release work reference to inet device */
	in_dev_put(in_dev);
	return err;
}
static inline int ip_mkroute_output(struct rtable **rp,
				    struct fib_result* res,
				    const struct flowi *fl,
				    const struct flowi *oldflp,
				    struct net_device *dev_out,
				    unsigned flags)
{
	struct rtable *rth = NULL;
	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
	unsigned hash;
	if (err == 0) {
		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
		err = rt_intern_hash(hash, rth, rp);
	}

	return err;
}
/*
 * Major route resolver routine.
 */

static int ip_route_output_slow(struct net *net, struct rtable **rp,
				const struct flowi *oldflp)
{
	u32 tos	= RT_FL_TOS(oldflp);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = oldflp->fl4_dst,
					.saddr = oldflp->fl4_src,
					.tos = tos & IPTOS_RT_MASK,
					.scope = ((tos & RTO_ONLINK) ?
						  RT_SCOPE_LINK :
						  RT_SCOPE_UNIVERSE),
				      } },
			    .mark = oldflp->mark,
			    .iif = net->loopback_dev->ifindex,
			    .oif = oldflp->oif };
	struct fib_result res;
	unsigned flags = 0;
	struct net_device *dev_out = NULL;
	int free_res = 0;
	int err;

	res.fi		= NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r		= NULL;
#endif

	if (oldflp->fl4_src) {
		err = -EINVAL;
		if (ipv4_is_multicast(oldflp->fl4_src) ||
		    ipv4_is_lbcast(oldflp->fl4_src) ||
		    ipv4_is_zeronet(oldflp->fl4_src))
			goto out;

		/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
		dev_out = ip_dev_find(net, oldflp->fl4_src);
		if (dev_out == NULL)
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (oldflp->oif == 0
		    && (ipv4_is_multicast(oldflp->fl4_dst) ||
			oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic,vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is good workaround.
			 */

			fl.oif = dev_out->ifindex;
			goto make_route;
		}
		if (dev_out)
			dev_put(dev_out);
		dev_out = NULL;
	}

	if (oldflp->oif) {
		dev_out = dev_get_by_index(net, oldflp->oif);
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (__in_dev_get_rtnl(dev_out) == NULL) {
			dev_put(dev_out);
			goto out;	/* Wrong error code */
		}

		if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
		    oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
			if (!fl.fl4_src)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl.fl4_src) {
			if (ipv4_is_multicast(oldflp->fl4_dst))
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      fl.fl4_scope);
			else if (!oldflp->fl4_dst)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl.fl4_dst) {
		fl.fl4_dst = fl.fl4_src;
		if (!fl.fl4_dst)
			fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, &fl, &res)) {
		res.fi = NULL;
		if (oldflp->oif) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl.fl4_src == 0)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		if (dev_out)
			dev_put(dev_out);
		err = -ENETUNREACH;
		goto out;
	}
	free_res = 1;

	if (res.type == RTN_LOCAL) {
		if (!fl.fl4_src)
			fl.fl4_src = fl.fl4_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			fib_info_put(res.fi);
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		fib_select_multipath(&fl, &res);
	else
#endif
	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
		fib_select_default(net, &fl, &res);

	if (!fl.fl4_src)
		fl.fl4_src = FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;

make_route:
	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);

	if (free_res)
		fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:	return err;
}
int __ip_route_output_key(struct net *net, struct rtable **rp,
			  const struct flowi *flp)
{
	unsigned hash;
	struct rtable *rth;

	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == flp->fl4_dst &&
		    rth->fl.fl4_src == flp->fl4_src &&
		    rth->fl.iif == 0 &&
		    rth->fl.oif == flp->oif &&
		    rth->fl.mark == flp->mark &&
		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
		    rth->u.dst.dev->nd_net == net &&
		    rth->rt_genid == atomic_read(&rt_genid)) {
			dst_use(&rth->u.dst, jiffies);
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			*rp = rth;
			return 0;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

	return ip_route_output_slow(net, rp, flp);
}

EXPORT_SYMBOL_GPL(__ip_route_output_key);
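
/* A "blackhole" dst is substituted for a real route when the xfrm
 * lookup cannot complete immediately: it keeps the caller's reference
 * valid but discards every packet (input and output are both
 * dst_discard), and PMTU updates are deliberately ignored.
 */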
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.protocol		= __constant_htons(ETH_P_IP),
	.destroy		= ipv4_dst_destroy,
	.check			= ipv4_dst_check,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.entry_size		= sizeof(struct rtable),
	.entries		= ATOMIC_INIT(0),
};
static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock *sk)
{
	struct rtable *ort = *rp;
	struct rtable *rt = (struct rtable *)
		dst_alloc(&ipv4_dst_blackhole_ops);

	if (rt) {
		struct dst_entry *new = &rt->u.dst;

		atomic_set(&new->__refcnt, 1);
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;
		memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));

		new->dev = ort->u.dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->fl = ort->fl;

		rt->idev = ort->idev;
		if (rt->idev)
			in_dev_hold(rt->idev);
		rt->rt_genid = atomic_read(&rt_genid);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_dst = ort->rt_dst;
		rt->rt_src = ort->rt_src;
		rt->rt_iif = ort->rt_iif;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_spec_dst = ort->rt_spec_dst;
		rt->peer = ort->peer;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);
	}

	dst_release(&(*rp)->u.dst);
	*rp = rt;
	return (rt ? 0 : -ENOMEM);
}
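
/* Output route lookup plus xfrm (IPsec) policy resolution.  When the
 * flow specifies a protocol, the resolved addresses are copied back
 * into the flow before __xfrm_lookup(); -EREMOTE from the non-blocking
 * lookup is mapped to a blackhole route rather than a hard error.
 */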
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
			 struct sock *sk, int flags)
{
	int err;

	if ((err = __ip_route_output_key(net, rp, flp)) != 0)
		return err;

	if (flp->proto) {
		if (!flp->fl4_src)
			flp->fl4_src = (*rp)->rt_src;
		if (!flp->fl4_dst)
			flp->fl4_dst = (*rp)->rt_dst;
		err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
				    flags ? XFRM_LOOKUP_WAIT : 0);
		if (err == -EREMOTE)
			err = ipv4_dst_blackhole(rp, flp, sk);

		return err;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ip_route_output_flow);
int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
	return ip_route_output_flow(net, rp, flp, NULL, 0);
}
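
/* Fill one RTM_NEWROUTE message from the rtable hung off skb->dst:
 * the rtmsg header first, then the RTA_* attributes, then the cache
 * metadata (id, tcp timestamps, expiry) via rtnl_put_cacheinfo().
 */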
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= rt->fl.fl4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->fl.fl4_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
	}
	if (rt->u.dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
	if (rt->u.dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
#endif
	if (rt->fl.iif)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->fl.fl4_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto nla_put_failure;

	error = rt->u.dst.error;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rt->peer) {
		id = rt->peer->ip_id_count;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
			int err = ipmr_get_route(skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
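
/* RTM_GETROUTE handler (what "ip route get" talks to): build a dummy
 * skb, resolve the request through ip_route_input() when RTA_IIF is
 * given or ip_route_output_key() otherwise, then answer with
 * rt_fill_info().
 */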
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = in_skb->sk->sk_net;
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	struct sk_buff *skb;

	if (net != &init_net)
		return -EINVAL;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(&init_net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = (struct rtable *) skb->dst;
		if (err == 0 && rt->u.dst.error)
			err = -rt->u.dst.error;
	} else {
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = dst,
					.saddr = src,
					.tos = rtm->rtm_tos,
				},
			},
			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
		};
		err = ip_route_output_key(&init_net, &rt, &fl);
	}

	if (err)
		goto errout_free;

	skb->dst = &rt->u.dst;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
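
/* Walk all hash buckets and emit one RTM_NEWROUTE message per cache
 * entry; cb->args[0]/[1] carry the bucket and chain index across
 * netlink callbacks so a large cache can be dumped incrementally.
 */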
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++) {
		rcu_read_lock_bh();
		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
			if (idx < s_idx)
				continue;
			if (rt->rt_genid != atomic_read(&rt_genid))
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
		s_idx = 0;
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
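
/* Multicast configuration changes can invalidate cached multicast
 * routes, so flush the whole cache.
 */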
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(0);
}
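
/* The "flush" sysctl below is also reachable from userspace, e.g.
 * (assuming the usual proc mount):
 *
 *	echo 0 > /proc/sys/net/ipv4/route/flush
 *
 * which stores the written delay in flush_delay and triggers
 * rt_cache_flush().
 */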
#ifdef CONFIG_SYSCTL
static int flush_delay;

static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
					struct file *filp, void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
		rt_cache_flush(flush_delay);
		return 0;
	}

	return -EINVAL;
}

static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
						int __user *name,
						int nlen,
						void __user *oldval,
						size_t __user *oldlenp,
						void __user *newval,
						size_t newlen)
{
	int delay;

	if (newlen != sizeof(int))
		return -EINVAL;
	if (get_user(delay, (int __user *)newval))
		return -EFAULT;
	rt_cache_flush(delay);
	return 0;
}
ctl_table ipv4_route_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
		.procname	= "flush",
		.data		= &flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= &ipv4_sysctl_rtcache_flush,
		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_ms_jiffies,
		.strategy	= &sysctl_ms_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
		.procname	= "secret_interval",
		.data		= &ip_rt_secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{ .ctl_name = 0 }
};
#endif
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */

static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
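
/* Boot-time setup: seed the generation counter, create the dst slab,
 * size the route cache hash from available memory (overridable with
 * the "rhash_entries=" boot parameter handled above, e.g.
 * rhash_entries=65536), and start the GC and secret-rebuild timers.
 */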
int __init ip_rt_init(void)
{
	int rc = 0;

	atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^
			     (jiffies ^ (jiffies >> 7))));

#ifdef CONFIG_NET_CLS_ROUTE
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(num_physpages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					0);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	setup_timer(&rt_secret_timer, rt_secret_rebuild, 0);

	/* All the timers, started at system startup tend
	   to synchronize. Perturb it a bit.
	 */
	schedule_delayed_work(&expires_work,
		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

	rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
		ip_rt_secret_interval;
	add_timer(&rt_secret_timer);

	if (ip_rt_proc_init(&init_net))
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

	return rc;
}
EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);