2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
65 #define pr_fmt(fmt) "IPv4: " fmt
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
73 #include <linux/bootmem.h>
74 #include <linux/string.h>
75 #include <linux/socket.h>
76 #include <linux/sockios.h>
77 #include <linux/errno.h>
79 #include <linux/inet.h>
80 #include <linux/netdevice.h>
81 #include <linux/proc_fs.h>
82 #include <linux/init.h>
83 #include <linux/workqueue.h>
84 #include <linux/skbuff.h>
85 #include <linux/inetdevice.h>
86 #include <linux/igmp.h>
87 #include <linux/pkt_sched.h>
88 #include <linux/mroute.h>
89 #include <linux/netfilter_ipv4.h>
90 #include <linux/random.h>
91 #include <linux/jhash.h>
92 #include <linux/rcupdate.h>
93 #include <linux/times.h>
94 #include <linux/slab.h>
95 #include <linux/prefetch.h>
97 #include <net/net_namespace.h>
98 #include <net/protocol.h>
100 #include <net/route.h>
101 #include <net/inetpeer.h>
102 #include <net/sock.h>
103 #include <net/ip_fib.h>
106 #include <net/icmp.h>
107 #include <net/xfrm.h>
108 #include <net/netevent.h>
109 #include <net/rtnetlink.h>
111 #include <linux/sysctl.h>
112 #include <linux/kmemleak.h>
114 #include <net/secure_seq.h>
116 #define RT_FL_TOS(oldflp4) \
117 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
119 #define IP_MAX_MTU 0xFFF0
121 #define RT_GC_TIMEOUT (300*HZ)
123 static int ip_rt_max_size
;
124 static int ip_rt_gc_timeout __read_mostly
= RT_GC_TIMEOUT
;
125 static int ip_rt_gc_interval __read_mostly
= 60 * HZ
;
126 static int ip_rt_gc_min_interval __read_mostly
= HZ
/ 2;
127 static int ip_rt_redirect_number __read_mostly
= 9;
128 static int ip_rt_redirect_load __read_mostly
= HZ
/ 50;
129 static int ip_rt_redirect_silence __read_mostly
= ((HZ
/ 50) << (9 + 1));
130 static int ip_rt_error_cost __read_mostly
= HZ
;
131 static int ip_rt_error_burst __read_mostly
= 5 * HZ
;
132 static int ip_rt_gc_elasticity __read_mostly
= 8;
133 static int ip_rt_mtu_expires __read_mostly
= 10 * 60 * HZ
;
134 static int ip_rt_min_pmtu __read_mostly
= 512 + 20 + 20;
135 static int ip_rt_min_advmss __read_mostly
= 256;
138 * Interface to generic destination cache.
141 static struct dst_entry
*ipv4_dst_check(struct dst_entry
*dst
, u32 cookie
);
142 static unsigned int ipv4_default_advmss(const struct dst_entry
*dst
);
143 static unsigned int ipv4_mtu(const struct dst_entry
*dst
);
144 static struct dst_entry
*ipv4_negative_advice(struct dst_entry
*dst
);
145 static void ipv4_link_failure(struct sk_buff
*skb
);
146 static void ip_rt_update_pmtu(struct dst_entry
*dst
, struct sock
*sk
,
147 struct sk_buff
*skb
, u32 mtu
);
148 static void ip_do_redirect(struct dst_entry
*dst
, struct sock
*sk
,
149 struct sk_buff
*skb
);
151 static void ipv4_dst_ifdown(struct dst_entry
*dst
, struct net_device
*dev
,
156 static u32
*ipv4_cow_metrics(struct dst_entry
*dst
, unsigned long old
)
162 static struct neighbour
*ipv4_neigh_lookup(const struct dst_entry
*dst
,
166 static struct dst_ops ipv4_dst_ops
= {
168 .protocol
= cpu_to_be16(ETH_P_IP
),
169 .check
= ipv4_dst_check
,
170 .default_advmss
= ipv4_default_advmss
,
172 .cow_metrics
= ipv4_cow_metrics
,
173 .ifdown
= ipv4_dst_ifdown
,
174 .negative_advice
= ipv4_negative_advice
,
175 .link_failure
= ipv4_link_failure
,
176 .update_pmtu
= ip_rt_update_pmtu
,
177 .redirect
= ip_do_redirect
,
178 .local_out
= __ip_local_out
,
179 .neigh_lookup
= ipv4_neigh_lookup
,
182 #define ECN_OR_COST(class) TC_PRIO_##class
184 const __u8 ip_tos2prio
[16] = {
186 ECN_OR_COST(BESTEFFORT
),
188 ECN_OR_COST(BESTEFFORT
),
194 ECN_OR_COST(INTERACTIVE
),
196 ECN_OR_COST(INTERACTIVE
),
197 TC_PRIO_INTERACTIVE_BULK
,
198 ECN_OR_COST(INTERACTIVE_BULK
),
199 TC_PRIO_INTERACTIVE_BULK
,
200 ECN_OR_COST(INTERACTIVE_BULK
)
202 EXPORT_SYMBOL(ip_tos2prio
);
204 static DEFINE_PER_CPU(struct rt_cache_stat
, rt_cache_stat
);
205 #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
207 static inline int rt_genid(struct net
*net
)
209 return atomic_read(&net
->ipv4
.rt_genid
);
212 #ifdef CONFIG_PROC_FS
213 static void *rt_cache_seq_start(struct seq_file
*seq
, loff_t
*pos
)
217 return SEQ_START_TOKEN
;
220 static void *rt_cache_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
226 static void rt_cache_seq_stop(struct seq_file
*seq
, void *v
)
230 static int rt_cache_seq_show(struct seq_file
*seq
, void *v
)
232 if (v
== SEQ_START_TOKEN
)
233 seq_printf(seq
, "%-127s\n",
234 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
235 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
240 static const struct seq_operations rt_cache_seq_ops
= {
241 .start
= rt_cache_seq_start
,
242 .next
= rt_cache_seq_next
,
243 .stop
= rt_cache_seq_stop
,
244 .show
= rt_cache_seq_show
,
247 static int rt_cache_seq_open(struct inode
*inode
, struct file
*file
)
249 return seq_open(file
, &rt_cache_seq_ops
);
252 static const struct file_operations rt_cache_seq_fops
= {
253 .owner
= THIS_MODULE
,
254 .open
= rt_cache_seq_open
,
257 .release
= seq_release
,
261 static void *rt_cpu_seq_start(struct seq_file
*seq
, loff_t
*pos
)
266 return SEQ_START_TOKEN
;
268 for (cpu
= *pos
-1; cpu
< nr_cpu_ids
; ++cpu
) {
269 if (!cpu_possible(cpu
))
272 return &per_cpu(rt_cache_stat
, cpu
);
277 static void *rt_cpu_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
281 for (cpu
= *pos
; cpu
< nr_cpu_ids
; ++cpu
) {
282 if (!cpu_possible(cpu
))
285 return &per_cpu(rt_cache_stat
, cpu
);
291 static void rt_cpu_seq_stop(struct seq_file
*seq
, void *v
)
296 static int rt_cpu_seq_show(struct seq_file
*seq
, void *v
)
298 struct rt_cache_stat
*st
= v
;
300 if (v
== SEQ_START_TOKEN
) {
301 seq_printf(seq
, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
305 seq_printf(seq
,"%08x %08x %08x %08x %08x %08x %08x %08x "
306 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
307 dst_entries_get_slow(&ipv4_dst_ops
),
330 static const struct seq_operations rt_cpu_seq_ops
= {
331 .start
= rt_cpu_seq_start
,
332 .next
= rt_cpu_seq_next
,
333 .stop
= rt_cpu_seq_stop
,
334 .show
= rt_cpu_seq_show
,
338 static int rt_cpu_seq_open(struct inode
*inode
, struct file
*file
)
340 return seq_open(file
, &rt_cpu_seq_ops
);
343 static const struct file_operations rt_cpu_seq_fops
= {
344 .owner
= THIS_MODULE
,
345 .open
= rt_cpu_seq_open
,
348 .release
= seq_release
,
351 #ifdef CONFIG_IP_ROUTE_CLASSID
352 static int rt_acct_proc_show(struct seq_file
*m
, void *v
)
354 struct ip_rt_acct
*dst
, *src
;
357 dst
= kcalloc(256, sizeof(struct ip_rt_acct
), GFP_KERNEL
);
361 for_each_possible_cpu(i
) {
362 src
= (struct ip_rt_acct
*)per_cpu_ptr(ip_rt_acct
, i
);
363 for (j
= 0; j
< 256; j
++) {
364 dst
[j
].o_bytes
+= src
[j
].o_bytes
;
365 dst
[j
].o_packets
+= src
[j
].o_packets
;
366 dst
[j
].i_bytes
+= src
[j
].i_bytes
;
367 dst
[j
].i_packets
+= src
[j
].i_packets
;
371 seq_write(m
, dst
, 256 * sizeof(struct ip_rt_acct
));
376 static int rt_acct_proc_open(struct inode
*inode
, struct file
*file
)
378 return single_open(file
, rt_acct_proc_show
, NULL
);
381 static const struct file_operations rt_acct_proc_fops
= {
382 .owner
= THIS_MODULE
,
383 .open
= rt_acct_proc_open
,
386 .release
= single_release
,
390 static int __net_init
ip_rt_do_proc_init(struct net
*net
)
392 struct proc_dir_entry
*pde
;
394 pde
= proc_net_fops_create(net
, "rt_cache", S_IRUGO
,
399 pde
= proc_create("rt_cache", S_IRUGO
,
400 net
->proc_net_stat
, &rt_cpu_seq_fops
);
404 #ifdef CONFIG_IP_ROUTE_CLASSID
405 pde
= proc_create("rt_acct", 0, net
->proc_net
, &rt_acct_proc_fops
);
411 #ifdef CONFIG_IP_ROUTE_CLASSID
413 remove_proc_entry("rt_cache", net
->proc_net_stat
);
416 remove_proc_entry("rt_cache", net
->proc_net
);
421 static void __net_exit
ip_rt_do_proc_exit(struct net
*net
)
423 remove_proc_entry("rt_cache", net
->proc_net_stat
);
424 remove_proc_entry("rt_cache", net
->proc_net
);
425 #ifdef CONFIG_IP_ROUTE_CLASSID
426 remove_proc_entry("rt_acct", net
->proc_net
);
430 static struct pernet_operations ip_rt_proc_ops __net_initdata
= {
431 .init
= ip_rt_do_proc_init
,
432 .exit
= ip_rt_do_proc_exit
,
435 static int __init
ip_rt_proc_init(void)
437 return register_pernet_subsys(&ip_rt_proc_ops
);
/* Stub when CONFIG_PROC_FS is disabled: nothing to register. */
static inline int ip_rt_proc_init(void)
{
	return 0;
}
445 #endif /* CONFIG_PROC_FS */
447 static inline bool rt_is_expired(const struct rtable
*rth
)
449 return rth
->rt_genid
!= rt_genid(dev_net(rth
->dst
.dev
));
453 * Perturbation of rt_genid by a small quantity [1..256]
454 * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
455 * many times (2^24) without giving recent rt_genid.
456 * Jenkins hash is strong enough that little changes of rt_genid are OK.
458 static void rt_cache_invalidate(struct net
*net
)
460 unsigned char shuffle
;
462 get_random_bytes(&shuffle
, sizeof(shuffle
));
463 atomic_add(shuffle
+ 1U, &net
->ipv4
.rt_genid
);
467 * delay < 0 : invalidate cache (fast : entries will be deleted later)
468 * delay >= 0 : invalidate & flush cache (can be long)
/* Flush the routing cache for @net.  @delay is kept for API
 * compatibility; with the generation-id scheme invalidation is all
 * that is needed (entries die lazily).
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
}
475 static struct neighbour
*ipv4_neigh_lookup(const struct dst_entry
*dst
,
479 struct net_device
*dev
= dst
->dev
;
480 const __be32
*pkey
= daddr
;
481 const struct rtable
*rt
;
484 rt
= (const struct rtable
*) dst
;
486 pkey
= (const __be32
*) &rt
->rt_gateway
;
488 pkey
= &ip_hdr(skb
)->daddr
;
490 n
= __ipv4_neigh_lookup(dev
, *(__force u32
*)pkey
);
493 return neigh_create(&arp_tbl
, pkey
, dev
);
497 * Peer allocation may fail only in serious out-of-memory conditions. However
498 * we still can generate some output.
499 * Random ID selection looks a bit dangerous because we have no chances to
500 * select ID being unique in a reasonable period of time.
501 * But broken packet identifier may be better than no packet at all.
503 static void ip_select_fb_ident(struct iphdr
*iph
)
505 static DEFINE_SPINLOCK(ip_fb_id_lock
);
506 static u32 ip_fallback_id
;
509 spin_lock_bh(&ip_fb_id_lock
);
510 salt
= secure_ip_id((__force __be32
)ip_fallback_id
^ iph
->daddr
);
511 iph
->id
= htons(salt
& 0xFFFF);
512 ip_fallback_id
= salt
;
513 spin_unlock_bh(&ip_fb_id_lock
);
516 void __ip_select_ident(struct iphdr
*iph
, struct dst_entry
*dst
, int more
)
518 struct net
*net
= dev_net(dst
->dev
);
519 struct inet_peer
*peer
;
521 peer
= inet_getpeer_v4(net
->ipv4
.peers
, iph
->daddr
, 1);
523 iph
->id
= htons(inet_getid(peer
, more
));
528 ip_select_fb_ident(iph
);
530 EXPORT_SYMBOL(__ip_select_ident
);
532 static void __build_flow_key(struct flowi4
*fl4
, const struct sock
*sk
,
533 const struct iphdr
*iph
,
535 u8 prot
, u32 mark
, int flow_flags
)
538 const struct inet_sock
*inet
= inet_sk(sk
);
540 oif
= sk
->sk_bound_dev_if
;
542 tos
= RT_CONN_FLAGS(sk
);
543 prot
= inet
->hdrincl
? IPPROTO_RAW
: sk
->sk_protocol
;
545 flowi4_init_output(fl4
, oif
, mark
, tos
,
546 RT_SCOPE_UNIVERSE
, prot
,
548 iph
->daddr
, iph
->saddr
, 0, 0);
551 static void build_skb_flow_key(struct flowi4
*fl4
, const struct sk_buff
*skb
,
552 const struct sock
*sk
)
554 const struct iphdr
*iph
= ip_hdr(skb
);
555 int oif
= skb
->dev
->ifindex
;
556 u8 tos
= RT_TOS(iph
->tos
);
557 u8 prot
= iph
->protocol
;
558 u32 mark
= skb
->mark
;
560 __build_flow_key(fl4
, sk
, iph
, oif
, tos
, prot
, mark
, 0);
563 static void build_sk_flow_key(struct flowi4
*fl4
, const struct sock
*sk
)
565 const struct inet_sock
*inet
= inet_sk(sk
);
566 const struct ip_options_rcu
*inet_opt
;
567 __be32 daddr
= inet
->inet_daddr
;
570 inet_opt
= rcu_dereference(inet
->inet_opt
);
571 if (inet_opt
&& inet_opt
->opt
.srr
)
572 daddr
= inet_opt
->opt
.faddr
;
573 flowi4_init_output(fl4
, sk
->sk_bound_dev_if
, sk
->sk_mark
,
574 RT_CONN_FLAGS(sk
), RT_SCOPE_UNIVERSE
,
575 inet
->hdrincl
? IPPROTO_RAW
: sk
->sk_protocol
,
576 inet_sk_flowi_flags(sk
),
577 daddr
, inet
->inet_saddr
, 0, 0);
/* Build a flow key from whichever source is available: the packet when
 * we have one, else the (connected) socket.
 */
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
590 static DEFINE_SEQLOCK(fnhe_seqlock
);
592 static struct fib_nh_exception
*fnhe_oldest(struct fnhe_hash_bucket
*hash
)
594 struct fib_nh_exception
*fnhe
, *oldest
;
596 oldest
= rcu_dereference(hash
->chain
);
597 for (fnhe
= rcu_dereference(oldest
->fnhe_next
); fnhe
;
598 fnhe
= rcu_dereference(fnhe
->fnhe_next
)) {
599 if (time_before(fnhe
->fnhe_stamp
, oldest
->fnhe_stamp
))
605 static inline u32
fnhe_hashfun(__be32 daddr
)
609 hval
= (__force u32
) daddr
;
610 hval
^= (hval
>> 11) ^ (hval
>> 22);
612 return hval
& (FNHE_HASH_SIZE
- 1);
615 static void update_or_create_fnhe(struct fib_nh
*nh
, __be32 daddr
, __be32 gw
,
616 u32 pmtu
, unsigned long expires
)
618 struct fnhe_hash_bucket
*hash
;
619 struct fib_nh_exception
*fnhe
;
621 u32 hval
= fnhe_hashfun(daddr
);
623 write_seqlock_bh(&fnhe_seqlock
);
625 hash
= nh
->nh_exceptions
;
627 hash
= kzalloc(FNHE_HASH_SIZE
* sizeof(*hash
), GFP_ATOMIC
);
630 nh
->nh_exceptions
= hash
;
636 for (fnhe
= rcu_dereference(hash
->chain
); fnhe
;
637 fnhe
= rcu_dereference(fnhe
->fnhe_next
)) {
638 if (fnhe
->fnhe_daddr
== daddr
)
647 fnhe
->fnhe_pmtu
= pmtu
;
648 fnhe
->fnhe_expires
= expires
;
651 if (depth
> FNHE_RECLAIM_DEPTH
)
652 fnhe
= fnhe_oldest(hash
);
654 fnhe
= kzalloc(sizeof(*fnhe
), GFP_ATOMIC
);
658 fnhe
->fnhe_next
= hash
->chain
;
659 rcu_assign_pointer(hash
->chain
, fnhe
);
661 fnhe
->fnhe_daddr
= daddr
;
663 fnhe
->fnhe_pmtu
= pmtu
;
664 fnhe
->fnhe_expires
= expires
;
667 fnhe
->fnhe_stamp
= jiffies
;
670 write_sequnlock_bh(&fnhe_seqlock
);
674 static void __ip_do_redirect(struct rtable
*rt
, struct sk_buff
*skb
, struct flowi4
*fl4
,
677 __be32 new_gw
= icmp_hdr(skb
)->un
.gateway
;
678 __be32 old_gw
= ip_hdr(skb
)->saddr
;
679 struct net_device
*dev
= skb
->dev
;
680 struct in_device
*in_dev
;
681 struct fib_result res
;
685 switch (icmp_hdr(skb
)->code
& 7) {
687 case ICMP_REDIR_NETTOS
:
688 case ICMP_REDIR_HOST
:
689 case ICMP_REDIR_HOSTTOS
:
696 if (rt
->rt_gateway
!= old_gw
)
699 in_dev
= __in_dev_get_rcu(dev
);
704 if (new_gw
== old_gw
|| !IN_DEV_RX_REDIRECTS(in_dev
) ||
705 ipv4_is_multicast(new_gw
) || ipv4_is_lbcast(new_gw
) ||
706 ipv4_is_zeronet(new_gw
))
707 goto reject_redirect
;
709 if (!IN_DEV_SHARED_MEDIA(in_dev
)) {
710 if (!inet_addr_onlink(in_dev
, new_gw
, old_gw
))
711 goto reject_redirect
;
712 if (IN_DEV_SEC_REDIRECTS(in_dev
) && ip_fib_check_default(new_gw
, dev
))
713 goto reject_redirect
;
715 if (inet_addr_type(net
, new_gw
) != RTN_UNICAST
)
716 goto reject_redirect
;
719 n
= ipv4_neigh_lookup(&rt
->dst
, NULL
, &new_gw
);
721 if (!(n
->nud_state
& NUD_VALID
)) {
722 neigh_event_send(n
, NULL
);
724 if (fib_lookup(net
, fl4
, &res
) == 0) {
725 struct fib_nh
*nh
= &FIB_RES_NH(res
);
727 update_or_create_fnhe(nh
, fl4
->daddr
, new_gw
,
731 rt
->dst
.obsolete
= DST_OBSOLETE_KILL
;
732 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE
, n
);
739 #ifdef CONFIG_IP_ROUTE_VERBOSE
740 if (IN_DEV_LOG_MARTIANS(in_dev
)) {
741 const struct iphdr
*iph
= (const struct iphdr
*) skb
->data
;
742 __be32 daddr
= iph
->daddr
;
743 __be32 saddr
= iph
->saddr
;
745 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
746 " Advised path = %pI4 -> %pI4\n",
747 &old_gw
, dev
->name
, &new_gw
,
754 static void ip_do_redirect(struct dst_entry
*dst
, struct sock
*sk
, struct sk_buff
*skb
)
759 rt
= (struct rtable
*) dst
;
761 ip_rt_build_flow_key(&fl4
, sk
, skb
);
762 __ip_do_redirect(rt
, skb
, &fl4
, true);
765 static struct dst_entry
*ipv4_negative_advice(struct dst_entry
*dst
)
767 struct rtable
*rt
= (struct rtable
*)dst
;
768 struct dst_entry
*ret
= dst
;
771 if (dst
->obsolete
> 0) {
774 } else if ((rt
->rt_flags
& RTCF_REDIRECTED
) ||
785 * 1. The first ip_rt_redirect_number redirects are sent
786 * with exponential backoff, then we stop sending them at all,
787 * assuming that the host ignores our redirects.
788 * 2. If we did not see packets requiring redirects
789 * during ip_rt_redirect_silence, we assume that the host
790 * forgot redirected route and start to send redirects again.
792 * This algorithm is much cheaper and more intelligent than dumb load limiting
795 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
796 * and "frag. need" (breaks PMTU discovery) in icmp.c.
799 void ip_rt_send_redirect(struct sk_buff
*skb
)
801 struct rtable
*rt
= skb_rtable(skb
);
802 struct in_device
*in_dev
;
803 struct inet_peer
*peer
;
808 in_dev
= __in_dev_get_rcu(rt
->dst
.dev
);
809 if (!in_dev
|| !IN_DEV_TX_REDIRECTS(in_dev
)) {
813 log_martians
= IN_DEV_LOG_MARTIANS(in_dev
);
816 net
= dev_net(rt
->dst
.dev
);
817 peer
= inet_getpeer_v4(net
->ipv4
.peers
, ip_hdr(skb
)->saddr
, 1);
819 icmp_send(skb
, ICMP_REDIRECT
, ICMP_REDIR_HOST
, rt
->rt_gateway
);
823 /* No redirected packets during ip_rt_redirect_silence;
824 * reset the algorithm.
826 if (time_after(jiffies
, peer
->rate_last
+ ip_rt_redirect_silence
))
827 peer
->rate_tokens
= 0;
829 /* Too many ignored redirects; do not send anything
830 * set dst.rate_last to the last seen redirected packet.
832 if (peer
->rate_tokens
>= ip_rt_redirect_number
) {
833 peer
->rate_last
= jiffies
;
837 /* Check for load limit; set rate_last to the latest sent
840 if (peer
->rate_tokens
== 0 ||
843 (ip_rt_redirect_load
<< peer
->rate_tokens
)))) {
844 icmp_send(skb
, ICMP_REDIRECT
, ICMP_REDIR_HOST
, rt
->rt_gateway
);
845 peer
->rate_last
= jiffies
;
847 #ifdef CONFIG_IP_ROUTE_VERBOSE
849 peer
->rate_tokens
== ip_rt_redirect_number
)
850 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
851 &ip_hdr(skb
)->saddr
, inet_iif(skb
),
852 &ip_hdr(skb
)->daddr
, &rt
->rt_gateway
);
859 static int ip_error(struct sk_buff
*skb
)
861 struct in_device
*in_dev
= __in_dev_get_rcu(skb
->dev
);
862 struct rtable
*rt
= skb_rtable(skb
);
863 struct inet_peer
*peer
;
869 net
= dev_net(rt
->dst
.dev
);
870 if (!IN_DEV_FORWARD(in_dev
)) {
871 switch (rt
->dst
.error
) {
873 IP_INC_STATS_BH(net
, IPSTATS_MIB_INADDRERRORS
);
877 IP_INC_STATS_BH(net
, IPSTATS_MIB_INNOROUTES
);
883 switch (rt
->dst
.error
) {
888 code
= ICMP_HOST_UNREACH
;
891 code
= ICMP_NET_UNREACH
;
892 IP_INC_STATS_BH(net
, IPSTATS_MIB_INNOROUTES
);
895 code
= ICMP_PKT_FILTERED
;
899 peer
= inet_getpeer_v4(net
->ipv4
.peers
, ip_hdr(skb
)->saddr
, 1);
904 peer
->rate_tokens
+= now
- peer
->rate_last
;
905 if (peer
->rate_tokens
> ip_rt_error_burst
)
906 peer
->rate_tokens
= ip_rt_error_burst
;
907 peer
->rate_last
= now
;
908 if (peer
->rate_tokens
>= ip_rt_error_cost
)
909 peer
->rate_tokens
-= ip_rt_error_cost
;
915 icmp_send(skb
, ICMP_DEST_UNREACH
, code
, 0);
921 static u32
__ip_rt_update_pmtu(struct rtable
*rt
, struct flowi4
*fl4
, u32 mtu
)
923 struct fib_result res
;
925 if (mtu
< ip_rt_min_pmtu
)
926 mtu
= ip_rt_min_pmtu
;
928 if (fib_lookup(dev_net(rt
->dst
.dev
), fl4
, &res
) == 0) {
929 struct fib_nh
*nh
= &FIB_RES_NH(res
);
931 update_or_create_fnhe(nh
, fl4
->daddr
, 0, mtu
,
932 jiffies
+ ip_rt_mtu_expires
);
937 static void ip_rt_update_pmtu(struct dst_entry
*dst
, struct sock
*sk
,
938 struct sk_buff
*skb
, u32 mtu
)
940 struct rtable
*rt
= (struct rtable
*) dst
;
943 ip_rt_build_flow_key(&fl4
, sk
, skb
);
944 mtu
= __ip_rt_update_pmtu(rt
, &fl4
, mtu
);
947 dst
->obsolete
= DST_OBSOLETE_KILL
;
950 dst_set_expires(&rt
->dst
, ip_rt_mtu_expires
);
954 void ipv4_update_pmtu(struct sk_buff
*skb
, struct net
*net
, u32 mtu
,
955 int oif
, u32 mark
, u8 protocol
, int flow_flags
)
957 const struct iphdr
*iph
= (const struct iphdr
*) skb
->data
;
961 __build_flow_key(&fl4
, NULL
, iph
, oif
,
962 RT_TOS(iph
->tos
), protocol
, mark
, flow_flags
);
963 rt
= __ip_route_output_key(net
, &fl4
);
965 __ip_rt_update_pmtu(rt
, &fl4
, mtu
);
969 EXPORT_SYMBOL_GPL(ipv4_update_pmtu
);
971 void ipv4_sk_update_pmtu(struct sk_buff
*skb
, struct sock
*sk
, u32 mtu
)
973 const struct iphdr
*iph
= (const struct iphdr
*) skb
->data
;
977 __build_flow_key(&fl4
, sk
, iph
, 0, 0, 0, 0, 0);
978 rt
= __ip_route_output_key(sock_net(sk
), &fl4
);
980 __ip_rt_update_pmtu(rt
, &fl4
, mtu
);
984 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu
);
986 void ipv4_redirect(struct sk_buff
*skb
, struct net
*net
,
987 int oif
, u32 mark
, u8 protocol
, int flow_flags
)
989 const struct iphdr
*iph
= (const struct iphdr
*) skb
->data
;
993 __build_flow_key(&fl4
, NULL
, iph
, oif
,
994 RT_TOS(iph
->tos
), protocol
, mark
, flow_flags
);
995 rt
= __ip_route_output_key(net
, &fl4
);
997 __ip_do_redirect(rt
, skb
, &fl4
, false);
1001 EXPORT_SYMBOL_GPL(ipv4_redirect
);
1003 void ipv4_sk_redirect(struct sk_buff
*skb
, struct sock
*sk
)
1005 const struct iphdr
*iph
= (const struct iphdr
*) skb
->data
;
1009 __build_flow_key(&fl4
, sk
, iph
, 0, 0, 0, 0, 0);
1010 rt
= __ip_route_output_key(sock_net(sk
), &fl4
);
1012 __ip_do_redirect(rt
, skb
, &fl4
, false);
1016 EXPORT_SYMBOL_GPL(ipv4_sk_redirect
);
1018 static struct dst_entry
*ipv4_dst_check(struct dst_entry
*dst
, u32 cookie
)
1020 struct rtable
*rt
= (struct rtable
*) dst
;
1022 /* All IPV4 dsts are created with ->obsolete set to the value
1023 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1024 * into this function always.
1026 * When a PMTU/redirect information update invalidates a
1027 * route, this is indicated by setting obsolete to
1028 * DST_OBSOLETE_KILL.
1030 if (dst
->obsolete
== DST_OBSOLETE_KILL
|| rt_is_expired(rt
))
1035 static void ipv4_link_failure(struct sk_buff
*skb
)
1039 icmp_send(skb
, ICMP_DEST_UNREACH
, ICMP_HOST_UNREACH
, 0);
1041 rt
= skb_rtable(skb
);
1043 dst_set_expires(&rt
->dst
, 0);
1046 static int ip_rt_bug(struct sk_buff
*skb
)
1048 pr_debug("%s: %pI4 -> %pI4, %s\n",
1049 __func__
, &ip_hdr(skb
)->saddr
, &ip_hdr(skb
)->daddr
,
1050 skb
->dev
? skb
->dev
->name
: "?");
1057 We do not cache source address of outgoing interface,
1058 because it is used only by IP RR, TS and SRR options,
1059 so that it out of fast path.
1061 BTW remember: "addr" is allowed to be not aligned
1065 void ip_rt_get_source(u8
*addr
, struct sk_buff
*skb
, struct rtable
*rt
)
1069 if (rt_is_output_route(rt
))
1070 src
= ip_hdr(skb
)->saddr
;
1072 struct fib_result res
;
1078 memset(&fl4
, 0, sizeof(fl4
));
1079 fl4
.daddr
= iph
->daddr
;
1080 fl4
.saddr
= iph
->saddr
;
1081 fl4
.flowi4_tos
= RT_TOS(iph
->tos
);
1082 fl4
.flowi4_oif
= rt
->dst
.dev
->ifindex
;
1083 fl4
.flowi4_iif
= skb
->dev
->ifindex
;
1084 fl4
.flowi4_mark
= skb
->mark
;
1087 if (fib_lookup(dev_net(rt
->dst
.dev
), &fl4
, &res
) == 0)
1088 src
= FIB_RES_PREFSRC(dev_net(rt
->dst
.dev
), res
);
1090 src
= inet_select_addr(rt
->dst
.dev
,
1091 rt_nexthop(rt
, iph
->daddr
),
1095 memcpy(addr
, &src
, 4);
1098 #ifdef CONFIG_IP_ROUTE_CLASSID
1099 static void set_class_tag(struct rtable
*rt
, u32 tag
)
1101 if (!(rt
->dst
.tclassid
& 0xFFFF))
1102 rt
->dst
.tclassid
|= tag
& 0xFFFF;
1103 if (!(rt
->dst
.tclassid
& 0xFFFF0000))
1104 rt
->dst
.tclassid
|= tag
& 0xFFFF0000;
1108 static unsigned int ipv4_default_advmss(const struct dst_entry
*dst
)
1110 unsigned int advmss
= dst_metric_raw(dst
, RTAX_ADVMSS
);
1113 advmss
= max_t(unsigned int, dst
->dev
->mtu
- 40,
1115 if (advmss
> 65535 - 40)
1116 advmss
= 65535 - 40;
1121 static unsigned int ipv4_mtu(const struct dst_entry
*dst
)
1123 const struct rtable
*rt
= (const struct rtable
*) dst
;
1124 unsigned int mtu
= rt
->rt_pmtu
;
1126 if (mtu
&& time_after_eq(jiffies
, rt
->dst
.expires
))
1130 mtu
= dst_metric_raw(dst
, RTAX_MTU
);
1132 if (mtu
&& rt_is_output_route(rt
))
1135 mtu
= dst
->dev
->mtu
;
1137 if (unlikely(dst_metric_locked(dst
, RTAX_MTU
))) {
1138 if (rt
->rt_gateway
&& mtu
> 576)
1142 if (mtu
> IP_MAX_MTU
)
1148 static struct fib_nh_exception
*find_exception(struct fib_nh
*nh
, __be32 daddr
)
1150 struct fnhe_hash_bucket
*hash
= nh
->nh_exceptions
;
1151 struct fib_nh_exception
*fnhe
;
1157 hval
= fnhe_hashfun(daddr
);
1159 for (fnhe
= rcu_dereference(hash
[hval
].chain
); fnhe
;
1160 fnhe
= rcu_dereference(fnhe
->fnhe_next
)) {
1161 if (fnhe
->fnhe_daddr
== daddr
)
1167 static void rt_bind_exception(struct rtable
*rt
, struct fib_nh_exception
*fnhe
,
1170 __be32 fnhe_daddr
, gw
;
1171 unsigned long expires
;
1176 seq
= read_seqbegin(&fnhe_seqlock
);
1177 fnhe_daddr
= fnhe
->fnhe_daddr
;
1179 pmtu
= fnhe
->fnhe_pmtu
;
1180 expires
= fnhe
->fnhe_expires
;
1181 if (read_seqretry(&fnhe_seqlock
, seq
))
1184 if (daddr
!= fnhe_daddr
)
1188 unsigned long diff
= expires
- jiffies
;
1190 if (time_before(jiffies
, expires
)) {
1192 dst_set_expires(&rt
->dst
, diff
);
1196 rt
->rt_flags
|= RTCF_REDIRECTED
;
1197 rt
->rt_gateway
= gw
;
1199 fnhe
->fnhe_stamp
= jiffies
;
1202 static inline void rt_release_rcu(struct rcu_head
*head
)
1204 struct dst_entry
*dst
= container_of(head
, struct dst_entry
, rcu_head
);
1208 static void rt_cache_route(struct fib_nh
*nh
, struct rtable
*rt
)
1210 struct rtable
*orig
, *prev
, **p
= &nh
->nh_rth_output
;
1212 if (rt_is_input_route(rt
))
1213 p
= &nh
->nh_rth_input
;
1217 prev
= cmpxchg(p
, orig
, rt
);
1219 dst_clone(&rt
->dst
);
1221 call_rcu_bh(&orig
->dst
.rcu_head
, rt_release_rcu
);
1225 static bool rt_cache_valid(const struct rtable
*rt
)
1228 rt
->dst
.obsolete
== DST_OBSOLETE_FORCE_CHK
&&
1232 static void rt_set_nexthop(struct rtable
*rt
, __be32 daddr
,
1233 const struct fib_result
*res
,
1234 struct fib_nh_exception
*fnhe
,
1235 struct fib_info
*fi
, u16 type
, u32 itag
)
1238 struct fib_nh
*nh
= &FIB_RES_NH(*res
);
1240 if (nh
->nh_gw
&& nh
->nh_scope
== RT_SCOPE_LINK
)
1241 rt
->rt_gateway
= nh
->nh_gw
;
1243 rt_bind_exception(rt
, fnhe
, daddr
);
1244 dst_init_metrics(&rt
->dst
, fi
->fib_metrics
, true);
1245 #ifdef CONFIG_IP_ROUTE_CLASSID
1246 rt
->dst
.tclassid
= nh
->nh_tclassid
;
1248 if (!(rt
->dst
.flags
& DST_HOST
))
1249 rt_cache_route(nh
, rt
);
1252 #ifdef CONFIG_IP_ROUTE_CLASSID
1253 #ifdef CONFIG_IP_MULTIPLE_TABLES
1254 set_class_tag(rt
, res
->tclassid
);
1256 set_class_tag(rt
, itag
);
1260 static struct rtable
*rt_dst_alloc(struct net_device
*dev
,
1261 bool nopolicy
, bool noxfrm
, bool will_cache
)
1263 return dst_alloc(&ipv4_dst_ops
, dev
, 1, DST_OBSOLETE_FORCE_CHK
,
1264 (will_cache
? 0 : DST_HOST
) | DST_NOCACHE
|
1265 (nopolicy
? DST_NOPOLICY
: 0) |
1266 (noxfrm
? DST_NOXFRM
: 0));
1269 /* called in rcu_read_lock() section */
1270 static int ip_route_input_mc(struct sk_buff
*skb
, __be32 daddr
, __be32 saddr
,
1271 u8 tos
, struct net_device
*dev
, int our
)
1274 struct in_device
*in_dev
= __in_dev_get_rcu(dev
);
1278 /* Primary sanity checks. */
1283 if (ipv4_is_multicast(saddr
) || ipv4_is_lbcast(saddr
) ||
1284 skb
->protocol
!= htons(ETH_P_IP
))
1287 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev
)))
1288 if (ipv4_is_loopback(saddr
))
1291 if (ipv4_is_zeronet(saddr
)) {
1292 if (!ipv4_is_local_multicast(daddr
))
1295 err
= fib_validate_source(skb
, saddr
, 0, tos
, 0, dev
,
1300 rth
= rt_dst_alloc(dev_net(dev
)->loopback_dev
,
1301 IN_DEV_CONF_GET(in_dev
, NOPOLICY
), false, false);
1305 #ifdef CONFIG_IP_ROUTE_CLASSID
1306 rth
->dst
.tclassid
= itag
;
1308 rth
->dst
.output
= ip_rt_bug
;
1310 rth
->rt_genid
= rt_genid(dev_net(dev
));
1311 rth
->rt_flags
= RTCF_MULTICAST
;
1312 rth
->rt_type
= RTN_MULTICAST
;
1313 rth
->rt_is_input
= 1;
1316 rth
->rt_gateway
= 0;
1318 rth
->dst
.input
= ip_local_deliver
;
1319 rth
->rt_flags
|= RTCF_LOCAL
;
1322 #ifdef CONFIG_IP_MROUTE
1323 if (!ipv4_is_local_multicast(daddr
) && IN_DEV_MFORWARD(in_dev
))
1324 rth
->dst
.input
= ip_mr_input
;
1326 RT_CACHE_STAT_INC(in_slow_mc
);
1328 skb_dst_set(skb
, &rth
->dst
);
1340 static void ip_handle_martian_source(struct net_device
*dev
,
1341 struct in_device
*in_dev
,
1342 struct sk_buff
*skb
,
1346 RT_CACHE_STAT_INC(in_martian_src
);
1347 #ifdef CONFIG_IP_ROUTE_VERBOSE
1348 if (IN_DEV_LOG_MARTIANS(in_dev
) && net_ratelimit()) {
1350 * RFC1812 recommendation, if source is martian,
1351 * the only hint is MAC header.
1353 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1354 &daddr
, &saddr
, dev
->name
);
1355 if (dev
->hard_header_len
&& skb_mac_header_was_set(skb
)) {
1356 print_hex_dump(KERN_WARNING
, "ll header: ",
1357 DUMP_PREFIX_OFFSET
, 16, 1,
1358 skb_mac_header(skb
),
1359 dev
->hard_header_len
, true);
1365 /* called in rcu_read_lock() section */
1366 static int __mkroute_input(struct sk_buff
*skb
,
1367 const struct fib_result
*res
,
1368 struct in_device
*in_dev
,
1369 __be32 daddr
, __be32 saddr
, u32 tos
,
1370 struct rtable
**result
)
1374 struct in_device
*out_dev
;
1375 unsigned int flags
= 0;
1379 /* get a working reference to the output device */
1380 out_dev
= __in_dev_get_rcu(FIB_RES_DEV(*res
));
1381 if (out_dev
== NULL
) {
1382 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1387 err
= fib_validate_source(skb
, saddr
, daddr
, tos
, FIB_RES_OIF(*res
),
1388 in_dev
->dev
, in_dev
, &itag
);
1390 ip_handle_martian_source(in_dev
->dev
, in_dev
, skb
, daddr
,
1396 if (out_dev
== in_dev
&& err
&&
1397 (IN_DEV_SHARED_MEDIA(out_dev
) ||
1398 inet_addr_onlink(out_dev
, saddr
, FIB_RES_GW(*res
))))
1399 flags
|= RTCF_DOREDIRECT
;
1401 if (skb
->protocol
!= htons(ETH_P_IP
)) {
1402 /* Not IP (i.e. ARP). Do not create route, if it is
1403 * invalid for proxy arp. DNAT routes are always valid.
1405 * Proxy arp feature have been extended to allow, ARP
1406 * replies back to the same interface, to support
1407 * Private VLAN switch technologies. See arp.c.
1409 if (out_dev
== in_dev
&&
1410 IN_DEV_PROXY_ARP_PVLAN(in_dev
) == 0) {
1419 rth
= FIB_RES_NH(*res
).nh_rth_input
;
1420 if (rt_cache_valid(rth
)) {
1421 dst_hold(&rth
->dst
);
1428 rth
= rt_dst_alloc(out_dev
->dev
,
1429 IN_DEV_CONF_GET(in_dev
, NOPOLICY
),
1430 IN_DEV_CONF_GET(out_dev
, NOXFRM
), do_cache
);
1436 rth
->rt_genid
= rt_genid(dev_net(rth
->dst
.dev
));
1437 rth
->rt_flags
= flags
;
1438 rth
->rt_type
= res
->type
;
1439 rth
->rt_is_input
= 1;
1442 rth
->rt_gateway
= 0;
1444 rth
->dst
.input
= ip_forward
;
1445 rth
->dst
.output
= ip_output
;
1447 rt_set_nexthop(rth
, daddr
, res
, NULL
, res
->fi
, res
->type
, itag
);
1455 static int ip_mkroute_input(struct sk_buff
*skb
,
1456 struct fib_result
*res
,
1457 const struct flowi4
*fl4
,
1458 struct in_device
*in_dev
,
1459 __be32 daddr
, __be32 saddr
, u32 tos
)
1461 struct rtable
*rth
= NULL
;
1464 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1465 if (res
->fi
&& res
->fi
->fib_nhs
> 1)
1466 fib_select_multipath(res
);
1469 /* create a routing cache entry */
1470 err
= __mkroute_input(skb
, res
, in_dev
, daddr
, saddr
, tos
, &rth
);
1474 skb_dst_set(skb
, &rth
->dst
);
1479 * NOTE. We drop all the packets that has local source
1480 * addresses, because every properly looped back packet
1481 * must have correct destination already attached by output routine.
1483 * Such approach solves two big problems:
1484 * 1. Not simplex devices are handled properly.
1485 * 2. IP spoofing attempts are filtered with 100% of guarantee.
1486 * called with rcu_read_lock()
1489 static int ip_route_input_slow(struct sk_buff
*skb
, __be32 daddr
, __be32 saddr
,
1490 u8 tos
, struct net_device
*dev
)
1492 struct fib_result res
;
1493 struct in_device
*in_dev
= __in_dev_get_rcu(dev
);
1495 unsigned int flags
= 0;
1499 struct net
*net
= dev_net(dev
);
1502 /* IP on this device is disabled. */
1507 /* Check for the most weird martians, which can be not detected
1511 if (ipv4_is_multicast(saddr
) || ipv4_is_lbcast(saddr
))
1512 goto martian_source
;
1515 if (ipv4_is_lbcast(daddr
) || (saddr
== 0 && daddr
== 0))
1518 /* Accept zero addresses only to limited broadcast;
1519 * I even do not know to fix it or not. Waiting for complains :-)
1521 if (ipv4_is_zeronet(saddr
))
1522 goto martian_source
;
1524 if (ipv4_is_zeronet(daddr
))
1525 goto martian_destination
;
1527 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev
))) {
1528 if (ipv4_is_loopback(daddr
))
1529 goto martian_destination
;
1531 if (ipv4_is_loopback(saddr
))
1532 goto martian_source
;
1536 * Now we are ready to route packet.
1539 fl4
.flowi4_iif
= dev
->ifindex
;
1540 fl4
.flowi4_mark
= skb
->mark
;
1541 fl4
.flowi4_tos
= tos
;
1542 fl4
.flowi4_scope
= RT_SCOPE_UNIVERSE
;
1545 err
= fib_lookup(net
, &fl4
, &res
);
1549 RT_CACHE_STAT_INC(in_slow_tot
);
1551 if (res
.type
== RTN_BROADCAST
)
1554 if (res
.type
== RTN_LOCAL
) {
1555 err
= fib_validate_source(skb
, saddr
, daddr
, tos
,
1556 net
->loopback_dev
->ifindex
,
1557 dev
, in_dev
, &itag
);
1559 goto martian_source_keep_err
;
1563 if (!IN_DEV_FORWARD(in_dev
))
1565 if (res
.type
!= RTN_UNICAST
)
1566 goto martian_destination
;
1568 err
= ip_mkroute_input(skb
, &res
, &fl4
, in_dev
, daddr
, saddr
, tos
);
1572 if (skb
->protocol
!= htons(ETH_P_IP
))
1575 if (!ipv4_is_zeronet(saddr
)) {
1576 err
= fib_validate_source(skb
, saddr
, 0, tos
, 0, dev
,
1579 goto martian_source_keep_err
;
1581 flags
|= RTCF_BROADCAST
;
1582 res
.type
= RTN_BROADCAST
;
1583 RT_CACHE_STAT_INC(in_brd
);
1589 rth
= FIB_RES_NH(res
).nh_rth_input
;
1590 if (rt_cache_valid(rth
)) {
1591 dst_hold(&rth
->dst
);
1598 rth
= rt_dst_alloc(net
->loopback_dev
,
1599 IN_DEV_CONF_GET(in_dev
, NOPOLICY
), false, do_cache
);
1603 rth
->dst
.input
= ip_local_deliver
;
1604 rth
->dst
.output
= ip_rt_bug
;
1605 #ifdef CONFIG_IP_ROUTE_CLASSID
1606 rth
->dst
.tclassid
= itag
;
1609 rth
->rt_genid
= rt_genid(net
);
1610 rth
->rt_flags
= flags
|RTCF_LOCAL
;
1611 rth
->rt_type
= res
.type
;
1612 rth
->rt_is_input
= 1;
1615 rth
->rt_gateway
= 0;
1616 if (res
.type
== RTN_UNREACHABLE
) {
1617 rth
->dst
.input
= ip_error
;
1618 rth
->dst
.error
= -err
;
1619 rth
->rt_flags
&= ~RTCF_LOCAL
;
1622 rt_cache_route(&FIB_RES_NH(res
), rth
);
1624 skb_dst_set(skb
, &rth
->dst
);
1629 RT_CACHE_STAT_INC(in_no_route
);
1630 res
.type
= RTN_UNREACHABLE
;
1636 * Do not cache martian addresses: they should be logged (RFC1812)
1638 martian_destination
:
1639 RT_CACHE_STAT_INC(in_martian_dst
);
1640 #ifdef CONFIG_IP_ROUTE_VERBOSE
1641 if (IN_DEV_LOG_MARTIANS(in_dev
))
1642 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
1643 &daddr
, &saddr
, dev
->name
);
1656 martian_source_keep_err
:
1657 ip_handle_martian_source(dev
, in_dev
, skb
, daddr
, saddr
);
1661 int ip_route_input(struct sk_buff
*skb
, __be32 daddr
, __be32 saddr
,
1662 u8 tos
, struct net_device
*dev
)
1668 /* Multicast recognition logic is moved from route cache to here.
1669 The problem was that too many Ethernet cards have broken/missing
1670 hardware multicast filters :-( As result the host on multicasting
1671 network acquires a lot of useless route cache entries, sort of
1672 SDR messages from all the world. Now we try to get rid of them.
1673 Really, provided software IP multicast filter is organized
1674 reasonably (at least, hashed), it does not result in a slowdown
1675 comparing with route cache reject entries.
1676 Note, that multicast routers are not affected, because
1677 route cache entry is created eventually.
1679 if (ipv4_is_multicast(daddr
)) {
1680 struct in_device
*in_dev
= __in_dev_get_rcu(dev
);
1683 int our
= ip_check_mc_rcu(in_dev
, daddr
, saddr
,
1684 ip_hdr(skb
)->protocol
);
1686 #ifdef CONFIG_IP_MROUTE
1688 (!ipv4_is_local_multicast(daddr
) &&
1689 IN_DEV_MFORWARD(in_dev
))
1692 int res
= ip_route_input_mc(skb
, daddr
, saddr
,
1701 res
= ip_route_input_slow(skb
, daddr
, saddr
, tos
, dev
);
1705 EXPORT_SYMBOL(ip_route_input
);
1707 /* called with rcu_read_lock() */
1708 static struct rtable
*__mkroute_output(const struct fib_result
*res
,
1709 const struct flowi4
*fl4
, int orig_oif
,
1710 struct net_device
*dev_out
,
1713 struct fib_info
*fi
= res
->fi
;
1714 struct fib_nh_exception
*fnhe
;
1715 struct in_device
*in_dev
;
1716 u16 type
= res
->type
;
1719 in_dev
= __in_dev_get_rcu(dev_out
);
1721 return ERR_PTR(-EINVAL
);
1723 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev
)))
1724 if (ipv4_is_loopback(fl4
->saddr
) && !(dev_out
->flags
& IFF_LOOPBACK
))
1725 return ERR_PTR(-EINVAL
);
1727 if (ipv4_is_lbcast(fl4
->daddr
))
1728 type
= RTN_BROADCAST
;
1729 else if (ipv4_is_multicast(fl4
->daddr
))
1730 type
= RTN_MULTICAST
;
1731 else if (ipv4_is_zeronet(fl4
->daddr
))
1732 return ERR_PTR(-EINVAL
);
1734 if (dev_out
->flags
& IFF_LOOPBACK
)
1735 flags
|= RTCF_LOCAL
;
1737 if (type
== RTN_BROADCAST
) {
1738 flags
|= RTCF_BROADCAST
| RTCF_LOCAL
;
1740 } else if (type
== RTN_MULTICAST
) {
1741 flags
|= RTCF_MULTICAST
| RTCF_LOCAL
;
1742 if (!ip_check_mc_rcu(in_dev
, fl4
->daddr
, fl4
->saddr
,
1744 flags
&= ~RTCF_LOCAL
;
1745 /* If multicast route do not exist use
1746 * default one, but do not gateway in this case.
1749 if (fi
&& res
->prefixlen
< 4)
1755 fnhe
= find_exception(&FIB_RES_NH(*res
), fl4
->daddr
);
1757 rth
= FIB_RES_NH(*res
).nh_rth_output
;
1758 if (rt_cache_valid(rth
)) {
1759 dst_hold(&rth
->dst
);
1764 rth
= rt_dst_alloc(dev_out
,
1765 IN_DEV_CONF_GET(in_dev
, NOPOLICY
),
1766 IN_DEV_CONF_GET(in_dev
, NOXFRM
),
1769 return ERR_PTR(-ENOBUFS
);
1771 rth
->dst
.output
= ip_output
;
1773 rth
->rt_genid
= rt_genid(dev_net(dev_out
));
1774 rth
->rt_flags
= flags
;
1775 rth
->rt_type
= type
;
1776 rth
->rt_is_input
= 0;
1777 rth
->rt_iif
= orig_oif
? : 0;
1779 rth
->rt_gateway
= 0;
1781 RT_CACHE_STAT_INC(out_slow_tot
);
1783 if (flags
& RTCF_LOCAL
)
1784 rth
->dst
.input
= ip_local_deliver
;
1785 if (flags
& (RTCF_BROADCAST
| RTCF_MULTICAST
)) {
1786 if (flags
& RTCF_LOCAL
&&
1787 !(dev_out
->flags
& IFF_LOOPBACK
)) {
1788 rth
->dst
.output
= ip_mc_output
;
1789 RT_CACHE_STAT_INC(out_slow_mc
);
1791 #ifdef CONFIG_IP_MROUTE
1792 if (type
== RTN_MULTICAST
) {
1793 if (IN_DEV_MFORWARD(in_dev
) &&
1794 !ipv4_is_local_multicast(fl4
->daddr
)) {
1795 rth
->dst
.input
= ip_mr_input
;
1796 rth
->dst
.output
= ip_mc_output
;
1802 rt_set_nexthop(rth
, fl4
->daddr
, res
, fnhe
, fi
, type
, 0);
1808 * Major route resolver routine.
1811 struct rtable
*__ip_route_output_key(struct net
*net
, struct flowi4
*fl4
)
1813 struct net_device
*dev_out
= NULL
;
1814 __u8 tos
= RT_FL_TOS(fl4
);
1815 unsigned int flags
= 0;
1816 struct fib_result res
;
1824 orig_oif
= fl4
->flowi4_oif
;
1826 fl4
->flowi4_iif
= net
->loopback_dev
->ifindex
;
1827 fl4
->flowi4_tos
= tos
& IPTOS_RT_MASK
;
1828 fl4
->flowi4_scope
= ((tos
& RTO_ONLINK
) ?
1829 RT_SCOPE_LINK
: RT_SCOPE_UNIVERSE
);
1833 rth
= ERR_PTR(-EINVAL
);
1834 if (ipv4_is_multicast(fl4
->saddr
) ||
1835 ipv4_is_lbcast(fl4
->saddr
) ||
1836 ipv4_is_zeronet(fl4
->saddr
))
1839 /* I removed check for oif == dev_out->oif here.
1840 It was wrong for two reasons:
1841 1. ip_dev_find(net, saddr) can return wrong iface, if saddr
1842 is assigned to multiple interfaces.
1843 2. Moreover, we are allowed to send packets with saddr
1844 of another iface. --ANK
1847 if (fl4
->flowi4_oif
== 0 &&
1848 (ipv4_is_multicast(fl4
->daddr
) ||
1849 ipv4_is_lbcast(fl4
->daddr
))) {
1850 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
1851 dev_out
= __ip_dev_find(net
, fl4
->saddr
, false);
1852 if (dev_out
== NULL
)
1855 /* Special hack: user can direct multicasts
1856 and limited broadcast via necessary interface
1857 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
1858 This hack is not just for fun, it allows
1859 vic,vat and friends to work.
1860 They bind socket to loopback, set ttl to zero
1861 and expect that it will work.
1862 From the viewpoint of routing cache they are broken,
1863 because we are not allowed to build multicast path
1864 with loopback source addr (look, routing cache
1865 cannot know, that ttl is zero, so that packet
1866 will not leave this host and route is valid).
1867 Luckily, this hack is good workaround.
1870 fl4
->flowi4_oif
= dev_out
->ifindex
;
1874 if (!(fl4
->flowi4_flags
& FLOWI_FLAG_ANYSRC
)) {
1875 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
1876 if (!__ip_dev_find(net
, fl4
->saddr
, false))
1882 if (fl4
->flowi4_oif
) {
1883 dev_out
= dev_get_by_index_rcu(net
, fl4
->flowi4_oif
);
1884 rth
= ERR_PTR(-ENODEV
);
1885 if (dev_out
== NULL
)
1888 /* RACE: Check return value of inet_select_addr instead. */
1889 if (!(dev_out
->flags
& IFF_UP
) || !__in_dev_get_rcu(dev_out
)) {
1890 rth
= ERR_PTR(-ENETUNREACH
);
1893 if (ipv4_is_local_multicast(fl4
->daddr
) ||
1894 ipv4_is_lbcast(fl4
->daddr
)) {
1896 fl4
->saddr
= inet_select_addr(dev_out
, 0,
1901 if (ipv4_is_multicast(fl4
->daddr
))
1902 fl4
->saddr
= inet_select_addr(dev_out
, 0,
1904 else if (!fl4
->daddr
)
1905 fl4
->saddr
= inet_select_addr(dev_out
, 0,
1911 fl4
->daddr
= fl4
->saddr
;
1913 fl4
->daddr
= fl4
->saddr
= htonl(INADDR_LOOPBACK
);
1914 dev_out
= net
->loopback_dev
;
1915 fl4
->flowi4_oif
= net
->loopback_dev
->ifindex
;
1916 res
.type
= RTN_LOCAL
;
1917 flags
|= RTCF_LOCAL
;
1921 if (fib_lookup(net
, fl4
, &res
)) {
1924 if (fl4
->flowi4_oif
) {
1925 /* Apparently, routing tables are wrong. Assume,
1926 that the destination is on link.
1929 Because we are allowed to send to iface
1930 even if it has NO routes and NO assigned
1931 addresses. When oif is specified, routing
1932 tables are looked up with only one purpose:
1933 to catch if destination is gatewayed, rather than
1934 direct. Moreover, if MSG_DONTROUTE is set,
1935 we send packet, ignoring both routing tables
1936 and ifaddr state. --ANK
1939 We could make it even if oif is unknown,
1940 likely IPv6, but we do not.
1943 if (fl4
->saddr
== 0)
1944 fl4
->saddr
= inet_select_addr(dev_out
, 0,
1946 res
.type
= RTN_UNICAST
;
1949 rth
= ERR_PTR(-ENETUNREACH
);
1953 if (res
.type
== RTN_LOCAL
) {
1955 if (res
.fi
->fib_prefsrc
)
1956 fl4
->saddr
= res
.fi
->fib_prefsrc
;
1958 fl4
->saddr
= fl4
->daddr
;
1960 dev_out
= net
->loopback_dev
;
1961 fl4
->flowi4_oif
= dev_out
->ifindex
;
1963 flags
|= RTCF_LOCAL
;
1967 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1968 if (res
.fi
->fib_nhs
> 1 && fl4
->flowi4_oif
== 0)
1969 fib_select_multipath(&res
);
1972 if (!res
.prefixlen
&&
1973 res
.table
->tb_num_default
> 1 &&
1974 res
.type
== RTN_UNICAST
&& !fl4
->flowi4_oif
)
1975 fib_select_default(&res
);
1978 fl4
->saddr
= FIB_RES_PREFSRC(net
, res
);
1980 dev_out
= FIB_RES_DEV(res
);
1981 fl4
->flowi4_oif
= dev_out
->ifindex
;
1985 rth
= __mkroute_output(&res
, fl4
, orig_oif
, dev_out
, flags
);
1991 EXPORT_SYMBOL_GPL(__ip_route_output_key
);
1993 static struct dst_entry
*ipv4_blackhole_dst_check(struct dst_entry
*dst
, u32 cookie
)
1998 static unsigned int ipv4_blackhole_mtu(const struct dst_entry
*dst
)
2000 unsigned int mtu
= dst_metric_raw(dst
, RTAX_MTU
);
2002 return mtu
? : dst
->dev
->mtu
;
2005 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry
*dst
, struct sock
*sk
,
2006 struct sk_buff
*skb
, u32 mtu
)
2010 static void ipv4_rt_blackhole_redirect(struct dst_entry
*dst
, struct sock
*sk
,
2011 struct sk_buff
*skb
)
2015 static u32
*ipv4_rt_blackhole_cow_metrics(struct dst_entry
*dst
,
2021 static struct dst_ops ipv4_dst_blackhole_ops
= {
2023 .protocol
= cpu_to_be16(ETH_P_IP
),
2024 .check
= ipv4_blackhole_dst_check
,
2025 .mtu
= ipv4_blackhole_mtu
,
2026 .default_advmss
= ipv4_default_advmss
,
2027 .update_pmtu
= ipv4_rt_blackhole_update_pmtu
,
2028 .redirect
= ipv4_rt_blackhole_redirect
,
2029 .cow_metrics
= ipv4_rt_blackhole_cow_metrics
,
2030 .neigh_lookup
= ipv4_neigh_lookup
,
2033 struct dst_entry
*ipv4_blackhole_route(struct net
*net
, struct dst_entry
*dst_orig
)
2035 struct rtable
*ort
= (struct rtable
*) dst_orig
;
2038 rt
= dst_alloc(&ipv4_dst_blackhole_ops
, NULL
, 1, DST_OBSOLETE_NONE
, 0);
2040 struct dst_entry
*new = &rt
->dst
;
2043 new->input
= dst_discard
;
2044 new->output
= dst_discard
;
2046 new->dev
= ort
->dst
.dev
;
2050 rt
->rt_is_input
= ort
->rt_is_input
;
2051 rt
->rt_iif
= ort
->rt_iif
;
2052 rt
->rt_pmtu
= ort
->rt_pmtu
;
2054 rt
->rt_genid
= rt_genid(net
);
2055 rt
->rt_flags
= ort
->rt_flags
;
2056 rt
->rt_type
= ort
->rt_type
;
2057 rt
->rt_gateway
= ort
->rt_gateway
;
2062 dst_release(dst_orig
);
2064 return rt
? &rt
->dst
: ERR_PTR(-ENOMEM
);
2067 struct rtable
*ip_route_output_flow(struct net
*net
, struct flowi4
*flp4
,
2070 struct rtable
*rt
= __ip_route_output_key(net
, flp4
);
2075 if (flp4
->flowi4_proto
)
2076 rt
= (struct rtable
*) xfrm_lookup(net
, &rt
->dst
,
2077 flowi4_to_flowi(flp4
),
2082 EXPORT_SYMBOL_GPL(ip_route_output_flow
);
2084 static int rt_fill_info(struct net
*net
, __be32 dst
, __be32 src
,
2085 struct flowi4
*fl4
, struct sk_buff
*skb
, u32 pid
,
2086 u32 seq
, int event
, int nowait
, unsigned int flags
)
2088 struct rtable
*rt
= skb_rtable(skb
);
2090 struct nlmsghdr
*nlh
;
2091 unsigned long expires
= 0;
2093 u32 metrics
[RTAX_MAX
];
2095 nlh
= nlmsg_put(skb
, pid
, seq
, event
, sizeof(*r
), flags
);
2099 r
= nlmsg_data(nlh
);
2100 r
->rtm_family
= AF_INET
;
2101 r
->rtm_dst_len
= 32;
2103 r
->rtm_tos
= fl4
->flowi4_tos
;
2104 r
->rtm_table
= RT_TABLE_MAIN
;
2105 if (nla_put_u32(skb
, RTA_TABLE
, RT_TABLE_MAIN
))
2106 goto nla_put_failure
;
2107 r
->rtm_type
= rt
->rt_type
;
2108 r
->rtm_scope
= RT_SCOPE_UNIVERSE
;
2109 r
->rtm_protocol
= RTPROT_UNSPEC
;
2110 r
->rtm_flags
= (rt
->rt_flags
& ~0xFFFF) | RTM_F_CLONED
;
2111 if (rt
->rt_flags
& RTCF_NOTIFY
)
2112 r
->rtm_flags
|= RTM_F_NOTIFY
;
2114 if (nla_put_be32(skb
, RTA_DST
, dst
))
2115 goto nla_put_failure
;
2117 r
->rtm_src_len
= 32;
2118 if (nla_put_be32(skb
, RTA_SRC
, src
))
2119 goto nla_put_failure
;
2122 nla_put_u32(skb
, RTA_OIF
, rt
->dst
.dev
->ifindex
))
2123 goto nla_put_failure
;
2124 #ifdef CONFIG_IP_ROUTE_CLASSID
2125 if (rt
->dst
.tclassid
&&
2126 nla_put_u32(skb
, RTA_FLOW
, rt
->dst
.tclassid
))
2127 goto nla_put_failure
;
2129 if (!rt_is_input_route(rt
) &&
2130 fl4
->saddr
!= src
) {
2131 if (nla_put_be32(skb
, RTA_PREFSRC
, fl4
->saddr
))
2132 goto nla_put_failure
;
2134 if (rt
->rt_gateway
&&
2135 nla_put_be32(skb
, RTA_GATEWAY
, rt
->rt_gateway
))
2136 goto nla_put_failure
;
2138 memcpy(metrics
, dst_metrics_ptr(&rt
->dst
), sizeof(metrics
));
2140 metrics
[RTAX_MTU
- 1] = rt
->rt_pmtu
;
2141 if (rtnetlink_put_metrics(skb
, metrics
) < 0)
2142 goto nla_put_failure
;
2144 if (fl4
->flowi4_mark
&&
2145 nla_put_be32(skb
, RTA_MARK
, fl4
->flowi4_mark
))
2146 goto nla_put_failure
;
2148 error
= rt
->dst
.error
;
2149 expires
= rt
->dst
.expires
;
2151 if (time_before(jiffies
, expires
))
2157 if (rt_is_input_route(rt
)) {
2158 if (nla_put_u32(skb
, RTA_IIF
, rt
->rt_iif
))
2159 goto nla_put_failure
;
2162 if (rtnl_put_cacheinfo(skb
, &rt
->dst
, 0, expires
, error
) < 0)
2163 goto nla_put_failure
;
2165 return nlmsg_end(skb
, nlh
);
2168 nlmsg_cancel(skb
, nlh
);
2172 static int inet_rtm_getroute(struct sk_buff
*in_skb
, struct nlmsghdr
*nlh
, void *arg
)
2174 struct net
*net
= sock_net(in_skb
->sk
);
2176 struct nlattr
*tb
[RTA_MAX
+1];
2177 struct rtable
*rt
= NULL
;
2184 struct sk_buff
*skb
;
2186 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
, rtm_ipv4_policy
);
2190 rtm
= nlmsg_data(nlh
);
2192 skb
= alloc_skb(NLMSG_GOODSIZE
, GFP_KERNEL
);
2198 /* Reserve room for dummy headers, this skb can pass
2199 through good chunk of routing engine.
2201 skb_reset_mac_header(skb
);
2202 skb_reset_network_header(skb
);
2204 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2205 ip_hdr(skb
)->protocol
= IPPROTO_ICMP
;
2206 skb_reserve(skb
, MAX_HEADER
+ sizeof(struct iphdr
));
2208 src
= tb
[RTA_SRC
] ? nla_get_be32(tb
[RTA_SRC
]) : 0;
2209 dst
= tb
[RTA_DST
] ? nla_get_be32(tb
[RTA_DST
]) : 0;
2210 iif
= tb
[RTA_IIF
] ? nla_get_u32(tb
[RTA_IIF
]) : 0;
2211 mark
= tb
[RTA_MARK
] ? nla_get_u32(tb
[RTA_MARK
]) : 0;
2213 memset(&fl4
, 0, sizeof(fl4
));
2216 fl4
.flowi4_tos
= rtm
->rtm_tos
;
2217 fl4
.flowi4_oif
= tb
[RTA_OIF
] ? nla_get_u32(tb
[RTA_OIF
]) : 0;
2218 fl4
.flowi4_mark
= mark
;
2221 struct net_device
*dev
;
2223 dev
= __dev_get_by_index(net
, iif
);
2229 skb
->protocol
= htons(ETH_P_IP
);
2233 err
= ip_route_input(skb
, dst
, src
, rtm
->rtm_tos
, dev
);
2236 rt
= skb_rtable(skb
);
2237 if (err
== 0 && rt
->dst
.error
)
2238 err
= -rt
->dst
.error
;
2240 rt
= ip_route_output_key(net
, &fl4
);
2250 skb_dst_set(skb
, &rt
->dst
);
2251 if (rtm
->rtm_flags
& RTM_F_NOTIFY
)
2252 rt
->rt_flags
|= RTCF_NOTIFY
;
2254 err
= rt_fill_info(net
, dst
, src
, &fl4
, skb
,
2255 NETLINK_CB(in_skb
).pid
, nlh
->nlmsg_seq
,
2256 RTM_NEWROUTE
, 0, 0);
2260 err
= rtnl_unicast(skb
, net
, NETLINK_CB(in_skb
).pid
);
2269 int ip_rt_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2274 void ip_rt_multicast_event(struct in_device
*in_dev
)
2276 rt_cache_flush(dev_net(in_dev
->dev
), 0);
2279 #ifdef CONFIG_SYSCTL
2280 static int ipv4_sysctl_rtcache_flush(ctl_table
*__ctl
, int write
,
2281 void __user
*buffer
,
2282 size_t *lenp
, loff_t
*ppos
)
2289 memcpy(&ctl
, __ctl
, sizeof(ctl
));
2290 ctl
.data
= &flush_delay
;
2291 proc_dointvec(&ctl
, write
, buffer
, lenp
, ppos
);
2293 net
= (struct net
*)__ctl
->extra1
;
2294 rt_cache_flush(net
, flush_delay
);
2301 static ctl_table ipv4_route_table
[] = {
2303 .procname
= "gc_thresh",
2304 .data
= &ipv4_dst_ops
.gc_thresh
,
2305 .maxlen
= sizeof(int),
2307 .proc_handler
= proc_dointvec
,
2310 .procname
= "max_size",
2311 .data
= &ip_rt_max_size
,
2312 .maxlen
= sizeof(int),
2314 .proc_handler
= proc_dointvec
,
2317 /* Deprecated. Use gc_min_interval_ms */
2319 .procname
= "gc_min_interval",
2320 .data
= &ip_rt_gc_min_interval
,
2321 .maxlen
= sizeof(int),
2323 .proc_handler
= proc_dointvec_jiffies
,
2326 .procname
= "gc_min_interval_ms",
2327 .data
= &ip_rt_gc_min_interval
,
2328 .maxlen
= sizeof(int),
2330 .proc_handler
= proc_dointvec_ms_jiffies
,
2333 .procname
= "gc_timeout",
2334 .data
= &ip_rt_gc_timeout
,
2335 .maxlen
= sizeof(int),
2337 .proc_handler
= proc_dointvec_jiffies
,
2340 .procname
= "gc_interval",
2341 .data
= &ip_rt_gc_interval
,
2342 .maxlen
= sizeof(int),
2344 .proc_handler
= proc_dointvec_jiffies
,
2347 .procname
= "redirect_load",
2348 .data
= &ip_rt_redirect_load
,
2349 .maxlen
= sizeof(int),
2351 .proc_handler
= proc_dointvec
,
2354 .procname
= "redirect_number",
2355 .data
= &ip_rt_redirect_number
,
2356 .maxlen
= sizeof(int),
2358 .proc_handler
= proc_dointvec
,
2361 .procname
= "redirect_silence",
2362 .data
= &ip_rt_redirect_silence
,
2363 .maxlen
= sizeof(int),
2365 .proc_handler
= proc_dointvec
,
2368 .procname
= "error_cost",
2369 .data
= &ip_rt_error_cost
,
2370 .maxlen
= sizeof(int),
2372 .proc_handler
= proc_dointvec
,
2375 .procname
= "error_burst",
2376 .data
= &ip_rt_error_burst
,
2377 .maxlen
= sizeof(int),
2379 .proc_handler
= proc_dointvec
,
2382 .procname
= "gc_elasticity",
2383 .data
= &ip_rt_gc_elasticity
,
2384 .maxlen
= sizeof(int),
2386 .proc_handler
= proc_dointvec
,
2389 .procname
= "mtu_expires",
2390 .data
= &ip_rt_mtu_expires
,
2391 .maxlen
= sizeof(int),
2393 .proc_handler
= proc_dointvec_jiffies
,
2396 .procname
= "min_pmtu",
2397 .data
= &ip_rt_min_pmtu
,
2398 .maxlen
= sizeof(int),
2400 .proc_handler
= proc_dointvec
,
2403 .procname
= "min_adv_mss",
2404 .data
= &ip_rt_min_advmss
,
2405 .maxlen
= sizeof(int),
2407 .proc_handler
= proc_dointvec
,
2412 static struct ctl_table ipv4_route_flush_table
[] = {
2414 .procname
= "flush",
2415 .maxlen
= sizeof(int),
2417 .proc_handler
= ipv4_sysctl_rtcache_flush
,
2422 static __net_init
int sysctl_route_net_init(struct net
*net
)
2424 struct ctl_table
*tbl
;
2426 tbl
= ipv4_route_flush_table
;
2427 if (!net_eq(net
, &init_net
)) {
2428 tbl
= kmemdup(tbl
, sizeof(ipv4_route_flush_table
), GFP_KERNEL
);
2432 tbl
[0].extra1
= net
;
2434 net
->ipv4
.route_hdr
= register_net_sysctl(net
, "net/ipv4/route", tbl
);
2435 if (net
->ipv4
.route_hdr
== NULL
)
2440 if (tbl
!= ipv4_route_flush_table
)
2446 static __net_exit
void sysctl_route_net_exit(struct net
*net
)
2448 struct ctl_table
*tbl
;
2450 tbl
= net
->ipv4
.route_hdr
->ctl_table_arg
;
2451 unregister_net_sysctl_table(net
->ipv4
.route_hdr
);
2452 BUG_ON(tbl
== ipv4_route_flush_table
);
2456 static __net_initdata
struct pernet_operations sysctl_route_ops
= {
2457 .init
= sysctl_route_net_init
,
2458 .exit
= sysctl_route_net_exit
,
2462 static __net_init
int rt_genid_init(struct net
*net
)
2464 get_random_bytes(&net
->ipv4
.rt_genid
,
2465 sizeof(net
->ipv4
.rt_genid
));
2466 get_random_bytes(&net
->ipv4
.dev_addr_genid
,
2467 sizeof(net
->ipv4
.dev_addr_genid
));
2471 static __net_initdata
struct pernet_operations rt_genid_ops
= {
2472 .init
= rt_genid_init
,
2475 static int __net_init
ipv4_inetpeer_init(struct net
*net
)
2477 struct inet_peer_base
*bp
= kmalloc(sizeof(*bp
), GFP_KERNEL
);
2481 inet_peer_base_init(bp
);
2482 net
->ipv4
.peers
= bp
;
2486 static void __net_exit
ipv4_inetpeer_exit(struct net
*net
)
2488 struct inet_peer_base
*bp
= net
->ipv4
.peers
;
2490 net
->ipv4
.peers
= NULL
;
2491 inetpeer_invalidate_tree(bp
);
2495 static __net_initdata
struct pernet_operations ipv4_inetpeer_ops
= {
2496 .init
= ipv4_inetpeer_init
,
2497 .exit
= ipv4_inetpeer_exit
,
2500 #ifdef CONFIG_IP_ROUTE_CLASSID
2501 struct ip_rt_acct __percpu
*ip_rt_acct __read_mostly
;
2502 #endif /* CONFIG_IP_ROUTE_CLASSID */
2504 int __init
ip_rt_init(void)
2508 #ifdef CONFIG_IP_ROUTE_CLASSID
2509 ip_rt_acct
= __alloc_percpu(256 * sizeof(struct ip_rt_acct
), __alignof__(struct ip_rt_acct
));
2511 panic("IP: failed to allocate ip_rt_acct\n");
2514 ipv4_dst_ops
.kmem_cachep
=
2515 kmem_cache_create("ip_dst_cache", sizeof(struct rtable
), 0,
2516 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
, NULL
);
2518 ipv4_dst_blackhole_ops
.kmem_cachep
= ipv4_dst_ops
.kmem_cachep
;
2520 if (dst_entries_init(&ipv4_dst_ops
) < 0)
2521 panic("IP: failed to allocate ipv4_dst_ops counter\n");
2523 if (dst_entries_init(&ipv4_dst_blackhole_ops
) < 0)
2524 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
2526 ipv4_dst_ops
.gc_thresh
= ~0;
2527 ip_rt_max_size
= INT_MAX
;
2532 if (ip_rt_proc_init())
2533 pr_err("Unable to create route proc files\n");
2536 xfrm4_init(ip_rt_max_size
);
2538 rtnl_register(PF_INET
, RTM_GETROUTE
, inet_rtm_getroute
, NULL
, NULL
);
2540 #ifdef CONFIG_SYSCTL
2541 register_pernet_subsys(&sysctl_route_ops
);
2543 register_pernet_subsys(&rt_genid_ops
);
2544 register_pernet_subsys(&ipv4_inetpeer_ops
);
2548 #ifdef CONFIG_SYSCTL
2550 * We really need to sanitize the damn ipv4 init order, then all
2551 * this nonsense will go away.
2553 void __init
ip_static_sysctl_init(void)
2555 register_net_sysctl(&init_net
, "net/ipv4/route", ipv4_route_table
);