/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp) \
	((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_default_mtu(const struct dst_entry *dst);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

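/* Hand out a writable copy of the dst metrics, backed by the route's
 * inet_peer: bind the peer if needed, copy the shared metrics into
 * peer->metrics on first use, and switch dst->_metrics over with cmpxchg().
 * If the swap is lost to another CPU that installed a read-only pointer,
 * NULL is returned; on success the reference to the fib_info whose metrics
 * were previously shared is dropped.
 */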
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;
	u32 *p = NULL;

	if (!rt->peer)
		rt_bind_peer(rt, 1);

	peer = rt->peer;
	if (peer) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		p = peer->metrics;
		if (inet_metrics_new(peer))
			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else {
			if (rt->fi) {
				fib_info_put(rt->fi);
				rt->fi = NULL;
			}
		}
	}
	return p;
}

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.default_mtu =		ipv4_default_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

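/* Map the four IP TOS bits to a packet priority; odd entries carry the
 * ECN_OR_COST() variant.  The table is indexed with IPTOS_TOS(tos) >> 1 by
 * rt_tos2priority() in <net/route.h>.
 */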
const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
 * The size of this table is a power of two and depends on the number of CPUS.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
			GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

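/* Hash of the route cache key: daddr, saddr and the interface index are
 * mixed with jhash_3words() together with the per-namespace generation id
 * and masked down to the table size.  Since the genid participates in the
 * hash and is stored in every entry, bumping it (rt_cache_invalidate())
 * effectively invalidates everything created under the old generation.
 */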
static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

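/* seq_file walk over /proc/net/rt_cache: buckets are scanned from the top of
 * the table downwards under rcu_read_lock_bh(), which stays held while a
 * bucket is being traversed and is dropped either when moving on to the next
 * bucket or in rt_cache_seq_stop().  Only entries belonging to the reader's
 * netns and to the generation sampled in ->start() are reported.
 */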
static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			   r->dst.dev ? r->dst.dev->name : "*",
			   (__force u32)r->rt_dst,
			   (__force u32)r->rt_gateway,
			   r->rt_flags, atomic_read(&r->dst.__refcnt),
			   r->dst.__use, 0, (__force u32)r->rt_src,
			   dst_metric_advmss(&r->dst) + 40,
			   dst_metric(&r->dst, RTAX_WINDOW),
			   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
				 dst_metric(&r->dst, RTAX_RTTVAR)),
			   r->rt_tos,
			   r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
			   r->dst.hh ? (r->dst.hh->hh_output ==
					dev_queue_xmit) : 0,
			   r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_acct_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		(rth->peer && rth->peer->pmtu_expires);
}

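/* An entry may be expired only when nothing holds a reference to it.
 * Unreferenced entries are kept while younger than tmo1 (unless they are
 * broadcast/multicast input routes colliding in the chain, see
 * rt_fast_clean()) or while younger than tmo2 if they are "valuable"
 * (rt_valuable()); anything else is reported as expirable.
 */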
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	age = jiffies - rth->dst.lastuse;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}

static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct rtable *rt1,
				       const struct rtable *rt2)
{
	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0);
}

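/* Full route-cache key comparison.  Unlike compare_hash_inputs(), which only
 * checks the destination, source and input interface, this also requires the
 * firewall mark, TOS and output interface to match.
 */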
static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
{
	return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_mark ^ rt2->rt_mark) |
		(rt1->rt_tos ^ rt2->rt_tos) |
		(rt1->rt_oif ^ rt2->rt_oif) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0;
}

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(struct net *net, int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;

	for (i = 0; i <= rt_hash_mask; i++) {
		struct rtable __rcu **pprev;
		struct rtable *list;

		if (process_context && need_resched())
			cond_resched();
		rth = rcu_dereference_raw(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));

		list = NULL;
		pprev = &rt_hash_table[i].chain;
		rth = rcu_dereference_protected(*pprev,
			lockdep_is_held(rt_hash_lock_addr(i)));

		while (rth) {
			next = rcu_dereference_protected(rth->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));

			if (!net ||
			    net_eq(dev_net(rth->dst.dev), net)) {
				rcu_assign_pointer(*pprev, next);
				rcu_assign_pointer(rth->dst.rt_next, list);
				list = rth;
			} else {
				pprev = &rth->dst.rt_next;
			}
			rth = next;
		}

		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; list; list = next) {
			next = rcu_dereference_protected(list->dst.rt_next, 1);
			rt_free(list);
		}
	}
}

/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This is to get an estimate for rt_chain_length_max:
 * rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)

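/* Example: with FRACT_BITS = 3, lengths are kept in units of 1/8, so a
 * stored value of 12 represents a chain length of 12/8 = 1.5, and ONE (= 8)
 * is the fixed-point representation of a single entry.
 */
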
/*
 * Given a hash chain and an item in this hash chain,
 * find out whether a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif).
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(aux, rth))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(net, !in_softirq());
}

/* Flush previously cache-invalidated entries from the cache */
void rt_cache_flush_batch(struct net *net)
{
	rt_do_flush(net, !in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which keeps the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that when the network is idle
   "expire" is large enough to keep enough warm entries,
   and when load increases it shrinks to limit the cache size.
 */

569d3645 874static int rt_garbage_collect(struct dst_ops *ops)
1da177e4
LT
875{
876 static unsigned long expire = RT_GC_TIMEOUT;
877 static unsigned long last_gc;
878 static int rover;
879 static int equilibrium;
1c31720a
ED
880 struct rtable *rth;
881 struct rtable __rcu **rthp;
1da177e4
LT
882 unsigned long now = jiffies;
883 int goal;
fc66f95c 884 int entries = dst_entries_get_fast(&ipv4_dst_ops);
1da177e4
LT
885
886 /*
887 * Garbage collection is pretty expensive,
888 * do not make it too frequently.
889 */
890
891 RT_CACHE_STAT_INC(gc_total);
892
893 if (now - last_gc < ip_rt_gc_min_interval &&
fc66f95c 894 entries < ip_rt_max_size) {
1da177e4
LT
895 RT_CACHE_STAT_INC(gc_ignored);
896 goto out;
897 }
898
fc66f95c 899 entries = dst_entries_get_slow(&ipv4_dst_ops);
1da177e4 900 /* Calculate number of entries, which we want to expire now. */
fc66f95c 901 goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
1da177e4
LT
902 if (goal <= 0) {
903 if (equilibrium < ipv4_dst_ops.gc_thresh)
904 equilibrium = ipv4_dst_ops.gc_thresh;
fc66f95c 905 goal = entries - equilibrium;
1da177e4 906 if (goal > 0) {
b790cedd 907 equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
fc66f95c 908 goal = entries - equilibrium;
1da177e4
LT
909 }
910 } else {
911 /* We are in dangerous area. Try to reduce cache really
912 * aggressively.
913 */
b790cedd 914 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
fc66f95c 915 equilibrium = entries - goal;
1da177e4
LT
916 }
917
918 if (now - last_gc >= ip_rt_gc_min_interval)
919 last_gc = now;
920
921 if (goal <= 0) {
922 equilibrium += goal;
923 goto work_done;
924 }
925
926 do {
927 int i, k;
928
929 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
930 unsigned long tmo = expire;
931
932 k = (k + 1) & rt_hash_mask;
933 rthp = &rt_hash_table[k].chain;
22c047cc 934 spin_lock_bh(rt_hash_lock_addr(k));
1c31720a
ED
935 while ((rth = rcu_dereference_protected(*rthp,
936 lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
e84f84f2 937 if (!rt_is_expired(rth) &&
29e75252 938 !rt_may_expire(rth, tmo, expire)) {
1da177e4 939 tmo >>= 1;
d8d1f30b 940 rthp = &rth->dst.rt_next;
1da177e4
LT
941 continue;
942 }
d8d1f30b 943 *rthp = rth->dst.rt_next;
1da177e4
LT
944 rt_free(rth);
945 goal--;
1da177e4 946 }
22c047cc 947 spin_unlock_bh(rt_hash_lock_addr(k));
1da177e4
LT
948 if (goal <= 0)
949 break;
950 }
951 rover = k;
952
953 if (goal <= 0)
954 goto work_done;
955
956 /* Goal is not achieved. We stop process if:
957
958 - if expire reduced to zero. Otherwise, expire is halfed.
959 - if table is not full.
960 - if we are called from interrupt.
961 - jiffies check is just fallback/debug loop breaker.
962 We will not spin here for long time in any case.
963 */
964
965 RT_CACHE_STAT_INC(gc_goal_miss);
966
967 if (expire == 0)
968 break;
969
970 expire >>= 1;
971#if RT_CACHE_DEBUG >= 2
972 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
fc66f95c 973 dst_entries_get_fast(&ipv4_dst_ops), goal, i);
1da177e4
LT
974#endif
975
fc66f95c 976 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1da177e4
LT
977 goto out;
978 } while (!in_softirq() && time_before_eq(jiffies, now));
979
fc66f95c
ED
980 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
981 goto out;
982 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
1da177e4
LT
983 goto out;
984 if (net_ratelimit())
985 printk(KERN_WARNING "dst cache overflow\n");
986 RT_CACHE_STAT_INC(gc_dst_overflow);
987 return 1;
988
989work_done:
990 expire += ip_rt_gc_min_interval;
991 if (expire > ip_rt_gc_timeout ||
fc66f95c
ED
992 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
993 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
1da177e4
LT
994 expire = ip_rt_gc_timeout;
995#if RT_CACHE_DEBUG >= 2
996 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
fc66f95c 997 dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
1da177e4
LT
998#endif
999out: return 0;
1000}
1001
98376387
ED
1002/*
1003 * Returns number of entries in a hash chain that have different hash_inputs
1004 */
1005static int slow_chain_length(const struct rtable *head)
1006{
1007 int length = 0;
1008 const struct rtable *rth = head;
1009
1010 while (rth) {
1011 length += has_noalias(head, rth);
1c31720a 1012 rth = rcu_dereference_protected(rth->dst.rt_next, 1);
98376387
ED
1013 }
1014 return length >> FRACT_BITS;
1015}
1016
b23dd4fe
DM
1017static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
1018 struct sk_buff *skb, int ifindex)
1da177e4 1019{
1c31720a
ED
1020 struct rtable *rth, *cand;
1021 struct rtable __rcu **rthp, **candp;
1da177e4 1022 unsigned long now;
1da177e4
LT
1023 u32 min_score;
1024 int chain_length;
1025 int attempts = !in_softirq();
1026
1027restart:
1028 chain_length = 0;
1029 min_score = ~(u32)0;
1030 cand = NULL;
1031 candp = NULL;
1032 now = jiffies;
1033
d8d1f30b 1034 if (!rt_caching(dev_net(rt->dst.dev))) {
73e42897
NH
1035 /*
1036 * If we're not caching, just tell the caller we
1037 * were successful and don't touch the route. The
1038 * caller hold the sole reference to the cache entry, and
1039 * it will be released when the caller is done with it.
1040 * If we drop it here, the callers have no way to resolve routes
1041 * when we're not caching. Instead, just point *rp at rt, so
1042 * the caller gets a single use out of the route
b6280b47
NH
1043 * Note that we do rt_free on this new route entry, so that
1044 * once its refcount hits zero, we are still able to reap it
1045 * (Thanks Alexey)
27b75c95
ED
1046 * Note: To avoid expensive rcu stuff for this uncached dst,
1047 * we set DST_NOCACHE so that dst_release() can free dst without
1048 * waiting a grace period.
73e42897 1049 */
b6280b47 1050
c7d4426a 1051 rt->dst.flags |= DST_NOCACHE;
c7537967 1052 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
d8d1f30b 1053 int err = arp_bind_neighbour(&rt->dst);
b6280b47
NH
1054 if (err) {
1055 if (net_ratelimit())
1056 printk(KERN_WARNING
1057 "Neighbour table failure & not caching routes.\n");
27b75c95 1058 ip_rt_put(rt);
b23dd4fe 1059 return ERR_PTR(err);
b6280b47
NH
1060 }
1061 }
1062
b6280b47 1063 goto skip_hashing;
1080d709
NH
1064 }
1065
1da177e4
LT
1066 rthp = &rt_hash_table[hash].chain;
1067
22c047cc 1068 spin_lock_bh(rt_hash_lock_addr(hash));
1c31720a
ED
1069 while ((rth = rcu_dereference_protected(*rthp,
1070 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
e84f84f2 1071 if (rt_is_expired(rth)) {
d8d1f30b 1072 *rthp = rth->dst.rt_next;
29e75252
ED
1073 rt_free(rth);
1074 continue;
1075 }
5e2b61f7 1076 if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
1da177e4 1077 /* Put it first */
d8d1f30b 1078 *rthp = rth->dst.rt_next;
1da177e4
LT
1079 /*
1080 * Since lookup is lockfree, the deletion
1081 * must be visible to another weakly ordered CPU before
1082 * the insertion at the start of the hash chain.
1083 */
d8d1f30b 1084 rcu_assign_pointer(rth->dst.rt_next,
1da177e4
LT
1085 rt_hash_table[hash].chain);
1086 /*
1087 * Since lookup is lockfree, the update writes
1088 * must be ordered for consistency on SMP.
1089 */
1090 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1091
d8d1f30b 1092 dst_use(&rth->dst, now);
22c047cc 1093 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1094
1095 rt_drop(rt);
b23dd4fe 1096 if (skb)
d8d1f30b 1097 skb_dst_set(skb, &rth->dst);
b23dd4fe 1098 return rth;
1da177e4
LT
1099 }
1100
d8d1f30b 1101 if (!atomic_read(&rth->dst.__refcnt)) {
1da177e4
LT
1102 u32 score = rt_score(rth);
1103
1104 if (score <= min_score) {
1105 cand = rth;
1106 candp = rthp;
1107 min_score = score;
1108 }
1109 }
1110
1111 chain_length++;
1112
d8d1f30b 1113 rthp = &rth->dst.rt_next;
1da177e4
LT
1114 }
1115
1116 if (cand) {
1117 /* ip_rt_gc_elasticity used to be average length of chain
1118 * length, when exceeded gc becomes really aggressive.
1119 *
1120 * The second limit is less certain. At the moment it allows
1121 * only 2 entries per bucket. We will see.
1122 */
1123 if (chain_length > ip_rt_gc_elasticity) {
d8d1f30b 1124 *candp = cand->dst.rt_next;
1da177e4
LT
1125 rt_free(cand);
1126 }
1080d709 1127 } else {
98376387
ED
1128 if (chain_length > rt_chain_length_max &&
1129 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
d8d1f30b 1130 struct net *net = dev_net(rt->dst.dev);
1080d709 1131 int num = ++net->ipv4.current_rt_cache_rebuild_count;
b35ecb5d 1132 if (!rt_caching(net)) {
1080d709 1133 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
d8d1f30b 1134 rt->dst.dev->name, num);
1080d709 1135 }
b35ecb5d 1136 rt_emergency_hash_rebuild(net);
6a2bad70
PE
1137 spin_unlock_bh(rt_hash_lock_addr(hash));
1138
5e2b61f7 1139 hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
6a2bad70
PE
1140 ifindex, rt_genid(net));
1141 goto restart;
1080d709 1142 }
1da177e4
LT
1143 }
1144
1145 /* Try to bind route to arp only if it is output
1146 route or unicast forwarding path.
1147 */
c7537967 1148 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
d8d1f30b 1149 int err = arp_bind_neighbour(&rt->dst);
1da177e4 1150 if (err) {
22c047cc 1151 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1152
1153 if (err != -ENOBUFS) {
1154 rt_drop(rt);
b23dd4fe 1155 return ERR_PTR(err);
1da177e4
LT
1156 }
1157
1158 /* Neighbour tables are full and nothing
1159 can be released. Try to shrink route cache,
1160 it is most likely it holds some neighbour records.
1161 */
1162 if (attempts-- > 0) {
1163 int saved_elasticity = ip_rt_gc_elasticity;
1164 int saved_int = ip_rt_gc_min_interval;
1165 ip_rt_gc_elasticity = 1;
1166 ip_rt_gc_min_interval = 0;
569d3645 1167 rt_garbage_collect(&ipv4_dst_ops);
1da177e4
LT
1168 ip_rt_gc_min_interval = saved_int;
1169 ip_rt_gc_elasticity = saved_elasticity;
1170 goto restart;
1171 }
1172
1173 if (net_ratelimit())
7e1b33e5 1174 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1da177e4 1175 rt_drop(rt);
b23dd4fe 1176 return ERR_PTR(-ENOBUFS);
1da177e4
LT
1177 }
1178 }
1179
d8d1f30b 1180 rt->dst.rt_next = rt_hash_table[hash].chain;
1080d709 1181
1da177e4 1182#if RT_CACHE_DEBUG >= 2
d8d1f30b 1183 if (rt->dst.rt_next) {
1da177e4 1184 struct rtable *trt;
b6280b47
NH
1185 printk(KERN_DEBUG "rt_cache @%02x: %pI4",
1186 hash, &rt->rt_dst);
d8d1f30b 1187 for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
673d57e7 1188 printk(" . %pI4", &trt->rt_dst);
1da177e4
LT
1189 printk("\n");
1190 }
1191#endif
00269b54
ED
1192 /*
1193 * Since lookup is lockfree, we must make sure
1194 * previous writes to rt are comitted to memory
1195 * before making rt visible to other CPUS.
1196 */
1ddbcb00 1197 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1080d709 1198
22c047cc 1199 spin_unlock_bh(rt_hash_lock_addr(hash));
73e42897 1200
b6280b47 1201skip_hashing:
b23dd4fe 1202 if (skb)
d8d1f30b 1203 skb_dst_set(skb, &rt->dst);
b23dd4fe 1204 return rt;
1da177e4
LT
1205}
1206
6431cbc2
DM
1207static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
1208
1209static u32 rt_peer_genid(void)
1210{
1211 return atomic_read(&__rt_peer_genid);
1212}
1213
1da177e4
LT
1214void rt_bind_peer(struct rtable *rt, int create)
1215{
1da177e4
LT
1216 struct inet_peer *peer;
1217
b534ecf1 1218 peer = inet_getpeer_v4(rt->rt_dst, create);
1da177e4 1219
49e8ab03 1220 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1da177e4 1221 inet_putpeer(peer);
6431cbc2
DM
1222 else
1223 rt->rt_peer_genid = rt_peer_genid();
1da177e4
LT
1224}
1225
1226/*
1227 * Peer allocation may fail only in serious out-of-memory conditions. However
1228 * we still can generate some output.
1229 * Random ID selection looks a bit dangerous because we have no chances to
1230 * select ID being unique in a reasonable period of time.
1231 * But broken packet identifier may be better than no packet at all.
1232 */
1233static void ip_select_fb_ident(struct iphdr *iph)
1234{
1235 static DEFINE_SPINLOCK(ip_fb_id_lock);
1236 static u32 ip_fallback_id;
1237 u32 salt;
1238
1239 spin_lock_bh(&ip_fb_id_lock);
e448515c 1240 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1da177e4
LT
1241 iph->id = htons(salt & 0xFFFF);
1242 ip_fallback_id = salt;
1243 spin_unlock_bh(&ip_fb_id_lock);
1244}
1245
1246void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1247{
1248 struct rtable *rt = (struct rtable *) dst;
1249
1250 if (rt) {
1251 if (rt->peer == NULL)
1252 rt_bind_peer(rt, 1);
1253
1254 /* If peer is attached to destination, it is never detached,
1255 so that we need not to grab a lock to dereference it.
1256 */
1257 if (rt->peer) {
1258 iph->id = htons(inet_getid(rt->peer, more));
1259 return;
1260 }
1261 } else
e905a9ed 1262 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
9c2b3328 1263 __builtin_return_address(0));
1da177e4
LT
1264
1265 ip_select_fb_ident(iph);
1266}
4bc2f18b 1267EXPORT_SYMBOL(__ip_select_ident);
1da177e4
LT
1268
1269static void rt_del(unsigned hash, struct rtable *rt)
1270{
1c31720a
ED
1271 struct rtable __rcu **rthp;
1272 struct rtable *aux;
1da177e4 1273
29e75252 1274 rthp = &rt_hash_table[hash].chain;
22c047cc 1275 spin_lock_bh(rt_hash_lock_addr(hash));
1da177e4 1276 ip_rt_put(rt);
1c31720a
ED
1277 while ((aux = rcu_dereference_protected(*rthp,
1278 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
e84f84f2 1279 if (aux == rt || rt_is_expired(aux)) {
d8d1f30b 1280 *rthp = aux->dst.rt_next;
29e75252
ED
1281 rt_free(aux);
1282 continue;
1da177e4 1283 }
d8d1f30b 1284 rthp = &aux->dst.rt_next;
29e75252 1285 }
22c047cc 1286 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1287}
1288
ed7865a4 1289/* called in rcu_read_lock() section */
f7655229
AV
1290void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1291 __be32 saddr, struct net_device *dev)
1da177e4 1292{
ed7865a4 1293 struct in_device *in_dev = __in_dev_get_rcu(dev);
f39925db 1294 struct inet_peer *peer;
317805b8 1295 struct net *net;
1da177e4 1296
1da177e4
LT
1297 if (!in_dev)
1298 return;
1299
c346dca1 1300 net = dev_net(dev);
9d4fb27d
JP
1301 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1302 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1303 ipv4_is_zeronet(new_gw))
1da177e4
LT
1304 goto reject_redirect;
1305
1306 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1307 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1308 goto reject_redirect;
1309 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1310 goto reject_redirect;
1311 } else {
317805b8 1312 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1da177e4
LT
1313 goto reject_redirect;
1314 }
1315
f39925db
DM
1316 peer = inet_getpeer_v4(daddr, 1);
1317 if (peer) {
1318 peer->redirect_learned.a4 = new_gw;
e905a9ed 1319
f39925db 1320 inet_putpeer(peer);
1da177e4 1321
f39925db 1322 atomic_inc(&__rt_peer_genid);
1da177e4 1323 }
1da177e4
LT
1324 return;
1325
1326reject_redirect:
1327#ifdef CONFIG_IP_ROUTE_VERBOSE
1328 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
1329 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
1330 " Advised path = %pI4 -> %pI4\n",
1331 &old_gw, dev->name, &new_gw,
1332 &saddr, &daddr);
1da177e4 1333#endif
ed7865a4 1334 ;
1da177e4
LT
1335}
1336
1337static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1338{
ee6b9673 1339 struct rtable *rt = (struct rtable *)dst;
1da177e4
LT
1340 struct dst_entry *ret = dst;
1341
1342 if (rt) {
d11a4dc1 1343 if (dst->obsolete > 0) {
1da177e4
LT
1344 ip_rt_put(rt);
1345 ret = NULL;
2c8cec5c 1346 } else if (rt->rt_flags & RTCF_REDIRECTED) {
5e2b61f7
DM
1347 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1348 rt->rt_oif,
e84f84f2 1349 rt_genid(dev_net(dst->dev)));
1da177e4 1350#if RT_CACHE_DEBUG >= 1
673d57e7 1351 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
5e2b61f7 1352 &rt->rt_dst, rt->rt_tos);
1da177e4
LT
1353#endif
1354 rt_del(hash, rt);
1355 ret = NULL;
2c8cec5c
DM
1356 } else if (rt->peer &&
1357 rt->peer->pmtu_expires &&
1358 time_after_eq(jiffies, rt->peer->pmtu_expires)) {
1359 unsigned long orig = rt->peer->pmtu_expires;
1360
1361 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1362 dst_metric_set(dst, RTAX_MTU,
1363 rt->peer->pmtu_orig);
1da177e4
LT
1364 }
1365 }
1366 return ret;
1367}
1368
1369/*
1370 * Algorithm:
1371 * 1. The first ip_rt_redirect_number redirects are sent
1372 * with exponential backoff, then we stop sending them at all,
1373 * assuming that the host ignores our redirects.
1374 * 2. If we did not see packets requiring redirects
1375 * during ip_rt_redirect_silence, we assume that the host
1376 * forgot redirected route and start to send redirects again.
1377 *
1378 * This algorithm is much cheaper and more intelligent than dumb load limiting
1379 * in icmp.c.
1380 *
1381 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1382 * and "frag. need" (breaks PMTU discovery) in icmp.c.
1383 */
1384
1385void ip_rt_send_redirect(struct sk_buff *skb)
1386{
511c3f92 1387 struct rtable *rt = skb_rtable(skb);
30038fc6 1388 struct in_device *in_dev;
92d86829 1389 struct inet_peer *peer;
30038fc6 1390 int log_martians;
1da177e4 1391
30038fc6 1392 rcu_read_lock();
d8d1f30b 1393 in_dev = __in_dev_get_rcu(rt->dst.dev);
30038fc6
ED
1394 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1395 rcu_read_unlock();
1da177e4 1396 return;
30038fc6
ED
1397 }
1398 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1399 rcu_read_unlock();
1da177e4 1400
92d86829
DM
1401 if (!rt->peer)
1402 rt_bind_peer(rt, 1);
1403 peer = rt->peer;
1404 if (!peer) {
1405 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1406 return;
1407 }
1408
1da177e4
LT
1409 /* No redirected packets during ip_rt_redirect_silence;
1410 * reset the algorithm.
1411 */
92d86829
DM
1412 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
1413 peer->rate_tokens = 0;
1da177e4
LT
1414
1415 /* Too many ignored redirects; do not send anything
d8d1f30b 1416 * set dst.rate_last to the last seen redirected packet.
1da177e4 1417 */
92d86829
DM
1418 if (peer->rate_tokens >= ip_rt_redirect_number) {
1419 peer->rate_last = jiffies;
30038fc6 1420 return;
1da177e4
LT
1421 }
1422
1423 /* Check for load limit; set rate_last to the latest sent
1424 * redirect.
1425 */
92d86829 1426 if (peer->rate_tokens == 0 ||
14fb8a76 1427 time_after(jiffies,
92d86829
DM
1428 (peer->rate_last +
1429 (ip_rt_redirect_load << peer->rate_tokens)))) {
1da177e4 1430 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
92d86829
DM
1431 peer->rate_last = jiffies;
1432 ++peer->rate_tokens;
1da177e4 1433#ifdef CONFIG_IP_ROUTE_VERBOSE
30038fc6 1434 if (log_martians &&
92d86829 1435 peer->rate_tokens == ip_rt_redirect_number &&
1da177e4 1436 net_ratelimit())
673d57e7
HH
1437 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1438 &rt->rt_src, rt->rt_iif,
1439 &rt->rt_dst, &rt->rt_gateway);
1da177e4
LT
1440#endif
1441 }
1da177e4
LT
1442}
1443
1444static int ip_error(struct sk_buff *skb)
1445{
511c3f92 1446 struct rtable *rt = skb_rtable(skb);
92d86829 1447 struct inet_peer *peer;
1da177e4 1448 unsigned long now;
92d86829 1449 bool send;
1da177e4
LT
1450 int code;
1451
d8d1f30b 1452 switch (rt->dst.error) {
1da177e4
LT
1453 case EINVAL:
1454 default:
1455 goto out;
1456 case EHOSTUNREACH:
1457 code = ICMP_HOST_UNREACH;
1458 break;
1459 case ENETUNREACH:
1460 code = ICMP_NET_UNREACH;
d8d1f30b 1461 IP_INC_STATS_BH(dev_net(rt->dst.dev),
7c73a6fa 1462 IPSTATS_MIB_INNOROUTES);
1da177e4
LT
1463 break;
1464 case EACCES:
1465 code = ICMP_PKT_FILTERED;
1466 break;
1467 }
1468
92d86829
DM
1469 if (!rt->peer)
1470 rt_bind_peer(rt, 1);
1471 peer = rt->peer;
1472
1473 send = true;
1474 if (peer) {
1475 now = jiffies;
1476 peer->rate_tokens += now - peer->rate_last;
1477 if (peer->rate_tokens > ip_rt_error_burst)
1478 peer->rate_tokens = ip_rt_error_burst;
1479 peer->rate_last = now;
1480 if (peer->rate_tokens >= ip_rt_error_cost)
1481 peer->rate_tokens -= ip_rt_error_cost;
1482 else
1483 send = false;
1da177e4 1484 }
92d86829
DM
1485 if (send)
1486 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1da177e4
LT
1487
1488out: kfree_skb(skb);
1489 return 0;
e905a9ed 1490}
1da177e4
LT
1491
1492/*
1493 * The last two values are not from the RFC but
1494 * are needed for AMPRnet AX.25 paths.
1495 */
1496
9b5b5cff 1497static const unsigned short mtu_plateau[] =
1da177e4
LT
1498{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1499
5969f71d 1500static inline unsigned short guess_mtu(unsigned short old_mtu)
1da177e4
LT
1501{
1502 int i;
e905a9ed 1503
1da177e4
LT
1504 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1505 if (old_mtu > mtu_plateau[i])
1506 return mtu_plateau[i];
1507 return 68;
1508}
1509
b5921910 1510unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
0010e465
TT
1511 unsigned short new_mtu,
1512 struct net_device *dev)
1da177e4 1513{
1da177e4 1514 unsigned short old_mtu = ntohs(iph->tot_len);
1da177e4 1515 unsigned short est_mtu = 0;
2c8cec5c 1516 struct inet_peer *peer;
1da177e4 1517
2c8cec5c
DM
1518 peer = inet_getpeer_v4(iph->daddr, 1);
1519 if (peer) {
1520 unsigned short mtu = new_mtu;
1da177e4 1521
2c8cec5c
DM
1522 if (new_mtu < 68 || new_mtu >= old_mtu) {
1523 /* BSD 4.2 derived systems incorrectly adjust
1524 * tot_len by the IP header length, and report
1525 * a zero MTU in the ICMP message.
1526 */
1527 if (mtu == 0 &&
1528 old_mtu >= 68 + (iph->ihl << 2))
1529 old_mtu -= iph->ihl << 2;
1530 mtu = guess_mtu(old_mtu);
1531 }
0010e465 1532
2c8cec5c
DM
1533 if (mtu < ip_rt_min_pmtu)
1534 mtu = ip_rt_min_pmtu;
1535 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1536 est_mtu = mtu;
1537 peer->pmtu_learned = mtu;
1538 peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
1539 }
1da177e4 1540
2c8cec5c 1541 inet_putpeer(peer);
1da177e4 1542
2c8cec5c 1543 atomic_inc(&__rt_peer_genid);
1da177e4
LT
1544 }
1545 return est_mtu ? : new_mtu;
1546}
1547
2c8cec5c
DM
1548static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1549{
1550 unsigned long expires = peer->pmtu_expires;
1551
1552 if (time_before(expires, jiffies)) {
1553 u32 orig_dst_mtu = dst_mtu(dst);
1554 if (peer->pmtu_learned < orig_dst_mtu) {
1555 if (!peer->pmtu_orig)
1556 peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
1557 dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
1558 }
1559 } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
1560 dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
1561}
1562
1da177e4
LT
1563static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1564{
2c8cec5c
DM
1565 struct rtable *rt = (struct rtable *) dst;
1566 struct inet_peer *peer;
1567
1568 dst_confirm(dst);
1569
1570 if (!rt->peer)
1571 rt_bind_peer(rt, 1);
1572 peer = rt->peer;
1573 if (peer) {
1574 if (mtu < ip_rt_min_pmtu)
1da177e4 1575 mtu = ip_rt_min_pmtu;
2c8cec5c
DM
1576 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1577 peer->pmtu_learned = mtu;
1578 peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
1579
1580 atomic_inc(&__rt_peer_genid);
1581 rt->rt_peer_genid = rt_peer_genid();
1582
1583 check_peer_pmtu(dst, peer);
1da177e4 1584 }
2c8cec5c 1585 inet_putpeer(peer);
1da177e4
LT
1586 }
1587}
1588
f39925db
DM
1589static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1590{
1591 struct rtable *rt = (struct rtable *) dst;
1592 __be32 orig_gw = rt->rt_gateway;
1593
1594 dst_confirm(&rt->dst);
1595
1596 neigh_release(rt->dst.neighbour);
1597 rt->dst.neighbour = NULL;
1598
1599 rt->rt_gateway = peer->redirect_learned.a4;
1600 if (arp_bind_neighbour(&rt->dst) ||
1601 !(rt->dst.neighbour->nud_state & NUD_VALID)) {
1602 if (rt->dst.neighbour)
1603 neigh_event_send(rt->dst.neighbour, NULL);
1604 rt->rt_gateway = orig_gw;
1605 return -EAGAIN;
1606 } else {
1607 rt->rt_flags |= RTCF_REDIRECTED;
1608 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
1609 rt->dst.neighbour);
1610 }
1611 return 0;
1612}
1613
1da177e4
LT
1614static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1615{
6431cbc2
DM
1616 struct rtable *rt = (struct rtable *) dst;
1617
1618 if (rt_is_expired(rt))
d11a4dc1 1619 return NULL;
6431cbc2 1620 if (rt->rt_peer_genid != rt_peer_genid()) {
2c8cec5c
DM
1621 struct inet_peer *peer;
1622
6431cbc2
DM
1623 if (!rt->peer)
1624 rt_bind_peer(rt, 0);
1625
2c8cec5c
DM
1626 peer = rt->peer;
1627 if (peer && peer->pmtu_expires)
1628 check_peer_pmtu(dst, peer);
1629
f39925db
DM
1630 if (peer && peer->redirect_learned.a4 &&
1631 peer->redirect_learned.a4 != rt->rt_gateway) {
1632 if (check_peer_redir(dst, peer))
1633 return NULL;
1634 }
1635
6431cbc2
DM
1636 rt->rt_peer_genid = rt_peer_genid();
1637 }
d11a4dc1 1638 return dst;
1da177e4
LT
1639}
1640
1641static void ipv4_dst_destroy(struct dst_entry *dst)
1642{
1643 struct rtable *rt = (struct rtable *) dst;
1644 struct inet_peer *peer = rt->peer;
1da177e4 1645
62fa8a84
DM
1646 if (rt->fi) {
1647 fib_info_put(rt->fi);
1648 rt->fi = NULL;
1649 }
1da177e4
LT
1650 if (peer) {
1651 rt->peer = NULL;
1652 inet_putpeer(peer);
1653 }
1da177e4
LT
1654}
1655
1da177e4
LT
1656
1657static void ipv4_link_failure(struct sk_buff *skb)
1658{
1659 struct rtable *rt;
1660
1661 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1662
511c3f92 1663 rt = skb_rtable(skb);
2c8cec5c
DM
1664 if (rt &&
1665 rt->peer &&
1666 rt->peer->pmtu_expires) {
1667 unsigned long orig = rt->peer->pmtu_expires;
1668
1669 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1670 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1671 }
1da177e4
LT
1672}
1673
1674static int ip_rt_bug(struct sk_buff *skb)
1675{
673d57e7
HH
1676 printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1677 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1da177e4
LT
1678 skb->dev ? skb->dev->name : "?");
1679 kfree_skb(skb);
1680 return 0;
1681}
1682
1683/*
1684 We do not cache source address of outgoing interface,
1685 because it is used only by IP RR, TS and SRR options,
1686 so that it out of fast path.
1687
1688 BTW remember: "addr" is allowed to be not aligned
1689 in IP options!
1690 */
1691
1692void ip_rt_get_source(u8 *addr, struct rtable *rt)
1693{
a61ced5d 1694 __be32 src;
1da177e4
LT
1695 struct fib_result res;
1696
c7537967 1697 if (rt_is_output_route(rt))
1da177e4 1698 src = rt->rt_src;
ebc0ffae 1699 else {
5e2b61f7
DM
1700 struct flowi fl = {
1701 .fl4_dst = rt->rt_key_dst,
1702 .fl4_src = rt->rt_key_src,
1703 .fl4_tos = rt->rt_tos,
1704 .oif = rt->rt_oif,
1705 .iif = rt->rt_iif,
1706 .mark = rt->rt_mark,
1707 };
1708
ebc0ffae 1709 rcu_read_lock();
5e2b61f7 1710 if (fib_lookup(dev_net(rt->dst.dev), &fl, &res) == 0)
ebc0ffae
ED
1711 src = FIB_RES_PREFSRC(res);
1712 else
1713 src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1da177e4 1714 RT_SCOPE_UNIVERSE);
ebc0ffae
ED
1715 rcu_read_unlock();
1716 }
1da177e4
LT
1717 memcpy(addr, &src, 4);
1718}
1719
c7066f70 1720#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1721static void set_class_tag(struct rtable *rt, u32 tag)
1722{
d8d1f30b
CG
1723 if (!(rt->dst.tclassid & 0xFFFF))
1724 rt->dst.tclassid |= tag & 0xFFFF;
1725 if (!(rt->dst.tclassid & 0xFFFF0000))
1726 rt->dst.tclassid |= tag & 0xFFFF0000;
1da177e4
LT
1727}
1728#endif
1729
0dbaee3b
DM
1730static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1731{
1732 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1733
1734 if (advmss == 0) {
1735 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1736 ip_rt_min_advmss);
1737 if (advmss > 65535 - 40)
1738 advmss = 65535 - 40;
1739 }
1740 return advmss;
1741}
1742
d33e4553
DM
1743static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1744{
1745 unsigned int mtu = dst->dev->mtu;
1746
1747 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1748 const struct rtable *rt = (const struct rtable *) dst;
1749
1750 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1751 mtu = 576;
1752 }
1753
1754 if (mtu > IP_MAX_MTU)
1755 mtu = IP_MAX_MTU;
1756
1757 return mtu;
1758}
1759
5e2b61f7
DM
1760static void rt_init_metrics(struct rtable *rt, const struct flowi *oldflp,
1761 struct fib_info *fi)
a4daad6b 1762{
0131ba45
DM
1763 struct inet_peer *peer;
1764 int create = 0;
a4daad6b 1765
0131ba45
DM
1766 /* If a peer entry exists for this destination, we must hook
1767 * it up in order to get at cached metrics.
1768 */
5e2b61f7 1769 if (oldflp && (oldflp->flags & FLOWI_FLAG_PRECOW_METRICS))
0131ba45
DM
1770 create = 1;
1771
3c0afdca 1772 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
0131ba45 1773 if (peer) {
3c0afdca 1774 rt->rt_peer_genid = rt_peer_genid();
a4daad6b
DM
1775 if (inet_metrics_new(peer))
1776 memcpy(peer->metrics, fi->fib_metrics,
1777 sizeof(u32) * RTAX_MAX);
1778 dst_init_metrics(&rt->dst, peer->metrics, false);
2c8cec5c
DM
1779
1780 if (peer->pmtu_expires)
1781 check_peer_pmtu(&rt->dst, peer);
f39925db
DM
1782 if (peer->redirect_learned.a4 &&
1783 peer->redirect_learned.a4 != rt->rt_gateway) {
1784 rt->rt_gateway = peer->redirect_learned.a4;
1785 rt->rt_flags |= RTCF_REDIRECTED;
1786 }
0131ba45
DM
1787 } else {
1788 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1789 rt->fi = fi;
1790 atomic_inc(&fi->fib_clntref);
1791 }
1792 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
a4daad6b
DM
1793 }
1794}
1795
5e2b61f7
DM
1796static void rt_set_nexthop(struct rtable *rt, const struct flowi *oldflp,
1797 const struct fib_result *res,
982721f3 1798 struct fib_info *fi, u16 type, u32 itag)
1da177e4 1799{
defb3519 1800 struct dst_entry *dst = &rt->dst;
1da177e4
LT
1801
1802 if (fi) {
1803 if (FIB_RES_GW(*res) &&
1804 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1805 rt->rt_gateway = FIB_RES_GW(*res);
5e2b61f7 1806 rt_init_metrics(rt, oldflp, fi);
c7066f70 1807#ifdef CONFIG_IP_ROUTE_CLASSID
defb3519 1808 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1da177e4 1809#endif
d33e4553 1810 }
defb3519 1811
defb3519
DM
1812 if (dst_mtu(dst) > IP_MAX_MTU)
1813 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
0dbaee3b 1814 if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
defb3519 1815 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1da177e4 1816
c7066f70 1817#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1818#ifdef CONFIG_IP_MULTIPLE_TABLES
1819 set_class_tag(rt, fib_rules_tclass(res));
1820#endif
1821 set_class_tag(rt, itag);
1822#endif
982721f3 1823 rt->rt_type = type;
1da177e4
LT
1824}
1825
0c4dcd58
DM
1826static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
1827{
3c7bd1a1 1828 struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
0c4dcd58
DM
1829 if (rt) {
1830 rt->dst.obsolete = -1;
1831
0c4dcd58
DM
1832 rt->dst.flags = DST_HOST |
1833 (nopolicy ? DST_NOPOLICY : 0) |
1834 (noxfrm ? DST_NOXFRM : 0);
1835 }
1836 return rt;
1837}
1838
96d36220 1839/* called in rcu_read_lock() section */
9e12bb22 1840static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
1841 u8 tos, struct net_device *dev, int our)
1842{
96d36220 1843 unsigned int hash;
1da177e4 1844 struct rtable *rth;
a61ced5d 1845 __be32 spec_dst;
96d36220 1846 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1847 u32 itag = 0;
b5f7e755 1848 int err;
1da177e4
LT
1849
1850 /* Primary sanity checks. */
1851
1852 if (in_dev == NULL)
1853 return -EINVAL;
1854
1e637c74 1855 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 1856 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1da177e4
LT
1857 goto e_inval;
1858
f97c1e0c
JP
1859 if (ipv4_is_zeronet(saddr)) {
1860 if (!ipv4_is_local_multicast(daddr))
1da177e4
LT
1861 goto e_inval;
1862 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
b5f7e755
ED
1863 } else {
1864 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
1865 &itag, 0);
1866 if (err < 0)
1867 goto e_err;
1868 }
0c4dcd58 1869 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
1870 if (!rth)
1871 goto e_nobufs;
1872
d8d1f30b 1873 rth->dst.output = ip_rt_bug;
1da177e4 1874
5e2b61f7 1875 rth->rt_key_dst = daddr;
1da177e4 1876 rth->rt_dst = daddr;
5e2b61f7
DM
1877 rth->rt_tos = tos;
1878 rth->rt_mark = skb->mark;
1879 rth->rt_key_src = saddr;
1da177e4 1880 rth->rt_src = saddr;
c7066f70 1881#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b 1882 rth->dst.tclassid = itag;
1da177e4 1883#endif
5e2b61f7 1884 rth->rt_iif = dev->ifindex;
d8d1f30b
CG
1885 rth->dst.dev = init_net.loopback_dev;
1886 dev_hold(rth->dst.dev);
5e2b61f7 1887 rth->rt_oif = 0;
1da177e4
LT
1888 rth->rt_gateway = daddr;
1889 rth->rt_spec_dst= spec_dst;
e84f84f2 1890 rth->rt_genid = rt_genid(dev_net(dev));
1da177e4 1891 rth->rt_flags = RTCF_MULTICAST;
29e75252 1892 rth->rt_type = RTN_MULTICAST;
1da177e4 1893 if (our) {
d8d1f30b 1894 rth->dst.input= ip_local_deliver;
1da177e4
LT
1895 rth->rt_flags |= RTCF_LOCAL;
1896 }
1897
1898#ifdef CONFIG_IP_MROUTE
f97c1e0c 1899 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
d8d1f30b 1900 rth->dst.input = ip_mr_input;
1da177e4
LT
1901#endif
1902 RT_CACHE_STAT_INC(in_slow_mc);
1903
e84f84f2 1904 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
b23dd4fe
DM
1905 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1906 err = 0;
1907 if (IS_ERR(rth))
1908 err = PTR_ERR(rth);
1da177e4
LT
 1909 return err;
1910e_nobufs:
1da177e4 1911 return -ENOBUFS;
1da177e4 1912e_inval:
96d36220 1913 return -EINVAL;
b5f7e755 1914e_err:
b5f7e755 1915 return err;
1da177e4
LT
1916}
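/*
 * Editor's note -- illustrative stand-alone sketch, not part of route.c.
 * ip_route_input_mc() uses the usual kernel idiom of one label per error
 * class (e_inval / e_nobufs / e_err), with the success path returning
 * before the labels. A minimal self-contained example of the same
 * control-flow shape:
 */
#include <errno.h>

static int example_error_labels(int input_valid, int have_memory)
{
	if (!input_valid)
		goto e_inval;
	if (!have_memory)
		goto e_nobufs;
	return 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
}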
1917
1918
1919static void ip_handle_martian_source(struct net_device *dev,
1920 struct in_device *in_dev,
1921 struct sk_buff *skb,
9e12bb22
AV
1922 __be32 daddr,
1923 __be32 saddr)
1da177e4
LT
1924{
1925 RT_CACHE_STAT_INC(in_martian_src);
1926#ifdef CONFIG_IP_ROUTE_VERBOSE
1927 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
 1928 /*
 1929 * RFC 1812 recommendation: if the source is martian,
 1930 * the only hint we can log is the MAC header.
 1931 */
673d57e7
HH
1932 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1933 &daddr, &saddr, dev->name);
98e399f8 1934 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1da177e4 1935 int i;
98e399f8 1936 const unsigned char *p = skb_mac_header(skb);
1da177e4
LT
1937 printk(KERN_WARNING "ll header: ");
1938 for (i = 0; i < dev->hard_header_len; i++, p++) {
1939 printk("%02x", *p);
1940 if (i < (dev->hard_header_len - 1))
1941 printk(":");
1942 }
1943 printk("\n");
1944 }
1945 }
1946#endif
1947}
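/*
 * Editor's note -- illustrative userspace sketch, not part of route.c.
 * The loop above prints the link-layer header one byte at a time as
 * colon-separated hex. The same formatting as a stand-alone helper that
 * writes into a caller-supplied buffer:
 */
#include <stdio.h>

static void example_format_ll_header(const unsigned char *p, int len,
				     char *buf, size_t buflen)
{
	size_t off = 0;
	int i;

	buf[0] = '\0';
	/* Each byte needs at most ":xx" plus the terminating NUL. */
	for (i = 0; i < len && off + 4 <= buflen; i++)
		off += snprintf(buf + off, buflen - off, i ? ":%02x" : "%02x", p[i]);
}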
1948
47360228 1949/* called in rcu_read_lock() section */
5969f71d 1950static int __mkroute_input(struct sk_buff *skb,
982721f3 1951 const struct fib_result *res,
5969f71d
SH
1952 struct in_device *in_dev,
1953 __be32 daddr, __be32 saddr, u32 tos,
1954 struct rtable **result)
1da177e4 1955{
1da177e4
LT
1956 struct rtable *rth;
1957 int err;
1958 struct in_device *out_dev;
47360228 1959 unsigned int flags = 0;
d9c9df8c
AV
1960 __be32 spec_dst;
1961 u32 itag;
1da177e4
LT
1962
1963 /* get a working reference to the output device */
47360228 1964 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1da177e4
LT
1965 if (out_dev == NULL) {
 1966 if (net_ratelimit())
 1967 printk(KERN_CRIT "Bug in ip_route_input"
 1968 "_slow(). Please report.\n");
1969 return -EINVAL;
1970 }
1971
1972
e905a9ed 1973 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
b0c110ca 1974 in_dev->dev, &spec_dst, &itag, skb->mark);
1da177e4 1975 if (err < 0) {
e905a9ed 1976 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1da177e4 1977 saddr);
e905a9ed 1978
1da177e4
LT
1979 goto cleanup;
1980 }
1981
1982 if (err)
1983 flags |= RTCF_DIRECTSRC;
1984
51b77cae 1985 if (out_dev == in_dev && err &&
1da177e4
LT
1986 (IN_DEV_SHARED_MEDIA(out_dev) ||
1987 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1988 flags |= RTCF_DOREDIRECT;
1989
1990 if (skb->protocol != htons(ETH_P_IP)) {
 1991 /* Not IP (i.e. ARP). Do not create a route if it is
 1992 * invalid for proxy arp. DNAT routes are always valid.
65324144
JDB
 1993 *
 1994 * The proxy arp feature has been extended to allow ARP
 1995 * replies back out the same interface, to support
 1996 * Private VLAN switch technologies. See arp.c.
1da177e4 1997 */
65324144
JDB
1998 if (out_dev == in_dev &&
1999 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1da177e4
LT
2000 err = -EINVAL;
2001 goto cleanup;
2002 }
2003 }
2004
0c4dcd58
DM
2005 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
2006 IN_DEV_CONF_GET(out_dev, NOXFRM));
1da177e4
LT
2007 if (!rth) {
2008 err = -ENOBUFS;
2009 goto cleanup;
2010 }
2011
5e2b61f7 2012 rth->rt_key_dst = daddr;
1da177e4 2013 rth->rt_dst = daddr;
5e2b61f7
DM
2014 rth->rt_tos = tos;
2015 rth->rt_mark = skb->mark;
2016 rth->rt_key_src = saddr;
1da177e4
LT
2017 rth->rt_src = saddr;
2018 rth->rt_gateway = daddr;
5e2b61f7 2019 rth->rt_iif = in_dev->dev->ifindex;
d8d1f30b
CG
2020 rth->dst.dev = (out_dev)->dev;
2021 dev_hold(rth->dst.dev);
5e2b61f7 2022 rth->rt_oif = 0;
1da177e4
LT
2023 rth->rt_spec_dst= spec_dst;
2024
d8d1f30b
CG
2025 rth->dst.input = ip_forward;
2026 rth->dst.output = ip_output;
2027 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1da177e4 2028
5e2b61f7 2029 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
1da177e4
LT
2030
2031 rth->rt_flags = flags;
2032
2033 *result = rth;
2034 err = 0;
2035 cleanup:
1da177e4 2036 return err;
e905a9ed 2037}
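/*
 * Editor's note -- illustrative sketch only, not part of route.c.
 * __mkroute_input() sets RTCF_DOREDIRECT when the packet would be forwarded
 * back out the interface it arrived on, the source was validated as directly
 * reachable (RTCF_DIRECTSRC), and either the medium is shared or the chosen
 * gateway is on-link for that source. Restated as a plain predicate:
 */
static int example_should_send_redirect(int out_dev_is_in_dev, int direct_src,
					int shared_media, int gw_onlink_for_src)
{
	return out_dev_is_in_dev && direct_src &&
	       (shared_media || gw_onlink_for_src);
}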
1da177e4 2038
5969f71d
SH
2039static int ip_mkroute_input(struct sk_buff *skb,
2040 struct fib_result *res,
2041 const struct flowi *fl,
2042 struct in_device *in_dev,
2043 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 2044{
7abaa27c 2045 struct rtable* rth = NULL;
1da177e4
LT
2046 int err;
2047 unsigned hash;
2048
2049#ifdef CONFIG_IP_ROUTE_MULTIPATH
2050 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
2051 fib_select_multipath(fl, res);
2052#endif
2053
2054 /* create a routing cache entry */
2055 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2056 if (err)
2057 return err;
1da177e4
LT
2058
2059 /* put it into the cache */
e84f84f2 2060 hash = rt_hash(daddr, saddr, fl->iif,
d8d1f30b 2061 rt_genid(dev_net(rth->dst.dev)));
b23dd4fe
DM
2062 rth = rt_intern_hash(hash, rth, skb, fl->iif);
2063 if (IS_ERR(rth))
2064 return PTR_ERR(rth);
2065 return 0;
1da177e4
LT
2066}
2067
1da177e4
LT
2068/*
2069 * NOTE. We drop all packets that have a local source
2070 * address, because every properly looped-back packet
2071 * must already have the correct destination attached by the output routine.
2072 *
2073 * This approach solves two big problems:
2074 * 1. Non-simplex devices are handled properly.
2075 * 2. IP spoofing attempts are filtered with a 100% guarantee.
ebc0ffae 2076 * called with rcu_read_lock()
1da177e4
LT
2077 */
2078
9e12bb22 2079static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
2080 u8 tos, struct net_device *dev)
2081{
2082 struct fib_result res;
96d36220 2083 struct in_device *in_dev = __in_dev_get_rcu(dev);
67e28ffd 2084 struct flowi fl;
1da177e4
LT
2085 unsigned flags = 0;
2086 u32 itag = 0;
2087 struct rtable * rth;
2088 unsigned hash;
9e12bb22 2089 __be32 spec_dst;
1da177e4 2090 int err = -EINVAL;
c346dca1 2091 struct net * net = dev_net(dev);
1da177e4
LT
2092
2093 /* IP on this device is disabled. */
2094
2095 if (!in_dev)
2096 goto out;
2097
 2098 /* Check for the weirdest martians, which cannot be detected
 2099 by fib_lookup.
 2100 */
2101
1e637c74 2102 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 2103 ipv4_is_loopback(saddr))
1da177e4
LT
2104 goto martian_source;
2105
27a954bd 2106 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1da177e4
LT
2107 goto brd_input;
2108
 2109 /* Accept zero addresses only for the limited broadcast destination;
 2110 * I am not even sure whether to fix this or not. Waiting for complaints :-)
 2111 */
f97c1e0c 2112 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2113 goto martian_source;
2114
27a954bd 2115 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
1da177e4
LT
2116 goto martian_destination;
2117
2118 /*
2119 * Now we are ready to route packet.
2120 */
67e28ffd
DM
2121 fl.oif = 0;
2122 fl.iif = dev->ifindex;
2123 fl.mark = skb->mark;
2124 fl.fl4_dst = daddr;
2125 fl.fl4_src = saddr;
2126 fl.fl4_tos = tos;
2127 fl.fl4_scope = RT_SCOPE_UNIVERSE;
ebc0ffae
ED
2128 err = fib_lookup(net, &fl, &res);
2129 if (err != 0) {
1da177e4 2130 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2131 goto e_hostunreach;
1da177e4
LT
2132 goto no_route;
2133 }
1da177e4
LT
2134
2135 RT_CACHE_STAT_INC(in_slow_tot);
2136
2137 if (res.type == RTN_BROADCAST)
2138 goto brd_input;
2139
2140 if (res.type == RTN_LOCAL) {
b5f7e755 2141 err = fib_validate_source(saddr, daddr, tos,
ebc0ffae
ED
2142 net->loopback_dev->ifindex,
2143 dev, &spec_dst, &itag, skb->mark);
b5f7e755
ED
2144 if (err < 0)
2145 goto martian_source_keep_err;
2146 if (err)
1da177e4
LT
2147 flags |= RTCF_DIRECTSRC;
2148 spec_dst = daddr;
2149 goto local_input;
2150 }
2151
2152 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2153 goto e_hostunreach;
1da177e4
LT
2154 if (res.type != RTN_UNICAST)
2155 goto martian_destination;
2156
2157 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
1da177e4
LT
2158out: return err;
2159
2160brd_input:
2161 if (skb->protocol != htons(ETH_P_IP))
2162 goto e_inval;
2163
f97c1e0c 2164 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2165 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2166 else {
2167 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
b0c110ca 2168 &itag, skb->mark);
1da177e4 2169 if (err < 0)
b5f7e755 2170 goto martian_source_keep_err;
1da177e4
LT
2171 if (err)
2172 flags |= RTCF_DIRECTSRC;
2173 }
2174 flags |= RTCF_BROADCAST;
2175 res.type = RTN_BROADCAST;
2176 RT_CACHE_STAT_INC(in_brd);
2177
2178local_input:
0c4dcd58 2179 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
2180 if (!rth)
2181 goto e_nobufs;
2182
d8d1f30b 2183 rth->dst.output= ip_rt_bug;
e84f84f2 2184 rth->rt_genid = rt_genid(net);
1da177e4 2185
5e2b61f7 2186 rth->rt_key_dst = daddr;
1da177e4 2187 rth->rt_dst = daddr;
5e2b61f7
DM
2188 rth->rt_tos = tos;
2189 rth->rt_mark = skb->mark;
2190 rth->rt_key_src = saddr;
1da177e4 2191 rth->rt_src = saddr;
c7066f70 2192#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b 2193 rth->dst.tclassid = itag;
1da177e4 2194#endif
5e2b61f7 2195 rth->rt_iif = dev->ifindex;
d8d1f30b
CG
2196 rth->dst.dev = net->loopback_dev;
2197 dev_hold(rth->dst.dev);
1da177e4
LT
2198 rth->rt_gateway = daddr;
2199 rth->rt_spec_dst= spec_dst;
d8d1f30b 2200 rth->dst.input= ip_local_deliver;
1da177e4
LT
2201 rth->rt_flags = flags|RTCF_LOCAL;
2202 if (res.type == RTN_UNREACHABLE) {
d8d1f30b
CG
2203 rth->dst.input= ip_error;
2204 rth->dst.error= -err;
1da177e4
LT
2205 rth->rt_flags &= ~RTCF_LOCAL;
2206 }
2207 rth->rt_type = res.type;
e84f84f2 2208 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
b23dd4fe
DM
2209 rth = rt_intern_hash(hash, rth, skb, fl.iif);
2210 err = 0;
2211 if (IS_ERR(rth))
2212 err = PTR_ERR(rth);
ebc0ffae 2213 goto out;
1da177e4
LT
2214
2215no_route:
2216 RT_CACHE_STAT_INC(in_no_route);
2217 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2218 res.type = RTN_UNREACHABLE;
7f53878d
MC
2219 if (err == -ESRCH)
2220 err = -ENETUNREACH;
1da177e4
LT
2221 goto local_input;
2222
2223 /*
2224 * Do not cache martian addresses: they should be logged (RFC1812)
2225 */
2226martian_destination:
2227 RT_CACHE_STAT_INC(in_martian_dst);
2228#ifdef CONFIG_IP_ROUTE_VERBOSE
2229 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
2230 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2231 &daddr, &saddr, dev->name);
1da177e4 2232#endif
2c2910a4
DE
2233
2234e_hostunreach:
e905a9ed 2235 err = -EHOSTUNREACH;
ebc0ffae 2236 goto out;
2c2910a4 2237
1da177e4
LT
2238e_inval:
2239 err = -EINVAL;
ebc0ffae 2240 goto out;
1da177e4
LT
2241
2242e_nobufs:
2243 err = -ENOBUFS;
ebc0ffae 2244 goto out;
1da177e4
LT
2245
2246martian_source:
b5f7e755
ED
2247 err = -EINVAL;
2248martian_source_keep_err:
1da177e4 2249 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
ebc0ffae 2250 goto out;
1da177e4
LT
2251}
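/*
 * Editor's note -- illustrative sketch only, not part of route.c.
 * The sanity checks at the top of ip_route_input_slow() reject "martian"
 * source addresses: multicast, limited-broadcast and loopback sources can
 * never be legitimate senders. The same test over a host-order address:
 */
static int example_martian_source(unsigned int saddr)
{
	return ((saddr & 0xF0000000u) == 0xE0000000u) ||	/* multicast 224.0.0.0/4 */
	       (saddr == 0xFFFFFFFFu) ||			/* limited broadcast */
	       ((saddr & 0xFF000000u) == 0x7F000000u);		/* loopback 127.0.0.0/8 */
}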
2252
407eadd9
ED
2253int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2254 u8 tos, struct net_device *dev, bool noref)
1da177e4
LT
2255{
2256 struct rtable * rth;
2257 unsigned hash;
2258 int iif = dev->ifindex;
b5921910 2259 struct net *net;
96d36220 2260 int res;
1da177e4 2261
c346dca1 2262 net = dev_net(dev);
1080d709 2263
96d36220
ED
2264 rcu_read_lock();
2265
1080d709
NH
2266 if (!rt_caching(net))
2267 goto skip_cache;
2268
1da177e4 2269 tos &= IPTOS_RT_MASK;
e84f84f2 2270 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
1da177e4 2271
1da177e4 2272 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
d8d1f30b 2273 rth = rcu_dereference(rth->dst.rt_next)) {
5e2b61f7
DM
2274 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2275 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2276 (rth->rt_iif ^ iif) |
2277 rth->rt_oif |
2278 (rth->rt_tos ^ tos)) == 0 &&
2279 rth->rt_mark == skb->mark &&
d8d1f30b 2280 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2281 !rt_is_expired(rth)) {
407eadd9 2282 if (noref) {
d8d1f30b
CG
2283 dst_use_noref(&rth->dst, jiffies);
2284 skb_dst_set_noref(skb, &rth->dst);
407eadd9 2285 } else {
d8d1f30b
CG
2286 dst_use(&rth->dst, jiffies);
2287 skb_dst_set(skb, &rth->dst);
407eadd9 2288 }
1da177e4
LT
2289 RT_CACHE_STAT_INC(in_hit);
2290 rcu_read_unlock();
1da177e4
LT
2291 return 0;
2292 }
2293 RT_CACHE_STAT_INC(in_hlist_search);
2294 }
1da177e4 2295
1080d709 2296skip_cache:
1da177e4
LT
 2297 /* Multicast recognition logic was moved from the route cache to here.
 2298 The problem was that too many Ethernet cards have broken/missing
 2299 hardware multicast filters :-( As a result, a host on a multicast
 2300 network acquires a lot of useless route cache entries, e.g. for
 2301 SDR messages from all over the world. Now we try to get rid of them.
 2302 Really, provided the software IP multicast filter is organized
 2303 reasonably (at least, hashed), this does not result in a slowdown
 2304 compared with route cache reject entries.
 2305 Note that multicast routers are not affected, because a
 2306 route cache entry is created eventually.
 2307 */
f97c1e0c 2308 if (ipv4_is_multicast(daddr)) {
96d36220 2309 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 2310
96d36220 2311 if (in_dev) {
dbdd9a52
DM
2312 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2313 ip_hdr(skb)->protocol);
1da177e4
LT
2314 if (our
2315#ifdef CONFIG_IP_MROUTE
9d4fb27d
JP
2316 ||
2317 (!ipv4_is_local_multicast(daddr) &&
2318 IN_DEV_MFORWARD(in_dev))
1da177e4 2319#endif
9d4fb27d 2320 ) {
96d36220
ED
2321 int res = ip_route_input_mc(skb, daddr, saddr,
2322 tos, dev, our);
1da177e4 2323 rcu_read_unlock();
96d36220 2324 return res;
1da177e4
LT
2325 }
2326 }
2327 rcu_read_unlock();
2328 return -EINVAL;
2329 }
96d36220
ED
2330 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2331 rcu_read_unlock();
2332 return res;
1da177e4 2333}
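/*
 * Editor's note -- illustrative sketch only, not part of route.c.
 * The cache lookup in ip_route_input_common() compares every key field with
 * a single branch: XOR each pair of fields and OR the results together; the
 * OR is zero only when all fields match (rt_oif must itself be zero for an
 * input route). Stand-alone restatement with an assumed key struct:
 */
struct example_input_key {
	unsigned int dst, src, iif, oif, tos, mark;
};

static int example_input_keys_match(const struct example_input_key *cached,
				    const struct example_input_key *wanted)
{
	return ((cached->dst ^ wanted->dst) |
		(cached->src ^ wanted->src) |
		(cached->iif ^ wanted->iif) |
		cached->oif |				/* must be 0 for input routes */
		(cached->tos ^ wanted->tos)) == 0 &&
	       cached->mark == wanted->mark;
}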
407eadd9 2334EXPORT_SYMBOL(ip_route_input_common);
1da177e4 2335
ebc0ffae 2336/* called with rcu_read_lock() */
982721f3 2337static struct rtable *__mkroute_output(const struct fib_result *res,
5ada5527
DM
2338 const struct flowi *fl,
2339 const struct flowi *oldflp,
2340 struct net_device *dev_out,
2341 unsigned int flags)
1da177e4 2342{
982721f3 2343 struct fib_info *fi = res->fi;
1da177e4 2344 u32 tos = RT_FL_TOS(oldflp);
5ada5527 2345 struct in_device *in_dev;
982721f3 2346 u16 type = res->type;
5ada5527 2347 struct rtable *rth;
1da177e4 2348
dd28d1a0 2349 if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
5ada5527 2350 return ERR_PTR(-EINVAL);
1da177e4 2351
27a954bd 2352 if (ipv4_is_lbcast(fl->fl4_dst))
982721f3 2353 type = RTN_BROADCAST;
f97c1e0c 2354 else if (ipv4_is_multicast(fl->fl4_dst))
982721f3 2355 type = RTN_MULTICAST;
27a954bd 2356 else if (ipv4_is_zeronet(fl->fl4_dst))
5ada5527 2357 return ERR_PTR(-EINVAL);
1da177e4
LT
2358
2359 if (dev_out->flags & IFF_LOOPBACK)
2360 flags |= RTCF_LOCAL;
2361
dd28d1a0 2362 in_dev = __in_dev_get_rcu(dev_out);
ebc0ffae 2363 if (!in_dev)
5ada5527 2364 return ERR_PTR(-EINVAL);
ebc0ffae 2365
982721f3 2366 if (type == RTN_BROADCAST) {
1da177e4 2367 flags |= RTCF_BROADCAST | RTCF_LOCAL;
982721f3
DM
2368 fi = NULL;
2369 } else if (type == RTN_MULTICAST) {
dd28d1a0 2370 flags |= RTCF_MULTICAST | RTCF_LOCAL;
dbdd9a52
DM
2371 if (!ip_check_mc_rcu(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2372 oldflp->proto))
1da177e4
LT
2373 flags &= ~RTCF_LOCAL;
 2374 /* If a multicast route does not exist, use the
dd28d1a0
ED
 2375 * default one, but do not use a gateway in this case.
 2376 * Yes, it is a hack.
1da177e4 2377 */
982721f3
DM
2378 if (fi && res->prefixlen < 4)
2379 fi = NULL;
1da177e4
LT
2380 }
2381
0c4dcd58
DM
2382 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
2383 IN_DEV_CONF_GET(in_dev, NOXFRM));
8391d07b 2384 if (!rth)
5ada5527 2385 return ERR_PTR(-ENOBUFS);
8391d07b 2386
5e2b61f7
DM
2387 rth->rt_key_dst = oldflp->fl4_dst;
2388 rth->rt_tos = tos;
2389 rth->rt_key_src = oldflp->fl4_src;
2390 rth->rt_oif = oldflp->oif;
2391 rth->rt_mark = oldflp->mark;
1da177e4
LT
2392 rth->rt_dst = fl->fl4_dst;
2393 rth->rt_src = fl->fl4_src;
1018b5c0 2394 rth->rt_iif = 0;
e905a9ed 2395 /* get references to the devices that are to be held by the routing
1da177e4 2396 cache entry */
d8d1f30b 2397 rth->dst.dev = dev_out;
1da177e4 2398 dev_hold(dev_out);
1da177e4
LT
2399 rth->rt_gateway = fl->fl4_dst;
2400 rth->rt_spec_dst= fl->fl4_src;
2401
d8d1f30b 2402 rth->dst.output=ip_output;
e84f84f2 2403 rth->rt_genid = rt_genid(dev_net(dev_out));
1da177e4
LT
2404
2405 RT_CACHE_STAT_INC(out_slow_tot);
2406
2407 if (flags & RTCF_LOCAL) {
d8d1f30b 2408 rth->dst.input = ip_local_deliver;
1da177e4
LT
2409 rth->rt_spec_dst = fl->fl4_dst;
2410 }
2411 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2412 rth->rt_spec_dst = fl->fl4_src;
e905a9ed 2413 if (flags & RTCF_LOCAL &&
1da177e4 2414 !(dev_out->flags & IFF_LOOPBACK)) {
d8d1f30b 2415 rth->dst.output = ip_mc_output;
1da177e4
LT
2416 RT_CACHE_STAT_INC(out_slow_mc);
2417 }
2418#ifdef CONFIG_IP_MROUTE
982721f3 2419 if (type == RTN_MULTICAST) {
1da177e4 2420 if (IN_DEV_MFORWARD(in_dev) &&
f97c1e0c 2421 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
d8d1f30b
CG
2422 rth->dst.input = ip_mr_input;
2423 rth->dst.output = ip_mc_output;
1da177e4
LT
2424 }
2425 }
2426#endif
2427 }
2428
5e2b61f7 2429 rt_set_nexthop(rth, oldflp, res, fi, type, 0);
1da177e4
LT
2430
2431 rth->rt_flags = flags;
5ada5527 2432 return rth;
1da177e4
LT
2433}
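/*
 * Editor's note -- illustrative sketch only, not part of route.c.
 * __mkroute_output() first reclassifies the route type from the destination
 * address: limited broadcast becomes RTN_BROADCAST, multicast becomes
 * RTN_MULTICAST, and a zeronet destination is rejected. Restated over a
 * host-order address (return values are illustrative, not RTN_* constants):
 */
static int example_classify_output_daddr(unsigned int daddr)
{
	if (daddr == 0xFFFFFFFFu)
		return 1;				/* broadcast */
	if ((daddr & 0xF0000000u) == 0xE0000000u)
		return 2;				/* multicast */
	if ((daddr & 0xFF000000u) == 0x00000000u)
		return -1;				/* zeronet: reject (-EINVAL) */
	return 0;					/* keep the fib result type */
}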
2434
1da177e4
LT
2435/*
2436 * Major route resolver routine.
0197aa38 2437 * called with rcu_read_lock();
1da177e4
LT
2438 */
2439
b23dd4fe
DM
2440static struct rtable *ip_route_output_slow(struct net *net,
2441 const struct flowi *oldflp)
1da177e4
LT
2442{
2443 u32 tos = RT_FL_TOS(oldflp);
44713b67 2444 struct flowi fl;
1da177e4 2445 struct fib_result res;
0197aa38 2446 unsigned int flags = 0;
1da177e4 2447 struct net_device *dev_out = NULL;
5ada5527 2448 struct rtable *rth;
1da177e4
LT
2449
2450 res.fi = NULL;
2451#ifdef CONFIG_IP_MULTIPLE_TABLES
2452 res.r = NULL;
2453#endif
2454
44713b67
DM
2455 fl.oif = oldflp->oif;
2456 fl.iif = net->loopback_dev->ifindex;
2457 fl.mark = oldflp->mark;
2458 fl.fl4_dst = oldflp->fl4_dst;
2459 fl.fl4_src = oldflp->fl4_src;
2460 fl.fl4_tos = tos & IPTOS_RT_MASK;
2461 fl.fl4_scope = ((tos & RTO_ONLINK) ?
2462 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2463
010c2708 2464 rcu_read_lock();
1da177e4 2465 if (oldflp->fl4_src) {
b23dd4fe 2466 rth = ERR_PTR(-EINVAL);
f97c1e0c 2467 if (ipv4_is_multicast(oldflp->fl4_src) ||
1e637c74 2468 ipv4_is_lbcast(oldflp->fl4_src) ||
f97c1e0c 2469 ipv4_is_zeronet(oldflp->fl4_src))
1da177e4
LT
2470 goto out;
2471
1da177e4
LT
2472 /* I removed check for oif == dev_out->oif here.
2473 It was wrong for two reasons:
1ab35276
DL
2474 1. ip_dev_find(net, saddr) can return wrong iface, if saddr
2475 is assigned to multiple interfaces.
1da177e4
LT
2476 2. Moreover, we are allowed to send packets with saddr
2477 of another iface. --ANK
2478 */
2479
9d4fb27d
JP
2480 if (oldflp->oif == 0 &&
2481 (ipv4_is_multicast(oldflp->fl4_dst) ||
27a954bd 2482 ipv4_is_lbcast(oldflp->fl4_dst))) {
a210d01a 2483 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
0197aa38 2484 dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
a210d01a
JA
2485 if (dev_out == NULL)
2486 goto out;
2487
1da177e4
LT
 2488 /* Special hack: the user can direct multicasts
 2489 and limited broadcast via the necessary interface
 2490 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
 2491 This hack is not just for fun, it allows
 2492 vic, vat and friends to work.
 2493 They bind a socket to loopback, set the ttl to zero
 2494 and expect that it will work.
 2495 From the viewpoint of the routing cache they are broken,
 2496 because we are not allowed to build a multicast path
 2497 with a loopback source addr (look, the routing cache
 2498 cannot know that the ttl is zero, so that the packet
 2499 will not leave this host and the route is valid).
 2500 Luckily, this hack is a good workaround.
 2501 */
2502
2503 fl.oif = dev_out->ifindex;
2504 goto make_route;
2505 }
a210d01a
JA
2506
2507 if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
2508 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
0197aa38 2509 if (!__ip_dev_find(net, oldflp->fl4_src, false))
a210d01a 2510 goto out;
a210d01a 2511 }
1da177e4
LT
2512 }
2513
2514
2515 if (oldflp->oif) {
0197aa38 2516 dev_out = dev_get_by_index_rcu(net, oldflp->oif);
b23dd4fe 2517 rth = ERR_PTR(-ENODEV);
1da177e4
LT
2518 if (dev_out == NULL)
2519 goto out;
e5ed6399
HX
2520
2521 /* RACE: Check return value of inet_select_addr instead. */
fc75fc83 2522 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
b23dd4fe 2523 rth = ERR_PTR(-ENETUNREACH);
fc75fc83
ED
2524 goto out;
2525 }
f97c1e0c 2526 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
27a954bd 2527 ipv4_is_lbcast(oldflp->fl4_dst)) {
1da177e4
LT
2528 if (!fl.fl4_src)
2529 fl.fl4_src = inet_select_addr(dev_out, 0,
2530 RT_SCOPE_LINK);
2531 goto make_route;
2532 }
2533 if (!fl.fl4_src) {
f97c1e0c 2534 if (ipv4_is_multicast(oldflp->fl4_dst))
1da177e4
LT
2535 fl.fl4_src = inet_select_addr(dev_out, 0,
2536 fl.fl4_scope);
2537 else if (!oldflp->fl4_dst)
2538 fl.fl4_src = inet_select_addr(dev_out, 0,
2539 RT_SCOPE_HOST);
2540 }
2541 }
2542
2543 if (!fl.fl4_dst) {
2544 fl.fl4_dst = fl.fl4_src;
2545 if (!fl.fl4_dst)
2546 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
b40afd0e 2547 dev_out = net->loopback_dev;
b40afd0e 2548 fl.oif = net->loopback_dev->ifindex;
1da177e4
LT
2549 res.type = RTN_LOCAL;
2550 flags |= RTCF_LOCAL;
2551 goto make_route;
2552 }
2553
b40afd0e 2554 if (fib_lookup(net, &fl, &res)) {
1da177e4
LT
2555 res.fi = NULL;
2556 if (oldflp->oif) {
 2557 /* Apparently, the routing tables are wrong. Assume
 2558 that the destination is on-link.
 2559
 2560 WHY? DW.
 2561 Because we are allowed to send to an iface
 2562 even if it has NO routes and NO assigned
 2563 addresses. When oif is specified, the routing
 2564 tables are looked up with only one purpose:
 2565 to catch whether the destination is gatewayed, rather than
 2566 direct. Moreover, if MSG_DONTROUTE is set,
 2567 we send the packet, ignoring both the routing tables
 2568 and ifaddr state. --ANK
 2569
 2570
 2571 We could do this even when oif is unknown
 2572 (IPv6 likely does), but we do not.
 2573 */
2574
2575 if (fl.fl4_src == 0)
2576 fl.fl4_src = inet_select_addr(dev_out, 0,
2577 RT_SCOPE_LINK);
2578 res.type = RTN_UNICAST;
2579 goto make_route;
2580 }
b23dd4fe 2581 rth = ERR_PTR(-ENETUNREACH);
1da177e4
LT
2582 goto out;
2583 }
1da177e4
LT
2584
2585 if (res.type == RTN_LOCAL) {
9fc3bbb4
JS
2586 if (!fl.fl4_src) {
2587 if (res.fi->fib_prefsrc)
2588 fl.fl4_src = res.fi->fib_prefsrc;
2589 else
2590 fl.fl4_src = fl.fl4_dst;
2591 }
b40afd0e 2592 dev_out = net->loopback_dev;
1da177e4 2593 fl.oif = dev_out->ifindex;
1da177e4
LT
2594 res.fi = NULL;
2595 flags |= RTCF_LOCAL;
2596 goto make_route;
2597 }
2598
2599#ifdef CONFIG_IP_ROUTE_MULTIPATH
2600 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2601 fib_select_multipath(&fl, &res);
2602 else
2603#endif
2604 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
0c838ff1 2605 fib_select_default(&res);
1da177e4
LT
2606
2607 if (!fl.fl4_src)
2608 fl.fl4_src = FIB_RES_PREFSRC(res);
2609
1da177e4 2610 dev_out = FIB_RES_DEV(res);
1da177e4
LT
2611 fl.oif = dev_out->ifindex;
2612
2613
2614make_route:
5ada5527 2615 rth = __mkroute_output(&res, &fl, oldflp, dev_out, flags);
b23dd4fe 2616 if (!IS_ERR(rth)) {
5ada5527
DM
2617 unsigned int hash;
2618
2619 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2620 rt_genid(dev_net(dev_out)));
b23dd4fe 2621 rth = rt_intern_hash(hash, rth, NULL, oldflp->oif);
5ada5527 2622 }
1da177e4 2623
010c2708
DM
2624out:
2625 rcu_read_unlock();
b23dd4fe 2626 return rth;
1da177e4
LT
2627}
2628
b23dd4fe 2629struct rtable *__ip_route_output_key(struct net *net, const struct flowi *flp)
1da177e4 2630{
1da177e4 2631 struct rtable *rth;
010c2708 2632 unsigned int hash;
1da177e4 2633
1080d709
NH
2634 if (!rt_caching(net))
2635 goto slow_output;
2636
e84f84f2 2637 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
1da177e4
LT
2638
2639 rcu_read_lock_bh();
a898def2 2640 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
d8d1f30b 2641 rth = rcu_dereference_bh(rth->dst.rt_next)) {
5e2b61f7
DM
2642 if (rth->rt_key_dst == flp->fl4_dst &&
2643 rth->rt_key_src == flp->fl4_src &&
c7537967 2644 rt_is_output_route(rth) &&
5e2b61f7
DM
2645 rth->rt_oif == flp->oif &&
2646 rth->rt_mark == flp->mark &&
2647 !((rth->rt_tos ^ flp->fl4_tos) &
b5921910 2648 (IPTOS_RT_MASK | RTO_ONLINK)) &&
d8d1f30b 2649 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2650 !rt_is_expired(rth)) {
d8d1f30b 2651 dst_use(&rth->dst, jiffies);
1da177e4
LT
2652 RT_CACHE_STAT_INC(out_hit);
2653 rcu_read_unlock_bh();
b23dd4fe 2654 return rth;
1da177e4
LT
2655 }
2656 RT_CACHE_STAT_INC(out_hlist_search);
2657 }
2658 rcu_read_unlock_bh();
2659
1080d709 2660slow_output:
b23dd4fe 2661 return ip_route_output_slow(net, flp);
1da177e4 2662}
d8c97a94
ACM
2663EXPORT_SYMBOL_GPL(__ip_route_output_key);
2664
ae2688d5
JW
2665static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2666{
2667 return NULL;
2668}
2669
ec831ea7
RD
2670static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2671{
2672 return 0;
2673}
2674
14e50e57
DM
2675static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2676{
2677}
2678
2679static struct dst_ops ipv4_dst_blackhole_ops = {
2680 .family = AF_INET,
09640e63 2681 .protocol = cpu_to_be16(ETH_P_IP),
14e50e57 2682 .destroy = ipv4_dst_destroy,
ae2688d5 2683 .check = ipv4_blackhole_dst_check,
ec831ea7 2684 .default_mtu = ipv4_blackhole_default_mtu,
214f45c9 2685 .default_advmss = ipv4_default_advmss,
14e50e57 2686 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
14e50e57
DM
2687};
2688
2774c131 2689struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
14e50e57 2690{
2774c131
DM
2691 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1);
2692 struct rtable *ort = (struct rtable *) dst_orig;
14e50e57
DM
2693
2694 if (rt) {
d8d1f30b 2695 struct dst_entry *new = &rt->dst;
14e50e57 2696
14e50e57 2697 new->__use = 1;
352e512c
HX
2698 new->input = dst_discard;
2699 new->output = dst_discard;
defb3519 2700 dst_copy_metrics(new, &ort->dst);
14e50e57 2701
d8d1f30b 2702 new->dev = ort->dst.dev;
14e50e57
DM
2703 if (new->dev)
2704 dev_hold(new->dev);
2705
5e2b61f7
DM
2706 rt->rt_key_dst = ort->rt_key_dst;
2707 rt->rt_key_src = ort->rt_key_src;
2708 rt->rt_tos = ort->rt_tos;
2709 rt->rt_iif = ort->rt_iif;
2710 rt->rt_oif = ort->rt_oif;
2711 rt->rt_mark = ort->rt_mark;
14e50e57 2712
e84f84f2 2713 rt->rt_genid = rt_genid(net);
14e50e57
DM
2714 rt->rt_flags = ort->rt_flags;
2715 rt->rt_type = ort->rt_type;
2716 rt->rt_dst = ort->rt_dst;
2717 rt->rt_src = ort->rt_src;
2718 rt->rt_iif = ort->rt_iif;
2719 rt->rt_gateway = ort->rt_gateway;
2720 rt->rt_spec_dst = ort->rt_spec_dst;
2721 rt->peer = ort->peer;
2722 if (rt->peer)
2723 atomic_inc(&rt->peer->refcnt);
62fa8a84
DM
2724 rt->fi = ort->fi;
2725 if (rt->fi)
2726 atomic_inc(&rt->fi->fib_clntref);
14e50e57
DM
2727
2728 dst_free(new);
2729 }
2730
2774c131
DM
2731 dst_release(dst_orig);
2732
2733 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
14e50e57
DM
2734}
2735
b23dd4fe
DM
2736struct rtable *ip_route_output_flow(struct net *net, struct flowi *flp,
2737 struct sock *sk)
1da177e4 2738{
b23dd4fe 2739 struct rtable *rt = __ip_route_output_key(net, flp);
1da177e4 2740
b23dd4fe
DM
2741 if (IS_ERR(rt))
2742 return rt;
1da177e4
LT
2743
2744 if (flp->proto) {
2745 if (!flp->fl4_src)
b23dd4fe 2746 flp->fl4_src = rt->rt_src;
1da177e4 2747 if (!flp->fl4_dst)
b23dd4fe
DM
2748 flp->fl4_dst = rt->rt_dst;
2749 rt = (struct rtable *) xfrm_lookup(net, &rt->dst, flp, sk, 0);
1da177e4
LT
2750 }
2751
b23dd4fe 2752 return rt;
1da177e4 2753}
d8c97a94
ACM
2754EXPORT_SYMBOL_GPL(ip_route_output_flow);
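/*
 * Editor's note -- illustrative sketch only, not part of route.c, and the
 * flowi field names are specific to this kernel revision. A caller that
 * wants an output route here fills a struct flowi and uses
 * ip_route_output_key() (which ends up in the resolver functions above, as
 * inet_rtm_getroute() below also does), checking the result with IS_ERR():
 */
static struct rtable *example_resolve_output_route(struct net *net,
						   __be32 daddr, __be32 saddr,
						   int oif, u32 mark)
{
	struct flowi fl = {
		.fl4_dst = daddr,
		.fl4_src = saddr,
		.oif	 = oif,
		.mark	 = mark,
	};
	struct rtable *rt = ip_route_output_key(net, &fl);

	if (IS_ERR(rt))
		return NULL;	/* or propagate PTR_ERR(rt) to the caller */
	return rt;
}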
2755
4feb88e5
BT
2756static int rt_fill_info(struct net *net,
2757 struct sk_buff *skb, u32 pid, u32 seq, int event,
b6544c0b 2758 int nowait, unsigned int flags)
1da177e4 2759{
511c3f92 2760 struct rtable *rt = skb_rtable(skb);
1da177e4 2761 struct rtmsg *r;
be403ea1 2762 struct nlmsghdr *nlh;
e3703b3d
TG
2763 long expires;
2764 u32 id = 0, ts = 0, tsage = 0, error;
be403ea1
TG
2765
2766 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2767 if (nlh == NULL)
26932566 2768 return -EMSGSIZE;
be403ea1
TG
2769
2770 r = nlmsg_data(nlh);
1da177e4
LT
2771 r->rtm_family = AF_INET;
2772 r->rtm_dst_len = 32;
2773 r->rtm_src_len = 0;
5e2b61f7 2774 r->rtm_tos = rt->rt_tos;
1da177e4 2775 r->rtm_table = RT_TABLE_MAIN;
be403ea1 2776 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
1da177e4
LT
2777 r->rtm_type = rt->rt_type;
2778 r->rtm_scope = RT_SCOPE_UNIVERSE;
2779 r->rtm_protocol = RTPROT_UNSPEC;
2780 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2781 if (rt->rt_flags & RTCF_NOTIFY)
2782 r->rtm_flags |= RTM_F_NOTIFY;
be403ea1 2783
17fb2c64 2784 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
be403ea1 2785
5e2b61f7 2786 if (rt->rt_key_src) {
1da177e4 2787 r->rtm_src_len = 32;
5e2b61f7 2788 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
1da177e4 2789 }
d8d1f30b
CG
2790 if (rt->dst.dev)
2791 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
c7066f70 2792#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b
CG
2793 if (rt->dst.tclassid)
2794 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
1da177e4 2795#endif
c7537967 2796 if (rt_is_input_route(rt))
17fb2c64 2797 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
5e2b61f7 2798 else if (rt->rt_src != rt->rt_key_src)
17fb2c64 2799 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
be403ea1 2800
1da177e4 2801 if (rt->rt_dst != rt->rt_gateway)
17fb2c64 2802 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
be403ea1 2803
defb3519 2804 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
be403ea1
TG
2805 goto nla_put_failure;
2806
5e2b61f7
DM
2807 if (rt->rt_mark)
2808 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
963bfeee 2809
d8d1f30b 2810 error = rt->dst.error;
2c8cec5c
DM
2811 expires = (rt->peer && rt->peer->pmtu_expires) ?
2812 rt->peer->pmtu_expires - jiffies : 0;
1da177e4 2813 if (rt->peer) {
317fe0e6 2814 inet_peer_refcheck(rt->peer);
2c1409a0 2815 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
1da177e4 2816 if (rt->peer->tcp_ts_stamp) {
e3703b3d 2817 ts = rt->peer->tcp_ts;
9d729f72 2818 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
1da177e4
LT
2819 }
2820 }
be403ea1 2821
c7537967 2822 if (rt_is_input_route(rt)) {
1da177e4 2823#ifdef CONFIG_IP_MROUTE
e448515c 2824 __be32 dst = rt->rt_dst;
1da177e4 2825
f97c1e0c 2826 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
4feb88e5
BT
2827 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2828 int err = ipmr_get_route(net, skb, r, nowait);
1da177e4
LT
2829 if (err <= 0) {
2830 if (!nowait) {
2831 if (err == 0)
2832 return 0;
be403ea1 2833 goto nla_put_failure;
1da177e4
LT
2834 } else {
2835 if (err == -EMSGSIZE)
be403ea1 2836 goto nla_put_failure;
e3703b3d 2837 error = err;
1da177e4
LT
2838 }
2839 }
2840 } else
2841#endif
5e2b61f7 2842 NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
1da177e4
LT
2843 }
2844
d8d1f30b 2845 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
e3703b3d
TG
2846 expires, error) < 0)
2847 goto nla_put_failure;
be403ea1
TG
2848
2849 return nlmsg_end(skb, nlh);
1da177e4 2850
be403ea1 2851nla_put_failure:
26932566
PM
2852 nlmsg_cancel(skb, nlh);
2853 return -EMSGSIZE;
1da177e4
LT
2854}
2855
63f3444f 2856static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
1da177e4 2857{
3b1e0a65 2858 struct net *net = sock_net(in_skb->sk);
d889ce3b
TG
2859 struct rtmsg *rtm;
2860 struct nlattr *tb[RTA_MAX+1];
1da177e4 2861 struct rtable *rt = NULL;
9e12bb22
AV
2862 __be32 dst = 0;
2863 __be32 src = 0;
2864 u32 iif;
d889ce3b 2865 int err;
963bfeee 2866 int mark;
1da177e4
LT
2867 struct sk_buff *skb;
2868
d889ce3b
TG
2869 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2870 if (err < 0)
2871 goto errout;
2872
2873 rtm = nlmsg_data(nlh);
2874
1da177e4 2875 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
d889ce3b
TG
2876 if (skb == NULL) {
2877 err = -ENOBUFS;
2878 goto errout;
2879 }
1da177e4
LT
2880
 2881 /* Reserve room for dummy headers; this skb can pass
 2882 through a good chunk of the routing engine.
 2883 */
459a98ed 2884 skb_reset_mac_header(skb);
c1d2bbe1 2885 skb_reset_network_header(skb);
d2c962b8
SH
2886
2887 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
eddc9ec5 2888 ip_hdr(skb)->protocol = IPPROTO_ICMP;
1da177e4
LT
2889 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2890
17fb2c64
AV
2891 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2892 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
d889ce3b 2893 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
963bfeee 2894 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
1da177e4
LT
2895
2896 if (iif) {
d889ce3b
TG
2897 struct net_device *dev;
2898
1937504d 2899 dev = __dev_get_by_index(net, iif);
d889ce3b
TG
2900 if (dev == NULL) {
2901 err = -ENODEV;
2902 goto errout_free;
2903 }
2904
1da177e4
LT
2905 skb->protocol = htons(ETH_P_IP);
2906 skb->dev = dev;
963bfeee 2907 skb->mark = mark;
1da177e4
LT
2908 local_bh_disable();
2909 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2910 local_bh_enable();
d889ce3b 2911
511c3f92 2912 rt = skb_rtable(skb);
d8d1f30b
CG
2913 if (err == 0 && rt->dst.error)
2914 err = -rt->dst.error;
1da177e4 2915 } else {
d889ce3b 2916 struct flowi fl = {
5811662b
CG
2917 .fl4_dst = dst,
2918 .fl4_src = src,
2919 .fl4_tos = rtm->rtm_tos,
d889ce3b 2920 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
963bfeee 2921 .mark = mark,
d889ce3b 2922 };
b23dd4fe
DM
2923 rt = ip_route_output_key(net, &fl);
2924
2925 err = 0;
2926 if (IS_ERR(rt))
2927 err = PTR_ERR(rt);
1da177e4 2928 }
d889ce3b 2929
1da177e4 2930 if (err)
d889ce3b 2931 goto errout_free;
1da177e4 2932
d8d1f30b 2933 skb_dst_set(skb, &rt->dst);
1da177e4
LT
2934 if (rtm->rtm_flags & RTM_F_NOTIFY)
2935 rt->rt_flags |= RTCF_NOTIFY;
2936
4feb88e5 2937 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1937504d 2938 RTM_NEWROUTE, 0, 0);
d889ce3b
TG
2939 if (err <= 0)
2940 goto errout_free;
1da177e4 2941
1937504d 2942 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
d889ce3b 2943errout:
2942e900 2944 return err;
1da177e4 2945
d889ce3b 2946errout_free:
1da177e4 2947 kfree_skb(skb);
d889ce3b 2948 goto errout;
1da177e4
LT
2949}
2950
2951int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2952{
2953 struct rtable *rt;
2954 int h, s_h;
2955 int idx, s_idx;
1937504d
DL
2956 struct net *net;
2957
3b1e0a65 2958 net = sock_net(skb->sk);
1da177e4
LT
2959
2960 s_h = cb->args[0];
d8c92830
ED
2961 if (s_h < 0)
2962 s_h = 0;
1da177e4 2963 s_idx = idx = cb->args[1];
a6272665
ED
2964 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2965 if (!rt_hash_table[h].chain)
2966 continue;
1da177e4 2967 rcu_read_lock_bh();
a898def2 2968 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
d8d1f30b
CG
2969 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2970 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
1da177e4 2971 continue;
e84f84f2 2972 if (rt_is_expired(rt))
29e75252 2973 continue;
d8d1f30b 2974 skb_dst_set_noref(skb, &rt->dst);
4feb88e5 2975 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
e905a9ed 2976 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
b6544c0b 2977 1, NLM_F_MULTI) <= 0) {
adf30907 2978 skb_dst_drop(skb);
1da177e4
LT
2979 rcu_read_unlock_bh();
2980 goto done;
2981 }
adf30907 2982 skb_dst_drop(skb);
1da177e4
LT
2983 }
2984 rcu_read_unlock_bh();
2985 }
2986
2987done:
2988 cb->args[0] = h;
2989 cb->args[1] = idx;
2990 return skb->len;
2991}
2992
2993void ip_rt_multicast_event(struct in_device *in_dev)
2994{
76e6ebfb 2995 rt_cache_flush(dev_net(in_dev->dev), 0);
1da177e4
LT
2996}
2997
2998#ifdef CONFIG_SYSCTL
81c684d1 2999static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
8d65af78 3000 void __user *buffer,
1da177e4
LT
3001 size_t *lenp, loff_t *ppos)
3002{
3003 if (write) {
639e104f 3004 int flush_delay;
81c684d1 3005 ctl_table ctl;
39a23e75 3006 struct net *net;
639e104f 3007
81c684d1
DL
3008 memcpy(&ctl, __ctl, sizeof(ctl));
3009 ctl.data = &flush_delay;
8d65af78 3010 proc_dointvec(&ctl, write, buffer, lenp, ppos);
639e104f 3011
81c684d1 3012 net = (struct net *)__ctl->extra1;
39a23e75 3013 rt_cache_flush(net, flush_delay);
1da177e4 3014 return 0;
e905a9ed 3015 }
1da177e4
LT
3016
3017 return -EINVAL;
3018}
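/*
 * Editor's note -- illustrative userspace sketch, not part of route.c.
 * The handler above only accepts writes: writing an integer delay to
 * /proc/sys/net/ipv4/route/flush (registered below via
 * ipv4_route_flush_table) triggers rt_cache_flush() for that namespace.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int example_flush_route_cache(int delay)
{
	char buf[16];
	int len, fd;

	fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "%d\n", delay);
	if (write(fd, buf, len) != len) {
		close(fd);
		return -1;
	}
	return close(fd);
}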
3019
eeb61f71 3020static ctl_table ipv4_route_table[] = {
1da177e4 3021 {
1da177e4
LT
3022 .procname = "gc_thresh",
3023 .data = &ipv4_dst_ops.gc_thresh,
3024 .maxlen = sizeof(int),
3025 .mode = 0644,
6d9f239a 3026 .proc_handler = proc_dointvec,
1da177e4
LT
3027 },
3028 {
1da177e4
LT
3029 .procname = "max_size",
3030 .data = &ip_rt_max_size,
3031 .maxlen = sizeof(int),
3032 .mode = 0644,
6d9f239a 3033 .proc_handler = proc_dointvec,
1da177e4
LT
3034 },
3035 {
3036 /* Deprecated. Use gc_min_interval_ms */
e905a9ed 3037
1da177e4
LT
3038 .procname = "gc_min_interval",
3039 .data = &ip_rt_gc_min_interval,
3040 .maxlen = sizeof(int),
3041 .mode = 0644,
6d9f239a 3042 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3043 },
3044 {
1da177e4
LT
3045 .procname = "gc_min_interval_ms",
3046 .data = &ip_rt_gc_min_interval,
3047 .maxlen = sizeof(int),
3048 .mode = 0644,
6d9f239a 3049 .proc_handler = proc_dointvec_ms_jiffies,
1da177e4
LT
3050 },
3051 {
1da177e4
LT
3052 .procname = "gc_timeout",
3053 .data = &ip_rt_gc_timeout,
3054 .maxlen = sizeof(int),
3055 .mode = 0644,
6d9f239a 3056 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3057 },
3058 {
1da177e4
LT
3059 .procname = "gc_interval",
3060 .data = &ip_rt_gc_interval,
3061 .maxlen = sizeof(int),
3062 .mode = 0644,
6d9f239a 3063 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3064 },
3065 {
1da177e4
LT
3066 .procname = "redirect_load",
3067 .data = &ip_rt_redirect_load,
3068 .maxlen = sizeof(int),
3069 .mode = 0644,
6d9f239a 3070 .proc_handler = proc_dointvec,
1da177e4
LT
3071 },
3072 {
1da177e4
LT
3073 .procname = "redirect_number",
3074 .data = &ip_rt_redirect_number,
3075 .maxlen = sizeof(int),
3076 .mode = 0644,
6d9f239a 3077 .proc_handler = proc_dointvec,
1da177e4
LT
3078 },
3079 {
1da177e4
LT
3080 .procname = "redirect_silence",
3081 .data = &ip_rt_redirect_silence,
3082 .maxlen = sizeof(int),
3083 .mode = 0644,
6d9f239a 3084 .proc_handler = proc_dointvec,
1da177e4
LT
3085 },
3086 {
1da177e4
LT
3087 .procname = "error_cost",
3088 .data = &ip_rt_error_cost,
3089 .maxlen = sizeof(int),
3090 .mode = 0644,
6d9f239a 3091 .proc_handler = proc_dointvec,
1da177e4
LT
3092 },
3093 {
1da177e4
LT
3094 .procname = "error_burst",
3095 .data = &ip_rt_error_burst,
3096 .maxlen = sizeof(int),
3097 .mode = 0644,
6d9f239a 3098 .proc_handler = proc_dointvec,
1da177e4
LT
3099 },
3100 {
1da177e4
LT
3101 .procname = "gc_elasticity",
3102 .data = &ip_rt_gc_elasticity,
3103 .maxlen = sizeof(int),
3104 .mode = 0644,
6d9f239a 3105 .proc_handler = proc_dointvec,
1da177e4
LT
3106 },
3107 {
1da177e4
LT
3108 .procname = "mtu_expires",
3109 .data = &ip_rt_mtu_expires,
3110 .maxlen = sizeof(int),
3111 .mode = 0644,
6d9f239a 3112 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3113 },
3114 {
1da177e4
LT
3115 .procname = "min_pmtu",
3116 .data = &ip_rt_min_pmtu,
3117 .maxlen = sizeof(int),
3118 .mode = 0644,
6d9f239a 3119 .proc_handler = proc_dointvec,
1da177e4
LT
3120 },
3121 {
1da177e4
LT
3122 .procname = "min_adv_mss",
3123 .data = &ip_rt_min_advmss,
3124 .maxlen = sizeof(int),
3125 .mode = 0644,
6d9f239a 3126 .proc_handler = proc_dointvec,
1da177e4 3127 },
f8572d8f 3128 { }
1da177e4 3129};
39a23e75 3130
2f4520d3
AV
3131static struct ctl_table empty[1];
3132
3133static struct ctl_table ipv4_skeleton[] =
3134{
f8572d8f 3135 { .procname = "route",
d994af0d 3136 .mode = 0555, .child = ipv4_route_table},
f8572d8f 3137 { .procname = "neigh",
d994af0d 3138 .mode = 0555, .child = empty},
2f4520d3
AV
3139 { }
3140};
3141
3142static __net_initdata struct ctl_path ipv4_path[] = {
f8572d8f
EB
3143 { .procname = "net", },
3144 { .procname = "ipv4", },
39a23e75
DL
3145 { },
3146};
3147
39a23e75
DL
3148static struct ctl_table ipv4_route_flush_table[] = {
3149 {
39a23e75
DL
3150 .procname = "flush",
3151 .maxlen = sizeof(int),
3152 .mode = 0200,
6d9f239a 3153 .proc_handler = ipv4_sysctl_rtcache_flush,
39a23e75 3154 },
f8572d8f 3155 { },
39a23e75
DL
3156};
3157
2f4520d3 3158static __net_initdata struct ctl_path ipv4_route_path[] = {
f8572d8f
EB
3159 { .procname = "net", },
3160 { .procname = "ipv4", },
3161 { .procname = "route", },
2f4520d3
AV
3162 { },
3163};
3164
39a23e75
DL
3165static __net_init int sysctl_route_net_init(struct net *net)
3166{
3167 struct ctl_table *tbl;
3168
3169 tbl = ipv4_route_flush_table;
09ad9bc7 3170 if (!net_eq(net, &init_net)) {
39a23e75
DL
3171 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3172 if (tbl == NULL)
3173 goto err_dup;
3174 }
3175 tbl[0].extra1 = net;
3176
3177 net->ipv4.route_hdr =
3178 register_net_sysctl_table(net, ipv4_route_path, tbl);
3179 if (net->ipv4.route_hdr == NULL)
3180 goto err_reg;
3181 return 0;
3182
3183err_reg:
3184 if (tbl != ipv4_route_flush_table)
3185 kfree(tbl);
3186err_dup:
3187 return -ENOMEM;
3188}
3189
3190static __net_exit void sysctl_route_net_exit(struct net *net)
3191{
3192 struct ctl_table *tbl;
3193
3194 tbl = net->ipv4.route_hdr->ctl_table_arg;
3195 unregister_net_sysctl_table(net->ipv4.route_hdr);
3196 BUG_ON(tbl == ipv4_route_flush_table);
3197 kfree(tbl);
3198}
3199
3200static __net_initdata struct pernet_operations sysctl_route_ops = {
3201 .init = sysctl_route_net_init,
3202 .exit = sysctl_route_net_exit,
3203};
1da177e4
LT
3204#endif
3205
3ee94372 3206static __net_init int rt_genid_init(struct net *net)
9f5e97e5 3207{
3ee94372
NH
3208 get_random_bytes(&net->ipv4.rt_genid,
3209 sizeof(net->ipv4.rt_genid));
9f5e97e5
DL
3210 return 0;
3211}
3212
3ee94372
NH
3213static __net_initdata struct pernet_operations rt_genid_ops = {
3214 .init = rt_genid_init,
9f5e97e5
DL
3215};
3216
3217
c7066f70 3218#ifdef CONFIG_IP_ROUTE_CLASSID
7d720c3e 3219struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
c7066f70 3220#endif /* CONFIG_IP_ROUTE_CLASSID */
1da177e4
LT
3221
3222static __initdata unsigned long rhash_entries;
3223static int __init set_rhash_entries(char *str)
3224{
3225 if (!str)
3226 return 0;
3227 rhash_entries = simple_strtoul(str, &str, 0);
3228 return 1;
3229}
3230__setup("rhash_entries=", set_rhash_entries);
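/*
 * Editor's note -- illustrative sketch only, not part of route.c.
 * "rhash_entries=" is a kernel boot parameter: passing e.g.
 * rhash_entries=131072 on the command line sets the requested number of
 * route cache hash buckets instead of letting alloc_large_system_hash()
 * size the table from available memory. The parser above reduces to:
 */
#include <stdlib.h>

static unsigned long example_parse_rhash_entries(const char *str)
{
	/* Base 0 accepts decimal, octal (0...) and hex (0x...), matching
	 * simple_strtoul(str, &str, 0) in set_rhash_entries(). */
	return str ? strtoul(str, NULL, 0) : 0;
}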
3231
3232int __init ip_rt_init(void)
3233{
424c4b70 3234 int rc = 0;
1da177e4 3235
c7066f70 3236#ifdef CONFIG_IP_ROUTE_CLASSID
0dcec8c2 3237 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1da177e4
LT
3238 if (!ip_rt_acct)
3239 panic("IP: failed to allocate ip_rt_acct\n");
1da177e4
LT
3240#endif
3241
e5d679f3
AD
3242 ipv4_dst_ops.kmem_cachep =
3243 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
20c2df83 3244 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3245
14e50e57
DM
3246 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3247
fc66f95c
ED
3248 if (dst_entries_init(&ipv4_dst_ops) < 0)
3249 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3250
3251 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3252 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3253
424c4b70
ED
3254 rt_hash_table = (struct rt_hash_bucket *)
3255 alloc_large_system_hash("IP route cache",
3256 sizeof(struct rt_hash_bucket),
3257 rhash_entries,
4481374c 3258 (totalram_pages >= 128 * 1024) ?
18955cfc 3259 15 : 17,
8d1502de 3260 0,
424c4b70
ED
3261 &rt_hash_log,
3262 &rt_hash_mask,
c9503e0f 3263 rhash_entries ? 0 : 512 * 1024);
22c047cc
ED
3264 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3265 rt_hash_lock_init();
1da177e4
LT
3266
3267 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3268 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3269
1da177e4
LT
3270 devinet_init();
3271 ip_fib_init();
3272
73b38711 3273 if (ip_rt_proc_init())
107f1634 3274 printk(KERN_ERR "Unable to create route proc files\n");
1da177e4
LT
3275#ifdef CONFIG_XFRM
3276 xfrm_init();
a33bc5c1 3277 xfrm4_init(ip_rt_max_size);
1da177e4 3278#endif
63f3444f
TG
3279 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3280
39a23e75
DL
3281#ifdef CONFIG_SYSCTL
3282 register_pernet_subsys(&sysctl_route_ops);
3283#endif
3ee94372 3284 register_pernet_subsys(&rt_genid_ops);
1da177e4
LT
3285 return rc;
3286}
3287
a1bc6eb4 3288#ifdef CONFIG_SYSCTL
eeb61f71
AV
3289/*
3290 * We really need to sanitize the damn ipv4 init order, then all
3291 * this nonsense will go away.
3292 */
3293void __init ip_static_sysctl_init(void)
3294{
2f4520d3 3295 register_sysctl_paths(ipv4_path, ipv4_skeleton);
eeb61f71 3296}
a1bc6eb4 3297#endif