/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *	Marc Boucher		:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp4) \
	((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_default_mtu(const struct dst_entry *dst);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

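/* Metrics are copy-on-write: a route starts out pointing at read-only
 * metrics (typically the fib_info's) and, on the first write, switches
 * dst->_metrics over to the writable array cached in the destination's
 * inet_peer via cmpxchg(), dropping the fib_info reference on success.
 */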
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;
	u32 *p = NULL;

	if (!rt->peer)
		rt_bind_peer(rt, 1);

	peer = rt->peer;
	if (peer) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		p = peer->metrics;
		if (inet_metrics_new(peer))
			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else {
			if (rt->fi) {
				fib_info_put(rt->fi);
				rt->fi = NULL;
			}
		}
	}
	return p;
}

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.default_mtu =		ipv4_default_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
 * The size of this table is a power of two and depends on the number of CPUS.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

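/* Hash function for the route cache: a Jenkins hash over the addresses and
 * the interface index, seeded with the per-namespace generation count, so
 * that bumping rt_genid implicitly invalidates every cached entry.
 */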
static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

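/* /proc/net/rt_cache walks the hash table under rcu_read_lock_bh(),
 * showing only entries that belong to the reader's namespace and carry the
 * current genid; /proc/net/stat/rt_cache dumps the per-cpu counters.
 */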
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			   r->dst.dev ? r->dst.dev->name : "*",
			   (__force u32)r->rt_dst,
			   (__force u32)r->rt_gateway,
			   r->rt_flags, atomic_read(&r->dst.__refcnt),
			   r->dst.__use, 0, (__force u32)r->rt_src,
			   dst_metric_advmss(&r->dst) + 40,
			   dst_metric(&r->dst, RTAX_WINDOW),
			   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
				 dst_metric(&r->dst, RTAX_RTTVAR)),
			   r->rt_tos,
			   r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
			   r->dst.hh ? (r->dst.hh->hh_output ==
					dev_queue_xmit) : 0,
			   r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

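/* /proc/net/rt_acct: fold the per-cpu ip_rt_acct counters (256 classid
 * slots of byte/packet counts in each direction) into a single table for
 * user space.
 */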
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_acct_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		(rth->peer && rth->peer->pmtu_expires);
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	age = jiffies - rth->dst.lastuse;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}

static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

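/* The comparisons below OR together the XOR of each pair of key fields;
 * the result is zero only when every field matches, which lets a lookup
 * test all keys with a single branch.
 */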
static inline bool compare_hash_inputs(const struct rtable *rt1,
				       const struct rtable *rt2)
{
	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0);
}

static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
{
	return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_mark ^ rt2->rt_mark) |
		(rt1->rt_tos ^ rt2->rt_tos) |
		(rt1->rt_oif ^ rt2->rt_oif) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0;
}

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(struct net *net, int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;

	for (i = 0; i <= rt_hash_mask; i++) {
		struct rtable __rcu **pprev;
		struct rtable *list;

		if (process_context && need_resched())
			cond_resched();
		rth = rcu_dereference_raw(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));

		list = NULL;
		pprev = &rt_hash_table[i].chain;
		rth = rcu_dereference_protected(*pprev,
			lockdep_is_held(rt_hash_lock_addr(i)));

		while (rth) {
			next = rcu_dereference_protected(rth->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));

			if (!net ||
			    net_eq(dev_net(rth->dst.dev), net)) {
				rcu_assign_pointer(*pprev, next);
				rcu_assign_pointer(rth->dst.rt_next, list);
				list = rth;
			} else {
				pprev = &rth->dst.rt_next;
			}
			rth = next;
		}

		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; list; list = next) {
			next = rcu_dereference_protected(list->dst.rt_next, 1);
			rt_free(list);
		}
	}
}

/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This gives us an estimate for rt_chain_length_max:
 *   rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)

/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif)
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(aux, rth))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without giving recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(net, !in_softirq());
}

/* Flush previously cache-invalidated entries from the cache */
void rt_cache_flush_batch(struct net *net)
{
	rt_do_flush(net, !in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   Current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expire is large enough to keep enough warm entries,
   and when load increases it reduces to limit cache size.
 */

static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long now = jiffies;
	int goal;
	int entries = dst_entries_get_fast(&ipv4_dst_ops);

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    entries < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	entries = dst_entries_get_slow(&ipv4_dst_ops);
	/* Calculate number of entries, which we want to expire now. */
	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = entries - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = entries - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = entries - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					continue;
				}
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop process if:

		   - if expire reduced to zero. Otherwise, expire is halved.
		   - if table is not full.
		   - if we are called from interrupt.
		   - jiffies check is just fallback/debug loop breaker.
		     We will not spin here for long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				dst_entries_get_fast(&ipv4_dst_ops), goal, i);
#endif

		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
#endif
out:	return 0;
}

/*
 * Returns number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
	int length = 0;
	const struct rtable *rth = head;

	while (rth) {
		length += has_noalias(head, rth);
		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
	}
	return length >> FRACT_BITS;
}

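/* Insert a route into the hash table.  An entry that already matches on
 * keys and namespace is promoted to the front of its chain instead; if the
 * chain grows past ip_rt_gc_elasticity, the unreferenced entry with the
 * lowest rt_score() is evicted, and persistently overlong chains trigger
 * an emergency hash rebuild.
 */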
static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
				     struct sk_buff *skb, int ifindex)
{
	struct rtable	*rth, *cand;
	struct rtable __rcu **rthp, **candp;
	unsigned long	now;
	u32 		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->dst.dev))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it
		 * (Thanks Alexey)
		 * Note: To avoid expensive rcu stuff for this uncached dst,
		 * we set DST_NOCACHE so that dst_release() can free dst without
		 * waiting a grace period.
		 */

		rt->dst.flags |= DST_NOCACHE;
		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
			int err = arp_bind_neighbour(&rt->dst);
			if (err) {
				if (net_ratelimit())
					printk(KERN_WARNING
					    "Neighbour table failure & not caching routes.\n");
				ip_rt_put(rt);
				return ERR_PTR(err);
			}
		}

		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (skb)
				skb_dst_set(skb, &rth->dst);
			return rth;
		}

		if (!atomic_read(&rth->dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max &&
		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
			struct net *net = dev_net(rt->dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(net)) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(net);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
					ifindex, rt_genid(net));
			goto restart;
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
		int err = arp_bind_neighbour(&rt->dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return ERR_PTR(err);
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
			rt_drop(rt);
			return ERR_PTR(-ENOBUFS);
		}
	}

	rt->dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
	if (rt->dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
		       hash, &rt->rt_dst);
		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
			printk(" . %pI4", &trt->rt_dst);
		printk("\n");
	}
#endif
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (skb)
		skb_dst_set(skb, &rt->dst);
	return rt;
}

static atomic_t __rt_peer_genid = ATOMIC_INIT(0);

static u32 rt_peer_genid(void)
{
	return atomic_read(&__rt_peer_genid);
}

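/* Attach the inet_peer entry for rt->rt_dst to the route.  The peer holds
 * the long-lived per-destination state (cached metrics, learned PMTU,
 * learned redirect gateway, IP ID counter) that outlives any single
 * routing cache entry.
 */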
void rt_bind_peer(struct rtable *rt, int create)
{
	struct inet_peer *peer;

	peer = inet_getpeer_v4(rt->rt_dst, create);

	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
		inet_putpeer(peer);
	else
		rt->rt_peer_genid = rt_peer_genid();
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If a peer is attached to the destination, it is never
		 * detached, so we do not need to grab a lock to
		 * dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable __rcu **rthp;
	struct rtable *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

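/* ICMP redirect handling: after sanity-checking the advised gateway, the
 * new gateway is recorded in the destination's inet_peer and
 * __rt_peer_genid is bumped, so cached routes pick up the change on their
 * next ipv4_dst_check().
 */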
/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct inet_peer *peer;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	peer = inet_getpeer_v4(daddr, 1);
	if (peer) {
		peer->redirect_learned.a4 = new_gw;

		inet_putpeer(peer);

		atomic_inc(&__rt_peer_genid);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
			"  Advised path = %pI4 -> %pI4\n",
		       &old_gw, dev->name, &new_gw,
		       &saddr, &daddr);
#endif
	;
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if (rt->rt_flags & RTCF_REDIRECTED) {
			unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
						rt->rt_oif,
						rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
				&rt->rt_dst, rt->rt_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		} else if (rt->peer &&
			   rt->peer->pmtu_expires &&
			   time_after_eq(jiffies, rt->peer->pmtu_expires)) {
			unsigned long orig = rt->peer->pmtu_expires;

			if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
				dst_metric_set(dst, RTAX_MTU,
					       rt->peer->pmtu_orig);
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	if (!rt->peer)
		rt_bind_peer(rt, 1);
	peer = rt->peer;
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything;
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
			       &rt->rt_src, rt->rt_iif,
			       &rt->rt_dst, &rt->rt_gateway);
#endif
	}
}

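/* ICMP error generation for failed routes is rate limited with a simple
 * token bucket kept in the destination's inet_peer: tokens accumulate one
 * per jiffy up to ip_rt_error_burst, and each ICMP sent costs
 * ip_rt_error_cost tokens.
 */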
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	bool send;
	int code;

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(dev_net(rt->dst.dev),
				IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	if (!rt->peer)
		rt_bind_peer(rt, 1);
	peer = rt->peer;

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}

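/* guess_mtu() returns the largest plateau value strictly below old_mtu
 * (e.g. guess_mtu(1500) == 1492), falling back to the IPv4 minimum of 68.
 * It is used below when a "fragmentation needed" ICMP arrives with a zero
 * next-hop MTU, as older BSD-derived stacks send.
 */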
unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	unsigned short old_mtu = ntohs(iph->tot_len);
	unsigned short est_mtu = 0;
	struct inet_peer *peer;

	peer = inet_getpeer_v4(iph->daddr, 1);
	if (peer) {
		unsigned short mtu = new_mtu;

		if (new_mtu < 68 || new_mtu >= old_mtu) {
			/* BSD 4.2 derived systems incorrectly adjust
			 * tot_len by the IP header length, and report
			 * a zero MTU in the ICMP message.
			 */
			if (mtu == 0 &&
			    old_mtu >= 68 + (iph->ihl << 2))
				old_mtu -= iph->ihl << 2;
			mtu = guess_mtu(old_mtu);
		}

		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
			unsigned long pmtu_expires;

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			est_mtu = mtu;
			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;
		}

		inet_putpeer(peer);

		atomic_inc(&__rt_peer_genid);
	}
	return est_mtu ? : new_mtu;
}

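/* Apply a learned PMTU from the peer to this dst while it is still valid;
 * a pmtu_expires of 0 means "nothing learned", so expiry is signalled by
 * atomically swapping pmtu_expires to 0 and restoring the original MTU
 * metric.
 */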
static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
{
	unsigned long expires = peer->pmtu_expires;

	if (time_before(jiffies, expires)) {
		u32 orig_dst_mtu = dst_mtu(dst);
		if (peer->pmtu_learned < orig_dst_mtu) {
			if (!peer->pmtu_orig)
				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
		}
	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
}

static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;

	dst_confirm(dst);

	if (!rt->peer)
		rt_bind_peer(rt, 1);
	peer = rt->peer;
	if (peer) {
		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
			unsigned long pmtu_expires;

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;

			atomic_inc(&__rt_peer_genid);
			rt->rt_peer_genid = rt_peer_genid();
		}
		check_peer_pmtu(dst, peer);
	}
}

static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
	struct rtable *rt = (struct rtable *) dst;
	__be32 orig_gw = rt->rt_gateway;

	dst_confirm(&rt->dst);

	neigh_release(rt->dst.neighbour);
	rt->dst.neighbour = NULL;

	rt->rt_gateway = peer->redirect_learned.a4;
	if (arp_bind_neighbour(&rt->dst) ||
	    !(rt->dst.neighbour->nud_state & NUD_VALID)) {
		if (rt->dst.neighbour)
			neigh_event_send(rt->dst.neighbour, NULL);
		rt->rt_gateway = orig_gw;
		return -EAGAIN;
	} else {
		rt->rt_flags |= RTCF_REDIRECTED;
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
					rt->dst.neighbour);
	}
	return 0;
}

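/* Revalidate a cached route.  A route is dead once its rt_genid no longer
 * matches the namespace generation; otherwise, if the peer generation has
 * moved on, fold any newly learned PMTU or redirect gateway from the
 * inet_peer into this dst before handing it back.
 */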
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt_is_expired(rt))
		return NULL;
	if (rt->rt_peer_genid != rt_peer_genid()) {
		struct inet_peer *peer;

		if (!rt->peer)
			rt_bind_peer(rt, 0);

		peer = rt->peer;
		if (peer && peer->pmtu_expires)
			check_peer_pmtu(dst, peer);

		if (peer && peer->redirect_learned.a4 &&
		    peer->redirect_learned.a4 != rt->rt_gateway) {
			if (check_peer_redir(dst, peer))
				return NULL;
		}

		rt->rt_peer_genid = rt_peer_genid();
	}
	return dst;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;

	if (rt->fi) {
		fib_info_put(rt->fi);
		rt->fi = NULL;
	}
	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}
}


static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt &&
	    rt->peer &&
	    rt->peer->pmtu_expires) {
		unsigned long orig = rt->peer->pmtu_expires;

		if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
			dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
	}
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}

/*
   We do not cache the source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	__be32 src;
	struct fib_result res;

	if (rt_is_output_route(rt))
		src = rt->rt_src;
	else {
		struct flowi4 fl4 = {
			.daddr = rt->rt_key_dst,
			.saddr = rt->rt_key_src,
			.flowi4_tos = rt->rt_tos,
			.flowi4_oif = rt->rt_oif,
			.flowi4_iif = rt->rt_iif,
			.flowi4_mark = rt->rt_mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

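/* Default advertised MSS clamp: interface MTU minus 40 bytes for the
 * IPv4 + TCP headers, bounded below by ip_rt_min_advmss and above by the
 * 16-bit limit.
 */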
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (advmss == 0) {
		advmss = max_t(unsigned int, dst->dev->mtu - 40,
			       ip_rt_min_advmss);
		if (advmss > 65535 - 40)
			advmss = 65535 - 40;
	}
	return advmss;
}

static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst->dev->mtu;

	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		const struct rtable *rt = (const struct rtable *) dst;

		if (rt->rt_gateway != rt->rt_dst && mtu > 576)
			mtu = 576;
	}

	if (mtu > IP_MAX_MTU)
		mtu = IP_MAX_MTU;

	return mtu;
}

68a5e3dd 1770static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
5e2b61f7 1771 struct fib_info *fi)
a4daad6b 1772{
0131ba45
DM
1773 struct inet_peer *peer;
1774 int create = 0;
a4daad6b 1775
0131ba45
DM
1776 /* If a peer entry exists for this destination, we must hook
1777 * it up in order to get at cached metrics.
1778 */
68a5e3dd 1779 if (oldflp4 && (oldflp4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
0131ba45
DM
1780 create = 1;
1781
3c0afdca 1782 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
0131ba45 1783 if (peer) {
3c0afdca 1784 rt->rt_peer_genid = rt_peer_genid();
a4daad6b
DM
1785 if (inet_metrics_new(peer))
1786 memcpy(peer->metrics, fi->fib_metrics,
1787 sizeof(u32) * RTAX_MAX);
1788 dst_init_metrics(&rt->dst, peer->metrics, false);
2c8cec5c
DM
1789
1790 if (peer->pmtu_expires)
1791 check_peer_pmtu(&rt->dst, peer);
f39925db
DM
1792 if (peer->redirect_learned.a4 &&
1793 peer->redirect_learned.a4 != rt->rt_gateway) {
1794 rt->rt_gateway = peer->redirect_learned.a4;
1795 rt->rt_flags |= RTCF_REDIRECTED;
1796 }
0131ba45
DM
1797 } else {
1798 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1799 rt->fi = fi;
1800 atomic_inc(&fi->fib_clntref);
1801 }
1802 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
a4daad6b
DM
1803 }
1804}

static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
			   const struct fib_result *res,
			   struct fib_info *fi, u16 type, u32 itag)
{
	struct dst_entry *dst = &rt->dst;

	if (fi) {
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		rt_init_metrics(rt, oldflp4, fi);
#ifdef CONFIG_IP_ROUTE_CLASSID
		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	}

	if (dst_mtu(dst) > IP_MAX_MTU)
		dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
	if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
		dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = type;
}

static struct rtable *rt_dst_alloc(struct net_device *dev,
				   bool nopolicy, bool noxfrm)
{
	return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
			 DST_HOST |
			 (nopolicy ? DST_NOPOLICY : 0) |
			 (noxfrm ? DST_NOXFRM : 0));
}
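
/*
 * Note (illustrative, my reading of the dst_alloc() signature at this
 * point in history): the arguments are the ops table, the device that
 * will hold a reference, an initial refcount of 1, an initial obsolete
 * value of -1, and the DST_* flag mask assembled above, so every IPv4
 * route starts life as a DST_HOST entry with the per-device policy and
 * xfrm bypass bits folded in at allocation time.
 */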

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	unsigned int hash;
	struct rtable *rth;
	__be32 spec_dst;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
					  &itag);
		if (err < 0)
			goto e_err;
	}
	rth = rt_dst_alloc(init_net.loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;

	rth->rt_key_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->rt_tos	= tos;
	rth->rt_mark	= skb->mark;
	rth->rt_key_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_route_iif = dev->ifindex;
	rth->rt_iif	= dev->ifindex;
	rth->rt_oif	= 0;
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst = spec_dst;
	rth->rt_genid	= rt_genid(dev_net(dev));
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	if (our) {
		rth->dst.input = ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
	rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
	err = 0;
	if (IS_ERR(rth))
		err = PTR_ERR(rth);
	return err;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 * RFC1812 recommendation: if the source is martian,
		 * the only hint is the MAC header.
		 */
		printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			int i;
			const unsigned char *p = skb_mac_header(skb);
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos,
			   struct rtable **result)
{
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned int flags = 0;
	__be32 spec_dst;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input" \
			       "_slow(). Please, report\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	if (err)
		flags |= RTCF_DIRECTSRC;

	if (out_dev == in_dev && err &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back on the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	rth = rt_dst_alloc(out_dev->dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM));
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_key_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->rt_tos	= tos;
	rth->rt_mark	= skb->mark;
	rth->rt_key_src	= saddr;
	rth->rt_src	= saddr;
	rth->rt_gateway	= daddr;
	rth->rt_route_iif = in_dev->dev->ifindex;
	rth->rt_iif	= in_dev->dev->ifindex;
	rth->rt_oif	= 0;
	rth->rt_spec_dst = spec_dst;

	rth->dst.input = ip_forward;
	rth->dst.output = ip_output;
	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));

	rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);

	rth->rt_flags = flags;

	*result = rth;
	err = 0;
 cleanup:
	return err;
}

static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    const struct flowi4 *fl4,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
	struct rtable *rth = NULL;
	int err;
	unsigned hash;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1)
		fib_select_multipath(res);
#endif

	/* create a routing cache entry */
	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
	if (err)
		return err;

	/* put it into the cache */
	hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
		       rt_genid(dev_net(rth->dst.dev)));
	rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
	if (IS_ERR(rth))
		return PTR_ERR(rth);
	return 0;
}

/*
 * NOTE. We drop all packets that have a local source
 * address, because every properly looped back packet
 * must have the correct destination already attached by the output routine.
 *
 * Such an approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 * called with rcu_read_lock()
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flowi4	fl4;
	unsigned	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	unsigned	hash;
	__be32		spec_dst;
	int		err = -EINVAL;
	struct net	*net = dev_net(dev);

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which cannot be detected
	 * by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr))
		goto martian_source;

	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I am not even sure whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
		goto martian_destination;

	/*
	 * Now we are ready to route the packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	err = fib_lookup(net, &fl4, &res);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			goto e_hostunreach;
		goto no_route;
	}

	RT_CACHE_STAT_INC(in_slow_tot);

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  net->loopback_dev->ifindex,
					  dev, &spec_dst, &itag);
		if (err < 0)
			goto martian_source_keep_err;
		if (err)
			flags |= RTCF_DIRECTSRC;
		spec_dst = daddr;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto e_hostunreach;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr))
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
					  &itag);
		if (err < 0)
			goto martian_source_keep_err;
		if (err)
			flags |= RTCF_DIRECTSRC;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	rth = rt_dst_alloc(net->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
	rth->rt_genid = rt_genid(net);

	rth->rt_key_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->rt_tos	= tos;
	rth->rt_mark	= skb->mark;
	rth->rt_key_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_route_iif = dev->ifindex;
	rth->rt_iif	= dev->ifindex;
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst = spec_dst;
	rth->dst.input = ip_local_deliver;
	rth->rt_flags	= flags|RTCF_LOCAL;
	if (res.type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}
	rth->rt_type	= res.type;
	hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
	rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
	err = 0;
	if (IS_ERR(rth))
		err = PTR_ERR(rth);
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	res.type = RTN_UNREACHABLE;
	if (err == -ESRCH)
		err = -ENETUNREACH;
	goto local_input;

	/*
	 * Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
			&daddr, &saddr, dev->name);
#endif

e_hostunreach:
	err = -EHOSTUNREACH;
	goto out;

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	err = -EINVAL;
martian_source_keep_err:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev, bool noref)
{
	struct rtable	*rth;
	unsigned	hash;
	int iif = dev->ifindex;
	struct net *net;
	int res;

	net = dev_net(dev);

	rcu_read_lock();

	if (!rt_caching(net))
		goto skip_cache;

	tos &= IPTOS_RT_MASK;
	hash = rt_hash(daddr, saddr, iif, rt_genid(net));

	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->dst.rt_next)) {
		if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
		     ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
		     (rth->rt_iif ^ iif) |
		     rth->rt_oif |
		     (rth->rt_tos ^ tos)) == 0 &&
		    rth->rt_mark == skb->mark &&
		    net_eq(dev_net(rth->dst.dev), net) &&
		    !rt_is_expired(rth)) {
			if (noref) {
				dst_use_noref(&rth->dst, jiffies);
				skb_dst_set_noref(skb, &rth->dst);
			} else {
				dst_use(&rth->dst, jiffies);
				skb_dst_set(skb, &rth->dst);
			}
			RT_CACHE_STAT_INC(in_hit);
			rcu_read_unlock();
			return 0;
		}
		RT_CACHE_STAT_INC(in_hlist_search);
	}

skip_cache:
	/* Multicast recognition logic was moved from the route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result, a host on a multicast
	 * network can acquire a lot of useless route cache entries, e.g. from
	 * SDR messages from all over the world. Now we try to get rid of them.
	 * Really, provided the software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * compared with route cache reject entries.
	 * Note that multicast routers are not affected, because a
	 * route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
						  ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
				||
			    (!ipv4_is_local_multicast(daddr) &&
			     IN_DEV_MFORWARD(in_dev))
#endif
			   ) {
				int res = ip_route_input_mc(skb, daddr, saddr,
							    tos, dev, our);
				rcu_read_unlock();
				return res;
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(ip_route_input_common);
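
/*
 * Usage sketch (illustrative, not part of route.c): callers normally
 * reach ip_route_input_common() through the two inline wrappers in
 * include/net/route.h, which pick whether the skb takes a refcounted
 * or a noref reference on the cached dst. They look roughly like this:
 */
#if 0	/* illustrative copy of the include/net/route.h wrappers */
static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
				 u8 tos, struct net_device *devin)
{
	return ip_route_input_common(skb, dst, src, tos, devin, false);
}

static inline int ip_route_input_noref(struct sk_buff *skb, __be32 dst,
				       __be32 src, u8 tos,
				       struct net_device *devin)
{
	return ip_route_input_common(skb, dst, src, tos, devin, true);
}
#endif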

/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4,
				       const struct flowi4 *oldflp4,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	u32 tos = RT_FL_TOS(oldflp4);
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;

	if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
		return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, oldflp4->daddr, oldflp4->saddr,
				     oldflp4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		/* If a multicast route does not exist, use
		 * the default one, but do not gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	}

	rth = rt_dst_alloc(dev_out,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM));
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_key_dst	= oldflp4->daddr;
	rth->rt_tos	= tos;
	rth->rt_key_src	= oldflp4->saddr;
	rth->rt_oif	= oldflp4->flowi4_oif;
	rth->rt_mark	= oldflp4->flowi4_mark;
	rth->rt_dst	= fl4->daddr;
	rth->rt_src	= fl4->saddr;
	rth->rt_route_iif = 0;
	rth->rt_iif	= oldflp4->flowi4_oif ? : dev_out->ifindex;
	rth->rt_gateway = fl4->daddr;
	rth->rt_spec_dst = fl4->saddr;

	rth->dst.output = ip_output;
	rth->rt_genid = rt_genid(dev_net(dev_out));

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL) {
		rth->dst.input = ip_local_deliver;
		rth->rt_spec_dst = fl4->daddr;
	}
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		rth->rt_spec_dst = fl4->saddr;
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(oldflp4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, oldflp4, res, fi, type, 0);

	rth->rt_flags = flags;
	return rth;
}

/*
 * Major route resolver routine.
 * called with rcu_read_lock();
 */

static struct rtable *ip_route_output_slow(struct net *net,
					   const struct flowi4 *oldflp4)
{
	u32 tos	= RT_FL_TOS(oldflp4);
	struct flowi4 fl4;
	struct fib_result res;
	unsigned int flags = 0;
	struct net_device *dev_out = NULL;
	struct rtable *rth;

	res.fi		= NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r		= NULL;
#endif

	fl4.flowi4_oif = oldflp4->flowi4_oif;
	fl4.flowi4_iif = net->loopback_dev->ifindex;
	fl4.flowi4_mark = oldflp4->flowi4_mark;
	fl4.daddr = oldflp4->daddr;
	fl4.saddr = oldflp4->saddr;
	fl4.flowi4_tos = tos & IPTOS_RT_MASK;
	fl4.flowi4_scope = ((tos & RTO_ONLINK) ?
			    RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	if (oldflp4->saddr) {
		rth = ERR_PTR(-EINVAL);
		if (ipv4_is_multicast(oldflp4->saddr) ||
		    ipv4_is_lbcast(oldflp4->saddr) ||
		    ipv4_is_zeronet(oldflp4->saddr))
			goto out;

		/* I removed the check for oif == dev_out->oif here.
		 * It was wrong for two reasons:
		 * 1. ip_dev_find(net, saddr) can return the wrong iface,
		 *    if saddr is assigned to multiple interfaces.
		 * 2. Moreover, we are allowed to send packets with the saddr
		 *    of another iface. --ANK
		 */

		if (oldflp4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(oldflp4->daddr) ||
		     ipv4_is_lbcast(oldflp4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, oldflp4->saddr, false);
			if (dev_out == NULL)
				goto out;

			/* Special hack: the user can direct multicasts
			 * and limited broadcast via the necessary interface
			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			 * This hack is not just for fun, it allows
			 * vic, vat and friends to work.
			 * They bind a socket to loopback, set ttl to zero
			 * and expect that it will work.
			 * From the viewpoint of the routing cache they are
			 * broken, because we are not allowed to build a
			 * multicast path with a loopback source addr (look,
			 * the routing cache cannot know that ttl is zero, so
			 * that the packet will not leave this host and the
			 * route is valid).
			 * Luckily, this hack is a good workaround.
			 */

			fl4.flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(oldflp4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, oldflp4->saddr, false))
				goto out;
		}
	}


	if (oldflp4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, oldflp4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(oldflp4->daddr) ||
		    ipv4_is_lbcast(oldflp4->daddr)) {
			if (!fl4.saddr)
				fl4.saddr = inet_select_addr(dev_out, 0,
							     RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4.saddr) {
			if (ipv4_is_multicast(oldflp4->daddr))
				fl4.saddr = inet_select_addr(dev_out, 0,
							     fl4.flowi4_scope);
			else if (!oldflp4->daddr)
				fl4.saddr = inet_select_addr(dev_out, 0,
							     RT_SCOPE_HOST);
		}
	}

	if (!fl4.daddr) {
		fl4.daddr = fl4.saddr;
		if (!fl4.daddr)
			fl4.daddr = fl4.saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4.flowi4_oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, &fl4, &res)) {
		res.fi = NULL;
		if (oldflp4->flowi4_oif) {
			/* Apparently, the routing tables are wrong. Assume
			 * that the destination is on link.
			 *
			 * WHY? DW.
			 * Because we are allowed to send to an iface
			 * even if it has NO routes and NO assigned
			 * addresses. When oif is specified, the routing
			 * tables are looked up with only one purpose:
			 * to catch if the destination is gatewayed, rather
			 * than direct. Moreover, if MSG_DONTROUTE is set,
			 * we send the packet, ignoring both the routing
			 * tables and ifaddr state. --ANK
			 *
			 * We could make it even if oif is unknown,
			 * likely IPv6, but we do not.
			 */

			if (fl4.saddr == 0)
				fl4.saddr = inet_select_addr(dev_out, 0,
							     RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(-ENETUNREACH);
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl4.saddr) {
			if (res.fi->fib_prefsrc)
				fl4.saddr = res.fi->fib_prefsrc;
			else
				fl4.saddr = fl4.daddr;
		}
		dev_out = net->loopback_dev;
		fl4.flowi4_oif = dev_out->ifindex;
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl4.flowi4_oif == 0)
		fib_select_multipath(&res);
	else
#endif
	if (!res.prefixlen &&
	    res.table->tb_num_default > 1 &&
	    res.type == RTN_UNICAST && !fl4.flowi4_oif)
		fib_select_default(&res);

	if (!fl4.saddr)
		fl4.saddr = FIB_RES_PREFSRC(net, res);

	dev_out = FIB_RES_DEV(res);
	fl4.flowi4_oif = dev_out->ifindex;


make_route:
	rth = __mkroute_output(&res, &fl4, oldflp4, dev_out, flags);
	if (!IS_ERR(rth)) {
		unsigned int hash;

		hash = rt_hash(oldflp4->daddr, oldflp4->saddr, oldflp4->flowi4_oif,
			       rt_genid(dev_net(dev_out)));
		rth = rt_intern_hash(hash, rth, NULL, oldflp4->flowi4_oif);
	}

out:
	rcu_read_unlock();
	return rth;
}

struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4)
{
	struct rtable *rth;
	unsigned int hash;

	if (!rt_caching(net))
		goto slow_output;

	hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));

	rcu_read_lock_bh();
	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
		rth = rcu_dereference_bh(rth->dst.rt_next)) {
		if (rth->rt_key_dst == flp4->daddr &&
		    rth->rt_key_src == flp4->saddr &&
		    rt_is_output_route(rth) &&
		    rth->rt_oif == flp4->flowi4_oif &&
		    rth->rt_mark == flp4->flowi4_mark &&
		    !((rth->rt_tos ^ flp4->flowi4_tos) &
			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
		    net_eq(dev_net(rth->dst.dev), net) &&
		    !rt_is_expired(rth)) {
			dst_use(&rth->dst, jiffies);
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			return rth;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

slow_output:
	return ip_route_output_slow(net, flp4);
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);

static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
{
	return 0;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.protocol		= cpu_to_be16(ETH_P_IP),
	.destroy		= ipv4_dst_destroy,
	.check			= ipv4_blackhole_dst_check,
	.default_mtu		= ipv4_blackhole_default_mtu,
	.default_advmss		= ipv4_default_advmss,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.cow_metrics		= ipv4_rt_blackhole_cow_metrics,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
	struct rtable *ort = (struct rtable *) dst_orig;

	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;
		dst_copy_metrics(new, &ort->dst);

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_key_dst = ort->rt_key_dst;
		rt->rt_key_src = ort->rt_key_src;
		rt->rt_tos = ort->rt_tos;
		rt->rt_route_iif = ort->rt_route_iif;
		rt->rt_iif = ort->rt_iif;
		rt->rt_oif = ort->rt_oif;
		rt->rt_mark = ort->rt_mark;

		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_dst = ort->rt_dst;
		rt->rt_src = ort->rt_src;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_spec_dst = ort->rt_spec_dst;
		rt->peer = ort->peer;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);
		rt->fi = ort->fi;
		if (rt->fi)
			atomic_inc(&rt->fi->fib_clntref);

		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}

struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto) {
		if (!flp4->saddr)
			flp4->saddr = rt->rt_src;
		if (!flp4->daddr)
			flp4->daddr = rt->rt_dst;
		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
						   flowi4_to_flowi(flp4),
						   sk, 0);
	}

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
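
/*
 * Usage sketch (illustrative, not part of route.c): a typical in-kernel
 * caller fills in a flowi4 key and lets the resolver pick the source
 * address and device; a non-zero flowi4_proto makes the result pass
 * through xfrm_lookup() as above.  The protocol and TOS values below
 * are assumptions chosen just for the example.
 */
#if 0	/* illustrative sketch only */
static int example_output_route(struct net *net, __be32 daddr)
{
	struct flowi4 fl4 = {
		.daddr		= daddr,	/* where we want to go */
		.flowi4_tos	= RT_TOS(0),
		.flowi4_proto	= IPPROTO_UDP,	/* triggers the xfrm path */
	};
	struct rtable *rt = ip_route_output_flow(net, &fl4, NULL);

	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* e.g. -ENETUNREACH */
	/* ... transmit via rt->dst, then drop the reference ... */
	ip_rt_put(rt);
	return 0;
}
#endif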

static int rt_fill_info(struct net *net,
			struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= rt->rt_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->rt_key_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
	}
	if (rt->dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
	if (rt_is_input_route(rt))
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->rt_key_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
		goto nla_put_failure;

	if (rt->rt_mark)
		NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);

	error = rt->dst.error;
	expires = (rt->peer && rt->peer->pmtu_expires) ?
		rt->peer->pmtu_expires - jiffies : 0;
	if (rt->peer) {
		inet_peer_refcheck(rt->peer);
		id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers; this skb can pass
	 * through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		struct flowi4 fl4 = {
			.daddr = dst,
			.saddr = src,
			.flowi4_tos = rtm->rtm_tos,
			.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
			.flowi4_mark = mark,
		};
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
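
/*
 * Usage sketch (illustrative, not part of route.c): this handler is
 * what answers "ip route get <addr>".  A minimal userspace requester,
 * assuming a plain NLM_F_REQUEST carrying a single RTA_DST attribute
 * is enough (as "ip route get" sends), looks roughly like this:
 */
#if 0	/* userspace example, not kernel code */
#include <sys/socket.h>
#include <linux/rtnetlink.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static int route_get(const char *addr)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg	rtm;
		char		buf[64];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	char reply[4096];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type  = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family  = AF_INET;
	req.rtm.rtm_dst_len = 32;

	/* append the RTA_DST attribute after the rtmsg header */
	rta = (struct rtattr *)(((char *)&req) + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len  = RTA_LENGTH(4);
	inet_pton(AF_INET, addr, RTA_DATA(rta));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_LENGTH(4);

	sendto(fd, &req, req.nlh.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	recv(fd, reply, sizeof(reply), 0);	/* RTM_NEWROUTE built by rt_fill_info() */
	close(fd);
	return 0;
}
#endif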

int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;
	struct net *net;

	net = sock_net(skb->sk);

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
		if (!rt_hash_table[h].chain)
			continue;
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
			if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
				continue;
			if (rt_is_expired(rt))
				continue;
			skb_dst_set_noref(skb, &rt->dst);
			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}

#ifdef CONFIG_SYSCTL
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}
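
/*
 * Usage sketch (illustrative, not part of route.c): this handler sits
 * behind /proc/sys/net/ipv4/route/flush (registered via the flush table
 * and paths below); writing a delay in seconds funnels through it into
 * rt_cache_flush().  "echo -1 > /proc/sys/net/ipv4/route/flush" from a
 * shell, or from C:
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <unistd.h>

static void flush_route_cache(void)
{
	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);

	if (fd >= 0) {
		write(fd, "-1\n", 3);	/* proc_dointvec() parses the delay */
		close(fd);
	}
}
#endif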

static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table empty[1];

static struct ctl_table ipv4_skeleton[] =
{
	{ .procname = "route",
	  .mode = 0555, .child = ipv4_route_table},
	{ .procname = "neigh",
	  .mode = 0555, .child = empty},
	{ }
};

static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ },
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

static __net_initdata struct ctl_path ipv4_route_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ .procname = "route", },
	{ },
};

static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr =
		register_net_sysctl_table(net, ipv4_route_path, tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

static __net_init int rt_genid_init(struct net *net)
{
	get_random_bytes(&net->ipv4.rt_genid,
			 sizeof(net->ipv4.rt_genid));
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);

3248
3249int __init ip_rt_init(void)
3250{
424c4b70 3251 int rc = 0;
1da177e4 3252
c7066f70 3253#ifdef CONFIG_IP_ROUTE_CLASSID
0dcec8c2 3254 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1da177e4
LT
3255 if (!ip_rt_acct)
3256 panic("IP: failed to allocate ip_rt_acct\n");
1da177e4
LT
3257#endif
3258
e5d679f3
AD
3259 ipv4_dst_ops.kmem_cachep =
3260 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
20c2df83 3261 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3262
14e50e57
DM
3263 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3264
fc66f95c
ED
3265 if (dst_entries_init(&ipv4_dst_ops) < 0)
3266 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3267
3268 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3269 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3270
424c4b70
ED
3271 rt_hash_table = (struct rt_hash_bucket *)
3272 alloc_large_system_hash("IP route cache",
3273 sizeof(struct rt_hash_bucket),
3274 rhash_entries,
4481374c 3275 (totalram_pages >= 128 * 1024) ?
18955cfc 3276 15 : 17,
8d1502de 3277 0,
424c4b70
ED
3278 &rt_hash_log,
3279 &rt_hash_mask,
c9503e0f 3280 rhash_entries ? 0 : 512 * 1024);
22c047cc
ED
3281 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3282 rt_hash_lock_init();
1da177e4
LT
3283
3284 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3285 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3286
1da177e4
LT
3287 devinet_init();
3288 ip_fib_init();
3289
73b38711 3290 if (ip_rt_proc_init())
107f1634 3291 printk(KERN_ERR "Unable to create route proc files\n");
1da177e4
LT
3292#ifdef CONFIG_XFRM
3293 xfrm_init();
a33bc5c1 3294 xfrm4_init(ip_rt_max_size);
1da177e4 3295#endif
63f3444f
TG
3296 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3297
39a23e75
DL
3298#ifdef CONFIG_SYSCTL
3299 register_pernet_subsys(&sysctl_route_ops);
3300#endif
3ee94372 3301 register_pernet_subsys(&rt_genid_ops);
1da177e4
LT
3302 return rc;
3303}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif