/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

static struct delayed_work expires_work;
static unsigned long expires_ljiffies;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;
	u32 *p = NULL;

	peer = rt_get_peer_create(rt, rt->rt_dst);
	if (peer) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		p = peer->metrics;
		if (inet_metrics_new(peer))
			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else {
			if (rt->fi) {
				fib_info_put(rt->fi);
				rt->fi = NULL;
			}
		}
	}
	return p;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);

/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */
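
/*
 * Illustrative sketch (not part of the original code): a lockless reader
 * walks a bucket entirely under RCU, as rt_cache_get_first() does below:
 *
 *	rcu_read_lock_bh();
 *	for (r = rcu_dereference_bh(rt_hash_table[h].chain); r;
 *	     r = rcu_dereference_bh(r->dst.rt_next))
 *		;	/* inspect r; no bucket lock is taken */
 *	rcu_read_unlock_bh();
 *
 * Writers, by contrast, take rt_hash_lock_addr(h) before unlinking entries
 * (see rt_do_flush() and rt_intern_hash()).
 */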

struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of
 * spinlocks.  The size of this table is a power of two and depends on the
 * number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
			GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif
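
/*
 * Illustrative example (not part of the original code): the table above
 * stripes locks, so distinct buckets can share one.  With
 * RT_HASH_LOCK_SZ == 256, buckets 5 and 261 both map to rt_hash_locks[5]
 * (261 & 255 == 5); writers on either bucket serialize on that spinlock
 * while readers proceed lock-free under RCU.
 */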

static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
static unsigned int		rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			   r->dst.dev ? r->dst.dev->name : "*",
			   (__force u32)r->rt_dst,
			   (__force u32)r->rt_gateway,
			   r->rt_flags, atomic_read(&r->dst.__refcnt),
			   r->dst.__use, 0, (__force u32)r->rt_src,
			   dst_metric_advmss(&r->dst) + 40,
			   dst_metric(&r->dst, RTAX_WINDOW),
			   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
				 dst_metric(&r->dst, RTAX_RTTVAR)),
			   r->rt_key_tos,
			   -1, 0, 0, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	  = THIS_MODULE,
	.open	  = rt_acct_proc_open,
	.read	  = seq_read,
	.llseek	  = seq_lseek,
	.release  = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =	{
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		(rt_has_peer(rth) && rt_peer_ptr(rth)->pmtu_expires);
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	age = jiffies - rth->dst.lastuse;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}

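/*
 * Worked example (illustrative, not from the original source): for an
 * output route last used 100 jiffies ago, the low 30 bits hold the
 * inverted age, so younger entries score higher; bit 30 is set because
 * it is an output route, and bit 31 would be set too if rt_valuable()
 * were true.  rt_intern_hash() evicts the entry with the lowest score
 * when a chain grows past ip_rt_gc_elasticity.
 */
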
static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct rtable *rt1,
				       const struct rtable *rt2)
{
	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
}

static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
{
	return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_mark ^ rt2->rt_mark) |
		(rt1->rt_key_tos ^ rt2->rt_key_tos) |
		(rt1->rt_route_iif ^ rt2->rt_route_iif) |
		(rt1->rt_oif ^ rt2->rt_oif)) == 0;
}

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(struct net *net, int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;

	for (i = 0; i <= rt_hash_mask; i++) {
		struct rtable __rcu **pprev;
		struct rtable *list;

		if (process_context && need_resched())
			cond_resched();
		rth = rcu_access_pointer(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));

		list = NULL;
		pprev = &rt_hash_table[i].chain;
		rth = rcu_dereference_protected(*pprev,
			lockdep_is_held(rt_hash_lock_addr(i)));

		while (rth) {
			next = rcu_dereference_protected(rth->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));

			if (!net ||
			    net_eq(dev_net(rth->dst.dev), net)) {
				rcu_assign_pointer(*pprev, next);
				rcu_assign_pointer(rth->dst.rt_next, list);
				list = rth;
			} else {
				pprev = &rth->dst.rt_next;
			}
			rth = next;
		}

		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; list; list = next) {
			next = rcu_dereference_protected(list->dst.rt_next, 1);
			rt_free(list);
		}
	}
}

/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This is to get an estimate for rt_chain_length_max:
 *	rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)

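/*
 * Worked example (illustrative, not from the original source): with
 * FRACT_BITS == 3, chain lengths are accumulated in units of 1/8
 * (has_noalias() returns ONE == 8 per counted entry).  If a scan sees an
 * average of 20/8 = 2.5 entries per bucket with standard deviation
 * 4/8 = 0.5, then AVG + 4*SD = 20 + 16 = 36 and 36 >> FRACT_BITS = 4, so
 * rt_chain_length_max becomes max(ip_rt_gc_elasticity, 4) = 8 with the
 * default elasticity of 8.
 */
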
/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif)
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(aux, rth))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long samples = 0;
	unsigned long sum = 0, sum2 = 0;
	unsigned long delta;
	u64 mult;

	delta = jiffies - expires_ljiffies;
	expires_ljiffies = jiffies;
	mult = ((u64)delta) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;
		unsigned long length;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (need_resched())
			cond_resched();

		samples++;

		if (rcu_dereference_raw(*rthp) == NULL)
			continue;
		length = 0;
		spin_lock_bh(rt_hash_lock_addr(i));
		while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
			prefetch(rth->dst.rt_next);
			if (rt_is_expired(rth) ||
			    rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				continue;
			}

			/* We only count entries on a chain with equal
			 * hash inputs once so that entries for
			 * different QOS levels, and other non-hash
			 * input attributes don't unfairly skew the
			 * length computation
			 */
			tmo >>= 1;
			rthp = &rth->dst.rt_next;
			length += has_noalias(rt_hash_table[i].chain, rth);
		}
		spin_unlock_bh(rt_hash_lock_addr(i));
		sum += length;
		sum2 += length*length;
	}
	if (samples) {
		unsigned long avg = sum / samples;
		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
		rt_chain_length_max = max_t(unsigned long,
					    ip_rt_gc_elasticity,
					    (avg + 4*sd) >> FRACT_BITS);
	}
	rover = i;
}

/*
 * rt_worker_func() is run in process context.
 * We call rt_check_expire() to scan part of the hash table.
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}

/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without giving recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
	inetpeer_invalidate_family(AF_INET);
}

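/*
 * Example (illustrative): once rt_cache_invalidate() has bumped rt_genid,
 * every cached entry fails the rt_is_expired() check (its stored rt_genid
 * no longer matches), and new lookups land in different buckets because
 * the genid is an input to rt_hash().  Stale entries are thus ignored
 * immediately and reaped lazily, without a synchronous walk of the table.
 */
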
/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(net, !in_softirq());
}

/* Flush previous cache invalidated entries from the cache */
void rt_cache_flush_batch(struct net *net)
{
	rt_do_flush(net, !in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	net_warn_ratelimited("Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   Current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expire is large enough to keep enough warm entries,
   and when load increases it reduces to limit cache size.
 */

static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long now = jiffies;
	int goal;
	int entries = dst_entries_get_fast(&ipv4_dst_ops);

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    entries < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	entries = dst_entries_get_slow(&ipv4_dst_ops);
	/* Calculate number of entries, which we want to expire now. */
	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = entries - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = entries - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = entries - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					continue;
				}
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop the process if:

		   - expire has been reduced to zero; otherwise expire is halved.
		   - the table is not full.
		   - we are called from interrupt.
		   - the jiffies check is just a fallback/debug loop breaker.
		     We will not spin here for a long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;

		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	net_warn_ratelimited("dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
out:	return 0;
}

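/*
 * Worked example (illustrative, not from the original source): with a
 * 1024-bucket table (rt_hash_log == 10) and the default
 * ip_rt_gc_elasticity of 8, the comfort zone is 8 << 10 = 8192 entries.
 * A cache holding 10000 entries yields goal = 10000 - 8192 = 1808 entries
 * to expire in this pass; below 8192 entries the goal is derived from the
 * equilibrium point instead.
 */
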
/*
 * Returns number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
	int length = 0;
	const struct rtable *rth = head;

	while (rth) {
		length += has_noalias(head, rth);
		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
	}
	return length >> FRACT_BITS;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;
	struct neighbour *n;

	rt = (const struct rtable *) dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *) &rt->rt_gateway;

	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
	if (n)
		return n;
	return neigh_create(&arp_tbl, pkey, dev);
}

static int rt_bind_neighbour(struct rtable *rt)
{
	struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
	if (IS_ERR(n))
		return PTR_ERR(n);
	dst_set_neighbour(&rt->dst, n);

	return 0;
}

static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
				     struct sk_buff *skb, int ifindex)
{
	struct rtable	*rth, *cand;
	struct rtable __rcu **rthp, **candp;
	unsigned long	now;
	u32 		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->dst.dev)) || (rt->dst.flags & DST_NOCACHE)) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route.
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it
		 * (Thanks Alexey)
		 * Note: To avoid expensive rcu stuff for this uncached dst,
		 * we set DST_NOCACHE so that dst_release() can free dst without
		 * waiting for a grace period.
		 */

		rt->dst.flags |= DST_NOCACHE;
		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
			int err = rt_bind_neighbour(rt);
			if (err) {
				net_warn_ratelimited("Neighbour table failure & not caching routes\n");
				ip_rt_put(rt);
				return ERR_PTR(err);
			}
		}

		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (skb)
				skb_dst_set(skb, &rth->dst);
			return rth;
		}

		if (!atomic_read(&rth->dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max &&
		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
			struct net *net = dev_net(rt->dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(net)) {
				pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
					rt->dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(net);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
					ifindex, rt_genid(net));
			goto restart;
		}
	}

	/* Try to bind route to arp only if it is an output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
		int err = rt_bind_neighbour(rt);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return ERR_PTR(err);
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			net_warn_ratelimited("Neighbour table overflow\n");
			rt_drop(rt);
			return ERR_PTR(-ENOBUFS);
		}
	}

	rt->dst.rt_next = rt_hash_table[hash].chain;

	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (skb)
		skb_dst_set(skb, &rt->dst);
	return rt;
}

static atomic_t __rt_peer_genid = ATOMIC_INIT(0);

static u32 rt_peer_genid(void)
{
	return atomic_read(&__rt_peer_genid);
}

void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
{
	struct inet_peer_base *base;
	struct inet_peer *peer;

	base = inetpeer_base_ptr(rt->_peer);
	if (!base)
		return;

	peer = inet_getpeer_v4(base, daddr, create);
	if (peer) {
		if (!rt_set_peer(rt, peer))
			inet_putpeer(peer);
		else
			rt->rt_peer_genid = rt_peer_genid();
	}
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique in a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt && !(rt->dst.flags & DST_NOPEER)) {
		struct inet_peer *peer = rt_get_peer_create(rt, rt->rt_dst);

		/* If peer is attached to destination, it is never detached,
		   so we need not grab a lock to dereference it.
		 */
		if (peer) {
			iph->id = htons(inet_getid(peer, more));
			return;
		}
	} else if (!rt)
		pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void rt_del(unsigned int hash, struct rtable *rt)
{
	struct rtable __rcu **rthp;
	struct rtable *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
	struct rtable *rt = (struct rtable *) dst;
	__be32 orig_gw = rt->rt_gateway;
	struct neighbour *n, *old_n;

	dst_confirm(&rt->dst);

	rt->rt_gateway = peer->redirect_learned.a4;

	n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
	if (IS_ERR(n)) {
		rt->rt_gateway = orig_gw;
		return;
	}
	old_n = xchg(&rt->dst._neighbour, n);
	if (old_n)
		neigh_release(old_n);
	if (!(n->nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
	} else {
		rt->rt_flags |= RTCF_REDIRECTED;
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
	}
}

/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	int s, i;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	__be32 skeys[2] = { saddr, 0 };
	int ikeys[2] = { dev->ifindex, 0 };
	struct inet_peer *peer;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (s = 0; s < 2; s++) {
		for (i = 0; i < 2; i++) {
			unsigned int hash;
			struct rtable __rcu **rthp;
			struct rtable *rt;

			hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));

			rthp = &rt_hash_table[hash].chain;

			while ((rt = rcu_dereference(*rthp)) != NULL) {
				rthp = &rt->dst.rt_next;

				if (rt->rt_key_dst != daddr ||
				    rt->rt_key_src != skeys[s] ||
				    rt->rt_oif != ikeys[i] ||
				    rt_is_input_route(rt) ||
				    rt_is_expired(rt) ||
				    !net_eq(dev_net(rt->dst.dev), net) ||
				    rt->dst.error ||
				    rt->dst.dev != dev ||
				    rt->rt_gateway != old_gw)
					continue;

				peer = rt_get_peer_create(rt, rt->rt_dst);
				if (peer) {
					if (peer->redirect_learned.a4 != new_gw) {
						peer->redirect_learned.a4 = new_gw;
						atomic_inc(&__rt_peer_genid);
					}
					check_peer_redir(&rt->dst, peer);
				}
			}
		}
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
#endif
	;
}

static bool peer_pmtu_expired(struct inet_peer *peer)
{
	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);

	return orig &&
	       time_after_eq(jiffies, orig) &&
	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
}

static bool peer_pmtu_cleaned(struct inet_peer *peer)
{
	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);

	return orig &&
	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if (rt->rt_flags & RTCF_REDIRECTED) {
			unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
						rt->rt_oif,
						rt_genid(dev_net(dst->dev)));
			rt_del(hash, rt);
			ret = NULL;
		} else if (rt_has_peer(rt)) {
			struct inet_peer *peer = rt_peer_ptr(rt);
			if (peer_pmtu_expired(peer))
				dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   has forgotten the redirected route and start sending redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

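/*
 * Worked example (illustrative, not from the original source): with the
 * defaults ip_rt_redirect_load = HZ/50 and ip_rt_redirect_number = 9, the
 * k-th consecutive redirect is sent only once jiffies pass
 * rate_last + (HZ/50 << k), so the gap doubles with every ignored
 * redirect; after 9 of them we go silent until ip_rt_redirect_silence
 * (HZ/50 << 10) elapses and the token count resets.
 */
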
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	peer = rt_get_peer_create(rt, rt->rt_dst);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, rt->rt_iif,
					     &rt->rt_dst, &rt->rt_gateway);
#endif
	}
}

static int ip_error(struct sk_buff *skb)
{
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = rt_get_peer_create(rt, rt->rt_dst);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

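/*
 * Example (illustrative): the peer fields above form a token bucket.
 * Tokens accrue one per jiffy (now - rate_last), capped at
 * ip_rt_error_burst (5 * HZ); each ICMP error spends ip_rt_error_cost
 * (HZ) tokens.  A quiet peer may therefore emit a burst of five errors,
 * and afterwards roughly one per second.
 */
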
static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
{
	unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);

	if (!expires)
		return;
	if (time_before(jiffies, expires)) {
		u32 orig_dst_mtu = dst_mtu(dst);
		if (peer->pmtu_learned < orig_dst_mtu) {
			if (!peer->pmtu_orig)
				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
		}
	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
}

static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;

	dst_confirm(dst);

	peer = rt_get_peer_create(rt, rt->rt_dst);
	if (peer) {
		unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);

		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!pmtu_expires || mtu < peer->pmtu_learned) {

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;

			atomic_inc(&__rt_peer_genid);
			rt->rt_peer_genid = rt_peer_genid();
		}
		check_peer_pmtu(dst, peer);
	}
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	flowi4_init_output(&fl4, oif, mark, RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
			   protocol, flow_flags | FLOWI_FLAG_PRECOW_METRICS,
			   iph->daddr, iph->saddr, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		ip_rt_update_pmtu(&rt->dst, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct inet_sock *inet = inet_sk(sk);

	return ipv4_update_pmtu(skb, sock_net(sk), mtu,
				sk->sk_bound_dev_if, sk->sk_mark,
				inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
				inet_sk_flowi_flags(sk));
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

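/*
 * Usage sketch (illustrative; the real caller lives in the ICMP input
 * path, outside this file): a handler for an ICMP "fragmentation needed"
 * message could propagate the reported MTU like so:
 *
 *	ipv4_update_pmtu(skb, dev_net(skb->dev), new_mtu,
 *			 0, skb->mark, iph->protocol, 0);
 *
 * which re-routes the embedded flow and applies ip_rt_update_pmtu() to
 * the resulting dst.
 */
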
de398fb8 1734static void ipv4_validate_peer(struct rtable *rt)
1da177e4 1735{
6431cbc2 1736 if (rt->rt_peer_genid != rt_peer_genid()) {
fbfe95a4 1737 struct inet_peer *peer = rt_get_peer(rt, rt->rt_dst);
6431cbc2 1738
fe6fe792 1739 if (peer) {
efbc368d 1740 check_peer_pmtu(&rt->dst, peer);
2c8cec5c 1741
fe6fe792 1742 if (peer->redirect_learned.a4 &&
de398fb8
DM
1743 peer->redirect_learned.a4 != rt->rt_gateway)
1744 check_peer_redir(&rt->dst, peer);
f39925db
DM
1745 }
1746
6431cbc2
DM
1747 rt->rt_peer_genid = rt_peer_genid();
1748 }
efbc368d
DM
1749}
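/*
 * ipv4_validate_peer() above is the cheap revalidation hook: a cached
 * rtable stamps the global peer generation in rt_peer_genid, and any
 * PMTU or redirect update bumps __rt_peer_genid.  On a mismatch the
 * peer's learned PMTU and gateway are re-applied before the stamp is
 * refreshed, so cache hits stay in sync without a full relookup.
 */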
1750
1751static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1752{
1753 struct rtable *rt = (struct rtable *) dst;
1754
1755 if (rt_is_expired(rt))
1756 return NULL;
de398fb8 1757 ipv4_validate_peer(rt);
d11a4dc1 1758 return dst;
1da177e4
LT
1759}
1760
1761static void ipv4_dst_destroy(struct dst_entry *dst)
1762{
1763 struct rtable *rt = (struct rtable *) dst;
1da177e4 1764
62fa8a84
DM
1765 if (rt->fi) {
1766 fib_info_put(rt->fi);
1767 rt->fi = NULL;
1768 }
97bab73f
DM
1769 if (rt_has_peer(rt)) {
1770 struct inet_peer *peer = rt_peer_ptr(rt);
1da177e4
LT
1771 inet_putpeer(peer);
1772 }
1da177e4
LT
1773}
1774
1da177e4
LT
1775
1776static void ipv4_link_failure(struct sk_buff *skb)
1777{
1778 struct rtable *rt;
1779
1780 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1781
511c3f92 1782 rt = skb_rtable(skb);
97bab73f
DM
1783 if (rt && rt_has_peer(rt)) {
1784 struct inet_peer *peer = rt_peer_ptr(rt);
1785 if (peer_pmtu_cleaned(peer))
1786 dst_metric_set(&rt->dst, RTAX_MTU, peer->pmtu_orig);
1787 }
1da177e4
LT
1788}
1789
1790static int ip_rt_bug(struct sk_buff *skb)
1791{
91df42be
JP
1792 pr_debug("%s: %pI4 -> %pI4, %s\n",
1793 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1794 skb->dev ? skb->dev->name : "?");
1da177e4 1795 kfree_skb(skb);
c378a9c0 1796 WARN_ON(1);
1da177e4
LT
1797 return 0;
1798}
1799
1800/*
1801 We do not cache the source address of the outgoing interface,
1802 because it is used only by the IP RR, TS and SRR options,
1803 so it is out of the fast path.
1804
1805 BTW remember: "addr" is allowed to be unaligned
1806 in IP options!
1807 */
1808
8e36360a 1809void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1da177e4 1810{
a61ced5d 1811 __be32 src;
1da177e4 1812
c7537967 1813 if (rt_is_output_route(rt))
c5be24ff 1814 src = ip_hdr(skb)->saddr;
ebc0ffae 1815 else {
8e36360a
DM
1816 struct fib_result res;
1817 struct flowi4 fl4;
1818 struct iphdr *iph;
1819
1820 iph = ip_hdr(skb);
1821
1822 memset(&fl4, 0, sizeof(fl4));
1823 fl4.daddr = iph->daddr;
1824 fl4.saddr = iph->saddr;
b0fe4a31 1825 fl4.flowi4_tos = RT_TOS(iph->tos);
8e36360a
DM
1826 fl4.flowi4_oif = rt->dst.dev->ifindex;
1827 fl4.flowi4_iif = skb->dev->ifindex;
1828 fl4.flowi4_mark = skb->mark;
5e2b61f7 1829
ebc0ffae 1830 rcu_read_lock();
68a5e3dd 1831 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
436c3b66 1832 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
ebc0ffae
ED
1833 else
1834 src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1da177e4 1835 RT_SCOPE_UNIVERSE);
ebc0ffae
ED
1836 rcu_read_unlock();
1837 }
1da177e4
LT
1838 memcpy(addr, &src, 4);
1839}
1840
c7066f70 1841#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1842static void set_class_tag(struct rtable *rt, u32 tag)
1843{
d8d1f30b
CG
1844 if (!(rt->dst.tclassid & 0xFFFF))
1845 rt->dst.tclassid |= tag & 0xFFFF;
1846 if (!(rt->dst.tclassid & 0xFFFF0000))
1847 rt->dst.tclassid |= tag & 0xFFFF0000;
1da177e4
LT
1848}
1849#endif
1850
0dbaee3b
DM
1851static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1852{
1853 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1854
1855 if (advmss == 0) {
1856 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1857 ip_rt_min_advmss);
1858 if (advmss > 65535 - 40)
1859 advmss = 65535 - 40;
1860 }
1861 return advmss;
1862}
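/*
 * The "- 40" above is the IPv4 worst-case overhead assumed for the
 * advertised MSS when no explicit advmss metric is set: a 20-byte IP
 * header plus a 20-byte TCP header.  For example, a 1500-byte Ethernet
 * MTU yields an advmss of 1460, clamped between ip_rt_min_advmss and
 * 65535 - 40.
 */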
1863
ebb762f2 1864static unsigned int ipv4_mtu(const struct dst_entry *dst)
d33e4553 1865{
261663b0 1866 const struct rtable *rt = (const struct rtable *) dst;
618f9bc7
SK
1867 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1868
261663b0 1869 if (mtu && rt_is_output_route(rt))
618f9bc7
SK
1870 return mtu;
1871
1872 mtu = dst->dev->mtu;
d33e4553
DM
1873
1874 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
d33e4553
DM
1875
1876 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1877 mtu = 576;
1878 }
1879
1880 if (mtu > IP_MAX_MTU)
1881 mtu = IP_MAX_MTU;
1882
1883 return mtu;
1884}
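/*
 * The 576 fallback above applies when the MTU metric is locked, the
 * destination is behind a gateway (rt_gateway != rt_dst) and the
 * device MTU exceeds 576: 576 bytes is the traditional conservative
 * datagram size every IPv4 host must accept (RFC 791/1122), so it is
 * safe for an unknown path.
 */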
1885
813b3b5d 1886static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
5e2b61f7 1887 struct fib_info *fi)
a4daad6b 1888{
97bab73f 1889 struct inet_peer_base *base;
0131ba45
DM
1890 struct inet_peer *peer;
1891 int create = 0;
a4daad6b 1892
0131ba45
DM
1893 /* If a peer entry exists for this destination, we must hook
1894 * it up in order to get at cached metrics.
1895 */
813b3b5d 1896 if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
0131ba45
DM
1897 create = 1;
1898
97bab73f
DM
1899 base = inetpeer_base_ptr(rt->_peer);
1900 BUG_ON(!base);
1901
1902 peer = inet_getpeer_v4(base, rt->rt_dst, create);
0131ba45 1903 if (peer) {
97bab73f 1904 __rt_set_peer(rt, peer);
3c0afdca 1905 rt->rt_peer_genid = rt_peer_genid();
a4daad6b
DM
1906 if (inet_metrics_new(peer))
1907 memcpy(peer->metrics, fi->fib_metrics,
1908 sizeof(u32) * RTAX_MAX);
1909 dst_init_metrics(&rt->dst, peer->metrics, false);
2c8cec5c 1910
fe6fe792 1911 check_peer_pmtu(&rt->dst, peer);
ac3f48de 1912
f39925db
DM
1913 if (peer->redirect_learned.a4 &&
1914 peer->redirect_learned.a4 != rt->rt_gateway) {
1915 rt->rt_gateway = peer->redirect_learned.a4;
1916 rt->rt_flags |= RTCF_REDIRECTED;
1917 }
0131ba45
DM
1918 } else {
1919 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1920 rt->fi = fi;
1921 atomic_inc(&fi->fib_clntref);
1922 }
1923 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
a4daad6b
DM
1924 }
1925}
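/*
 * Two metric-storage strategies above: when an inet_peer exists (or is
 * created for FLOWI_FLAG_PRECOW_METRICS flows), the metrics live in
 * the shared peer and are seeded from the fib_info, so per-destination
 * writes (PMTU, ssthresh, ...) are possible.  Otherwise the route
 * points at the fib_info metrics read-only (the "true" argument to
 * dst_init_metrics()), and a write would have to COW them first.
 */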
1926
813b3b5d 1927static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
5e2b61f7 1928 const struct fib_result *res,
982721f3 1929 struct fib_info *fi, u16 type, u32 itag)
1da177e4 1930{
defb3519 1931 struct dst_entry *dst = &rt->dst;
1da177e4
LT
1932
1933 if (fi) {
1934 if (FIB_RES_GW(*res) &&
1935 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1936 rt->rt_gateway = FIB_RES_GW(*res);
813b3b5d 1937 rt_init_metrics(rt, fl4, fi);
c7066f70 1938#ifdef CONFIG_IP_ROUTE_CLASSID
defb3519 1939 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1da177e4 1940#endif
d33e4553 1941 }
defb3519 1942
defb3519
DM
1943 if (dst_mtu(dst) > IP_MAX_MTU)
1944 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1da177e4 1945
c7066f70 1946#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1947#ifdef CONFIG_IP_MULTIPLE_TABLES
1948 set_class_tag(rt, fib_rules_tclass(res));
1949#endif
1950 set_class_tag(rt, itag);
1951#endif
1da177e4
LT
1952}
1953
5c1e6aa3
DM
1954static struct rtable *rt_dst_alloc(struct net_device *dev,
1955 bool nopolicy, bool noxfrm)
0c4dcd58 1956{
5c1e6aa3
DM
1957 return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1958 DST_HOST |
1959 (nopolicy ? DST_NOPOLICY : 0) |
1960 (noxfrm ? DST_NOXFRM : 0));
0c4dcd58
DM
1961}
1962
96d36220 1963/* called in rcu_read_lock() section */
9e12bb22 1964static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
1965 u8 tos, struct net_device *dev, int our)
1966{
96d36220 1967 unsigned int hash;
1da177e4 1968 struct rtable *rth;
96d36220 1969 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1970 u32 itag = 0;
b5f7e755 1971 int err;
1da177e4
LT
1972
1973 /* Primary sanity checks. */
1974
1975 if (in_dev == NULL)
1976 return -EINVAL;
1977
1e637c74 1978 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
d0daebc3 1979 skb->protocol != htons(ETH_P_IP))
1da177e4
LT
1980 goto e_inval;
1981
d0daebc3
TG
1982 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1983 if (ipv4_is_loopback(saddr))
1984 goto e_inval;
1985
f97c1e0c
JP
1986 if (ipv4_is_zeronet(saddr)) {
1987 if (!ipv4_is_local_multicast(daddr))
1da177e4 1988 goto e_inval;
b5f7e755 1989 } else {
9e56e380
DM
1990 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1991 in_dev, &itag);
b5f7e755
ED
1992 if (err < 0)
1993 goto e_err;
1994 }
4e7b2f14 1995 rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
5c1e6aa3 1996 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
1997 if (!rth)
1998 goto e_nobufs;
1999
cf911662
DM
2000#ifdef CONFIG_IP_ROUTE_CLASSID
2001 rth->dst.tclassid = itag;
2002#endif
d8d1f30b 2003 rth->dst.output = ip_rt_bug;
1da177e4 2004
5e2b61f7 2005 rth->rt_key_dst = daddr;
5e2b61f7 2006 rth->rt_key_src = saddr;
cf911662
DM
2007 rth->rt_genid = rt_genid(dev_net(dev));
2008 rth->rt_flags = RTCF_MULTICAST;
2009 rth->rt_type = RTN_MULTICAST;
475949d8 2010 rth->rt_key_tos = tos;
cf911662 2011 rth->rt_dst = daddr;
1da177e4 2012 rth->rt_src = saddr;
1b86a58f 2013 rth->rt_route_iif = dev->ifindex;
5e2b61f7 2014 rth->rt_iif = dev->ifindex;
5e2b61f7 2015 rth->rt_oif = 0;
cf911662 2016 rth->rt_mark = skb->mark;
1da177e4 2017 rth->rt_gateway = daddr;
cf911662 2018 rth->rt_peer_genid = 0;
97bab73f 2019 rt_init_peer(rth, dev_net(dev)->ipv4.peers);
cf911662 2020 rth->fi = NULL;
1da177e4 2021 if (our) {
d8d1f30b 2022 rth->dst.input= ip_local_deliver;
1da177e4
LT
2023 rth->rt_flags |= RTCF_LOCAL;
2024 }
2025
2026#ifdef CONFIG_IP_MROUTE
f97c1e0c 2027 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
d8d1f30b 2028 rth->dst.input = ip_mr_input;
1da177e4
LT
2029#endif
2030 RT_CACHE_STAT_INC(in_slow_mc);
2031
e84f84f2 2032 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
b23dd4fe 2033 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
9aa3c94c 2034 return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1da177e4
LT
2035
2036e_nobufs:
1da177e4 2037 return -ENOBUFS;
1da177e4 2038e_inval:
96d36220 2039 return -EINVAL;
b5f7e755 2040e_err:
b5f7e755 2041 return err;
1da177e4
LT
2042}
2043
2044
2045static void ip_handle_martian_source(struct net_device *dev,
2046 struct in_device *in_dev,
2047 struct sk_buff *skb,
9e12bb22
AV
2048 __be32 daddr,
2049 __be32 saddr)
1da177e4
LT
2050{
2051 RT_CACHE_STAT_INC(in_martian_src);
2052#ifdef CONFIG_IP_ROUTE_VERBOSE
2053 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
2054 /*
2055 * RFC1812 recommendation: if the source is martian,
2056 * the only hint is the MAC header.
2057 */
058bd4d2 2058 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
673d57e7 2059 &daddr, &saddr, dev->name);
98e399f8 2060 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
058bd4d2
JP
2061 print_hex_dump(KERN_WARNING, "ll header: ",
2062 DUMP_PREFIX_OFFSET, 16, 1,
2063 skb_mac_header(skb),
2064 dev->hard_header_len, true);
1da177e4
LT
2065 }
2066 }
2067#endif
2068}
2069
47360228 2070/* called in rcu_read_lock() section */
5969f71d 2071static int __mkroute_input(struct sk_buff *skb,
982721f3 2072 const struct fib_result *res,
5969f71d
SH
2073 struct in_device *in_dev,
2074 __be32 daddr, __be32 saddr, u32 tos,
2075 struct rtable **result)
1da177e4 2076{
1da177e4
LT
2077 struct rtable *rth;
2078 int err;
2079 struct in_device *out_dev;
47360228 2080 unsigned int flags = 0;
d9c9df8c 2081 u32 itag;
1da177e4
LT
2082
2083 /* get a working reference to the output device */
47360228 2084 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1da177e4 2085 if (out_dev == NULL) {
e87cc472 2086 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1da177e4
LT
2087 return -EINVAL;
2088 }
2089
2090
5c04c819 2091 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
9e56e380 2092 in_dev->dev, in_dev, &itag);
1da177e4 2093 if (err < 0) {
e905a9ed 2094 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1da177e4 2095 saddr);
e905a9ed 2096
1da177e4
LT
2097 goto cleanup;
2098 }
2099
2100 if (err)
2101 flags |= RTCF_DIRECTSRC;
2102
51b77cae 2103 if (out_dev == in_dev && err &&
1da177e4
LT
2104 (IN_DEV_SHARED_MEDIA(out_dev) ||
2105 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
2106 flags |= RTCF_DOREDIRECT;
2107
2108 if (skb->protocol != htons(ETH_P_IP)) {
2109 /* Not IP (i.e. ARP). Do not create a route if it is
2110 * invalid for proxy ARP. DNAT routes are always valid.
65324144
JDB
2111 *
2112 * The proxy ARP feature has been extended to allow ARP
2113 * replies back out the same interface, to support
2114 * Private VLAN switch technologies. See arp.c.
1da177e4 2115 */
65324144
JDB
2116 if (out_dev == in_dev &&
2117 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1da177e4
LT
2118 err = -EINVAL;
2119 goto cleanup;
2120 }
2121 }
2122
5c1e6aa3
DM
2123 rth = rt_dst_alloc(out_dev->dev,
2124 IN_DEV_CONF_GET(in_dev, NOPOLICY),
0c4dcd58 2125 IN_DEV_CONF_GET(out_dev, NOXFRM));
1da177e4
LT
2126 if (!rth) {
2127 err = -ENOBUFS;
2128 goto cleanup;
2129 }
2130
5e2b61f7 2131 rth->rt_key_dst = daddr;
5e2b61f7 2132 rth->rt_key_src = saddr;
cf911662
DM
2133 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2134 rth->rt_flags = flags;
2135 rth->rt_type = res->type;
475949d8 2136 rth->rt_key_tos = tos;
cf911662 2137 rth->rt_dst = daddr;
1da177e4 2138 rth->rt_src = saddr;
1b86a58f 2139 rth->rt_route_iif = in_dev->dev->ifindex;
5e2b61f7 2140 rth->rt_iif = in_dev->dev->ifindex;
5e2b61f7 2141 rth->rt_oif = 0;
cf911662
DM
2142 rth->rt_mark = skb->mark;
2143 rth->rt_gateway = daddr;
cf911662 2144 rth->rt_peer_genid = 0;
8b96d22d 2145 rt_init_peer(rth, &res->table->tb_peers);
cf911662 2146 rth->fi = NULL;
1da177e4 2147
d8d1f30b
CG
2148 rth->dst.input = ip_forward;
2149 rth->dst.output = ip_output;
1da177e4 2150
5e2b61f7 2151 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
1da177e4 2152
1da177e4
LT
2153 *result = rth;
2154 err = 0;
2155 cleanup:
1da177e4 2156 return err;
e905a9ed 2157}
1da177e4 2158
5969f71d
SH
2159static int ip_mkroute_input(struct sk_buff *skb,
2160 struct fib_result *res,
68a5e3dd 2161 const struct flowi4 *fl4,
5969f71d
SH
2162 struct in_device *in_dev,
2163 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 2164{
5e73ea1a 2165 struct rtable *rth = NULL;
1da177e4 2166 int err;
95c96174 2167 unsigned int hash;
1da177e4
LT
2168
2169#ifdef CONFIG_IP_ROUTE_MULTIPATH
ff3fccb3 2170 if (res->fi && res->fi->fib_nhs > 1)
1b7fe593 2171 fib_select_multipath(res);
1da177e4
LT
2172#endif
2173
2174 /* create a routing cache entry */
2175 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2176 if (err)
2177 return err;
1da177e4
LT
2178
2179 /* put it into the cache */
68a5e3dd 2180 hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
d8d1f30b 2181 rt_genid(dev_net(rth->dst.dev)));
68a5e3dd 2182 rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
b23dd4fe
DM
2183 if (IS_ERR(rth))
2184 return PTR_ERR(rth);
2185 return 0;
1da177e4
LT
2186}
2187
1da177e4
LT
2188/*
2189 * NOTE. We drop all packets that have a local source
2190 * address, because every properly looped-back packet
2191 * must already have the correct destination attached by the output routine.
2192 *
2193 * Such an approach solves two big problems:
2194 * 1. Non-simplex devices are handled properly.
2195 * 2. IP spoofing attempts are filtered with a 100% guarantee.
ebc0ffae 2196 * called with rcu_read_lock()
1da177e4
LT
2197 */
2198
9e12bb22 2199static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
c10237e0 2200 u8 tos, struct net_device *dev)
1da177e4
LT
2201{
2202 struct fib_result res;
96d36220 2203 struct in_device *in_dev = __in_dev_get_rcu(dev);
68a5e3dd 2204 struct flowi4 fl4;
95c96174 2205 unsigned int flags = 0;
1da177e4 2206 u32 itag = 0;
95c96174
ED
2207 struct rtable *rth;
2208 unsigned int hash;
1da177e4 2209 int err = -EINVAL;
5e73ea1a 2210 struct net *net = dev_net(dev);
1da177e4
LT
2211
2212 /* IP on this device is disabled. */
2213
2214 if (!in_dev)
2215 goto out;
2216
2217 /* Check for the weirdest martians, which cannot be detected
2218 by fib_lookup.
2219 */
2220
d0daebc3 2221 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1da177e4
LT
2222 goto martian_source;
2223
27a954bd 2224 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1da177e4
LT
2225 goto brd_input;
2226
2227 /* Accept zero addresses only for limited broadcast;
2228 * I do not even know whether to fix it or not. Waiting for complaints :-)
2229 */
f97c1e0c 2230 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2231 goto martian_source;
2232
d0daebc3 2233 if (ipv4_is_zeronet(daddr))
1da177e4
LT
2234 goto martian_destination;
2235
d0daebc3
TG
2236 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
2237 if (ipv4_is_loopback(daddr))
2238 goto martian_destination;
2239
2240 if (ipv4_is_loopback(saddr))
2241 goto martian_source;
2242 }
2243
1da177e4
LT
2244 /*
2245 * Now we are ready to route packet.
2246 */
68a5e3dd
DM
2247 fl4.flowi4_oif = 0;
2248 fl4.flowi4_iif = dev->ifindex;
2249 fl4.flowi4_mark = skb->mark;
2250 fl4.flowi4_tos = tos;
2251 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2252 fl4.daddr = daddr;
2253 fl4.saddr = saddr;
2254 err = fib_lookup(net, &fl4, &res);
251da413 2255 if (err != 0)
1da177e4 2256 goto no_route;
1da177e4
LT
2257
2258 RT_CACHE_STAT_INC(in_slow_tot);
2259
2260 if (res.type == RTN_BROADCAST)
2261 goto brd_input;
2262
2263 if (res.type == RTN_LOCAL) {
5c04c819 2264 err = fib_validate_source(skb, saddr, daddr, tos,
ebc0ffae 2265 net->loopback_dev->ifindex,
9e56e380 2266 dev, in_dev, &itag);
b5f7e755
ED
2267 if (err < 0)
2268 goto martian_source_keep_err;
2269 if (err)
1da177e4 2270 flags |= RTCF_DIRECTSRC;
1da177e4
LT
2271 goto local_input;
2272 }
2273
2274 if (!IN_DEV_FORWARD(in_dev))
251da413 2275 goto no_route;
1da177e4
LT
2276 if (res.type != RTN_UNICAST)
2277 goto martian_destination;
2278
68a5e3dd 2279 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1da177e4
LT
2280out: return err;
2281
2282brd_input:
2283 if (skb->protocol != htons(ETH_P_IP))
2284 goto e_inval;
2285
41347dcd 2286 if (!ipv4_is_zeronet(saddr)) {
9e56e380
DM
2287 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2288 in_dev, &itag);
1da177e4 2289 if (err < 0)
b5f7e755 2290 goto martian_source_keep_err;
1da177e4
LT
2291 if (err)
2292 flags |= RTCF_DIRECTSRC;
2293 }
2294 flags |= RTCF_BROADCAST;
2295 res.type = RTN_BROADCAST;
2296 RT_CACHE_STAT_INC(in_brd);
2297
2298local_input:
5c1e6aa3
DM
2299 rth = rt_dst_alloc(net->loopback_dev,
2300 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
2301 if (!rth)
2302 goto e_nobufs;
2303
cf911662 2304 rth->dst.input= ip_local_deliver;
d8d1f30b 2305 rth->dst.output= ip_rt_bug;
cf911662
DM
2306#ifdef CONFIG_IP_ROUTE_CLASSID
2307 rth->dst.tclassid = itag;
2308#endif
1da177e4 2309
5e2b61f7 2310 rth->rt_key_dst = daddr;
5e2b61f7 2311 rth->rt_key_src = saddr;
cf911662
DM
2312 rth->rt_genid = rt_genid(net);
2313 rth->rt_flags = flags|RTCF_LOCAL;
2314 rth->rt_type = res.type;
475949d8 2315 rth->rt_key_tos = tos;
cf911662 2316 rth->rt_dst = daddr;
1da177e4 2317 rth->rt_src = saddr;
1b86a58f 2318 rth->rt_route_iif = dev->ifindex;
5e2b61f7 2319 rth->rt_iif = dev->ifindex;
cf911662
DM
2320 rth->rt_oif = 0;
2321 rth->rt_mark = skb->mark;
1da177e4 2322 rth->rt_gateway = daddr;
cf911662 2323 rth->rt_peer_genid = 0;
97bab73f 2324 rt_init_peer(rth, net->ipv4.peers);
cf911662 2325 rth->fi = NULL;
1da177e4 2326 if (res.type == RTN_UNREACHABLE) {
d8d1f30b
CG
2327 rth->dst.input= ip_error;
2328 rth->dst.error= -err;
1da177e4
LT
2329 rth->rt_flags &= ~RTCF_LOCAL;
2330 }
68a5e3dd
DM
2331 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2332 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
b23dd4fe
DM
2333 err = 0;
2334 if (IS_ERR(rth))
2335 err = PTR_ERR(rth);
ebc0ffae 2336 goto out;
1da177e4
LT
2337
2338no_route:
2339 RT_CACHE_STAT_INC(in_no_route);
1da177e4 2340 res.type = RTN_UNREACHABLE;
7f53878d
MC
2341 if (err == -ESRCH)
2342 err = -ENETUNREACH;
1da177e4
LT
2343 goto local_input;
2344
2345 /*
2346 * Do not cache martian addresses: they should be logged (RFC1812)
2347 */
2348martian_destination:
2349 RT_CACHE_STAT_INC(in_martian_dst);
2350#ifdef CONFIG_IP_ROUTE_VERBOSE
e87cc472
JP
2351 if (IN_DEV_LOG_MARTIANS(in_dev))
2352 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2353 &daddr, &saddr, dev->name);
1da177e4 2354#endif
2c2910a4 2355
1da177e4
LT
2356e_inval:
2357 err = -EINVAL;
ebc0ffae 2358 goto out;
1da177e4
LT
2359
2360e_nobufs:
2361 err = -ENOBUFS;
ebc0ffae 2362 goto out;
1da177e4
LT
2363
2364martian_source:
b5f7e755
ED
2365 err = -EINVAL;
2366martian_source_keep_err:
1da177e4 2367 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
ebc0ffae 2368 goto out;
1da177e4
LT
2369}
2370
407eadd9 2371int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
c10237e0 2372 u8 tos, struct net_device *dev, bool noref)
1da177e4 2373{
95c96174
ED
2374 struct rtable *rth;
2375 unsigned int hash;
1da177e4 2376 int iif = dev->ifindex;
b5921910 2377 struct net *net;
96d36220 2378 int res;
1da177e4 2379
c346dca1 2380 net = dev_net(dev);
1080d709 2381
96d36220
ED
2382 rcu_read_lock();
2383
1080d709
NH
2384 if (!rt_caching(net))
2385 goto skip_cache;
2386
1da177e4 2387 tos &= IPTOS_RT_MASK;
e84f84f2 2388 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
1da177e4 2389
1da177e4 2390 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
d8d1f30b 2391 rth = rcu_dereference(rth->dst.rt_next)) {
5e2b61f7
DM
2392 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2393 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
97a80410 2394 (rth->rt_route_iif ^ iif) |
475949d8 2395 (rth->rt_key_tos ^ tos)) == 0 &&
5e2b61f7 2396 rth->rt_mark == skb->mark &&
d8d1f30b 2397 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2398 !rt_is_expired(rth)) {
de398fb8 2399 ipv4_validate_peer(rth);
407eadd9 2400 if (noref) {
d8d1f30b
CG
2401 dst_use_noref(&rth->dst, jiffies);
2402 skb_dst_set_noref(skb, &rth->dst);
407eadd9 2403 } else {
d8d1f30b
CG
2404 dst_use(&rth->dst, jiffies);
2405 skb_dst_set(skb, &rth->dst);
407eadd9 2406 }
1da177e4
LT
2407 RT_CACHE_STAT_INC(in_hit);
2408 rcu_read_unlock();
1da177e4
LT
2409 return 0;
2410 }
2411 RT_CACHE_STAT_INC(in_hlist_search);
2412 }
1da177e4 2413
1080d709 2414skip_cache:
1da177e4
LT
2415 /* Multicast recognition logic was moved from the route cache to here.
2416 The problem was that too many Ethernet cards have broken/missing
2417 hardware multicast filters :-( As a result, a host on a multicast
2418 network acquires a lot of useless route cache entries, e.g. from
2419 SDR messages from all over the world. Now we try to get rid of them.
2420 Really, provided the software IP multicast filter is organized
2421 reasonably (at least, hashed), it does not result in a slowdown
2422 compared with route cache reject entries.
2423 Note that multicast routers are not affected, because a
2424 route cache entry is created eventually.
2425 */
f97c1e0c 2426 if (ipv4_is_multicast(daddr)) {
96d36220 2427 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 2428
96d36220 2429 if (in_dev) {
dbdd9a52
DM
2430 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2431 ip_hdr(skb)->protocol);
1da177e4
LT
2432 if (our
2433#ifdef CONFIG_IP_MROUTE
9d4fb27d
JP
2434 ||
2435 (!ipv4_is_local_multicast(daddr) &&
2436 IN_DEV_MFORWARD(in_dev))
1da177e4 2437#endif
9d4fb27d 2438 ) {
96d36220
ED
2439 int res = ip_route_input_mc(skb, daddr, saddr,
2440 tos, dev, our);
1da177e4 2441 rcu_read_unlock();
96d36220 2442 return res;
1da177e4
LT
2443 }
2444 }
2445 rcu_read_unlock();
2446 return -EINVAL;
2447 }
c10237e0 2448 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
96d36220
ED
2449 rcu_read_unlock();
2450 return res;
1da177e4 2451}
407eadd9 2452EXPORT_SYMBOL(ip_route_input_common);
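/*
 * Sketch of the typical receive-path caller (assuming the usual
 * ip_rcv_finish() flow, not shown in this file):
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				       iph->tos, skb->dev);
 *	if (unlikely(err))
 *		goto drop;
 *
 * On success skb_rtable(skb) returns the cached or freshly created
 * rtable; ip_route_input()/ip_route_input_noref() are thin wrappers
 * around ip_route_input_common() with noref false/true respectively.
 */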
1da177e4 2453
ebc0ffae 2454/* called with rcu_read_lock() */
982721f3 2455static struct rtable *__mkroute_output(const struct fib_result *res,
68a5e3dd 2456 const struct flowi4 *fl4,
813b3b5d 2457 __be32 orig_daddr, __be32 orig_saddr,
f61759e6
JA
2458 int orig_oif, __u8 orig_rtos,
2459 struct net_device *dev_out,
5ada5527 2460 unsigned int flags)
1da177e4 2461{
982721f3 2462 struct fib_info *fi = res->fi;
5ada5527 2463 struct in_device *in_dev;
982721f3 2464 u16 type = res->type;
5ada5527 2465 struct rtable *rth;
1da177e4 2466
d0daebc3
TG
2467 in_dev = __in_dev_get_rcu(dev_out);
2468 if (!in_dev)
5ada5527 2469 return ERR_PTR(-EINVAL);
1da177e4 2470
d0daebc3
TG
2471 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2472 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
2473 return ERR_PTR(-EINVAL);
2474
68a5e3dd 2475 if (ipv4_is_lbcast(fl4->daddr))
982721f3 2476 type = RTN_BROADCAST;
68a5e3dd 2477 else if (ipv4_is_multicast(fl4->daddr))
982721f3 2478 type = RTN_MULTICAST;
68a5e3dd 2479 else if (ipv4_is_zeronet(fl4->daddr))
5ada5527 2480 return ERR_PTR(-EINVAL);
1da177e4
LT
2481
2482 if (dev_out->flags & IFF_LOOPBACK)
2483 flags |= RTCF_LOCAL;
2484
982721f3 2485 if (type == RTN_BROADCAST) {
1da177e4 2486 flags |= RTCF_BROADCAST | RTCF_LOCAL;
982721f3
DM
2487 fi = NULL;
2488 } else if (type == RTN_MULTICAST) {
dd28d1a0 2489 flags |= RTCF_MULTICAST | RTCF_LOCAL;
813b3b5d
DM
2490 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2491 fl4->flowi4_proto))
1da177e4
LT
2492 flags &= ~RTCF_LOCAL;
2493 /* If multicast route do not exist use
dd28d1a0
ED
2494 * default one, but do not gateway in this case.
2495 * Yes, it is hack.
1da177e4 2496 */
982721f3
DM
2497 if (fi && res->prefixlen < 4)
2498 fi = NULL;
1da177e4
LT
2499 }
2500
5c1e6aa3
DM
2501 rth = rt_dst_alloc(dev_out,
2502 IN_DEV_CONF_GET(in_dev, NOPOLICY),
0c4dcd58 2503 IN_DEV_CONF_GET(in_dev, NOXFRM));
8391d07b 2504 if (!rth)
5ada5527 2505 return ERR_PTR(-ENOBUFS);
8391d07b 2506
cf911662
DM
2507 rth->dst.output = ip_output;
2508
813b3b5d
DM
2509 rth->rt_key_dst = orig_daddr;
2510 rth->rt_key_src = orig_saddr;
cf911662
DM
2511 rth->rt_genid = rt_genid(dev_net(dev_out));
2512 rth->rt_flags = flags;
2513 rth->rt_type = type;
f61759e6 2514 rth->rt_key_tos = orig_rtos;
68a5e3dd
DM
2515 rth->rt_dst = fl4->daddr;
2516 rth->rt_src = fl4->saddr;
1b86a58f 2517 rth->rt_route_iif = 0;
813b3b5d
DM
2518 rth->rt_iif = orig_oif ? : dev_out->ifindex;
2519 rth->rt_oif = orig_oif;
2520 rth->rt_mark = fl4->flowi4_mark;
68a5e3dd 2521 rth->rt_gateway = fl4->daddr;
cf911662 2522 rth->rt_peer_genid = 0;
8b96d22d
DM
2523 rt_init_peer(rth, (res->table ?
2524 &res->table->tb_peers :
2525 dev_net(dev_out)->ipv4.peers));
cf911662 2526 rth->fi = NULL;
1da177e4
LT
2527
2528 RT_CACHE_STAT_INC(out_slow_tot);
2529
41347dcd 2530 if (flags & RTCF_LOCAL)
d8d1f30b 2531 rth->dst.input = ip_local_deliver;
1da177e4 2532 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
e905a9ed 2533 if (flags & RTCF_LOCAL &&
1da177e4 2534 !(dev_out->flags & IFF_LOOPBACK)) {
d8d1f30b 2535 rth->dst.output = ip_mc_output;
1da177e4
LT
2536 RT_CACHE_STAT_INC(out_slow_mc);
2537 }
2538#ifdef CONFIG_IP_MROUTE
982721f3 2539 if (type == RTN_MULTICAST) {
1da177e4 2540 if (IN_DEV_MFORWARD(in_dev) &&
813b3b5d 2541 !ipv4_is_local_multicast(fl4->daddr)) {
d8d1f30b
CG
2542 rth->dst.input = ip_mr_input;
2543 rth->dst.output = ip_mc_output;
1da177e4
LT
2544 }
2545 }
2546#endif
2547 }
2548
813b3b5d 2549 rt_set_nexthop(rth, fl4, res, fi, type, 0);
1da177e4 2550
7586eceb
ED
2551 if (fl4->flowi4_flags & FLOWI_FLAG_RT_NOCACHE)
2552 rth->dst.flags |= DST_NOCACHE;
2553
5ada5527 2554 return rth;
1da177e4
LT
2555}
2556
1da177e4
LT
2557/*
2558 * Major route resolver routine.
0197aa38 2559 * called with rcu_read_lock();
1da177e4
LT
2560 */
2561
813b3b5d 2562static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
1da177e4 2563{
1da177e4 2564 struct net_device *dev_out = NULL;
f61759e6 2565 __u8 tos = RT_FL_TOS(fl4);
813b3b5d
DM
2566 unsigned int flags = 0;
2567 struct fib_result res;
5ada5527 2568 struct rtable *rth;
813b3b5d
DM
2569 __be32 orig_daddr;
2570 __be32 orig_saddr;
2571 int orig_oif;
1da177e4
LT
2572
2573 res.fi = NULL;
8b96d22d 2574 res.table = NULL;
1da177e4
LT
2575#ifdef CONFIG_IP_MULTIPLE_TABLES
2576 res.r = NULL;
2577#endif
2578
813b3b5d
DM
2579 orig_daddr = fl4->daddr;
2580 orig_saddr = fl4->saddr;
2581 orig_oif = fl4->flowi4_oif;
2582
2583 fl4->flowi4_iif = net->loopback_dev->ifindex;
2584 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2585 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2586 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
44713b67 2587
010c2708 2588 rcu_read_lock();
813b3b5d 2589 if (fl4->saddr) {
b23dd4fe 2590 rth = ERR_PTR(-EINVAL);
813b3b5d
DM
2591 if (ipv4_is_multicast(fl4->saddr) ||
2592 ipv4_is_lbcast(fl4->saddr) ||
2593 ipv4_is_zeronet(fl4->saddr))
1da177e4
LT
2594 goto out;
2595
1da177e4
LT
2596 /* I removed the check for oif == dev_out->oif here.
2597 It was wrong for two reasons:
1ab35276
DL
2598 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2599 is assigned to multiple interfaces.
1da177e4
LT
2600 2. Moreover, we are allowed to send packets with the saddr
2601 of another iface. --ANK
2602 */
2603
813b3b5d
DM
2604 if (fl4->flowi4_oif == 0 &&
2605 (ipv4_is_multicast(fl4->daddr) ||
2606 ipv4_is_lbcast(fl4->daddr))) {
a210d01a 2607 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
813b3b5d 2608 dev_out = __ip_dev_find(net, fl4->saddr, false);
a210d01a
JA
2609 if (dev_out == NULL)
2610 goto out;
2611
1da177e4
LT
2612 /* Special hack: the user can direct multicasts
2613 and limited broadcast via the necessary interface
2614 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2615 This hack is not just for fun, it allows
2616 vic, vat and friends to work.
2617 They bind a socket to loopback, set the ttl to zero
2618 and expect that it will work.
2619 From the viewpoint of the routing cache they are broken,
2620 because we are not allowed to build a multicast path
2621 with a loopback source addr (look, the routing cache
2622 cannot know that the ttl is zero, so the packet
2623 will not leave this host and the route is valid).
2624 Luckily, this hack is a good workaround.
2625 */
2626
813b3b5d 2627 fl4->flowi4_oif = dev_out->ifindex;
1da177e4
LT
2628 goto make_route;
2629 }
a210d01a 2630
813b3b5d 2631 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
a210d01a 2632 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
813b3b5d 2633 if (!__ip_dev_find(net, fl4->saddr, false))
a210d01a 2634 goto out;
a210d01a 2635 }
1da177e4
LT
2636 }
2637
2638
813b3b5d
DM
2639 if (fl4->flowi4_oif) {
2640 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
b23dd4fe 2641 rth = ERR_PTR(-ENODEV);
1da177e4
LT
2642 if (dev_out == NULL)
2643 goto out;
e5ed6399
HX
2644
2645 /* RACE: Check return value of inet_select_addr instead. */
fc75fc83 2646 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
b23dd4fe 2647 rth = ERR_PTR(-ENETUNREACH);
fc75fc83
ED
2648 goto out;
2649 }
813b3b5d
DM
2650 if (ipv4_is_local_multicast(fl4->daddr) ||
2651 ipv4_is_lbcast(fl4->daddr)) {
2652 if (!fl4->saddr)
2653 fl4->saddr = inet_select_addr(dev_out, 0,
2654 RT_SCOPE_LINK);
1da177e4
LT
2655 goto make_route;
2656 }
813b3b5d
DM
2657 if (fl4->saddr) {
2658 if (ipv4_is_multicast(fl4->daddr))
2659 fl4->saddr = inet_select_addr(dev_out, 0,
2660 fl4->flowi4_scope);
2661 else if (!fl4->daddr)
2662 fl4->saddr = inet_select_addr(dev_out, 0,
2663 RT_SCOPE_HOST);
1da177e4
LT
2664 }
2665 }
2666
813b3b5d
DM
2667 if (!fl4->daddr) {
2668 fl4->daddr = fl4->saddr;
2669 if (!fl4->daddr)
2670 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
b40afd0e 2671 dev_out = net->loopback_dev;
813b3b5d 2672 fl4->flowi4_oif = net->loopback_dev->ifindex;
1da177e4
LT
2673 res.type = RTN_LOCAL;
2674 flags |= RTCF_LOCAL;
2675 goto make_route;
2676 }
2677
813b3b5d 2678 if (fib_lookup(net, fl4, &res)) {
1da177e4 2679 res.fi = NULL;
8b96d22d 2680 res.table = NULL;
813b3b5d 2681 if (fl4->flowi4_oif) {
1da177e4
LT
2682 /* Apparently, the routing tables are wrong. Assume
2683 that the destination is on-link.
2684
2685 WHY? DW.
2686 Because we are allowed to send to an iface
2687 even if it has NO routes and NO assigned
2688 addresses. When oif is specified, the routing
2689 tables are looked up with only one purpose:
2690 to catch whether the destination is gatewayed, rather than
2691 direct. Moreover, if MSG_DONTROUTE is set,
2692 we send the packet, ignoring both the routing tables
2693 and the ifaddr state. --ANK
2694
2695
2696 We could do this even if oif is unknown
2697 (IPv6 likely does), but we do not.
2698 */
2699
813b3b5d
DM
2700 if (fl4->saddr == 0)
2701 fl4->saddr = inet_select_addr(dev_out, 0,
2702 RT_SCOPE_LINK);
1da177e4
LT
2703 res.type = RTN_UNICAST;
2704 goto make_route;
2705 }
b23dd4fe 2706 rth = ERR_PTR(-ENETUNREACH);
1da177e4
LT
2707 goto out;
2708 }
1da177e4
LT
2709
2710 if (res.type == RTN_LOCAL) {
813b3b5d 2711 if (!fl4->saddr) {
9fc3bbb4 2712 if (res.fi->fib_prefsrc)
813b3b5d 2713 fl4->saddr = res.fi->fib_prefsrc;
9fc3bbb4 2714 else
813b3b5d 2715 fl4->saddr = fl4->daddr;
9fc3bbb4 2716 }
b40afd0e 2717 dev_out = net->loopback_dev;
813b3b5d 2718 fl4->flowi4_oif = dev_out->ifindex;
1da177e4
LT
2719 res.fi = NULL;
2720 flags |= RTCF_LOCAL;
2721 goto make_route;
2722 }
2723
2724#ifdef CONFIG_IP_ROUTE_MULTIPATH
813b3b5d 2725 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
1b7fe593 2726 fib_select_multipath(&res);
1da177e4
LT
2727 else
2728#endif
21d8c49e
DM
2729 if (!res.prefixlen &&
2730 res.table->tb_num_default > 1 &&
813b3b5d 2731 res.type == RTN_UNICAST && !fl4->flowi4_oif)
0c838ff1 2732 fib_select_default(&res);
1da177e4 2733
813b3b5d
DM
2734 if (!fl4->saddr)
2735 fl4->saddr = FIB_RES_PREFSRC(net, res);
1da177e4 2736
1da177e4 2737 dev_out = FIB_RES_DEV(res);
813b3b5d 2738 fl4->flowi4_oif = dev_out->ifindex;
1da177e4
LT
2739
2740
2741make_route:
813b3b5d 2742 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
f61759e6 2743 tos, dev_out, flags);
b23dd4fe 2744 if (!IS_ERR(rth)) {
5ada5527
DM
2745 unsigned int hash;
2746
813b3b5d 2747 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
5ada5527 2748 rt_genid(dev_net(dev_out)));
813b3b5d 2749 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
5ada5527 2750 }
1da177e4 2751
010c2708
DM
2752out:
2753 rcu_read_unlock();
b23dd4fe 2754 return rth;
1da177e4
LT
2755}
2756
813b3b5d 2757struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
1da177e4 2758{
1da177e4 2759 struct rtable *rth;
010c2708 2760 unsigned int hash;
1da177e4 2761
1080d709
NH
2762 if (!rt_caching(net))
2763 goto slow_output;
2764
9d6ec938 2765 hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
1da177e4
LT
2766
2767 rcu_read_lock_bh();
a898def2 2768 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
d8d1f30b 2769 rth = rcu_dereference_bh(rth->dst.rt_next)) {
9d6ec938
DM
2770 if (rth->rt_key_dst == flp4->daddr &&
2771 rth->rt_key_src == flp4->saddr &&
c7537967 2772 rt_is_output_route(rth) &&
9d6ec938
DM
2773 rth->rt_oif == flp4->flowi4_oif &&
2774 rth->rt_mark == flp4->flowi4_mark &&
475949d8 2775 !((rth->rt_key_tos ^ flp4->flowi4_tos) &
b5921910 2776 (IPTOS_RT_MASK | RTO_ONLINK)) &&
d8d1f30b 2777 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2778 !rt_is_expired(rth)) {
de398fb8 2779 ipv4_validate_peer(rth);
d8d1f30b 2780 dst_use(&rth->dst, jiffies);
1da177e4
LT
2781 RT_CACHE_STAT_INC(out_hit);
2782 rcu_read_unlock_bh();
56157872
DM
2783 if (!flp4->saddr)
2784 flp4->saddr = rth->rt_src;
2785 if (!flp4->daddr)
2786 flp4->daddr = rth->rt_dst;
b23dd4fe 2787 return rth;
1da177e4
LT
2788 }
2789 RT_CACHE_STAT_INC(out_hlist_search);
2790 }
2791 rcu_read_unlock_bh();
2792
1080d709 2793slow_output:
9d6ec938 2794 return ip_route_output_slow(net, flp4);
1da177e4 2795}
d8c97a94
ACM
2796EXPORT_SYMBOL_GPL(__ip_route_output_key);
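/*
 * Sketch of a typical caller (not from this file):
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= daddr,
 *		.flowi4_tos	= RT_TOS(tos),
 *		.flowi4_oif	= 0,
 *	};
 *	struct rtable *rt = __ip_route_output_key(net, &fl4);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 *
 * On success the chosen saddr/daddr are written back into fl4, as the
 * cache-hit path above shows.
 */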
2797
ae2688d5
JW
2798static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2799{
2800 return NULL;
2801}
2802
ebb762f2 2803static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
ec831ea7 2804{
618f9bc7
SK
2805 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2806
2807 return mtu ? : dst->dev->mtu;
ec831ea7
RD
2808}
2809
14e50e57
DM
2810static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2811{
2812}
2813
0972ddb2
HB
2814static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2815 unsigned long old)
2816{
2817 return NULL;
2818}
2819
14e50e57
DM
2820static struct dst_ops ipv4_dst_blackhole_ops = {
2821 .family = AF_INET,
09640e63 2822 .protocol = cpu_to_be16(ETH_P_IP),
14e50e57 2823 .destroy = ipv4_dst_destroy,
ae2688d5 2824 .check = ipv4_blackhole_dst_check,
ebb762f2 2825 .mtu = ipv4_blackhole_mtu,
214f45c9 2826 .default_advmss = ipv4_default_advmss,
14e50e57 2827 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
0972ddb2 2828 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
d3aaeb38 2829 .neigh_lookup = ipv4_neigh_lookup,
14e50e57
DM
2830};
2831
2774c131 2832struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
14e50e57 2833{
5c1e6aa3 2834 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2774c131 2835 struct rtable *ort = (struct rtable *) dst_orig;
14e50e57
DM
2836
2837 if (rt) {
d8d1f30b 2838 struct dst_entry *new = &rt->dst;
14e50e57 2839
14e50e57 2840 new->__use = 1;
352e512c
HX
2841 new->input = dst_discard;
2842 new->output = dst_discard;
defb3519 2843 dst_copy_metrics(new, &ort->dst);
14e50e57 2844
d8d1f30b 2845 new->dev = ort->dst.dev;
14e50e57
DM
2846 if (new->dev)
2847 dev_hold(new->dev);
2848
5e2b61f7
DM
2849 rt->rt_key_dst = ort->rt_key_dst;
2850 rt->rt_key_src = ort->rt_key_src;
475949d8 2851 rt->rt_key_tos = ort->rt_key_tos;
1b86a58f 2852 rt->rt_route_iif = ort->rt_route_iif;
5e2b61f7
DM
2853 rt->rt_iif = ort->rt_iif;
2854 rt->rt_oif = ort->rt_oif;
2855 rt->rt_mark = ort->rt_mark;
14e50e57 2856
e84f84f2 2857 rt->rt_genid = rt_genid(net);
14e50e57
DM
2858 rt->rt_flags = ort->rt_flags;
2859 rt->rt_type = ort->rt_type;
2860 rt->rt_dst = ort->rt_dst;
2861 rt->rt_src = ort->rt_src;
14e50e57 2862 rt->rt_gateway = ort->rt_gateway;
97bab73f 2863 rt_transfer_peer(rt, ort);
62fa8a84
DM
2864 rt->fi = ort->fi;
2865 if (rt->fi)
2866 atomic_inc(&rt->fi->fib_clntref);
14e50e57
DM
2867
2868 dst_free(new);
2869 }
2870
2774c131
DM
2871 dst_release(dst_orig);
2872
2873 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
14e50e57
DM
2874}
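/*
 * ipv4_blackhole_route() above clones an existing route into a stub
 * whose input/output handlers simply discard packets and whose
 * metrics can no longer be COWed.  It is used (e.g. by the xfrm code
 * when a lookup cannot be resolved immediately) so that a caller left
 * holding a dst can keep using it safely while packets are dropped.
 */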
2875
9d6ec938 2876struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
b23dd4fe 2877 struct sock *sk)
1da177e4 2878{
9d6ec938 2879 struct rtable *rt = __ip_route_output_key(net, flp4);
1da177e4 2880
b23dd4fe
DM
2881 if (IS_ERR(rt))
2882 return rt;
1da177e4 2883
56157872 2884 if (flp4->flowi4_proto)
9d6ec938
DM
2885 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2886 flowi4_to_flowi(flp4),
2887 sk, 0);
1da177e4 2888
b23dd4fe 2889 return rt;
1da177e4 2890}
d8c97a94
ACM
2891EXPORT_SYMBOL_GPL(ip_route_output_flow);
2892
4feb88e5
BT
2893static int rt_fill_info(struct net *net,
2894 struct sk_buff *skb, u32 pid, u32 seq, int event,
b6544c0b 2895 int nowait, unsigned int flags)
1da177e4 2896{
511c3f92 2897 struct rtable *rt = skb_rtable(skb);
1da177e4 2898 struct rtmsg *r;
be403ea1 2899 struct nlmsghdr *nlh;
2bc8ca40 2900 unsigned long expires = 0;
e3703b3d 2901 u32 id = 0, ts = 0, tsage = 0, error;
be403ea1
TG
2902
2903 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2904 if (nlh == NULL)
26932566 2905 return -EMSGSIZE;
be403ea1
TG
2906
2907 r = nlmsg_data(nlh);
1da177e4
LT
2908 r->rtm_family = AF_INET;
2909 r->rtm_dst_len = 32;
2910 r->rtm_src_len = 0;
475949d8 2911 r->rtm_tos = rt->rt_key_tos;
1da177e4 2912 r->rtm_table = RT_TABLE_MAIN;
f3756b79
DM
2913 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2914 goto nla_put_failure;
1da177e4
LT
2915 r->rtm_type = rt->rt_type;
2916 r->rtm_scope = RT_SCOPE_UNIVERSE;
2917 r->rtm_protocol = RTPROT_UNSPEC;
2918 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2919 if (rt->rt_flags & RTCF_NOTIFY)
2920 r->rtm_flags |= RTM_F_NOTIFY;
be403ea1 2921
f3756b79
DM
2922 if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
2923 goto nla_put_failure;
5e2b61f7 2924 if (rt->rt_key_src) {
1da177e4 2925 r->rtm_src_len = 32;
f3756b79
DM
2926 if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
2927 goto nla_put_failure;
1da177e4 2928 }
f3756b79
DM
2929 if (rt->dst.dev &&
2930 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2931 goto nla_put_failure;
c7066f70 2932#ifdef CONFIG_IP_ROUTE_CLASSID
f3756b79
DM
2933 if (rt->dst.tclassid &&
2934 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2935 goto nla_put_failure;
1da177e4 2936#endif
41347dcd
DM
2937 if (!rt_is_input_route(rt) &&
2938 rt->rt_src != rt->rt_key_src) {
f3756b79
DM
2939 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
2940 goto nla_put_failure;
2941 }
2942 if (rt->rt_dst != rt->rt_gateway &&
2943 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
2944 goto nla_put_failure;
be403ea1 2945
defb3519 2946 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
be403ea1
TG
2947 goto nla_put_failure;
2948
f3756b79
DM
2949 if (rt->rt_mark &&
2950 nla_put_be32(skb, RTA_MARK, rt->rt_mark))
2951 goto nla_put_failure;
963bfeee 2952
d8d1f30b 2953 error = rt->dst.error;
97bab73f
DM
2954 if (rt_has_peer(rt)) {
2955 const struct inet_peer *peer = rt_peer_ptr(rt);
2956 inet_peer_refcheck(peer);
fe6fe792
ED
2957 id = atomic_read(&peer->ip_id_count) & 0xffff;
2958 if (peer->tcp_ts_stamp) {
2959 ts = peer->tcp_ts;
2960 tsage = get_seconds() - peer->tcp_ts_stamp;
1da177e4 2961 }
fe6fe792 2962 expires = ACCESS_ONCE(peer->pmtu_expires);
2bc8ca40
SK
2963 if (expires) {
2964 if (time_before(jiffies, expires))
2965 expires -= jiffies;
2966 else
2967 expires = 0;
2968 }
1da177e4 2969 }
be403ea1 2970
c7537967 2971 if (rt_is_input_route(rt)) {
1da177e4 2972#ifdef CONFIG_IP_MROUTE
e448515c 2973 __be32 dst = rt->rt_dst;
1da177e4 2974
f97c1e0c 2975 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
4feb88e5 2976 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
9a1b9496
DM
2977 int err = ipmr_get_route(net, skb,
2978 rt->rt_src, rt->rt_dst,
2979 r, nowait);
1da177e4
LT
2980 if (err <= 0) {
2981 if (!nowait) {
2982 if (err == 0)
2983 return 0;
be403ea1 2984 goto nla_put_failure;
1da177e4
LT
2985 } else {
2986 if (err == -EMSGSIZE)
be403ea1 2987 goto nla_put_failure;
e3703b3d 2988 error = err;
1da177e4
LT
2989 }
2990 }
2991 } else
2992#endif
f3756b79
DM
2993 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
2994 goto nla_put_failure;
1da177e4
LT
2995 }
2996
d8d1f30b 2997 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
e3703b3d
TG
2998 expires, error) < 0)
2999 goto nla_put_failure;
be403ea1
TG
3000
3001 return nlmsg_end(skb, nlh);
1da177e4 3002
be403ea1 3003nla_put_failure:
26932566
PM
3004 nlmsg_cancel(skb, nlh);
3005 return -EMSGSIZE;
1da177e4
LT
3006}
3007
5e73ea1a 3008static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
1da177e4 3009{
3b1e0a65 3010 struct net *net = sock_net(in_skb->sk);
d889ce3b
TG
3011 struct rtmsg *rtm;
3012 struct nlattr *tb[RTA_MAX+1];
1da177e4 3013 struct rtable *rt = NULL;
9e12bb22
AV
3014 __be32 dst = 0;
3015 __be32 src = 0;
3016 u32 iif;
d889ce3b 3017 int err;
963bfeee 3018 int mark;
1da177e4
LT
3019 struct sk_buff *skb;
3020
d889ce3b
TG
3021 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
3022 if (err < 0)
3023 goto errout;
3024
3025 rtm = nlmsg_data(nlh);
3026
1da177e4 3027 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
d889ce3b
TG
3028 if (skb == NULL) {
3029 err = -ENOBUFS;
3030 goto errout;
3031 }
1da177e4
LT
3032
3033 /* Reserve room for dummy headers; this skb can pass
3034 through a good chunk of the routing engine.
3035 */
459a98ed 3036 skb_reset_mac_header(skb);
c1d2bbe1 3037 skb_reset_network_header(skb);
d2c962b8
SH
3038
3039 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
eddc9ec5 3040 ip_hdr(skb)->protocol = IPPROTO_ICMP;
1da177e4
LT
3041 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
3042
17fb2c64
AV
3043 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
3044 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
d889ce3b 3045 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
963bfeee 3046 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
1da177e4
LT
3047
3048 if (iif) {
d889ce3b
TG
3049 struct net_device *dev;
3050
1937504d 3051 dev = __dev_get_by_index(net, iif);
d889ce3b
TG
3052 if (dev == NULL) {
3053 err = -ENODEV;
3054 goto errout_free;
3055 }
3056
1da177e4
LT
3057 skb->protocol = htons(ETH_P_IP);
3058 skb->dev = dev;
963bfeee 3059 skb->mark = mark;
1da177e4
LT
3060 local_bh_disable();
3061 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
3062 local_bh_enable();
d889ce3b 3063
511c3f92 3064 rt = skb_rtable(skb);
d8d1f30b
CG
3065 if (err == 0 && rt->dst.error)
3066 err = -rt->dst.error;
1da177e4 3067 } else {
68a5e3dd
DM
3068 struct flowi4 fl4 = {
3069 .daddr = dst,
3070 .saddr = src,
3071 .flowi4_tos = rtm->rtm_tos,
3072 .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
3073 .flowi4_mark = mark,
d889ce3b 3074 };
9d6ec938 3075 rt = ip_route_output_key(net, &fl4);
b23dd4fe
DM
3076
3077 err = 0;
3078 if (IS_ERR(rt))
3079 err = PTR_ERR(rt);
1da177e4 3080 }
d889ce3b 3081
1da177e4 3082 if (err)
d889ce3b 3083 goto errout_free;
1da177e4 3084
d8d1f30b 3085 skb_dst_set(skb, &rt->dst);
1da177e4
LT
3086 if (rtm->rtm_flags & RTM_F_NOTIFY)
3087 rt->rt_flags |= RTCF_NOTIFY;
3088
4feb88e5 3089 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1937504d 3090 RTM_NEWROUTE, 0, 0);
d889ce3b
TG
3091 if (err <= 0)
3092 goto errout_free;
1da177e4 3093
1937504d 3094 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
d889ce3b 3095errout:
2942e900 3096 return err;
1da177e4 3097
d889ce3b 3098errout_free:
1da177e4 3099 kfree_skb(skb);
d889ce3b 3100 goto errout;
1da177e4
LT
3101}
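/*
 * inet_rtm_getroute() above services RTM_GETROUTE netlink requests,
 * which is what "ip route get 8.8.8.8" issues: with RTA_IIF present
 * it simulates input routing via ip_route_input() on a dummy skb,
 * otherwise it performs an output lookup, then returns the result
 * through rt_fill_info().
 */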
3102
3103int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
3104{
3105 struct rtable *rt;
3106 int h, s_h;
3107 int idx, s_idx;
1937504d
DL
3108 struct net *net;
3109
3b1e0a65 3110 net = sock_net(skb->sk);
1da177e4
LT
3111
3112 s_h = cb->args[0];
d8c92830
ED
3113 if (s_h < 0)
3114 s_h = 0;
1da177e4 3115 s_idx = idx = cb->args[1];
a6272665
ED
3116 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
3117 if (!rt_hash_table[h].chain)
3118 continue;
1da177e4 3119 rcu_read_lock_bh();
a898def2 3120 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
d8d1f30b
CG
3121 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
3122 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
1da177e4 3123 continue;
e84f84f2 3124 if (rt_is_expired(rt))
29e75252 3125 continue;
d8d1f30b 3126 skb_dst_set_noref(skb, &rt->dst);
4feb88e5 3127 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
e905a9ed 3128 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
b6544c0b 3129 1, NLM_F_MULTI) <= 0) {
adf30907 3130 skb_dst_drop(skb);
1da177e4
LT
3131 rcu_read_unlock_bh();
3132 goto done;
3133 }
adf30907 3134 skb_dst_drop(skb);
1da177e4
LT
3135 }
3136 rcu_read_unlock_bh();
3137 }
3138
3139done:
3140 cb->args[0] = h;
3141 cb->args[1] = idx;
3142 return skb->len;
3143}
3144
3145void ip_rt_multicast_event(struct in_device *in_dev)
3146{
76e6ebfb 3147 rt_cache_flush(dev_net(in_dev->dev), 0);
1da177e4
LT
3148}
3149
3150#ifdef CONFIG_SYSCTL
81c684d1 3151static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
8d65af78 3152 void __user *buffer,
1da177e4
LT
3153 size_t *lenp, loff_t *ppos)
3154{
3155 if (write) {
639e104f 3156 int flush_delay;
81c684d1 3157 ctl_table ctl;
39a23e75 3158 struct net *net;
639e104f 3159
81c684d1
DL
3160 memcpy(&ctl, __ctl, sizeof(ctl));
3161 ctl.data = &flush_delay;
8d65af78 3162 proc_dointvec(&ctl, write, buffer, lenp, ppos);
639e104f 3163
81c684d1 3164 net = (struct net *)__ctl->extra1;
39a23e75 3165 rt_cache_flush(net, flush_delay);
1da177e4 3166 return 0;
e905a9ed 3167 }
1da177e4
LT
3168
3169 return -EINVAL;
3170}
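/*
 * The handler above backs the write-only (mode 0200) file
 * /proc/sys/net/ipv4/route/flush; the written integer is passed to
 * rt_cache_flush() as the flush delay, e.g. (assuming a shell with
 * the needed privileges):
 *
 *	echo -1 > /proc/sys/net/ipv4/route/flush
 */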
3171
eeb61f71 3172static ctl_table ipv4_route_table[] = {
1da177e4 3173 {
1da177e4
LT
3174 .procname = "gc_thresh",
3175 .data = &ipv4_dst_ops.gc_thresh,
3176 .maxlen = sizeof(int),
3177 .mode = 0644,
6d9f239a 3178 .proc_handler = proc_dointvec,
1da177e4
LT
3179 },
3180 {
1da177e4
LT
3181 .procname = "max_size",
3182 .data = &ip_rt_max_size,
3183 .maxlen = sizeof(int),
3184 .mode = 0644,
6d9f239a 3185 .proc_handler = proc_dointvec,
1da177e4
LT
3186 },
3187 {
3188 /* Deprecated. Use gc_min_interval_ms */
e905a9ed 3189
1da177e4
LT
3190 .procname = "gc_min_interval",
3191 .data = &ip_rt_gc_min_interval,
3192 .maxlen = sizeof(int),
3193 .mode = 0644,
6d9f239a 3194 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3195 },
3196 {
1da177e4
LT
3197 .procname = "gc_min_interval_ms",
3198 .data = &ip_rt_gc_min_interval,
3199 .maxlen = sizeof(int),
3200 .mode = 0644,
6d9f239a 3201 .proc_handler = proc_dointvec_ms_jiffies,
1da177e4
LT
3202 },
3203 {
1da177e4
LT
3204 .procname = "gc_timeout",
3205 .data = &ip_rt_gc_timeout,
3206 .maxlen = sizeof(int),
3207 .mode = 0644,
6d9f239a 3208 .proc_handler = proc_dointvec_jiffies,
1da177e4 3209 },
9f28a2fc
ED
3210 {
3211 .procname = "gc_interval",
3212 .data = &ip_rt_gc_interval,
3213 .maxlen = sizeof(int),
3214 .mode = 0644,
3215 .proc_handler = proc_dointvec_jiffies,
3216 },
1da177e4 3217 {
1da177e4
LT
3218 .procname = "redirect_load",
3219 .data = &ip_rt_redirect_load,
3220 .maxlen = sizeof(int),
3221 .mode = 0644,
6d9f239a 3222 .proc_handler = proc_dointvec,
1da177e4
LT
3223 },
3224 {
1da177e4
LT
3225 .procname = "redirect_number",
3226 .data = &ip_rt_redirect_number,
3227 .maxlen = sizeof(int),
3228 .mode = 0644,
6d9f239a 3229 .proc_handler = proc_dointvec,
1da177e4
LT
3230 },
3231 {
1da177e4
LT
3232 .procname = "redirect_silence",
3233 .data = &ip_rt_redirect_silence,
3234 .maxlen = sizeof(int),
3235 .mode = 0644,
6d9f239a 3236 .proc_handler = proc_dointvec,
1da177e4
LT
3237 },
3238 {
1da177e4
LT
3239 .procname = "error_cost",
3240 .data = &ip_rt_error_cost,
3241 .maxlen = sizeof(int),
3242 .mode = 0644,
6d9f239a 3243 .proc_handler = proc_dointvec,
1da177e4
LT
3244 },
3245 {
1da177e4
LT
3246 .procname = "error_burst",
3247 .data = &ip_rt_error_burst,
3248 .maxlen = sizeof(int),
3249 .mode = 0644,
6d9f239a 3250 .proc_handler = proc_dointvec,
1da177e4
LT
3251 },
3252 {
1da177e4
LT
3253 .procname = "gc_elasticity",
3254 .data = &ip_rt_gc_elasticity,
3255 .maxlen = sizeof(int),
3256 .mode = 0644,
6d9f239a 3257 .proc_handler = proc_dointvec,
1da177e4
LT
3258 },
3259 {
1da177e4
LT
3260 .procname = "mtu_expires",
3261 .data = &ip_rt_mtu_expires,
3262 .maxlen = sizeof(int),
3263 .mode = 0644,
6d9f239a 3264 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3265 },
3266 {
1da177e4
LT
3267 .procname = "min_pmtu",
3268 .data = &ip_rt_min_pmtu,
3269 .maxlen = sizeof(int),
3270 .mode = 0644,
6d9f239a 3271 .proc_handler = proc_dointvec,
1da177e4
LT
3272 },
3273 {
1da177e4
LT
3274 .procname = "min_adv_mss",
3275 .data = &ip_rt_min_advmss,
3276 .maxlen = sizeof(int),
3277 .mode = 0644,
6d9f239a 3278 .proc_handler = proc_dointvec,
1da177e4 3279 },
f8572d8f 3280 { }
1da177e4 3281};
39a23e75 3282
39a23e75
DL
3283static struct ctl_table ipv4_route_flush_table[] = {
3284 {
39a23e75
DL
3285 .procname = "flush",
3286 .maxlen = sizeof(int),
3287 .mode = 0200,
6d9f239a 3288 .proc_handler = ipv4_sysctl_rtcache_flush,
39a23e75 3289 },
f8572d8f 3290 { },
39a23e75
DL
3291};
3292
3293static __net_init int sysctl_route_net_init(struct net *net)
3294{
3295 struct ctl_table *tbl;
3296
3297 tbl = ipv4_route_flush_table;
09ad9bc7 3298 if (!net_eq(net, &init_net)) {
39a23e75
DL
3299 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3300 if (tbl == NULL)
3301 goto err_dup;
3302 }
3303 tbl[0].extra1 = net;
3304
ec8f23ce 3305 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
39a23e75
DL
3306 if (net->ipv4.route_hdr == NULL)
3307 goto err_reg;
3308 return 0;
3309
3310err_reg:
3311 if (tbl != ipv4_route_flush_table)
3312 kfree(tbl);
3313err_dup:
3314 return -ENOMEM;
3315}
3316
3317static __net_exit void sysctl_route_net_exit(struct net *net)
3318{
3319 struct ctl_table *tbl;
3320
3321 tbl = net->ipv4.route_hdr->ctl_table_arg;
3322 unregister_net_sysctl_table(net->ipv4.route_hdr);
3323 BUG_ON(tbl == ipv4_route_flush_table);
3324 kfree(tbl);
3325}
3326
3327static __net_initdata struct pernet_operations sysctl_route_ops = {
3328 .init = sysctl_route_net_init,
3329 .exit = sysctl_route_net_exit,
3330};
1da177e4
LT
3331#endif
3332
3ee94372 3333static __net_init int rt_genid_init(struct net *net)
9f5e97e5 3334{
3ee94372
NH
3335 get_random_bytes(&net->ipv4.rt_genid,
3336 sizeof(net->ipv4.rt_genid));
436c3b66
DM
3337 get_random_bytes(&net->ipv4.dev_addr_genid,
3338 sizeof(net->ipv4.dev_addr_genid));
9f5e97e5
DL
3339 return 0;
3340}
3341
3ee94372
NH
3342static __net_initdata struct pernet_operations rt_genid_ops = {
3343 .init = rt_genid_init,
9f5e97e5
DL
3344};
3345
c3426b47
DM
3346static int __net_init ipv4_inetpeer_init(struct net *net)
3347{
3348 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3349
3350 if (!bp)
3351 return -ENOMEM;
3352 inet_peer_base_init(bp);
3353 net->ipv4.peers = bp;
3354 return 0;
3355}
3356
3357static void __net_exit ipv4_inetpeer_exit(struct net *net)
3358{
3359 struct inet_peer_base *bp = net->ipv4.peers;
3360
3361 net->ipv4.peers = NULL;
56a6b248 3362 inetpeer_invalidate_tree(bp);
c3426b47
DM
3363 kfree(bp);
3364}
3365
3366static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3367 .init = ipv4_inetpeer_init,
3368 .exit = ipv4_inetpeer_exit,
3369};
9f5e97e5 3370
c7066f70 3371#ifdef CONFIG_IP_ROUTE_CLASSID
7d720c3e 3372struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
c7066f70 3373#endif /* CONFIG_IP_ROUTE_CLASSID */
1da177e4
LT
3374
3375static __initdata unsigned long rhash_entries;
3376static int __init set_rhash_entries(char *str)
3377{
413c27d8
EZ
3378 ssize_t ret;
3379
1da177e4
LT
3380 if (!str)
3381 return 0;
413c27d8
EZ
3382
3383 ret = kstrtoul(str, 0, &rhash_entries);
3384 if (ret)
3385 return 0;
3386
1da177e4
LT
3387 return 1;
3388}
3389__setup("rhash_entries=", set_rhash_entries);
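/*
 * "rhash_entries" is a kernel boot parameter overriding the
 * auto-sized route cache hash table, e.g. booting with
 * "rhash_entries=262144"; when it is 0 the table is sized from
 * total memory in ip_rt_init() below.
 */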
3390
3391int __init ip_rt_init(void)
3392{
424c4b70 3393 int rc = 0;
1da177e4 3394
c7066f70 3395#ifdef CONFIG_IP_ROUTE_CLASSID
0dcec8c2 3396 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1da177e4
LT
3397 if (!ip_rt_acct)
3398 panic("IP: failed to allocate ip_rt_acct\n");
1da177e4
LT
3399#endif
3400
e5d679f3
AD
3401 ipv4_dst_ops.kmem_cachep =
3402 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
20c2df83 3403 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3404
14e50e57
DM
3405 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3406
fc66f95c
ED
3407 if (dst_entries_init(&ipv4_dst_ops) < 0)
3408 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3409
3410 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3411 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3412
424c4b70
ED
3413 rt_hash_table = (struct rt_hash_bucket *)
3414 alloc_large_system_hash("IP route cache",
3415 sizeof(struct rt_hash_bucket),
3416 rhash_entries,
4481374c 3417 (totalram_pages >= 128 * 1024) ?
18955cfc 3418 15 : 17,
8d1502de 3419 0,
424c4b70
ED
3420 &rt_hash_log,
3421 &rt_hash_mask,
31fe62b9 3422 0,
c9503e0f 3423 rhash_entries ? 0 : 512 * 1024);
22c047cc
ED
3424 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3425 rt_hash_lock_init();
1da177e4
LT
3426
3427 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3428 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3429
1da177e4
LT
3430 devinet_init();
3431 ip_fib_init();
3432
9f28a2fc
ED
3433 INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3434 expires_ljiffies = jiffies;
3435 schedule_delayed_work(&expires_work,
3436 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3437
73b38711 3438 if (ip_rt_proc_init())
058bd4d2 3439 pr_err("Unable to create route proc files\n");
1da177e4
LT
3440#ifdef CONFIG_XFRM
3441 xfrm_init();
a33bc5c1 3442 xfrm4_init(ip_rt_max_size);
1da177e4 3443#endif
c7ac8679 3444 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
63f3444f 3445
39a23e75
DL
3446#ifdef CONFIG_SYSCTL
3447 register_pernet_subsys(&sysctl_route_ops);
3448#endif
3ee94372 3449 register_pernet_subsys(&rt_genid_ops);
c3426b47 3450 register_pernet_subsys(&ipv4_inetpeer_ops);
1da177e4
LT
3451 return rc;
3452}
3453
a1bc6eb4 3454#ifdef CONFIG_SYSCTL
eeb61f71
AV
3455/*
3456 * We really need to sanitize the damn ipv4 init order, then all
3457 * this nonsense will go away.
3458 */
3459void __init ip_static_sysctl_init(void)
3460{
4e5ca785 3461 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
eeb61f71 3462}
a1bc6eb4 3463#endif