net/ipv4/fib_semantics.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IPv4 Forwarding Information Base: semantics.
7 *
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #include <asm/uaccess.h>
17 #include <linux/bitops.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/jiffies.h>
21 #include <linux/mm.h>
22 #include <linux/string.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/errno.h>
26 #include <linux/in.h>
27 #include <linux/inet.h>
28 #include <linux/inetdevice.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_arp.h>
31 #include <linux/proc_fs.h>
32 #include <linux/skbuff.h>
33 #include <linux/init.h>
34 #include <linux/slab.h>
35
36 #include <net/arp.h>
37 #include <net/ip.h>
38 #include <net/protocol.h>
39 #include <net/route.h>
40 #include <net/tcp.h>
41 #include <net/sock.h>
42 #include <net/ip_fib.h>
43 #include <net/netlink.h>
44 #include <net/nexthop.h>
45
46 #include "fib_lookup.h"
47
48 static DEFINE_SPINLOCK(fib_info_lock);
49 static struct hlist_head *fib_info_hash;
50 static struct hlist_head *fib_info_laddrhash;
51 static unsigned int fib_info_hash_size;
52 static unsigned int fib_info_cnt;
53
54 #define DEVINDEX_HASHBITS 8
55 #define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
56 static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
57
58 #ifdef CONFIG_IP_ROUTE_MULTIPATH
59
60 static DEFINE_SPINLOCK(fib_multipath_lock);
61
62 #define for_nexthops(fi) { \
63 int nhsel; const struct fib_nh *nh; \
64 for (nhsel = 0, nh = (fi)->fib_nh; \
65 nhsel < (fi)->fib_nhs; \
66 nh++, nhsel++)
67
68 #define change_nexthops(fi) { \
69 int nhsel; struct fib_nh *nexthop_nh; \
70 for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
71 nhsel < (fi)->fib_nhs; \
72 nexthop_nh++, nhsel++)
73
74 #else /* CONFIG_IP_ROUTE_MULTIPATH */
75
76 /* Hope that gcc will optimize away the dummy loop */
77
78 #define for_nexthops(fi) { \
79 int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
80 for (nhsel = 0; nhsel < 1; nhsel++)
81
82 #define change_nexthops(fi) { \
83 int nhsel; \
84 struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
85 for (nhsel = 0; nhsel < 1; nhsel++)
86
87 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
88
89 #define endfor_nexthops(fi) }
90
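/* The iteration macros above expand to an opening '{' plus a for-loop
 * header, so every use must be closed with endfor_nexthops(), which
 * supplies the matching '}'.  An illustrative (not verbatim) caller
 * looks like:
 *
 *	change_nexthops(fi) {
 *		if (nexthop_nh->nh_flags & RTNH_F_DEAD)
 *			continue;
 *		nexthop_nh->nh_power = nexthop_nh->nh_weight;
 *	} endfor_nexthops(fi)
 */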
91
92 const struct fib_prop fib_props[RTN_MAX + 1] = {
93 [RTN_UNSPEC] = {
94 .error = 0,
95 .scope = RT_SCOPE_NOWHERE,
96 },
97 [RTN_UNICAST] = {
98 .error = 0,
99 .scope = RT_SCOPE_UNIVERSE,
100 },
101 [RTN_LOCAL] = {
102 .error = 0,
103 .scope = RT_SCOPE_HOST,
104 },
105 [RTN_BROADCAST] = {
106 .error = 0,
107 .scope = RT_SCOPE_LINK,
108 },
109 [RTN_ANYCAST] = {
110 .error = 0,
111 .scope = RT_SCOPE_LINK,
112 },
113 [RTN_MULTICAST] = {
114 .error = 0,
115 .scope = RT_SCOPE_UNIVERSE,
116 },
117 [RTN_BLACKHOLE] = {
118 .error = -EINVAL,
119 .scope = RT_SCOPE_UNIVERSE,
120 },
121 [RTN_UNREACHABLE] = {
122 .error = -EHOSTUNREACH,
123 .scope = RT_SCOPE_UNIVERSE,
124 },
125 [RTN_PROHIBIT] = {
126 .error = -EACCES,
127 .scope = RT_SCOPE_UNIVERSE,
128 },
129 [RTN_THROW] = {
130 .error = -EAGAIN,
131 .scope = RT_SCOPE_UNIVERSE,
132 },
133 [RTN_NAT] = {
134 .error = -EINVAL,
135 .scope = RT_SCOPE_NOWHERE,
136 },
137 [RTN_XRESOLVE] = {
138 .error = -EINVAL,
139 .scope = RT_SCOPE_NOWHERE,
140 },
141 };
142
143 static void rt_fibinfo_free(struct rtable __rcu **rtp)
144 {
145 struct rtable *rt = rcu_dereference_protected(*rtp, 1);
146
147 if (!rt)
148 return;
149
150 /* Not even needed: RCU_INIT_POINTER(*rtp, NULL),
151 * because we waited an RCU grace period before calling
152 * free_fib_info_rcu()
153 */
154
155 dst_free(&rt->dst);
156 }
157
158 static void free_nh_exceptions(struct fib_nh *nh)
159 {
160 struct fnhe_hash_bucket *hash;
161 int i;
162
163 hash = rcu_dereference_protected(nh->nh_exceptions, 1);
164 if (!hash)
165 return;
166 for (i = 0; i < FNHE_HASH_SIZE; i++) {
167 struct fib_nh_exception *fnhe;
168
169 fnhe = rcu_dereference_protected(hash[i].chain, 1);
170 while (fnhe) {
171 struct fib_nh_exception *next;
172
173 next = rcu_dereference_protected(fnhe->fnhe_next, 1);
174
175 rt_fibinfo_free(&fnhe->fnhe_rth_input);
176 rt_fibinfo_free(&fnhe->fnhe_rth_output);
177
178 kfree(fnhe);
179
180 fnhe = next;
181 }
182 }
183 kfree(hash);
184 }
185
186 static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
187 {
188 int cpu;
189
190 if (!rtp)
191 return;
192
193 for_each_possible_cpu(cpu) {
194 struct rtable *rt;
195
196 rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
197 if (rt)
198 dst_free(&rt->dst);
199 }
200 free_percpu(rtp);
201 }
202
203 /* Release a nexthop info record */
204 static void free_fib_info_rcu(struct rcu_head *head)
205 {
206 struct fib_info *fi = container_of(head, struct fib_info, rcu);
207
208 change_nexthops(fi) {
209 if (nexthop_nh->nh_dev)
210 dev_put(nexthop_nh->nh_dev);
211 free_nh_exceptions(nexthop_nh);
212 rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
213 rt_fibinfo_free(&nexthop_nh->nh_rth_input);
214 } endfor_nexthops(fi);
215
216 if (fi->fib_metrics != (u32 *) dst_default_metrics)
217 kfree(fi->fib_metrics);
218 kfree(fi);
219 }
220
221 void free_fib_info(struct fib_info *fi)
222 {
223 if (fi->fib_dead == 0) {
224 pr_warn("Freeing alive fib_info %p\n", fi);
225 return;
226 }
227 fib_info_cnt--;
228 #ifdef CONFIG_IP_ROUTE_CLASSID
229 change_nexthops(fi) {
230 if (nexthop_nh->nh_tclassid)
231 fi->fib_net->ipv4.fib_num_tclassid_users--;
232 } endfor_nexthops(fi);
233 #endif
234 call_rcu(&fi->rcu, free_fib_info_rcu);
235 }
236
237 void fib_release_info(struct fib_info *fi)
238 {
239 spin_lock_bh(&fib_info_lock);
240 if (fi && --fi->fib_treeref == 0) {
241 hlist_del(&fi->fib_hash);
242 if (fi->fib_prefsrc)
243 hlist_del(&fi->fib_lhash);
244 change_nexthops(fi) {
245 if (!nexthop_nh->nh_dev)
246 continue;
247 hlist_del(&nexthop_nh->nh_hash);
248 } endfor_nexthops(fi)
249 fi->fib_dead = 1;
250 fib_info_put(fi);
251 }
252 spin_unlock_bh(&fib_info_lock);
253 }
254
255 static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
256 {
257 const struct fib_nh *onh = ofi->fib_nh;
258
259 for_nexthops(fi) {
260 if (nh->nh_oif != onh->nh_oif ||
261 nh->nh_gw != onh->nh_gw ||
262 nh->nh_scope != onh->nh_scope ||
263 #ifdef CONFIG_IP_ROUTE_MULTIPATH
264 nh->nh_weight != onh->nh_weight ||
265 #endif
266 #ifdef CONFIG_IP_ROUTE_CLASSID
267 nh->nh_tclassid != onh->nh_tclassid ||
268 #endif
269 ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
270 return -1;
271 onh++;
272 } endfor_nexthops(fi);
273 return 0;
274 }
275
276 static inline unsigned int fib_devindex_hashfn(unsigned int val)
277 {
278 unsigned int mask = DEVINDEX_HASHSIZE - 1;
279
280 return (val ^
281 (val >> DEVINDEX_HASHBITS) ^
282 (val >> (DEVINDEX_HASHBITS * 2))) & mask;
283 }
284
285 static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
286 {
287 unsigned int mask = (fib_info_hash_size - 1);
288 unsigned int val = fi->fib_nhs;
289
290 val ^= (fi->fib_protocol << 8) | fi->fib_scope;
291 val ^= (__force u32)fi->fib_prefsrc;
292 val ^= fi->fib_priority;
293 for_nexthops(fi) {
294 val ^= fib_devindex_hashfn(nh->nh_oif);
295 } endfor_nexthops(fi)
296
297 return (val ^ (val >> 7) ^ (val >> 12)) & mask;
298 }
299
300 static struct fib_info *fib_find_info(const struct fib_info *nfi)
301 {
302 struct hlist_head *head;
303 struct fib_info *fi;
304 unsigned int hash;
305
306 hash = fib_info_hashfn(nfi);
307 head = &fib_info_hash[hash];
308
309 hlist_for_each_entry(fi, head, fib_hash) {
310 if (!net_eq(fi->fib_net, nfi->fib_net))
311 continue;
312 if (fi->fib_nhs != nfi->fib_nhs)
313 continue;
314 if (nfi->fib_protocol == fi->fib_protocol &&
315 nfi->fib_scope == fi->fib_scope &&
316 nfi->fib_prefsrc == fi->fib_prefsrc &&
317 nfi->fib_priority == fi->fib_priority &&
318 nfi->fib_type == fi->fib_type &&
319 memcmp(nfi->fib_metrics, fi->fib_metrics,
320 sizeof(u32) * RTAX_MAX) == 0 &&
321 !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
322 (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
323 return fi;
324 }
325
326 return NULL;
327 }
328
329 /* Check that the gateway is already configured.
330 * Used only by the redirect accept routine.
331 */
332 int ip_fib_check_default(__be32 gw, struct net_device *dev)
333 {
334 struct hlist_head *head;
335 struct fib_nh *nh;
336 unsigned int hash;
337
338 spin_lock(&fib_info_lock);
339
340 hash = fib_devindex_hashfn(dev->ifindex);
341 head = &fib_info_devhash[hash];
342 hlist_for_each_entry(nh, head, nh_hash) {
343 if (nh->nh_dev == dev &&
344 nh->nh_gw == gw &&
345 !(nh->nh_flags & RTNH_F_DEAD)) {
346 spin_unlock(&fib_info_lock);
347 return 0;
348 }
349 }
350
351 spin_unlock(&fib_info_lock);
352
353 return -1;
354 }
355
356 static inline size_t fib_nlmsg_size(struct fib_info *fi)
357 {
358 size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
359 + nla_total_size(4) /* RTA_TABLE */
360 + nla_total_size(4) /* RTA_DST */
361 + nla_total_size(4) /* RTA_PRIORITY */
362 + nla_total_size(4) /* RTA_PREFSRC */
363 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
364
365 /* space for nested metrics */
366 payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
367
368 if (fi->fib_nhs) {
369 /* Also handles the special case fib_nhs == 1 */
370
371 /* each nexthop is packed in an attribute */
372 size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
373
374 /* may contain flow and gateway attribute */
375 nhsize += 2 * nla_total_size(4);
376
377 /* all nexthops are packed in a nested attribute */
378 payload += nla_total_size(fi->fib_nhs * nhsize);
379 }
380
381 return payload;
382 }
383
384 void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
385 int dst_len, u32 tb_id, const struct nl_info *info,
386 unsigned int nlm_flags)
387 {
388 struct sk_buff *skb;
389 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
390 int err = -ENOBUFS;
391
392 skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
393 if (!skb)
394 goto errout;
395
396 err = fib_dump_info(skb, info->portid, seq, event, tb_id,
397 fa->fa_type, key, dst_len,
398 fa->fa_tos, fa->fa_info, nlm_flags);
399 if (err < 0) {
400 /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
401 WARN_ON(err == -EMSGSIZE);
402 kfree_skb(skb);
403 goto errout;
404 }
405 rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
406 info->nlh, GFP_KERNEL);
407 return;
408 errout:
409 if (err < 0)
410 rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
411 }
412
413 static int fib_detect_death(struct fib_info *fi, int order,
414 struct fib_info **last_resort, int *last_idx,
415 int dflt)
416 {
417 struct neighbour *n;
418 int state = NUD_NONE;
419
420 n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
421 if (n) {
422 state = n->nud_state;
423 neigh_release(n);
424 }
425 if (state == NUD_REACHABLE)
426 return 0;
427 if ((state & NUD_VALID) && order != dflt)
428 return 0;
429 if ((state & NUD_VALID) ||
430 (*last_idx < 0 && order > dflt)) {
431 *last_resort = fi;
432 *last_idx = order;
433 }
434 return 1;
435 }
436
437 #ifdef CONFIG_IP_ROUTE_MULTIPATH
438
439 static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
440 {
441 int nhs = 0;
442
443 while (rtnh_ok(rtnh, remaining)) {
444 nhs++;
445 rtnh = rtnh_next(rtnh, &remaining);
446 }
447
448 /* leftover implies invalid nexthop configuration, discard it */
449 return remaining > 0 ? 0 : nhs;
450 }
451
452 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
453 int remaining, struct fib_config *cfg)
454 {
455 change_nexthops(fi) {
456 int attrlen;
457
458 if (!rtnh_ok(rtnh, remaining))
459 return -EINVAL;
460
461 nexthop_nh->nh_flags =
462 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
463 nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
464 nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
465
466 attrlen = rtnh_attrlen(rtnh);
467 if (attrlen > 0) {
468 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
469
470 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
471 nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
472 #ifdef CONFIG_IP_ROUTE_CLASSID
473 nla = nla_find(attrs, attrlen, RTA_FLOW);
474 nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
475 if (nexthop_nh->nh_tclassid)
476 fi->fib_net->ipv4.fib_num_tclassid_users++;
477 #endif
478 }
479
480 rtnh = rtnh_next(rtnh, &remaining);
481 } endfor_nexthops(fi);
482
483 return 0;
484 }
485
486 #endif
487
488 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
489 {
490 #ifdef CONFIG_IP_ROUTE_MULTIPATH
491 struct rtnexthop *rtnh;
492 int remaining;
493 #endif
494
495 if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
496 return 1;
497
498 if (cfg->fc_oif || cfg->fc_gw) {
499 if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
500 (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
501 return 0;
502 return 1;
503 }
504
505 #ifdef CONFIG_IP_ROUTE_MULTIPATH
506 if (!cfg->fc_mp)
507 return 0;
508
509 rtnh = cfg->fc_mp;
510 remaining = cfg->fc_mp_len;
511
512 for_nexthops(fi) {
513 int attrlen;
514
515 if (!rtnh_ok(rtnh, remaining))
516 return -EINVAL;
517
518 if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
519 return 1;
520
521 attrlen = rtnh_attrlen(rtnh);
522 if (attrlen > 0) {
523 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
524
525 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
526 if (nla && nla_get_in_addr(nla) != nh->nh_gw)
527 return 1;
528 #ifdef CONFIG_IP_ROUTE_CLASSID
529 nla = nla_find(attrs, attrlen, RTA_FLOW);
530 if (nla && nla_get_u32(nla) != nh->nh_tclassid)
531 return 1;
532 #endif
533 }
534
535 rtnh = rtnh_next(rtnh, &remaining);
536 } endfor_nexthops(fi);
537 #endif
538 return 0;
539 }
540
541
542 /*
543 * Picture
544 * -------
545 *
546 * The semantics of nexthops are very messy for historical reasons.
547 * We have to take into account that:
548 * a) a gateway can actually be a local interface address,
549 * so that a gatewayed route is direct.
550 * b) a gateway must be an on-link address, possibly
551 * described not by an ifaddr but by a direct route.
552 * c) if both gateway and interface are specified, they must not
553 * contradict each other.
554 * d) if we use tunnel routes, the gateway may not be on-link.
555 *
556 * Attempting to reconcile all of these (alas, self-contradictory) conditions
557 * results in pretty ugly and hairy code with obscure logic.
558 *
559 * I chose to generalize it instead, so that the code size
560 * barely increases, but the logic becomes
561 * much more general.
562 * Every prefix is assigned a "scope" value: "host" is a local address,
563 * "link" is a direct route,
564 * [ ... "site" ... "interior" ... ]
565 * and "universe" is a true gateway route with global meaning.
566 *
567 * Every prefix refers to a set of "nexthop"s (gw, oif),
568 * where the gw must have a narrower scope. This recursion stops
569 * when the gw has LOCAL scope or when the "nexthop" is declared ONLINK,
570 * which means that the gw is forced to be on-link.
571 *
572 * The code is still hairy, but now it is logically
573 * consistent and very flexible. For example, as a by-product it allows
574 * independent exterior and interior routing processes
575 * to coexist in peace.
576 *
577 * Normally it looks like the following:
578 *
579 * {universe prefix} -> (gw, oif) [scope link]
580 * |
581 * |-> {link prefix} -> (gw, oif) [scope local]
582 * |
583 * |-> {local prefix} (terminal node)
584 */
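/* For instance (illustrative only): "ip route add 10.1.0.0/16 via 192.0.2.1"
 * creates a universe-scope prefix whose gateway 192.0.2.1 must itself be
 * reachable through a narrower-scope (link) route, typically the connected
 * subnet route of the egress interface; fib_check_nh() below performs
 * exactly that narrower-scope lookup.
 */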
585 static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
586 struct fib_nh *nh)
587 {
588 int err;
589 struct net *net;
590 struct net_device *dev;
591
592 net = cfg->fc_nlinfo.nl_net;
593 if (nh->nh_gw) {
594 struct fib_result res;
595
596 if (nh->nh_flags & RTNH_F_ONLINK) {
597
598 if (cfg->fc_scope >= RT_SCOPE_LINK)
599 return -EINVAL;
600 if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
601 return -EINVAL;
602 dev = __dev_get_by_index(net, nh->nh_oif);
603 if (!dev)
604 return -ENODEV;
605 if (!(dev->flags & IFF_UP))
606 return -ENETDOWN;
607 if (!netif_carrier_ok(dev))
608 nh->nh_flags |= RTNH_F_LINKDOWN;
609 nh->nh_dev = dev;
610 dev_hold(dev);
611 nh->nh_scope = RT_SCOPE_LINK;
612 return 0;
613 }
614 rcu_read_lock();
615 {
616 struct flowi4 fl4 = {
617 .daddr = nh->nh_gw,
618 .flowi4_scope = cfg->fc_scope + 1,
619 .flowi4_oif = nh->nh_oif,
620 .flowi4_iif = LOOPBACK_IFINDEX,
621 };
622
623 /* It is not necessary, but requires a bit of thinking */
624 if (fl4.flowi4_scope < RT_SCOPE_LINK)
625 fl4.flowi4_scope = RT_SCOPE_LINK;
626 err = fib_lookup(net, &fl4, &res,
627 FIB_LOOKUP_IGNORE_LINKSTATE);
628 if (err) {
629 rcu_read_unlock();
630 return err;
631 }
632 }
633 err = -EINVAL;
634 if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
635 goto out;
636 nh->nh_scope = res.scope;
637 nh->nh_oif = FIB_RES_OIF(res);
638 nh->nh_dev = dev = FIB_RES_DEV(res);
639 if (!dev)
640 goto out;
641 dev_hold(dev);
642 if (!netif_carrier_ok(dev))
643 nh->nh_flags |= RTNH_F_LINKDOWN;
644 err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
645 } else {
646 struct in_device *in_dev;
647
648 if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
649 return -EINVAL;
650
651 rcu_read_lock();
652 err = -ENODEV;
653 in_dev = inetdev_by_index(net, nh->nh_oif);
654 if (!in_dev)
655 goto out;
656 err = -ENETDOWN;
657 if (!(in_dev->dev->flags & IFF_UP))
658 goto out;
659 nh->nh_dev = in_dev->dev;
660 dev_hold(nh->nh_dev);
661 nh->nh_scope = RT_SCOPE_HOST;
662 if (!netif_carrier_ok(nh->nh_dev))
663 nh->nh_flags |= RTNH_F_LINKDOWN;
664 err = 0;
665 }
666 out:
667 rcu_read_unlock();
668 return err;
669 }
670
671 static inline unsigned int fib_laddr_hashfn(__be32 val)
672 {
673 unsigned int mask = (fib_info_hash_size - 1);
674
675 return ((__force u32)val ^
676 ((__force u32)val >> 7) ^
677 ((__force u32)val >> 14)) & mask;
678 }
679
680 static struct hlist_head *fib_info_hash_alloc(int bytes)
681 {
682 if (bytes <= PAGE_SIZE)
683 return kzalloc(bytes, GFP_KERNEL);
684 else
685 return (struct hlist_head *)
686 __get_free_pages(GFP_KERNEL | __GFP_ZERO,
687 get_order(bytes));
688 }
689
690 static void fib_info_hash_free(struct hlist_head *hash, int bytes)
691 {
692 if (!hash)
693 return;
694
695 if (bytes <= PAGE_SIZE)
696 kfree(hash);
697 else
698 free_pages((unsigned long) hash, get_order(bytes));
699 }
700
701 static void fib_info_hash_move(struct hlist_head *new_info_hash,
702 struct hlist_head *new_laddrhash,
703 unsigned int new_size)
704 {
705 struct hlist_head *old_info_hash, *old_laddrhash;
706 unsigned int old_size = fib_info_hash_size;
707 unsigned int i, bytes;
708
709 spin_lock_bh(&fib_info_lock);
710 old_info_hash = fib_info_hash;
711 old_laddrhash = fib_info_laddrhash;
712 fib_info_hash_size = new_size;
713
714 for (i = 0; i < old_size; i++) {
715 struct hlist_head *head = &fib_info_hash[i];
716 struct hlist_node *n;
717 struct fib_info *fi;
718
719 hlist_for_each_entry_safe(fi, n, head, fib_hash) {
720 struct hlist_head *dest;
721 unsigned int new_hash;
722
723 new_hash = fib_info_hashfn(fi);
724 dest = &new_info_hash[new_hash];
725 hlist_add_head(&fi->fib_hash, dest);
726 }
727 }
728 fib_info_hash = new_info_hash;
729
730 for (i = 0; i < old_size; i++) {
731 struct hlist_head *lhead = &fib_info_laddrhash[i];
732 struct hlist_node *n;
733 struct fib_info *fi;
734
735 hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
736 struct hlist_head *ldest;
737 unsigned int new_hash;
738
739 new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
740 ldest = &new_laddrhash[new_hash];
741 hlist_add_head(&fi->fib_lhash, ldest);
742 }
743 }
744 fib_info_laddrhash = new_laddrhash;
745
746 spin_unlock_bh(&fib_info_lock);
747
748 bytes = old_size * sizeof(struct hlist_head *);
749 fib_info_hash_free(old_info_hash, bytes);
750 fib_info_hash_free(old_laddrhash, bytes);
751 }
752
753 __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
754 {
755 nh->nh_saddr = inet_select_addr(nh->nh_dev,
756 nh->nh_gw,
757 nh->nh_parent->fib_scope);
758 nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
759
760 return nh->nh_saddr;
761 }
762
763 struct fib_info *fib_create_info(struct fib_config *cfg)
764 {
765 int err;
766 struct fib_info *fi = NULL;
767 struct fib_info *ofi;
768 int nhs = 1;
769 struct net *net = cfg->fc_nlinfo.nl_net;
770
771 if (cfg->fc_type > RTN_MAX)
772 goto err_inval;
773
774 /* Fast check to catch the weirdest cases */
775 if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
776 goto err_inval;
777
778 #ifdef CONFIG_IP_ROUTE_MULTIPATH
779 if (cfg->fc_mp) {
780 nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
781 if (nhs == 0)
782 goto err_inval;
783 }
784 #endif
785
786 err = -ENOBUFS;
787 if (fib_info_cnt >= fib_info_hash_size) {
788 unsigned int new_size = fib_info_hash_size << 1;
789 struct hlist_head *new_info_hash;
790 struct hlist_head *new_laddrhash;
791 unsigned int bytes;
792
793 if (!new_size)
794 new_size = 16;
795 bytes = new_size * sizeof(struct hlist_head *);
796 new_info_hash = fib_info_hash_alloc(bytes);
797 new_laddrhash = fib_info_hash_alloc(bytes);
798 if (!new_info_hash || !new_laddrhash) {
799 fib_info_hash_free(new_info_hash, bytes);
800 fib_info_hash_free(new_laddrhash, bytes);
801 } else
802 fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
803
804 if (!fib_info_hash_size)
805 goto failure;
806 }
807
808 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
809 if (!fi)
810 goto failure;
811 fib_info_cnt++;
812 if (cfg->fc_mx) {
813 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
814 if (!fi->fib_metrics)
815 goto failure;
816 } else
817 fi->fib_metrics = (u32 *) dst_default_metrics;
818
819 fi->fib_net = net;
820 fi->fib_protocol = cfg->fc_protocol;
821 fi->fib_scope = cfg->fc_scope;
822 fi->fib_flags = cfg->fc_flags;
823 fi->fib_priority = cfg->fc_priority;
824 fi->fib_prefsrc = cfg->fc_prefsrc;
825 fi->fib_type = cfg->fc_type;
826
827 fi->fib_nhs = nhs;
828 change_nexthops(fi) {
829 nexthop_nh->nh_parent = fi;
830 nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
831 if (!nexthop_nh->nh_pcpu_rth_output)
832 goto failure;
833 } endfor_nexthops(fi)
834
835 if (cfg->fc_mx) {
836 struct nlattr *nla;
837 int remaining;
838
839 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
840 int type = nla_type(nla);
841
842 if (type) {
843 u32 val;
844
845 if (type > RTAX_MAX)
846 goto err_inval;
847 if (type == RTAX_CC_ALGO) {
848 char tmp[TCP_CA_NAME_MAX];
849
850 nla_strlcpy(tmp, nla, sizeof(tmp));
851 val = tcp_ca_get_key_by_name(tmp);
852 if (val == TCP_CA_UNSPEC)
853 goto err_inval;
854 } else {
855 val = nla_get_u32(nla);
856 }
857 if (type == RTAX_ADVMSS && val > 65535 - 40)
858 val = 65535 - 40;
859 if (type == RTAX_MTU && val > 65535 - 15)
860 val = 65535 - 15;
861 fi->fib_metrics[type - 1] = val;
862 }
863 }
864 }
865
866 if (cfg->fc_mp) {
867 #ifdef CONFIG_IP_ROUTE_MULTIPATH
868 err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
869 if (err != 0)
870 goto failure;
871 if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
872 goto err_inval;
873 if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
874 goto err_inval;
875 #ifdef CONFIG_IP_ROUTE_CLASSID
876 if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
877 goto err_inval;
878 #endif
879 #else
880 goto err_inval;
881 #endif
882 } else {
883 struct fib_nh *nh = fi->fib_nh;
884
885 nh->nh_oif = cfg->fc_oif;
886 nh->nh_gw = cfg->fc_gw;
887 nh->nh_flags = cfg->fc_flags;
888 #ifdef CONFIG_IP_ROUTE_CLASSID
889 nh->nh_tclassid = cfg->fc_flow;
890 if (nh->nh_tclassid)
891 fi->fib_net->ipv4.fib_num_tclassid_users++;
892 #endif
893 #ifdef CONFIG_IP_ROUTE_MULTIPATH
894 nh->nh_weight = 1;
895 #endif
896 }
897
898 if (fib_props[cfg->fc_type].error) {
899 if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
900 goto err_inval;
901 goto link_it;
902 } else {
903 switch (cfg->fc_type) {
904 case RTN_UNICAST:
905 case RTN_LOCAL:
906 case RTN_BROADCAST:
907 case RTN_ANYCAST:
908 case RTN_MULTICAST:
909 break;
910 default:
911 goto err_inval;
912 }
913 }
914
915 if (cfg->fc_scope > RT_SCOPE_HOST)
916 goto err_inval;
917
918 if (cfg->fc_scope == RT_SCOPE_HOST) {
919 struct fib_nh *nh = fi->fib_nh;
920
921 /* Local address is added. */
922 if (nhs != 1 || nh->nh_gw)
923 goto err_inval;
924 nh->nh_scope = RT_SCOPE_NOWHERE;
925 nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
926 err = -ENODEV;
927 if (!nh->nh_dev)
928 goto failure;
929 } else {
930 int linkdown = 0;
931
932 change_nexthops(fi) {
933 err = fib_check_nh(cfg, fi, nexthop_nh);
934 if (err != 0)
935 goto failure;
936 if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
937 linkdown++;
938 } endfor_nexthops(fi)
939 if (linkdown == fi->fib_nhs)
940 fi->fib_flags |= RTNH_F_LINKDOWN;
941 }
942
943 if (fi->fib_prefsrc) {
944 if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
945 fi->fib_prefsrc != cfg->fc_dst)
946 if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
947 goto err_inval;
948 }
949
950 change_nexthops(fi) {
951 fib_info_update_nh_saddr(net, nexthop_nh);
952 } endfor_nexthops(fi)
953
954 link_it:
955 ofi = fib_find_info(fi);
956 if (ofi) {
957 fi->fib_dead = 1;
958 free_fib_info(fi);
959 ofi->fib_treeref++;
960 return ofi;
961 }
962
963 fi->fib_treeref++;
964 atomic_inc(&fi->fib_clntref);
965 spin_lock_bh(&fib_info_lock);
966 hlist_add_head(&fi->fib_hash,
967 &fib_info_hash[fib_info_hashfn(fi)]);
968 if (fi->fib_prefsrc) {
969 struct hlist_head *head;
970
971 head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
972 hlist_add_head(&fi->fib_lhash, head);
973 }
974 change_nexthops(fi) {
975 struct hlist_head *head;
976 unsigned int hash;
977
978 if (!nexthop_nh->nh_dev)
979 continue;
980 hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
981 head = &fib_info_devhash[hash];
982 hlist_add_head(&nexthop_nh->nh_hash, head);
983 } endfor_nexthops(fi)
984 spin_unlock_bh(&fib_info_lock);
985 return fi;
986
987 err_inval:
988 err = -EINVAL;
989
990 failure:
991 if (fi) {
992 fi->fib_dead = 1;
993 free_fib_info(fi);
994 }
995
996 return ERR_PTR(err);
997 }
998
999 int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1000 u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
1001 struct fib_info *fi, unsigned int flags)
1002 {
1003 struct nlmsghdr *nlh;
1004 struct rtmsg *rtm;
1005
1006 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
1007 if (!nlh)
1008 return -EMSGSIZE;
1009
1010 rtm = nlmsg_data(nlh);
1011 rtm->rtm_family = AF_INET;
1012 rtm->rtm_dst_len = dst_len;
1013 rtm->rtm_src_len = 0;
1014 rtm->rtm_tos = tos;
1015 if (tb_id < 256)
1016 rtm->rtm_table = tb_id;
1017 else
1018 rtm->rtm_table = RT_TABLE_COMPAT;
1019 if (nla_put_u32(skb, RTA_TABLE, tb_id))
1020 goto nla_put_failure;
1021 rtm->rtm_type = type;
1022 rtm->rtm_flags = fi->fib_flags;
1023 rtm->rtm_scope = fi->fib_scope;
1024 rtm->rtm_protocol = fi->fib_protocol;
1025
1026 if (rtm->rtm_dst_len &&
1027 nla_put_in_addr(skb, RTA_DST, dst))
1028 goto nla_put_failure;
1029 if (fi->fib_priority &&
1030 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
1031 goto nla_put_failure;
1032 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
1033 goto nla_put_failure;
1034
1035 if (fi->fib_prefsrc &&
1036 nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
1037 goto nla_put_failure;
1038 if (fi->fib_nhs == 1) {
1039 struct in_device *in_dev;
1040
1041 if (fi->fib_nh->nh_gw &&
1042 nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
1043 goto nla_put_failure;
1044 if (fi->fib_nh->nh_oif &&
1045 nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
1046 goto nla_put_failure;
1047 if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
1048 in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev);
1049 if (in_dev &&
1050 IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
1051 rtm->rtm_flags |= RTNH_F_DEAD;
1052 }
1053 #ifdef CONFIG_IP_ROUTE_CLASSID
1054 if (fi->fib_nh[0].nh_tclassid &&
1055 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
1056 goto nla_put_failure;
1057 #endif
1058 }
1059 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1060 if (fi->fib_nhs > 1) {
1061 struct rtnexthop *rtnh;
1062 struct nlattr *mp;
1063
1064 mp = nla_nest_start(skb, RTA_MULTIPATH);
1065 if (!mp)
1066 goto nla_put_failure;
1067
1068 for_nexthops(fi) {
1069 struct in_device *in_dev;
1070
1071 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
1072 if (!rtnh)
1073 goto nla_put_failure;
1074
1075 rtnh->rtnh_flags = nh->nh_flags & 0xFF;
1076 if (nh->nh_flags & RTNH_F_LINKDOWN) {
1077 in_dev = __in_dev_get_rtnl(nh->nh_dev);
1078 if (in_dev &&
1079 IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
1080 rtnh->rtnh_flags |= RTNH_F_DEAD;
1081 }
1082 rtnh->rtnh_hops = nh->nh_weight - 1;
1083 rtnh->rtnh_ifindex = nh->nh_oif;
1084
1085 if (nh->nh_gw &&
1086 nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
1087 goto nla_put_failure;
1088 #ifdef CONFIG_IP_ROUTE_CLASSID
1089 if (nh->nh_tclassid &&
1090 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
1091 goto nla_put_failure;
1092 #endif
1093 /* length of rtnetlink header + attributes */
1094 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
1095 } endfor_nexthops(fi);
1096
1097 nla_nest_end(skb, mp);
1098 }
1099 #endif
1100 nlmsg_end(skb, nlh);
1101 return 0;
1102
1103 nla_put_failure:
1104 nlmsg_cancel(skb, nlh);
1105 return -EMSGSIZE;
1106 }
1107
1108 /*
1109 * Update FIB if:
1110 * - a local address disappeared -> we must delete all the entries
1111 * referring to it.
1112 * - a device went down -> we must shut down all nexthops going via it.
1113 */
1114 int fib_sync_down_addr(struct net *net, __be32 local)
1115 {
1116 int ret = 0;
1117 unsigned int hash = fib_laddr_hashfn(local);
1118 struct hlist_head *head = &fib_info_laddrhash[hash];
1119 struct fib_info *fi;
1120
1121 if (!fib_info_laddrhash || local == 0)
1122 return 0;
1123
1124 hlist_for_each_entry(fi, head, fib_lhash) {
1125 if (!net_eq(fi->fib_net, net))
1126 continue;
1127 if (fi->fib_prefsrc == local) {
1128 fi->fib_flags |= RTNH_F_DEAD;
1129 ret++;
1130 }
1131 }
1132 return ret;
1133 }
1134
1135 int fib_sync_down_dev(struct net_device *dev, unsigned long event)
1136 {
1137 int ret = 0;
1138 int scope = RT_SCOPE_NOWHERE;
1139 struct fib_info *prev_fi = NULL;
1140 unsigned int hash = fib_devindex_hashfn(dev->ifindex);
1141 struct hlist_head *head = &fib_info_devhash[hash];
1142 struct fib_nh *nh;
1143
1144 if (event == NETDEV_UNREGISTER ||
1145 event == NETDEV_DOWN)
1146 scope = -1;
1147
1148 hlist_for_each_entry(nh, head, nh_hash) {
1149 struct fib_info *fi = nh->nh_parent;
1150 int dead;
1151
1152 BUG_ON(!fi->fib_nhs);
1153 if (nh->nh_dev != dev || fi == prev_fi)
1154 continue;
1155 prev_fi = fi;
1156 dead = 0;
1157 change_nexthops(fi) {
1158 if (nexthop_nh->nh_flags & RTNH_F_DEAD)
1159 dead++;
1160 else if (nexthop_nh->nh_dev == dev &&
1161 nexthop_nh->nh_scope != scope) {
1162 switch (event) {
1163 case NETDEV_DOWN:
1164 case NETDEV_UNREGISTER:
1165 nexthop_nh->nh_flags |= RTNH_F_DEAD;
1166 /* fall through */
1167 case NETDEV_CHANGE:
1168 nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
1169 break;
1170 }
1171 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1172 spin_lock_bh(&fib_multipath_lock);
1173 fi->fib_power -= nexthop_nh->nh_power;
1174 nexthop_nh->nh_power = 0;
1175 spin_unlock_bh(&fib_multipath_lock);
1176 #endif
1177 dead++;
1178 }
1179 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1180 if (event == NETDEV_UNREGISTER &&
1181 nexthop_nh->nh_dev == dev) {
1182 dead = fi->fib_nhs;
1183 break;
1184 }
1185 #endif
1186 } endfor_nexthops(fi)
1187 if (dead == fi->fib_nhs) {
1188 switch (event) {
1189 case NETDEV_DOWN:
1190 case NETDEV_UNREGISTER:
1191 fi->fib_flags |= RTNH_F_DEAD;
1192 /* fall through */
1193 case NETDEV_CHANGE:
1194 fi->fib_flags |= RTNH_F_LINKDOWN;
1195 break;
1196 }
1197 ret++;
1198 }
1199 }
1200
1201 return ret;
1202 }
1203
1204 /* Must be invoked inside an RCU-protected region. */
1205 void fib_select_default(struct fib_result *res)
1206 {
1207 struct fib_info *fi = NULL, *last_resort = NULL;
1208 struct hlist_head *fa_head = res->fa_head;
1209 struct fib_table *tb = res->table;
1210 int order = -1, last_idx = -1;
1211 struct fib_alias *fa;
1212
1213 hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
1214 struct fib_info *next_fi = fa->fa_info;
1215
1216 if (next_fi->fib_scope != res->scope ||
1217 fa->fa_type != RTN_UNICAST)
1218 continue;
1219
1220 if (next_fi->fib_priority > res->fi->fib_priority)
1221 break;
1222 if (!next_fi->fib_nh[0].nh_gw ||
1223 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1224 continue;
1225
1226 fib_alias_accessed(fa);
1227
1228 if (!fi) {
1229 if (next_fi != res->fi)
1230 break;
1231 } else if (!fib_detect_death(fi, order, &last_resort,
1232 &last_idx, tb->tb_default)) {
1233 fib_result_assign(res, fi);
1234 tb->tb_default = order;
1235 goto out;
1236 }
1237 fi = next_fi;
1238 order++;
1239 }
1240
1241 if (order <= 0 || !fi) {
1242 tb->tb_default = -1;
1243 goto out;
1244 }
1245
1246 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1247 tb->tb_default)) {
1248 fib_result_assign(res, fi);
1249 tb->tb_default = order;
1250 goto out;
1251 }
1252
1253 if (last_idx >= 0)
1254 fib_result_assign(res, last_resort);
1255 tb->tb_default = last_idx;
1256 out:
1257 return;
1258 }
1259
1260 /*
1261 * A dead device goes up. We wake up dead nexthops.
1262 * This makes sense only for multipath routes.
1263 */
1264 int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
1265 {
1266 struct fib_info *prev_fi;
1267 unsigned int hash;
1268 struct hlist_head *head;
1269 struct fib_nh *nh;
1270 int ret;
1271
1272 if (!(dev->flags & IFF_UP))
1273 return 0;
1274
1275 prev_fi = NULL;
1276 hash = fib_devindex_hashfn(dev->ifindex);
1277 head = &fib_info_devhash[hash];
1278 ret = 0;
1279
1280 hlist_for_each_entry(nh, head, nh_hash) {
1281 struct fib_info *fi = nh->nh_parent;
1282 int alive;
1283
1284 BUG_ON(!fi->fib_nhs);
1285 if (nh->nh_dev != dev || fi == prev_fi)
1286 continue;
1287
1288 prev_fi = fi;
1289 alive = 0;
1290 change_nexthops(fi) {
1291 if (!(nexthop_nh->nh_flags & nh_flags)) {
1292 alive++;
1293 continue;
1294 }
1295 if (!nexthop_nh->nh_dev ||
1296 !(nexthop_nh->nh_dev->flags & IFF_UP))
1297 continue;
1298 if (nexthop_nh->nh_dev != dev ||
1299 !__in_dev_get_rtnl(dev))
1300 continue;
1301 alive++;
1302 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1303 spin_lock_bh(&fib_multipath_lock);
1304 nexthop_nh->nh_power = 0;
1305 nexthop_nh->nh_flags &= ~nh_flags;
1306 spin_unlock_bh(&fib_multipath_lock);
1307 #else
1308 nexthop_nh->nh_flags &= ~nh_flags;
1309 #endif
1310 } endfor_nexthops(fi)
1311
1312 if (alive > 0) {
1313 fi->fib_flags &= ~nh_flags;
1314 ret++;
1315 }
1316 }
1317
1318 return ret;
1319 }
1320
1321 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1322
1323 /*
1324 * The algorithm is suboptimal, but it provides really
1325 * fair weighted route distribution.
1326 */
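/* Each nexthop is given nh_power credits equal to its weight, and
 * fi->fib_power holds the sum; every selection decrements the chosen
 * nexthop's credits and fib_power by one, and once fib_power drops to
 * zero the credits are replenished, so over time nexthops are chosen
 * in proportion to their weights.
 */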
1327 void fib_select_multipath(struct fib_result *res)
1328 {
1329 struct fib_info *fi = res->fi;
1330 struct in_device *in_dev;
1331 int w;
1332
1333 spin_lock_bh(&fib_multipath_lock);
1334 if (fi->fib_power <= 0) {
1335 int power = 0;
1336 change_nexthops(fi) {
1337 in_dev = __in_dev_get_rcu(nexthop_nh->nh_dev);
1338 if (nexthop_nh->nh_flags & RTNH_F_DEAD)
1339 continue;
1340 if (in_dev &&
1341 IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1342 nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
1343 continue;
1344 power += nexthop_nh->nh_weight;
1345 nexthop_nh->nh_power = nexthop_nh->nh_weight;
1346 } endfor_nexthops(fi);
1347 fi->fib_power = power;
1348 if (power <= 0) {
1349 spin_unlock_bh(&fib_multipath_lock);
1350 /* Race condition: route has just become dead. */
1351 res->nh_sel = 0;
1352 return;
1353 }
1354 }
1355
1356
1357 /* w should be a random number in [0..fi->fib_power-1];
1358 * jiffies is a pretty bad approximation of one.
1359 */
1360
1361 w = jiffies % fi->fib_power;
1362
1363 change_nexthops(fi) {
1364 if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
1365 nexthop_nh->nh_power) {
1366 w -= nexthop_nh->nh_power;
1367 if (w <= 0) {
1368 nexthop_nh->nh_power--;
1369 fi->fib_power--;
1370 res->nh_sel = nhsel;
1371 spin_unlock_bh(&fib_multipath_lock);
1372 return;
1373 }
1374 }
1375 } endfor_nexthops(fi);
1376
1377 /* Race condition: route has just become dead. */
1378 res->nh_sel = 0;
1379 spin_unlock_bh(&fib_multipath_lock);
1380 }
1381 #endif