/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH
u32 fib_multipath_secret __read_mostly;

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize it to get rid of the dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }

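/*
 * Illustrative usage sketch (not from the original source): the macros
 * above open a block plus a loop, so every walk must be closed with
 * endfor_nexthops().  Assuming a valid struct fib_info *fi:
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_flags & RTNH_F_DEAD)
 *			continue;
 *		pr_debug("nh %d via oif %d\n", nhsel, nh->nh_oif);
 *	} endfor_nexthops(fi);
 *
 * With CONFIG_IP_ROUTE_MULTIPATH unset the loop body still runs exactly
 * once, so callers need no #ifdef of their own.
 */
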
const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};

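/*
 * Example (illustrative, not in the original file): fib_props is indexed
 * by route type, so "ip route add prohibit 10.0.0.0/8" creates an
 * RTN_PROHIBIT entry, and lookups matching it fail with the table's
 * .error value:
 *
 *	int err = fib_props[RTN_PROHIBIT].error;   (err == -EACCES)
 *
 * Types with .error == 0 (unicast, local, ...) forward normally instead.
 */
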
static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	if (!rt)
		return;

	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
	 * because we waited an RCU grace period before calling
	 * free_fib_info_rcu()
	 */

	dst_free(&rt->dst);
}

static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash;
	int i;

	hash = rcu_dereference_protected(nh->nh_exceptions, 1);
	if (!hash)
		return;
	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);

			rt_fibinfo_free(&fnhe->fnhe_rth_input);
			rt_fibinfo_free(&fnhe->fnhe_rth_output);

			kfree(fnhe);

			fnhe = next;
		}
	}
	kfree(hash);
}

static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	if (!rtp)
		return;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		if (rt)
			dst_free(&rt->dst);
	}
	free_percpu(rtp);
}

/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		lwtstate_put(nexthop_nh->nh_lwtstate);
		free_nh_exceptions(nexthop_nh);
		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
	} endfor_nexthops(fi);

	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
	kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);

void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw  != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

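/*
 * Worked example (illustrative): with DEVINDEX_HASHBITS == 8 the three
 * byte-sized slices of an ifindex are folded together, e.g. for
 * val == 0x1234:
 *
 *	(0x1234 ^ (0x1234 >> 8) ^ (0x1234 >> 16)) & 0xff
 *	= (0x1234 ^ 0x12 ^ 0x0) & 0xff = 0x26
 *
 * so a device with ifindex 0x1234 lands in bucket 0x26 of
 * fib_info_devhash.
 */
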
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}

/* Check that the gateway is already configured.
 * Used only by redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4) /* RTA_PREFSRC */
			 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		size_t nh_encapsize = 0;
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* grab encap info */
		for_nexthops(fi) {
			if (nh->nh_lwtstate) {
				/* RTA_ENCAP_TYPE */
				nh_encapsize += lwtunnel_get_encap_size(
						nh->nh_lwtstate);
				/* RTA_ENCAP */
				nh_encapsize += nla_total_size(2);
			}
		} endfor_nexthops(fi);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size((fi->fib_nhs * nhsize) +
					  nh_encapsize);
	}

	return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, const struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx,
			    int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	} else {
		return 0;
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

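/*
 * Illustrative sketch of the wire format being counted here (not part
 * of the original file): RTA_MULTIPATH carries a packed array of
 * struct rtnexthop, each followed by its own attributes, e.g. for two
 * nexthops:
 *
 *	[rtnexthop len=.. ifindex=2 hops=0][RTA_GATEWAY 10.0.0.1]
 *	[rtnexthop len=.. ifindex=3 hops=1][RTA_GATEWAY 10.0.1.1]
 *
 * rtnh_ok() checks that a full header fits in "remaining" and
 * rtnh_next() advances by rtnh_len, so a non-zero leftover means a
 * truncated or corrupt stream.
 */
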
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	int ret;

	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
			nla = nla_find(attrs, attrlen, RTA_ENCAP);
			if (nla) {
				struct lwtunnel_state *lwtstate;
				struct net_device *dev = NULL;
				struct nlattr *nla_entype;

				nla_entype = nla_find(attrs, attrlen,
						      RTA_ENCAP_TYPE);
				if (!nla_entype)
					goto err_inval;
				if (cfg->fc_oif)
					dev = __dev_get_by_index(net, cfg->fc_oif);
				ret = lwtunnel_build_state(dev, nla_get_u16(
							   nla_entype),
							   nla, AF_INET, cfg,
							   &lwtstate);
				if (ret)
					goto errout;
				nexthop_nh->nh_lwtstate =
					lwtstate_get(lwtstate);
			}
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;

err_inval:
	ret = -EINVAL;

errout:
	return ret;
}

static void fib_rebalance(struct fib_info *fi)
{
	int total;
	int w;
	struct in_device *in_dev;

	if (fi->fib_nhs < 2)
		return;

	total = 0;
	for_nexthops(fi) {
		if (nh->nh_flags & RTNH_F_DEAD)
			continue;

		in_dev = __in_dev_get_rtnl(nh->nh_dev);

		if (in_dev &&
		    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		    nh->nh_flags & RTNH_F_LINKDOWN)
			continue;

		total += nh->nh_weight;
	} endfor_nexthops(fi);

	w = 0;
	change_nexthops(fi) {
		int upper_bound;

		in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);

		if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
			upper_bound = -1;
		} else if (in_dev &&
			   IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
			   nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
			upper_bound = -1;
		} else {
			w += nexthop_nh->nh_weight;
			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
							    total) - 1;
		}

		atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
	} endfor_nexthops(fi);

	net_get_random_once(&fib_multipath_secret,
			    sizeof(fib_multipath_secret));
}

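/*
 * Worked example (illustrative): for two nexthops with weights 1 and 3,
 * total == 4 and the cumulative bounds become
 *
 *	nh0: DIV_ROUND_CLOSEST_ULL(1ULL << 31, 4) - 1 =  536870911
 *	nh1: DIV_ROUND_CLOSEST_ULL(4ULL << 31, 4) - 1 = 2147483647
 *
 * so a 31-bit flow hash selects nh0 for roughly 25% of flows and nh1
 * for the remaining 75% (see fib_select_multipath()).
 */
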
static inline void fib_add_weight(struct fib_info *fi,
				  const struct fib_nh *nh)
{
	fi->fib_weight += nh->nh_weight;
}

#else /* CONFIG_IP_ROUTE_MULTIPATH */

#define fib_rebalance(fi) do { } while (0)
#define fib_add_weight(fi, nh) do { } while (0)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

static int fib_encap_match(struct net *net, u16 encap_type,
			   struct nlattr *encap,
			   int oif, const struct fib_nh *nh,
			   const struct fib_config *cfg)
{
	struct lwtunnel_state *lwtstate;
	struct net_device *dev = NULL;
	int ret, result = 0;

	if (encap_type == LWTUNNEL_ENCAP_NONE)
		return 0;

	if (oif)
		dev = __dev_get_by_index(net, oif);
	ret = lwtunnel_build_state(dev, encap_type, encap,
				   AF_INET, cfg, &lwtstate);
	if (!ret) {
		result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
		lwtstate_free(lwtstate);
	}

	return result;
}

int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if (cfg->fc_encap) {
			if (fib_encap_match(net, cfg->fc_encap_type,
					    cfg->fc_encap, cfg->fc_oif,
					    fi->fib_nh, cfg))
				return 1;
		}
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (!cfg->fc_mp)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_in_addr(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif

	return 0;
}

/*
 * Picture
 * -------
 *
 * The semantics of nexthops is messy for historical reasons.
 * We have to take into account that:
 * a) gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) gateway must be an on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) If both gateway and interface are specified, they should not
 *    contradict.
 * d) If we use tunnel routes, the gateway could be not on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size of the code does
 * not increase practically, but it becomes much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
 * which means that gw is forced to be on link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to coexist
 * in peace.
 *
 * Normally it looks like the following.
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *			  |
 *			  |-> {link prefix} -> (gw, oif) [scope local]
 *						|
 *						|-> {local prefix} (terminal node)
 */

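/*
 * Concrete example of the scope recursion above (illustrative):
 *
 *	default via 10.0.0.1			-> nexthop (10.0.0.1, eth0),
 *						   resolved with scope link
 *	10.0.0.0/24 dev eth0 scope link		-> nexthop (0, eth0),
 *						   resolved with scope host
 *	local 10.0.0.2				-> terminal node
 *
 * fib_check_nh() below implements exactly this one-step-narrower
 * lookup (cfg->fc_scope + 1).
 */
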
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err = 0;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {
			unsigned int addr_type;

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
			if (addr_type != RTN_UNICAST)
				return -EINVAL;
			if (!netif_carrier_ok(dev))
				nh->nh_flags |= RTNH_F_LINKDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct fib_table *tbl = NULL;
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
				.flowi4_iif = LOOPBACK_IFINDEX,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;

			if (cfg->fc_table)
				tbl = fib_get_table(net, cfg->fc_table);

			if (tbl)
				err = fib_table_lookup(tbl, &fl4, &res,
						       FIB_LOOKUP_IGNORE_LINKSTATE |
						       FIB_LOOKUP_NOREF);

			/* on error or if no table given do full lookup. This
			 * is needed for example when nexthops are in the local
			 * table rather than the given table
			 */
			if (!tbl || err) {
				err = fib_lookup(net, &fl4, &res,
						 FIB_LOOKUP_IGNORE_LINKSTATE);
			}

			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		if (!netif_carrier_ok(dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (!in_dev)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		if (!netif_carrier_ok(nh->nh_dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}

__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}

static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
	    fib_prefsrc != cfg->fc_dst) {
		u32 tb_id = cfg->fc_table;
		int rc;

		if (tb_id == RT_TABLE_MAIN)
			tb_id = RT_TABLE_LOCAL;

		rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
					  fib_prefsrc, tb_id);
		if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
			rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
						  fib_prefsrc, RT_TABLE_LOCAL);
		}

		if (rc != RTN_LOCAL)
			return false;
	}
	return true;
}

static int
fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;

	if (!cfg->fc_mx)
		return 0;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (type > RTAX_MAX)
			return -EINVAL;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				return -EINVAL;
		} else {
			val = nla_get_u32(nla);
		}
		if (type == RTAX_ADVMSS && val > 65535 - 40)
			val = 65535 - 40;
		if (type == RTAX_MTU && val > 65535 - 15)
			val = 65535 - 15;
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			return -EINVAL;
		fi->fib_metrics[type - 1] = val;
	}

	if (ecn_ca)
		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;

	return 0;
}

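/*
 * Example (illustrative): a request carrying the metrics
 * "mtu 70000 hoplimit 300" is clamped rather than rejected, i.e.
 * fib_metrics[RTAX_MTU - 1] becomes 65535 - 15 and
 * fib_metrics[RTAX_HOPLIMIT - 1] becomes 255, while an unknown
 * congestion-control name under RTAX_CC_ALGO fails with -EINVAL.
 */
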
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

	if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 16;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (!fi)
		goto failure;
	fib_info_cnt++;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;

	fi->fib_net = net;
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_type = cfg->fc_type;
	fi->fib_tb_id = cfg->fc_table;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
		if (!nexthop_nh->nh_pcpu_rth_output)
			goto failure;
	} endfor_nexthops(fi)

	err = fib_convert_metrics(fi, cfg);
	if (err)
		goto failure;

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		if (cfg->fc_encap) {
			struct lwtunnel_state *lwtstate;
			struct net_device *dev = NULL;

			if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
				goto err_inval;
			if (cfg->fc_oif)
				dev = __dev_get_by_index(net, cfg->fc_oif);
			err = lwtunnel_build_state(dev, cfg->fc_encap_type,
						   cfg->fc_encap, AF_INET, cfg,
						   &lwtstate);
			if (err)
				goto failure;

			nh->nh_lwtstate = lwtstate_get(lwtstate);
		}
		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (!nh->nh_dev)
			goto failure;
	} else {
		int linkdown = 0;

		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
			if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
				linkdown++;
		} endfor_nexthops(fi)
		if (linkdown == fi->fib_nhs)
			fi->fib_flags |= RTNH_F_LINKDOWN;
	}

	if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
		goto err_inval;

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
		fib_add_weight(fi, nexthop_nh);
	} endfor_nexthops(fi)

	fib_rebalance(fi);

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}

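/*
 * Usage note (illustrative): because of the fib_find_info() step above,
 * two routes that differ only in their prefix, e.g.
 *
 *	ip route add 10.1.0.0/24 via 10.0.0.1
 *	ip route add 10.2.0.0/24 via 10.0.0.1
 *
 * end up sharing a single fib_info; the second insert merely bumps
 * fib_treeref on the copy already present in fib_info_hash.
 */
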
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		struct in_device *in_dev;

		if (fi->fib_nh->nh_gw &&
		    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
		if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
			in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev);
			if (in_dev &&
			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
				rtm->rtm_flags |= RTNH_F_DEAD;
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
		if (fi->fib_nh->nh_lwtstate &&
		    lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
			goto nla_put_failure;
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		for_nexthops(fi) {
			struct in_device *in_dev;

			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (!rtnh)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			if (nh->nh_flags & RTNH_F_LINKDOWN) {
				in_dev = __in_dev_get_rtnl(nh->nh_dev);
				if (in_dev &&
				    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
					rtnh->rtnh_flags |= RTNH_F_DEAD;
			}
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			if (nh->nh_lwtstate &&
			    lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
				goto nla_put_failure;

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

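/*
 * Illustrative layout of the message built above for a two-nexthop
 * route (not part of the original file):
 *
 *	nlmsghdr | rtmsg | RTA_TABLE | RTA_DST | RTA_PRIORITY | ...
 *	  | RTA_MULTIPATH
 *	      [rtnexthop, RTA_GATEWAY, ...][rtnexthop, RTA_GATEWAY, ...]
 *
 * Each rtnh_len is patched after the fact from nlmsg_get_pos(), since
 * the per-nexthop attribute sizes are only known once they are written.
 */
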
/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net_device *dev, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct net *net = dev_net(dev);
	int tb_id = l3mdev_fib_table(dev);
	struct fib_info *fi;

	if (!fib_info_laddrhash || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net) ||
		    fi->fib_tb_id != tb_id)
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				switch (event) {
				case NETDEV_DOWN:
				case NETDEV_UNREGISTER:
					nexthop_nh->nh_flags |= RTNH_F_DEAD;
					/* fall through */
				case NETDEV_CHANGE:
					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
					break;
				}
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (event == NETDEV_UNREGISTER &&
			    nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				fi->fib_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				fi->fib_flags |= RTNH_F_LINKDOWN;
				break;
			}
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}

/* Must be invoked inside of an RCU protected region.  */
static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct hlist_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	u8 slen = 32 - res->prefixlen;
	int order = -1, last_idx = -1;
	struct fib_alias *fa, *fa1 = NULL;
	u32 last_prio = res->fi->fib_priority;
	u8 last_tos = 0;

	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_slen != slen)
			continue;
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
			continue;
		if (fa->tb_id != tb->tb_id)
			continue;
		if (next_fi->fib_priority > last_prio &&
		    fa->fa_tos == last_tos) {
			if (last_tos)
				continue;
			break;
		}
		if (next_fi->fib_flags & RTNH_F_DEAD)
			continue;
		last_tos = fa->fa_tos;
		last_prio = next_fi->fib_priority;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (!fi) {
			if (next_fi != res->fi)
				break;
			fa1 = fa;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, fa1->fa_default)) {
			fib_result_assign(res, fi);
			fa1->fa_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || !fi) {
		if (fa1)
			fa1->fa_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      fa1->fa_default)) {
		fib_result_assign(res, fi);
		fa1->fa_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	fa1->fa_default = last_idx;
out:
	return;
}

/*
 * Dead device goes up. We wake up dead nexthops.
 * It makes sense only on multipath routes.
 */
int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	if (nh_flags & RTNH_F_DEAD) {
		unsigned int flags = dev_get_flags(dev);

		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			nh_flags |= RTNH_F_LINKDOWN;
	}

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (!nexthop_nh->nh_dev ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			nexthop_nh->nh_flags &= ~nh_flags;
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~nh_flags;
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static bool fib_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;

	if (nh->nh_scope == RT_SCOPE_LINK) {
		struct neighbour *n;

		rcu_read_lock_bh();

		n = __ipv4_neigh_lookup_noref(nh->nh_dev,
					      (__force u32)nh->nh_gw);
		if (n)
			state = n->nud_state;

		rcu_read_unlock_bh();
	}

	return !!(state & NUD_VALID);
}

void fib_select_multipath(struct fib_result *res, int hash)
{
	struct fib_info *fi = res->fi;
	struct net *net = fi->fib_net;
	bool first = false;

	for_nexthops(fi) {
		if (hash > atomic_read(&nh->nh_upper_bound))
			continue;

		if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
		    fib_good_nh(nh)) {
			res->nh_sel = nhsel;
			return;
		}
		if (!first) {
			res->nh_sel = nhsel;
			first = true;
		}
	} endfor_nexthops(fi);
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */

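/*
 * Example (illustrative): with the bounds from the fib_rebalance()
 * example above (536870911 and 2147483647), a flow hash of 0x30000000
 * (805306368) skips nh0 and selects nh1.  fib_select_path() below
 * derives that hash from the flow via get_hash_from_flowi4(fl4) >> 1,
 * keeping it within the 31-bit range of nh_upper_bound.
 */
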
void fib_select_path(struct net *net, struct fib_result *res,
		     struct flowi4 *fl4, int mp_hash)
{
	bool oif_check;

	oif_check = (fl4->flowi4_oif == 0 ||
		     fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi->fib_nhs > 1 && oif_check) {
		if (mp_hash < 0)
			mp_hash = get_hash_from_flowi4(fl4) >> 1;

		fib_select_multipath(res, mp_hash);
	}
	else
#endif
	if (!res->prefixlen &&
	    res->table->tb_num_default > 1 &&
	    res->type == RTN_UNICAST && oif_check)
		fib_select_default(fl4, res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, *res);
}
EXPORT_SYMBOL_GPL(fib_select_path);