// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/ip.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};
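
/* How rt6_score_route() uses these values: RT6_NUD_FAIL_HARD excludes
 * the nexthop outright, RT6_NUD_FAIL_DO_RR demotes it to the lowest
 * valid score and triggers round-robin to a sibling route,
 * RT6_NUD_FAIL_PROBE leaves a negative score so the nexthop is passed
 * over in favour of any candidate with a non-negative score, and
 * RT6_NUD_SUCCEED accepts the nexthop.
 */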

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *rt);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}
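
/* Uncached rt6_info entries (e.g. RTF_CACHE clones created for
 * FLOWI_FLAG_KNOWN_NH lookups) are not linked into the FIB tree, so
 * they are tracked on these per-cpu lists.  This is what lets
 * rt6_uncached_list_flush_dev() below re-point them at the loopback
 * device when their original device is unregistered.
 */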

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}
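
/* On a lookup miss the neighbour entry is created on the fly;
 * neigh_create() errors are flattened to NULL so callers only have to
 * check for a missing neighbour, never for ERR_PTR() values.
 */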

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
	.family			= AF_INET6,
	.gc			= ip6_dst_gc,
	.gc_thresh		= 1024,
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.mtu			= ip6_mtu,
	.cow_metrics		= dst_cow_metrics_generic,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};
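
/* Template for the per-namespace ip6_dst_ops: each netns gets its own
 * copy at namespace init time, so the function pointers here are
 * shared while the copy carries the live per-netns state.
 */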

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.destroy		= ip6_dst_destroy,
	.check			= ip6_dst_check,
	.mtu			= ip6_blackhole_mtu,
	.default_advmss		= ip6_default_advmss,
	.update_pmtu		= ip6_rt_blackhole_update_pmtu,
	.redirect		= ip6_rt_blackhole_redirect,
	.cow_metrics		= dst_cow_metrics_generic,
	.neigh_lookup		= ip6_dst_neigh_lookup,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol	= RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}

void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
{
	struct fib6_info *sibling, *next_sibling;
	struct fib6_info *match = res->f6i;

	if (!match->fib6_nsiblings || have_oif_match)
		goto out;

	/* We might have already computed the hash for ICMPv6 errors. In
	 * such a case it will always be non-zero. Otherwise now is the
	 * time to do it.
	 */
	if (!fl6->mp_hash)
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (fl6->mp_hash <= atomic_read(&match->fib6_nh.fib_nh_upper_bound))
		goto out;

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		const struct fib6_nh *nh = &sibling->fib6_nh;
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

out:
	res->f6i = match;
	res->nh = &match->fib6_nh;
}
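
/* Sketch of the selection above: this is hash-threshold multipath
 * (RFC 2992).  Each sibling nexthop is assigned an upper bound in
 * [0, 2^31 - 1] proportional to its weight, and a flow takes the first
 * nexthop whose bound its hash does not exceed.  For example, with two
 * equal-weight nexthops the bounds are roughly 0x3fffffff and
 * 0x7fffffff, splitting flows evenly while keeping most flows on the
 * same nexthop when a sibling is added or removed.
 */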

/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
			       const struct in6_addr *saddr, int oif, int flags)
{
	const struct net_device *dev;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		return false;

	dev = nh->fib_nh_dev;
	if (oif) {
		if (dev->ifindex == oif)
			return true;
	} else {
		if (ipv6_chk_addr(net, saddr, dev,
				  flags & RT6_LOOKUP_F_IFACE))
			return true;
	}

	return false;
}

static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
{
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	if (!oif && ipv6_addr_any(saddr)) {
		nh = &f6i->fib6_nh;
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			goto out;
	}

	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		nh = &spf6i->fib6_nh;
		if (__rt6_device_match(net, nh, saddr, oif, flags)) {
			res->f6i = spf6i;
			goto out;
		}
	}

	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = &res->f6i->fib6_nh;
		goto out;
	}

	nh = &f6i->fib6_nh;
	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = &res->f6i->fib6_nh;
	}
out:
	res->nh = nh;
	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}

static void rt6_probe(struct fib6_nh *fib6_nh)
{
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!fib6_nh->fib_nh_gw_family)
		return;

	nh_gw = &fib6_nh->fib_nh_gw6;
	dev = fib6_nh->fib_nh_dev;
	rcu_read_lock_bh();
	idev = __in6_dev_get(dev);
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else if (time_after(jiffies, fib6_nh->last_probe +
			      idev->cnf.rtr_probe_interval)) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		fib6_nh->last_probe = jiffies;
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		dev_hold(dev);
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);
		if (n < 0)
			return n;
	}
	return m;
}
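
/* Score layout used above: bit 1 (value 2) is set when the nexthop
 * device matches the requested oif.  With CONFIG_IPV6_ROUTER_PREF the
 * decoded router preference from the RA (ordered low < medium < high)
 * is shifted in above that bit, so preference only breaks ties among
 * interface-compatible routes.  Negative returns are rt6_nud_state
 * values: RT6_NUD_FAIL_HARD for an interface mismatch under strict
 * matching, or a neighbour-check failure from rt6_check_neigh().
 */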

static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
{
	bool match_do_rr = false;
	bool rc = false;
	int m;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		goto out;

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(nh);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		rc = true;
	}
out:
	return rc;
}

static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
{
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		struct fib6_nh *nh;

		if (cont && f6i->fib6_metric != metric) {
			*cont = f6i;
			return;
		}

		if (fib6_check_expired(f6i))
			continue;

		nh = &f6i->fib6_nh;
		if (find_match(nh, f6i->fib6_flags, oif, strict, mpri, do_rr)) {
			res->f6i = f6i;
			res->nh = nh;
			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;
		}
	}
}

static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
{
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;
	int mpri = -1;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)
		return;

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}
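
/* find_rr_leaf() scans in up to three passes: first from rr_head to the
 * end of the leaf list, then from the head of the list up to rr_head,
 * both restricted to routes sharing rr_head's metric.  If nothing
 * matched and a route with a different (worse) metric was seen, a final
 * pass starting at that continuation point considers the lower-priority
 * routes as well.
 */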

static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;
	bool do_rr = false;
	int key_plen;

	/* make sure this function or its helpers sets f6i */
	res->f6i = NULL;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		goto out;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		goto out;

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

out:
	if (!res->f6i) {
		res->f6i = net->ipv6.fib6_null_entry;
		res->nh = &res->f6i->fib6_nh;
		res->fib6_flags = res->f6i->fib6_flags;
		res->fib6_type = res->f6i->fib6_type;
	}
}

static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* ipv6_addr_prefix() copies only prefix_len bits and
		 * zero-fills the rest, so it is safe on a truncated
		 * option
		 */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		fib6_info_release(rt);
	}
	return 0;
}
#endif
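
/* The checks above follow the RFC 4191 route information option:
 * rinfo->length counts 8-octet units including the 8-byte header, so
 * it is at most 3; any non-zero prefix_len needs at least length 1,
 * prefixes longer than 64 bits need at least length 2, and only
 * length 3 carries the full 16-byte prefix - otherwise the prefix is
 * reconstructed from the bits that are present.
 */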

/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device itself if it is a master device, the master
		 * device if the device is enslaved, and the loopback
		 * device as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* in the remaining case netif_is_l3_master(dev) is true
		 * and dev is already the device we want to return
		 */
	}

	return dev;
}

static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;
	if (rt->dst_host)
		flags |= DST_HOST;

	return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;

	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}

/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;

	ip6_rt_init_dst(rt, res);

	rt->rt6i_dst = f6i->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_flags = res->fib6_flags;
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
		rt->rt6i_flags |= RTF_GATEWAY;
	}
	rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = f6i->fib6_src;
#endif
}

static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_node_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (net) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
	struct rt6_info *nrt;

	if (!fib6_info_hold_safe(f6i))
		goto fallback;

	flags = fib6_info_dst_flags(f6i);
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (!nrt) {
		fib6_info_release(f6i);
		goto fallback;
	}

	ip6_rt_copy_init(nrt, res);
	return nrt;

fallback:
	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);
	return nrt;
}

static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_result res = {};
	struct fib6_node *fn;
	struct rt6_info *rt;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	res.f6i = rcu_dereference(fn->leaf);
	if (!res.f6i)
		res.f6i = net->ipv6.fib6_null_entry;
	else
		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
				 flags);

	if (res.f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;

		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
		goto out;
	}

	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

	/* Search through the exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);
	} else {
		rt = ip6_create_rt_rcu(&res);
	}

out:
	trace_fib6_table_lookup(net, &res, table, fl6);

	rcu_read_unlock();

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with table->tb6_lock free (it takes the lock
 * itself).  It takes the new route entry; if the addition fails for
 * any reason, the route is released.
 * Caller must hold a dst reference before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}

static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		fib6_info_release(f6i);
		return NULL;
	}

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(res)) {
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(f6i);
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(res->f6i->rt6i_pcpu);
	pcpu_rt = *p;

	if (pcpu_rt)
		ip6_hold_safe(NULL, &pcpu_rt);

	return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);
	if (!pcpu_rt) {
		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(res->f6i->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);
	}

	return pcpu_rt;
}
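
/* The per-cpu slot is only filled from this path, so the cmpxchg() can
 * only observe NULL here (hence the BUG_ON).  If the owning fib6_info
 * is already being destroyed, the destroy path may have missed this
 * freshly installed entry, so the 'from' reference is dropped
 * immediately rather than leaving a dangling backpointer.
 */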

/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct fib6_info *from;
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge the exception completely so the held resources can be
	 * released: some socket (sk) caches may keep the dst around
	 * for an unlimited time
	 */
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	dst_release(&rt6_ex->rt6i->dst);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}

static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		val = jhash(src, sizeof(*src), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
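
/* The hash mixes the full destination (and, with subtrees, the source)
 * with a boot-time random seed, then hash_32() folds the result down to
 * FIB6_EXCEPTION_BUCKET_SIZE_SHIFT bits, i.e. an index into the
 * FIB6_EXCEPTION_BUCKET_SIZE buckets hanging off each fib6_info.
 */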

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

static unsigned int fib6_mtu(const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	unsigned int mtu;

	if (res->f6i->fib6_pmtu) {
		mtu = res->f6i->fib6_pmtu;
	} else {
		struct net_device *dev = nh->fib_nh_dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		mtu = idev->cnf.mtu6;
		rcu_read_unlock();
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}
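
/* A route-level PMTU, when set, wins over the device MTU; either way
 * the result is clamped to IP6_MAX_MTU and reduced by the lightweight
 * tunnel encapsulation headroom, so the caller sees the usable payload
 * MTU.
 */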

static int rt6_insert_exception(struct rt6_info *nrt,
				const struct fib6_result *res)
{
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_info *f6i = res->f6i;
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

	if (f6i->exception_bucket_flushed) {
		err = -EINVAL;
		goto out;
	}

	bucket = rcu_dereference_protected(f6i->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(f6i->rt6i_exception_bucket, bucket);
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only fib6_dst.
	 */
	if (f6i->fib6_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif
	/* rt6_mtu_change() might lower mtu on f6i.
	 * Only insert this exception route if its mtu
	 * is less than f6i's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
		err = -EINVAL;
		goto out;
	}

	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&f6i->fib6_table->tb6_lock);
		fib6_update_sernum(net, f6i);
		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}
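
/* A successful insert bumps the node sernum, which invalidates the
 * cookies of all dsts cached from this subtree: their next
 * ip6_dst_check() fails and forces a fresh route lookup that can find
 * the newly inserted exception.
 */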

void rt6_flush_exceptions(struct fib6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);
	/* Prevent rt6_insert_exception() from recreating the bucket list */
	rt->exception_bucket_flushed = 1;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));
	if (!bucket)
		goto out;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
			rt6_remove_exception(bucket, rt6_ex);
		WARN_ON_ONCE(bucket->depth);
		bucket++;
	}

out:
	spin_unlock_bh(&rt6_exception_lock);
}

/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct rt6_info *ret = NULL;

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * However, the src addr used to create the hash
	 * might not be exactly the passed in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo lookup
	 * if the passed in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
	 */
	if (res->f6i->fib6_src.plen)
		src_key = saddr;
find_ex:
#endif
	bucket = rcu_dereference(res->f6i->rt6i_exception_bucket);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		ret = rt6_ex->rt6i;

#ifdef CONFIG_IPV6_SUBTREES
	/* Use fib6_src as src_key and redo lookup */
	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
		src_key = &res->f6i->fib6_src.addr;
		goto find_ex;
	}
#endif

	return ret;
}

/* Remove the passed in cached rt from the hash table that contains it */
static int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_info *from;
	int err;

	from = rcu_dereference(rt->from);
	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	if (!rcu_access_pointer(from->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->fib6_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_info *from;

	rcu_read_lock();
	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		goto unlock;

	bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->fib6_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket,
					  &rt->rt6i_dst.addr,
					  src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;

unlock:
	rcu_read_unlock();
}

static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */

	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
}

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       struct fib6_info *rt, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));

	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
		}
		bucket++;
	}
}

#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

static void rt6_exceptions_clean_tohost(struct fib6_info *rt,
					struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* We are pruning and obsoleting aged-out and non-gateway
	 * exceptions even if other holders still have references to
	 * them, so that such references can be dropped at the next
	 * dst_check().
	 * RTF_EXPIRES exceptions - e.g. PMTU-generated ones - are
	 * pruned when expired, independently of their aging, as per
	 * RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
			neigh_flags = neigh->flags;

		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}

void rt6_age_exceptions(struct fib6_info *rt,
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}

/* must be called with rcu lock held */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
		      struct flowi6 *fl6, struct fib6_result *res, int strict)
{
	struct fib6_node *fn, *saved_fn;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt6_select(net, fn, oif, res, strict);
	if (res->f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	trace_fib6_table_lookup(net, res, table, fl6);

	return 0;
}

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_result res = {};
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fib6_table_lookup(net, table, oif, fl6, &res, strict);
	if (res.f6i == net->ipv6.fib6_null_entry) {
		rt = net->ipv6.ip6_null_entry;
		rcu_read_unlock();
		dst_hold(&rt->dst);
		return rt;
	}

	fib6_select_path(net, &res, fl6, oif, false, skb, strict);

	/* Search through the exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);

		rcu_read_unlock();
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !res.nh->fib_nh_gw_family)) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree. It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		struct rt6_info *uncached_rt;

		uncached_rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

		rcu_read_unlock();

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

		return uncached_rt;
	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		local_bh_disable();
		pcpu_rt = rt6_get_pcpu_route(&res);

		if (!pcpu_rt)
			pcpu_rt = rt6_make_pcpu_route(net, &res);

		local_bh_enable();
		rcu_read_unlock();

		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
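
/* ip6_pol_route() thus returns one of three flavours: an exception
 * (RTF_CACHE) entry when one matches, an uncached RTF_CACHE clone kept
 * on the per-cpu uncached list for FLOWI_FLAG_KNOWN_NH lookups without
 * a gateway, or the shared per-cpu copy of the FIB entry in the common
 * case.
 */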

static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}

/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash;

	switch (ip6_multipath_hash_policy(net)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	return mhash >> 1;
}
2091
2092 void ip6_route_input(struct sk_buff *skb)
2093 {
2094 const struct ipv6hdr *iph = ipv6_hdr(skb);
2095 struct net *net = dev_net(skb->dev);
2096 int flags = RT6_LOOKUP_F_HAS_SADDR;
2097 struct ip_tunnel_info *tun_info;
2098 struct flowi6 fl6 = {
2099 .flowi6_iif = skb->dev->ifindex,
2100 .daddr = iph->daddr,
2101 .saddr = iph->saddr,
2102 .flowlabel = ip6_flowinfo(iph),
2103 .flowi6_mark = skb->mark,
2104 .flowi6_proto = iph->nexthdr,
2105 };
2106 struct flow_keys *flkeys = NULL, _flkeys;
2107
2108 tun_info = skb_tunnel_info(skb);
2109 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2110 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2111
2112 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2113 flkeys = &_flkeys;
2114
2115 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2116 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2117 skb_dst_drop(skb);
2118 skb_dst_set(skb,
2119 ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
2120 }
2121
2122 static struct rt6_info *ip6_pol_route_output(struct net *net,
2123 struct fib6_table *table,
2124 struct flowi6 *fl6,
2125 const struct sk_buff *skb,
2126 int flags)
2127 {
2128 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2129 }
2130
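/* Route lookup for locally generated traffic. Link-local and multicast
 * destinations are first offered to an L3 master device; otherwise the
 * iif is pinned to loopback and RT6_LOOKUP_F_IFACE is set whenever the
 * caller constrains the egress (bound socket, strict destination, or
 * an oif with a yet-unspecified source address).
 */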
2131 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
2132 struct flowi6 *fl6, int flags)
2133 {
2134 bool any_src;
2135
2136 if (ipv6_addr_type(&fl6->daddr) &
2137 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2138 struct dst_entry *dst;
2139
2140 dst = l3mdev_link_scope_lookup(net, fl6);
2141 if (dst)
2142 return dst;
2143 }
2144
2145 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2146
2147 any_src = ipv6_addr_any(&fl6->saddr);
2148 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2149 (fl6->flowi6_oif && any_src))
2150 flags |= RT6_LOOKUP_F_IFACE;
2151
2152 if (!any_src)
2153 flags |= RT6_LOOKUP_F_HAS_SADDR;
2154 else if (sk)
2155 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2156
2157 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2158 }
2159 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2160
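/* Clone @dst_orig into a blackhole dst that silently discards anything
 * sent through it while keeping the original metrics, as used e.g. by
 * the xfrm layer while a flow cannot be transmitted yet. @dst_orig is
 * released regardless of whether the clone could be allocated.
 */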
2161 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2162 {
2163 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2164 struct net_device *loopback_dev = net->loopback_dev;
2165 struct dst_entry *new = NULL;
2166
2167 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2168 DST_OBSOLETE_DEAD, 0);
2169 if (rt) {
2170 rt6_info_init(rt);
2171 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2172
2173 new = &rt->dst;
2174 new->__use = 1;
2175 new->input = dst_discard;
2176 new->output = dst_discard_out;
2177
2178 dst_copy_metrics(new, &ort->dst);
2179
2180 rt->rt6i_idev = in6_dev_get(loopback_dev);
2181 rt->rt6i_gateway = ort->rt6i_gateway;
2182 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2183
2184 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2185 #ifdef CONFIG_IPV6_SUBTREES
2186 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2187 #endif
2188 }
2189
2190 dst_release(dst_orig);
2191 return new ? new : ERR_PTR(-ENOMEM);
2192 }
2193
2194 /*
2195 * Destination cache support functions
2196 */
2197
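/* A cached dst stays valid only while the fib6 tree it was derived
 * from is unchanged: fib6_get_cookie_safe() snapshots the tree's
 * serial number into a cookie when the dst is handed out, and the
 * helpers below compare that cookie against the current value.
 */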
2198 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2199 {
2200 u32 rt_cookie = 0;
2201
2202 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2203 return false;
2204
2205 if (fib6_check_expired(f6i))
2206 return false;
2207
2208 return true;
2209 }
2210
2211 static struct dst_entry *rt6_check(struct rt6_info *rt,
2212 struct fib6_info *from,
2213 u32 cookie)
2214 {
2215 u32 rt_cookie = 0;
2216
2217 if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
2218 rt_cookie != cookie)
2219 return NULL;
2220
2221 if (rt6_check_expired(rt))
2222 return NULL;
2223
2224 return &rt->dst;
2225 }
2226
2227 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2228 struct fib6_info *from,
2229 u32 cookie)
2230 {
2231 if (!__rt6_check_expired(rt) &&
2232 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2233 fib6_check(from, cookie))
2234 return &rt->dst;
2235 else
2236 return NULL;
2237 }
2238
2239 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2240 {
2241 struct dst_entry *dst_ret;
2242 struct fib6_info *from;
2243 struct rt6_info *rt;
2244
2245 rt = container_of(dst, struct rt6_info, dst);
2246
2247 rcu_read_lock();
2248
2249 /* All IPV6 dsts are created with ->obsolete set to
2250 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
2251 * down into this function.
2252 */
2253
2254 from = rcu_dereference(rt->from);
2255
2256 if (from && (rt->rt6i_flags & RTF_PCPU ||
2257 unlikely(!list_empty(&rt->rt6i_uncached))))
2258 dst_ret = rt6_dst_from_check(rt, from, cookie);
2259 else
2260 dst_ret = rt6_check(rt, from, cookie);
2261
2262 rcu_read_unlock();
2263
2264 return dst_ret;
2265 }
2266
2267 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2268 {
2269 struct rt6_info *rt = (struct rt6_info *) dst;
2270
2271 if (rt) {
2272 if (rt->rt6i_flags & RTF_CACHE) {
2273 rcu_read_lock();
2274 if (rt6_check_expired(rt)) {
2275 rt6_remove_exception_rt(rt);
2276 dst = NULL;
2277 }
2278 rcu_read_unlock();
2279 } else {
2280 dst_release(dst);
2281 dst = NULL;
2282 }
2283 }
2284 return dst;
2285 }
2286
2287 static void ip6_link_failure(struct sk_buff *skb)
2288 {
2289 struct rt6_info *rt;
2290
2291 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2292
2293 rt = (struct rt6_info *) skb_dst(skb);
2294 if (rt) {
2295 rcu_read_lock();
2296 if (rt->rt6i_flags & RTF_CACHE) {
2297 rt6_remove_exception_rt(rt);
2298 } else {
2299 struct fib6_info *from;
2300 struct fib6_node *fn;
2301
2302 from = rcu_dereference(rt->from);
2303 if (from) {
2304 fn = rcu_dereference(from->fib6_node);
2305 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2306 fn->fn_sernum = -1;
2307 }
2308 }
2309 rcu_read_unlock();
2310 }
2311 }
2312
2313 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2314 {
2315 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2316 struct fib6_info *from;
2317
2318 rcu_read_lock();
2319 from = rcu_dereference(rt0->from);
2320 if (from)
2321 rt0->dst.expires = from->expires;
2322 rcu_read_unlock();
2323 }
2324
2325 dst_set_expires(&rt0->dst, timeout);
2326 rt0->rt6i_flags |= RTF_EXPIRES;
2327 }
2328
2329 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2330 {
2331 struct net *net = dev_net(rt->dst.dev);
2332
2333 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2334 rt->rt6i_flags |= RTF_MODIFIED;
2335 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2336 }
2337
2338 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2339 {
2340 return !(rt->rt6i_flags & RTF_CACHE) &&
2341 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2342 }
2343
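/* Core PMTU update. The new MTU is clamped to at least IPV6_MIN_MTU
 * (1280 bytes) and only ever lowers the current path MTU. Cached
 * routes are updated in place; for routes still owned by the fib6
 * tree an RTF_CACHE exception clone is created so the learned MTU
 * does not leak into the shared entry.
 */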
2344 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2345 const struct ipv6hdr *iph, u32 mtu)
2346 {
2347 const struct in6_addr *daddr, *saddr;
2348 struct rt6_info *rt6 = (struct rt6_info *)dst;
2349
2350 if (dst_metric_locked(dst, RTAX_MTU))
2351 return;
2352
2353 if (iph) {
2354 daddr = &iph->daddr;
2355 saddr = &iph->saddr;
2356 } else if (sk) {
2357 daddr = &sk->sk_v6_daddr;
2358 saddr = &inet6_sk(sk)->saddr;
2359 } else {
2360 daddr = NULL;
2361 saddr = NULL;
2362 }
2363 dst_confirm_neigh(dst, daddr);
2364 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2365 if (mtu >= dst_mtu(dst))
2366 return;
2367
2368 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2369 rt6_do_update_pmtu(rt6, mtu);
2370 /* update rt6_ex->stamp for cache */
2371 if (rt6->rt6i_flags & RTF_CACHE)
2372 rt6_update_exception_stamp_rt(rt6);
2373 } else if (daddr) {
2374 struct fib6_result res = {};
2375 struct rt6_info *nrt6;
2376
2377 rcu_read_lock();
2378 res.f6i = rcu_dereference(rt6->from);
2379 if (!res.f6i) {
2380 rcu_read_unlock();
2381 return;
2382 }
2383 res.nh = &res.f6i->fib6_nh;
2384 res.fib6_flags = res.f6i->fib6_flags;
2385 res.fib6_type = res.f6i->fib6_type;
2386
2387 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2388 if (nrt6) {
2389 rt6_do_update_pmtu(nrt6, mtu);
2390 if (rt6_insert_exception(nrt6, &res))
2391 dst_release_immediate(&nrt6->dst);
2392 }
2393 rcu_read_unlock();
2394 }
2395 }
2396
2397 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2398 struct sk_buff *skb, u32 mtu)
2399 {
2400 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2401 }
2402
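/* Update the PMTU toward the destination of the packet in @skb. Note
 * that @mtu arrives in network byte order, typically as carried in an
 * ICMPv6 Packet Too Big message, hence the ntohl() before use.
 */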
2403 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2404 int oif, u32 mark, kuid_t uid)
2405 {
2406 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2407 struct dst_entry *dst;
2408 struct flowi6 fl6 = {
2409 .flowi6_oif = oif,
2410 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2411 .daddr = iph->daddr,
2412 .saddr = iph->saddr,
2413 .flowlabel = ip6_flowinfo(iph),
2414 .flowi6_uid = uid,
2415 };
2416
2417 dst = ip6_route_output(net, NULL, &fl6);
2418 if (!dst->error)
2419 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
2420 dst_release(dst);
2421 }
2422 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2423
2424 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2425 {
2426 int oif = sk->sk_bound_dev_if;
2427 struct dst_entry *dst;
2428
2429 if (!oif && skb->dev)
2430 oif = l3mdev_master_ifindex(skb->dev);
2431
2432 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2433
2434 dst = __sk_dst_get(sk);
2435 if (!dst || !dst->obsolete ||
2436 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2437 return;
2438
2439 bh_lock_sock(sk);
2440 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2441 ip6_datagram_dst_update(sk, false);
2442 bh_unlock_sock(sk);
2443 }
2444 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2445
2446 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2447 const struct flowi6 *fl6)
2448 {
2449 #ifdef CONFIG_IPV6_SUBTREES
2450 struct ipv6_pinfo *np = inet6_sk(sk);
2451 #endif
2452
2453 ip6_dst_store(sk, dst,
2454 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2455 &sk->sk_v6_daddr : NULL,
2456 #ifdef CONFIG_IPV6_SUBTREES
2457 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2458 &np->saddr :
2459 #endif
2460 NULL);
2461 }
2462
2463 static bool ip6_redirect_nh_match(const struct fib6_result *res,
2464 struct flowi6 *fl6,
2465 const struct in6_addr *gw,
2466 struct rt6_info **ret)
2467 {
2468 const struct fib6_nh *nh = res->nh;
2469
2470 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
2471 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
2472 return false;
2473
2474 /* rt_cache's gateway might be different from its 'parent'
2475 * in the case of an ip redirect.
2476 * So we keep searching in the exception table if the gateway
2477 * is different.
2478 */
2479 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
2480 struct rt6_info *rt_cache;
2481
2482 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
2483 if (rt_cache &&
2484 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
2485 *ret = rt_cache;
2486 return true;
2487 }
2488 return false;
2489 }
2490 return true;
2491 }
2492
2493 /* Handle redirects */
2494 struct ip6rd_flowi {
2495 struct flowi6 fl6;
2496 struct in6_addr gateway;
2497 };
2498
2499 static struct rt6_info *__ip6_route_redirect(struct net *net,
2500 struct fib6_table *table,
2501 struct flowi6 *fl6,
2502 const struct sk_buff *skb,
2503 int flags)
2504 {
2505 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2506 struct rt6_info *ret = NULL;
2507 struct fib6_result res = {};
2508 struct fib6_info *rt;
2509 struct fib6_node *fn;
2510
2511 /* Get the "current" route for this destination and
2512 * check if the redirect has come from an appropriate router.
2513 *
2514 * RFC 4861 specifies that redirects should only be
2515 * accepted if they come from the nexthop to the target.
2516 * Due to the way the routes are chosen, this notion
2517 * is a bit fuzzy and one might need to check all possible
2518 * routes.
2519 */
2520
2521 rcu_read_lock();
2522 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2523 restart:
2524 for_each_fib6_node_rt_rcu(fn) {
2525 res.f6i = rt;
2526 res.nh = &rt->fib6_nh;
2527
2528 if (fib6_check_expired(rt))
2529 continue;
2530 if (rt->fib6_flags & RTF_REJECT)
2531 break;
2532 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway, &ret))
2533 goto out;
2534 }
2535
2536 if (!rt)
2537 rt = net->ipv6.fib6_null_entry;
2538 else if (rt->fib6_flags & RTF_REJECT) {
2539 ret = net->ipv6.ip6_null_entry;
2540 goto out;
2541 }
2542
2543 if (rt == net->ipv6.fib6_null_entry) {
2544 fn = fib6_backtrack(fn, &fl6->saddr);
2545 if (fn)
2546 goto restart;
2547 }
2548
2549 res.f6i = rt;
2550 res.nh = &rt->fib6_nh;
2551 out:
2552 if (ret) {
2553 ip6_hold_safe(net, &ret);
2554 } else {
2555 res.fib6_flags = res.f6i->fib6_flags;
2556 res.fib6_type = res.f6i->fib6_type;
2557 ret = ip6_create_rt_rcu(&res);
2558 }
2559
2560 rcu_read_unlock();
2561
2562 trace_fib6_table_lookup(net, &res, table, fl6);
2563 return ret;
2564 }
2565
2566 static struct dst_entry *ip6_route_redirect(struct net *net,
2567 const struct flowi6 *fl6,
2568 const struct sk_buff *skb,
2569 const struct in6_addr *gateway)
2570 {
2571 int flags = RT6_LOOKUP_F_HAS_SADDR;
2572 struct ip6rd_flowi rdfl;
2573
2574 rdfl.fl6 = *fl6;
2575 rdfl.gateway = *gateway;
2576
2577 return fib6_rule_lookup(net, &rdfl.fl6, skb,
2578 flags, __ip6_route_redirect);
2579 }
2580
2581 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2582 kuid_t uid)
2583 {
2584 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2585 struct dst_entry *dst;
2586 struct flowi6 fl6 = {
2587 .flowi6_iif = LOOPBACK_IFINDEX,
2588 .flowi6_oif = oif,
2589 .flowi6_mark = mark,
2590 .daddr = iph->daddr,
2591 .saddr = iph->saddr,
2592 .flowlabel = ip6_flowinfo(iph),
2593 .flowi6_uid = uid,
2594 };
2595
2596 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
2597 rt6_do_redirect(dst, NULL, skb);
2598 dst_release(dst);
2599 }
2600 EXPORT_SYMBOL_GPL(ip6_redirect);
2601
2602 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
2603 {
2604 const struct ipv6hdr *iph = ipv6_hdr(skb);
2605 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
2606 struct dst_entry *dst;
2607 struct flowi6 fl6 = {
2608 .flowi6_iif = LOOPBACK_IFINDEX,
2609 .flowi6_oif = oif,
2610 .daddr = msg->dest,
2611 .saddr = iph->daddr,
2612 .flowi6_uid = sock_net_uid(net, NULL),
2613 };
2614
2615 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
2616 rt6_do_redirect(dst, NULL, skb);
2617 dst_release(dst);
2618 }
2619
2620 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2621 {
2622 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2623 sk->sk_uid);
2624 }
2625 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2626
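/* Advertised MSS: path MTU minus the fixed IPv6 and TCP header sizes,
 * clamped below by the ip6_rt_min_advmss sysctl and above by
 * IPV6_MAXPLEN. For a standard 1500-byte Ethernet MTU this yields
 * 1500 - 40 - 20 = 1440 bytes.
 */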
2627 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
2628 {
2629 struct net_device *dev = dst->dev;
2630 unsigned int mtu = dst_mtu(dst);
2631 struct net *net = dev_net(dev);
2632
2633 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
2634
2635 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
2636 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
2637
2638 /*
2639 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
2640 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
2641 * IPV6_MAXPLEN is also valid and means: "any MSS,
2642 * rely only on pmtu discovery"
2643 */
2644 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
2645 mtu = IPV6_MAXPLEN;
2646 return mtu;
2647 }
2648
2649 static unsigned int ip6_mtu(const struct dst_entry *dst)
2650 {
2651 struct inet6_dev *idev;
2652 unsigned int mtu;
2653
2654 mtu = dst_metric_raw(dst, RTAX_MTU);
2655 if (mtu)
2656 goto out;
2657
2658 mtu = IPV6_MIN_MTU;
2659
2660 rcu_read_lock();
2661 idev = __in6_dev_get(dst->dev);
2662 if (idev)
2663 mtu = idev->cnf.mtu6;
2664 rcu_read_unlock();
2665
2666 out:
2667 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2668
2669 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
2670 }
2671
2672 /* MTU selection:
2673 * 1. mtu on route is locked - use it
2674 * 2. mtu from nexthop exception
2675 * 3. mtu from egress device
2676 *
2677 * based on ip6_dst_mtu_forward and exception logic of
2678 * rt6_find_cached_rt; called with rcu_read_lock
2679 */
2680 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
2681 const struct in6_addr *daddr,
2682 const struct in6_addr *saddr)
2683 {
2684 const struct fib6_nh *nh = res->nh;
2685 struct fib6_info *f6i = res->f6i;
2686 struct inet6_dev *idev;
2687 struct rt6_info *rt;
2688 u32 mtu = 0;
2689
2690 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
2691 mtu = f6i->fib6_pmtu;
2692 if (mtu)
2693 goto out;
2694 }
2695
2696 rt = rt6_find_cached_rt(res, daddr, saddr);
2697 if (unlikely(rt)) {
2698 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
2699 } else {
2700 struct net_device *dev = nh->fib_nh_dev;
2701
2702 mtu = IPV6_MIN_MTU;
2703 idev = __in6_dev_get(dev);
2704 if (idev && idev->cnf.mtu6 > mtu)
2705 mtu = idev->cnf.mtu6;
2706 }
2707
2708 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2709 out:
2710 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
2711 }
2712
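/* Allocate an ephemeral dst for locally generated ICMPv6 traffic. The
 * entry is never inserted into the fib6 tree; it lives on the uncached
 * list instead, so that rt6_disable_ip() can still find and release it
 * when its device goes away.
 */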
2713 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
2714 struct flowi6 *fl6)
2715 {
2716 struct dst_entry *dst;
2717 struct rt6_info *rt;
2718 struct inet6_dev *idev = in6_dev_get(dev);
2719 struct net *net = dev_net(dev);
2720
2721 if (unlikely(!idev))
2722 return ERR_PTR(-ENODEV);
2723
2724 rt = ip6_dst_alloc(net, dev, 0);
2725 if (unlikely(!rt)) {
2726 in6_dev_put(idev);
2727 dst = ERR_PTR(-ENOMEM);
2728 goto out;
2729 }
2730
2731 rt->dst.flags |= DST_HOST;
2732 rt->dst.input = ip6_input;
2733 rt->dst.output = ip6_output;
2734 rt->rt6i_gateway = fl6->daddr;
2735 rt->rt6i_dst.addr = fl6->daddr;
2736 rt->rt6i_dst.plen = 128;
2737 rt->rt6i_idev = idev;
2738 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
2739
2740 /* Add this dst into uncached_list so that rt6_disable_ip() can
2741 * do proper release of the net_device
2742 */
2743 rt6_uncached_list_add(rt);
2744 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2745
2746 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
2747
2748 out:
2749 return dst;
2750 }
2751
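/* dst garbage collection, tuned by the ip6_rt_gc_* sysctls: the run is
 * skipped entirely while we are within the minimum interval and below
 * rt_max_size; otherwise fib6_run_gc() is invoked with a timeout that
 * grows on every call and decays by the elasticity factor afterwards.
 */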
2752 static int ip6_dst_gc(struct dst_ops *ops)
2753 {
2754 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
2755 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
2756 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
2757 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2758 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2759 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
2760 int entries;
2761
2762 entries = dst_entries_get_fast(ops);
2763 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
2764 entries <= rt_max_size)
2765 goto out;
2766
2767 net->ipv6.ip6_rt_gc_expire++;
2768 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
2769 entries = dst_entries_get_slow(ops);
2770 if (entries < ops->gc_thresh)
2771 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;
2772 out:
2773 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
2774 return entries > rt_max_size;
2775 }
2776
2777 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2778 struct fib6_config *cfg,
2779 const struct in6_addr *gw_addr,
2780 u32 tbid, int flags)
2781 {
2782 struct flowi6 fl6 = {
2783 .flowi6_oif = cfg->fc_ifindex,
2784 .daddr = *gw_addr,
2785 .saddr = cfg->fc_prefsrc,
2786 };
2787 struct fib6_table *table;
2788 struct rt6_info *rt;
2789
2790 table = fib6_get_table(net, tbid);
2791 if (!table)
2792 return NULL;
2793
2794 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2795 flags |= RT6_LOOKUP_F_HAS_SADDR;
2796
2797 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
2798 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
2799
2800 /* if table lookup failed, fall back to full lookup */
2801 if (rt == net->ipv6.ip6_null_entry) {
2802 ip6_rt_put(rt);
2803 rt = NULL;
2804 }
2805
2806 return rt;
2807 }
2808
2809 static int ip6_route_check_nh_onlink(struct net *net,
2810 struct fib6_config *cfg,
2811 const struct net_device *dev,
2812 struct netlink_ext_ack *extack)
2813 {
2814 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
2815 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2816 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
2817 struct fib6_info *from;
2818 struct rt6_info *grt;
2819 int err;
2820
2821 err = 0;
2822 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
2823 if (grt) {
2824 rcu_read_lock();
2825 from = rcu_dereference(grt->from);
2826 if (!grt->dst.error &&
2827 /* ignore match if it is the default route */
2828 from && !ipv6_addr_any(&from->fib6_dst.addr) &&
2829 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
2830 NL_SET_ERR_MSG(extack,
2831 "Nexthop has invalid gateway or device mismatch");
2832 err = -EINVAL;
2833 }
2834 rcu_read_unlock();
2835
2836 ip6_rt_put(grt);
2837 }
2838
2839 return err;
2840 }
2841
2842 static int ip6_route_check_nh(struct net *net,
2843 struct fib6_config *cfg,
2844 struct net_device **_dev,
2845 struct inet6_dev **idev)
2846 {
2847 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2848 struct net_device *dev = _dev ? *_dev : NULL;
2849 struct rt6_info *grt = NULL;
2850 int err = -EHOSTUNREACH;
2851
2852 if (cfg->fc_table) {
2853 int flags = RT6_LOOKUP_F_IFACE;
2854
2855 grt = ip6_nh_lookup_table(net, cfg, gw_addr,
2856 cfg->fc_table, flags);
2857 if (grt) {
2858 if (grt->rt6i_flags & RTF_GATEWAY ||
2859 (dev && dev != grt->dst.dev)) {
2860 ip6_rt_put(grt);
2861 grt = NULL;
2862 }
2863 }
2864 }
2865
2866 if (!grt)
2867 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
2868
2869 if (!grt)
2870 goto out;
2871
2872 if (dev) {
2873 if (dev != grt->dst.dev) {
2874 ip6_rt_put(grt);
2875 goto out;
2876 }
2877 } else {
2878 *_dev = dev = grt->dst.dev;
2879 *idev = grt->rt6i_idev;
2880 dev_hold(dev);
2881 in6_dev_hold(grt->rt6i_idev);
2882 }
2883
2884 if (!(grt->rt6i_flags & RTF_GATEWAY))
2885 err = 0;
2886
2887 ip6_rt_put(grt);
2888
2889 out:
2890 return err;
2891 }
2892
2893 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
2894 struct net_device **_dev, struct inet6_dev **idev,
2895 struct netlink_ext_ack *extack)
2896 {
2897 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2898 int gwa_type = ipv6_addr_type(gw_addr);
2899 bool skip_dev = !(gwa_type & IPV6_ADDR_LINKLOCAL);
2900 const struct net_device *dev = *_dev;
2901 bool need_addr_check = !dev;
2902 int err = -EINVAL;
2903
2904 /* If gw_addr is local we will fail to detect this here if the
2905 * address is still TENTATIVE (DAD in progress): rt6_lookup()
2906 * will return the already-added prefix route via the interface
2907 * the prefix route was assigned to, which might be non-loopback.
2908 */
2909 if (dev &&
2910 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
2911 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
2912 goto out;
2913 }
2914
2915 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
2916 /* IPv6 strictly inhibits using non-link-local
2917 * addresses as the nexthop address.
2918 * Otherwise, a router will not be able to send redirects.
2919 * That is very good, but in some (rare!) circumstances
2920 * (SIT, PtP, NBMA NOARP links) it is handy to allow
2921 * some exceptions. --ANK
2922 * We also allow IPv4-mapped nexthops to support RFC4798-style
2923 * addressing.
2924 */
2925 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
2926 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2927 goto out;
2928 }
2929
2930 if (cfg->fc_flags & RTNH_F_ONLINK)
2931 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
2932 else
2933 err = ip6_route_check_nh(net, cfg, _dev, idev);
2934
2935 if (err)
2936 goto out;
2937 }
2938
2939 /* reload in case device was changed */
2940 dev = *_dev;
2941
2942 err = -EINVAL;
2943 if (!dev) {
2944 NL_SET_ERR_MSG(extack, "Egress device not specified");
2945 goto out;
2946 } else if (dev->flags & IFF_LOOPBACK) {
2947 NL_SET_ERR_MSG(extack,
2948 "Egress device can not be loopback device for this route");
2949 goto out;
2950 }
2951
2952 /* if we did not check gw_addr above, do so now that the
2953 * egress device has been resolved.
2954 */
2955 if (need_addr_check &&
2956 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
2957 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
2958 goto out;
2959 }
2960
2961 err = 0;
2962 out:
2963 return err;
2964 }
2965
2966 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
2967 {
2968 if ((flags & RTF_REJECT) ||
2969 (dev && (dev->flags & IFF_LOOPBACK) &&
2970 !(addr_type & IPV6_ADDR_LOOPBACK) &&
2971 !(flags & RTF_LOCAL)))
2972 return true;
2973
2974 return false;
2975 }
2976
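/* Resolve and validate a nexthop from a userspace route request: look
 * up the egress device, enforce onlink/gateway semantics via
 * ip6_validate_gw(), demote impossible loopback routes to reject
 * routes, and initialise the common nexthop fields (weight, flags,
 * lwtunnel state). On failure, every reference taken here is dropped.
 */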
2977 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
2978 struct fib6_config *cfg, gfp_t gfp_flags,
2979 struct netlink_ext_ack *extack)
2980 {
2981 struct net_device *dev = NULL;
2982 struct inet6_dev *idev = NULL;
2983 int addr_type;
2984 int err;
2985
2986 fib6_nh->fib_nh_family = AF_INET6;
2987
2988 err = -ENODEV;
2989 if (cfg->fc_ifindex) {
2990 dev = dev_get_by_index(net, cfg->fc_ifindex);
2991 if (!dev)
2992 goto out;
2993 idev = in6_dev_get(dev);
2994 if (!idev)
2995 goto out;
2996 }
2997
2998 if (cfg->fc_flags & RTNH_F_ONLINK) {
2999 if (!dev) {
3000 NL_SET_ERR_MSG(extack,
3001 "Nexthop device required for onlink");
3002 goto out;
3003 }
3004
3005 if (!(dev->flags & IFF_UP)) {
3006 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3007 err = -ENETDOWN;
3008 goto out;
3009 }
3010
3011 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3012 }
3013
3014 fib6_nh->fib_nh_weight = 1;
3015
3016 /* We cannot add true routes via loopback here;
3017 * they would result in kernel looping. Promote them to reject routes.
3018 */
3019 addr_type = ipv6_addr_type(&cfg->fc_dst);
3020 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3021 /* hold loopback dev/idev if we haven't done so. */
3022 if (dev != net->loopback_dev) {
3023 if (dev) {
3024 dev_put(dev);
3025 in6_dev_put(idev);
3026 }
3027 dev = net->loopback_dev;
3028 dev_hold(dev);
3029 idev = in6_dev_get(dev);
3030 if (!idev) {
3031 err = -ENODEV;
3032 goto out;
3033 }
3034 }
3035 goto set_dev;
3036 }
3037
3038 if (cfg->fc_flags & RTF_GATEWAY) {
3039 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3040 if (err)
3041 goto out;
3042
3043 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3044 fib6_nh->fib_nh_gw_family = AF_INET6;
3045 }
3046
3047 err = -ENODEV;
3048 if (!dev)
3049 goto out;
3050
3051 if (idev->cnf.disable_ipv6) {
3052 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3053 err = -EACCES;
3054 goto out;
3055 }
3056
3057 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3058 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3059 err = -ENETDOWN;
3060 goto out;
3061 }
3062
3063 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3064 !netif_carrier_ok(dev))
3065 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3066
3067 err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
3068 cfg->fc_encap_type, cfg, gfp_flags, extack);
3069 if (err)
3070 goto out;
3071 set_dev:
3072 fib6_nh->fib_nh_dev = dev;
3073 fib6_nh->fib_nh_oif = dev->ifindex;
3074 err = 0;
3075 out:
3076 if (idev)
3077 in6_dev_put(idev);
3078
3079 if (err) {
3080 lwtstate_put(fib6_nh->fib_nh_lws);
3081 fib6_nh->fib_nh_lws = NULL;
3082 if (dev)
3083 dev_put(dev);
3084 }
3085
3086 return err;
3087 }
3088
3089 void fib6_nh_release(struct fib6_nh *fib6_nh)
3090 {
3091 fib_nh_common_release(&fib6_nh->nh_common);
3092 }
3093
3094 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3095 gfp_t gfp_flags,
3096 struct netlink_ext_ack *extack)
3097 {
3098 struct net *net = cfg->fc_nlinfo.nl_net;
3099 struct fib6_info *rt = NULL;
3100 struct fib6_table *table;
3101 int err = -EINVAL;
3102 int addr_type;
3103
3104 /* RTF_PCPU is an internal flag; it cannot be set by userspace */
3105 if (cfg->fc_flags & RTF_PCPU) {
3106 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3107 goto out;
3108 }
3109
3110 /* RTF_CACHE is an internal flag; it cannot be set by userspace */
3111 if (cfg->fc_flags & RTF_CACHE) {
3112 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3113 goto out;
3114 }
3115
3116 if (cfg->fc_type > RTN_MAX) {
3117 NL_SET_ERR_MSG(extack, "Invalid route type");
3118 goto out;
3119 }
3120
3121 if (cfg->fc_dst_len > 128) {
3122 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3123 goto out;
3124 }
3125 if (cfg->fc_src_len > 128) {
3126 NL_SET_ERR_MSG(extack, "Invalid source address length");
3127 goto out;
3128 }
3129 #ifndef CONFIG_IPV6_SUBTREES
3130 if (cfg->fc_src_len) {
3131 NL_SET_ERR_MSG(extack,
3132 "Specifying source address requires IPV6_SUBTREES to be enabled");
3133 goto out;
3134 }
3135 #endif
3136
3137 err = -ENOBUFS;
3138 if (cfg->fc_nlinfo.nlh &&
3139 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3140 table = fib6_get_table(net, cfg->fc_table);
3141 if (!table) {
3142 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3143 table = fib6_new_table(net, cfg->fc_table);
3144 }
3145 } else {
3146 table = fib6_new_table(net, cfg->fc_table);
3147 }
3148
3149 if (!table)
3150 goto out;
3151
3152 err = -ENOMEM;
3153 rt = fib6_info_alloc(gfp_flags);
3154 if (!rt)
3155 goto out;
3156
3157 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3158 extack);
3159 if (IS_ERR(rt->fib6_metrics)) {
3160 err = PTR_ERR(rt->fib6_metrics);
3161 /* Do not leave garbage there. */
3162 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3163 goto out;
3164 }
3165
3166 if (cfg->fc_flags & RTF_ADDRCONF)
3167 rt->dst_nocount = true;
3168
3169 if (cfg->fc_flags & RTF_EXPIRES)
3170 fib6_set_expires(rt, jiffies +
3171 clock_t_to_jiffies(cfg->fc_expires));
3172 else
3173 fib6_clean_expires(rt);
3174
3175 if (cfg->fc_protocol == RTPROT_UNSPEC)
3176 cfg->fc_protocol = RTPROT_BOOT;
3177 rt->fib6_protocol = cfg->fc_protocol;
3178
3179 rt->fib6_table = table;
3180 rt->fib6_metric = cfg->fc_metric;
3181 rt->fib6_type = cfg->fc_type;
3182 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3183
3184 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3185 rt->fib6_dst.plen = cfg->fc_dst_len;
3186 if (rt->fib6_dst.plen == 128)
3187 rt->dst_host = true;
3188
3189 #ifdef CONFIG_IPV6_SUBTREES
3190 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3191 rt->fib6_src.plen = cfg->fc_src_len;
3192 #endif
3193 err = fib6_nh_init(net, &rt->fib6_nh, cfg, gfp_flags, extack);
3194 if (err)
3195 goto out;
3196
3197 /* We cannot add true routes via loopback here;
3198 * they would result in kernel looping. Promote them to reject routes.
3199 */
3200 addr_type = ipv6_addr_type(&cfg->fc_dst);
3201 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh.fib_nh_dev, addr_type))
3202 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3203
3204 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3205 struct net_device *dev = fib6_info_nh_dev(rt);
3206
3207 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3208 NL_SET_ERR_MSG(extack, "Invalid source address");
3209 err = -EINVAL;
3210 goto out;
3211 }
3212 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3213 rt->fib6_prefsrc.plen = 128;
3214 } else
3215 rt->fib6_prefsrc.plen = 0;
3216
3217 return rt;
3218 out:
3219 fib6_info_release(rt);
3220 return ERR_PTR(err);
3221 }
3222
3223 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3224 struct netlink_ext_ack *extack)
3225 {
3226 struct fib6_info *rt;
3227 int err;
3228
3229 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3230 if (IS_ERR(rt))
3231 return PTR_ERR(rt);
3232
3233 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3234 fib6_info_release(rt);
3235
3236 return err;
3237 }
3238
3239 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3240 {
3241 struct net *net = info->nl_net;
3242 struct fib6_table *table;
3243 int err;
3244
3245 if (rt == net->ipv6.fib6_null_entry) {
3246 err = -ENOENT;
3247 goto out;
3248 }
3249
3250 table = rt->fib6_table;
3251 spin_lock_bh(&table->tb6_lock);
3252 err = fib6_del(rt, info);
3253 spin_unlock_bh(&table->tb6_lock);
3254
3255 out:
3256 fib6_info_release(rt);
3257 return err;
3258 }
3259
3260 int ip6_del_rt(struct net *net, struct fib6_info *rt)
3261 {
3262 struct nl_info info = { .nl_net = net };
3263
3264 return __ip6_del_rt(rt, &info);
3265 }
3266
3267 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3268 {
3269 struct nl_info *info = &cfg->fc_nlinfo;
3270 struct net *net = info->nl_net;
3271 struct sk_buff *skb = NULL;
3272 struct fib6_table *table;
3273 int err = -ENOENT;
3274
3275 if (rt == net->ipv6.fib6_null_entry)
3276 goto out_put;
3277 table = rt->fib6_table;
3278 spin_lock_bh(&table->tb6_lock);
3279
3280 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3281 struct fib6_info *sibling, *next_sibling;
3282
3283 /* prefer to send a single notification with all hops */
3284 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3285 if (skb) {
3286 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3287
3288 if (rt6_fill_node(net, skb, rt, NULL,
3289 NULL, NULL, 0, RTM_DELROUTE,
3290 info->portid, seq, 0) < 0) {
3291 kfree_skb(skb);
3292 skb = NULL;
3293 } else
3294 info->skip_notify = 1;
3295 }
3296
3297 list_for_each_entry_safe(sibling, next_sibling,
3298 &rt->fib6_siblings,
3299 fib6_siblings) {
3300 err = fib6_del(sibling, info);
3301 if (err)
3302 goto out_unlock;
3303 }
3304 }
3305
3306 err = fib6_del(rt, info);
3307 out_unlock:
3308 spin_unlock_bh(&table->tb6_lock);
3309 out_put:
3310 fib6_info_release(rt);
3311
3312 if (skb) {
3313 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3314 info->nlh, gfp_any());
3315 }
3316 return err;
3317 }
3318
3319 static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3320 {
3321 int rc = -ESRCH;
3322
3323 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3324 goto out;
3325
3326 if (cfg->fc_flags & RTF_GATEWAY &&
3327 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3328 goto out;
3329
3330 rc = rt6_remove_exception_rt(rt);
3331 out:
3332 return rc;
3333 }
3334
3335 static int ip6_route_del(struct fib6_config *cfg,
3336 struct netlink_ext_ack *extack)
3337 {
3338 struct rt6_info *rt_cache;
3339 struct fib6_table *table;
3340 struct fib6_info *rt;
3341 struct fib6_node *fn;
3342 int err = -ESRCH;
3343
3344 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3345 if (!table) {
3346 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3347 return err;
3348 }
3349
3350 rcu_read_lock();
3351
3352 fn = fib6_locate(&table->tb6_root,
3353 &cfg->fc_dst, cfg->fc_dst_len,
3354 &cfg->fc_src, cfg->fc_src_len,
3355 !(cfg->fc_flags & RTF_CACHE));
3356
3357 if (fn) {
3358 for_each_fib6_node_rt_rcu(fn) {
3359 struct fib6_nh *nh;
3360
3361 if (cfg->fc_flags & RTF_CACHE) {
3362 struct fib6_result res = {
3363 .f6i = rt,
3364 };
3365 int rc;
3366
3367 rt_cache = rt6_find_cached_rt(&res,
3368 &cfg->fc_dst,
3369 &cfg->fc_src);
3370 if (rt_cache) {
3371 rc = ip6_del_cached_rt(rt_cache, cfg);
3372 if (rc != -ESRCH) {
3373 rcu_read_unlock();
3374 return rc;
3375 }
3376 }
3377 continue;
3378 }
3379
3380 nh = &rt->fib6_nh;
3381 if (cfg->fc_ifindex &&
3382 (!nh->fib_nh_dev ||
3383 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3384 continue;
3385 if (cfg->fc_flags & RTF_GATEWAY &&
3386 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3387 continue;
3388 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3389 continue;
3390 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3391 continue;
3392 if (!fib6_info_hold_safe(rt))
3393 continue;
3394 rcu_read_unlock();
3395
3396 /* if a gateway was specified, delete only that one hop */
3397 if (cfg->fc_flags & RTF_GATEWAY)
3398 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3399
3400 return __ip6_del_rt_siblings(rt, cfg);
3401 }
3402 }
3403 rcu_read_unlock();
3404
3405 return err;
3406 }
3407
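/* Process an ICMPv6 Redirect: sanity-check the message and its ND
 * options, update the neighbour cache entry for the new first hop,
 * then install an RTF_CACHE exception clone whose gateway points at
 * the redirect target and announce the change via NETEVENT_REDIRECT.
 */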
3408 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3409 {
3410 struct netevent_redirect netevent;
3411 struct rt6_info *rt, *nrt = NULL;
3412 struct fib6_result res = {};
3413 struct ndisc_options ndopts;
3414 struct inet6_dev *in6_dev;
3415 struct neighbour *neigh;
3416 struct rd_msg *msg;
3417 int optlen, on_link;
3418 u8 *lladdr;
3419
3420 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
3421 optlen -= sizeof(*msg);
3422
3423 if (optlen < 0) {
3424 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
3425 return;
3426 }
3427
3428 msg = (struct rd_msg *)icmp6_hdr(skb);
3429
3430 if (ipv6_addr_is_multicast(&msg->dest)) {
3431 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
3432 return;
3433 }
3434
3435 on_link = 0;
3436 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3437 on_link = 1;
3438 } else if (ipv6_addr_type(&msg->target) !=
3439 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3440 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3441 return;
3442 }
3443
3444 in6_dev = __in6_dev_get(skb->dev);
3445 if (!in6_dev)
3446 return;
3447 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
3448 return;
3449
3450 /* RFC2461 8.1:
3451 * The IP source address of the Redirect MUST be the same as the current
3452 * first-hop router for the specified ICMP Destination Address.
3453 */
3454
3455 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3456 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3457 return;
3458 }
3459
3460 lladdr = NULL;
3461 if (ndopts.nd_opts_tgt_lladdr) {
3462 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3463 skb->dev);
3464 if (!lladdr) {
3465 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3466 return;
3467 }
3468 }
3469
3470 rt = (struct rt6_info *) dst;
3471 if (rt->rt6i_flags & RTF_REJECT) {
3472 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
3473 return;
3474 }
3475
3476 /* Redirect received -> path was valid.
3477 * Redirects are sent only in response to data packets,
3478 * so this nexthop is apparently reachable. --ANK
3479 */
3480 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
3481
3482 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
3483 if (!neigh)
3484 return;
3485
3486 /*
3487 * We have finally decided to accept it.
3488 */
3489
3490 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
3491 NEIGH_UPDATE_F_WEAK_OVERRIDE|
3492 NEIGH_UPDATE_F_OVERRIDE|
3493 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
3494 NEIGH_UPDATE_F_ISROUTER)),
3495 NDISC_REDIRECT, &ndopts);
3496
3497 rcu_read_lock();
3498 res.f6i = rcu_dereference(rt->from);
3499 if (!res.f6i)
3500 goto out;
3501
3502 res.nh = &res.f6i->fib6_nh;
3503 res.fib6_flags = res.f6i->fib6_flags;
3504 res.fib6_type = res.f6i->fib6_type;
3505 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
3506 if (!nrt)
3507 goto out;
3508
3509 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3510 if (on_link)
3511 nrt->rt6i_flags &= ~RTF_GATEWAY;
3512
3513 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
3514
3515 /* rt6_insert_exception() will take care of duplicated exceptions */
3516 if (rt6_insert_exception(nrt, &res)) {
3517 dst_release_immediate(&nrt->dst);
3518 goto out;
3519 }
3520
3521 netevent.old = &rt->dst;
3522 netevent.new = &nrt->dst;
3523 netevent.daddr = &msg->dest;
3524 netevent.neigh = neigh;
3525 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3526
3527 out:
3528 rcu_read_unlock();
3529 neigh_release(neigh);
3530 }
3531
3532 #ifdef CONFIG_IPV6_ROUTE_INFO
3533 static struct fib6_info *rt6_get_route_info(struct net *net,
3534 const struct in6_addr *prefix, int prefixlen,
3535 const struct in6_addr *gwaddr,
3536 struct net_device *dev)
3537 {
3538 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3539 int ifindex = dev->ifindex;
3540 struct fib6_node *fn;
3541 struct fib6_info *rt = NULL;
3542 struct fib6_table *table;
3543
3544 table = fib6_get_table(net, tb_id);
3545 if (!table)
3546 return NULL;
3547
3548 rcu_read_lock();
3549 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3550 if (!fn)
3551 goto out;
3552
3553 for_each_fib6_node_rt_rcu(fn) {
3554 if (rt->fib6_nh.fib_nh_dev->ifindex != ifindex)
3555 continue;
3556 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
3557 !rt->fib6_nh.fib_nh_gw_family)
3558 continue;
3559 if (!ipv6_addr_equal(&rt->fib6_nh.fib_nh_gw6, gwaddr))
3560 continue;
3561 if (!fib6_info_hold_safe(rt))
3562 continue;
3563 break;
3564 }
3565 out:
3566 rcu_read_unlock();
3567 return rt;
3568 }
3569
3570 static struct fib6_info *rt6_add_route_info(struct net *net,
3571 const struct in6_addr *prefix, int prefixlen,
3572 const struct in6_addr *gwaddr,
3573 struct net_device *dev,
3574 unsigned int pref)
3575 {
3576 struct fib6_config cfg = {
3577 .fc_metric = IP6_RT_PRIO_USER,
3578 .fc_ifindex = dev->ifindex,
3579 .fc_dst_len = prefixlen,
3580 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3581 RTF_UP | RTF_PREF(pref),
3582 .fc_protocol = RTPROT_RA,
3583 .fc_type = RTN_UNICAST,
3584 .fc_nlinfo.portid = 0,
3585 .fc_nlinfo.nlh = NULL,
3586 .fc_nlinfo.nl_net = net,
3587 };
3588
3589 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3590 cfg.fc_dst = *prefix;
3591 cfg.fc_gateway = *gwaddr;
3592
3593 /* We should treat it as a default route if prefix length is 0. */
3594 if (!prefixlen)
3595 cfg.fc_flags |= RTF_DEFAULT;
3596
3597 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
3598
3599 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3600 }
3601 #endif
3602
3603 struct fib6_info *rt6_get_dflt_router(struct net *net,
3604 const struct in6_addr *addr,
3605 struct net_device *dev)
3606 {
3607 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3608 struct fib6_info *rt;
3609 struct fib6_table *table;
3610
3611 table = fib6_get_table(net, tb_id);
3612 if (!table)
3613 return NULL;
3614
3615 rcu_read_lock();
3616 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3617 struct fib6_nh *nh = &rt->fib6_nh;
3618
3619 if (dev == nh->fib_nh_dev &&
3620 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3621 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
3622 break;
3623 }
3624 if (rt && !fib6_info_hold_safe(rt))
3625 rt = NULL;
3626 rcu_read_unlock();
3627 return rt;
3628 }
3629
3630 struct fib6_info *rt6_add_dflt_router(struct net *net,
3631 const struct in6_addr *gwaddr,
3632 struct net_device *dev,
3633 unsigned int pref)
3634 {
3635 struct fib6_config cfg = {
3636 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3637 .fc_metric = IP6_RT_PRIO_USER,
3638 .fc_ifindex = dev->ifindex,
3639 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3640 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3641 .fc_protocol = RTPROT_RA,
3642 .fc_type = RTN_UNICAST,
3643 .fc_nlinfo.portid = 0,
3644 .fc_nlinfo.nlh = NULL,
3645 .fc_nlinfo.nl_net = net,
3646 };
3647
3648 cfg.fc_gateway = *gwaddr;
3649
3650 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
3651 struct fib6_table *table;
3652
3653 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3654 if (table)
3655 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3656 }
3657
3658 return rt6_get_dflt_router(net, gwaddr, dev);
3659 }
3660
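/* Remove RA-learned default routes from @table, restarting the walk
 * after each deletion since the rcu lock must be dropped around
 * ip6_del_rt(). Routes on interfaces with accept_ra == 2 (accept RAs
 * even when forwarding) are deliberately left in place.
 */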
3661 static void __rt6_purge_dflt_routers(struct net *net,
3662 struct fib6_table *table)
3663 {
3664 struct fib6_info *rt;
3665
3666 restart:
3667 rcu_read_lock();
3668 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3669 struct net_device *dev = fib6_info_nh_dev(rt);
3670 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
3671
3672 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3673 (!idev || idev->cnf.accept_ra != 2) &&
3674 fib6_info_hold_safe(rt)) {
3675 rcu_read_unlock();
3676 ip6_del_rt(net, rt);
3677 goto restart;
3678 }
3679 }
3680 rcu_read_unlock();
3681
3682 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3683 }
3684
3685 void rt6_purge_dflt_routers(struct net *net)
3686 {
3687 struct fib6_table *table;
3688 struct hlist_head *head;
3689 unsigned int h;
3690
3691 rcu_read_lock();
3692
3693 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3694 head = &net->ipv6.fib_table_hash[h];
3695 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3696 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
3697 __rt6_purge_dflt_routers(net, table);
3698 }
3699 }
3700
3701 rcu_read_unlock();
3702 }
3703
3704 static void rtmsg_to_fib6_config(struct net *net,
3705 struct in6_rtmsg *rtmsg,
3706 struct fib6_config *cfg)
3707 {
3708 *cfg = (struct fib6_config){
3709 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3710 : RT6_TABLE_MAIN,
3711 .fc_ifindex = rtmsg->rtmsg_ifindex,
3712 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
3713 .fc_expires = rtmsg->rtmsg_info,
3714 .fc_dst_len = rtmsg->rtmsg_dst_len,
3715 .fc_src_len = rtmsg->rtmsg_src_len,
3716 .fc_flags = rtmsg->rtmsg_flags,
3717 .fc_type = rtmsg->rtmsg_type,
3718
3719 .fc_nlinfo.nl_net = net,
3720
3721 .fc_dst = rtmsg->rtmsg_dst,
3722 .fc_src = rtmsg->rtmsg_src,
3723 .fc_gateway = rtmsg->rtmsg_gateway,
3724 };
3725 }
3726
3727 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3728 {
3729 struct fib6_config cfg;
3730 struct in6_rtmsg rtmsg;
3731 int err;
3732
3733 switch (cmd) {
3734 case SIOCADDRT: /* Add a route */
3735 case SIOCDELRT: /* Delete a route */
3736 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3737 return -EPERM;
3738 err = copy_from_user(&rtmsg, arg,
3739 sizeof(struct in6_rtmsg));
3740 if (err)
3741 return -EFAULT;
3742
3743 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
3744
3745 rtnl_lock();
3746 switch (cmd) {
3747 case SIOCADDRT:
3748 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
3749 break;
3750 case SIOCDELRT:
3751 err = ip6_route_del(&cfg, NULL);
3752 break;
3753 default:
3754 err = -EINVAL;
3755 }
3756 rtnl_unlock();
3757
3758 return err;
3759 }
3760
3761 return -EINVAL;
3762 }
3763
3764 /*
3765 * Drop the packet on the floor
3766 */
3767
3768 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
3769 {
3770 struct dst_entry *dst = skb_dst(skb);
3771 struct net *net = dev_net(dst->dev);
3772 struct inet6_dev *idev;
3773 int type;
3774
3775 if (netif_is_l3_master(skb->dev) &&
3776 dst->dev == net->loopback_dev)
3777 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
3778 else
3779 idev = ip6_dst_idev(dst);
3780
3781 switch (ipstats_mib_noroutes) {
3782 case IPSTATS_MIB_INNOROUTES:
3783 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
3784 if (type == IPV6_ADDR_ANY) {
3785 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
3786 break;
3787 }
3788 /* FALLTHROUGH */
3789 case IPSTATS_MIB_OUTNOROUTES:
3790 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
3791 break;
3792 }
3793
3794 /* Start over by dropping the dst for l3mdev case */
3795 if (netif_is_l3_master(skb->dev))
3796 skb_dst_drop(skb);
3797
3798 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
3799 kfree_skb(skb);
3800 return 0;
3801 }
3802
3803 static int ip6_pkt_discard(struct sk_buff *skb)
3804 {
3805 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
3806 }
3807
3808 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3809 {
3810 skb->dev = skb_dst(skb)->dev;
3811 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
3812 }
3813
3814 static int ip6_pkt_prohibit(struct sk_buff *skb)
3815 {
3816 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
3817 }
3818
3819 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3820 {
3821 skb->dev = skb_dst(skb)->dev;
3822 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
3823 }
3824
3825 /*
3826 * Allocate a dst for local (unicast / anycast) address.
3827 */
3828
3829 struct fib6_info *addrconf_f6i_alloc(struct net *net,
3830 struct inet6_dev *idev,
3831 const struct in6_addr *addr,
3832 bool anycast, gfp_t gfp_flags)
3833 {
3834 struct fib6_config cfg = {
3835 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
3836 .fc_ifindex = idev->dev->ifindex,
3837 .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
3838 .fc_dst = *addr,
3839 .fc_dst_len = 128,
3840 .fc_protocol = RTPROT_KERNEL,
3841 .fc_nlinfo.nl_net = net,
3842 .fc_ignore_dev_down = true,
3843 };
3844
3845 if (anycast) {
3846 cfg.fc_type = RTN_ANYCAST;
3847 cfg.fc_flags |= RTF_ANYCAST;
3848 } else {
3849 cfg.fc_type = RTN_LOCAL;
3850 cfg.fc_flags |= RTF_LOCAL;
3851 }
3852
3853 return ip6_route_info_create(&cfg, gfp_flags, NULL);
3854 }
3855
3856 /* remove a deleted IP address from prefsrc entries */
3857 struct arg_dev_net_ip {
3858 struct net_device *dev;
3859 struct net *net;
3860 struct in6_addr *addr;
3861 };
3862
3863 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
3864 {
3865 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3866 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3867 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3868
3869 if (((void *)rt->fib6_nh.fib_nh_dev == dev || !dev) &&
3870 rt != net->ipv6.fib6_null_entry &&
3871 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
3872 spin_lock_bh(&rt6_exception_lock);
3873 /* remove prefsrc entry */
3874 rt->fib6_prefsrc.plen = 0;
3875 spin_unlock_bh(&rt6_exception_lock);
3876 }
3877 return 0;
3878 }
3879
3880 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
3881 {
3882 struct net *net = dev_net(ifp->idev->dev);
3883 struct arg_dev_net_ip adni = {
3884 .dev = ifp->idev->dev,
3885 .net = net,
3886 .addr = &ifp->addr,
3887 };
3888 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
3889 }
3890
3891 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
3892
3893 /* Remove routers and update dst entries when a gateway turns into a host. */
3894 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
3895 {
3896 struct in6_addr *gateway = (struct in6_addr *)arg;
3897
3898 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
3899 rt->fib6_nh.fib_nh_gw_family &&
3900 ipv6_addr_equal(gateway, &rt->fib6_nh.fib_nh_gw6)) {
3901 return -1;
3902 }
3903
3904 /* Further clean up cached routes in the exception table.
3905 * This is needed because a cached route may have a different
3906 * gateway than its 'parent' in the case of an ip redirect.
3907 */
3908 rt6_exceptions_clean_tohost(rt, gateway);
3909
3910 return 0;
3911 }
3912
3913 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
3914 {
3915 fib6_clean_all(net, fib6_clean_tohost, gateway);
3916 }
3917
3918 struct arg_netdev_event {
3919 const struct net_device *dev;
3920 union {
3921 unsigned char nh_flags;
3922 unsigned long event;
3923 };
3924 };
3925
3926 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
3927 {
3928 struct fib6_info *iter;
3929 struct fib6_node *fn;
3930
3931 fn = rcu_dereference_protected(rt->fib6_node,
3932 lockdep_is_held(&rt->fib6_table->tb6_lock));
3933 iter = rcu_dereference_protected(fn->leaf,
3934 lockdep_is_held(&rt->fib6_table->tb6_lock));
3935 while (iter) {
3936 if (iter->fib6_metric == rt->fib6_metric &&
3937 rt6_qualify_for_ecmp(iter))
3938 return iter;
3939 iter = rcu_dereference_protected(iter->fib6_next,
3940 lockdep_is_held(&rt->fib6_table->tb6_lock));
3941 }
3942
3943 return NULL;
3944 }
3945
3946 static bool rt6_is_dead(const struct fib6_info *rt)
3947 {
3948 if (rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD ||
3949 (rt->fib6_nh.fib_nh_flags & RTNH_F_LINKDOWN &&
3950 ip6_ignore_linkdown(rt->fib6_nh.fib_nh_dev)))
3951 return true;
3952
3953 return false;
3954 }
3955
3956 static int rt6_multipath_total_weight(const struct fib6_info *rt)
3957 {
3958 struct fib6_info *iter;
3959 int total = 0;
3960
3961 if (!rt6_is_dead(rt))
3962 total += rt->fib6_nh.fib_nh_weight;
3963
3964 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
3965 if (!rt6_is_dead(iter))
3966 total += iter->fib6_nh.fib_nh_weight;
3967 }
3968
3969 return total;
3970 }
3971
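/* Weighted hash-threshold nexthop selection: each live sibling gets an
 * upper bound of (cumulative weight / total weight) * 2^31 - 1, and a
 * lookup walks the siblings in order until the 31-bit flow hash falls
 * at or below the bound. For example, a hypothetical multipath route
 *
 *	ip -6 route add 2001:db8::/64 \
 *		nexthop via fe80::1 dev eth0 weight 1 \
 *		nexthop via fe80::2 dev eth1 weight 2
 *
 * yields upper bounds 715827882 and 2147483647, so roughly one third
 * of flows land on the first nexthop.
 */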
3972 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
3973 {
3974 int upper_bound = -1;
3975
3976 if (!rt6_is_dead(rt)) {
3977 *weight += rt->fib6_nh.fib_nh_weight;
3978 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
3979 total) - 1;
3980 }
3981 atomic_set(&rt->fib6_nh.fib_nh_upper_bound, upper_bound);
3982 }
3983
3984 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
3985 {
3986 struct fib6_info *iter;
3987 int weight = 0;
3988
3989 rt6_upper_bound_set(rt, &weight, total);
3990
3991 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
3992 rt6_upper_bound_set(iter, &weight, total);
3993 }
3994
3995 void rt6_multipath_rebalance(struct fib6_info *rt)
3996 {
3997 struct fib6_info *first;
3998 int total;
3999
4000 /* If the entire multipath route was marked for flushing,
4001 * there is no need to rebalance upon the removal of every
4002 * sibling route.
4003 */
4004 if (!rt->fib6_nsiblings || rt->should_flush)
4005 return;
4006
4007 /* During lookup routes are evaluated in order, so we need to
4008 * make sure upper bounds are assigned from the first sibling
4009 * onwards.
4010 */
4011 first = rt6_multipath_first_sibling(rt);
4012 if (WARN_ON_ONCE(!first))
4013 return;
4014
4015 total = rt6_multipath_total_weight(first);
4016 rt6_multipath_upper_bound_set(first, total);
4017 }
4018
4019 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4020 {
4021 const struct arg_netdev_event *arg = p_arg;
4022 struct net *net = dev_net(arg->dev);
4023
4024 if (rt != net->ipv6.fib6_null_entry &&
4025 rt->fib6_nh.fib_nh_dev == arg->dev) {
4026 rt->fib6_nh.fib_nh_flags &= ~arg->nh_flags;
4027 fib6_update_sernum_upto_root(net, rt);
4028 rt6_multipath_rebalance(rt);
4029 }
4030
4031 return 0;
4032 }
4033
4034 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4035 {
4036 struct arg_netdev_event arg = {
4037 .dev = dev,
4038 {
4039 .nh_flags = nh_flags,
4040 },
4041 };
4042
4043 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4044 arg.nh_flags |= RTNH_F_LINKDOWN;
4045
4046 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4047 }
4048
4049 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4050 const struct net_device *dev)
4051 {
4052 struct fib6_info *iter;
4053
4054 if (rt->fib6_nh.fib_nh_dev == dev)
4055 return true;
4056 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4057 if (iter->fib6_nh.fib_nh_dev == dev)
4058 return true;
4059
4060 return false;
4061 }
4062
4063 static void rt6_multipath_flush(struct fib6_info *rt)
4064 {
4065 struct fib6_info *iter;
4066
4067 rt->should_flush = 1;
4068 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4069 iter->should_flush = 1;
4070 }
4071
4072 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4073 const struct net_device *down_dev)
4074 {
4075 struct fib6_info *iter;
4076 unsigned int dead = 0;
4077
4078 if (rt->fib6_nh.fib_nh_dev == down_dev ||
4079 rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
4080 dead++;
4081 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4082 if (iter->fib6_nh.fib_nh_dev == down_dev ||
4083 iter->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
4084 dead++;
4085
4086 return dead;
4087 }
4088
4089 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4090 const struct net_device *dev,
4091 unsigned char nh_flags)
4092 {
4093 struct fib6_info *iter;
4094
4095 if (rt->fib6_nh.fib_nh_dev == dev)
4096 rt->fib6_nh.fib_nh_flags |= nh_flags;
4097 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4098 if (iter->fib6_nh.fib_nh_dev == dev)
4099 iter->fib6_nh.fib_nh_flags |= nh_flags;
4100 }
4101
4102 /* called with write lock held for table with rt */
4103 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4104 {
4105 const struct arg_netdev_event *arg = p_arg;
4106 const struct net_device *dev = arg->dev;
4107 struct net *net = dev_net(dev);
4108
4109 if (rt == net->ipv6.fib6_null_entry)
4110 return 0;
4111
4112 switch (arg->event) {
4113 case NETDEV_UNREGISTER:
4114 return rt->fib6_nh.fib_nh_dev == dev ? -1 : 0;
4115 case NETDEV_DOWN:
4116 if (rt->should_flush)
4117 return -1;
4118 if (!rt->fib6_nsiblings)
4119 return rt->fib6_nh.fib_nh_dev == dev ? -1 : 0;
4120 if (rt6_multipath_uses_dev(rt, dev)) {
4121 unsigned int count;
4122
4123 count = rt6_multipath_dead_count(rt, dev);
4124 if (rt->fib6_nsiblings + 1 == count) {
4125 rt6_multipath_flush(rt);
4126 return -1;
4127 }
4128 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4129 RTNH_F_LINKDOWN);
4130 fib6_update_sernum(net, rt);
4131 rt6_multipath_rebalance(rt);
4132 }
4133 return -2;
4134 case NETDEV_CHANGE:
4135 if (rt->fib6_nh.fib_nh_dev != dev ||
4136 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4137 break;
4138 rt->fib6_nh.fib_nh_flags |= RTNH_F_LINKDOWN;
4139 rt6_multipath_rebalance(rt);
4140 break;
4141 }
4142
4143 return 0;
4144 }
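/* Return-value contract, as read from the fib6 walker's consumer
 * (fib6_clean_node()): 0 leaves the route untouched, -1 asks the
 * walker to delete this route, and -2 signals that the whole
 * multipath group was already handled here, so the walker should skip
 * past the remaining siblings instead of visiting them again.
 */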
4145
4146 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4147 {
4148 struct arg_netdev_event arg = {
4149 .dev = dev,
4150 {
4151 .event = event,
4152 },
4153 };
4154 struct net *net = dev_net(dev);
4155
4156 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4157 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4158 else
4159 fib6_clean_all(net, fib6_ifdown, &arg);
4160 }
4161
4162 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4163 {
4164 rt6_sync_down_dev(dev, event);
4165 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4166 neigh_ifdown(&nd_tbl, dev);
4167 }
4168
4169 struct rt6_mtu_change_arg {
4170 struct net_device *dev;
4171 unsigned int mtu;
4172 };
4173
4174 static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg)
4175 {
4176 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4177 struct inet6_dev *idev;
4178
4179 /* In IPv6, PMTU discovery is not optional,
4180 so the RTAX_MTU lock cannot disable it.
4181 We still use this lock to block changes
4182 caused by addrconf/ndisc.
4183 */
4184
4185 idev = __in6_dev_get(arg->dev);
4186 if (!idev)
4187 return 0;
4188
4189 /* For an administrative MTU increase, there is no way to discover
4190 an IPv6 PMTU increase, so the PMTU must be updated here.
4191 Since RFC 1981 doesn't cover administrative MTU increases,
4192 updating the PMTU on an increase is a MUST (e.g. jumbo frames).
4193 */
4194 if (rt->fib6_nh.fib_nh_dev == arg->dev &&
4195 !fib6_metric_locked(rt, RTAX_MTU)) {
4196 u32 mtu = rt->fib6_pmtu;
4197
4198 if (mtu >= arg->mtu ||
4199 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4200 fib6_metric_set(rt, RTAX_MTU, arg->mtu);
4201
4202 spin_lock_bh(&rt6_exception_lock);
4203 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
4204 spin_unlock_bh(&rt6_exception_lock);
4205 }
4206 return 0;
4207 }
4208
4209 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4210 {
4211 struct rt6_mtu_change_arg arg = {
4212 .dev = dev,
4213 .mtu = mtu,
4214 };
4215
4216 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4217 }
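/* rt6_mtu_change() is the hook for administrative MTU changes such as
 * "ip link set dev eth0 mtu 9000". A minimal sketch of a caller,
 * assuming it runs from a netdevice notifier (the real caller lives
 * in addrconf):
 *
 *	case NETDEV_CHANGEMTU:
 *		rt6_mtu_change(dev, dev->mtu);
 *		break;
 *
 * Every route using the device then re-evaluates RTAX_MTU and its
 * cached PMTU exceptions via rt6_mtu_change_route().
 */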
4218
4219 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4220 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4221 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4222 [RTA_OIF] = { .type = NLA_U32 },
4223 [RTA_IIF] = { .type = NLA_U32 },
4224 [RTA_PRIORITY] = { .type = NLA_U32 },
4225 [RTA_METRICS] = { .type = NLA_NESTED },
4226 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4227 [RTA_PREF] = { .type = NLA_U8 },
4228 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4229 [RTA_ENCAP] = { .type = NLA_NESTED },
4230 [RTA_EXPIRES] = { .type = NLA_U32 },
4231 [RTA_UID] = { .type = NLA_U32 },
4232 [RTA_MARK] = { .type = NLA_U32 },
4233 [RTA_TABLE] = { .type = NLA_U32 },
4234 [RTA_IP_PROTO] = { .type = NLA_U8 },
4235 [RTA_SPORT] = { .type = NLA_U16 },
4236 [RTA_DPORT] = { .type = NLA_U16 },
4237 };
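/* For illustration, a userspace request such as
 * "ip -6 route add 2001:db8::/64 via fe80::1 dev eth0 metric 1024"
 * arrives as RTM_NEWROUTE carrying (roughly) RTA_DST, RTA_GATEWAY,
 * RTA_OIF and RTA_PRIORITY, each validated against this policy before
 * rtm_to_fib6_config() below copies it into struct fib6_config. The
 * attribute names are exact; the iproute2 mapping is a best-effort
 * illustration.
 */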
4238
4239 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4240 struct fib6_config *cfg,
4241 struct netlink_ext_ack *extack)
4242 {
4243 struct rtmsg *rtm;
4244 struct nlattr *tb[RTA_MAX+1];
4245 unsigned int pref;
4246 int err;
4247
4248 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4249 rtm_ipv6_policy, extack);
4250 if (err < 0)
4251 goto errout;
4252
4253 err = -EINVAL;
4254 rtm = nlmsg_data(nlh);
4255
4256 *cfg = (struct fib6_config){
4257 .fc_table = rtm->rtm_table,
4258 .fc_dst_len = rtm->rtm_dst_len,
4259 .fc_src_len = rtm->rtm_src_len,
4260 .fc_flags = RTF_UP,
4261 .fc_protocol = rtm->rtm_protocol,
4262 .fc_type = rtm->rtm_type,
4263
4264 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4265 .fc_nlinfo.nlh = nlh,
4266 .fc_nlinfo.nl_net = sock_net(skb->sk),
4267 };
4268
4269 if (rtm->rtm_type == RTN_UNREACHABLE ||
4270 rtm->rtm_type == RTN_BLACKHOLE ||
4271 rtm->rtm_type == RTN_PROHIBIT ||
4272 rtm->rtm_type == RTN_THROW)
4273 cfg->fc_flags |= RTF_REJECT;
4274
4275 if (rtm->rtm_type == RTN_LOCAL)
4276 cfg->fc_flags |= RTF_LOCAL;
4277
4278 if (rtm->rtm_flags & RTM_F_CLONED)
4279 cfg->fc_flags |= RTF_CACHE;
4280
4281 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4282
4283 if (tb[RTA_GATEWAY]) {
4284 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4285 cfg->fc_flags |= RTF_GATEWAY;
4286 }
4287 if (tb[RTA_VIA]) {
4288 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4289 goto errout;
4290 }
4291
4292 if (tb[RTA_DST]) {
4293 int plen = (rtm->rtm_dst_len + 7) >> 3;
4294
4295 if (nla_len(tb[RTA_DST]) < plen)
4296 goto errout;
4297
4298 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4299 }
4300
4301 if (tb[RTA_SRC]) {
4302 int plen = (rtm->rtm_src_len + 7) >> 3;
4303
4304 if (nla_len(tb[RTA_SRC]) < plen)
4305 goto errout;
4306
4307 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4308 }
4309
4310 if (tb[RTA_PREFSRC])
4311 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4312
4313 if (tb[RTA_OIF])
4314 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4315
4316 if (tb[RTA_PRIORITY])
4317 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4318
4319 if (tb[RTA_METRICS]) {
4320 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4321 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4322 }
4323
4324 if (tb[RTA_TABLE])
4325 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4326
4327 if (tb[RTA_MULTIPATH]) {
4328 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4329 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4330
4331 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4332 cfg->fc_mp_len, extack);
4333 if (err < 0)
4334 goto errout;
4335 }
4336
4337 if (tb[RTA_PREF]) {
4338 pref = nla_get_u8(tb[RTA_PREF]);
4339 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4340 pref != ICMPV6_ROUTER_PREF_HIGH)
4341 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4342 cfg->fc_flags |= RTF_PREF(pref);
4343 }
4344
4345 if (tb[RTA_ENCAP])
4346 cfg->fc_encap = tb[RTA_ENCAP];
4347
4348 if (tb[RTA_ENCAP_TYPE]) {
4349 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4350
4351 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
4352 if (err < 0)
4353 goto errout;
4354 }
4355
4356 if (tb[RTA_EXPIRES]) {
4357 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4358
4359 if (addrconf_finite_timeout(timeout)) {
4360 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4361 cfg->fc_flags |= RTF_EXPIRES;
4362 }
4363 }
4364
4365 err = 0;
4366 errout:
4367 return err;
4368 }
4369
4370 struct rt6_nh {
4371 struct fib6_info *fib6_info;
4372 struct fib6_config r_cfg;
4373 struct list_head next;
4374 };
4375
4376 static int ip6_route_info_append(struct net *net,
4377 struct list_head *rt6_nh_list,
4378 struct fib6_info *rt,
4379 struct fib6_config *r_cfg)
4380 {
4381 struct rt6_nh *nh;
4382 int err = -EEXIST;
4383
4384 list_for_each_entry(nh, rt6_nh_list, next) {
4385 /* check if fib6_info already exists */
4386 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
4387 return err;
4388 }
4389
4390 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4391 if (!nh)
4392 return -ENOMEM;
4393 nh->fib6_info = rt;
4394 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4395 list_add_tail(&nh->next, rt6_nh_list);
4396
4397 return 0;
4398 }
4399
4400 static void ip6_route_mpath_notify(struct fib6_info *rt,
4401 struct fib6_info *rt_last,
4402 struct nl_info *info,
4403 __u16 nlflags)
4404 {
4405 /* if this is an APPEND route, then rt points to the first route
4406 * inserted and rt_last points to the last route inserted. Userspace
4407 * wants a consistent dump of the route which starts at the first
4408 * nexthop. Since sibling routes are always added at the end of
4409 * the list, find the first sibling of the last route appended.
4410 */
4411 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
4412 rt = list_first_entry(&rt_last->fib6_siblings,
4413 struct fib6_info,
4414 fib6_siblings);
4415 }
4416
4417 if (rt)
4418 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
4419 }
4420
4421 static int ip6_route_multipath_add(struct fib6_config *cfg,
4422 struct netlink_ext_ack *extack)
4423 {
4424 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
4425 struct nl_info *info = &cfg->fc_nlinfo;
4426 struct fib6_config r_cfg;
4427 struct rtnexthop *rtnh;
4428 struct fib6_info *rt;
4429 struct rt6_nh *err_nh;
4430 struct rt6_nh *nh, *nh_safe;
4431 __u16 nlflags;
4432 int remaining;
4433 int attrlen;
4434 int err = 1;
4435 int nhn = 0;
4436 int replace = (cfg->fc_nlinfo.nlh &&
4437 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
4438 LIST_HEAD(rt6_nh_list);
4439
4440 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
4441 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
4442 nlflags |= NLM_F_APPEND;
4443
4444 remaining = cfg->fc_mp_len;
4445 rtnh = (struct rtnexthop *)cfg->fc_mp;
4446
4447 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
4448 * fib6_info structs per nexthop
4449 */
4450 while (rtnh_ok(rtnh, remaining)) {
4451 memcpy(&r_cfg, cfg, sizeof(*cfg));
4452 if (rtnh->rtnh_ifindex)
4453 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4454
4455 attrlen = rtnh_attrlen(rtnh);
4456 if (attrlen > 0) {
4457 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4458
4459 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4460 if (nla) {
4461 r_cfg.fc_gateway = nla_get_in6_addr(nla);
4462 r_cfg.fc_flags |= RTF_GATEWAY;
4463 }
4464 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
4465 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
4466 if (nla)
4467 r_cfg.fc_encap_type = nla_get_u16(nla);
4468 }
4469
4470 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
4471 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
4472 if (IS_ERR(rt)) {
4473 err = PTR_ERR(rt);
4474 rt = NULL;
4475 goto cleanup;
4476 }
4477 if (!rt6_qualify_for_ecmp(rt)) {
4478 err = -EINVAL;
4479 NL_SET_ERR_MSG(extack,
4480 "Device-only routes cannot be added for IPv6 using the multipath API.");
4481 fib6_info_release(rt);
4482 goto cleanup;
4483 }
4484
4485 rt->fib6_nh.fib_nh_weight = rtnh->rtnh_hops + 1;
4486
4487 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
4488 rt, &r_cfg);
4489 if (err) {
4490 fib6_info_release(rt);
4491 goto cleanup;
4492 }
4493
4494 rtnh = rtnh_next(rtnh, &remaining);
4495 }
4496
4497 /* For add and replace, send one notification with all nexthops.
4498 * Skip the notification in fib6_add_rt2node() and send one with
4499 * the full route when done.
4500 */
4501 info->skip_notify = 1;
4502
4503 err_nh = NULL;
4504 list_for_each_entry(nh, &rt6_nh_list, next) {
4505 err = __ip6_ins_rt(nh->fib6_info, info, extack);
4506 fib6_info_release(nh->fib6_info);
4507
4508 if (!err) {
4509 /* save reference to last route successfully inserted */
4510 rt_last = nh->fib6_info;
4511
4512 /* save reference to first route for notification */
4513 if (!rt_notif)
4514 rt_notif = nh->fib6_info;
4515 }
4516
4517 /* nh->fib6_info is used or freed at this point, reset to NULL */
4518 nh->fib6_info = NULL;
4519 if (err) {
4520 if (replace && nhn)
4521 NL_SET_ERR_MSG_MOD(extack,
4522 "multipath route replace failed (check consistency of installed routes)");
4523 err_nh = nh;
4524 goto add_errout;
4525 }
4526
4527 /* Because each route is added like a single route, we remove
4528 * these flags after the first nexthop: if there is a collision,
4529 * we have already failed to add the first nexthop, since
4530 * fib6_add_rt2node() has rejected it; when replacing, the old
4531 * nexthops have been replaced by the first new one, and the rest
4532 * should be appended to it.
4533 */
4534 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
4535 NLM_F_REPLACE);
4536 nhn++;
4537 }
4538
4539 /* success ... tell user about new route */
4540 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4541 goto cleanup;
4542
4543 add_errout:
4544 /* send notification for routes that were added so that
4545 * the delete notifications sent by ip6_route_del are
4546 * coherent
4547 */
4548 if (rt_notif)
4549 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4550
4551 /* Delete routes that were already added */
4552 list_for_each_entry(nh, &rt6_nh_list, next) {
4553 if (err_nh == nh)
4554 break;
4555 ip6_route_del(&nh->r_cfg, extack);
4556 }
4557
4558 cleanup:
4559 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
4560 if (nh->fib6_info)
4561 fib6_info_release(nh->fib6_info);
4562 list_del(&nh->next);
4563 kfree(nh);
4564 }
4565
4566 return err;
4567 }
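/* Illustrative multipath request (iproute2 syntax, best effort):
 *
 *	ip -6 route add 2001:db8::/64 \
 *		nexthop via fe80::1 dev eth0 weight 1 \
 *		nexthop via fe80::2 dev eth1 weight 3
 *
 * Each "nexthop" clause becomes one struct rtnexthop inside
 * RTA_MULTIPATH. Userspace encodes the weight as rtnh_hops =
 * weight - 1, which is why the loop above restores it with
 * rtnh->rtnh_hops + 1.
 */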
4568
4569 static int ip6_route_multipath_del(struct fib6_config *cfg,
4570 struct netlink_ext_ack *extack)
4571 {
4572 struct fib6_config r_cfg;
4573 struct rtnexthop *rtnh;
4574 int remaining;
4575 int attrlen;
4576 int err = 1, last_err = 0;
4577
4578 remaining = cfg->fc_mp_len;
4579 rtnh = (struct rtnexthop *)cfg->fc_mp;
4580
4581 /* Parse a Multipath Entry */
4582 while (rtnh_ok(rtnh, remaining)) {
4583 memcpy(&r_cfg, cfg, sizeof(*cfg));
4584 if (rtnh->rtnh_ifindex)
4585 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4586
4587 attrlen = rtnh_attrlen(rtnh);
4588 if (attrlen > 0) {
4589 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4590
4591 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4592 if (nla) {
4593 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
4594 r_cfg.fc_flags |= RTF_GATEWAY;
4595 }
4596 }
4597 err = ip6_route_del(&r_cfg, extack);
4598 if (err)
4599 last_err = err;
4600
4601 rtnh = rtnh_next(rtnh, &remaining);
4602 }
4603
4604 return last_err;
4605 }
4606
4607 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4608 struct netlink_ext_ack *extack)
4609 {
4610 struct fib6_config cfg;
4611 int err;
4612
4613 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4614 if (err < 0)
4615 return err;
4616
4617 if (cfg.fc_mp)
4618 return ip6_route_multipath_del(&cfg, extack);
4619 else {
4620 cfg.fc_delete_all_nh = 1;
4621 return ip6_route_del(&cfg, extack);
4622 }
4623 }
4624
4625 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4626 struct netlink_ext_ack *extack)
4627 {
4628 struct fib6_config cfg;
4629 int err;
4630
4631 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4632 if (err < 0)
4633 return err;
4634
4635 if (cfg.fc_metric == 0)
4636 cfg.fc_metric = IP6_RT_PRIO_USER;
4637
4638 if (cfg.fc_mp)
4639 return ip6_route_multipath_add(&cfg, extack);
4640 else
4641 return ip6_route_add(&cfg, GFP_KERNEL, extack);
4642 }
4643
4644 static size_t rt6_nlmsg_size(struct fib6_info *rt)
4645 {
4646 int nexthop_len = 0;
4647
4648 if (rt->fib6_nsiblings) {
4649 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4650 + NLA_ALIGN(sizeof(struct rtnexthop))
4651 + nla_total_size(16) /* RTA_GATEWAY */
4652 + lwtunnel_get_encap_size(rt->fib6_nh.fib_nh_lws);
4653
4654 nexthop_len *= rt->fib6_nsiblings;
4655 }
4656
4657 return NLMSG_ALIGN(sizeof(struct rtmsg))
4658 + nla_total_size(16) /* RTA_SRC */
4659 + nla_total_size(16) /* RTA_DST */
4660 + nla_total_size(16) /* RTA_GATEWAY */
4661 + nla_total_size(16) /* RTA_PREFSRC */
4662 + nla_total_size(4) /* RTA_TABLE */
4663 + nla_total_size(4) /* RTA_IIF */
4664 + nla_total_size(4) /* RTA_OIF */
4665 + nla_total_size(4) /* RTA_PRIORITY */
4666 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
4667 + nla_total_size(sizeof(struct rta_cacheinfo))
4668 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
4669 + nla_total_size(1) /* RTA_PREF */
4670 + lwtunnel_get_encap_size(rt->fib6_nh.fib_nh_lws)
4671 + nexthop_len;
4672 }
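/* Note that the per-sibling estimate above reuses the first nexthop's
 * lwtunnel encap size (rt->fib6_nh.fib_nh_lws) for every sibling;
 * this is a sizing approximation for the allocation, not an exact
 * per-nexthop accounting.
 */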
4673
4674 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4675 struct fib6_info *rt, struct dst_entry *dst,
4676 struct in6_addr *dest, struct in6_addr *src,
4677 int iif, int type, u32 portid, u32 seq,
4678 unsigned int flags)
4679 {
4680 struct rt6_info *rt6 = (struct rt6_info *)dst;
4681 struct rt6key *rt6_dst, *rt6_src;
4682 u32 *pmetrics, table, rt6_flags;
4683 struct nlmsghdr *nlh;
4684 struct rtmsg *rtm;
4685 long expires = 0;
4686
4687 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4688 if (!nlh)
4689 return -EMSGSIZE;
4690
4691 if (rt6) {
4692 rt6_dst = &rt6->rt6i_dst;
4693 rt6_src = &rt6->rt6i_src;
4694 rt6_flags = rt6->rt6i_flags;
4695 } else {
4696 rt6_dst = &rt->fib6_dst;
4697 rt6_src = &rt->fib6_src;
4698 rt6_flags = rt->fib6_flags;
4699 }
4700
4701 rtm = nlmsg_data(nlh);
4702 rtm->rtm_family = AF_INET6;
4703 rtm->rtm_dst_len = rt6_dst->plen;
4704 rtm->rtm_src_len = rt6_src->plen;
4705 rtm->rtm_tos = 0;
4706 if (rt->fib6_table)
4707 table = rt->fib6_table->tb6_id;
4708 else
4709 table = RT6_TABLE_UNSPEC;
4710 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
4711 if (nla_put_u32(skb, RTA_TABLE, table))
4712 goto nla_put_failure;
4713
4714 rtm->rtm_type = rt->fib6_type;
4715 rtm->rtm_flags = 0;
4716 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4717 rtm->rtm_protocol = rt->fib6_protocol;
4718
4719 if (rt6_flags & RTF_CACHE)
4720 rtm->rtm_flags |= RTM_F_CLONED;
4721
4722 if (dest) {
4723 if (nla_put_in6_addr(skb, RTA_DST, dest))
4724 goto nla_put_failure;
4725 rtm->rtm_dst_len = 128;
4726 } else if (rtm->rtm_dst_len)
4727 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
4728 goto nla_put_failure;
4729 #ifdef CONFIG_IPV6_SUBTREES
4730 if (src) {
4731 if (nla_put_in6_addr(skb, RTA_SRC, src))
4732 goto nla_put_failure;
4733 rtm->rtm_src_len = 128;
4734 } else if (rtm->rtm_src_len &&
4735 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
4736 goto nla_put_failure;
4737 #endif
4738 if (iif) {
4739 #ifdef CONFIG_IPV6_MROUTE
4740 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
4741 int err = ip6mr_get_route(net, skb, rtm, portid);
4742
4743 if (err == 0)
4744 return 0;
4745 if (err < 0)
4746 goto nla_put_failure;
4747 } else
4748 #endif
4749 if (nla_put_u32(skb, RTA_IIF, iif))
4750 goto nla_put_failure;
4751 } else if (dest) {
4752 struct in6_addr saddr_buf;
4753 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
4754 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4755 goto nla_put_failure;
4756 }
4757
4758 if (rt->fib6_prefsrc.plen) {
4759 struct in6_addr saddr_buf;
4760 saddr_buf = rt->fib6_prefsrc.addr;
4761 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4762 goto nla_put_failure;
4763 }
4764
4765 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
4766 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
4767 goto nla_put_failure;
4768
4769 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
4770 goto nla_put_failure;
4771
4772 /* For multipath routes, walk the siblings list and add
4773 * each as a nexthop within RTA_MULTIPATH.
4774 */
4775 if (rt6) {
4776 if (rt6_flags & RTF_GATEWAY &&
4777 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
4778 goto nla_put_failure;
4779
4780 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
4781 goto nla_put_failure;
4782 } else if (rt->fib6_nsiblings) {
4783 struct fib6_info *sibling, *next_sibling;
4784 struct nlattr *mp;
4785
4786 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
4787 if (!mp)
4788 goto nla_put_failure;
4789
4790 if (fib_add_nexthop(skb, &rt->fib6_nh.nh_common,
4791 rt->fib6_nh.fib_nh_weight) < 0)
4792 goto nla_put_failure;
4793
4794 list_for_each_entry_safe(sibling, next_sibling,
4795 &rt->fib6_siblings, fib6_siblings) {
4796 if (fib_add_nexthop(skb, &sibling->fib6_nh.nh_common,
4797 sibling->fib6_nh.fib_nh_weight) < 0)
4798 goto nla_put_failure;
4799 }
4800
4801 nla_nest_end(skb, mp);
4802 } else {
4803 unsigned char nh_flags = 0;
4804
4805 if (fib_nexthop_info(skb, &rt->fib6_nh.nh_common,
4806 &nh_flags, false) < 0)
4807 goto nla_put_failure;
4808
4809 rtm->rtm_flags |= nh_flags;
4810 }
4811
4812 if (rt6_flags & RTF_EXPIRES) {
4813 expires = dst ? dst->expires : rt->expires;
4814 expires -= jiffies;
4815 }
4816
4817 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
4818 goto nla_put_failure;
4819
4820 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
4821 goto nla_put_failure;
4822
4824 nlmsg_end(skb, nlh);
4825 return 0;
4826
4827 nla_put_failure:
4828 nlmsg_cancel(skb, nlh);
4829 return -EMSGSIZE;
4830 }
4831
4832 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
4833 const struct net_device *dev)
4834 {
4835 if (f6i->fib6_nh.fib_nh_dev == dev)
4836 return true;
4837
4838 if (f6i->fib6_nsiblings) {
4839 struct fib6_info *sibling, *next_sibling;
4840
4841 list_for_each_entry_safe(sibling, next_sibling,
4842 &f6i->fib6_siblings, fib6_siblings) {
4843 if (sibling->fib6_nh.fib_nh_dev == dev)
4844 return true;
4845 }
4846 }
4847
4848 return false;
4849 }
4850
4851 int rt6_dump_route(struct fib6_info *rt, void *p_arg)
4852 {
4853 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
4854 struct fib_dump_filter *filter = &arg->filter;
4855 unsigned int flags = NLM_F_MULTI;
4856 struct net *net = arg->net;
4857
4858 if (rt == net->ipv6.fib6_null_entry)
4859 return 0;
4860
4861 if ((filter->flags & RTM_F_PREFIX) &&
4862 !(rt->fib6_flags & RTF_PREFIX_RT)) {
4863 /* success since this is not a prefix route */
4864 return 1;
4865 }
4866 if (filter->filter_set) {
4867 if ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
4868 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
4869 (filter->protocol && rt->fib6_protocol != filter->protocol)) {
4870 return 1;
4871 }
4872 flags |= NLM_F_DUMP_FILTERED;
4873 }
4874
4875 return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
4876 RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
4877 arg->cb->nlh->nlmsg_seq, flags);
4878 }
4879
4880 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
4881 const struct nlmsghdr *nlh,
4882 struct nlattr **tb,
4883 struct netlink_ext_ack *extack)
4884 {
4885 struct rtmsg *rtm;
4886 int i, err;
4887
4888 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
4889 NL_SET_ERR_MSG_MOD(extack,
4890 "Invalid header for get route request");
4891 return -EINVAL;
4892 }
4893
4894 if (!netlink_strict_get_check(skb))
4895 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4896 rtm_ipv6_policy, extack);
4897
4898 rtm = nlmsg_data(nlh);
4899 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
4900 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
4901 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
4902 rtm->rtm_type) {
4903 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
4904 return -EINVAL;
4905 }
4906 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
4907 NL_SET_ERR_MSG_MOD(extack,
4908 "Invalid flags for get route request");
4909 return -EINVAL;
4910 }
4911
4912 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
4913 rtm_ipv6_policy, extack);
4914 if (err)
4915 return err;
4916
4917 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
4918 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
4919 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
4920 return -EINVAL;
4921 }
4922
4923 for (i = 0; i <= RTA_MAX; i++) {
4924 if (!tb[i])
4925 continue;
4926
4927 switch (i) {
4928 case RTA_SRC:
4929 case RTA_DST:
4930 case RTA_IIF:
4931 case RTA_OIF:
4932 case RTA_MARK:
4933 case RTA_UID:
4934 case RTA_SPORT:
4935 case RTA_DPORT:
4936 case RTA_IP_PROTO:
4937 break;
4938 default:
4939 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
4940 return -EINVAL;
4941 }
4942 }
4943
4944 return 0;
4945 }
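/* The strict checks above apply only when the requesting socket has
 * opted in via setsockopt(SOL_NETLINK, NETLINK_GET_STRICT_CHK, ...),
 * which is what netlink_strict_get_check() tests; legacy sockets take
 * the plain nlmsg_parse_deprecated() path so old binaries keep
 * working.
 */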
4946
4947 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4948 struct netlink_ext_ack *extack)
4949 {
4950 struct net *net = sock_net(in_skb->sk);
4951 struct nlattr *tb[RTA_MAX+1];
4952 int err, iif = 0, oif = 0;
4953 struct fib6_info *from;
4954 struct dst_entry *dst;
4955 struct rt6_info *rt;
4956 struct sk_buff *skb;
4957 struct rtmsg *rtm;
4958 struct flowi6 fl6 = {};
4959 bool fibmatch;
4960
4961 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
4962 if (err < 0)
4963 goto errout;
4964
4965 err = -EINVAL;
4966 rtm = nlmsg_data(nlh);
4967 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
4968 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
4969
4970 if (tb[RTA_SRC]) {
4971 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
4972 goto errout;
4973
4974 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
4975 }
4976
4977 if (tb[RTA_DST]) {
4978 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
4979 goto errout;
4980
4981 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
4982 }
4983
4984 if (tb[RTA_IIF])
4985 iif = nla_get_u32(tb[RTA_IIF]);
4986
4987 if (tb[RTA_OIF])
4988 oif = nla_get_u32(tb[RTA_OIF]);
4989
4990 if (tb[RTA_MARK])
4991 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
4992
4993 if (tb[RTA_UID])
4994 fl6.flowi6_uid = make_kuid(current_user_ns(),
4995 nla_get_u32(tb[RTA_UID]));
4996 else
4997 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
4998
4999 if (tb[RTA_SPORT])
5000 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
5001
5002 if (tb[RTA_DPORT])
5003 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
5004
5005 if (tb[RTA_IP_PROTO]) {
5006 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
5007 &fl6.flowi6_proto, AF_INET6,
5008 extack);
5009 if (err)
5010 goto errout;
5011 }
5012
5013 if (iif) {
5014 struct net_device *dev;
5015 int flags = 0;
5016
5017 rcu_read_lock();
5018
5019 dev = dev_get_by_index_rcu(net, iif);
5020 if (!dev) {
5021 rcu_read_unlock();
5022 err = -ENODEV;
5023 goto errout;
5024 }
5025
5026 fl6.flowi6_iif = iif;
5027
5028 if (!ipv6_addr_any(&fl6.saddr))
5029 flags |= RT6_LOOKUP_F_HAS_SADDR;
5030
5031 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
5032
5033 rcu_read_unlock();
5034 } else {
5035 fl6.flowi6_oif = oif;
5036
5037 dst = ip6_route_output(net, NULL, &fl6);
5038 }
5039
5041 rt = container_of(dst, struct rt6_info, dst);
5042 if (rt->dst.error) {
5043 err = rt->dst.error;
5044 ip6_rt_put(rt);
5045 goto errout;
5046 }
5047
5048 if (rt == net->ipv6.ip6_null_entry) {
5049 err = rt->dst.error;
5050 ip6_rt_put(rt);
5051 goto errout;
5052 }
5053
5054 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
5055 if (!skb) {
5056 ip6_rt_put(rt);
5057 err = -ENOBUFS;
5058 goto errout;
5059 }
5060
5061 skb_dst_set(skb, &rt->dst);
5062
5063 rcu_read_lock();
5064 from = rcu_dereference(rt->from);
5065 if (from) {
5066 if (fibmatch)
5067 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
5068 iif, RTM_NEWROUTE,
5069 NETLINK_CB(in_skb).portid,
5070 nlh->nlmsg_seq, 0);
5071 else
5072 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
5073 &fl6.saddr, iif, RTM_NEWROUTE,
5074 NETLINK_CB(in_skb).portid,
5075 nlh->nlmsg_seq, 0);
5076 } else {
5077 err = -ENETUNREACH;
5078 }
5079 rcu_read_unlock();
5080
5081 if (err < 0) {
5082 kfree_skb(skb);
5083 goto errout;
5084 }
5085
5086 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
5087 errout:
5088 return err;
5089 }
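/* Illustrative usage: "ip -6 route get 2001:db8::1" sends RTM_GETROUTE
 * with RTA_DST and receives the resolved dst entry; appending
 * "fibmatch" sets RTM_F_FIB_MATCH, in which case the reply describes
 * the matching FIB entry (the fibmatch branch above) rather than the
 * computed route.
 */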
5090
5091 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
5092 unsigned int nlm_flags)
5093 {
5094 struct sk_buff *skb;
5095 struct net *net = info->nl_net;
5096 u32 seq;
5097 int err;
5098
5099 err = -ENOBUFS;
5100 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
5101
5102 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
5103 if (!skb)
5104 goto errout;
5105
5106 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
5107 event, info->portid, seq, nlm_flags);
5108 if (err < 0) {
5109 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
5110 WARN_ON(err == -EMSGSIZE);
5111 kfree_skb(skb);
5112 goto errout;
5113 }
5114 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
5115 info->nlh, gfp_any());
5116 return;
5117 errout:
5118 if (err < 0)
5119 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
5120 }
5121
5122 static int ip6_route_dev_notify(struct notifier_block *this,
5123 unsigned long event, void *ptr)
5124 {
5125 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5126 struct net *net = dev_net(dev);
5127
5128 if (!(dev->flags & IFF_LOOPBACK))
5129 return NOTIFY_OK;
5130
5131 if (event == NETDEV_REGISTER) {
5132 net->ipv6.fib6_null_entry->fib6_nh.fib_nh_dev = dev;
5133 net->ipv6.ip6_null_entry->dst.dev = dev;
5134 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
5135 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5136 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
5137 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
5138 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
5139 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
5140 #endif
5141 } else if (event == NETDEV_UNREGISTER &&
5142 dev->reg_state != NETREG_UNREGISTERED) {
5143 /* NETDEV_UNREGISTER could be fired multiple times by
5144 * netdev_wait_allrefs(). Make sure we only call this once.
5145 */
5146 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
5147 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5148 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
5149 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
5150 #endif
5151 }
5152
5153 return NOTIFY_OK;
5154 }
5155
5156 /*
5157 * /proc
5158 */
5159
5160 #ifdef CONFIG_PROC_FS
5161 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
5162 {
5163 struct net *net = (struct net *)seq->private;
5164 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
5165 net->ipv6.rt6_stats->fib_nodes,
5166 net->ipv6.rt6_stats->fib_route_nodes,
5167 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
5168 net->ipv6.rt6_stats->fib_rt_entries,
5169 net->ipv6.rt6_stats->fib_rt_cache,
5170 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
5171 net->ipv6.rt6_stats->fib_discarded_routes);
5172
5173 return 0;
5174 }
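/* Example output of "cat /proc/net/rt6_stats" (values hypothetical):
 *
 *	0043 0028 0000 001e 0000 0005 0000
 *
 * i.e. fib_nodes, fib_route_nodes, fib_rt_alloc, fib_rt_entries,
 * fib_rt_cache, the slow dst entry count and fib_discarded_routes,
 * printed as %04x hex fields in that order.
 */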
5175 #endif /* CONFIG_PROC_FS */
5176
5177 #ifdef CONFIG_SYSCTL
5178
5179 static
5180 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
5181 void __user *buffer, size_t *lenp, loff_t *ppos)
5182 {
5183 struct net *net;
5184 int delay;
5185 int ret;
5186 if (!write)
5187 return -EINVAL;
5188
5189 net = (struct net *)ctl->extra1;
5190 delay = net->ipv6.sysctl.flush_delay;
5191 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5192 if (ret)
5193 return ret;
5194
5195 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
5196 return 0;
5197 }
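/* Usage: "echo 0 > /proc/sys/net/ipv6/route/flush" triggers an
 * immediate GC run. Note that 'delay' is sampled before
 * proc_dointvec() stores the newly written value, so the interval
 * passed to fib6_run_gc() is the one that was in effect before this
 * write.
 */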
5198
5199 static int zero;
5200 static int one = 1;
5201
5202 static struct ctl_table ipv6_route_table_template[] = {
5203 {
5204 .procname = "flush",
5205 .data = &init_net.ipv6.sysctl.flush_delay,
5206 .maxlen = sizeof(int),
5207 .mode = 0200,
5208 .proc_handler = ipv6_sysctl_rtcache_flush
5209 },
5210 {
5211 .procname = "gc_thresh",
5212 .data = &ip6_dst_ops_template.gc_thresh,
5213 .maxlen = sizeof(int),
5214 .mode = 0644,
5215 .proc_handler = proc_dointvec,
5216 },
5217 {
5218 .procname = "max_size",
5219 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
5220 .maxlen = sizeof(int),
5221 .mode = 0644,
5222 .proc_handler = proc_dointvec,
5223 },
5224 {
5225 .procname = "gc_min_interval",
5226 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
5227 .maxlen = sizeof(int),
5228 .mode = 0644,
5229 .proc_handler = proc_dointvec_jiffies,
5230 },
5231 {
5232 .procname = "gc_timeout",
5233 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
5234 .maxlen = sizeof(int),
5235 .mode = 0644,
5236 .proc_handler = proc_dointvec_jiffies,
5237 },
5238 {
5239 .procname = "gc_interval",
5240 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
5241 .maxlen = sizeof(int),
5242 .mode = 0644,
5243 .proc_handler = proc_dointvec_jiffies,
5244 },
5245 {
5246 .procname = "gc_elasticity",
5247 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
5248 .maxlen = sizeof(int),
5249 .mode = 0644,
5250 .proc_handler = proc_dointvec,
5251 },
5252 {
5253 .procname = "mtu_expires",
5254 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
5255 .maxlen = sizeof(int),
5256 .mode = 0644,
5257 .proc_handler = proc_dointvec_jiffies,
5258 },
5259 {
5260 .procname = "min_adv_mss",
5261 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
5262 .maxlen = sizeof(int),
5263 .mode = 0644,
5264 .proc_handler = proc_dointvec,
5265 },
5266 {
5267 .procname = "gc_min_interval_ms",
5268 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
5269 .maxlen = sizeof(int),
5270 .mode = 0644,
5271 .proc_handler = proc_dointvec_ms_jiffies,
5272 },
5273 {
5274 .procname = "skip_notify_on_dev_down",
5275 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
5276 .maxlen = sizeof(int),
5277 .mode = 0644,
5278 .proc_handler = proc_dointvec,
5279 .extra1 = &zero,
5280 .extra2 = &one,
5281 },
5282 { }
5283 };
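/* These entries surface under /proc/sys/net/ipv6/route/, e.g.
 * /proc/sys/net/ipv6/route/gc_thresh and
 * /proc/sys/net/ipv6/route/max_size; ipv6_route_sysctl_init() below
 * repoints each .data member at the per-netns copy of the template.
 */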
5284
5285 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
5286 {
5287 struct ctl_table *table;
5288
5289 table = kmemdup(ipv6_route_table_template,
5290 sizeof(ipv6_route_table_template),
5291 GFP_KERNEL);
5292
5293 if (table) {
5294 table[0].data = &net->ipv6.sysctl.flush_delay;
5295 table[0].extra1 = net;
5296 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
5297 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
5298 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
5299 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
5300 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
5301 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
5302 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
5303 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
5304 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
5305 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
5306
5307 /* Don't export sysctls to unprivileged users */
5308 if (net->user_ns != &init_user_ns)
5309 table[0].procname = NULL;
5310 }
5311
5312 return table;
5313 }
5314 #endif
5315
5316 static int __net_init ip6_route_net_init(struct net *net)
5317 {
5318 int ret = -ENOMEM;
5319
5320 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
5321 sizeof(net->ipv6.ip6_dst_ops));
5322
5323 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
5324 goto out_ip6_dst_ops;
5325
5326 net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template,
5327 sizeof(*net->ipv6.fib6_null_entry),
5328 GFP_KERNEL);
5329 if (!net->ipv6.fib6_null_entry)
5330 goto out_ip6_dst_entries;
5331
5332 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
5333 sizeof(*net->ipv6.ip6_null_entry),
5334 GFP_KERNEL);
5335 if (!net->ipv6.ip6_null_entry)
5336 goto out_fib6_null_entry;
5337 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5338 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
5339 ip6_template_metrics, true);
5340
5341 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5342 net->ipv6.fib6_has_custom_rules = false;
5343 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
5344 sizeof(*net->ipv6.ip6_prohibit_entry),
5345 GFP_KERNEL);
5346 if (!net->ipv6.ip6_prohibit_entry)
5347 goto out_ip6_null_entry;
5348 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5349 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
5350 ip6_template_metrics, true);
5351
5352 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
5353 sizeof(*net->ipv6.ip6_blk_hole_entry),
5354 GFP_KERNEL);
5355 if (!net->ipv6.ip6_blk_hole_entry)
5356 goto out_ip6_prohibit_entry;
5357 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5358 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
5359 ip6_template_metrics, true);
5360 #endif
5361
5362 net->ipv6.sysctl.flush_delay = 0;
5363 net->ipv6.sysctl.ip6_rt_max_size = 4096;
5364 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
5365 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
5366 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
5367 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
5368 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
5369 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
5370 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
5371
5372 net->ipv6.ip6_rt_gc_expire = 30*HZ;
5373
5374 ret = 0;
5375 out:
5376 return ret;
5377
5378 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5379 out_ip6_prohibit_entry:
5380 kfree(net->ipv6.ip6_prohibit_entry);
5381 out_ip6_null_entry:
5382 kfree(net->ipv6.ip6_null_entry);
5383 #endif
5384 out_fib6_null_entry:
5385 kfree(net->ipv6.fib6_null_entry);
5386 out_ip6_dst_entries:
5387 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
5388 out_ip6_dst_ops:
5389 goto out;
5390 }
5391
5392 static void __net_exit ip6_route_net_exit(struct net *net)
5393 {
5394 kfree(net->ipv6.fib6_null_entry);
5395 kfree(net->ipv6.ip6_null_entry);
5396 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5397 kfree(net->ipv6.ip6_prohibit_entry);
5398 kfree(net->ipv6.ip6_blk_hole_entry);
5399 #endif
5400 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
5401 }
5402
5403 static int __net_init ip6_route_net_init_late(struct net *net)
5404 {
5405 #ifdef CONFIG_PROC_FS
5406 proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
5407 sizeof(struct ipv6_route_iter));
5408 proc_create_net_single("rt6_stats", 0444, net->proc_net,
5409 rt6_stats_seq_show, NULL);
5410 #endif
5411 return 0;
5412 }
5413
5414 static void __net_exit ip6_route_net_exit_late(struct net *net)
5415 {
5416 #ifdef CONFIG_PROC_FS
5417 remove_proc_entry("ipv6_route", net->proc_net);
5418 remove_proc_entry("rt6_stats", net->proc_net);
5419 #endif
5420 }
5421
5422 static struct pernet_operations ip6_route_net_ops = {
5423 .init = ip6_route_net_init,
5424 .exit = ip6_route_net_exit,
5425 };
5426
5427 static int __net_init ipv6_inetpeer_init(struct net *net)
5428 {
5429 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
5430
5431 if (!bp)
5432 return -ENOMEM;
5433 inet_peer_base_init(bp);
5434 net->ipv6.peers = bp;
5435 return 0;
5436 }
5437
5438 static void __net_exit ipv6_inetpeer_exit(struct net *net)
5439 {
5440 struct inet_peer_base *bp = net->ipv6.peers;
5441
5442 net->ipv6.peers = NULL;
5443 inetpeer_invalidate_tree(bp);
5444 kfree(bp);
5445 }
5446
5447 static struct pernet_operations ipv6_inetpeer_ops = {
5448 .init = ipv6_inetpeer_init,
5449 .exit = ipv6_inetpeer_exit,
5450 };
5451
5452 static struct pernet_operations ip6_route_net_late_ops = {
5453 .init = ip6_route_net_init_late,
5454 .exit = ip6_route_net_exit_late,
5455 };
5456
5457 static struct notifier_block ip6_route_dev_notifier = {
5458 .notifier_call = ip6_route_dev_notify,
5459 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
5460 };
5461
5462 void __init ip6_route_init_special_entries(void)
5463 {
5464 /* Registration of the loopback device happens before this portion
5465 * of code runs, so the loopback reference in rt6_info is not taken
5466 * automatically; take it manually for init_net. */
5467 init_net.ipv6.fib6_null_entry->fib6_nh.fib_nh_dev = init_net.loopback_dev;
5468 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
5469 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5470 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5471 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
5472 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5473 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
5474 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5475 #endif
5476 }
5477
5478 int __init ip6_route_init(void)
5479 {
5480 int ret;
5481 int cpu;
5482
5483 ret = -ENOMEM;
5484 ip6_dst_ops_template.kmem_cachep =
5485 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
5486 SLAB_HWCACHE_ALIGN, NULL);
5487 if (!ip6_dst_ops_template.kmem_cachep)
5488 goto out;
5489
5490 ret = dst_entries_init(&ip6_dst_blackhole_ops);
5491 if (ret)
5492 goto out_kmem_cache;
5493
5494 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
5495 if (ret)
5496 goto out_dst_entries;
5497
5498 ret = register_pernet_subsys(&ip6_route_net_ops);
5499 if (ret)
5500 goto out_register_inetpeer;
5501
5502 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
5503
5504 ret = fib6_init();
5505 if (ret)
5506 goto out_register_subsys;
5507
5508 ret = xfrm6_init();
5509 if (ret)
5510 goto out_fib6_init;
5511
5512 ret = fib6_rules_init();
5513 if (ret)
5514 goto xfrm6_init;
5515
5516 ret = register_pernet_subsys(&ip6_route_net_late_ops);
5517 if (ret)
5518 goto fib6_rules_init;
5519
5520 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
5521 inet6_rtm_newroute, NULL, 0);
5522 if (ret < 0)
5523 goto out_register_late_subsys;
5524
5525 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
5526 inet6_rtm_delroute, NULL, 0);
5527 if (ret < 0)
5528 goto out_register_late_subsys;
5529
5530 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
5531 inet6_rtm_getroute, NULL,
5532 RTNL_FLAG_DOIT_UNLOCKED);
5533 if (ret < 0)
5534 goto out_register_late_subsys;
5535
5536 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
5537 if (ret)
5538 goto out_register_late_subsys;
5539
5540 for_each_possible_cpu(cpu) {
5541 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
5542
5543 INIT_LIST_HEAD(&ul->head);
5544 spin_lock_init(&ul->lock);
5545 }
5546
5547 out:
5548 return ret;
5549
5550 out_register_late_subsys:
5551 rtnl_unregister_all(PF_INET6);
5552 unregister_pernet_subsys(&ip6_route_net_late_ops);
5553 fib6_rules_init:
5554 fib6_rules_cleanup();
5555 xfrm6_init:
5556 xfrm6_fini();
5557 out_fib6_init:
5558 fib6_gc_cleanup();
5559 out_register_subsys:
5560 unregister_pernet_subsys(&ip6_route_net_ops);
5561 out_register_inetpeer:
5562 unregister_pernet_subsys(&ipv6_inetpeer_ops);
5563 out_dst_entries:
5564 dst_entries_destroy(&ip6_dst_blackhole_ops);
5565 out_kmem_cache:
5566 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
5567 goto out;
5568 }
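/* The error labels above unwind in strict reverse order of setup, the
 * usual kernel goto-cleanup idiom: each failure site jumps to the
 * label that undoes exactly what had been registered so far.
 */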
5569
5570 void ip6_route_cleanup(void)
5571 {
5572 unregister_netdevice_notifier(&ip6_route_dev_notifier);
5573 unregister_pernet_subsys(&ip6_route_net_late_ops);
5574 fib6_rules_cleanup();
5575 xfrm6_fini();
5576 fib6_gc_cleanup();
5577 unregister_pernet_subsys(&ipv6_inetpeer_ops);
5578 unregister_pernet_subsys(&ip6_route_net_ops);
5579 dst_entries_destroy(&ip6_dst_blackhole_ops);
5580 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
5581 }