net/ipv6/addrconf.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * IPv6 Address [auto]configuration
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
9 */
10
11 /*
12 * Changes:
13 *
14 * Janos Farkas : delete timer on ifdown
15 * <chexum@bankinf.banki.hu>
16 * Andi Kleen : kill double kfree on module
17 * unload.
18 * Maciej W. Rozycki : FDDI support
19 * sekiya@USAGI : Don't send too many RS
20 * packets.
21 * yoshfuji@USAGI : Fixed interval between DAD
22 * packets.
23 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
24 * address validation timer.
25 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
26 * support.
27 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
28 * address on a same interface.
29 * YOSHIFUJI Hideaki @USAGI : ARCnet support
30 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
31 * seq_file.
32 * YOSHIFUJI Hideaki @USAGI : improved source address
33 * selection; consider scope,
34 * status etc.
35 */
36
37 #define pr_fmt(fmt) "IPv6: " fmt
38
39 #include <linux/errno.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/sched/signal.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/net.h>
46 #include <linux/inet.h>
47 #include <linux/in6.h>
48 #include <linux/netdevice.h>
49 #include <linux/if_addr.h>
50 #include <linux/if_arp.h>
51 #include <linux/if_arcnet.h>
52 #include <linux/if_infiniband.h>
53 #include <linux/route.h>
54 #include <linux/inetdevice.h>
55 #include <linux/init.h>
56 #include <linux/slab.h>
57 #ifdef CONFIG_SYSCTL
58 #include <linux/sysctl.h>
59 #endif
60 #include <linux/capability.h>
61 #include <linux/delay.h>
62 #include <linux/notifier.h>
63 #include <linux/string.h>
64 #include <linux/hash.h>
65
66 #include <net/net_namespace.h>
67 #include <net/sock.h>
68 #include <net/snmp.h>
69
70 #include <net/6lowpan.h>
71 #include <net/firewire.h>
72 #include <net/ipv6.h>
73 #include <net/protocol.h>
74 #include <net/ndisc.h>
75 #include <net/ip6_route.h>
76 #include <net/addrconf.h>
77 #include <net/tcp.h>
78 #include <net/ip.h>
79 #include <net/netlink.h>
80 #include <net/pkt_sched.h>
81 #include <net/l3mdev.h>
82 #include <linux/if_tunnel.h>
83 #include <linux/rtnetlink.h>
84 #include <linux/netconf.h>
85 #include <linux/random.h>
86 #include <linux/uaccess.h>
87 #include <asm/unaligned.h>
88
89 #include <linux/proc_fs.h>
90 #include <linux/seq_file.h>
91 #include <linux/export.h>
92 #include <linux/ioam6.h>
93
94 #define INFINITY_LIFE_TIME 0xFFFFFFFF
95
96 #define IPV6_MAX_STRLEN \
97 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
98
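/*
 * cstamp_delta() below converts a jiffies timestamp (measured from
 * INITIAL_JIFFIES) into hundredths of a second, the unit used for the
 * cstamp/tstamp fields reported to userspace in struct ifa_cacheinfo.
 */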
99 static inline u32 cstamp_delta(unsigned long cstamp)
100 {
101 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
102 }
103
104 static inline s32 rfc3315_s14_backoff_init(s32 irt)
105 {
106 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
107 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
108 do_div(tmp, 1000000);
109 return (s32)tmp;
110 }
111
112 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
113 {
114 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
115 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
116 do_div(tmp, 1000000);
117 if ((s32)tmp > mrt) {
118 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
119 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
120 do_div(tmp, 1000000);
121 }
122 return (s32)tmp;
123 }
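/*
 * Illustrative sketch: how the two RFC 3315 section 14 helpers above are
 * typically chained to build a retransmission schedule, much like the
 * router-solicitation timer in this file does.  The names below
 * (demo_backoff_schedule, irt, mrt, solicits) are hypothetical and exist
 * only for this example.
 */
#if 0	/* example only, not compiled */
static s32 demo_backoff_schedule(s32 irt, s32 mrt, int solicits)
{
	s32 rt = rfc3315_s14_backoff_init(irt);	/* irt * [0.9, 1.1] */
	int i;

	for (i = 1; i < solicits; i++)
		/* roughly doubles each round until it settles around mrt */
		rt = rfc3315_s14_backoff_update(rt, mrt);

	return rt;
}
#endif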
124
125 #ifdef CONFIG_SYSCTL
126 static int addrconf_sysctl_register(struct inet6_dev *idev);
127 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
128 #else
129 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
130 {
131 return 0;
132 }
133
134 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
135 {
136 }
137 #endif
138
139 static void ipv6_gen_rnd_iid(struct in6_addr *addr);
140
141 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
142 static int ipv6_count_addresses(const struct inet6_dev *idev);
143 static int ipv6_generate_stable_address(struct in6_addr *addr,
144 u8 dad_count,
145 const struct inet6_dev *idev);
146
147 #define IN6_ADDR_HSIZE_SHIFT 8
148 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
149 /*
150 * Configured unicast address hash table
151 */
152 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
153 static DEFINE_SPINLOCK(addrconf_hash_lock);
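/*
 * Writers serialize on addrconf_hash_lock (see ipv6_add_addr_hash() and
 * ipv6_del_addr()), while lookups such as __ipv6_chk_addr_and_flags() walk
 * the chains under rcu_read_lock() using the _rcu hlist helpers.
 */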
154
155 static void addrconf_verify(void);
156 static void addrconf_verify_rtnl(void);
157 static void addrconf_verify_work(struct work_struct *);
158
159 static struct workqueue_struct *addrconf_wq;
160 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
161
162 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
163 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
164
165 static void addrconf_type_change(struct net_device *dev,
166 unsigned long event);
167 static int addrconf_ifdown(struct net_device *dev, bool unregister);
168
169 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
170 int plen,
171 const struct net_device *dev,
172 u32 flags, u32 noflags,
173 bool no_gw);
174
175 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
176 static void addrconf_dad_work(struct work_struct *w);
177 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
178 bool send_na);
179 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
180 static void addrconf_rs_timer(struct timer_list *t);
181 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
182 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
183
184 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
185 struct prefix_info *pinfo);
186
187 static struct ipv6_devconf ipv6_devconf __read_mostly = {
188 .forwarding = 0,
189 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
190 .mtu6 = IPV6_MIN_MTU,
191 .accept_ra = 1,
192 .accept_redirects = 1,
193 .autoconf = 1,
194 .force_mld_version = 0,
195 .mldv1_unsolicited_report_interval = 10 * HZ,
196 .mldv2_unsolicited_report_interval = HZ,
197 .dad_transmits = 1,
198 .rtr_solicits = MAX_RTR_SOLICITATIONS,
199 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
200 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
201 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
202 .use_tempaddr = 0,
203 .temp_valid_lft = TEMP_VALID_LIFETIME,
204 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
205 .regen_max_retry = REGEN_MAX_RETRY,
206 .max_desync_factor = MAX_DESYNC_FACTOR,
207 .max_addresses = IPV6_MAX_ADDRESSES,
208 .accept_ra_defrtr = 1,
209 .ra_defrtr_metric = IP6_RT_PRIO_USER,
210 .accept_ra_from_local = 0,
211 .accept_ra_min_hop_limit= 1,
212 .accept_ra_pinfo = 1,
213 #ifdef CONFIG_IPV6_ROUTER_PREF
214 .accept_ra_rtr_pref = 1,
215 .rtr_probe_interval = 60 * HZ,
216 #ifdef CONFIG_IPV6_ROUTE_INFO
217 .accept_ra_rt_info_min_plen = 0,
218 .accept_ra_rt_info_max_plen = 0,
219 #endif
220 #endif
221 .proxy_ndp = 0,
222 .accept_source_route = 0, /* we do not accept RH0 by default. */
223 .disable_ipv6 = 0,
224 .accept_dad = 0,
225 .suppress_frag_ndisc = 1,
226 .accept_ra_mtu = 1,
227 .stable_secret = {
228 .initialized = false,
229 },
230 .use_oif_addrs_only = 0,
231 .ignore_routes_with_linkdown = 0,
232 .keep_addr_on_down = 0,
233 .seg6_enabled = 0,
234 #ifdef CONFIG_IPV6_SEG6_HMAC
235 .seg6_require_hmac = 0,
236 #endif
237 .enhanced_dad = 1,
238 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
239 .disable_policy = 0,
240 .rpl_seg_enabled = 0,
241 .ioam6_enabled = 0,
242 .ioam6_id = IOAM6_DEFAULT_IF_ID,
243 .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
244 };
245
246 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
247 .forwarding = 0,
248 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
249 .mtu6 = IPV6_MIN_MTU,
250 .accept_ra = 1,
251 .accept_redirects = 1,
252 .autoconf = 1,
253 .force_mld_version = 0,
254 .mldv1_unsolicited_report_interval = 10 * HZ,
255 .mldv2_unsolicited_report_interval = HZ,
256 .dad_transmits = 1,
257 .rtr_solicits = MAX_RTR_SOLICITATIONS,
258 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
259 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
260 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
261 .use_tempaddr = 0,
262 .temp_valid_lft = TEMP_VALID_LIFETIME,
263 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
264 .regen_max_retry = REGEN_MAX_RETRY,
265 .max_desync_factor = MAX_DESYNC_FACTOR,
266 .max_addresses = IPV6_MAX_ADDRESSES,
267 .accept_ra_defrtr = 1,
268 .ra_defrtr_metric = IP6_RT_PRIO_USER,
269 .accept_ra_from_local = 0,
270 .accept_ra_min_hop_limit= 1,
271 .accept_ra_pinfo = 1,
272 #ifdef CONFIG_IPV6_ROUTER_PREF
273 .accept_ra_rtr_pref = 1,
274 .rtr_probe_interval = 60 * HZ,
275 #ifdef CONFIG_IPV6_ROUTE_INFO
276 .accept_ra_rt_info_min_plen = 0,
277 .accept_ra_rt_info_max_plen = 0,
278 #endif
279 #endif
280 .proxy_ndp = 0,
281 .accept_source_route = 0, /* we do not accept RH0 by default. */
282 .disable_ipv6 = 0,
283 .accept_dad = 1,
284 .suppress_frag_ndisc = 1,
285 .accept_ra_mtu = 1,
286 .stable_secret = {
287 .initialized = false,
288 },
289 .use_oif_addrs_only = 0,
290 .ignore_routes_with_linkdown = 0,
291 .keep_addr_on_down = 0,
292 .seg6_enabled = 0,
293 #ifdef CONFIG_IPV6_SEG6_HMAC
294 .seg6_require_hmac = 0,
295 #endif
296 .enhanced_dad = 1,
297 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
298 .disable_policy = 0,
299 .rpl_seg_enabled = 0,
300 .ioam6_enabled = 0,
301 .ioam6_id = IOAM6_DEFAULT_IF_ID,
302 .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
303 };
304
305 /* Check if link is ready: is it up and is a valid qdisc available */
306 static inline bool addrconf_link_ready(const struct net_device *dev)
307 {
308 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
309 }
310
311 static void addrconf_del_rs_timer(struct inet6_dev *idev)
312 {
313 if (del_timer(&idev->rs_timer))
314 __in6_dev_put(idev);
315 }
316
317 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
318 {
319 if (cancel_delayed_work(&ifp->dad_work))
320 __in6_ifa_put(ifp);
321 }
322
323 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
324 unsigned long when)
325 {
326 if (!timer_pending(&idev->rs_timer))
327 in6_dev_hold(idev);
328 mod_timer(&idev->rs_timer, jiffies + when);
329 }
330
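/*
 * Arm or re-arm the DAD worker while keeping the ifaddr refcount balanced:
 * mod_delayed_work() returns true when the work item was already pending
 * (and therefore already holds a reference), in which case the reference
 * taken below is dropped again.
 */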
331 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
332 unsigned long delay)
333 {
334 in6_ifa_hold(ifp);
335 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
336 in6_ifa_put(ifp);
337 }
338
339 static int snmp6_alloc_dev(struct inet6_dev *idev)
340 {
341 int i;
342
343 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
344 if (!idev->stats.ipv6)
345 goto err_ip;
346
347 for_each_possible_cpu(i) {
348 struct ipstats_mib *addrconf_stats;
349 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
350 u64_stats_init(&addrconf_stats->syncp);
351 }
352
353
354 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
355 GFP_KERNEL);
356 if (!idev->stats.icmpv6dev)
357 goto err_icmp;
358 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
359 GFP_KERNEL);
360 if (!idev->stats.icmpv6msgdev)
361 goto err_icmpmsg;
362
363 return 0;
364
365 err_icmpmsg:
366 kfree(idev->stats.icmpv6dev);
367 err_icmp:
368 free_percpu(idev->stats.ipv6);
369 err_ip:
370 return -ENOMEM;
371 }
372
373 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
374 {
375 struct inet6_dev *ndev;
376 int err = -ENOMEM;
377
378 ASSERT_RTNL();
379
380 if (dev->mtu < IPV6_MIN_MTU)
381 return ERR_PTR(-EINVAL);
382
383 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
384 if (!ndev)
385 return ERR_PTR(err);
386
387 rwlock_init(&ndev->lock);
388 ndev->dev = dev;
389 INIT_LIST_HEAD(&ndev->addr_list);
390 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
391 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
392
393 if (ndev->cnf.stable_secret.initialized)
394 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
395
396 ndev->cnf.mtu6 = dev->mtu;
397 ndev->ra_mtu = 0;
398 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
399 if (!ndev->nd_parms) {
400 kfree(ndev);
401 return ERR_PTR(err);
402 }
403 if (ndev->cnf.forwarding)
404 dev_disable_lro(dev);
405 	/* Take a reference on the device */
406 dev_hold(dev);
407
408 if (snmp6_alloc_dev(ndev) < 0) {
409 netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
410 __func__);
411 neigh_parms_release(&nd_tbl, ndev->nd_parms);
412 dev_put(dev);
413 kfree(ndev);
414 return ERR_PTR(err);
415 }
416
417 if (snmp6_register_dev(ndev) < 0) {
418 netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
419 __func__, dev->name);
420 goto err_release;
421 }
422
423 /* One reference from device. */
424 refcount_set(&ndev->refcnt, 1);
425
426 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
427 ndev->cnf.accept_dad = -1;
428
429 #if IS_ENABLED(CONFIG_IPV6_SIT)
430 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
431 pr_info("%s: Disabled Multicast RS\n", dev->name);
432 ndev->cnf.rtr_solicits = 0;
433 }
434 #endif
435
436 INIT_LIST_HEAD(&ndev->tempaddr_list);
437 ndev->desync_factor = U32_MAX;
438 if ((dev->flags&IFF_LOOPBACK) ||
439 dev->type == ARPHRD_TUNNEL ||
440 dev->type == ARPHRD_TUNNEL6 ||
441 dev->type == ARPHRD_SIT ||
442 dev->type == ARPHRD_NONE) {
443 ndev->cnf.use_tempaddr = -1;
444 }
445
446 ndev->token = in6addr_any;
447
448 if (netif_running(dev) && addrconf_link_ready(dev))
449 ndev->if_flags |= IF_READY;
450
451 ipv6_mc_init_dev(ndev);
452 ndev->tstamp = jiffies;
453 err = addrconf_sysctl_register(ndev);
454 if (err) {
455 ipv6_mc_destroy_dev(ndev);
456 snmp6_unregister_dev(ndev);
457 goto err_release;
458 }
459 /* protected by rtnl_lock */
460 rcu_assign_pointer(dev->ip6_ptr, ndev);
461
462 /* Join interface-local all-node multicast group */
463 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
464
465 /* Join all-node multicast group */
466 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
467
468 /* Join all-router multicast group if forwarding is set */
469 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
470 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
471
472 return ndev;
473
474 err_release:
475 neigh_parms_release(&nd_tbl, ndev->nd_parms);
476 ndev->dead = 1;
477 in6_dev_finish_destroy(ndev);
478 return ERR_PTR(err);
479 }
480
481 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
482 {
483 struct inet6_dev *idev;
484
485 ASSERT_RTNL();
486
487 idev = __in6_dev_get(dev);
488 if (!idev) {
489 idev = ipv6_add_dev(dev);
490 if (IS_ERR(idev))
491 return idev;
492 }
493
494 if (dev->flags&IFF_UP)
495 ipv6_mc_up(idev);
496 return idev;
497 }
498
499 static int inet6_netconf_msgsize_devconf(int type)
500 {
501 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
502 + nla_total_size(4); /* NETCONFA_IFINDEX */
503 bool all = false;
504
505 if (type == NETCONFA_ALL)
506 all = true;
507
508 if (all || type == NETCONFA_FORWARDING)
509 size += nla_total_size(4);
510 #ifdef CONFIG_IPV6_MROUTE
511 if (all || type == NETCONFA_MC_FORWARDING)
512 size += nla_total_size(4);
513 #endif
514 if (all || type == NETCONFA_PROXY_NEIGH)
515 size += nla_total_size(4);
516
517 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
518 size += nla_total_size(4);
519
520 return size;
521 }
522
523 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
524 struct ipv6_devconf *devconf, u32 portid,
525 u32 seq, int event, unsigned int flags,
526 int type)
527 {
528 struct nlmsghdr *nlh;
529 struct netconfmsg *ncm;
530 bool all = false;
531
532 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
533 flags);
534 if (!nlh)
535 return -EMSGSIZE;
536
537 if (type == NETCONFA_ALL)
538 all = true;
539
540 ncm = nlmsg_data(nlh);
541 ncm->ncm_family = AF_INET6;
542
543 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
544 goto nla_put_failure;
545
546 if (!devconf)
547 goto out;
548
549 if ((all || type == NETCONFA_FORWARDING) &&
550 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
551 goto nla_put_failure;
552 #ifdef CONFIG_IPV6_MROUTE
553 if ((all || type == NETCONFA_MC_FORWARDING) &&
554 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
555 atomic_read(&devconf->mc_forwarding)) < 0)
556 goto nla_put_failure;
557 #endif
558 if ((all || type == NETCONFA_PROXY_NEIGH) &&
559 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
560 goto nla_put_failure;
561
562 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
563 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
564 devconf->ignore_routes_with_linkdown) < 0)
565 goto nla_put_failure;
566
567 out:
568 nlmsg_end(skb, nlh);
569 return 0;
570
571 nla_put_failure:
572 nlmsg_cancel(skb, nlh);
573 return -EMSGSIZE;
574 }
575
576 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
577 int ifindex, struct ipv6_devconf *devconf)
578 {
579 struct sk_buff *skb;
580 int err = -ENOBUFS;
581
582 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
583 if (!skb)
584 goto errout;
585
586 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
587 event, 0, type);
588 if (err < 0) {
589 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
590 WARN_ON(err == -EMSGSIZE);
591 kfree_skb(skb);
592 goto errout;
593 }
594 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
595 return;
596 errout:
597 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
598 }
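/*
 * inet6_netconf_notify_devconf() is how per-device, "all" and "default"
 * configuration changes are announced to userspace: callers such as
 * dev_forward_change() and addrconf_fixup_forwarding() below use it to send
 * an RTM_NEWNETCONF message to RTNLGRP_IPV6_NETCONF listeners.
 */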
599
600 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
601 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
602 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
603 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
604 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
605 };
606
607 static int inet6_netconf_valid_get_req(struct sk_buff *skb,
608 const struct nlmsghdr *nlh,
609 struct nlattr **tb,
610 struct netlink_ext_ack *extack)
611 {
612 int i, err;
613
614 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
615 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
616 return -EINVAL;
617 }
618
619 if (!netlink_strict_get_check(skb))
620 return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
621 tb, NETCONFA_MAX,
622 devconf_ipv6_policy, extack);
623
624 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
625 tb, NETCONFA_MAX,
626 devconf_ipv6_policy, extack);
627 if (err)
628 return err;
629
630 for (i = 0; i <= NETCONFA_MAX; i++) {
631 if (!tb[i])
632 continue;
633
634 switch (i) {
635 case NETCONFA_IFINDEX:
636 break;
637 default:
638 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
639 return -EINVAL;
640 }
641 }
642
643 return 0;
644 }
645
646 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
647 struct nlmsghdr *nlh,
648 struct netlink_ext_ack *extack)
649 {
650 struct net *net = sock_net(in_skb->sk);
651 struct nlattr *tb[NETCONFA_MAX+1];
652 struct inet6_dev *in6_dev = NULL;
653 struct net_device *dev = NULL;
654 struct sk_buff *skb;
655 struct ipv6_devconf *devconf;
656 int ifindex;
657 int err;
658
659 err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
660 if (err < 0)
661 return err;
662
663 if (!tb[NETCONFA_IFINDEX])
664 return -EINVAL;
665
666 err = -EINVAL;
667 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
668 switch (ifindex) {
669 case NETCONFA_IFINDEX_ALL:
670 devconf = net->ipv6.devconf_all;
671 break;
672 case NETCONFA_IFINDEX_DEFAULT:
673 devconf = net->ipv6.devconf_dflt;
674 break;
675 default:
676 dev = dev_get_by_index(net, ifindex);
677 if (!dev)
678 return -EINVAL;
679 in6_dev = in6_dev_get(dev);
680 if (!in6_dev)
681 goto errout;
682 devconf = &in6_dev->cnf;
683 break;
684 }
685
686 err = -ENOBUFS;
687 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
688 if (!skb)
689 goto errout;
690
691 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
692 NETLINK_CB(in_skb).portid,
693 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
694 NETCONFA_ALL);
695 if (err < 0) {
696 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
697 WARN_ON(err == -EMSGSIZE);
698 kfree_skb(skb);
699 goto errout;
700 }
701 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
702 errout:
703 if (in6_dev)
704 in6_dev_put(in6_dev);
705 dev_put(dev);
706 return err;
707 }
708
709 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
710 struct netlink_callback *cb)
711 {
712 const struct nlmsghdr *nlh = cb->nlh;
713 struct net *net = sock_net(skb->sk);
714 int h, s_h;
715 int idx, s_idx;
716 struct net_device *dev;
717 struct inet6_dev *idev;
718 struct hlist_head *head;
719
720 if (cb->strict_check) {
721 struct netlink_ext_ack *extack = cb->extack;
722 struct netconfmsg *ncm;
723
724 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
725 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
726 return -EINVAL;
727 }
728
729 if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
730 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
731 return -EINVAL;
732 }
733 }
734
735 s_h = cb->args[0];
736 s_idx = idx = cb->args[1];
737
738 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
739 idx = 0;
740 head = &net->dev_index_head[h];
741 rcu_read_lock();
742 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
743 net->dev_base_seq;
744 hlist_for_each_entry_rcu(dev, head, index_hlist) {
745 if (idx < s_idx)
746 goto cont;
747 idev = __in6_dev_get(dev);
748 if (!idev)
749 goto cont;
750
751 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
752 &idev->cnf,
753 NETLINK_CB(cb->skb).portid,
754 nlh->nlmsg_seq,
755 RTM_NEWNETCONF,
756 NLM_F_MULTI,
757 NETCONFA_ALL) < 0) {
758 rcu_read_unlock();
759 goto done;
760 }
761 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
762 cont:
763 idx++;
764 }
765 rcu_read_unlock();
766 }
767 if (h == NETDEV_HASHENTRIES) {
768 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
769 net->ipv6.devconf_all,
770 NETLINK_CB(cb->skb).portid,
771 nlh->nlmsg_seq,
772 RTM_NEWNETCONF, NLM_F_MULTI,
773 NETCONFA_ALL) < 0)
774 goto done;
775 else
776 h++;
777 }
778 if (h == NETDEV_HASHENTRIES + 1) {
779 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
780 net->ipv6.devconf_dflt,
781 NETLINK_CB(cb->skb).portid,
782 nlh->nlmsg_seq,
783 RTM_NEWNETCONF, NLM_F_MULTI,
784 NETCONFA_ALL) < 0)
785 goto done;
786 else
787 h++;
788 }
789 done:
790 cb->args[0] = h;
791 cb->args[1] = idx;
792
793 return skb->len;
794 }
795
796 #ifdef CONFIG_SYSCTL
797 static void dev_forward_change(struct inet6_dev *idev)
798 {
799 struct net_device *dev;
800 struct inet6_ifaddr *ifa;
801
802 if (!idev)
803 return;
804 dev = idev->dev;
805 if (idev->cnf.forwarding)
806 dev_disable_lro(dev);
807 if (dev->flags & IFF_MULTICAST) {
808 if (idev->cnf.forwarding) {
809 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
810 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
811 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
812 } else {
813 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
814 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
815 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
816 }
817 }
818
819 list_for_each_entry(ifa, &idev->addr_list, if_list) {
820 if (ifa->flags&IFA_F_TENTATIVE)
821 continue;
822 if (idev->cnf.forwarding)
823 addrconf_join_anycast(ifa);
824 else
825 addrconf_leave_anycast(ifa);
826 }
827 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
828 NETCONFA_FORWARDING,
829 dev->ifindex, &idev->cnf);
830 }
831
832
833 static void addrconf_forward_change(struct net *net, __s32 newf)
834 {
835 struct net_device *dev;
836 struct inet6_dev *idev;
837
838 for_each_netdev(net, dev) {
839 idev = __in6_dev_get(dev);
840 if (idev) {
841 int changed = (!idev->cnf.forwarding) ^ (!newf);
842 idev->cnf.forwarding = newf;
843 if (changed)
844 dev_forward_change(idev);
845 }
846 }
847 }
848
849 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
850 {
851 struct net *net;
852 int old;
853
854 if (!rtnl_trylock())
855 return restart_syscall();
856
857 net = (struct net *)table->extra2;
858 old = *p;
859 *p = newf;
860
861 if (p == &net->ipv6.devconf_dflt->forwarding) {
862 if ((!newf) ^ (!old))
863 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
864 NETCONFA_FORWARDING,
865 NETCONFA_IFINDEX_DEFAULT,
866 net->ipv6.devconf_dflt);
867 rtnl_unlock();
868 return 0;
869 }
870
871 if (p == &net->ipv6.devconf_all->forwarding) {
872 int old_dflt = net->ipv6.devconf_dflt->forwarding;
873
874 net->ipv6.devconf_dflt->forwarding = newf;
875 if ((!newf) ^ (!old_dflt))
876 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
877 NETCONFA_FORWARDING,
878 NETCONFA_IFINDEX_DEFAULT,
879 net->ipv6.devconf_dflt);
880
881 addrconf_forward_change(net, newf);
882 if ((!newf) ^ (!old))
883 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
884 NETCONFA_FORWARDING,
885 NETCONFA_IFINDEX_ALL,
886 net->ipv6.devconf_all);
887 } else if ((!newf) ^ (!old))
888 dev_forward_change((struct inet6_dev *)table->extra1);
889 rtnl_unlock();
890
891 if (newf)
892 rt6_purge_dflt_routers(net);
893 return 1;
894 }
895
896 static void addrconf_linkdown_change(struct net *net, __s32 newf)
897 {
898 struct net_device *dev;
899 struct inet6_dev *idev;
900
901 for_each_netdev(net, dev) {
902 idev = __in6_dev_get(dev);
903 if (idev) {
904 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
905
906 idev->cnf.ignore_routes_with_linkdown = newf;
907 if (changed)
908 inet6_netconf_notify_devconf(dev_net(dev),
909 RTM_NEWNETCONF,
910 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
911 dev->ifindex,
912 &idev->cnf);
913 }
914 }
915 }
916
917 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
918 {
919 struct net *net;
920 int old;
921
922 if (!rtnl_trylock())
923 return restart_syscall();
924
925 net = (struct net *)table->extra2;
926 old = *p;
927 *p = newf;
928
929 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
930 if ((!newf) ^ (!old))
931 inet6_netconf_notify_devconf(net,
932 RTM_NEWNETCONF,
933 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
934 NETCONFA_IFINDEX_DEFAULT,
935 net->ipv6.devconf_dflt);
936 rtnl_unlock();
937 return 0;
938 }
939
940 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
941 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
942 addrconf_linkdown_change(net, newf);
943 if ((!newf) ^ (!old))
944 inet6_netconf_notify_devconf(net,
945 RTM_NEWNETCONF,
946 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
947 NETCONFA_IFINDEX_ALL,
948 net->ipv6.devconf_all);
949 }
950 rtnl_unlock();
951
952 return 1;
953 }
954
955 #endif
956
957 /* Nobody refers to this ifaddr, destroy it */
958 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
959 {
960 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
961
962 #ifdef NET_REFCNT_DEBUG
963 pr_debug("%s\n", __func__);
964 #endif
965
966 in6_dev_put(ifp->idev);
967
968 if (cancel_delayed_work(&ifp->dad_work))
969 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
970 ifp);
971
972 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
973 pr_warn("Freeing alive inet6 address %p\n", ifp);
974 return;
975 }
976
977 kfree_rcu(ifp, rcu);
978 }
979
980 static void
981 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
982 {
983 struct list_head *p;
984 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
985
986 /*
987 * Each device address list is sorted in order of scope -
988 * global before linklocal.
989 */
990 list_for_each(p, &idev->addr_list) {
991 struct inet6_ifaddr *ifa
992 = list_entry(p, struct inet6_ifaddr, if_list);
993 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
994 break;
995 }
996
997 list_add_tail_rcu(&ifp->if_list, p);
998 }
999
1000 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1001 {
1002 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
1003
1004 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1005 }
1006
1007 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1008 struct net_device *dev, unsigned int hash)
1009 {
1010 struct inet6_ifaddr *ifp;
1011
1012 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
1013 if (!net_eq(dev_net(ifp->idev->dev), net))
1014 continue;
1015 if (ipv6_addr_equal(&ifp->addr, addr)) {
1016 if (!dev || ifp->idev->dev == dev)
1017 return true;
1018 }
1019 }
1020 return false;
1021 }
1022
1023 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1024 {
1025 unsigned int hash = inet6_addr_hash(dev_net(dev), &ifa->addr);
1026 int err = 0;
1027
1028 spin_lock(&addrconf_hash_lock);
1029
1030 /* Ignore adding duplicate addresses on an interface */
1031 if (ipv6_chk_same_addr(dev_net(dev), &ifa->addr, dev, hash)) {
1032 netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1033 err = -EEXIST;
1034 } else {
1035 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
1036 }
1037
1038 spin_unlock(&addrconf_hash_lock);
1039
1040 return err;
1041 }
1042
1043 /* On success it returns ifp with increased reference count */
1044
1045 static struct inet6_ifaddr *
1046 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1047 bool can_block, struct netlink_ext_ack *extack)
1048 {
1049 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1050 int addr_type = ipv6_addr_type(cfg->pfx);
1051 struct net *net = dev_net(idev->dev);
1052 struct inet6_ifaddr *ifa = NULL;
1053 struct fib6_info *f6i = NULL;
1054 int err = 0;
1055
1056 if (addr_type == IPV6_ADDR_ANY ||
1057 (addr_type & IPV6_ADDR_MULTICAST &&
1058 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1059 (!(idev->dev->flags & IFF_LOOPBACK) &&
1060 !netif_is_l3_master(idev->dev) &&
1061 addr_type & IPV6_ADDR_LOOPBACK))
1062 return ERR_PTR(-EADDRNOTAVAIL);
1063
1064 if (idev->dead) {
1065 err = -ENODEV; /*XXX*/
1066 goto out;
1067 }
1068
1069 if (idev->cnf.disable_ipv6) {
1070 err = -EACCES;
1071 goto out;
1072 }
1073
1074 /* validator notifier needs to be blocking;
1075 * do not call in atomic context
1076 */
1077 if (can_block) {
1078 struct in6_validator_info i6vi = {
1079 .i6vi_addr = *cfg->pfx,
1080 .i6vi_dev = idev,
1081 .extack = extack,
1082 };
1083
1084 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1085 err = notifier_to_errno(err);
1086 if (err < 0)
1087 goto out;
1088 }
1089
1090 ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT);
1091 if (!ifa) {
1092 err = -ENOBUFS;
1093 goto out;
1094 }
1095
1096 f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags);
1097 if (IS_ERR(f6i)) {
1098 err = PTR_ERR(f6i);
1099 f6i = NULL;
1100 goto out;
1101 }
1102
1103 if (net->ipv6.devconf_all->disable_policy ||
1104 idev->cnf.disable_policy)
1105 f6i->dst_nopolicy = true;
1106
1107 neigh_parms_data_state_setall(idev->nd_parms);
1108
1109 ifa->addr = *cfg->pfx;
1110 if (cfg->peer_pfx)
1111 ifa->peer_addr = *cfg->peer_pfx;
1112
1113 spin_lock_init(&ifa->lock);
1114 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1115 INIT_HLIST_NODE(&ifa->addr_lst);
1116 ifa->scope = cfg->scope;
1117 ifa->prefix_len = cfg->plen;
1118 ifa->rt_priority = cfg->rt_priority;
1119 ifa->flags = cfg->ifa_flags;
1120 /* No need to add the TENTATIVE flag for addresses with NODAD */
1121 if (!(cfg->ifa_flags & IFA_F_NODAD))
1122 ifa->flags |= IFA_F_TENTATIVE;
1123 ifa->valid_lft = cfg->valid_lft;
1124 ifa->prefered_lft = cfg->preferred_lft;
1125 ifa->cstamp = ifa->tstamp = jiffies;
1126 ifa->tokenized = false;
1127
1128 ifa->rt = f6i;
1129
1130 ifa->idev = idev;
1131 in6_dev_hold(idev);
1132
1133 /* For caller */
1134 refcount_set(&ifa->refcnt, 1);
1135
1136 rcu_read_lock_bh();
1137
1138 err = ipv6_add_addr_hash(idev->dev, ifa);
1139 if (err < 0) {
1140 rcu_read_unlock_bh();
1141 goto out;
1142 }
1143
1144 write_lock(&idev->lock);
1145
1146 /* Add to inet6_dev unicast addr list. */
1147 ipv6_link_dev_addr(idev, ifa);
1148
1149 if (ifa->flags&IFA_F_TEMPORARY) {
1150 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1151 in6_ifa_hold(ifa);
1152 }
1153
1154 in6_ifa_hold(ifa);
1155 write_unlock(&idev->lock);
1156
1157 rcu_read_unlock_bh();
1158
1159 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1160 out:
1161 if (unlikely(err < 0)) {
1162 fib6_info_release(f6i);
1163
1164 if (ifa) {
1165 if (ifa->idev)
1166 in6_dev_put(ifa->idev);
1167 kfree(ifa);
1168 }
1169 ifa = ERR_PTR(err);
1170 }
1171
1172 return ifa;
1173 }
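/*
 * Reference counting note: on success the ifaddr returned above carries one
 * reference for the caller (refcount_set(&ifa->refcnt, 1)); callers such as
 * ipv6_create_tempaddr() drop it with in6_ifa_put() once they are done.
 */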
1174
1175 enum cleanup_prefix_rt_t {
1176 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1177 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1178 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1179 };
1180
1181 /*
1182  * Check whether the prefix for ifp would still need a prefix route
1183 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1184 * constants.
1185 *
1186 * 1) we don't purge prefix if address was not permanent.
1187 * prefix is managed by its own lifetime.
1188 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1189 * 3) if there are no addresses, delete prefix.
1190 * 4) if there are still other permanent address(es),
1191 * corresponding prefix is still permanent.
1192 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1193 * don't purge the prefix, assume user space is managing it.
1194 * 6) otherwise, update prefix lifetime to the
1195 * longest valid lifetime among the corresponding
1196 * addresses on the device.
1197 * Note: subsequent RA will update lifetime.
1198 **/
1199 static enum cleanup_prefix_rt_t
1200 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1201 {
1202 struct inet6_ifaddr *ifa;
1203 struct inet6_dev *idev = ifp->idev;
1204 unsigned long lifetime;
1205 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1206
1207 *expires = jiffies;
1208
1209 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1210 if (ifa == ifp)
1211 continue;
1212 if (ifa->prefix_len != ifp->prefix_len ||
1213 !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1214 ifp->prefix_len))
1215 continue;
1216 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1217 return CLEANUP_PREFIX_RT_NOP;
1218
1219 action = CLEANUP_PREFIX_RT_EXPIRE;
1220
1221 spin_lock(&ifa->lock);
1222
1223 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1224 /*
1225 * Note: Because this address is
1226 * not permanent, lifetime <
1227 * LONG_MAX / HZ here.
1228 */
1229 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1230 *expires = ifa->tstamp + lifetime * HZ;
1231 spin_unlock(&ifa->lock);
1232 }
1233
1234 return action;
1235 }
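/*
 * Example of the rules above: removing 2001:db8::1/64 while a permanent
 * 2001:db8::2/64 is still configured returns CLEANUP_PREFIX_RT_NOP; if only
 * a non-permanent address of the same /64 remains, the result is
 * CLEANUP_PREFIX_RT_EXPIRE with *expires set to its remaining valid
 * lifetime; if nothing else shares the prefix, CLEANUP_PREFIX_RT_DEL is
 * returned and the caller deletes the prefix route.
 */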
1236
1237 static void
1238 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1239 bool del_rt, bool del_peer)
1240 {
1241 struct fib6_info *f6i;
1242
1243 f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1244 ifp->prefix_len,
1245 ifp->idev->dev, 0, RTF_DEFAULT, true);
1246 if (f6i) {
1247 if (del_rt)
1248 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1249 else {
1250 if (!(f6i->fib6_flags & RTF_EXPIRES))
1251 fib6_set_expires(f6i, expires);
1252 fib6_info_release(f6i);
1253 }
1254 }
1255 }
1256
1257
1258 /* This function expects a referenced ifp and releases that reference before returning */
1259
1260 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1261 {
1262 int state;
1263 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1264 unsigned long expires;
1265
1266 ASSERT_RTNL();
1267
1268 spin_lock_bh(&ifp->lock);
1269 state = ifp->state;
1270 ifp->state = INET6_IFADDR_STATE_DEAD;
1271 spin_unlock_bh(&ifp->lock);
1272
1273 if (state == INET6_IFADDR_STATE_DEAD)
1274 goto out;
1275
1276 spin_lock_bh(&addrconf_hash_lock);
1277 hlist_del_init_rcu(&ifp->addr_lst);
1278 spin_unlock_bh(&addrconf_hash_lock);
1279
1280 write_lock_bh(&ifp->idev->lock);
1281
1282 if (ifp->flags&IFA_F_TEMPORARY) {
1283 list_del(&ifp->tmp_list);
1284 if (ifp->ifpub) {
1285 in6_ifa_put(ifp->ifpub);
1286 ifp->ifpub = NULL;
1287 }
1288 __in6_ifa_put(ifp);
1289 }
1290
1291 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1292 action = check_cleanup_prefix_route(ifp, &expires);
1293
1294 list_del_rcu(&ifp->if_list);
1295 __in6_ifa_put(ifp);
1296
1297 write_unlock_bh(&ifp->idev->lock);
1298
1299 addrconf_del_dad_work(ifp);
1300
1301 ipv6_ifa_notify(RTM_DELADDR, ifp);
1302
1303 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1304
1305 if (action != CLEANUP_PREFIX_RT_NOP) {
1306 cleanup_prefix_route(ifp, expires,
1307 action == CLEANUP_PREFIX_RT_DEL, false);
1308 }
1309
1310 /* clean up prefsrc entries */
1311 rt6_remove_prefsrc(ifp);
1312 out:
1313 in6_ifa_put(ifp);
1314 }
1315
1316 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
1317 {
1318 struct inet6_dev *idev = ifp->idev;
1319 unsigned long tmp_tstamp, age;
1320 unsigned long regen_advance;
1321 unsigned long now = jiffies;
1322 s32 cnf_temp_preferred_lft;
1323 struct inet6_ifaddr *ift;
1324 struct ifa6_config cfg;
1325 long max_desync_factor;
1326 struct in6_addr addr;
1327 int ret = 0;
1328
1329 write_lock_bh(&idev->lock);
1330
1331 retry:
1332 in6_dev_hold(idev);
1333 if (idev->cnf.use_tempaddr <= 0) {
1334 write_unlock_bh(&idev->lock);
1335 pr_info("%s: use_tempaddr is disabled\n", __func__);
1336 in6_dev_put(idev);
1337 ret = -1;
1338 goto out;
1339 }
1340 spin_lock_bh(&ifp->lock);
1341 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1342 idev->cnf.use_tempaddr = -1; /*XXX*/
1343 spin_unlock_bh(&ifp->lock);
1344 write_unlock_bh(&idev->lock);
1345 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1346 __func__);
1347 in6_dev_put(idev);
1348 ret = -1;
1349 goto out;
1350 }
1351 in6_ifa_hold(ifp);
1352 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1353 ipv6_gen_rnd_iid(&addr);
1354
1355 age = (now - ifp->tstamp) / HZ;
1356
1357 regen_advance = idev->cnf.regen_max_retry *
1358 idev->cnf.dad_transmits *
1359 max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1360
1361 /* recalculate max_desync_factor each time and update
1362 * idev->desync_factor if it's larger
1363 */
1364 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1365 max_desync_factor = min_t(__u32,
1366 idev->cnf.max_desync_factor,
1367 cnf_temp_preferred_lft - regen_advance);
1368
1369 if (unlikely(idev->desync_factor > max_desync_factor)) {
1370 if (max_desync_factor > 0) {
1371 get_random_bytes(&idev->desync_factor,
1372 sizeof(idev->desync_factor));
1373 idev->desync_factor %= max_desync_factor;
1374 } else {
1375 idev->desync_factor = 0;
1376 }
1377 }
1378
1379 memset(&cfg, 0, sizeof(cfg));
1380 cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1381 idev->cnf.temp_valid_lft + age);
1382 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1383 cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
1384
1385 cfg.plen = ifp->prefix_len;
1386 tmp_tstamp = ifp->tstamp;
1387 spin_unlock_bh(&ifp->lock);
1388
1389 write_unlock_bh(&idev->lock);
1390
1391 /* A temporary address is created only if this calculated Preferred
1392 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1393 * an implementation must not create a temporary address with a zero
1394 * Preferred Lifetime.
1395 * Use age calculation as in addrconf_verify to avoid unnecessary
1396 * temporary addresses being generated.
1397 */
1398 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1399 if (cfg.preferred_lft <= regen_advance + age) {
1400 in6_ifa_put(ifp);
1401 in6_dev_put(idev);
1402 ret = -1;
1403 goto out;
1404 }
1405
1406 cfg.ifa_flags = IFA_F_TEMPORARY;
1407 /* set in addrconf_prefix_rcv() */
1408 if (ifp->flags & IFA_F_OPTIMISTIC)
1409 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1410
1411 cfg.pfx = &addr;
1412 cfg.scope = ipv6_addr_scope(cfg.pfx);
1413
1414 ift = ipv6_add_addr(idev, &cfg, block, NULL);
1415 if (IS_ERR(ift)) {
1416 in6_ifa_put(ifp);
1417 in6_dev_put(idev);
1418 pr_info("%s: retry temporary address regeneration\n", __func__);
1419 write_lock_bh(&idev->lock);
1420 goto retry;
1421 }
1422
1423 spin_lock_bh(&ift->lock);
1424 ift->ifpub = ifp;
1425 ift->cstamp = now;
1426 ift->tstamp = tmp_tstamp;
1427 spin_unlock_bh(&ift->lock);
1428
1429 addrconf_dad_start(ift);
1430 in6_ifa_put(ift);
1431 in6_dev_put(idev);
1432 out:
1433 return ret;
1434 }
1435
1436 /*
1437 * Choose an appropriate source address (RFC3484)
1438 */
1439 enum {
1440 IPV6_SADDR_RULE_INIT = 0,
1441 IPV6_SADDR_RULE_LOCAL,
1442 IPV6_SADDR_RULE_SCOPE,
1443 IPV6_SADDR_RULE_PREFERRED,
1444 #ifdef CONFIG_IPV6_MIP6
1445 IPV6_SADDR_RULE_HOA,
1446 #endif
1447 IPV6_SADDR_RULE_OIF,
1448 IPV6_SADDR_RULE_LABEL,
1449 IPV6_SADDR_RULE_PRIVACY,
1450 IPV6_SADDR_RULE_ORCHID,
1451 IPV6_SADDR_RULE_PREFIX,
1452 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1453 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1454 #endif
1455 IPV6_SADDR_RULE_MAX
1456 };
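/*
 * The rules above are evaluated in ascending order by ipv6_get_saddr_eval();
 * candidate addresses are compared rule by rule and the first rule that
 * scores differently decides, roughly following the rule ordering of
 * RFC 6724 (which obsoletes RFC 3484).
 */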
1457
1458 struct ipv6_saddr_score {
1459 int rule;
1460 int addr_type;
1461 struct inet6_ifaddr *ifa;
1462 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1463 int scopedist;
1464 int matchlen;
1465 };
1466
1467 struct ipv6_saddr_dst {
1468 const struct in6_addr *addr;
1469 int ifindex;
1470 int scope;
1471 int label;
1472 unsigned int prefs;
1473 };
1474
1475 static inline int ipv6_saddr_preferred(int type)
1476 {
1477 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1478 return 1;
1479 return 0;
1480 }
1481
1482 static bool ipv6_use_optimistic_addr(struct net *net,
1483 struct inet6_dev *idev)
1484 {
1485 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1486 if (!idev)
1487 return false;
1488 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1489 return false;
1490 if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1491 return false;
1492
1493 return true;
1494 #else
1495 return false;
1496 #endif
1497 }
1498
1499 static bool ipv6_allow_optimistic_dad(struct net *net,
1500 struct inet6_dev *idev)
1501 {
1502 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1503 if (!idev)
1504 return false;
1505 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1506 return false;
1507
1508 return true;
1509 #else
1510 return false;
1511 #endif
1512 }
1513
1514 static int ipv6_get_saddr_eval(struct net *net,
1515 struct ipv6_saddr_score *score,
1516 struct ipv6_saddr_dst *dst,
1517 int i)
1518 {
1519 int ret;
1520
1521 if (i <= score->rule) {
1522 switch (i) {
1523 case IPV6_SADDR_RULE_SCOPE:
1524 ret = score->scopedist;
1525 break;
1526 case IPV6_SADDR_RULE_PREFIX:
1527 ret = score->matchlen;
1528 break;
1529 default:
1530 ret = !!test_bit(i, score->scorebits);
1531 }
1532 goto out;
1533 }
1534
1535 switch (i) {
1536 case IPV6_SADDR_RULE_INIT:
1537 /* Rule 0: remember if hiscore is not ready yet */
1538 ret = !!score->ifa;
1539 break;
1540 case IPV6_SADDR_RULE_LOCAL:
1541 /* Rule 1: Prefer same address */
1542 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1543 break;
1544 case IPV6_SADDR_RULE_SCOPE:
1545 /* Rule 2: Prefer appropriate scope
1546 *
1547 * ret
1548 * ^
1549 * -1 | d 15
1550 * ---+--+-+---> scope
1551 * |
1552 * | d is scope of the destination.
1553 * B-d | \
1554 * | \ <- smaller scope is better if
1555 * B-15 | \ if scope is enough for destination.
1556 * | ret = B - scope (-1 <= d <= scope <= 15).
1557 * d-C-1 | /
1558 * |/ <- greater is better
1559 * -C / if scope is not enough for destination.
1560 * /| ret = scope - C (-1 <= scope < d <= 15).
1561 *
1562 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1563 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1564 * Assume B = 0 and we get C > 29.
1565 */
1566 ret = __ipv6_addr_src_scope(score->addr_type);
1567 if (ret >= dst->scope)
1568 ret = -ret;
1569 else
1570 ret -= 128; /* 30 is enough */
1571 score->scopedist = ret;
1572 break;
1573 case IPV6_SADDR_RULE_PREFERRED:
1574 {
1575 /* Rule 3: Avoid deprecated and optimistic addresses */
1576 u8 avoid = IFA_F_DEPRECATED;
1577
1578 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1579 avoid |= IFA_F_OPTIMISTIC;
1580 ret = ipv6_saddr_preferred(score->addr_type) ||
1581 !(score->ifa->flags & avoid);
1582 break;
1583 }
1584 #ifdef CONFIG_IPV6_MIP6
1585 case IPV6_SADDR_RULE_HOA:
1586 {
1587 /* Rule 4: Prefer home address */
1588 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1589 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1590 break;
1591 }
1592 #endif
1593 case IPV6_SADDR_RULE_OIF:
1594 /* Rule 5: Prefer outgoing interface */
1595 ret = (!dst->ifindex ||
1596 dst->ifindex == score->ifa->idev->dev->ifindex);
1597 break;
1598 case IPV6_SADDR_RULE_LABEL:
1599 /* Rule 6: Prefer matching label */
1600 ret = ipv6_addr_label(net,
1601 &score->ifa->addr, score->addr_type,
1602 score->ifa->idev->dev->ifindex) == dst->label;
1603 break;
1604 case IPV6_SADDR_RULE_PRIVACY:
1605 {
1606 /* Rule 7: Prefer public address
1607 * Note: prefer temporary address if use_tempaddr >= 2
1608 */
1609 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1610 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1611 score->ifa->idev->cnf.use_tempaddr >= 2;
1612 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1613 break;
1614 }
1615 case IPV6_SADDR_RULE_ORCHID:
1616 /* Rule 8-: Prefer ORCHID vs ORCHID or
1617 * non-ORCHID vs non-ORCHID
1618 */
1619 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1620 ipv6_addr_orchid(dst->addr));
1621 break;
1622 case IPV6_SADDR_RULE_PREFIX:
1623 /* Rule 8: Use longest matching prefix */
1624 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1625 if (ret > score->ifa->prefix_len)
1626 ret = score->ifa->prefix_len;
1627 score->matchlen = ret;
1628 break;
1629 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1630 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1631 /* Optimistic addresses still have lower precedence than other
1632 * preferred addresses.
1633 */
1634 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1635 break;
1636 #endif
1637 default:
1638 ret = 0;
1639 }
1640
1641 if (ret)
1642 __set_bit(i, score->scorebits);
1643 score->rule = i;
1644 out:
1645 return ret;
1646 }
1647
1648 static int __ipv6_dev_get_saddr(struct net *net,
1649 struct ipv6_saddr_dst *dst,
1650 struct inet6_dev *idev,
1651 struct ipv6_saddr_score *scores,
1652 int hiscore_idx)
1653 {
1654 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1655
1656 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1657 int i;
1658
1659 /*
1660 * - Tentative Address (RFC2462 section 5.4)
1661 * - A tentative address is not considered
1662 * "assigned to an interface" in the traditional
1663 * sense, unless it is also flagged as optimistic.
1664 * - Candidate Source Address (section 4)
1665 * - In any case, anycast addresses, multicast
1666 * addresses, and the unspecified address MUST
1667 * NOT be included in a candidate set.
1668 */
1669 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1670 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1671 continue;
1672
1673 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1674
1675 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1676 score->addr_type & IPV6_ADDR_MULTICAST)) {
1677 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1678 idev->dev->name);
1679 continue;
1680 }
1681
1682 score->rule = -1;
1683 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1684
1685 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1686 int minihiscore, miniscore;
1687
1688 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1689 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1690
1691 if (minihiscore > miniscore) {
1692 if (i == IPV6_SADDR_RULE_SCOPE &&
1693 score->scopedist > 0) {
1694 /*
1695 * special case:
1696 * each remaining entry
1697 * has too small (not enough)
1698 * scope, because ifa entries
1699 * are sorted by their scope
1700 * values.
1701 */
1702 goto out;
1703 }
1704 break;
1705 } else if (minihiscore < miniscore) {
1706 swap(hiscore, score);
1707 hiscore_idx = 1 - hiscore_idx;
1708
1709 /* restore our iterator */
1710 score->ifa = hiscore->ifa;
1711
1712 break;
1713 }
1714 }
1715 }
1716 out:
1717 return hiscore_idx;
1718 }
1719
1720 static int ipv6_get_saddr_master(struct net *net,
1721 const struct net_device *dst_dev,
1722 const struct net_device *master,
1723 struct ipv6_saddr_dst *dst,
1724 struct ipv6_saddr_score *scores,
1725 int hiscore_idx)
1726 {
1727 struct inet6_dev *idev;
1728
1729 idev = __in6_dev_get(dst_dev);
1730 if (idev)
1731 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1732 scores, hiscore_idx);
1733
1734 idev = __in6_dev_get(master);
1735 if (idev)
1736 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1737 scores, hiscore_idx);
1738
1739 return hiscore_idx;
1740 }
1741
1742 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1743 const struct in6_addr *daddr, unsigned int prefs,
1744 struct in6_addr *saddr)
1745 {
1746 struct ipv6_saddr_score scores[2], *hiscore;
1747 struct ipv6_saddr_dst dst;
1748 struct inet6_dev *idev;
1749 struct net_device *dev;
1750 int dst_type;
1751 bool use_oif_addr = false;
1752 int hiscore_idx = 0;
1753 int ret = 0;
1754
1755 dst_type = __ipv6_addr_type(daddr);
1756 dst.addr = daddr;
1757 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1758 dst.scope = __ipv6_addr_src_scope(dst_type);
1759 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1760 dst.prefs = prefs;
1761
1762 scores[hiscore_idx].rule = -1;
1763 scores[hiscore_idx].ifa = NULL;
1764
1765 rcu_read_lock();
1766
1767 /* Candidate Source Address (section 4)
1768 * - multicast and link-local destination address,
1769 * the set of candidate source address MUST only
1770 * include addresses assigned to interfaces
1771 * belonging to the same link as the outgoing
1772 * interface.
1773 * (- For site-local destination addresses, the
1774 * set of candidate source addresses MUST only
1775 * include addresses assigned to interfaces
1776 * belonging to the same site as the outgoing
1777 * interface.)
1778 * - "It is RECOMMENDED that the candidate source addresses
1779 * be the set of unicast addresses assigned to the
1780 * interface that will be used to send to the destination
1781 * (the 'outgoing' interface)." (RFC 6724)
1782 */
1783 if (dst_dev) {
1784 idev = __in6_dev_get(dst_dev);
1785 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1786 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1787 (idev && idev->cnf.use_oif_addrs_only)) {
1788 use_oif_addr = true;
1789 }
1790 }
1791
1792 if (use_oif_addr) {
1793 if (idev)
1794 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1795 } else {
1796 const struct net_device *master;
1797 int master_idx = 0;
1798
1799 /* if dst_dev exists and is enslaved to an L3 device, then
1800 * prefer addresses from dst_dev and then the master over
1801 * any other enslaved devices in the L3 domain.
1802 */
1803 master = l3mdev_master_dev_rcu(dst_dev);
1804 if (master) {
1805 master_idx = master->ifindex;
1806
1807 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1808 master, &dst,
1809 scores, hiscore_idx);
1810
1811 if (scores[hiscore_idx].ifa)
1812 goto out;
1813 }
1814
1815 for_each_netdev_rcu(net, dev) {
1816 /* only consider addresses on devices in the
1817 * same L3 domain
1818 */
1819 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1820 continue;
1821 idev = __in6_dev_get(dev);
1822 if (!idev)
1823 continue;
1824 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1825 }
1826 }
1827
1828 out:
1829 hiscore = &scores[hiscore_idx];
1830 if (!hiscore->ifa)
1831 ret = -EADDRNOTAVAIL;
1832 else
1833 *saddr = hiscore->ifa->addr;
1834
1835 rcu_read_unlock();
1836 return ret;
1837 }
1838 EXPORT_SYMBOL(ipv6_dev_get_saddr);
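/*
 * Usage sketch: how a caller typically asks for a source address for a given
 * destination.  demo_pick_saddr() and its arguments are hypothetical names
 * used only for illustration.
 */
#if 0	/* example only, not compiled */
static int demo_pick_saddr(struct net *net, const struct net_device *oif,
			   const struct in6_addr *daddr)
{
	struct in6_addr saddr;
	int err;

	/* prefs may carry IPV6_PREFER_SRC_* hints; 0 selects the defaults */
	err = ipv6_dev_get_saddr(net, oif, daddr, 0, &saddr);
	if (err)
		return err;	/* -EADDRNOTAVAIL: no usable source address */

	/* ... use saddr ... */
	return 0;
}
#endif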
1839
1840 static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1841 u32 banned_flags)
1842 {
1843 struct inet6_ifaddr *ifp;
1844 int err = -EADDRNOTAVAIL;
1845
1846 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1847 if (ifp->scope > IFA_LINK)
1848 break;
1849 if (ifp->scope == IFA_LINK &&
1850 !(ifp->flags & banned_flags)) {
1851 *addr = ifp->addr;
1852 err = 0;
1853 break;
1854 }
1855 }
1856 return err;
1857 }
1858
1859 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1860 u32 banned_flags)
1861 {
1862 struct inet6_dev *idev;
1863 int err = -EADDRNOTAVAIL;
1864
1865 rcu_read_lock();
1866 idev = __in6_dev_get(dev);
1867 if (idev) {
1868 read_lock_bh(&idev->lock);
1869 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1870 read_unlock_bh(&idev->lock);
1871 }
1872 rcu_read_unlock();
1873 return err;
1874 }
1875
1876 static int ipv6_count_addresses(const struct inet6_dev *idev)
1877 {
1878 const struct inet6_ifaddr *ifp;
1879 int cnt = 0;
1880
1881 rcu_read_lock();
1882 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1883 cnt++;
1884 rcu_read_unlock();
1885 return cnt;
1886 }
1887
1888 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1889 const struct net_device *dev, int strict)
1890 {
1891 return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1892 strict, IFA_F_TENTATIVE);
1893 }
1894 EXPORT_SYMBOL(ipv6_chk_addr);
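/*
 * Usage sketch: checking whether an address is already configured.  With
 * dev == NULL the lookup is not tied to a particular device; with a device
 * and strict == 1 only addresses assigned to that device match.
 * demo_addr_in_use() is a hypothetical name used only for illustration.
 */
#if 0	/* example only, not compiled */
static bool demo_addr_in_use(struct net *net, const struct in6_addr *addr,
			     const struct net_device *dev)
{
	return ipv6_chk_addr(net, addr, dev, dev ? 1 : 0) != 0;
}
#endif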
1895
1896 /* device argument is used to find the L3 domain of interest. If
1897 * skip_dev_check is set, then the ifp device is not checked against
1898 * the passed in dev argument. So the 2 cases for addresses checks are:
1899 * 1. does the address exist in the L3 domain that dev is part of
1900 * (skip_dev_check = true), or
1901 *
1902 * 2. does the address exist on the specific device
1903 * (skip_dev_check = false)
1904 */
1905 static struct net_device *
1906 __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1907 const struct net_device *dev, bool skip_dev_check,
1908 int strict, u32 banned_flags)
1909 {
1910 unsigned int hash = inet6_addr_hash(net, addr);
1911 struct net_device *l3mdev, *ndev;
1912 struct inet6_ifaddr *ifp;
1913 u32 ifp_flags;
1914
1915 rcu_read_lock();
1916
1917 l3mdev = l3mdev_master_dev_rcu(dev);
1918 if (skip_dev_check)
1919 dev = NULL;
1920
1921 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1922 ndev = ifp->idev->dev;
1923 if (!net_eq(dev_net(ndev), net))
1924 continue;
1925
1926 if (l3mdev_master_dev_rcu(ndev) != l3mdev)
1927 continue;
1928
1929 /* Decouple optimistic from tentative for evaluation here.
1930 * Ban optimistic addresses explicitly, when required.
1931 */
1932 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1933 ? (ifp->flags&~IFA_F_TENTATIVE)
1934 : ifp->flags;
1935 if (ipv6_addr_equal(&ifp->addr, addr) &&
1936 !(ifp_flags&banned_flags) &&
1937 (!dev || ndev == dev ||
1938 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1939 rcu_read_unlock();
1940 return ndev;
1941 }
1942 }
1943
1944 rcu_read_unlock();
1945 return NULL;
1946 }
1947
1948 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1949 const struct net_device *dev, bool skip_dev_check,
1950 int strict, u32 banned_flags)
1951 {
1952 return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
1953 strict, banned_flags) ? 1 : 0;
1954 }
1955 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1956
1957
1958 /* Compares an address/prefix_len with addresses on device @dev.
1959 * If one is found it returns true.
1960 */
1961 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1962 const unsigned int prefix_len, struct net_device *dev)
1963 {
1964 const struct inet6_ifaddr *ifa;
1965 const struct inet6_dev *idev;
1966 bool ret = false;
1967
1968 rcu_read_lock();
1969 idev = __in6_dev_get(dev);
1970 if (idev) {
1971 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1972 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1973 if (ret)
1974 break;
1975 }
1976 }
1977 rcu_read_unlock();
1978
1979 return ret;
1980 }
1981 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1982
1983 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1984 {
1985 const struct inet6_ifaddr *ifa;
1986 const struct inet6_dev *idev;
1987 int onlink;
1988
1989 onlink = 0;
1990 rcu_read_lock();
1991 idev = __in6_dev_get(dev);
1992 if (idev) {
1993 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1994 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1995 ifa->prefix_len);
1996 if (onlink)
1997 break;
1998 }
1999 }
2000 rcu_read_unlock();
2001 return onlink;
2002 }
2003 EXPORT_SYMBOL(ipv6_chk_prefix);
2004
2005 /**
2006 * ipv6_dev_find - find the first device with a given source address.
2007 * @net: the net namespace
2008 * @addr: the source address
2009 * @dev: used to find the L3 domain of interest
2010 *
2011 * The caller should be protected by RCU, or RTNL.
2012 */
2013 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2014 struct net_device *dev)
2015 {
2016 return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2017 IFA_F_TENTATIVE);
2018 }
2019 EXPORT_SYMBOL(ipv6_dev_find);
2020
2021 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2022 struct net_device *dev, int strict)
2023 {
2024 unsigned int hash = inet6_addr_hash(net, addr);
2025 struct inet6_ifaddr *ifp, *result = NULL;
2026
2027 rcu_read_lock();
2028 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
2029 if (!net_eq(dev_net(ifp->idev->dev), net))
2030 continue;
2031 if (ipv6_addr_equal(&ifp->addr, addr)) {
2032 if (!dev || ifp->idev->dev == dev ||
2033 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2034 result = ifp;
2035 in6_ifa_hold(ifp);
2036 break;
2037 }
2038 }
2039 }
2040 rcu_read_unlock();
2041
2042 return result;
2043 }
2044
2045 /* Gets referenced address, destroys ifaddr */
2046
2047 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2048 {
2049 if (dad_failed)
2050 ifp->flags |= IFA_F_DADFAILED;
2051
2052 if (ifp->flags&IFA_F_TEMPORARY) {
2053 struct inet6_ifaddr *ifpub;
2054 spin_lock_bh(&ifp->lock);
2055 ifpub = ifp->ifpub;
2056 if (ifpub) {
2057 in6_ifa_hold(ifpub);
2058 spin_unlock_bh(&ifp->lock);
2059 ipv6_create_tempaddr(ifpub, true);
2060 in6_ifa_put(ifpub);
2061 } else {
2062 spin_unlock_bh(&ifp->lock);
2063 }
2064 ipv6_del_addr(ifp);
2065 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2066 spin_lock_bh(&ifp->lock);
2067 addrconf_del_dad_work(ifp);
2068 ifp->flags |= IFA_F_TENTATIVE;
2069 if (dad_failed)
2070 ifp->flags &= ~IFA_F_OPTIMISTIC;
2071 spin_unlock_bh(&ifp->lock);
2072 if (dad_failed)
2073 ipv6_ifa_notify(0, ifp);
2074 in6_ifa_put(ifp);
2075 } else {
2076 ipv6_del_addr(ifp);
2077 }
2078 }
2079
2080 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2081 {
2082 int err = -ENOENT;
2083
2084 spin_lock_bh(&ifp->lock);
2085 if (ifp->state == INET6_IFADDR_STATE_DAD) {
2086 ifp->state = INET6_IFADDR_STATE_POSTDAD;
2087 err = 0;
2088 }
2089 spin_unlock_bh(&ifp->lock);
2090
2091 return err;
2092 }
2093
2094 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2095 {
2096 struct inet6_dev *idev = ifp->idev;
2097 struct net *net = dev_net(ifp->idev->dev);
2098
2099 if (addrconf_dad_end(ifp)) {
2100 in6_ifa_put(ifp);
2101 return;
2102 }
2103
2104 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2105 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2106
2107 spin_lock_bh(&ifp->lock);
2108
2109 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2110 struct in6_addr new_addr;
2111 struct inet6_ifaddr *ifp2;
2112 int retries = ifp->stable_privacy_retry + 1;
2113 struct ifa6_config cfg = {
2114 .pfx = &new_addr,
2115 .plen = ifp->prefix_len,
2116 .ifa_flags = ifp->flags,
2117 .valid_lft = ifp->valid_lft,
2118 .preferred_lft = ifp->prefered_lft,
2119 .scope = ifp->scope,
2120 };
2121
2122 if (retries > net->ipv6.sysctl.idgen_retries) {
2123 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2124 ifp->idev->dev->name);
2125 goto errdad;
2126 }
2127
2128 new_addr = ifp->addr;
2129 if (ipv6_generate_stable_address(&new_addr, retries,
2130 idev))
2131 goto errdad;
2132
2133 spin_unlock_bh(&ifp->lock);
2134
2135 if (idev->cnf.max_addresses &&
2136 ipv6_count_addresses(idev) >=
2137 idev->cnf.max_addresses)
2138 goto lock_errdad;
2139
2140 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2141 ifp->idev->dev->name);
2142
2143 ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2144 if (IS_ERR(ifp2))
2145 goto lock_errdad;
2146
2147 spin_lock_bh(&ifp2->lock);
2148 ifp2->stable_privacy_retry = retries;
2149 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2150 spin_unlock_bh(&ifp2->lock);
2151
2152 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2153 in6_ifa_put(ifp2);
2154 lock_errdad:
2155 spin_lock_bh(&ifp->lock);
2156 }
2157
2158 errdad:
2159 /* transition from _POSTDAD to _ERRDAD */
2160 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2161 spin_unlock_bh(&ifp->lock);
2162
2163 addrconf_mod_dad_work(ifp, 0);
2164 in6_ifa_put(ifp);
2165 }
2166
2167 /* Join the solicited-node multicast group for this address.
2168 * Caller must hold RTNL. */
2169 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2170 {
2171 struct in6_addr maddr;
2172
2173 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2174 return;
2175
2176 addrconf_addr_solict_mult(addr, &maddr);
2177 ipv6_dev_mc_inc(dev, &maddr);
2178 }
2179
2180 /* caller must hold RTNL */
2181 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2182 {
2183 struct in6_addr maddr;
2184
2185 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2186 return;
2187
2188 addrconf_addr_solict_mult(addr, &maddr);
2189 __ipv6_dev_mc_dec(idev, &maddr);
2190 }
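
/* Illustrative userspace sketch (not part of the kernel build) of the
 * mapping addrconf_addr_solict_mult() applies before the join/leave
 * calls above: the solicited-node multicast group ff02::1:ffXX:XXXX is
 * formed from the low 24 bits of the address (RFC 4291, section 2.7.1).
 * demo_solicited_node_mcast() is our own helper name.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void demo_solicited_node_mcast(const uint8_t addr[16], uint8_t maddr[16])
{
	static const uint8_t prefix[13] = {
		0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff
	};

	memcpy(maddr, prefix, sizeof(prefix));
	memcpy(maddr + 13, addr + 13, 3);	/* keep the last 24 bits */
}
#endif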
2191
2192 /* caller must hold RTNL */
2193 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2194 {
2195 struct in6_addr addr;
2196
2197 if (ifp->prefix_len >= 127) /* RFC 6164 */
2198 return;
2199 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2200 if (ipv6_addr_any(&addr))
2201 return;
2202 __ipv6_dev_ac_inc(ifp->idev, &addr);
2203 }
2204
2205 /* caller must hold RTNL */
2206 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2207 {
2208 struct in6_addr addr;
2209
2210 if (ifp->prefix_len >= 127) /* RFC 6164 */
2211 return;
2212 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2213 if (ipv6_addr_any(&addr))
2214 return;
2215 __ipv6_dev_ac_dec(ifp->idev, &addr);
2216 }
2217
2218 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2219 {
2220 switch (dev->addr_len) {
2221 case ETH_ALEN:
2222 memcpy(eui, dev->dev_addr, 3);
2223 eui[3] = 0xFF;
2224 eui[4] = 0xFE;
2225 memcpy(eui + 5, dev->dev_addr + 3, 3);
2226 break;
2227 case EUI64_ADDR_LEN:
2228 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2229 eui[0] ^= 2;
2230 break;
2231 default:
2232 return -1;
2233 }
2234
2235 return 0;
2236 }
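
/* For comparison, a userspace sketch (not part of the kernel build) of
 * the classic modified EUI-64 derivation used for Ethernet-style
 * devices (RFC 4291, Appendix A): split the 48-bit MAC, insert 0xFFFE
 * and flip the universal/local bit.  demo_eui64_from_mac() is our own
 * name for the helper.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void demo_eui64_from_mac(const uint8_t mac[6], uint8_t eui[8])
{
	memcpy(eui, mac, 3);
	eui[0] ^= 0x02;			/* invert the U/L bit */
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	memcpy(eui + 5, mac + 3, 3);
}
#endif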
2237
2238 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2239 {
2240 union fwnet_hwaddr *ha;
2241
2242 if (dev->addr_len != FWNET_ALEN)
2243 return -1;
2244
2245 ha = (union fwnet_hwaddr *)dev->dev_addr;
2246
2247 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2248 eui[0] ^= 2;
2249 return 0;
2250 }
2251
2252 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2253 {
2254 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2255 if (dev->addr_len != ARCNET_ALEN)
2256 return -1;
2257 memset(eui, 0, 7);
2258 eui[7] = *(u8 *)dev->dev_addr;
2259 return 0;
2260 }
2261
2262 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2263 {
2264 if (dev->addr_len != INFINIBAND_ALEN)
2265 return -1;
2266 memcpy(eui, dev->dev_addr + 12, 8);
2267 eui[0] |= 2;
2268 return 0;
2269 }
2270
2271 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2272 {
2273 if (addr == 0)
2274 return -1;
2275 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2276 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2277 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2278 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2279 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2280 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2281 eui[1] = 0;
2282 eui[2] = 0x5E;
2283 eui[3] = 0xFE;
2284 memcpy(eui + 4, &addr, 4);
2285 return 0;
2286 }
2287
2288 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2289 {
2290 if (dev->priv_flags & IFF_ISATAP)
2291 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2292 return -1;
2293 }
2294
2295 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2296 {
2297 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2298 }
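
/* Userspace sketch (not part of the kernel build) of the interface
 * identifier layout produced by __ipv6_isatap_ifid() above for ISATAP
 * and GRE tunnels (RFC 5214): 00-00-5E-FE followed by the embedded IPv4
 * address, with the "universal" bit set only for globally unique IPv4
 * addresses.  demo_isatap_ifid() is our own helper; ipv4_be is assumed
 * to be in network byte order and is_global is the caller's judgement.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void demo_isatap_ifid(uint32_t ipv4_be, int is_global, uint8_t eui[8])
{
	eui[0] = is_global ? 0x02 : 0x00;
	eui[1] = 0x00;
	eui[2] = 0x5E;
	eui[3] = 0xFE;
	memcpy(eui + 4, &ipv4_be, 4);
}
#endif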
2299
2300 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2301 {
2302 memcpy(eui, dev->perm_addr, 3);
2303 memcpy(eui + 5, dev->perm_addr + 3, 3);
2304 eui[3] = 0xFF;
2305 eui[4] = 0xFE;
2306 eui[0] ^= 2;
2307 return 0;
2308 }
2309
2310 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2311 {
2312 switch (dev->type) {
2313 case ARPHRD_ETHER:
2314 case ARPHRD_FDDI:
2315 return addrconf_ifid_eui48(eui, dev);
2316 case ARPHRD_ARCNET:
2317 return addrconf_ifid_arcnet(eui, dev);
2318 case ARPHRD_INFINIBAND:
2319 return addrconf_ifid_infiniband(eui, dev);
2320 case ARPHRD_SIT:
2321 return addrconf_ifid_sit(eui, dev);
2322 case ARPHRD_IPGRE:
2323 case ARPHRD_TUNNEL:
2324 return addrconf_ifid_gre(eui, dev);
2325 case ARPHRD_6LOWPAN:
2326 return addrconf_ifid_6lowpan(eui, dev);
2327 case ARPHRD_IEEE1394:
2328 return addrconf_ifid_ieee1394(eui, dev);
2329 case ARPHRD_TUNNEL6:
2330 case ARPHRD_IP6GRE:
2331 case ARPHRD_RAWIP:
2332 return addrconf_ifid_ip6tnl(eui, dev);
2333 }
2334 return -1;
2335 }
2336
2337 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2338 {
2339 int err = -1;
2340 struct inet6_ifaddr *ifp;
2341
2342 read_lock_bh(&idev->lock);
2343 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2344 if (ifp->scope > IFA_LINK)
2345 break;
2346 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2347 memcpy(eui, ifp->addr.s6_addr+8, 8);
2348 err = 0;
2349 break;
2350 }
2351 }
2352 read_unlock_bh(&idev->lock);
2353 return err;
2354 }
2355
2356 /* Generation of a randomized Interface Identifier
2357 * draft-ietf-6man-rfc4941bis, Section 3.3.1
2358 */
2359
2360 static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2361 {
2362 regen:
2363 get_random_bytes(&addr->s6_addr[8], 8);
2364
2365 /* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2366 * check if generated address is not inappropriate:
2367 *
2368 * - Reserved IPv6 Interface Identifiers
2369 * - XXX: already assigned to an address on the device
2370 */
2371
2372 /* Subnet-router anycast: 0000:0000:0000:0000 */
2373 if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2374 goto regen;
2375
2376 /* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2377 * Proxy Mobile IPv6: 0200:5EFF:FE00:5213
2378 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2379 */
2380 if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2381 (ntohl(addr->s6_addr32[3]) & 0xff000000) == 0xfe000000)
2382 goto regen;
2383
2384 /* Reserved subnet anycast addresses */
2385 if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2386 ntohl(addr->s6_addr32[3]) >= 0xffffff80)
2387 goto regen;
2388 }
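
/* A self-contained userspace sketch (not part of the kernel build) of
 * the reserved-identifier test the regeneration loop above applies to a
 * candidate 64-bit IID.  demo_iid_is_reserved() is our own name; the
 * ranges mirror the checks in ipv6_gen_rnd_iid().
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

bool demo_iid_is_reserved(const uint8_t iid[8])
{
	uint32_t hi = ((uint32_t)iid[0] << 24) | ((uint32_t)iid[1] << 16) |
		      ((uint32_t)iid[2] << 8)  |  (uint32_t)iid[3];
	uint32_t lo = ((uint32_t)iid[4] << 24) | ((uint32_t)iid[5] << 16) |
		      ((uint32_t)iid[6] << 8)  |  (uint32_t)iid[7];

	if (!hi && !lo)
		return true;			/* subnet-router anycast */
	if (hi == 0x02005eff && (lo & 0xff000000) == 0xfe000000)
		return true;			/* IANA Ethernet / Proxy MIPv6 block */
	if (hi == 0xfdffffff && lo >= 0xffffff80)
		return true;			/* reserved subnet anycast range */
	return false;
}
#endif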
2389
2390 /*
2391 * Add prefix route.
2392 */
2393
2394 static void
2395 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2396 struct net_device *dev, unsigned long expires,
2397 u32 flags, gfp_t gfp_flags)
2398 {
2399 struct fib6_config cfg = {
2400 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2401 .fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2402 .fc_ifindex = dev->ifindex,
2403 .fc_expires = expires,
2404 .fc_dst_len = plen,
2405 .fc_flags = RTF_UP | flags,
2406 .fc_nlinfo.nl_net = dev_net(dev),
2407 .fc_protocol = RTPROT_KERNEL,
2408 .fc_type = RTN_UNICAST,
2409 };
2410
2411 cfg.fc_dst = *pfx;
2412
2413 /* Prevent useless cloning on point-to-point SIT.
2414 This is done here on the assumption that the whole
2415 class of non-broadcast devices does not need cloning.
2416 */
2417 #if IS_ENABLED(CONFIG_IPV6_SIT)
2418 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2419 cfg.fc_flags |= RTF_NONEXTHOP;
2420 #endif
2421
2422 ip6_route_add(&cfg, gfp_flags, NULL);
2423 }
2424
2425
2426 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2427 int plen,
2428 const struct net_device *dev,
2429 u32 flags, u32 noflags,
2430 bool no_gw)
2431 {
2432 struct fib6_node *fn;
2433 struct fib6_info *rt = NULL;
2434 struct fib6_table *table;
2435 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2436
2437 table = fib6_get_table(dev_net(dev), tb_id);
2438 if (!table)
2439 return NULL;
2440
2441 rcu_read_lock();
2442 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2443 if (!fn)
2444 goto out;
2445
2446 for_each_fib6_node_rt_rcu(fn) {
2447 /* prefix routes only use builtin fib6_nh */
2448 if (rt->nh)
2449 continue;
2450
2451 if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2452 continue;
2453 if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2454 continue;
2455 if ((rt->fib6_flags & flags) != flags)
2456 continue;
2457 if ((rt->fib6_flags & noflags) != 0)
2458 continue;
2459 if (!fib6_info_hold_safe(rt))
2460 continue;
2461 break;
2462 }
2463 out:
2464 rcu_read_unlock();
2465 return rt;
2466 }
2467
2468
2469 /* Create "default" multicast route to the interface */
2470
2471 static void addrconf_add_mroute(struct net_device *dev)
2472 {
2473 struct fib6_config cfg = {
2474 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2475 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2476 .fc_ifindex = dev->ifindex,
2477 .fc_dst_len = 8,
2478 .fc_flags = RTF_UP,
2479 .fc_type = RTN_MULTICAST,
2480 .fc_nlinfo.nl_net = dev_net(dev),
2481 .fc_protocol = RTPROT_KERNEL,
2482 };
2483
2484 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2485
2486 ip6_route_add(&cfg, GFP_KERNEL, NULL);
2487 }
2488
2489 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2490 {
2491 struct inet6_dev *idev;
2492
2493 ASSERT_RTNL();
2494
2495 idev = ipv6_find_idev(dev);
2496 if (IS_ERR(idev))
2497 return idev;
2498
2499 if (idev->cnf.disable_ipv6)
2500 return ERR_PTR(-EACCES);
2501
2502 /* Add default multicast route */
2503 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2504 addrconf_add_mroute(dev);
2505
2506 return idev;
2507 }
2508
2509 static void manage_tempaddrs(struct inet6_dev *idev,
2510 struct inet6_ifaddr *ifp,
2511 __u32 valid_lft, __u32 prefered_lft,
2512 bool create, unsigned long now)
2513 {
2514 u32 flags;
2515 struct inet6_ifaddr *ift;
2516
2517 read_lock_bh(&idev->lock);
2518 /* update all temporary addresses in the list */
2519 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2520 int age, max_valid, max_prefered;
2521
2522 if (ifp != ift->ifpub)
2523 continue;
2524
2525 /* RFC 4941 section 3.3:
2526 * If a received option will extend the lifetime of a public
2527 * address, the lifetimes of temporary addresses should
2528 * be extended, subject to the overall constraint that no
2529 * temporary addresses should ever remain "valid" or "preferred"
2530 * for a time longer than (TEMP_VALID_LIFETIME) or
2531 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2532 */
2533 age = (now - ift->cstamp) / HZ;
2534 max_valid = idev->cnf.temp_valid_lft - age;
2535 if (max_valid < 0)
2536 max_valid = 0;
2537
2538 max_prefered = idev->cnf.temp_prefered_lft -
2539 idev->desync_factor - age;
2540 if (max_prefered < 0)
2541 max_prefered = 0;
2542
2543 if (valid_lft > max_valid)
2544 valid_lft = max_valid;
2545
2546 if (prefered_lft > max_prefered)
2547 prefered_lft = max_prefered;
2548
2549 spin_lock(&ift->lock);
2550 flags = ift->flags;
2551 ift->valid_lft = valid_lft;
2552 ift->prefered_lft = prefered_lft;
2553 ift->tstamp = now;
2554 if (prefered_lft > 0)
2555 ift->flags &= ~IFA_F_DEPRECATED;
2556
2557 spin_unlock(&ift->lock);
2558 if (!(flags&IFA_F_TENTATIVE))
2559 ipv6_ifa_notify(0, ift);
2560 }
2561
2562 if ((create || list_empty(&idev->tempaddr_list)) &&
2563 idev->cnf.use_tempaddr > 0) {
2564 /* When a new public address is created as described
2565 * in [ADDRCONF], also create a new temporary address.
2566 * Also create a temporary address if use_tempaddr is
2567 * enabled but no temporary address currently exists.
2568 */
2569 read_unlock_bh(&idev->lock);
2570 ipv6_create_tempaddr(ifp, false);
2571 } else {
2572 read_unlock_bh(&idev->lock);
2573 }
2574 }
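
/* Userspace sketch (not part of the kernel build) of the lifetime
 * clamping arithmetic manage_tempaddrs() performs per RFC 4941,
 * section 3.3: counted from its creation, a temporary address never
 * stays valid longer than TEMP_VALID_LIFETIME nor preferred longer than
 * TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR.  All values are in seconds;
 * demo_clamp_temp_lifetimes() is our own helper name.
 */
#if 0
#include <stdint.h>

static void demo_clamp_temp_lifetimes(uint32_t age_sec,
				      uint32_t temp_valid_lft,
				      uint32_t temp_prefered_lft,
				      uint32_t desync_factor,
				      uint32_t *valid_lft,
				      uint32_t *prefered_lft)
{
	int64_t max_valid = (int64_t)temp_valid_lft - age_sec;
	int64_t max_prefered = (int64_t)temp_prefered_lft -
			       desync_factor - age_sec;

	if (max_valid < 0)
		max_valid = 0;
	if (max_prefered < 0)
		max_prefered = 0;

	if (*valid_lft > (uint32_t)max_valid)
		*valid_lft = (uint32_t)max_valid;
	if (*prefered_lft > (uint32_t)max_prefered)
		*prefered_lft = (uint32_t)max_prefered;
}
#endif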
2575
2576 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2577 {
2578 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2579 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2580 }
2581
2582 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2583 const struct prefix_info *pinfo,
2584 struct inet6_dev *in6_dev,
2585 const struct in6_addr *addr, int addr_type,
2586 u32 addr_flags, bool sllao, bool tokenized,
2587 __u32 valid_lft, u32 prefered_lft)
2588 {
2589 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2590 int create = 0, update_lft = 0;
2591
2592 if (!ifp && valid_lft) {
2593 int max_addresses = in6_dev->cnf.max_addresses;
2594 struct ifa6_config cfg = {
2595 .pfx = addr,
2596 .plen = pinfo->prefix_len,
2597 .ifa_flags = addr_flags,
2598 .valid_lft = valid_lft,
2599 .preferred_lft = prefered_lft,
2600 .scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2601 };
2602
2603 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2604 if ((net->ipv6.devconf_all->optimistic_dad ||
2605 in6_dev->cnf.optimistic_dad) &&
2606 !net->ipv6.devconf_all->forwarding && sllao)
2607 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2608 #endif
2609
2610 /* Do not allow the creation of too many autoconfigured
2611 * addresses; this would be too easy a way to crash the kernel.
2612 */
2613 if (!max_addresses ||
2614 ipv6_count_addresses(in6_dev) < max_addresses)
2615 ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2616
2617 if (IS_ERR_OR_NULL(ifp))
2618 return -1;
2619
2620 create = 1;
2621 spin_lock_bh(&ifp->lock);
2622 ifp->flags |= IFA_F_MANAGETEMPADDR;
2623 ifp->cstamp = jiffies;
2624 ifp->tokenized = tokenized;
2625 spin_unlock_bh(&ifp->lock);
2626 addrconf_dad_start(ifp);
2627 }
2628
2629 if (ifp) {
2630 u32 flags;
2631 unsigned long now;
2632 u32 stored_lft;
2633
2634 /* update lifetime (RFC2462 5.5.3 e) */
2635 spin_lock_bh(&ifp->lock);
2636 now = jiffies;
2637 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2638 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2639 else
2640 stored_lft = 0;
2641 if (!create && stored_lft) {
2642 const u32 minimum_lft = min_t(u32,
2643 stored_lft, MIN_VALID_LIFETIME);
2644 valid_lft = max(valid_lft, minimum_lft);
2645
2646 /* RFC4862 Section 5.5.3e:
2647 * "Note that the preferred lifetime of the
2648 * corresponding address is always reset to
2649 * the Preferred Lifetime in the received
2650 * Prefix Information option, regardless of
2651 * whether the valid lifetime is also reset or
2652 * ignored."
2653 *
2654 * So we should always update prefered_lft here.
2655 */
2656 update_lft = 1;
2657 }
2658
2659 if (update_lft) {
2660 ifp->valid_lft = valid_lft;
2661 ifp->prefered_lft = prefered_lft;
2662 ifp->tstamp = now;
2663 flags = ifp->flags;
2664 ifp->flags &= ~IFA_F_DEPRECATED;
2665 spin_unlock_bh(&ifp->lock);
2666
2667 if (!(flags&IFA_F_TENTATIVE))
2668 ipv6_ifa_notify(0, ifp);
2669 } else
2670 spin_unlock_bh(&ifp->lock);
2671
2672 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2673 create, now);
2674
2675 in6_ifa_put(ifp);
2676 addrconf_verify();
2677 }
2678
2679 return 0;
2680 }
2681 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
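
/* Userspace sketch (not part of the kernel build) of the valid-lifetime
 * update rule applied above for an existing address (RFC 4862,
 * section 5.5.3 e): a received Valid Lifetime may always extend the
 * remaining lifetime, but may only shorten it down to
 * min(remaining, two hours).  demo_update_valid_lft() and
 * DEMO_TWO_HOURS are our own names for the sketch.
 */
#if 0
#include <stdint.h>

#define DEMO_TWO_HOURS	(2 * 60 * 60)	/* seconds */

static uint32_t demo_update_valid_lft(uint32_t stored_lft,	/* remaining */
				      uint32_t received_lft)
{
	uint32_t minimum = stored_lft < DEMO_TWO_HOURS ?
			   stored_lft : DEMO_TWO_HOURS;

	return received_lft > minimum ? received_lft : minimum;
}
#endif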
2682
2683 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2684 {
2685 struct prefix_info *pinfo;
2686 __u32 valid_lft;
2687 __u32 prefered_lft;
2688 int addr_type, err;
2689 u32 addr_flags = 0;
2690 struct inet6_dev *in6_dev;
2691 struct net *net = dev_net(dev);
2692
2693 pinfo = (struct prefix_info *) opt;
2694
2695 if (len < sizeof(struct prefix_info)) {
2696 netdev_dbg(dev, "addrconf: prefix option too short\n");
2697 return;
2698 }
2699
2700 /*
2701 * Validation checks ([ADDRCONF], page 19)
2702 */
2703
2704 addr_type = ipv6_addr_type(&pinfo->prefix);
2705
2706 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2707 return;
2708
2709 valid_lft = ntohl(pinfo->valid);
2710 prefered_lft = ntohl(pinfo->prefered);
2711
2712 if (prefered_lft > valid_lft) {
2713 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2714 return;
2715 }
2716
2717 in6_dev = in6_dev_get(dev);
2718
2719 if (!in6_dev) {
2720 net_dbg_ratelimited("addrconf: device %s not configured\n",
2721 dev->name);
2722 return;
2723 }
2724
2725 /*
2726 * Two things going on here:
2727 * 1) Add routes for on-link prefixes
2728 * 2) Configure prefixes with the auto flag set
2729 */
2730
2731 if (pinfo->onlink) {
2732 struct fib6_info *rt;
2733 unsigned long rt_expires;
2734
2735 /* Avoid arithmetic overflow. Really, we could
2736 * save rt_expires in seconds, likely valid_lft,
2737 * but that would require division in fib gc, which
2738 * is not good.
2739 */
2740 if (HZ > USER_HZ)
2741 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2742 else
2743 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2744
2745 if (addrconf_finite_timeout(rt_expires))
2746 rt_expires *= HZ;
2747
2748 rt = addrconf_get_prefix_route(&pinfo->prefix,
2749 pinfo->prefix_len,
2750 dev,
2751 RTF_ADDRCONF | RTF_PREFIX_RT,
2752 RTF_DEFAULT, true);
2753
2754 if (rt) {
2755 /* Autoconf prefix route */
2756 if (valid_lft == 0) {
2757 ip6_del_rt(net, rt, false);
2758 rt = NULL;
2759 } else if (addrconf_finite_timeout(rt_expires)) {
2760 /* not infinity */
2761 fib6_set_expires(rt, jiffies + rt_expires);
2762 } else {
2763 fib6_clean_expires(rt);
2764 }
2765 } else if (valid_lft) {
2766 clock_t expires = 0;
2767 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2768 if (addrconf_finite_timeout(rt_expires)) {
2769 /* not infinity */
2770 flags |= RTF_EXPIRES;
2771 expires = jiffies_to_clock_t(rt_expires);
2772 }
2773 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2774 0, dev, expires, flags,
2775 GFP_ATOMIC);
2776 }
2777 fib6_info_release(rt);
2778 }
2779
2780 /* Try to figure out our local address for this prefix */
2781
2782 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2783 struct in6_addr addr;
2784 bool tokenized = false, dev_addr_generated = false;
2785
2786 if (pinfo->prefix_len == 64) {
2787 memcpy(&addr, &pinfo->prefix, 8);
2788
2789 if (!ipv6_addr_any(&in6_dev->token)) {
2790 read_lock_bh(&in6_dev->lock);
2791 memcpy(addr.s6_addr + 8,
2792 in6_dev->token.s6_addr + 8, 8);
2793 read_unlock_bh(&in6_dev->lock);
2794 tokenized = true;
2795 } else if (is_addr_mode_generate_stable(in6_dev) &&
2796 !ipv6_generate_stable_address(&addr, 0,
2797 in6_dev)) {
2798 addr_flags |= IFA_F_STABLE_PRIVACY;
2799 goto ok;
2800 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2801 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2802 goto put;
2803 } else {
2804 dev_addr_generated = true;
2805 }
2806 goto ok;
2807 }
2808 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2809 pinfo->prefix_len);
2810 goto put;
2811
2812 ok:
2813 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2814 &addr, addr_type,
2815 addr_flags, sllao,
2816 tokenized, valid_lft,
2817 prefered_lft);
2818 if (err)
2819 goto put;
2820
2821 /* Ignore the error case here because the previous prefix add addr
2822 * was successful, and that is what will be notified.
2823 */
2824 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2825 addr_type, addr_flags, sllao,
2826 tokenized, valid_lft,
2827 prefered_lft,
2828 dev_addr_generated);
2829 }
2830 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2831 put:
2832 in6_dev_put(in6_dev);
2833 }
2834
2835 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2836 struct in6_ifreq *ireq)
2837 {
2838 struct ip_tunnel_parm p = { };
2839 int err;
2840
2841 if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2842 return -EADDRNOTAVAIL;
2843
2844 p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2845 p.iph.version = 4;
2846 p.iph.ihl = 5;
2847 p.iph.protocol = IPPROTO_IPV6;
2848 p.iph.ttl = 64;
2849
2850 if (!dev->netdev_ops->ndo_tunnel_ctl)
2851 return -EOPNOTSUPP;
2852 err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2853 if (err)
2854 return err;
2855
2856 dev = __dev_get_by_name(net, p.name);
2857 if (!dev)
2858 return -ENOBUFS;
2859 return dev_open(dev, NULL);
2860 }
2861
2862 /*
2863 * Set destination address.
2864 * Special case for SIT interfaces where we create a new "virtual"
2865 * device.
2866 */
2867 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2868 {
2869 struct net_device *dev;
2870 struct in6_ifreq ireq;
2871 int err = -ENODEV;
2872
2873 if (!IS_ENABLED(CONFIG_IPV6_SIT))
2874 return -ENODEV;
2875 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2876 return -EFAULT;
2877
2878 rtnl_lock();
2879 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2880 if (dev && dev->type == ARPHRD_SIT)
2881 err = addrconf_set_sit_dstaddr(net, dev, &ireq);
2882 rtnl_unlock();
2883 return err;
2884 }
2885
2886 static int ipv6_mc_config(struct sock *sk, bool join,
2887 const struct in6_addr *addr, int ifindex)
2888 {
2889 int ret;
2890
2891 ASSERT_RTNL();
2892
2893 lock_sock(sk);
2894 if (join)
2895 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2896 else
2897 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2898 release_sock(sk);
2899
2900 return ret;
2901 }
2902
2903 /*
2904 * Manual configuration of address on an interface
2905 */
2906 static int inet6_addr_add(struct net *net, int ifindex,
2907 struct ifa6_config *cfg,
2908 struct netlink_ext_ack *extack)
2909 {
2910 struct inet6_ifaddr *ifp;
2911 struct inet6_dev *idev;
2912 struct net_device *dev;
2913 unsigned long timeout;
2914 clock_t expires;
2915 u32 flags;
2916
2917 ASSERT_RTNL();
2918
2919 if (cfg->plen > 128)
2920 return -EINVAL;
2921
2922 /* check the lifetime */
2923 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
2924 return -EINVAL;
2925
2926 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64)
2927 return -EINVAL;
2928
2929 dev = __dev_get_by_index(net, ifindex);
2930 if (!dev)
2931 return -ENODEV;
2932
2933 idev = addrconf_add_dev(dev);
2934 if (IS_ERR(idev))
2935 return PTR_ERR(idev);
2936
2937 if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2938 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2939 true, cfg->pfx, ifindex);
2940
2941 if (ret < 0)
2942 return ret;
2943 }
2944
2945 cfg->scope = ipv6_addr_scope(cfg->pfx);
2946
2947 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
2948 if (addrconf_finite_timeout(timeout)) {
2949 expires = jiffies_to_clock_t(timeout * HZ);
2950 cfg->valid_lft = timeout;
2951 flags = RTF_EXPIRES;
2952 } else {
2953 expires = 0;
2954 flags = 0;
2955 cfg->ifa_flags |= IFA_F_PERMANENT;
2956 }
2957
2958 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
2959 if (addrconf_finite_timeout(timeout)) {
2960 if (timeout == 0)
2961 cfg->ifa_flags |= IFA_F_DEPRECATED;
2962 cfg->preferred_lft = timeout;
2963 }
2964
2965 ifp = ipv6_add_addr(idev, cfg, true, extack);
2966 if (!IS_ERR(ifp)) {
2967 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
2968 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
2969 ifp->rt_priority, dev, expires,
2970 flags, GFP_KERNEL);
2971 }
2972
2973 /* Send a netlink notification if DAD is enabled and
2974 * optimistic flag is not set
2975 */
2976 if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
2977 ipv6_ifa_notify(0, ifp);
2978 /*
2979 * Note that section 3.1 of RFC 4429 indicates
2980 * that the Optimistic flag should not be set for
2981 * manually configured addresses
2982 */
2983 addrconf_dad_start(ifp);
2984 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
2985 manage_tempaddrs(idev, ifp, cfg->valid_lft,
2986 cfg->preferred_lft, true, jiffies);
2987 in6_ifa_put(ifp);
2988 addrconf_verify_rtnl();
2989 return 0;
2990 } else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2991 ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
2992 cfg->pfx, ifindex);
2993 }
2994
2995 return PTR_ERR(ifp);
2996 }
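
/* Userspace sketch (not part of the kernel build) of the sanity checks
 * inet6_addr_add() applies before touching any state: a prefix length
 * of at most 128, a non-zero valid lifetime that is not shorter than
 * the preferred lifetime, and IFA_F_MANAGETEMPADDR only on /64
 * prefixes.  demo_check_addr_request() and the locally defined flag
 * constant are our own; the real flag lives in <linux/if_addr.h>.
 */
#if 0
#include <stdint.h>

#define DEMO_IFA_F_MANAGETEMPADDR	0x100	/* local copy for the sketch */

static int demo_check_addr_request(unsigned int plen, uint32_t valid_lft,
				   uint32_t preferred_lft, uint32_t ifa_flags)
{
	if (plen > 128)
		return -1;
	if (!valid_lft || preferred_lft > valid_lft)
		return -1;
	if ((ifa_flags & DEMO_IFA_F_MANAGETEMPADDR) && plen != 64)
		return -1;
	return 0;
}
#endif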
2997
2998 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2999 const struct in6_addr *pfx, unsigned int plen)
3000 {
3001 struct inet6_ifaddr *ifp;
3002 struct inet6_dev *idev;
3003 struct net_device *dev;
3004
3005 if (plen > 128)
3006 return -EINVAL;
3007
3008 dev = __dev_get_by_index(net, ifindex);
3009 if (!dev)
3010 return -ENODEV;
3011
3012 idev = __in6_dev_get(dev);
3013 if (!idev)
3014 return -ENXIO;
3015
3016 read_lock_bh(&idev->lock);
3017 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3018 if (ifp->prefix_len == plen &&
3019 ipv6_addr_equal(pfx, &ifp->addr)) {
3020 in6_ifa_hold(ifp);
3021 read_unlock_bh(&idev->lock);
3022
3023 if (!(ifp->flags & IFA_F_TEMPORARY) &&
3024 (ifa_flags & IFA_F_MANAGETEMPADDR))
3025 manage_tempaddrs(idev, ifp, 0, 0, false,
3026 jiffies);
3027 ipv6_del_addr(ifp);
3028 addrconf_verify_rtnl();
3029 if (ipv6_addr_is_multicast(pfx)) {
3030 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3031 false, pfx, dev->ifindex);
3032 }
3033 return 0;
3034 }
3035 }
3036 read_unlock_bh(&idev->lock);
3037 return -EADDRNOTAVAIL;
3038 }
3039
3040
3041 int addrconf_add_ifaddr(struct net *net, void __user *arg)
3042 {
3043 struct ifa6_config cfg = {
3044 .ifa_flags = IFA_F_PERMANENT,
3045 .preferred_lft = INFINITY_LIFE_TIME,
3046 .valid_lft = INFINITY_LIFE_TIME,
3047 };
3048 struct in6_ifreq ireq;
3049 int err;
3050
3051 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3052 return -EPERM;
3053
3054 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3055 return -EFAULT;
3056
3057 cfg.pfx = &ireq.ifr6_addr;
3058 cfg.plen = ireq.ifr6_prefixlen;
3059
3060 rtnl_lock();
3061 err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
3062 rtnl_unlock();
3063 return err;
3064 }
3065
3066 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3067 {
3068 struct in6_ifreq ireq;
3069 int err;
3070
3071 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3072 return -EPERM;
3073
3074 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3075 return -EFAULT;
3076
3077 rtnl_lock();
3078 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3079 ireq.ifr6_prefixlen);
3080 rtnl_unlock();
3081 return err;
3082 }
3083
3084 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3085 int plen, int scope)
3086 {
3087 struct inet6_ifaddr *ifp;
3088 struct ifa6_config cfg = {
3089 .pfx = addr,
3090 .plen = plen,
3091 .ifa_flags = IFA_F_PERMANENT,
3092 .valid_lft = INFINITY_LIFE_TIME,
3093 .preferred_lft = INFINITY_LIFE_TIME,
3094 .scope = scope
3095 };
3096
3097 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3098 if (!IS_ERR(ifp)) {
3099 spin_lock_bh(&ifp->lock);
3100 ifp->flags &= ~IFA_F_TENTATIVE;
3101 spin_unlock_bh(&ifp->lock);
3102 rt_genid_bump_ipv6(dev_net(idev->dev));
3103 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3104 in6_ifa_put(ifp);
3105 }
3106 }
3107
3108 #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3109 static void add_v4_addrs(struct inet6_dev *idev)
3110 {
3111 struct in6_addr addr;
3112 struct net_device *dev;
3113 struct net *net = dev_net(idev->dev);
3114 int scope, plen, offset = 0;
3115 u32 pflags = 0;
3116
3117 ASSERT_RTNL();
3118
3119 memset(&addr, 0, sizeof(struct in6_addr));
3120 /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */
3121 if (idev->dev->addr_len == sizeof(struct in6_addr))
3122 offset = sizeof(struct in6_addr) - 4;
3123 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
3124
3125 if (idev->dev->flags&IFF_POINTOPOINT) {
3126 if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
3127 return;
3128
3129 addr.s6_addr32[0] = htonl(0xfe800000);
3130 scope = IFA_LINK;
3131 plen = 64;
3132 } else {
3133 scope = IPV6_ADDR_COMPATv4;
3134 plen = 96;
3135 pflags |= RTF_NONEXTHOP;
3136 }
3137
3138 if (addr.s6_addr32[3]) {
3139 add_addr(idev, &addr, plen, scope);
3140 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3141 GFP_KERNEL);
3142 return;
3143 }
3144
3145 for_each_netdev(net, dev) {
3146 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3147 if (in_dev && (dev->flags & IFF_UP)) {
3148 struct in_ifaddr *ifa;
3149 int flag = scope;
3150
3151 in_dev_for_each_ifa_rtnl(ifa, in_dev) {
3152 addr.s6_addr32[3] = ifa->ifa_local;
3153
3154 if (ifa->ifa_scope == RT_SCOPE_LINK)
3155 continue;
3156 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3157 if (idev->dev->flags&IFF_POINTOPOINT)
3158 continue;
3159 flag |= IFA_HOST;
3160 }
3161
3162 add_addr(idev, &addr, plen, flag);
3163 addrconf_prefix_route(&addr, plen, 0, idev->dev,
3164 0, pflags, GFP_KERNEL);
3165 }
3166 }
3167 }
3168 }
3169 #endif
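
/* Userspace sketch (not part of the kernel build) of the two address
 * shapes add_v4_addrs() hands to add_addr() above: a /96
 * IPv4-compatible address ::a.b.c.d, or, on point-to-point tunnels, a
 * /64 link-local fe80:: address carrying the IPv4 address in its low
 * 32 bits.  demo_v4_mapped_forms() is our own helper; ipv4_be is
 * assumed to be in network byte order.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void demo_v4_mapped_forms(uint32_t ipv4_be, int point_to_point,
				 uint8_t out[16])
{
	memset(out, 0, 16);
	if (point_to_point) {
		out[0] = 0xfe;		/* fe80::/64 link-local prefix */
		out[1] = 0x80;
	}
	memcpy(out + 12, &ipv4_be, 4);	/* IPv4 address in the last 32 bits */
}
#endif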
3170
3171 static void init_loopback(struct net_device *dev)
3172 {
3173 struct inet6_dev *idev;
3174
3175 /* ::1 */
3176
3177 ASSERT_RTNL();
3178
3179 idev = ipv6_find_idev(dev);
3180 if (IS_ERR(idev)) {
3181 pr_debug("%s: add_dev failed\n", __func__);
3182 return;
3183 }
3184
3185 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3186 }
3187
3188 void addrconf_add_linklocal(struct inet6_dev *idev,
3189 const struct in6_addr *addr, u32 flags)
3190 {
3191 struct ifa6_config cfg = {
3192 .pfx = addr,
3193 .plen = 64,
3194 .ifa_flags = flags | IFA_F_PERMANENT,
3195 .valid_lft = INFINITY_LIFE_TIME,
3196 .preferred_lft = INFINITY_LIFE_TIME,
3197 .scope = IFA_LINK
3198 };
3199 struct inet6_ifaddr *ifp;
3200
3201 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3202 if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3203 idev->cnf.optimistic_dad) &&
3204 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3205 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3206 #endif
3207
3208 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3209 if (!IS_ERR(ifp)) {
3210 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3211 0, 0, GFP_ATOMIC);
3212 addrconf_dad_start(ifp);
3213 in6_ifa_put(ifp);
3214 }
3215 }
3216 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3217
3218 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3219 {
3220 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3221 return true;
3222
3223 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3224 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3225 return true;
3226
3227 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3228 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3229 return true;
3230
3231 return false;
3232 }
3233
3234 static int ipv6_generate_stable_address(struct in6_addr *address,
3235 u8 dad_count,
3236 const struct inet6_dev *idev)
3237 {
3238 static DEFINE_SPINLOCK(lock);
3239 static __u32 digest[SHA1_DIGEST_WORDS];
3240 static __u32 workspace[SHA1_WORKSPACE_WORDS];
3241
3242 static union {
3243 char __data[SHA1_BLOCK_SIZE];
3244 struct {
3245 struct in6_addr secret;
3246 __be32 prefix[2];
3247 unsigned char hwaddr[MAX_ADDR_LEN];
3248 u8 dad_count;
3249 } __packed;
3250 } data;
3251
3252 struct in6_addr secret;
3253 struct in6_addr temp;
3254 struct net *net = dev_net(idev->dev);
3255
3256 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3257
3258 if (idev->cnf.stable_secret.initialized)
3259 secret = idev->cnf.stable_secret.secret;
3260 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3261 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3262 else
3263 return -1;
3264
3265 retry:
3266 spin_lock_bh(&lock);
3267
3268 sha1_init(digest);
3269 memset(&data, 0, sizeof(data));
3270 memset(workspace, 0, sizeof(workspace));
3271 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3272 data.prefix[0] = address->s6_addr32[0];
3273 data.prefix[1] = address->s6_addr32[1];
3274 data.secret = secret;
3275 data.dad_count = dad_count;
3276
3277 sha1_transform(digest, data.__data, workspace);
3278
3279 temp = *address;
3280 temp.s6_addr32[2] = (__force __be32)digest[0];
3281 temp.s6_addr32[3] = (__force __be32)digest[1];
3282
3283 spin_unlock_bh(&lock);
3284
3285 if (ipv6_reserved_interfaceid(temp)) {
3286 dad_count++;
3287 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3288 return -1;
3289 goto retry;
3290 }
3291
3292 *address = temp;
3293 return 0;
3294 }
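
/* Userspace sketch (not part of the kernel build) of the RFC 7217-style
 * structure used by ipv6_generate_stable_address() above: hash
 * (secret, prefix, hwaddr, dad_count), take 64 bits of the digest as
 * the interface identifier, and bump dad_count and retry while the
 * result is reserved.  The kernel uses SHA-1; the FNV-1a mix below is
 * only a stand-in so the sketch stays self-contained, and all demo_*
 * names are ours.  demo_iid_is_reserved() is the helper sketched after
 * ipv6_gen_rnd_iid() earlier in this file.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

extern bool demo_iid_is_reserved(const uint8_t iid[8]);

static uint64_t demo_fnv1a64(const uint8_t *p, size_t len, uint64_t h)
{
	while (len--) {
		h ^= *p++;
		h *= 0x100000001b3ULL;	/* FNV-1a prime */
	}
	return h;
}

static int demo_stable_iid(const uint8_t secret[16], const uint8_t prefix[8],
			   const uint8_t *hwaddr, size_t hwlen,
			   unsigned int max_retries, uint8_t iid[8])
{
	uint8_t dad_count = 0;

	do {
		uint64_t h = 0xcbf29ce484222325ULL;	/* FNV offset basis */
		int i;

		h = demo_fnv1a64(secret, 16, h);
		h = demo_fnv1a64(prefix, 8, h);
		h = demo_fnv1a64(hwaddr, hwlen, h);
		h = demo_fnv1a64(&dad_count, 1, h);

		for (i = 0; i < 8; i++)
			iid[i] = (uint8_t)(h >> (56 - 8 * i));

		if (!demo_iid_is_reserved(iid))
			return 0;
	} while (++dad_count <= max_retries);

	return -1;
}
#endif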
3295
3296 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3297 {
3298 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3299
3300 if (s->initialized)
3301 return;
3302 s = &idev->cnf.stable_secret;
3303 get_random_bytes(&s->secret, sizeof(s->secret));
3304 s->initialized = true;
3305 }
3306
3307 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3308 {
3309 struct in6_addr addr;
3310
3311 /* no link local addresses on L3 master devices */
3312 if (netif_is_l3_master(idev->dev))
3313 return;
3314
3315 /* no link local addresses on devices flagged as slaves */
3316 if (idev->dev->flags & IFF_SLAVE)
3317 return;
3318
3319 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3320
3321 switch (idev->cnf.addr_gen_mode) {
3322 case IN6_ADDR_GEN_MODE_RANDOM:
3323 ipv6_gen_mode_random_init(idev);
3324 fallthrough;
3325 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3326 if (!ipv6_generate_stable_address(&addr, 0, idev))
3327 addrconf_add_linklocal(idev, &addr,
3328 IFA_F_STABLE_PRIVACY);
3329 else if (prefix_route)
3330 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3331 0, 0, GFP_KERNEL);
3332 break;
3333 case IN6_ADDR_GEN_MODE_EUI64:
3334 /* addrconf_add_linklocal also adds a prefix_route and we
3335 * only need to care about prefix routes if ipv6_generate_eui64
3336 * couldn't generate one.
3337 */
3338 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3339 addrconf_add_linklocal(idev, &addr, 0);
3340 else if (prefix_route)
3341 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3342 0, 0, GFP_KERNEL);
3343 break;
3344 case IN6_ADDR_GEN_MODE_NONE:
3345 default:
3346 /* will not add any link local address */
3347 break;
3348 }
3349 }
3350
3351 static void addrconf_dev_config(struct net_device *dev)
3352 {
3353 struct inet6_dev *idev;
3354
3355 ASSERT_RTNL();
3356
3357 if ((dev->type != ARPHRD_ETHER) &&
3358 (dev->type != ARPHRD_FDDI) &&
3359 (dev->type != ARPHRD_ARCNET) &&
3360 (dev->type != ARPHRD_INFINIBAND) &&
3361 (dev->type != ARPHRD_IEEE1394) &&
3362 (dev->type != ARPHRD_TUNNEL6) &&
3363 (dev->type != ARPHRD_6LOWPAN) &&
3364 (dev->type != ARPHRD_TUNNEL) &&
3365 (dev->type != ARPHRD_NONE) &&
3366 (dev->type != ARPHRD_RAWIP)) {
3367 /* Alas, we support only Ethernet autoconfiguration. */
3368 idev = __in6_dev_get(dev);
3369 if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3370 dev->flags & IFF_MULTICAST)
3371 ipv6_mc_up(idev);
3372 return;
3373 }
3374
3375 idev = addrconf_add_dev(dev);
3376 if (IS_ERR(idev))
3377 return;
3378
3379 /* this device type has no EUI support */
3380 if (dev->type == ARPHRD_NONE &&
3381 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3382 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3383
3384 addrconf_addr_gen(idev, false);
3385 }
3386
3387 #if IS_ENABLED(CONFIG_IPV6_SIT)
3388 static void addrconf_sit_config(struct net_device *dev)
3389 {
3390 struct inet6_dev *idev;
3391
3392 ASSERT_RTNL();
3393
3394 /*
3395 * Configure the tunnel with one of our IPv4
3396 * addresses; ideally we would configure all of
3397 * our v4 addresses in the tunnel.
3398 */
3399
3400 idev = ipv6_find_idev(dev);
3401 if (IS_ERR(idev)) {
3402 pr_debug("%s: add_dev failed\n", __func__);
3403 return;
3404 }
3405
3406 if (dev->priv_flags & IFF_ISATAP) {
3407 addrconf_addr_gen(idev, false);
3408 return;
3409 }
3410
3411 add_v4_addrs(idev);
3412
3413 if (dev->flags&IFF_POINTOPOINT)
3414 addrconf_add_mroute(dev);
3415 }
3416 #endif
3417
3418 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3419 static void addrconf_gre_config(struct net_device *dev)
3420 {
3421 struct inet6_dev *idev;
3422
3423 ASSERT_RTNL();
3424
3425 idev = ipv6_find_idev(dev);
3426 if (IS_ERR(idev)) {
3427 pr_debug("%s: add_dev failed\n", __func__);
3428 return;
3429 }
3430
3431 if (dev->type == ARPHRD_ETHER) {
3432 addrconf_addr_gen(idev, true);
3433 return;
3434 }
3435
3436 add_v4_addrs(idev);
3437
3438 if (dev->flags & IFF_POINTOPOINT)
3439 addrconf_add_mroute(dev);
3440 }
3441 #endif
3442
3443 static int fixup_permanent_addr(struct net *net,
3444 struct inet6_dev *idev,
3445 struct inet6_ifaddr *ifp)
3446 {
3447 /* !fib6_node means the host route was removed from the
3448 * FIB, for example, if 'lo' device is taken down. In that
3449 * case regenerate the host route.
3450 */
3451 if (!ifp->rt || !ifp->rt->fib6_node) {
3452 struct fib6_info *f6i, *prev;
3453
3454 f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3455 GFP_ATOMIC);
3456 if (IS_ERR(f6i))
3457 return PTR_ERR(f6i);
3458
3459 /* ifp->rt can be accessed outside of rtnl */
3460 spin_lock(&ifp->lock);
3461 prev = ifp->rt;
3462 ifp->rt = f6i;
3463 spin_unlock(&ifp->lock);
3464
3465 fib6_info_release(prev);
3466 }
3467
3468 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3469 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3470 ifp->rt_priority, idev->dev, 0, 0,
3471 GFP_ATOMIC);
3472 }
3473
3474 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3475 addrconf_dad_start(ifp);
3476
3477 return 0;
3478 }
3479
3480 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3481 {
3482 struct inet6_ifaddr *ifp, *tmp;
3483 struct inet6_dev *idev;
3484
3485 idev = __in6_dev_get(dev);
3486 if (!idev)
3487 return;
3488
3489 write_lock_bh(&idev->lock);
3490
3491 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3492 if ((ifp->flags & IFA_F_PERMANENT) &&
3493 fixup_permanent_addr(net, idev, ifp) < 0) {
3494 write_unlock_bh(&idev->lock);
3495 in6_ifa_hold(ifp);
3496 ipv6_del_addr(ifp);
3497 write_lock_bh(&idev->lock);
3498
3499 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3500 idev->dev->name, &ifp->addr);
3501 }
3502 }
3503
3504 write_unlock_bh(&idev->lock);
3505 }
3506
3507 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3508 void *ptr)
3509 {
3510 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3511 struct netdev_notifier_change_info *change_info;
3512 struct netdev_notifier_changeupper_info *info;
3513 struct inet6_dev *idev = __in6_dev_get(dev);
3514 struct net *net = dev_net(dev);
3515 int run_pending = 0;
3516 int err;
3517
3518 switch (event) {
3519 case NETDEV_REGISTER:
3520 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3521 idev = ipv6_add_dev(dev);
3522 if (IS_ERR(idev))
3523 return notifier_from_errno(PTR_ERR(idev));
3524 }
3525 break;
3526
3527 case NETDEV_CHANGEMTU:
3528 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3529 if (dev->mtu < IPV6_MIN_MTU) {
3530 addrconf_ifdown(dev, dev != net->loopback_dev);
3531 break;
3532 }
3533
3534 if (idev) {
3535 rt6_mtu_change(dev, dev->mtu);
3536 idev->cnf.mtu6 = dev->mtu;
3537 break;
3538 }
3539
3540 /* allocate new idev */
3541 idev = ipv6_add_dev(dev);
3542 if (IS_ERR(idev))
3543 break;
3544
3545 /* device is still not ready */
3546 if (!(idev->if_flags & IF_READY))
3547 break;
3548
3549 run_pending = 1;
3550 fallthrough;
3551 case NETDEV_UP:
3552 case NETDEV_CHANGE:
3553 if (dev->flags & IFF_SLAVE)
3554 break;
3555
3556 if (idev && idev->cnf.disable_ipv6)
3557 break;
3558
3559 if (event == NETDEV_UP) {
3560 /* restore routes for permanent addresses */
3561 addrconf_permanent_addr(net, dev);
3562
3563 if (!addrconf_link_ready(dev)) {
3564 /* device is not ready yet. */
3565 pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3566 dev->name);
3567 break;
3568 }
3569
3570 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3571 idev = ipv6_add_dev(dev);
3572
3573 if (!IS_ERR_OR_NULL(idev)) {
3574 idev->if_flags |= IF_READY;
3575 run_pending = 1;
3576 }
3577 } else if (event == NETDEV_CHANGE) {
3578 if (!addrconf_link_ready(dev)) {
3579 /* device is still not ready. */
3580 rt6_sync_down_dev(dev, event);
3581 break;
3582 }
3583
3584 if (!IS_ERR_OR_NULL(idev)) {
3585 if (idev->if_flags & IF_READY) {
3586 /* device is already configured -
3587 * but resend MLD reports, we might
3588 * have roamed and need to update
3589 * multicast snooping switches
3590 */
3591 ipv6_mc_up(idev);
3592 change_info = ptr;
3593 if (change_info->flags_changed & IFF_NOARP)
3594 addrconf_dad_run(idev, true);
3595 rt6_sync_up(dev, RTNH_F_LINKDOWN);
3596 break;
3597 }
3598 idev->if_flags |= IF_READY;
3599 }
3600
3601 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3602 dev->name);
3603
3604 run_pending = 1;
3605 }
3606
3607 switch (dev->type) {
3608 #if IS_ENABLED(CONFIG_IPV6_SIT)
3609 case ARPHRD_SIT:
3610 addrconf_sit_config(dev);
3611 break;
3612 #endif
3613 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3614 case ARPHRD_IP6GRE:
3615 case ARPHRD_IPGRE:
3616 addrconf_gre_config(dev);
3617 break;
3618 #endif
3619 case ARPHRD_LOOPBACK:
3620 init_loopback(dev);
3621 break;
3622
3623 default:
3624 addrconf_dev_config(dev);
3625 break;
3626 }
3627
3628 if (!IS_ERR_OR_NULL(idev)) {
3629 if (run_pending)
3630 addrconf_dad_run(idev, false);
3631
3632 /* Device has an address by now */
3633 rt6_sync_up(dev, RTNH_F_DEAD);
3634
3635 /*
3636 * If the MTU changed while the interface was down,
3637 * the new MTU must be reflected in the idev as well
3638 * as in the routes when the interface comes back up.
3639 */
3640 if (idev->cnf.mtu6 != dev->mtu &&
3641 dev->mtu >= IPV6_MIN_MTU) {
3642 rt6_mtu_change(dev, dev->mtu);
3643 idev->cnf.mtu6 = dev->mtu;
3644 }
3645 idev->tstamp = jiffies;
3646 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3647
3648 /*
3649 * If the MTU that changed while the interface was down is
3650 * lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3651 */
3652 if (dev->mtu < IPV6_MIN_MTU)
3653 addrconf_ifdown(dev, dev != net->loopback_dev);
3654 }
3655 break;
3656
3657 case NETDEV_DOWN:
3658 case NETDEV_UNREGISTER:
3659 /*
3660 * Remove all addresses from this interface.
3661 */
3662 addrconf_ifdown(dev, event != NETDEV_DOWN);
3663 break;
3664
3665 case NETDEV_CHANGENAME:
3666 if (idev) {
3667 snmp6_unregister_dev(idev);
3668 addrconf_sysctl_unregister(idev);
3669 err = addrconf_sysctl_register(idev);
3670 if (err)
3671 return notifier_from_errno(err);
3672 err = snmp6_register_dev(idev);
3673 if (err) {
3674 addrconf_sysctl_unregister(idev);
3675 return notifier_from_errno(err);
3676 }
3677 }
3678 break;
3679
3680 case NETDEV_PRE_TYPE_CHANGE:
3681 case NETDEV_POST_TYPE_CHANGE:
3682 if (idev)
3683 addrconf_type_change(dev, event);
3684 break;
3685
3686 case NETDEV_CHANGEUPPER:
3687 info = ptr;
3688
3689 /* flush all routes if dev is linked to or unlinked from
3690 * an L3 master device (e.g., VRF)
3691 */
3692 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3693 addrconf_ifdown(dev, false);
3694 }
3695
3696 return NOTIFY_OK;
3697 }
3698
3699 /*
3700 * addrconf module should be notified of a device going up
3701 */
3702 static struct notifier_block ipv6_dev_notf = {
3703 .notifier_call = addrconf_notify,
3704 .priority = ADDRCONF_NOTIFY_PRIORITY,
3705 };
3706
3707 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3708 {
3709 struct inet6_dev *idev;
3710 ASSERT_RTNL();
3711
3712 idev = __in6_dev_get(dev);
3713
3714 if (event == NETDEV_POST_TYPE_CHANGE)
3715 ipv6_mc_remap(idev);
3716 else if (event == NETDEV_PRE_TYPE_CHANGE)
3717 ipv6_mc_unmap(idev);
3718 }
3719
3720 static bool addr_is_local(const struct in6_addr *addr)
3721 {
3722 return ipv6_addr_type(addr) &
3723 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3724 }
3725
3726 static int addrconf_ifdown(struct net_device *dev, bool unregister)
3727 {
3728 unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
3729 struct net *net = dev_net(dev);
3730 struct inet6_dev *idev;
3731 struct inet6_ifaddr *ifa, *tmp;
3732 bool keep_addr = false;
3733 bool was_ready;
3734 int state, i;
3735
3736 ASSERT_RTNL();
3737
3738 rt6_disable_ip(dev, event);
3739
3740 idev = __in6_dev_get(dev);
3741 if (!idev)
3742 return -ENODEV;
3743
3744 /*
3745 * Step 1: remove reference to ipv6 device from parent device.
3746 * Do not dev_put!
3747 */
3748 if (unregister) {
3749 idev->dead = 1;
3750
3751 /* protected by rtnl_lock */
3752 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3753
3754 /* Step 1.5: remove snmp6 entry */
3755 snmp6_unregister_dev(idev);
3756
3757 }
3758
3759 /* combine the user config with event to determine if permanent
3760 * addresses are to be removed from address hash table
3761 */
3762 if (!unregister && !idev->cnf.disable_ipv6) {
3763 /* aggregate the system setting and interface setting */
3764 int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3765
3766 if (!_keep_addr)
3767 _keep_addr = idev->cnf.keep_addr_on_down;
3768
3769 keep_addr = (_keep_addr > 0);
3770 }
3771
3772 /* Step 2: clear hash table */
3773 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3774 struct hlist_head *h = &inet6_addr_lst[i];
3775
3776 spin_lock_bh(&addrconf_hash_lock);
3777 restart:
3778 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3779 if (ifa->idev == idev) {
3780 addrconf_del_dad_work(ifa);
3781 /* combined flag + permanent flag decide if
3782 * address is retained on a down event
3783 */
3784 if (!keep_addr ||
3785 !(ifa->flags & IFA_F_PERMANENT) ||
3786 addr_is_local(&ifa->addr)) {
3787 hlist_del_init_rcu(&ifa->addr_lst);
3788 goto restart;
3789 }
3790 }
3791 }
3792 spin_unlock_bh(&addrconf_hash_lock);
3793 }
3794
3795 write_lock_bh(&idev->lock);
3796
3797 addrconf_del_rs_timer(idev);
3798
3799 /* Step 2: clear flags for stateless addrconf, repeated down
3800 * detection
3801 */
3802 was_ready = idev->if_flags & IF_READY;
3803 if (!unregister)
3804 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3805
3806 /* Step 3: clear tempaddr list */
3807 while (!list_empty(&idev->tempaddr_list)) {
3808 ifa = list_first_entry(&idev->tempaddr_list,
3809 struct inet6_ifaddr, tmp_list);
3810 list_del(&ifa->tmp_list);
3811 write_unlock_bh(&idev->lock);
3812 spin_lock_bh(&ifa->lock);
3813
3814 if (ifa->ifpub) {
3815 in6_ifa_put(ifa->ifpub);
3816 ifa->ifpub = NULL;
3817 }
3818 spin_unlock_bh(&ifa->lock);
3819 in6_ifa_put(ifa);
3820 write_lock_bh(&idev->lock);
3821 }
3822
3823 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3824 struct fib6_info *rt = NULL;
3825 bool keep;
3826
3827 addrconf_del_dad_work(ifa);
3828
3829 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3830 !addr_is_local(&ifa->addr);
3831
3832 write_unlock_bh(&idev->lock);
3833 spin_lock_bh(&ifa->lock);
3834
3835 if (keep) {
3836 /* set state to skip the notifier below */
3837 state = INET6_IFADDR_STATE_DEAD;
3838 ifa->state = INET6_IFADDR_STATE_PREDAD;
3839 if (!(ifa->flags & IFA_F_NODAD))
3840 ifa->flags |= IFA_F_TENTATIVE;
3841
3842 rt = ifa->rt;
3843 ifa->rt = NULL;
3844 } else {
3845 state = ifa->state;
3846 ifa->state = INET6_IFADDR_STATE_DEAD;
3847 }
3848
3849 spin_unlock_bh(&ifa->lock);
3850
3851 if (rt)
3852 ip6_del_rt(net, rt, false);
3853
3854 if (state != INET6_IFADDR_STATE_DEAD) {
3855 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3856 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3857 } else {
3858 if (idev->cnf.forwarding)
3859 addrconf_leave_anycast(ifa);
3860 addrconf_leave_solict(ifa->idev, &ifa->addr);
3861 }
3862
3863 write_lock_bh(&idev->lock);
3864 if (!keep) {
3865 list_del_rcu(&ifa->if_list);
3866 in6_ifa_put(ifa);
3867 }
3868 }
3869
3870 write_unlock_bh(&idev->lock);
3871
3872 /* Step 5: Discard anycast and multicast list */
3873 if (unregister) {
3874 ipv6_ac_destroy_dev(idev);
3875 ipv6_mc_destroy_dev(idev);
3876 } else if (was_ready) {
3877 ipv6_mc_down(idev);
3878 }
3879
3880 idev->tstamp = jiffies;
3881 idev->ra_mtu = 0;
3882
3883 /* Last: Shoot the device (if unregistered) */
3884 if (unregister) {
3885 addrconf_sysctl_unregister(idev);
3886 neigh_parms_release(&nd_tbl, idev->nd_parms);
3887 neigh_ifdown(&nd_tbl, dev);
3888 in6_dev_put(idev);
3889 }
3890 return 0;
3891 }
3892
3893 static void addrconf_rs_timer(struct timer_list *t)
3894 {
3895 struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3896 struct net_device *dev = idev->dev;
3897 struct in6_addr lladdr;
3898
3899 write_lock(&idev->lock);
3900 if (idev->dead || !(idev->if_flags & IF_READY))
3901 goto out;
3902
3903 if (!ipv6_accept_ra(idev))
3904 goto out;
3905
3906 /* Announcement received after solicitation was sent */
3907 if (idev->if_flags & IF_RA_RCVD)
3908 goto out;
3909
3910 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3911 write_unlock(&idev->lock);
3912 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3913 ndisc_send_rs(dev, &lladdr,
3914 &in6addr_linklocal_allrouters);
3915 else
3916 goto put;
3917
3918 write_lock(&idev->lock);
3919 idev->rs_interval = rfc3315_s14_backoff_update(
3920 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3921 /* The wait after the last probe can be shorter */
3922 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3923 idev->cnf.rtr_solicits) ?
3924 idev->cnf.rtr_solicit_delay :
3925 idev->rs_interval);
3926 } else {
3927 /*
3928 * Note: we do not support the deprecated "all on-link"
3929 * assumption any longer.
3930 */
3931 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3932 }
3933
3934 out:
3935 write_unlock(&idev->lock);
3936 put:
3937 in6_dev_put(idev);
3938 }
3939
3940 /*
3941 * Duplicate Address Detection
3942 */
3943 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3944 {
3945 unsigned long rand_num;
3946 struct inet6_dev *idev = ifp->idev;
3947 u64 nonce;
3948
3949 if (ifp->flags & IFA_F_OPTIMISTIC)
3950 rand_num = 0;
3951 else
3952 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3953
3954 nonce = 0;
3955 if (idev->cnf.enhanced_dad ||
3956 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3957 do
3958 get_random_bytes(&nonce, 6);
3959 while (nonce == 0);
3960 }
3961 ifp->dad_nonce = nonce;
3962 ifp->dad_probes = idev->cnf.dad_transmits;
3963 addrconf_mod_dad_work(ifp, rand_num);
3964 }
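
/* Userspace sketch (not part of the kernel build) of the two decisions
 * addrconf_dad_kick() makes above: an optimistic address starts DAD
 * immediately while everything else waits a random delay in
 * [0, rtr_solicit_delay), and enhanced DAD (RFC 7527) draws a non-zero
 * 48-bit nonce.  rand() is only a stand-in for the kernel's prandom /
 * get_random_bytes helpers, and the demo_* names are ours.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

static unsigned long demo_dad_delay(int optimistic,
				    unsigned long rtr_solicit_delay)
{
	if (optimistic)
		return 0;
	return (unsigned long)rand() %
	       (rtr_solicit_delay ? rtr_solicit_delay : 1);
}

static uint64_t demo_dad_nonce(int enhanced_dad)
{
	uint64_t nonce = 0;

	if (!enhanced_dad)
		return 0;
	while (nonce == 0)
		nonce = (((uint64_t)rand() << 24) ^ (uint64_t)rand()) &
			0xffffffffffffULL;	/* 48 bits, non-zero */
	return nonce;
}
#endif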
3965
3966 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3967 {
3968 struct inet6_dev *idev = ifp->idev;
3969 struct net_device *dev = idev->dev;
3970 bool bump_id, notify = false;
3971 struct net *net;
3972
3973 addrconf_join_solict(dev, &ifp->addr);
3974
3975 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3976
3977 read_lock_bh(&idev->lock);
3978 spin_lock(&ifp->lock);
3979 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3980 goto out;
3981
3982 net = dev_net(dev);
3983 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3984 (net->ipv6.devconf_all->accept_dad < 1 &&
3985 idev->cnf.accept_dad < 1) ||
3986 !(ifp->flags&IFA_F_TENTATIVE) ||
3987 ifp->flags & IFA_F_NODAD) {
3988 bool send_na = false;
3989
3990 if (ifp->flags & IFA_F_TENTATIVE &&
3991 !(ifp->flags & IFA_F_OPTIMISTIC))
3992 send_na = true;
3993 bump_id = ifp->flags & IFA_F_TENTATIVE;
3994 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3995 spin_unlock(&ifp->lock);
3996 read_unlock_bh(&idev->lock);
3997
3998 addrconf_dad_completed(ifp, bump_id, send_na);
3999 return;
4000 }
4001
4002 if (!(idev->if_flags & IF_READY)) {
4003 spin_unlock(&ifp->lock);
4004 read_unlock_bh(&idev->lock);
4005 /*
4006 * If the device is not ready:
4007 * - keep it tentative if it is a permanent address.
4008 * - otherwise, kill it.
4009 */
4010 in6_ifa_hold(ifp);
4011 addrconf_dad_stop(ifp, 0);
4012 return;
4013 }
4014
4015 /*
4016 * Optimistic nodes can start receiving
4017 * frames right away.
4018 */
4019 if (ifp->flags & IFA_F_OPTIMISTIC) {
4020 ip6_ins_rt(net, ifp->rt);
4021 if (ipv6_use_optimistic_addr(net, idev)) {
4022 /* Because optimistic nodes can use this address,
4023 * notify listeners. If DAD fails, RTM_DELADDR is sent.
4024 */
4025 notify = true;
4026 }
4027 }
4028
4029 addrconf_dad_kick(ifp);
4030 out:
4031 spin_unlock(&ifp->lock);
4032 read_unlock_bh(&idev->lock);
4033 if (notify)
4034 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4035 }
4036
4037 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4038 {
4039 bool begin_dad = false;
4040
4041 spin_lock_bh(&ifp->lock);
4042 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4043 ifp->state = INET6_IFADDR_STATE_PREDAD;
4044 begin_dad = true;
4045 }
4046 spin_unlock_bh(&ifp->lock);
4047
4048 if (begin_dad)
4049 addrconf_mod_dad_work(ifp, 0);
4050 }
4051
4052 static void addrconf_dad_work(struct work_struct *w)
4053 {
4054 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4055 struct inet6_ifaddr,
4056 dad_work);
4057 struct inet6_dev *idev = ifp->idev;
4058 bool bump_id, disable_ipv6 = false;
4059 struct in6_addr mcaddr;
4060
4061 enum {
4062 DAD_PROCESS,
4063 DAD_BEGIN,
4064 DAD_ABORT,
4065 } action = DAD_PROCESS;
4066
4067 rtnl_lock();
4068
4069 spin_lock_bh(&ifp->lock);
4070 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4071 action = DAD_BEGIN;
4072 ifp->state = INET6_IFADDR_STATE_DAD;
4073 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4074 action = DAD_ABORT;
4075 ifp->state = INET6_IFADDR_STATE_POSTDAD;
4076
4077 if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
4078 idev->cnf.accept_dad > 1) &&
4079 !idev->cnf.disable_ipv6 &&
4080 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4081 struct in6_addr addr;
4082
4083 addr.s6_addr32[0] = htonl(0xfe800000);
4084 addr.s6_addr32[1] = 0;
4085
4086 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4087 ipv6_addr_equal(&ifp->addr, &addr)) {
4088 /* DAD failed for link-local based on MAC */
4089 idev->cnf.disable_ipv6 = 1;
4090
4091 pr_info("%s: IPv6 being disabled!\n",
4092 ifp->idev->dev->name);
4093 disable_ipv6 = true;
4094 }
4095 }
4096 }
4097 spin_unlock_bh(&ifp->lock);
4098
4099 if (action == DAD_BEGIN) {
4100 addrconf_dad_begin(ifp);
4101 goto out;
4102 } else if (action == DAD_ABORT) {
4103 in6_ifa_hold(ifp);
4104 addrconf_dad_stop(ifp, 1);
4105 if (disable_ipv6)
4106 addrconf_ifdown(idev->dev, false);
4107 goto out;
4108 }
4109
4110 if (!ifp->dad_probes && addrconf_dad_end(ifp))
4111 goto out;
4112
4113 write_lock_bh(&idev->lock);
4114 if (idev->dead || !(idev->if_flags & IF_READY)) {
4115 write_unlock_bh(&idev->lock);
4116 goto out;
4117 }
4118
4119 spin_lock(&ifp->lock);
4120 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4121 spin_unlock(&ifp->lock);
4122 write_unlock_bh(&idev->lock);
4123 goto out;
4124 }
4125
4126 if (ifp->dad_probes == 0) {
4127 bool send_na = false;
4128
4129 /*
4130 * DAD was successful
4131 */
4132
4133 if (ifp->flags & IFA_F_TENTATIVE &&
4134 !(ifp->flags & IFA_F_OPTIMISTIC))
4135 send_na = true;
4136 bump_id = ifp->flags & IFA_F_TENTATIVE;
4137 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4138 spin_unlock(&ifp->lock);
4139 write_unlock_bh(&idev->lock);
4140
4141 addrconf_dad_completed(ifp, bump_id, send_na);
4142
4143 goto out;
4144 }
4145
4146 ifp->dad_probes--;
4147 addrconf_mod_dad_work(ifp,
4148 max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4149 HZ/100));
4150 spin_unlock(&ifp->lock);
4151 write_unlock_bh(&idev->lock);
4152
4153 /* send a neighbour solicitation for our addr */
4154 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4155 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4156 ifp->dad_nonce);
4157 out:
4158 in6_ifa_put(ifp);
4159 rtnl_unlock();
4160 }
4161
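/* Returns true when @ifp is the only usable (permanent and not
 * tentative/optimistic/DAD-failed) link-local address on its device;
 * addrconf_dad_completed() uses this to decide whether to resend the
 * MLD report and start sending router solicitations.
 */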
4162 /* ifp->idev must be at least read locked */
4163 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4164 {
4165 struct inet6_ifaddr *ifpiter;
4166 struct inet6_dev *idev = ifp->idev;
4167
4168 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4169 if (ifpiter->scope > IFA_LINK)
4170 break;
4171 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4172 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4173 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4174 IFA_F_PERMANENT)
4175 return false;
4176 }
4177 return true;
4178 }
4179
4180 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4181 bool send_na)
4182 {
4183 struct net_device *dev = ifp->idev->dev;
4184 struct in6_addr lladdr;
4185 bool send_rs, send_mld;
4186
4187 addrconf_del_dad_work(ifp);
4188
4189 /*
4190 * Configure the address for reception. Now it is valid.
4191 */
4192
4193 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4194
4195 /* If added prefix is link local and we are prepared to process
4196 router advertisements, start sending router solicitations.
4197 */
4198
4199 read_lock_bh(&ifp->idev->lock);
4200 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4201 send_rs = send_mld &&
4202 ipv6_accept_ra(ifp->idev) &&
4203 ifp->idev->cnf.rtr_solicits != 0 &&
4204 (dev->flags&IFF_LOOPBACK) == 0;
4205 read_unlock_bh(&ifp->idev->lock);
4206
4207 /* While DAD is in progress, the MLD report's source address is in6addr_any.
4208 * Resend it with the proper link-local address now.
4209 */
4210 if (send_mld)
4211 ipv6_mc_dad_complete(ifp->idev);
4212
4213 /* send unsolicited NA if enabled */
4214 if (send_na &&
4215 (ifp->idev->cnf.ndisc_notify ||
4216 dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4217 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4218 /*router=*/ !!ifp->idev->cnf.forwarding,
4219 /*solicited=*/ false, /*override=*/ true,
4220 /*inc_opt=*/ true);
4221 }
4222
4223 if (send_rs) {
4224 /*
4225 * If a host has already performed a random delay
4226 * [...] as part of DAD [...] there is no need
4227 * to delay again before sending the first RS
4228 */
4229 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4230 return;
4231 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4232
4233 write_lock_bh(&ifp->idev->lock);
4234 spin_lock(&ifp->lock);
4235 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4236 ifp->idev->cnf.rtr_solicit_interval);
4237 ifp->idev->rs_probes = 1;
4238 ifp->idev->if_flags |= IF_RS_SENT;
4239 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4240 spin_unlock(&ifp->lock);
4241 write_unlock_bh(&ifp->idev->lock);
4242 }
4243
4244 if (bump_id)
4245 rt_genid_bump_ipv6(dev_net(dev));
4246
4247 /* Make sure that a new temporary address will be created
4248 * before this temporary address becomes deprecated.
4249 */
4250 if (ifp->flags & IFA_F_TEMPORARY)
4251 addrconf_verify_rtnl();
4252 }
4253
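/* Kick DAD for addresses on @idev: without @restart, only addresses
 * that are tentative and still in the DAD state; with @restart, every
 * address is pushed back to PREDAD and DAD is run again.
 */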
4254 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4255 {
4256 struct inet6_ifaddr *ifp;
4257
4258 read_lock_bh(&idev->lock);
4259 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4260 spin_lock(&ifp->lock);
4261 if ((ifp->flags & IFA_F_TENTATIVE &&
4262 ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4263 if (restart)
4264 ifp->state = INET6_IFADDR_STATE_PREDAD;
4265 addrconf_dad_kick(ifp);
4266 }
4267 spin_unlock(&ifp->lock);
4268 }
4269 read_unlock_bh(&idev->lock);
4270 }
4271
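/* /proc/net/if_inet6: a seq_file walker over the global inet6_addr_lst
 * hash table, filtered by network namespace. Each entry shows the
 * address, interface index, prefix length, scope, flags and device name.
 */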
4272 #ifdef CONFIG_PROC_FS
4273 struct if6_iter_state {
4274 struct seq_net_private p;
4275 int bucket;
4276 int offset;
4277 };
4278
4279 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4280 {
4281 struct if6_iter_state *state = seq->private;
4282 struct net *net = seq_file_net(seq);
4283 struct inet6_ifaddr *ifa = NULL;
4284 int p = 0;
4285
4286 /* initial bucket if pos is 0 */
4287 if (pos == 0) {
4288 state->bucket = 0;
4289 state->offset = 0;
4290 }
4291
4292 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4293 hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket],
4294 addr_lst) {
4295 if (!net_eq(dev_net(ifa->idev->dev), net))
4296 continue;
4297 /* sync with offset */
4298 if (p < state->offset) {
4299 p++;
4300 continue;
4301 }
4302 return ifa;
4303 }
4304
4305 /* prepare for next bucket */
4306 state->offset = 0;
4307 p = 0;
4308 }
4309 return NULL;
4310 }
4311
4312 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4313 struct inet6_ifaddr *ifa)
4314 {
4315 struct if6_iter_state *state = seq->private;
4316 struct net *net = seq_file_net(seq);
4317
4318 hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4319 if (!net_eq(dev_net(ifa->idev->dev), net))
4320 continue;
4321 state->offset++;
4322 return ifa;
4323 }
4324
4325 state->offset = 0;
4326 while (++state->bucket < IN6_ADDR_HSIZE) {
4327 hlist_for_each_entry_rcu(ifa,
4328 &inet6_addr_lst[state->bucket], addr_lst) {
4329 if (!net_eq(dev_net(ifa->idev->dev), net))
4330 continue;
4331 return ifa;
4332 }
4333 }
4334
4335 return NULL;
4336 }
4337
4338 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4339 __acquires(rcu)
4340 {
4341 rcu_read_lock();
4342 return if6_get_first(seq, *pos);
4343 }
4344
4345 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4346 {
4347 struct inet6_ifaddr *ifa;
4348
4349 ifa = if6_get_next(seq, v);
4350 ++*pos;
4351 return ifa;
4352 }
4353
4354 static void if6_seq_stop(struct seq_file *seq, void *v)
4355 __releases(rcu)
4356 {
4357 rcu_read_unlock();
4358 }
4359
4360 static int if6_seq_show(struct seq_file *seq, void *v)
4361 {
4362 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4363 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4364 &ifp->addr,
4365 ifp->idev->dev->ifindex,
4366 ifp->prefix_len,
4367 ifp->scope,
4368 (u8) ifp->flags,
4369 ifp->idev->dev->name);
4370 return 0;
4371 }
4372
4373 static const struct seq_operations if6_seq_ops = {
4374 .start = if6_seq_start,
4375 .next = if6_seq_next,
4376 .show = if6_seq_show,
4377 .stop = if6_seq_stop,
4378 };
4379
4380 static int __net_init if6_proc_net_init(struct net *net)
4381 {
4382 if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4383 sizeof(struct if6_iter_state)))
4384 return -ENOMEM;
4385 return 0;
4386 }
4387
4388 static void __net_exit if6_proc_net_exit(struct net *net)
4389 {
4390 remove_proc_entry("if_inet6", net->proc_net);
4391 }
4392
4393 static struct pernet_operations if6_proc_net_ops = {
4394 .init = if6_proc_net_init,
4395 .exit = if6_proc_net_exit,
4396 };
4397
4398 int __init if6_proc_init(void)
4399 {
4400 return register_pernet_subsys(&if6_proc_net_ops);
4401 }
4402
4403 void if6_proc_exit(void)
4404 {
4405 unregister_pernet_subsys(&if6_proc_net_ops);
4406 }
4407 #endif /* CONFIG_PROC_FS */
4408
4409 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4410 /* Check if address is a home address configured on any interface. */
4411 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4412 {
4413 unsigned int hash = inet6_addr_hash(net, addr);
4414 struct inet6_ifaddr *ifp = NULL;
4415 int ret = 0;
4416
4417 rcu_read_lock();
4418 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
4419 if (!net_eq(dev_net(ifp->idev->dev), net))
4420 continue;
4421 if (ipv6_addr_equal(&ifp->addr, addr) &&
4422 (ifp->flags & IFA_F_HOMEADDRESS)) {
4423 ret = 1;
4424 break;
4425 }
4426 }
4427 rcu_read_unlock();
4428 return ret;
4429 }
4430 #endif
4431
4432 /* RFC6554 specifies an algorithm to detect loops in segment routing by
4433 * checking whether the segment list contains any local interface address
4434 * more than once.
4435 * Quote:
4436 *
4437 * To detect loops in the SRH, a router MUST determine if the SRH
4438 * includes multiple addresses assigned to any interface on that router.
4439 * If such addresses appear more than once and are separated by at least
4440 * one address not assigned to that router, the packet MUST be dropped.
4441 */
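/* For example (following the loop below): with local addresses S1 and
 * S2, the segment list S1, S2, X, S1 (X not local) is reported as a
 * loop, while S1, X, S2 is not.
 */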
4442 int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4443 unsigned char nsegs)
4444 {
4445 const struct in6_addr *addr;
4446 int i, ret = 0, found = 0;
4447 struct inet6_ifaddr *ifp;
4448 bool separated = false;
4449 unsigned int hash;
4450 bool hash_found;
4451
4452 rcu_read_lock();
4453 for (i = 0; i < nsegs; i++) {
4454 addr = &segs[i];
4455 hash = inet6_addr_hash(net, addr);
4456
4457 hash_found = false;
4458 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
4459 if (!net_eq(dev_net(ifp->idev->dev), net))
4460 continue;
4461
4462 if (ipv6_addr_equal(&ifp->addr, addr)) {
4463 hash_found = true;
4464 break;
4465 }
4466 }
4467
4468 if (hash_found) {
4469 if (found > 1 && separated) {
4470 ret = 1;
4471 break;
4472 }
4473
4474 separated = false;
4475 found++;
4476 } else {
4477 separated = true;
4478 }
4479 }
4480 rcu_read_unlock();
4481
4482 return ret;
4483 }
4484
4485 /*
4486 * Periodic address status verification
4487 */
4488
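/* Walk the whole address hash under rcu_read_lock_bh(): delete addresses
 * whose valid lifetime has expired, deprecate those past their preferred
 * lifetime, regenerate temporary addresses shortly before they would be
 * deprecated, and reschedule this check for the earliest upcoming event,
 * no sooner than ADDRCONF_TIMER_FUZZ_MAX from now.
 */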
4489 static void addrconf_verify_rtnl(void)
4490 {
4491 unsigned long now, next, next_sec, next_sched;
4492 struct inet6_ifaddr *ifp;
4493 int i;
4494
4495 ASSERT_RTNL();
4496
4497 rcu_read_lock_bh();
4498 now = jiffies;
4499 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4500
4501 cancel_delayed_work(&addr_chk_work);
4502
4503 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4504 restart:
4505 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4506 unsigned long age;
4507
4508 /* An IFA_F_PERMANENT address may still have a finite preferred
4509 * lifetime (preferred_lft neither zero nor infinity while valid_lft
4510 * is infinity); only skip it when preferred_lft is infinite.
4511 */
4512 if ((ifp->flags & IFA_F_PERMANENT) &&
4513 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4514 continue;
4515
4516 spin_lock(&ifp->lock);
4517 /* We try to batch several events at once. */
4518 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4519
4520 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4521 age >= ifp->valid_lft) {
4522 spin_unlock(&ifp->lock);
4523 in6_ifa_hold(ifp);
4524 rcu_read_unlock_bh();
4525 ipv6_del_addr(ifp);
4526 rcu_read_lock_bh();
4527 goto restart;
4528 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4529 spin_unlock(&ifp->lock);
4530 continue;
4531 } else if (age >= ifp->prefered_lft) {
4532 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4533 int deprecate = 0;
4534
4535 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4536 deprecate = 1;
4537 ifp->flags |= IFA_F_DEPRECATED;
4538 }
4539
4540 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4541 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4542 next = ifp->tstamp + ifp->valid_lft * HZ;
4543
4544 spin_unlock(&ifp->lock);
4545
4546 if (deprecate) {
4547 in6_ifa_hold(ifp);
4548
4549 ipv6_ifa_notify(0, ifp);
4550 in6_ifa_put(ifp);
4551 goto restart;
4552 }
4553 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4554 !(ifp->flags&IFA_F_TENTATIVE)) {
4555 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4556 ifp->idev->cnf.dad_transmits *
4557 max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
4558
4559 if (age >= ifp->prefered_lft - regen_advance) {
4560 struct inet6_ifaddr *ifpub = ifp->ifpub;
4561 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4562 next = ifp->tstamp + ifp->prefered_lft * HZ;
4563 if (!ifp->regen_count && ifpub) {
4564 ifp->regen_count++;
4565 in6_ifa_hold(ifp);
4566 in6_ifa_hold(ifpub);
4567 spin_unlock(&ifp->lock);
4568
4569 spin_lock(&ifpub->lock);
4570 ifpub->regen_count = 0;
4571 spin_unlock(&ifpub->lock);
4572 rcu_read_unlock_bh();
4573 ipv6_create_tempaddr(ifpub, true);
4574 in6_ifa_put(ifpub);
4575 in6_ifa_put(ifp);
4576 rcu_read_lock_bh();
4577 goto restart;
4578 }
4579 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4580 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4581 spin_unlock(&ifp->lock);
4582 } else {
4583 /* ifp->prefered_lft <= ifp->valid_lft */
4584 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4585 next = ifp->tstamp + ifp->prefered_lft * HZ;
4586 spin_unlock(&ifp->lock);
4587 }
4588 }
4589 }
4590
4591 next_sec = round_jiffies_up(next);
4592 next_sched = next;
4593
4594 /* If rounded timeout is accurate enough, accept it. */
4595 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4596 next_sched = next_sec;
4597
4598 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4599 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4600 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4601
4602 pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4603 now, next, next_sec, next_sched);
4604 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4605 rcu_read_unlock_bh();
4606 }
4607
4608 static void addrconf_verify_work(struct work_struct *w)
4609 {
4610 rtnl_lock();
4611 addrconf_verify_rtnl();
4612 rtnl_unlock();
4613 }
4614
4615 static void addrconf_verify(void)
4616 {
4617 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4618 }
4619
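/* When both IFA_LOCAL and IFA_ADDRESS are present and differ, IFA_LOCAL
 * is the local address and IFA_ADDRESS the peer (returned via
 * @peer_pfx); otherwise whichever attribute is given is used as the
 * address itself.
 */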
4620 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4621 struct in6_addr **peer_pfx)
4622 {
4623 struct in6_addr *pfx = NULL;
4624
4625 *peer_pfx = NULL;
4626
4627 if (addr)
4628 pfx = nla_data(addr);
4629
4630 if (local) {
4631 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4632 *peer_pfx = pfx;
4633 pfx = nla_data(local);
4634 }
4635
4636 return pfx;
4637 }
4638
4639 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4640 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4641 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4642 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4643 [IFA_FLAGS] = { .len = sizeof(u32) },
4644 [IFA_RT_PRIORITY] = { .len = sizeof(u32) },
4645 [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
4646 };
4647
4648 static int
4649 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4650 struct netlink_ext_ack *extack)
4651 {
4652 struct net *net = sock_net(skb->sk);
4653 struct ifaddrmsg *ifm;
4654 struct nlattr *tb[IFA_MAX+1];
4655 struct in6_addr *pfx, *peer_pfx;
4656 u32 ifa_flags;
4657 int err;
4658
4659 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4660 ifa_ipv6_policy, extack);
4661 if (err < 0)
4662 return err;
4663
4664 ifm = nlmsg_data(nlh);
4665 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4666 if (!pfx)
4667 return -EINVAL;
4668
4669 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4670
4671 /* We ignore other flags so far. */
4672 ifa_flags &= IFA_F_MANAGETEMPADDR;
4673
4674 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4675 ifm->ifa_prefixlen);
4676 }
4677
4678 static int modify_prefix_route(struct inet6_ifaddr *ifp,
4679 unsigned long expires, u32 flags,
4680 bool modify_peer)
4681 {
4682 struct fib6_info *f6i;
4683 u32 prio;
4684
4685 f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4686 ifp->prefix_len,
4687 ifp->idev->dev, 0, RTF_DEFAULT, true);
4688 if (!f6i)
4689 return -ENOENT;
4690
4691 prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4692 if (f6i->fib6_metric != prio) {
4693 /* delete old one */
4694 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4695
4696 /* add new one */
4697 addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4698 ifp->prefix_len,
4699 ifp->rt_priority, ifp->idev->dev,
4700 expires, flags, GFP_KERNEL);
4701 } else {
4702 if (!expires)
4703 fib6_clean_expires(f6i);
4704 else
4705 fib6_set_expires(f6i, expires);
4706
4707 fib6_info_release(f6i);
4708 }
4709
4710 return 0;
4711 }
4712
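/* Handle RTM_NEWADDR with NLM_F_REPLACE for an existing address:
 * recompute lifetimes and flags, update or restore the prefix route(s)
 * (including the peer route), and start or stop managed temporary
 * addresses when IFA_F_MANAGETEMPADDR changes.
 */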
4713 static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
4714 {
4715 u32 flags;
4716 clock_t expires;
4717 unsigned long timeout;
4718 bool was_managetempaddr;
4719 bool had_prefixroute;
4720 bool new_peer = false;
4721
4722 ASSERT_RTNL();
4723
4724 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4725 return -EINVAL;
4726
4727 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4728 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4729 return -EINVAL;
4730
4731 if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4732 cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4733
4734 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4735 if (addrconf_finite_timeout(timeout)) {
4736 expires = jiffies_to_clock_t(timeout * HZ);
4737 cfg->valid_lft = timeout;
4738 flags = RTF_EXPIRES;
4739 } else {
4740 expires = 0;
4741 flags = 0;
4742 cfg->ifa_flags |= IFA_F_PERMANENT;
4743 }
4744
4745 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4746 if (addrconf_finite_timeout(timeout)) {
4747 if (timeout == 0)
4748 cfg->ifa_flags |= IFA_F_DEPRECATED;
4749 cfg->preferred_lft = timeout;
4750 }
4751
4752 if (cfg->peer_pfx &&
4753 memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4754 if (!ipv6_addr_any(&ifp->peer_addr))
4755 cleanup_prefix_route(ifp, expires, true, true);
4756 new_peer = true;
4757 }
4758
4759 spin_lock_bh(&ifp->lock);
4760 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4761 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4762 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4763 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4764 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4765 IFA_F_NOPREFIXROUTE);
4766 ifp->flags |= cfg->ifa_flags;
4767 ifp->tstamp = jiffies;
4768 ifp->valid_lft = cfg->valid_lft;
4769 ifp->prefered_lft = cfg->preferred_lft;
4770
4771 if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4772 ifp->rt_priority = cfg->rt_priority;
4773
4774 if (new_peer)
4775 ifp->peer_addr = *cfg->peer_pfx;
4776
4777 spin_unlock_bh(&ifp->lock);
4778 if (!(ifp->flags&IFA_F_TENTATIVE))
4779 ipv6_ifa_notify(0, ifp);
4780
4781 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4782 int rc = -ENOENT;
4783
4784 if (had_prefixroute)
4785 rc = modify_prefix_route(ifp, expires, flags, false);
4786
4787 /* prefix route could have been deleted; if so restore it */
4788 if (rc == -ENOENT) {
4789 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4790 ifp->rt_priority, ifp->idev->dev,
4791 expires, flags, GFP_KERNEL);
4792 }
4793
4794 if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4795 rc = modify_prefix_route(ifp, expires, flags, true);
4796
4797 if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4798 addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4799 ifp->rt_priority, ifp->idev->dev,
4800 expires, flags, GFP_KERNEL);
4801 }
4802 } else if (had_prefixroute) {
4803 enum cleanup_prefix_rt_t action;
4804 unsigned long rt_expires;
4805
4806 write_lock_bh(&ifp->idev->lock);
4807 action = check_cleanup_prefix_route(ifp, &rt_expires);
4808 write_unlock_bh(&ifp->idev->lock);
4809
4810 if (action != CLEANUP_PREFIX_RT_NOP) {
4811 cleanup_prefix_route(ifp, rt_expires,
4812 action == CLEANUP_PREFIX_RT_DEL, false);
4813 }
4814 }
4815
4816 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4817 if (was_managetempaddr &&
4818 !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
4819 cfg->valid_lft = 0;
4820 cfg->preferred_lft = 0;
4821 }
4822 manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4823 cfg->preferred_lft, !was_managetempaddr,
4824 jiffies);
4825 }
4826
4827 addrconf_verify_rtnl();
4828
4829 return 0;
4830 }
4831
4832 static int
4833 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4834 struct netlink_ext_ack *extack)
4835 {
4836 struct net *net = sock_net(skb->sk);
4837 struct ifaddrmsg *ifm;
4838 struct nlattr *tb[IFA_MAX+1];
4839 struct in6_addr *peer_pfx;
4840 struct inet6_ifaddr *ifa;
4841 struct net_device *dev;
4842 struct inet6_dev *idev;
4843 struct ifa6_config cfg;
4844 int err;
4845
4846 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4847 ifa_ipv6_policy, extack);
4848 if (err < 0)
4849 return err;
4850
4851 memset(&cfg, 0, sizeof(cfg));
4852
4853 ifm = nlmsg_data(nlh);
4854 cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4855 if (!cfg.pfx)
4856 return -EINVAL;
4857
4858 cfg.peer_pfx = peer_pfx;
4859 cfg.plen = ifm->ifa_prefixlen;
4860 if (tb[IFA_RT_PRIORITY])
4861 cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4862
4863 cfg.valid_lft = INFINITY_LIFE_TIME;
4864 cfg.preferred_lft = INFINITY_LIFE_TIME;
4865
4866 if (tb[IFA_CACHEINFO]) {
4867 struct ifa_cacheinfo *ci;
4868
4869 ci = nla_data(tb[IFA_CACHEINFO]);
4870 cfg.valid_lft = ci->ifa_valid;
4871 cfg.preferred_lft = ci->ifa_prefered;
4872 }
4873
4874 dev = __dev_get_by_index(net, ifm->ifa_index);
4875 if (!dev)
4876 return -ENODEV;
4877
4878 if (tb[IFA_FLAGS])
4879 cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
4880 else
4881 cfg.ifa_flags = ifm->ifa_flags;
4882
4883 /* We ignore other flags so far. */
4884 cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4885 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4886 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4887
4888 idev = ipv6_find_idev(dev);
4889 if (IS_ERR(idev))
4890 return PTR_ERR(idev);
4891
4892 if (!ipv6_allow_optimistic_dad(net, idev))
4893 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
4894
4895 if (cfg.ifa_flags & IFA_F_NODAD &&
4896 cfg.ifa_flags & IFA_F_OPTIMISTIC) {
4897 NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
4898 return -EINVAL;
4899 }
4900
4901 ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
4902 if (!ifa) {
4903 /*
4904 * It would be best to check for !NLM_F_CREATE here but
4905 * userspace already relies on not having to provide this.
4906 */
4907 return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
4908 }
4909
4910 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4911 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4912 err = -EEXIST;
4913 else
4914 err = inet6_addr_modify(ifa, &cfg);
4915
4916 in6_ifa_put(ifa);
4917
4918 return err;
4919 }
4920
4921 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4922 u8 scope, int ifindex)
4923 {
4924 struct ifaddrmsg *ifm;
4925
4926 ifm = nlmsg_data(nlh);
4927 ifm->ifa_family = AF_INET6;
4928 ifm->ifa_prefixlen = prefixlen;
4929 ifm->ifa_flags = flags;
4930 ifm->ifa_scope = scope;
4931 ifm->ifa_index = ifindex;
4932 }
4933
4934 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4935 unsigned long tstamp, u32 preferred, u32 valid)
4936 {
4937 struct ifa_cacheinfo ci;
4938
4939 ci.cstamp = cstamp_delta(cstamp);
4940 ci.tstamp = cstamp_delta(tstamp);
4941 ci.ifa_prefered = preferred;
4942 ci.ifa_valid = valid;
4943
4944 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4945 }
4946
4947 static inline int rt_scope(int ifa_scope)
4948 {
4949 if (ifa_scope & IFA_HOST)
4950 return RT_SCOPE_HOST;
4951 else if (ifa_scope & IFA_LINK)
4952 return RT_SCOPE_LINK;
4953 else if (ifa_scope & IFA_SITE)
4954 return RT_SCOPE_SITE;
4955 else
4956 return RT_SCOPE_UNIVERSE;
4957 }
4958
4959 static inline int inet6_ifaddr_msgsize(void)
4960 {
4961 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4962 + nla_total_size(16) /* IFA_LOCAL */
4963 + nla_total_size(16) /* IFA_ADDRESS */
4964 + nla_total_size(sizeof(struct ifa_cacheinfo))
4965 + nla_total_size(4) /* IFA_FLAGS */
4966 + nla_total_size(4) /* IFA_RT_PRIORITY */;
4967 }
4968
4969 enum addr_type_t {
4970 UNICAST_ADDR,
4971 MULTICAST_ADDR,
4972 ANYCAST_ADDR,
4973 };
4974
4975 struct inet6_fill_args {
4976 u32 portid;
4977 u32 seq;
4978 int event;
4979 unsigned int flags;
4980 int netnsid;
4981 int ifindex;
4982 enum addr_type_t type;
4983 };
4984
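/* Fill one address into a netlink message: ifaddrmsg header, the
 * address (IFA_LOCAL plus IFA_ADDRESS when a peer is set), the remaining
 * preferred/valid lifetimes in IFA_CACHEINFO, IFA_RT_PRIORITY (when
 * set) and IFA_FLAGS.
 */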
4985 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4986 struct inet6_fill_args *args)
4987 {
4988 struct nlmsghdr *nlh;
4989 u32 preferred, valid;
4990
4991 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
4992 sizeof(struct ifaddrmsg), args->flags);
4993 if (!nlh)
4994 return -EMSGSIZE;
4995
4996 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4997 ifa->idev->dev->ifindex);
4998
4999 if (args->netnsid >= 0 &&
5000 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5001 goto error;
5002
5003 spin_lock_bh(&ifa->lock);
5004 if (!((ifa->flags&IFA_F_PERMANENT) &&
5005 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
5006 preferred = ifa->prefered_lft;
5007 valid = ifa->valid_lft;
5008 if (preferred != INFINITY_LIFE_TIME) {
5009 long tval = (jiffies - ifa->tstamp)/HZ;
5010 if (preferred > tval)
5011 preferred -= tval;
5012 else
5013 preferred = 0;
5014 if (valid != INFINITY_LIFE_TIME) {
5015 if (valid > tval)
5016 valid -= tval;
5017 else
5018 valid = 0;
5019 }
5020 }
5021 } else {
5022 preferred = INFINITY_LIFE_TIME;
5023 valid = INFINITY_LIFE_TIME;
5024 }
5025 spin_unlock_bh(&ifa->lock);
5026
5027 if (!ipv6_addr_any(&ifa->peer_addr)) {
5028 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5029 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5030 goto error;
5031 } else
5032 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5033 goto error;
5034
5035 if (ifa->rt_priority &&
5036 nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
5037 goto error;
5038
5039 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
5040 goto error;
5041
5042 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
5043 goto error;
5044
5045 nlmsg_end(skb, nlh);
5046 return 0;
5047
5048 error:
5049 nlmsg_cancel(skb, nlh);
5050 return -EMSGSIZE;
5051 }
5052
5053 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
5054 struct inet6_fill_args *args)
5055 {
5056 struct nlmsghdr *nlh;
5057 u8 scope = RT_SCOPE_UNIVERSE;
5058 int ifindex = ifmca->idev->dev->ifindex;
5059
5060 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5061 scope = RT_SCOPE_SITE;
5062
5063 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5064 sizeof(struct ifaddrmsg), args->flags);
5065 if (!nlh)
5066 return -EMSGSIZE;
5067
5068 if (args->netnsid >= 0 &&
5069 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5070 nlmsg_cancel(skb, nlh);
5071 return -EMSGSIZE;
5072 }
5073
5074 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5075 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5076 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
5077 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5078 nlmsg_cancel(skb, nlh);
5079 return -EMSGSIZE;
5080 }
5081
5082 nlmsg_end(skb, nlh);
5083 return 0;
5084 }
5085
5086 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
5087 struct inet6_fill_args *args)
5088 {
5089 struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5090 int ifindex = dev ? dev->ifindex : 1;
5091 struct nlmsghdr *nlh;
5092 u8 scope = RT_SCOPE_UNIVERSE;
5093
5094 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5095 scope = RT_SCOPE_SITE;
5096
5097 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5098 sizeof(struct ifaddrmsg), args->flags);
5099 if (!nlh)
5100 return -EMSGSIZE;
5101
5102 if (args->netnsid >= 0 &&
5103 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5104 nlmsg_cancel(skb, nlh);
5105 return -EMSGSIZE;
5106 }
5107
5108 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5109 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5110 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
5111 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5112 nlmsg_cancel(skb, nlh);
5113 return -EMSGSIZE;
5114 }
5115
5116 nlmsg_end(skb, nlh);
5117 return 0;
5118 }
5119
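/* Dump the unicast, multicast or anycast addresses of one inet6_dev
 * into @skb, resuming at @s_ip_idx and recording the per-device
 * progress in cb->args[2].
 */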
5120 /* called with rcu_read_lock() */
5121 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
5122 struct netlink_callback *cb, int s_ip_idx,
5123 struct inet6_fill_args *fillargs)
5124 {
5125 struct ifmcaddr6 *ifmca;
5126 struct ifacaddr6 *ifaca;
5127 int ip_idx = 0;
5128 int err = 1;
5129
5130 read_lock_bh(&idev->lock);
5131 switch (fillargs->type) {
5132 case UNICAST_ADDR: {
5133 struct inet6_ifaddr *ifa;
5134 fillargs->event = RTM_NEWADDR;
5135
5136 /* unicast address incl. temp addr */
5137 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5138 if (ip_idx < s_ip_idx)
5139 goto next;
5140 err = inet6_fill_ifaddr(skb, ifa, fillargs);
5141 if (err < 0)
5142 break;
5143 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5144 next:
5145 ip_idx++;
5146 }
5147 break;
5148 }
5149 case MULTICAST_ADDR:
5150 read_unlock_bh(&idev->lock);
5151 fillargs->event = RTM_GETMULTICAST;
5152
5153 /* multicast address */
5154 for (ifmca = rcu_dereference(idev->mc_list);
5155 ifmca;
5156 ifmca = rcu_dereference(ifmca->next), ip_idx++) {
5157 if (ip_idx < s_ip_idx)
5158 continue;
5159 err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5160 if (err < 0)
5161 break;
5162 }
5163 read_lock_bh(&idev->lock);
5164 break;
5165 case ANYCAST_ADDR:
5166 fillargs->event = RTM_GETANYCAST;
5167 /* anycast address */
5168 for (ifaca = idev->ac_list; ifaca;
5169 ifaca = ifaca->aca_next, ip_idx++) {
5170 if (ip_idx < s_ip_idx)
5171 continue;
5172 err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5173 if (err < 0)
5174 break;
5175 }
5176 break;
5177 default:
5178 break;
5179 }
5180 read_unlock_bh(&idev->lock);
5181 cb->args[2] = ip_idx;
5182 return err;
5183 }
5184
5185 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5186 struct inet6_fill_args *fillargs,
5187 struct net **tgt_net, struct sock *sk,
5188 struct netlink_callback *cb)
5189 {
5190 struct netlink_ext_ack *extack = cb->extack;
5191 struct nlattr *tb[IFA_MAX+1];
5192 struct ifaddrmsg *ifm;
5193 int err, i;
5194
5195 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5196 NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5197 return -EINVAL;
5198 }
5199
5200 ifm = nlmsg_data(nlh);
5201 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5202 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5203 return -EINVAL;
5204 }
5205
5206 fillargs->ifindex = ifm->ifa_index;
5207 if (fillargs->ifindex) {
5208 cb->answer_flags |= NLM_F_DUMP_FILTERED;
5209 fillargs->flags |= NLM_F_DUMP_FILTERED;
5210 }
5211
5212 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5213 ifa_ipv6_policy, extack);
5214 if (err < 0)
5215 return err;
5216
5217 for (i = 0; i <= IFA_MAX; ++i) {
5218 if (!tb[i])
5219 continue;
5220
5221 if (i == IFA_TARGET_NETNSID) {
5222 struct net *net;
5223
5224 fillargs->netnsid = nla_get_s32(tb[i]);
5225 net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5226 if (IS_ERR(net)) {
5227 fillargs->netnsid = -1;
5228 NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5229 return PTR_ERR(net);
5230 }
5231 *tgt_net = net;
5232 } else {
5233 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
5234 return -EINVAL;
5235 }
5236 }
5237
5238 return 0;
5239 }
5240
5241 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5242 enum addr_type_t type)
5243 {
5244 const struct nlmsghdr *nlh = cb->nlh;
5245 struct inet6_fill_args fillargs = {
5246 .portid = NETLINK_CB(cb->skb).portid,
5247 .seq = cb->nlh->nlmsg_seq,
5248 .flags = NLM_F_MULTI,
5249 .netnsid = -1,
5250 .type = type,
5251 };
5252 struct net *tgt_net = sock_net(skb->sk);
5253 int idx, s_idx, s_ip_idx;
5254 int h, s_h;
5255 struct net_device *dev;
5256 struct inet6_dev *idev;
5257 struct hlist_head *head;
5258 int err = 0;
5259
5260 s_h = cb->args[0];
5261 s_idx = idx = cb->args[1];
5262 s_ip_idx = cb->args[2];
5263
5264 if (cb->strict_check) {
5265 err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5266 skb->sk, cb);
5267 if (err < 0)
5268 goto put_tgt_net;
5269
5270 err = 0;
5271 if (fillargs.ifindex) {
5272 dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
5273 if (!dev) {
5274 err = -ENODEV;
5275 goto put_tgt_net;
5276 }
5277 idev = __in6_dev_get(dev);
5278 if (idev) {
5279 err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5280 &fillargs);
5281 if (err > 0)
5282 err = 0;
5283 }
5284 goto put_tgt_net;
5285 }
5286 }
5287
5288 rcu_read_lock();
5289 cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
5290 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5291 idx = 0;
5292 head = &tgt_net->dev_index_head[h];
5293 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5294 if (idx < s_idx)
5295 goto cont;
5296 if (h > s_h || idx > s_idx)
5297 s_ip_idx = 0;
5298 idev = __in6_dev_get(dev);
5299 if (!idev)
5300 goto cont;
5301
5302 if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
5303 &fillargs) < 0)
5304 goto done;
5305 cont:
5306 idx++;
5307 }
5308 }
5309 done:
5310 rcu_read_unlock();
5311 cb->args[0] = h;
5312 cb->args[1] = idx;
5313 put_tgt_net:
5314 if (fillargs.netnsid >= 0)
5315 put_net(tgt_net);
5316
5317 return skb->len ? : err;
5318 }
5319
5320 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5321 {
5322 enum addr_type_t type = UNICAST_ADDR;
5323
5324 return inet6_dump_addr(skb, cb, type);
5325 }
5326
5327 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5328 {
5329 enum addr_type_t type = MULTICAST_ADDR;
5330
5331 return inet6_dump_addr(skb, cb, type);
5332 }
5333
5334
5335 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5336 {
5337 enum addr_type_t type = ANYCAST_ADDR;
5338
5339 return inet6_dump_addr(skb, cb, type);
5340 }
5341
5342 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5343 const struct nlmsghdr *nlh,
5344 struct nlattr **tb,
5345 struct netlink_ext_ack *extack)
5346 {
5347 struct ifaddrmsg *ifm;
5348 int i, err;
5349
5350 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5351 NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5352 return -EINVAL;
5353 }
5354
5355 if (!netlink_strict_get_check(skb))
5356 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5357 ifa_ipv6_policy, extack);
5358
5359 ifm = nlmsg_data(nlh);
5360 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5361 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5362 return -EINVAL;
5363 }
5364
5365 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5366 ifa_ipv6_policy, extack);
5367 if (err)
5368 return err;
5369
5370 for (i = 0; i <= IFA_MAX; i++) {
5371 if (!tb[i])
5372 continue;
5373
5374 switch (i) {
5375 case IFA_TARGET_NETNSID:
5376 case IFA_ADDRESS:
5377 case IFA_LOCAL:
5378 break;
5379 default:
5380 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
5381 return -EINVAL;
5382 }
5383 }
5384
5385 return 0;
5386 }
5387
5388 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5389 struct netlink_ext_ack *extack)
5390 {
5391 struct net *tgt_net = sock_net(in_skb->sk);
5392 struct inet6_fill_args fillargs = {
5393 .portid = NETLINK_CB(in_skb).portid,
5394 .seq = nlh->nlmsg_seq,
5395 .event = RTM_NEWADDR,
5396 .flags = 0,
5397 .netnsid = -1,
5398 };
5399 struct ifaddrmsg *ifm;
5400 struct nlattr *tb[IFA_MAX+1];
5401 struct in6_addr *addr = NULL, *peer;
5402 struct net_device *dev = NULL;
5403 struct inet6_ifaddr *ifa;
5404 struct sk_buff *skb;
5405 int err;
5406
5407 err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5408 if (err < 0)
5409 return err;
5410
5411 if (tb[IFA_TARGET_NETNSID]) {
5412 fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5413
5414 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5415 fillargs.netnsid);
5416 if (IS_ERR(tgt_net))
5417 return PTR_ERR(tgt_net);
5418 }
5419
5420 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5421 if (!addr)
5422 return -EINVAL;
5423
5424 ifm = nlmsg_data(nlh);
5425 if (ifm->ifa_index)
5426 dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5427
5428 ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5429 if (!ifa) {
5430 err = -EADDRNOTAVAIL;
5431 goto errout;
5432 }
5433
5434 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5435 if (!skb) {
5436 err = -ENOBUFS;
5437 goto errout_ifa;
5438 }
5439
5440 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5441 if (err < 0) {
5442 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5443 WARN_ON(err == -EMSGSIZE);
5444 kfree_skb(skb);
5445 goto errout_ifa;
5446 }
5447 err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5448 errout_ifa:
5449 in6_ifa_put(ifa);
5450 errout:
5451 dev_put(dev);
5452 if (fillargs.netnsid >= 0)
5453 put_net(tgt_net);
5454
5455 return err;
5456 }
5457
5458 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5459 {
5460 struct sk_buff *skb;
5461 struct net *net = dev_net(ifa->idev->dev);
5462 struct inet6_fill_args fillargs = {
5463 .portid = 0,
5464 .seq = 0,
5465 .event = event,
5466 .flags = 0,
5467 .netnsid = -1,
5468 };
5469 int err = -ENOBUFS;
5470
5471 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5472 if (!skb)
5473 goto errout;
5474
5475 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5476 if (err < 0) {
5477 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5478 WARN_ON(err == -EMSGSIZE);
5479 kfree_skb(skb);
5480 goto errout;
5481 }
5482 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5483 return;
5484 errout:
5485 if (err < 0)
5486 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5487 }
5488
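/* Export the per-device IPv6 settings as the flat s32 array carried in
 * IFLA_INET6_CONF; jiffies-based intervals are converted to
 * milliseconds.
 */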
5489 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5490 __s32 *array, int bytes)
5491 {
5492 BUG_ON(bytes < (DEVCONF_MAX * 4));
5493
5494 memset(array, 0, bytes);
5495 array[DEVCONF_FORWARDING] = cnf->forwarding;
5496 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5497 array[DEVCONF_MTU6] = cnf->mtu6;
5498 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5499 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5500 array[DEVCONF_AUTOCONF] = cnf->autoconf;
5501 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5502 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5503 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5504 jiffies_to_msecs(cnf->rtr_solicit_interval);
5505 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5506 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5507 array[DEVCONF_RTR_SOLICIT_DELAY] =
5508 jiffies_to_msecs(cnf->rtr_solicit_delay);
5509 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5510 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5511 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5512 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5513 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5514 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5515 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5516 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5517 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5518 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5519 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5520 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5521 array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
5522 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5523 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5524 #ifdef CONFIG_IPV6_ROUTER_PREF
5525 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5526 array[DEVCONF_RTR_PROBE_INTERVAL] =
5527 jiffies_to_msecs(cnf->rtr_probe_interval);
5528 #ifdef CONFIG_IPV6_ROUTE_INFO
5529 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5530 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5531 #endif
5532 #endif
5533 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5534 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5535 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5536 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5537 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5538 #endif
5539 #ifdef CONFIG_IPV6_MROUTE
5540 array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
5541 #endif
5542 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5543 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5544 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5545 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5546 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5547 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5548 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5549 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5550 /* we omit DEVCONF_STABLE_SECRET for now */
5551 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5552 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5553 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5554 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5555 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5556 #ifdef CONFIG_IPV6_SEG6_HMAC
5557 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5558 #endif
5559 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5560 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5561 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5562 array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5563 array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
5564 array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
5565 array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
5566 array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
5567 }
5568
5569 static inline size_t inet6_ifla6_size(void)
5570 {
5571 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5572 + nla_total_size(sizeof(struct ifla_cacheinfo))
5573 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5574 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5575 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5576 + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5577 + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5578 + nla_total_size(4) /* IFLA_INET6_RA_MTU */
5579 + 0;
5580 }
5581
5582 static inline size_t inet6_if_nlmsg_size(void)
5583 {
5584 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5585 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5586 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5587 + nla_total_size(4) /* IFLA_MTU */
5588 + nla_total_size(4) /* IFLA_LINK */
5589 + nla_total_size(1) /* IFLA_OPERSTATE */
5590 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5591 }
5592
5593 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5594 int bytes)
5595 {
5596 int i;
5597 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5598 BUG_ON(pad < 0);
5599
5600 /* Use put_unaligned() because stats may not be aligned for u64. */
5601 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5602 for (i = 1; i < ICMP6_MIB_MAX; i++)
5603 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5604
5605 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5606 }
5607
5608 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5609 int bytes, size_t syncpoff)
5610 {
5611 int i, c;
5612 u64 buff[IPSTATS_MIB_MAX];
5613 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5614
5615 BUG_ON(pad < 0);
5616
5617 memset(buff, 0, sizeof(buff));
5618 buff[0] = IPSTATS_MIB_MAX;
5619
5620 for_each_possible_cpu(c) {
5621 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5622 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5623 }
5624
5625 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5626 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5627 }
5628
5629 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5630 int bytes)
5631 {
5632 switch (attrtype) {
5633 case IFLA_INET6_STATS:
5634 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5635 offsetof(struct ipstats_mib, syncp));
5636 break;
5637 case IFLA_INET6_ICMP6STATS:
5638 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5639 break;
5640 }
5641 }
5642
5643 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5644 u32 ext_filter_mask)
5645 {
5646 struct nlattr *nla;
5647 struct ifla_cacheinfo ci;
5648
5649 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5650 goto nla_put_failure;
5651 ci.max_reasm_len = IPV6_MAXPLEN;
5652 ci.tstamp = cstamp_delta(idev->tstamp);
5653 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5654 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5655 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5656 goto nla_put_failure;
5657 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5658 if (!nla)
5659 goto nla_put_failure;
5660 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5661
5662 /* XXX - MC not implemented */
5663
5664 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5665 return 0;
5666
5667 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5668 if (!nla)
5669 goto nla_put_failure;
5670 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5671
5672 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5673 if (!nla)
5674 goto nla_put_failure;
5675 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5676
5677 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5678 if (!nla)
5679 goto nla_put_failure;
5680 read_lock_bh(&idev->lock);
5681 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5682 read_unlock_bh(&idev->lock);
5683
5684 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5685 goto nla_put_failure;
5686
5687 if (idev->ra_mtu &&
5688 nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
5689 goto nla_put_failure;
5690
5691 return 0;
5692
5693 nla_put_failure:
5694 return -EMSGSIZE;
5695 }
5696
5697 static size_t inet6_get_link_af_size(const struct net_device *dev,
5698 u32 ext_filter_mask)
5699 {
5700 if (!__in6_dev_get(dev))
5701 return 0;
5702
5703 return inet6_ifla6_size();
5704 }
5705
5706 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5707 u32 ext_filter_mask)
5708 {
5709 struct inet6_dev *idev = __in6_dev_get(dev);
5710
5711 if (!idev)
5712 return -ENODATA;
5713
5714 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5715 return -EMSGSIZE;
5716
5717 return 0;
5718 }
5719
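/* Set the 64-bit interface token (lower eight bytes of @token) used for
 * RA-based address generation. Rejected on loopback/IFF_NOARP devices
 * and when RAs or router solicitations are disabled; when the new token
 * is non-zero and the device is ready, a fresh router solicitation is
 * sent, and existing tokenized addresses get zero lifetimes so
 * addrconf_verify_rtnl() expires them.
 */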
5720 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
5721 struct netlink_ext_ack *extack)
5722 {
5723 struct inet6_ifaddr *ifp;
5724 struct net_device *dev = idev->dev;
5725 bool clear_token, update_rs = false;
5726 struct in6_addr ll_addr;
5727
5728 ASSERT_RTNL();
5729
5730 if (!token)
5731 return -EINVAL;
5732
5733 if (dev->flags & IFF_LOOPBACK) {
5734 NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
5735 return -EINVAL;
5736 }
5737
5738 if (dev->flags & IFF_NOARP) {
5739 NL_SET_ERR_MSG_MOD(extack,
5740 "Device does not do neighbour discovery");
5741 return -EINVAL;
5742 }
5743
5744 if (!ipv6_accept_ra(idev)) {
5745 NL_SET_ERR_MSG_MOD(extack,
5746 "Router advertisement is disabled on device");
5747 return -EINVAL;
5748 }
5749
5750 if (idev->cnf.rtr_solicits == 0) {
5751 NL_SET_ERR_MSG(extack,
5752 "Router solicitation is disabled on device");
5753 return -EINVAL;
5754 }
5755
5756 write_lock_bh(&idev->lock);
5757
5758 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5759 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5760
5761 write_unlock_bh(&idev->lock);
5762
5763 clear_token = ipv6_addr_any(token);
5764 if (clear_token)
5765 goto update_lft;
5766
5767 if (!idev->dead && (idev->if_flags & IF_READY) &&
5768 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5769 IFA_F_OPTIMISTIC)) {
5770 /* If we're not ready, then normal ifup will take care
5771 * of this. Otherwise, we need to request our rs here.
5772 */
5773 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5774 update_rs = true;
5775 }
5776
5777 update_lft:
5778 write_lock_bh(&idev->lock);
5779
5780 if (update_rs) {
5781 idev->if_flags |= IF_RS_SENT;
5782 idev->rs_interval = rfc3315_s14_backoff_init(
5783 idev->cnf.rtr_solicit_interval);
5784 idev->rs_probes = 1;
5785 addrconf_mod_rs_timer(idev, idev->rs_interval);
5786 }
5787
5788 /* Well, that's kinda nasty ... */
5789 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5790 spin_lock(&ifp->lock);
5791 if (ifp->tokenized) {
5792 ifp->valid_lft = 0;
5793 ifp->prefered_lft = 0;
5794 }
5795 spin_unlock(&ifp->lock);
5796 }
5797
5798 write_unlock_bh(&idev->lock);
5799 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5800 addrconf_verify_rtnl();
5801 return 0;
5802 }
5803
5804 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5805 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5806 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5807 [IFLA_INET6_RA_MTU] = { .type = NLA_REJECT,
5808 .reject_message =
5809 "IFLA_INET6_RA_MTU can not be set" },
5810 };
5811
5812 static int check_addr_gen_mode(int mode)
5813 {
5814 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5815 mode != IN6_ADDR_GEN_MODE_NONE &&
5816 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5817 mode != IN6_ADDR_GEN_MODE_RANDOM)
5818 return -EINVAL;
5819 return 1;
5820 }
5821
5822 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5823 int mode)
5824 {
5825 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5826 !idev->cnf.stable_secret.initialized &&
5827 !net->ipv6.devconf_dflt->stable_secret.initialized)
5828 return -EINVAL;
5829 return 1;
5830 }
5831
5832 static int inet6_validate_link_af(const struct net_device *dev,
5833 const struct nlattr *nla,
5834 struct netlink_ext_ack *extack)
5835 {
5836 struct nlattr *tb[IFLA_INET6_MAX + 1];
5837 struct inet6_dev *idev = NULL;
5838 int err;
5839
5840 if (dev) {
5841 idev = __in6_dev_get(dev);
5842 if (!idev)
5843 return -EAFNOSUPPORT;
5844 }
5845
5846 err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
5847 inet6_af_policy, extack);
5848 if (err)
5849 return err;
5850
5851 if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
5852 return -EINVAL;
5853
5854 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5855 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5856
5857 if (check_addr_gen_mode(mode) < 0)
5858 return -EINVAL;
5859 if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
5860 return -EINVAL;
5861 }
5862
5863 return 0;
5864 }
5865
5866 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
5867 struct netlink_ext_ack *extack)
5868 {
5869 struct inet6_dev *idev = __in6_dev_get(dev);
5870 struct nlattr *tb[IFLA_INET6_MAX + 1];
5871 int err;
5872
5873 if (!idev)
5874 return -EAFNOSUPPORT;
5875
5876 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5877 return -EINVAL;
5878
5879 if (tb[IFLA_INET6_TOKEN]) {
5880 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
5881 extack);
5882 if (err)
5883 return err;
5884 }
5885
5886 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5887 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5888
5889 idev->cnf.addr_gen_mode = mode;
5890 }
5891
5892 return 0;
5893 }
5894
5895 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5896 u32 portid, u32 seq, int event, unsigned int flags)
5897 {
5898 struct net_device *dev = idev->dev;
5899 struct ifinfomsg *hdr;
5900 struct nlmsghdr *nlh;
5901 void *protoinfo;
5902
5903 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5904 if (!nlh)
5905 return -EMSGSIZE;
5906
5907 hdr = nlmsg_data(nlh);
5908 hdr->ifi_family = AF_INET6;
5909 hdr->__ifi_pad = 0;
5910 hdr->ifi_type = dev->type;
5911 hdr->ifi_index = dev->ifindex;
5912 hdr->ifi_flags = dev_get_flags(dev);
5913 hdr->ifi_change = 0;
5914
5915 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5916 (dev->addr_len &&
5917 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5918 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5919 (dev->ifindex != dev_get_iflink(dev) &&
5920 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5921 nla_put_u8(skb, IFLA_OPERSTATE,
5922 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5923 goto nla_put_failure;
5924 protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
5925 if (!protoinfo)
5926 goto nla_put_failure;
5927
5928 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5929 goto nla_put_failure;
5930
5931 nla_nest_end(skb, protoinfo);
5932 nlmsg_end(skb, nlh);
5933 return 0;
5934
5935 nla_put_failure:
5936 nlmsg_cancel(skb, nlh);
5937 return -EMSGSIZE;
5938 }
5939
5940 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
5941 struct netlink_ext_ack *extack)
5942 {
5943 struct ifinfomsg *ifm;
5944
5945 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5946 NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
5947 return -EINVAL;
5948 }
5949
5950 if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
5951 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
5952 return -EINVAL;
5953 }
5954
5955 ifm = nlmsg_data(nlh);
5956 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5957 ifm->ifi_change || ifm->ifi_index) {
5958 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
5959 return -EINVAL;
5960 }
5961
5962 return 0;
5963 }
5964
5965 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5966 {
5967 struct net *net = sock_net(skb->sk);
5968 int h, s_h;
5969 int idx = 0, s_idx;
5970 struct net_device *dev;
5971 struct inet6_dev *idev;
5972 struct hlist_head *head;
5973
5974 /* only requests using strict checking can pass data to
5975 * influence the dump
5976 */
5977 if (cb->strict_check) {
5978 int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
5979
5980 if (err < 0)
5981 return err;
5982 }
5983
5984 s_h = cb->args[0];
5985 s_idx = cb->args[1];
5986
5987 rcu_read_lock();
5988 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5989 idx = 0;
5990 head = &net->dev_index_head[h];
5991 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5992 if (idx < s_idx)
5993 goto cont;
5994 idev = __in6_dev_get(dev);
5995 if (!idev)
5996 goto cont;
5997 if (inet6_fill_ifinfo(skb, idev,
5998 NETLINK_CB(cb->skb).portid,
5999 cb->nlh->nlmsg_seq,
6000 RTM_NEWLINK, NLM_F_MULTI) < 0)
6001 goto out;
6002 cont:
6003 idx++;
6004 }
6005 }
6006 out:
6007 rcu_read_unlock();
6008 cb->args[1] = idx;
6009 cb->args[0] = h;
6010
6011 return skb->len;
6012 }
6013
6014 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
6015 {
6016 struct sk_buff *skb;
6017 struct net *net = dev_net(idev->dev);
6018 int err = -ENOBUFS;
6019
6020 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
6021 if (!skb)
6022 goto errout;
6023
6024 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
6025 if (err < 0) {
6026 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6027 WARN_ON(err == -EMSGSIZE);
6028 kfree_skb(skb);
6029 goto errout;
6030 }
6031 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6032 return;
6033 errout:
6034 if (err < 0)
6035 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6036 }
6037
6038 static inline size_t inet6_prefix_nlmsg_size(void)
6039 {
6040 return NLMSG_ALIGN(sizeof(struct prefixmsg))
6041 + nla_total_size(sizeof(struct in6_addr))
6042 + nla_total_size(sizeof(struct prefix_cacheinfo));
6043 }
6044
6045 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
6046 struct prefix_info *pinfo, u32 portid, u32 seq,
6047 int event, unsigned int flags)
6048 {
6049 struct prefixmsg *pmsg;
6050 struct nlmsghdr *nlh;
6051 struct prefix_cacheinfo ci;
6052
6053 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
6054 if (!nlh)
6055 return -EMSGSIZE;
6056
6057 pmsg = nlmsg_data(nlh);
6058 pmsg->prefix_family = AF_INET6;
6059 pmsg->prefix_pad1 = 0;
6060 pmsg->prefix_pad2 = 0;
6061 pmsg->prefix_ifindex = idev->dev->ifindex;
6062 pmsg->prefix_len = pinfo->prefix_len;
6063 pmsg->prefix_type = pinfo->type;
6064 pmsg->prefix_pad3 = 0;
6065 pmsg->prefix_flags = 0;
6066 if (pinfo->onlink)
6067 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
6068 if (pinfo->autoconf)
6069 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
6070
6071 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
6072 goto nla_put_failure;
6073 ci.preferred_time = ntohl(pinfo->prefered);
6074 ci.valid_time = ntohl(pinfo->valid);
6075 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
6076 goto nla_put_failure;
6077 nlmsg_end(skb, nlh);
6078 return 0;
6079
6080 nla_put_failure:
6081 nlmsg_cancel(skb, nlh);
6082 return -EMSGSIZE;
6083 }
6084
6085 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6086 struct prefix_info *pinfo)
6087 {
6088 struct sk_buff *skb;
6089 struct net *net = dev_net(idev->dev);
6090 int err = -ENOBUFS;
6091
6092 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6093 if (!skb)
6094 goto errout;
6095
6096 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6097 if (err < 0) {
6098 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6099 WARN_ON(err == -EMSGSIZE);
6100 kfree_skb(skb);
6101 goto errout;
6102 }
6103 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6104 return;
6105 errout:
6106 if (err < 0)
6107 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
6108 }
6109
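/*
 * __ipv6_ifa_notify() does more than send the RTM_NEWADDR/RTM_DELADDR
 * message: it keeps the state hanging off the address consistent.  On
 * RTM_NEWADDR it (re)inserts the /128 host route if needed, joins the
 * anycast group on forwarding interfaces and adds a route for the peer
 * address; on RTM_DELADDR it undoes all of that, leaves the
 * solicited-node group and bumps the IPv6 route generation id so cached
 * dst entries get re-validated.
 */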
6110 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6111 {
6112 struct net *net = dev_net(ifp->idev->dev);
6113
6114 if (event)
6115 ASSERT_RTNL();
6116
6117 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6118
6119 switch (event) {
6120 case RTM_NEWADDR:
6121 /*
6122 * If the address was optimistic we inserted the route at the
6123 * start of our DAD process, so we don't need to do it again.
6124 * If the device was taken down in the middle of the DAD
6125 * cycle there is a race where we could get here without a
6126 * host route, so nothing to insert. That will be fixed when
6127 * the device is brought up.
6128 */
6129 if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6130 ip6_ins_rt(net, ifp->rt);
6131 } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6132 pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6133 &ifp->addr, ifp->idev->dev->name);
6134 }
6135
6136 if (ifp->idev->cnf.forwarding)
6137 addrconf_join_anycast(ifp);
6138 if (!ipv6_addr_any(&ifp->peer_addr))
6139 addrconf_prefix_route(&ifp->peer_addr, 128,
6140 ifp->rt_priority, ifp->idev->dev,
6141 0, 0, GFP_ATOMIC);
6142 break;
6143 case RTM_DELADDR:
6144 if (ifp->idev->cnf.forwarding)
6145 addrconf_leave_anycast(ifp);
6146 addrconf_leave_solict(ifp->idev, &ifp->addr);
6147 if (!ipv6_addr_any(&ifp->peer_addr)) {
6148 struct fib6_info *rt;
6149
6150 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6151 ifp->idev->dev, 0, 0,
6152 false);
6153 if (rt)
6154 ip6_del_rt(net, rt, false);
6155 }
6156 if (ifp->rt) {
6157 ip6_del_rt(net, ifp->rt, false);
6158 ifp->rt = NULL;
6159 }
6160 rt_genid_bump_ipv6(net);
6161 break;
6162 }
6163 atomic_inc(&net->ipv6.dev_addr_genid);
6164 }
6165
6166 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6167 {
6168 if (likely(ifp->idev->dead == 0))
6169 __ipv6_ifa_notify(event, ifp);
6170 }
6171
6172 #ifdef CONFIG_SYSCTL
6173
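/*
 * The proc handlers below share a pattern: ctl->data points straight into
 * live per-device (or all/default) configuration, so each handler copies
 * the ctl_table, redirects .data to a stack variable, lets proc_dointvec()
 * parse the user input, and only commits the result under the rtnl lock
 * (addrconf_fixup_forwarding(), addrconf_disable_ipv6(), ...).  A write
 * such as "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding" ends up here.
 */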
6174 static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
6175 void *buffer, size_t *lenp, loff_t *ppos)
6176 {
6177 int *valp = ctl->data;
6178 int val = *valp;
6179 loff_t pos = *ppos;
6180 struct ctl_table lctl;
6181 int ret;
6182
6183 /*
6184 * ctl->data points to idev->cnf.forwarding; we must not
6185 * modify it until we hold the rtnl lock.
6186 */
6187 lctl = *ctl;
6188 lctl.data = &val;
6189
6190 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6191
6192 if (write)
6193 ret = addrconf_fixup_forwarding(ctl, valp, val);
6194 if (ret)
6195 *ppos = pos;
6196 return ret;
6197 }
6198
6199 static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
6200 void *buffer, size_t *lenp, loff_t *ppos)
6201 {
6202 struct inet6_dev *idev = ctl->extra1;
6203 int min_mtu = IPV6_MIN_MTU;
6204 struct ctl_table lctl;
6205
6206 lctl = *ctl;
6207 lctl.extra1 = &min_mtu;
6208 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6209
6210 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6211 }
6212
6213 static void dev_disable_change(struct inet6_dev *idev)
6214 {
6215 struct netdev_notifier_info info;
6216
6217 if (!idev || !idev->dev)
6218 return;
6219
6220 netdev_notifier_info_init(&info, idev->dev);
6221 if (idev->cnf.disable_ipv6)
6222 addrconf_notify(NULL, NETDEV_DOWN, &info);
6223 else
6224 addrconf_notify(NULL, NETDEV_UP, &info);
6225 }
6226
6227 static void addrconf_disable_change(struct net *net, __s32 newf)
6228 {
6229 struct net_device *dev;
6230 struct inet6_dev *idev;
6231
6232 for_each_netdev(net, dev) {
6233 idev = __in6_dev_get(dev);
6234 if (idev) {
6235 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6236 idev->cnf.disable_ipv6 = newf;
6237 if (changed)
6238 dev_disable_change(idev);
6239 }
6240 }
6241 }
6242
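/*
 * addrconf_disable_ipv6() implements the all/default/per-device semantics
 * of the disable_ipv6 sysctl: writing conf/default only changes the
 * template used for future devices, writing conf/all also updates the
 * default and toggles IPv6 on every existing device via
 * addrconf_disable_change(), and a per-device write toggles just that
 * device when the value actually changes.
 */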
6243 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
6244 {
6245 struct net *net;
6246 int old;
6247
6248 if (!rtnl_trylock())
6249 return restart_syscall();
6250
6251 net = (struct net *)table->extra2;
6252 old = *p;
6253 *p = newf;
6254
6255 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6256 rtnl_unlock();
6257 return 0;
6258 }
6259
6260 if (p == &net->ipv6.devconf_all->disable_ipv6) {
6261 net->ipv6.devconf_dflt->disable_ipv6 = newf;
6262 addrconf_disable_change(net, newf);
6263 } else if ((!newf) ^ (!old))
6264 dev_disable_change((struct inet6_dev *)table->extra1);
6265
6266 rtnl_unlock();
6267 return 0;
6268 }
6269
6270 static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
6271 void *buffer, size_t *lenp, loff_t *ppos)
6272 {
6273 int *valp = ctl->data;
6274 int val = *valp;
6275 loff_t pos = *ppos;
6276 struct ctl_table lctl;
6277 int ret;
6278
6279 /*
6280 * ctl->data points to idev->cnf.disable_ipv6; we must not
6281 * modify it until we hold the rtnl lock.
6282 */
6283 lctl = *ctl;
6284 lctl.data = &val;
6285
6286 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6287
6288 if (write)
6289 ret = addrconf_disable_ipv6(ctl, valp, val);
6290 if (ret)
6291 *ppos = pos;
6292 return ret;
6293 }
6294
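/*
 * proxy_ndp is committed directly by proc_dointvec(); this handler only
 * adds an RTM_NEWNETCONF notification for the scope that changed
 * (default, all, or a single interface) so userspace monitoring netconf
 * events sees the update.
 */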
6295 static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
6296 void *buffer, size_t *lenp, loff_t *ppos)
6297 {
6298 int *valp = ctl->data;
6299 int ret;
6300 int old, new;
6301
6302 old = *valp;
6303 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6304 new = *valp;
6305
6306 if (write && old != new) {
6307 struct net *net = ctl->extra2;
6308
6309 if (!rtnl_trylock())
6310 return restart_syscall();
6311
6312 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
6313 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6314 NETCONFA_PROXY_NEIGH,
6315 NETCONFA_IFINDEX_DEFAULT,
6316 net->ipv6.devconf_dflt);
6317 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
6318 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6319 NETCONFA_PROXY_NEIGH,
6320 NETCONFA_IFINDEX_ALL,
6321 net->ipv6.devconf_all);
6322 else {
6323 struct inet6_dev *idev = ctl->extra1;
6324
6325 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6326 NETCONFA_PROXY_NEIGH,
6327 idev->dev->ifindex,
6328 &idev->cnf);
6329 }
6330 rtnl_unlock();
6331 }
6332
6333 return ret;
6334 }
6335
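/*
 * addr_gen_mode writes are validated under the rtnl lock:
 * check_addr_gen_mode() rejects unknown modes and check_stable_privacy()
 * refuses stable-privacy mode when no secret is available.  A per-device
 * change reconfigures that device via addrconf_dev_config(); changing
 * conf/all also updates the default and re-runs the configuration on
 * every device whose mode differs.
 */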
6336 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
6337 void *buffer, size_t *lenp,
6338 loff_t *ppos)
6339 {
6340 int ret = 0;
6341 u32 new_val;
6342 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6343 struct net *net = (struct net *)ctl->extra2;
6344 struct ctl_table tmp = {
6345 .data = &new_val,
6346 .maxlen = sizeof(new_val),
6347 .mode = ctl->mode,
6348 };
6349
6350 if (!rtnl_trylock())
6351 return restart_syscall();
6352
6353 new_val = *((u32 *)ctl->data);
6354
6355 ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6356 if (ret != 0)
6357 goto out;
6358
6359 if (write) {
6360 if (check_addr_gen_mode(new_val) < 0) {
6361 ret = -EINVAL;
6362 goto out;
6363 }
6364
6365 if (idev) {
6366 if (check_stable_privacy(idev, net, new_val) < 0) {
6367 ret = -EINVAL;
6368 goto out;
6369 }
6370
6371 if (idev->cnf.addr_gen_mode != new_val) {
6372 idev->cnf.addr_gen_mode = new_val;
6373 addrconf_dev_config(idev->dev);
6374 }
6375 } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6376 struct net_device *dev;
6377
6378 net->ipv6.devconf_dflt->addr_gen_mode = new_val;
6379 for_each_netdev(net, dev) {
6380 idev = __in6_dev_get(dev);
6381 if (idev &&
6382 idev->cnf.addr_gen_mode != new_val) {
6383 idev->cnf.addr_gen_mode = new_val;
6384 addrconf_dev_config(idev->dev);
6385 }
6386 }
6387 }
6388
6389 *((u32 *)ctl->data) = new_val;
6390 }
6391
6392 out:
6393 rtnl_unlock();
6394
6395 return ret;
6396 }
6397
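/*
 * The stable_secret sysctl is exchanged as an IPv6-address-formatted
 * string, e.g. (illustrative value)
 *   echo 2001:db8::1 > /proc/sys/net/ipv6/conf/default/stable_secret
 * conf/all/stable_secret is always rejected with -EIO, as is reading a
 * secret that was never set.  A valid write parses the string with
 * in6_pton(), marks the secret initialized and switches the affected
 * device(s) to IN6_ADDR_GEN_MODE_STABLE_PRIVACY.
 */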
6398 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
6399 void *buffer, size_t *lenp,
6400 loff_t *ppos)
6401 {
6402 int err;
6403 struct in6_addr addr;
6404 char str[IPV6_MAX_STRLEN];
6405 struct ctl_table lctl = *ctl;
6406 struct net *net = ctl->extra2;
6407 struct ipv6_stable_secret *secret = ctl->data;
6408
6409 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6410 return -EIO;
6411
6412 lctl.maxlen = IPV6_MAX_STRLEN;
6413 lctl.data = str;
6414
6415 if (!rtnl_trylock())
6416 return restart_syscall();
6417
6418 if (!write && !secret->initialized) {
6419 err = -EIO;
6420 goto out;
6421 }
6422
6423 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6424 if (err >= sizeof(str)) {
6425 err = -EIO;
6426 goto out;
6427 }
6428
6429 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6430 if (err || !write)
6431 goto out;
6432
6433 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6434 err = -EIO;
6435 goto out;
6436 }
6437
6438 secret->initialized = true;
6439 secret->secret = addr;
6440
6441 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6442 struct net_device *dev;
6443
6444 for_each_netdev(net, dev) {
6445 struct inet6_dev *idev = __in6_dev_get(dev);
6446
6447 if (idev) {
6448 idev->cnf.addr_gen_mode =
6449 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6450 }
6451 }
6452 } else {
6453 struct inet6_dev *idev = ctl->extra1;
6454
6455 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6456 }
6457
6458 out:
6459 rtnl_unlock();
6460
6461 return err;
6462 }
6463
6464 static
6465 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
6466 int write, void *buffer,
6467 size_t *lenp,
6468 loff_t *ppos)
6469 {
6470 int *valp = ctl->data;
6471 int val = *valp;
6472 loff_t pos = *ppos;
6473 struct ctl_table lctl;
6474 int ret;
6475
6476 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
6477 * we must not modify it until we hold the rtnl lock.
6478 */
6479 lctl = *ctl;
6480 lctl.data = &val;
6481
6482 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6483
6484 if (write)
6485 ret = addrconf_fixup_linkdown(ctl, valp, val);
6486 if (ret)
6487 *ppos = pos;
6488 return ret;
6489 }
6490
6491 static
6492 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6493 {
6494 if (rt) {
6495 if (action)
6496 rt->dst.flags |= DST_NOPOLICY;
6497 else
6498 rt->dst.flags &= ~DST_NOPOLICY;
6499 }
6500 }
6501
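/*
 * For every address on the device, toggle DST_NOPOLICY on the host
 * route's per-CPU cached rt6_info copies (and on the fib6_info itself)
 * so that IPsec policy checks are skipped, or enforced again, for
 * traffic using these addresses.
 */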
6502 static
6503 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6504 {
6505 struct inet6_ifaddr *ifa;
6506
6507 read_lock_bh(&idev->lock);
6508 list_for_each_entry(ifa, &idev->addr_list, if_list) {
6509 spin_lock(&ifa->lock);
6510 if (ifa->rt) {
6511 /* host routes only use builtin fib6_nh */
6512 struct fib6_nh *nh = ifa->rt->fib6_nh;
6513 int cpu;
6514
6515 rcu_read_lock();
6516 ifa->rt->dst_nopolicy = val ? true : false;
6517 if (nh->rt6i_pcpu) {
6518 for_each_possible_cpu(cpu) {
6519 struct rt6_info **rtp;
6520
6521 rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6522 addrconf_set_nopolicy(*rtp, val);
6523 }
6524 }
6525 rcu_read_unlock();
6526 }
6527 spin_unlock(&ifa->lock);
6528 }
6529 read_unlock_bh(&idev->lock);
6530 }
6531
6532 static
6533 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
6534 {
6535 struct inet6_dev *idev;
6536 struct net *net;
6537
6538 if (!rtnl_trylock())
6539 return restart_syscall();
6540
6541 *valp = val;
6542
6543 net = (struct net *)ctl->extra2;
6544 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6545 rtnl_unlock();
6546 return 0;
6547 }
6548
6549 if (valp == &net->ipv6.devconf_all->disable_policy) {
6550 struct net_device *dev;
6551
6552 for_each_netdev(net, dev) {
6553 idev = __in6_dev_get(dev);
6554 if (idev)
6555 addrconf_disable_policy_idev(idev, val);
6556 }
6557 } else {
6558 idev = (struct inet6_dev *)ctl->extra1;
6559 addrconf_disable_policy_idev(idev, val);
6560 }
6561
6562 rtnl_unlock();
6563 return 0;
6564 }
6565
6566 static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
6567 void *buffer, size_t *lenp, loff_t *ppos)
6568 {
6569 int *valp = ctl->data;
6570 int val = *valp;
6571 loff_t pos = *ppos;
6572 struct ctl_table lctl;
6573 int ret;
6574
6575 lctl = *ctl;
6576 lctl.data = &val;
6577 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6578
6579 if (write && (*valp != val))
6580 ret = addrconf_disable_policy(ctl, valp, val);
6581
6582 if (ret)
6583 *ppos = pos;
6584
6585 return ret;
6586 }
6587
6588 static int minus_one = -1;
6589 static const int two_five_five = 255;
6590 static u32 ioam6_if_id_max = U16_MAX;
6591
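/*
 * Template for the per-interface sysctl tree.  __addrconf_sysctl_register()
 * duplicates this table for every device (plus the special "all" and
 * "default" entries), so each knob appears as
 * /proc/sys/net/ipv6/conf/<ifname>/<procname>, e.g.
 *   cat /proc/sys/net/ipv6/conf/eth0/accept_ra   (illustrative ifname)
 */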
6592 static const struct ctl_table addrconf_sysctl[] = {
6593 {
6594 .procname = "forwarding",
6595 .data = &ipv6_devconf.forwarding,
6596 .maxlen = sizeof(int),
6597 .mode = 0644,
6598 .proc_handler = addrconf_sysctl_forward,
6599 },
6600 {
6601 .procname = "hop_limit",
6602 .data = &ipv6_devconf.hop_limit,
6603 .maxlen = sizeof(int),
6604 .mode = 0644,
6605 .proc_handler = proc_dointvec_minmax,
6606 .extra1 = (void *)SYSCTL_ONE,
6607 .extra2 = (void *)&two_five_five,
6608 },
6609 {
6610 .procname = "mtu",
6611 .data = &ipv6_devconf.mtu6,
6612 .maxlen = sizeof(int),
6613 .mode = 0644,
6614 .proc_handler = addrconf_sysctl_mtu,
6615 },
6616 {
6617 .procname = "accept_ra",
6618 .data = &ipv6_devconf.accept_ra,
6619 .maxlen = sizeof(int),
6620 .mode = 0644,
6621 .proc_handler = proc_dointvec,
6622 },
6623 {
6624 .procname = "accept_redirects",
6625 .data = &ipv6_devconf.accept_redirects,
6626 .maxlen = sizeof(int),
6627 .mode = 0644,
6628 .proc_handler = proc_dointvec,
6629 },
6630 {
6631 .procname = "autoconf",
6632 .data = &ipv6_devconf.autoconf,
6633 .maxlen = sizeof(int),
6634 .mode = 0644,
6635 .proc_handler = proc_dointvec,
6636 },
6637 {
6638 .procname = "dad_transmits",
6639 .data = &ipv6_devconf.dad_transmits,
6640 .maxlen = sizeof(int),
6641 .mode = 0644,
6642 .proc_handler = proc_dointvec,
6643 },
6644 {
6645 .procname = "router_solicitations",
6646 .data = &ipv6_devconf.rtr_solicits,
6647 .maxlen = sizeof(int),
6648 .mode = 0644,
6649 .proc_handler = proc_dointvec_minmax,
6650 .extra1 = &minus_one,
6651 },
6652 {
6653 .procname = "router_solicitation_interval",
6654 .data = &ipv6_devconf.rtr_solicit_interval,
6655 .maxlen = sizeof(int),
6656 .mode = 0644,
6657 .proc_handler = proc_dointvec_jiffies,
6658 },
6659 {
6660 .procname = "router_solicitation_max_interval",
6661 .data = &ipv6_devconf.rtr_solicit_max_interval,
6662 .maxlen = sizeof(int),
6663 .mode = 0644,
6664 .proc_handler = proc_dointvec_jiffies,
6665 },
6666 {
6667 .procname = "router_solicitation_delay",
6668 .data = &ipv6_devconf.rtr_solicit_delay,
6669 .maxlen = sizeof(int),
6670 .mode = 0644,
6671 .proc_handler = proc_dointvec_jiffies,
6672 },
6673 {
6674 .procname = "force_mld_version",
6675 .data = &ipv6_devconf.force_mld_version,
6676 .maxlen = sizeof(int),
6677 .mode = 0644,
6678 .proc_handler = proc_dointvec,
6679 },
6680 {
6681 .procname = "mldv1_unsolicited_report_interval",
6682 .data =
6683 &ipv6_devconf.mldv1_unsolicited_report_interval,
6684 .maxlen = sizeof(int),
6685 .mode = 0644,
6686 .proc_handler = proc_dointvec_ms_jiffies,
6687 },
6688 {
6689 .procname = "mldv2_unsolicited_report_interval",
6690 .data =
6691 &ipv6_devconf.mldv2_unsolicited_report_interval,
6692 .maxlen = sizeof(int),
6693 .mode = 0644,
6694 .proc_handler = proc_dointvec_ms_jiffies,
6695 },
6696 {
6697 .procname = "use_tempaddr",
6698 .data = &ipv6_devconf.use_tempaddr,
6699 .maxlen = sizeof(int),
6700 .mode = 0644,
6701 .proc_handler = proc_dointvec,
6702 },
6703 {
6704 .procname = "temp_valid_lft",
6705 .data = &ipv6_devconf.temp_valid_lft,
6706 .maxlen = sizeof(int),
6707 .mode = 0644,
6708 .proc_handler = proc_dointvec,
6709 },
6710 {
6711 .procname = "temp_prefered_lft",
6712 .data = &ipv6_devconf.temp_prefered_lft,
6713 .maxlen = sizeof(int),
6714 .mode = 0644,
6715 .proc_handler = proc_dointvec,
6716 },
6717 {
6718 .procname = "regen_max_retry",
6719 .data = &ipv6_devconf.regen_max_retry,
6720 .maxlen = sizeof(int),
6721 .mode = 0644,
6722 .proc_handler = proc_dointvec,
6723 },
6724 {
6725 .procname = "max_desync_factor",
6726 .data = &ipv6_devconf.max_desync_factor,
6727 .maxlen = sizeof(int),
6728 .mode = 0644,
6729 .proc_handler = proc_dointvec,
6730 },
6731 {
6732 .procname = "max_addresses",
6733 .data = &ipv6_devconf.max_addresses,
6734 .maxlen = sizeof(int),
6735 .mode = 0644,
6736 .proc_handler = proc_dointvec,
6737 },
6738 {
6739 .procname = "accept_ra_defrtr",
6740 .data = &ipv6_devconf.accept_ra_defrtr,
6741 .maxlen = sizeof(int),
6742 .mode = 0644,
6743 .proc_handler = proc_dointvec,
6744 },
6745 {
6746 .procname = "ra_defrtr_metric",
6747 .data = &ipv6_devconf.ra_defrtr_metric,
6748 .maxlen = sizeof(u32),
6749 .mode = 0644,
6750 .proc_handler = proc_douintvec_minmax,
6751 .extra1 = (void *)SYSCTL_ONE,
6752 },
6753 {
6754 .procname = "accept_ra_min_hop_limit",
6755 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6756 .maxlen = sizeof(int),
6757 .mode = 0644,
6758 .proc_handler = proc_dointvec,
6759 },
6760 {
6761 .procname = "accept_ra_pinfo",
6762 .data = &ipv6_devconf.accept_ra_pinfo,
6763 .maxlen = sizeof(int),
6764 .mode = 0644,
6765 .proc_handler = proc_dointvec,
6766 },
6767 #ifdef CONFIG_IPV6_ROUTER_PREF
6768 {
6769 .procname = "accept_ra_rtr_pref",
6770 .data = &ipv6_devconf.accept_ra_rtr_pref,
6771 .maxlen = sizeof(int),
6772 .mode = 0644,
6773 .proc_handler = proc_dointvec,
6774 },
6775 {
6776 .procname = "router_probe_interval",
6777 .data = &ipv6_devconf.rtr_probe_interval,
6778 .maxlen = sizeof(int),
6779 .mode = 0644,
6780 .proc_handler = proc_dointvec_jiffies,
6781 },
6782 #ifdef CONFIG_IPV6_ROUTE_INFO
6783 {
6784 .procname = "accept_ra_rt_info_min_plen",
6785 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6786 .maxlen = sizeof(int),
6787 .mode = 0644,
6788 .proc_handler = proc_dointvec,
6789 },
6790 {
6791 .procname = "accept_ra_rt_info_max_plen",
6792 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6793 .maxlen = sizeof(int),
6794 .mode = 0644,
6795 .proc_handler = proc_dointvec,
6796 },
6797 #endif
6798 #endif
6799 {
6800 .procname = "proxy_ndp",
6801 .data = &ipv6_devconf.proxy_ndp,
6802 .maxlen = sizeof(int),
6803 .mode = 0644,
6804 .proc_handler = addrconf_sysctl_proxy_ndp,
6805 },
6806 {
6807 .procname = "accept_source_route",
6808 .data = &ipv6_devconf.accept_source_route,
6809 .maxlen = sizeof(int),
6810 .mode = 0644,
6811 .proc_handler = proc_dointvec,
6812 },
6813 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6814 {
6815 .procname = "optimistic_dad",
6816 .data = &ipv6_devconf.optimistic_dad,
6817 .maxlen = sizeof(int),
6818 .mode = 0644,
6819 .proc_handler = proc_dointvec,
6820 },
6821 {
6822 .procname = "use_optimistic",
6823 .data = &ipv6_devconf.use_optimistic,
6824 .maxlen = sizeof(int),
6825 .mode = 0644,
6826 .proc_handler = proc_dointvec,
6827 },
6828 #endif
6829 #ifdef CONFIG_IPV6_MROUTE
6830 {
6831 .procname = "mc_forwarding",
6832 .data = &ipv6_devconf.mc_forwarding,
6833 .maxlen = sizeof(int),
6834 .mode = 0444,
6835 .proc_handler = proc_dointvec,
6836 },
6837 #endif
6838 {
6839 .procname = "disable_ipv6",
6840 .data = &ipv6_devconf.disable_ipv6,
6841 .maxlen = sizeof(int),
6842 .mode = 0644,
6843 .proc_handler = addrconf_sysctl_disable,
6844 },
6845 {
6846 .procname = "accept_dad",
6847 .data = &ipv6_devconf.accept_dad,
6848 .maxlen = sizeof(int),
6849 .mode = 0644,
6850 .proc_handler = proc_dointvec,
6851 },
6852 {
6853 .procname = "force_tllao",
6854 .data = &ipv6_devconf.force_tllao,
6855 .maxlen = sizeof(int),
6856 .mode = 0644,
6857 .proc_handler = proc_dointvec
6858 },
6859 {
6860 .procname = "ndisc_notify",
6861 .data = &ipv6_devconf.ndisc_notify,
6862 .maxlen = sizeof(int),
6863 .mode = 0644,
6864 .proc_handler = proc_dointvec
6865 },
6866 {
6867 .procname = "suppress_frag_ndisc",
6868 .data = &ipv6_devconf.suppress_frag_ndisc,
6869 .maxlen = sizeof(int),
6870 .mode = 0644,
6871 .proc_handler = proc_dointvec
6872 },
6873 {
6874 .procname = "accept_ra_from_local",
6875 .data = &ipv6_devconf.accept_ra_from_local,
6876 .maxlen = sizeof(int),
6877 .mode = 0644,
6878 .proc_handler = proc_dointvec,
6879 },
6880 {
6881 .procname = "accept_ra_mtu",
6882 .data = &ipv6_devconf.accept_ra_mtu,
6883 .maxlen = sizeof(int),
6884 .mode = 0644,
6885 .proc_handler = proc_dointvec,
6886 },
6887 {
6888 .procname = "stable_secret",
6889 .data = &ipv6_devconf.stable_secret,
6890 .maxlen = IPV6_MAX_STRLEN,
6891 .mode = 0600,
6892 .proc_handler = addrconf_sysctl_stable_secret,
6893 },
6894 {
6895 .procname = "use_oif_addrs_only",
6896 .data = &ipv6_devconf.use_oif_addrs_only,
6897 .maxlen = sizeof(int),
6898 .mode = 0644,
6899 .proc_handler = proc_dointvec,
6900 },
6901 {
6902 .procname = "ignore_routes_with_linkdown",
6903 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6904 .maxlen = sizeof(int),
6905 .mode = 0644,
6906 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6907 },
6908 {
6909 .procname = "drop_unicast_in_l2_multicast",
6910 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6911 .maxlen = sizeof(int),
6912 .mode = 0644,
6913 .proc_handler = proc_dointvec,
6914 },
6915 {
6916 .procname = "drop_unsolicited_na",
6917 .data = &ipv6_devconf.drop_unsolicited_na,
6918 .maxlen = sizeof(int),
6919 .mode = 0644,
6920 .proc_handler = proc_dointvec,
6921 },
6922 {
6923 .procname = "keep_addr_on_down",
6924 .data = &ipv6_devconf.keep_addr_on_down,
6925 .maxlen = sizeof(int),
6926 .mode = 0644,
6927 .proc_handler = proc_dointvec,
6928
6929 },
6930 {
6931 .procname = "seg6_enabled",
6932 .data = &ipv6_devconf.seg6_enabled,
6933 .maxlen = sizeof(int),
6934 .mode = 0644,
6935 .proc_handler = proc_dointvec,
6936 },
6937 #ifdef CONFIG_IPV6_SEG6_HMAC
6938 {
6939 .procname = "seg6_require_hmac",
6940 .data = &ipv6_devconf.seg6_require_hmac,
6941 .maxlen = sizeof(int),
6942 .mode = 0644,
6943 .proc_handler = proc_dointvec,
6944 },
6945 #endif
6946 {
6947 .procname = "enhanced_dad",
6948 .data = &ipv6_devconf.enhanced_dad,
6949 .maxlen = sizeof(int),
6950 .mode = 0644,
6951 .proc_handler = proc_dointvec,
6952 },
6953 {
6954 .procname = "addr_gen_mode",
6955 .data = &ipv6_devconf.addr_gen_mode,
6956 .maxlen = sizeof(int),
6957 .mode = 0644,
6958 .proc_handler = addrconf_sysctl_addr_gen_mode,
6959 },
6960 {
6961 .procname = "disable_policy",
6962 .data = &ipv6_devconf.disable_policy,
6963 .maxlen = sizeof(int),
6964 .mode = 0644,
6965 .proc_handler = addrconf_sysctl_disable_policy,
6966 },
6967 {
6968 .procname = "ndisc_tclass",
6969 .data = &ipv6_devconf.ndisc_tclass,
6970 .maxlen = sizeof(int),
6971 .mode = 0644,
6972 .proc_handler = proc_dointvec_minmax,
6973 .extra1 = (void *)SYSCTL_ZERO,
6974 .extra2 = (void *)&two_five_five,
6975 },
6976 {
6977 .procname = "rpl_seg_enabled",
6978 .data = &ipv6_devconf.rpl_seg_enabled,
6979 .maxlen = sizeof(int),
6980 .mode = 0644,
6981 .proc_handler = proc_dointvec,
6982 },
6983 {
6984 .procname = "ioam6_enabled",
6985 .data = &ipv6_devconf.ioam6_enabled,
6986 .maxlen = sizeof(u8),
6987 .mode = 0644,
6988 .proc_handler = proc_dou8vec_minmax,
6989 .extra1 = (void *)SYSCTL_ZERO,
6990 .extra2 = (void *)SYSCTL_ONE,
6991 },
6992 {
6993 .procname = "ioam6_id",
6994 .data = &ipv6_devconf.ioam6_id,
6995 .maxlen = sizeof(u32),
6996 .mode = 0644,
6997 .proc_handler = proc_douintvec_minmax,
6998 .extra1 = (void *)SYSCTL_ZERO,
6999 .extra2 = (void *)&ioam6_if_id_max,
7000 },
7001 {
7002 .procname = "ioam6_id_wide",
7003 .data = &ipv6_devconf.ioam6_id_wide,
7004 .maxlen = sizeof(u32),
7005 .mode = 0644,
7006 .proc_handler = proc_douintvec,
7007 },
7008 {
7009 /* sentinel */
7010 }
7011 };
7012
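/*
 * __addrconf_sysctl_register() kmemdup()s the template above and rebases
 * every .data pointer from the global ipv6_devconf template onto the given
 * devconf instance (the "(char *)p - (char *)&ipv6_devconf" offset
 * arithmetic).  extra1/extra2 are only filled in with the idev/net cookies
 * when an entry did not set them itself, the table is registered under
 * net/ipv6/conf/<dev_name>, and the new configuration is announced with
 * RTM_NEWNETCONF.
 */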
7013 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
7014 struct inet6_dev *idev, struct ipv6_devconf *p)
7015 {
7016 int i, ifindex;
7017 struct ctl_table *table;
7018 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
7019
7020 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
7021 if (!table)
7022 goto out;
7023
7024 for (i = 0; table[i].data; i++) {
7025 table[i].data += (char *)p - (char *)&ipv6_devconf;
7026 /* Only fill in extra1/extra2 when the entry has not set them
7027 * itself: handlers such as proc_dointvec_minmax rely on those
7028 * fields for their limits, so they must not be overwritten.
7029 */
7030 if (!table[i].extra1 && !table[i].extra2) {
7031 table[i].extra1 = idev; /* embedded; no ref */
7032 table[i].extra2 = net;
7033 }
7034 }
7035
7036 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
7037
7038 p->sysctl_header = register_net_sysctl(net, path, table);
7039 if (!p->sysctl_header)
7040 goto free;
7041
7042 if (!strcmp(dev_name, "all"))
7043 ifindex = NETCONFA_IFINDEX_ALL;
7044 else if (!strcmp(dev_name, "default"))
7045 ifindex = NETCONFA_IFINDEX_DEFAULT;
7046 else
7047 ifindex = idev->dev->ifindex;
7048 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
7049 ifindex, p);
7050 return 0;
7051
7052 free:
7053 kfree(table);
7054 out:
7055 return -ENOBUFS;
7056 }
7057
7058 static void __addrconf_sysctl_unregister(struct net *net,
7059 struct ipv6_devconf *p, int ifindex)
7060 {
7061 struct ctl_table *table;
7062
7063 if (!p->sysctl_header)
7064 return;
7065
7066 table = p->sysctl_header->ctl_table_arg;
7067 unregister_net_sysctl_table(p->sysctl_header);
7068 p->sysctl_header = NULL;
7069 kfree(table);
7070
7071 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7072 }
7073
7074 static int addrconf_sysctl_register(struct inet6_dev *idev)
7075 {
7076 int err;
7077
7078 if (!sysctl_dev_name_is_allowed(idev->dev->name))
7079 return -EINVAL;
7080
7081 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7082 &ndisc_ifinfo_sysctl_change);
7083 if (err)
7084 return err;
7085 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7086 idev, &idev->cnf);
7087 if (err)
7088 neigh_sysctl_unregister(idev->nd_parms);
7089
7090 return err;
7091 }
7092
7093 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
7094 {
7095 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
7096 idev->dev->ifindex);
7097 neigh_sysctl_unregister(idev->nd_parms);
7098 }
7099
7100
7101 #endif
7102
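/*
 * Per-namespace setup.  Depending on sysctl_devconf_inherit_init_net, a
 * new namespace's "all"/"default" devconf are copied from init_net (1),
 * from the namespace of the creating process (3), or left at the
 * compiled-in defaults (0 and 2), before the sysctl trees for "all" and
 * "default" are registered.
 */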
7103 static int __net_init addrconf_init_net(struct net *net)
7104 {
7105 int err = -ENOMEM;
7106 struct ipv6_devconf *all, *dflt;
7107
7108 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
7109 if (!all)
7110 goto err_alloc_all;
7111
7112 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
7113 if (!dflt)
7114 goto err_alloc_dflt;
7115
7116 if (IS_ENABLED(CONFIG_SYSCTL) &&
7117 !net_eq(net, &init_net)) {
7118 switch (sysctl_devconf_inherit_init_net) {
7119 case 1: /* copy from init_net */
7120 memcpy(all, init_net.ipv6.devconf_all,
7121 sizeof(ipv6_devconf));
7122 memcpy(dflt, init_net.ipv6.devconf_dflt,
7123 sizeof(ipv6_devconf_dflt));
7124 break;
7125 case 3: /* copy from the current netns */
7126 memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
7127 sizeof(ipv6_devconf));
7128 memcpy(dflt,
7129 current->nsproxy->net_ns->ipv6.devconf_dflt,
7130 sizeof(ipv6_devconf_dflt));
7131 break;
7132 case 0:
7133 case 2:
7134 /* use compiled values */
7135 break;
7136 }
7137 }
7138
7139 /* these will be inherited by all namespaces */
7140 dflt->autoconf = ipv6_defaults.autoconf;
7141 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
7142
7143 dflt->stable_secret.initialized = false;
7144 all->stable_secret.initialized = false;
7145
7146 net->ipv6.devconf_all = all;
7147 net->ipv6.devconf_dflt = dflt;
7148
7149 #ifdef CONFIG_SYSCTL
7150 err = __addrconf_sysctl_register(net, "all", NULL, all);
7151 if (err < 0)
7152 goto err_reg_all;
7153
7154 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
7155 if (err < 0)
7156 goto err_reg_dflt;
7157 #endif
7158 return 0;
7159
7160 #ifdef CONFIG_SYSCTL
7161 err_reg_dflt:
7162 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
7163 err_reg_all:
7164 kfree(dflt);
7165 #endif
7166 err_alloc_dflt:
7167 kfree(all);
7168 err_alloc_all:
7169 return err;
7170 }
7171
7172 static void __net_exit addrconf_exit_net(struct net *net)
7173 {
7174 #ifdef CONFIG_SYSCTL
7175 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
7176 NETCONFA_IFINDEX_DEFAULT);
7177 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
7178 NETCONFA_IFINDEX_ALL);
7179 #endif
7180 kfree(net->ipv6.devconf_dflt);
7181 kfree(net->ipv6.devconf_all);
7182 }
7183
7184 static struct pernet_operations addrconf_ops = {
7185 .init = addrconf_init_net,
7186 .exit = addrconf_exit_net,
7187 };
7188
7189 static struct rtnl_af_ops inet6_ops __read_mostly = {
7190 .family = AF_INET6,
7191 .fill_link_af = inet6_fill_link_af,
7192 .get_link_af_size = inet6_get_link_af_size,
7193 .validate_link_af = inet6_validate_link_af,
7194 .set_link_af = inet6_set_link_af,
7195 };
7196
7197 /*
7198 * Init / cleanup code
7199 */
7200
7201 int __init addrconf_init(void)
7202 {
7203 struct inet6_dev *idev;
7204 int i, err;
7205
7206 err = ipv6_addr_label_init();
7207 if (err < 0) {
7208 pr_crit("%s: cannot initialize default policy table: %d\n",
7209 __func__, err);
7210 goto out;
7211 }
7212
7213 err = register_pernet_subsys(&addrconf_ops);
7214 if (err < 0)
7215 goto out_addrlabel;
7216
7217 addrconf_wq = create_workqueue("ipv6_addrconf");
7218 if (!addrconf_wq) {
7219 err = -ENOMEM;
7220 goto out_nowq;
7221 }
7222
7223 /* The addrconf netdev notifier requires that loopback_dev
7224 * has its ipv6 private information allocated and set up
7225 * before it can bring up and give link-local addresses
7226 * to other devices which are up.
7227 *
7228 * Unfortunately, loopback_dev is not necessarily the first
7229 * entry in the global dev_base list of net devices. In fact,
7230 * it is likely to be the very last entry on that list.
7231 * So the notifier registration below would try to give
7232 * link-local addresses to all devices besides loopback_dev
7233 * first, then loopback_dev, which causes all the non-loopback_dev
7234 * devices to fail to get a link-local address.
7235 *
7236 * So, as a temporary fix, allocate the ipv6 structure for
7237 * loopback_dev first by hand.
7238 * Longer term, all of the dependencies ipv6 has upon the loopback
7239 * device and it being up should be removed.
7240 */
7241 rtnl_lock();
7242 idev = ipv6_add_dev(init_net.loopback_dev);
7243 rtnl_unlock();
7244 if (IS_ERR(idev)) {
7245 err = PTR_ERR(idev);
7246 goto errlo;
7247 }
7248
7249 ip6_route_init_special_entries();
7250
7251 for (i = 0; i < IN6_ADDR_HSIZE; i++)
7252 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
7253
7254 register_netdevice_notifier(&ipv6_dev_notf);
7255
7256 addrconf_verify();
7257
7258 rtnl_af_register(&inet6_ops);
7259
7260 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
7261 NULL, inet6_dump_ifinfo, 0);
7262 if (err < 0)
7263 goto errout;
7264
7265 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
7266 inet6_rtm_newaddr, NULL, 0);
7267 if (err < 0)
7268 goto errout;
7269 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
7270 inet6_rtm_deladdr, NULL, 0);
7271 if (err < 0)
7272 goto errout;
7273 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
7274 inet6_rtm_getaddr, inet6_dump_ifaddr,
7275 RTNL_FLAG_DOIT_UNLOCKED);
7276 if (err < 0)
7277 goto errout;
7278 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
7279 NULL, inet6_dump_ifmcaddr, 0);
7280 if (err < 0)
7281 goto errout;
7282 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
7283 NULL, inet6_dump_ifacaddr, 0);
7284 if (err < 0)
7285 goto errout;
7286 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
7287 inet6_netconf_get_devconf,
7288 inet6_netconf_dump_devconf,
7289 RTNL_FLAG_DOIT_UNLOCKED);
7290 if (err < 0)
7291 goto errout;
7292 err = ipv6_addr_label_rtnl_register();
7293 if (err < 0)
7294 goto errout;
7295
7296 return 0;
7297 errout:
7298 rtnl_unregister_all(PF_INET6);
7299 rtnl_af_unregister(&inet6_ops);
7300 unregister_netdevice_notifier(&ipv6_dev_notf);
7301 errlo:
7302 destroy_workqueue(addrconf_wq);
7303 out_nowq:
7304 unregister_pernet_subsys(&addrconf_ops);
7305 out_addrlabel:
7306 ipv6_addr_label_cleanup();
7307 out:
7308 return err;
7309 }
7310
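/*
 * addrconf_cleanup() tears down in roughly the reverse order of
 * addrconf_init(): unregister the notifier, pernet ops, address labels and
 * AF_INET6 rtnl ops, then, under the rtnl lock, shut down IPv6 on every
 * device (loopback last), warn if any entry is still left in the address
 * hash table, cancel the periodic address verification work and destroy
 * the workqueue.
 */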
7311 void addrconf_cleanup(void)
7312 {
7313 struct net_device *dev;
7314 int i;
7315
7316 unregister_netdevice_notifier(&ipv6_dev_notf);
7317 unregister_pernet_subsys(&addrconf_ops);
7318 ipv6_addr_label_cleanup();
7319
7320 rtnl_af_unregister(&inet6_ops);
7321
7322 rtnl_lock();
7323
7324 /* clean dev list */
7325 for_each_netdev(&init_net, dev) {
7326 if (__in6_dev_get(dev) == NULL)
7327 continue;
7328 addrconf_ifdown(dev, true);
7329 }
7330 addrconf_ifdown(init_net.loopback_dev, true);
7331
7332 /*
7333 * Check hash table.
7334 */
7335 spin_lock_bh(&addrconf_hash_lock);
7336 for (i = 0; i < IN6_ADDR_HSIZE; i++)
7337 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
7338 spin_unlock_bh(&addrconf_hash_lock);
7339 cancel_delayed_work(&addr_chk_work);
7340 rtnl_unlock();
7341
7342 destroy_workqueue(addrconf_wq);
7343 }