net/ipv6/addrconf.c
1 /*
2 * IPv6 Address [auto]configuration
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 /*
16 * Changes:
17 *
18 * Janos Farkas : delete timer on ifdown
19 * <chexum@bankinf.banki.hu>
20 * Andi Kleen : kill double kfree on module
21 * unload.
22 * Maciej W. Rozycki : FDDI support
23 * sekiya@USAGI : Don't send too many RS
24 * packets.
25 * yoshfuji@USAGI : Fixed interval between DAD
26 * packets.
27 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
28 * address validation timer.
29 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
30 * support.
31 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
32 * address on a same interface.
33 * YOSHIFUJI Hideaki @USAGI : ARCnet support
34 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
35 * seq_file.
36 * YOSHIFUJI Hideaki @USAGI : improved source address
37 * selection; consider scope,
38 * status etc.
39 */
40
41 #define pr_fmt(fmt) "IPv6: " fmt
42
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched/signal.h>
47 #include <linux/socket.h>
48 #include <linux/sockios.h>
49 #include <linux/net.h>
50 #include <linux/inet.h>
51 #include <linux/in6.h>
52 #include <linux/netdevice.h>
53 #include <linux/if_addr.h>
54 #include <linux/if_arp.h>
55 #include <linux/if_arcnet.h>
56 #include <linux/if_infiniband.h>
57 #include <linux/route.h>
58 #include <linux/inetdevice.h>
59 #include <linux/init.h>
60 #include <linux/slab.h>
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64 #include <linux/capability.h>
65 #include <linux/delay.h>
66 #include <linux/notifier.h>
67 #include <linux/string.h>
68 #include <linux/hash.h>
69
70 #include <net/net_namespace.h>
71 #include <net/sock.h>
72 #include <net/snmp.h>
73
74 #include <net/6lowpan.h>
75 #include <net/firewire.h>
76 #include <net/ipv6.h>
77 #include <net/protocol.h>
78 #include <net/ndisc.h>
79 #include <net/ip6_route.h>
80 #include <net/addrconf.h>
81 #include <net/tcp.h>
82 #include <net/ip.h>
83 #include <net/netlink.h>
84 #include <net/pkt_sched.h>
85 #include <net/l3mdev.h>
86 #include <linux/if_tunnel.h>
87 #include <linux/rtnetlink.h>
88 #include <linux/netconf.h>
89 #include <linux/random.h>
90 #include <linux/uaccess.h>
91 #include <asm/unaligned.h>
92
93 #include <linux/proc_fs.h>
94 #include <linux/seq_file.h>
95 #include <linux/export.h>
96
97 #define INFINITY_LIFE_TIME 0xFFFFFFFF
98
99 #define IPV6_MAX_STRLEN \
100 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
101
102 static inline u32 cstamp_delta(unsigned long cstamp)
103 {
104 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
105 }
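/* cstamp_delta() turns a jiffies timestamp into hundredths of a second since
 * boot: e.g. with HZ=250, a delta of 500 jiffies becomes 200 (2.00 s).  This
 * is the unit user space sees in struct ifa_cacheinfo over netlink.
 */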
106
107 static inline s32 rfc3315_s14_backoff_init(s32 irt)
108 {
109 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
110 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
111 do_div(tmp, 1000000);
112 return (s32)tmp;
113 }
114
115 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
116 {
117 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
118 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
119 do_div(tmp, 1000000);
120 if ((s32)tmp > mrt) {
121 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
122 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
123 do_div(tmp, 1000000);
124 }
125 return (s32)tmp;
126 }
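/* Example of the resulting schedule, assuming irt and mrt are expressed in
 * jiffies as the router-solicitation backoff does: with irt = 4*HZ and
 * mrt = 3600*HZ, rfc3315_s14_backoff_init() picks an RT in roughly
 * [3.6*HZ, 4.4*HZ]; each rfc3315_s14_backoff_update() then multiplies the
 * previous RT by a random factor in [1.9, 2.1], and once the result would
 * exceed mrt the RT is re-randomized to [0.9, 1.1] * mrt instead, as
 * required by RFC 3315, section 14.
 */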
127
128 #ifdef CONFIG_SYSCTL
129 static int addrconf_sysctl_register(struct inet6_dev *idev);
130 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
131 #else
132 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
133 {
134 return 0;
135 }
136
137 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
138 {
139 }
140 #endif
141
142 static void ipv6_regen_rndid(struct inet6_dev *idev);
143 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
144
145 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
146 static int ipv6_count_addresses(const struct inet6_dev *idev);
147 static int ipv6_generate_stable_address(struct in6_addr *addr,
148 u8 dad_count,
149 const struct inet6_dev *idev);
150
151 #define IN6_ADDR_HSIZE_SHIFT 8
152 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
153 /*
154 * Configured unicast address hash table
155 */
156 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
157 static DEFINE_SPINLOCK(addrconf_hash_lock);
158
159 static void addrconf_verify(void);
160 static void addrconf_verify_rtnl(void);
161 static void addrconf_verify_work(struct work_struct *);
162
163 static struct workqueue_struct *addrconf_wq;
164 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
165
166 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
167 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
168
169 static void addrconf_type_change(struct net_device *dev,
170 unsigned long event);
171 static int addrconf_ifdown(struct net_device *dev, int how);
172
173 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
174 int plen,
175 const struct net_device *dev,
176 u32 flags, u32 noflags,
177 bool no_gw);
178
179 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
180 static void addrconf_dad_work(struct work_struct *w);
181 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
182 bool send_na);
183 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
184 static void addrconf_rs_timer(struct timer_list *t);
185 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
186 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
187
188 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
189 struct prefix_info *pinfo);
190
191 static struct ipv6_devconf ipv6_devconf __read_mostly = {
192 .forwarding = 0,
193 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
194 .mtu6 = IPV6_MIN_MTU,
195 .accept_ra = 1,
196 .accept_redirects = 1,
197 .autoconf = 1,
198 .force_mld_version = 0,
199 .mldv1_unsolicited_report_interval = 10 * HZ,
200 .mldv2_unsolicited_report_interval = HZ,
201 .dad_transmits = 1,
202 .rtr_solicits = MAX_RTR_SOLICITATIONS,
203 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
204 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
205 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
206 .use_tempaddr = 0,
207 .temp_valid_lft = TEMP_VALID_LIFETIME,
208 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
209 .regen_max_retry = REGEN_MAX_RETRY,
210 .max_desync_factor = MAX_DESYNC_FACTOR,
211 .max_addresses = IPV6_MAX_ADDRESSES,
212 .accept_ra_defrtr = 1,
213 .accept_ra_from_local = 0,
214 .accept_ra_min_hop_limit= 1,
215 .accept_ra_pinfo = 1,
216 #ifdef CONFIG_IPV6_ROUTER_PREF
217 .accept_ra_rtr_pref = 1,
218 .rtr_probe_interval = 60 * HZ,
219 #ifdef CONFIG_IPV6_ROUTE_INFO
220 .accept_ra_rt_info_min_plen = 0,
221 .accept_ra_rt_info_max_plen = 0,
222 #endif
223 #endif
224 .proxy_ndp = 0,
225 .accept_source_route = 0, /* we do not accept RH0 by default. */
226 .disable_ipv6 = 0,
227 .accept_dad = 0,
228 .suppress_frag_ndisc = 1,
229 .accept_ra_mtu = 1,
230 .stable_secret = {
231 .initialized = false,
232 },
233 .use_oif_addrs_only = 0,
234 .ignore_routes_with_linkdown = 0,
235 .keep_addr_on_down = 0,
236 .seg6_enabled = 0,
237 #ifdef CONFIG_IPV6_SEG6_HMAC
238 .seg6_require_hmac = 0,
239 #endif
240 .enhanced_dad = 1,
241 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
242 .disable_policy = 0,
243 };
244
245 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
246 .forwarding = 0,
247 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
248 .mtu6 = IPV6_MIN_MTU,
249 .accept_ra = 1,
250 .accept_redirects = 1,
251 .autoconf = 1,
252 .force_mld_version = 0,
253 .mldv1_unsolicited_report_interval = 10 * HZ,
254 .mldv2_unsolicited_report_interval = HZ,
255 .dad_transmits = 1,
256 .rtr_solicits = MAX_RTR_SOLICITATIONS,
257 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
258 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
259 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
260 .use_tempaddr = 0,
261 .temp_valid_lft = TEMP_VALID_LIFETIME,
262 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
263 .regen_max_retry = REGEN_MAX_RETRY,
264 .max_desync_factor = MAX_DESYNC_FACTOR,
265 .max_addresses = IPV6_MAX_ADDRESSES,
266 .accept_ra_defrtr = 1,
267 .accept_ra_from_local = 0,
268 .accept_ra_min_hop_limit= 1,
269 .accept_ra_pinfo = 1,
270 #ifdef CONFIG_IPV6_ROUTER_PREF
271 .accept_ra_rtr_pref = 1,
272 .rtr_probe_interval = 60 * HZ,
273 #ifdef CONFIG_IPV6_ROUTE_INFO
274 .accept_ra_rt_info_min_plen = 0,
275 .accept_ra_rt_info_max_plen = 0,
276 #endif
277 #endif
278 .proxy_ndp = 0,
279 .accept_source_route = 0, /* we do not accept RH0 by default. */
280 .disable_ipv6 = 0,
281 .accept_dad = 1,
282 .suppress_frag_ndisc = 1,
283 .accept_ra_mtu = 1,
284 .stable_secret = {
285 .initialized = false,
286 },
287 .use_oif_addrs_only = 0,
288 .ignore_routes_with_linkdown = 0,
289 .keep_addr_on_down = 0,
290 .seg6_enabled = 0,
291 #ifdef CONFIG_IPV6_SEG6_HMAC
292 .seg6_require_hmac = 0,
293 #endif
294 .enhanced_dad = 1,
295 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
296 .disable_policy = 0,
297 };
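/* Of these two templates, ipv6_devconf seeds the per-namespace "all" sysctl
 * table while ipv6_devconf_dflt seeds the "default" table; every new
 * inet6_dev then starts from a copy of the namespace's devconf_dflt (see the
 * memcpy() in ipv6_add_dev() below).
 */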
298
299 /* Check if link is ready: is it up and is a valid qdisc available */
300 static inline bool addrconf_link_ready(const struct net_device *dev)
301 {
302 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
303 }
304
305 static void addrconf_del_rs_timer(struct inet6_dev *idev)
306 {
307 if (del_timer(&idev->rs_timer))
308 __in6_dev_put(idev);
309 }
310
311 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
312 {
313 if (cancel_delayed_work(&ifp->dad_work))
314 __in6_ifa_put(ifp);
315 }
316
317 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
318 unsigned long when)
319 {
320 if (!timer_pending(&idev->rs_timer))
321 in6_dev_hold(idev);
322 mod_timer(&idev->rs_timer, jiffies + when);
323 }
324
325 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
326 unsigned long delay)
327 {
328 in6_ifa_hold(ifp);
329 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
330 in6_ifa_put(ifp);
331 }
332
333 static int snmp6_alloc_dev(struct inet6_dev *idev)
334 {
335 int i;
336
337 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
338 if (!idev->stats.ipv6)
339 goto err_ip;
340
341 for_each_possible_cpu(i) {
342 struct ipstats_mib *addrconf_stats;
343 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
344 u64_stats_init(&addrconf_stats->syncp);
345 }
346
347
348 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
349 GFP_KERNEL);
350 if (!idev->stats.icmpv6dev)
351 goto err_icmp;
352 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
353 GFP_KERNEL);
354 if (!idev->stats.icmpv6msgdev)
355 goto err_icmpmsg;
356
357 return 0;
358
359 err_icmpmsg:
360 kfree(idev->stats.icmpv6dev);
361 err_icmp:
362 free_percpu(idev->stats.ipv6);
363 err_ip:
364 return -ENOMEM;
365 }
366
367 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
368 {
369 struct inet6_dev *ndev;
370 int err = -ENOMEM;
371
372 ASSERT_RTNL();
373
374 if (dev->mtu < IPV6_MIN_MTU)
375 return ERR_PTR(-EINVAL);
376
377 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
378 if (!ndev)
379 return ERR_PTR(err);
380
381 rwlock_init(&ndev->lock);
382 ndev->dev = dev;
383 INIT_LIST_HEAD(&ndev->addr_list);
384 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
385 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
386
387 if (ndev->cnf.stable_secret.initialized)
388 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
389
390 ndev->cnf.mtu6 = dev->mtu;
391 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
392 if (!ndev->nd_parms) {
393 kfree(ndev);
394 return ERR_PTR(err);
395 }
396 if (ndev->cnf.forwarding)
397 dev_disable_lro(dev);
398 /* We refer to the device */
399 dev_hold(dev);
400
401 if (snmp6_alloc_dev(ndev) < 0) {
402 netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
403 __func__);
404 neigh_parms_release(&nd_tbl, ndev->nd_parms);
405 dev_put(dev);
406 kfree(ndev);
407 return ERR_PTR(err);
408 }
409
410 if (snmp6_register_dev(ndev) < 0) {
411 netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
412 __func__, dev->name);
413 goto err_release;
414 }
415
416 /* One reference from device. */
417 refcount_set(&ndev->refcnt, 1);
418
419 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
420 ndev->cnf.accept_dad = -1;
421
422 #if IS_ENABLED(CONFIG_IPV6_SIT)
423 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
424 pr_info("%s: Disabled Multicast RS\n", dev->name);
425 ndev->cnf.rtr_solicits = 0;
426 }
427 #endif
428
429 INIT_LIST_HEAD(&ndev->tempaddr_list);
430 ndev->desync_factor = U32_MAX;
431 if ((dev->flags&IFF_LOOPBACK) ||
432 dev->type == ARPHRD_TUNNEL ||
433 dev->type == ARPHRD_TUNNEL6 ||
434 dev->type == ARPHRD_SIT ||
435 dev->type == ARPHRD_NONE) {
436 ndev->cnf.use_tempaddr = -1;
437 } else
438 ipv6_regen_rndid(ndev);
439
440 ndev->token = in6addr_any;
441
442 if (netif_running(dev) && addrconf_link_ready(dev))
443 ndev->if_flags |= IF_READY;
444
445 ipv6_mc_init_dev(ndev);
446 ndev->tstamp = jiffies;
447 err = addrconf_sysctl_register(ndev);
448 if (err) {
449 ipv6_mc_destroy_dev(ndev);
450 snmp6_unregister_dev(ndev);
451 goto err_release;
452 }
453 /* protected by rtnl_lock */
454 rcu_assign_pointer(dev->ip6_ptr, ndev);
455
456 /* Join interface-local all-node multicast group */
457 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
458
459 /* Join all-node multicast group */
460 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
461
462 /* Join all-router multicast group if forwarding is set */
463 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
464 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
465
466 return ndev;
467
468 err_release:
469 neigh_parms_release(&nd_tbl, ndev->nd_parms);
470 ndev->dead = 1;
471 in6_dev_finish_destroy(ndev);
472 return ERR_PTR(err);
473 }
474
475 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
476 {
477 struct inet6_dev *idev;
478
479 ASSERT_RTNL();
480
481 idev = __in6_dev_get(dev);
482 if (!idev) {
483 idev = ipv6_add_dev(dev);
484 if (IS_ERR(idev))
485 return NULL;
486 }
487
488 if (dev->flags&IFF_UP)
489 ipv6_mc_up(idev);
490 return idev;
491 }
492
493 static int inet6_netconf_msgsize_devconf(int type)
494 {
495 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
496 + nla_total_size(4); /* NETCONFA_IFINDEX */
497 bool all = false;
498
499 if (type == NETCONFA_ALL)
500 all = true;
501
502 if (all || type == NETCONFA_FORWARDING)
503 size += nla_total_size(4);
504 #ifdef CONFIG_IPV6_MROUTE
505 if (all || type == NETCONFA_MC_FORWARDING)
506 size += nla_total_size(4);
507 #endif
508 if (all || type == NETCONFA_PROXY_NEIGH)
509 size += nla_total_size(4);
510
511 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
512 size += nla_total_size(4);
513
514 return size;
515 }
516
517 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
518 struct ipv6_devconf *devconf, u32 portid,
519 u32 seq, int event, unsigned int flags,
520 int type)
521 {
522 struct nlmsghdr *nlh;
523 struct netconfmsg *ncm;
524 bool all = false;
525
526 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
527 flags);
528 if (!nlh)
529 return -EMSGSIZE;
530
531 if (type == NETCONFA_ALL)
532 all = true;
533
534 ncm = nlmsg_data(nlh);
535 ncm->ncm_family = AF_INET6;
536
537 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
538 goto nla_put_failure;
539
540 if (!devconf)
541 goto out;
542
543 if ((all || type == NETCONFA_FORWARDING) &&
544 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
545 goto nla_put_failure;
546 #ifdef CONFIG_IPV6_MROUTE
547 if ((all || type == NETCONFA_MC_FORWARDING) &&
548 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
549 devconf->mc_forwarding) < 0)
550 goto nla_put_failure;
551 #endif
552 if ((all || type == NETCONFA_PROXY_NEIGH) &&
553 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
554 goto nla_put_failure;
555
556 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
557 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
558 devconf->ignore_routes_with_linkdown) < 0)
559 goto nla_put_failure;
560
561 out:
562 nlmsg_end(skb, nlh);
563 return 0;
564
565 nla_put_failure:
566 nlmsg_cancel(skb, nlh);
567 return -EMSGSIZE;
568 }
569
570 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
571 int ifindex, struct ipv6_devconf *devconf)
572 {
573 struct sk_buff *skb;
574 int err = -ENOBUFS;
575
576 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
577 if (!skb)
578 goto errout;
579
580 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
581 event, 0, type);
582 if (err < 0) {
583 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
584 WARN_ON(err == -EMSGSIZE);
585 kfree_skb(skb);
586 goto errout;
587 }
588 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
589 return;
590 errout:
591 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
592 }
593
594 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
595 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
596 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
597 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
598 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
599 };
600
601 static int inet6_netconf_valid_get_req(struct sk_buff *skb,
602 const struct nlmsghdr *nlh,
603 struct nlattr **tb,
604 struct netlink_ext_ack *extack)
605 {
606 int i, err;
607
608 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
609 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
610 return -EINVAL;
611 }
612
613 if (!netlink_strict_get_check(skb))
614 return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
615 tb, NETCONFA_MAX,
616 devconf_ipv6_policy, extack);
617
618 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
619 tb, NETCONFA_MAX,
620 devconf_ipv6_policy, extack);
621 if (err)
622 return err;
623
624 for (i = 0; i <= NETCONFA_MAX; i++) {
625 if (!tb[i])
626 continue;
627
628 switch (i) {
629 case NETCONFA_IFINDEX:
630 break;
631 default:
632 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
633 return -EINVAL;
634 }
635 }
636
637 return 0;
638 }
639
640 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
641 struct nlmsghdr *nlh,
642 struct netlink_ext_ack *extack)
643 {
644 struct net *net = sock_net(in_skb->sk);
645 struct nlattr *tb[NETCONFA_MAX+1];
646 struct inet6_dev *in6_dev = NULL;
647 struct net_device *dev = NULL;
648 struct sk_buff *skb;
649 struct ipv6_devconf *devconf;
650 int ifindex;
651 int err;
652
653 err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
654 if (err < 0)
655 return err;
656
657 if (!tb[NETCONFA_IFINDEX])
658 return -EINVAL;
659
660 err = -EINVAL;
661 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
662 switch (ifindex) {
663 case NETCONFA_IFINDEX_ALL:
664 devconf = net->ipv6.devconf_all;
665 break;
666 case NETCONFA_IFINDEX_DEFAULT:
667 devconf = net->ipv6.devconf_dflt;
668 break;
669 default:
670 dev = dev_get_by_index(net, ifindex);
671 if (!dev)
672 return -EINVAL;
673 in6_dev = in6_dev_get(dev);
674 if (!in6_dev)
675 goto errout;
676 devconf = &in6_dev->cnf;
677 break;
678 }
679
680 err = -ENOBUFS;
681 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
682 if (!skb)
683 goto errout;
684
685 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
686 NETLINK_CB(in_skb).portid,
687 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
688 NETCONFA_ALL);
689 if (err < 0) {
690 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
691 WARN_ON(err == -EMSGSIZE);
692 kfree_skb(skb);
693 goto errout;
694 }
695 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
696 errout:
697 if (in6_dev)
698 in6_dev_put(in6_dev);
699 if (dev)
700 dev_put(dev);
701 return err;
702 }
703
704 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
705 struct netlink_callback *cb)
706 {
707 const struct nlmsghdr *nlh = cb->nlh;
708 struct net *net = sock_net(skb->sk);
709 int h, s_h;
710 int idx, s_idx;
711 struct net_device *dev;
712 struct inet6_dev *idev;
713 struct hlist_head *head;
714
715 if (cb->strict_check) {
716 struct netlink_ext_ack *extack = cb->extack;
717 struct netconfmsg *ncm;
718
719 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
720 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
721 return -EINVAL;
722 }
723
724 if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
725 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
726 return -EINVAL;
727 }
728 }
729
730 s_h = cb->args[0];
731 s_idx = idx = cb->args[1];
732
733 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
734 idx = 0;
735 head = &net->dev_index_head[h];
736 rcu_read_lock();
737 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
738 net->dev_base_seq;
739 hlist_for_each_entry_rcu(dev, head, index_hlist) {
740 if (idx < s_idx)
741 goto cont;
742 idev = __in6_dev_get(dev);
743 if (!idev)
744 goto cont;
745
746 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
747 &idev->cnf,
748 NETLINK_CB(cb->skb).portid,
749 nlh->nlmsg_seq,
750 RTM_NEWNETCONF,
751 NLM_F_MULTI,
752 NETCONFA_ALL) < 0) {
753 rcu_read_unlock();
754 goto done;
755 }
756 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
757 cont:
758 idx++;
759 }
760 rcu_read_unlock();
761 }
762 if (h == NETDEV_HASHENTRIES) {
763 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
764 net->ipv6.devconf_all,
765 NETLINK_CB(cb->skb).portid,
766 nlh->nlmsg_seq,
767 RTM_NEWNETCONF, NLM_F_MULTI,
768 NETCONFA_ALL) < 0)
769 goto done;
770 else
771 h++;
772 }
773 if (h == NETDEV_HASHENTRIES + 1) {
774 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
775 net->ipv6.devconf_dflt,
776 NETLINK_CB(cb->skb).portid,
777 nlh->nlmsg_seq,
778 RTM_NEWNETCONF, NLM_F_MULTI,
779 NETCONFA_ALL) < 0)
780 goto done;
781 else
782 h++;
783 }
784 done:
785 cb->args[0] = h;
786 cb->args[1] = idx;
787
788 return skb->len;
789 }
790
791 #ifdef CONFIG_SYSCTL
792 static void dev_forward_change(struct inet6_dev *idev)
793 {
794 struct net_device *dev;
795 struct inet6_ifaddr *ifa;
796
797 if (!idev)
798 return;
799 dev = idev->dev;
800 if (idev->cnf.forwarding)
801 dev_disable_lro(dev);
802 if (dev->flags & IFF_MULTICAST) {
803 if (idev->cnf.forwarding) {
804 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
805 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
806 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
807 } else {
808 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
809 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
810 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
811 }
812 }
813
814 list_for_each_entry(ifa, &idev->addr_list, if_list) {
815 if (ifa->flags&IFA_F_TENTATIVE)
816 continue;
817 if (idev->cnf.forwarding)
818 addrconf_join_anycast(ifa);
819 else
820 addrconf_leave_anycast(ifa);
821 }
822 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
823 NETCONFA_FORWARDING,
824 dev->ifindex, &idev->cnf);
825 }
826
827
828 static void addrconf_forward_change(struct net *net, __s32 newf)
829 {
830 struct net_device *dev;
831 struct inet6_dev *idev;
832
833 for_each_netdev(net, dev) {
834 idev = __in6_dev_get(dev);
835 if (idev) {
836 int changed = (!idev->cnf.forwarding) ^ (!newf);
837 idev->cnf.forwarding = newf;
838 if (changed)
839 dev_forward_change(idev);
840 }
841 }
842 }
843
844 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
845 {
846 struct net *net;
847 int old;
848
849 if (!rtnl_trylock())
850 return restart_syscall();
851
852 net = (struct net *)table->extra2;
853 old = *p;
854 *p = newf;
855
856 if (p == &net->ipv6.devconf_dflt->forwarding) {
857 if ((!newf) ^ (!old))
858 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
859 NETCONFA_FORWARDING,
860 NETCONFA_IFINDEX_DEFAULT,
861 net->ipv6.devconf_dflt);
862 rtnl_unlock();
863 return 0;
864 }
865
866 if (p == &net->ipv6.devconf_all->forwarding) {
867 int old_dflt = net->ipv6.devconf_dflt->forwarding;
868
869 net->ipv6.devconf_dflt->forwarding = newf;
870 if ((!newf) ^ (!old_dflt))
871 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
872 NETCONFA_FORWARDING,
873 NETCONFA_IFINDEX_DEFAULT,
874 net->ipv6.devconf_dflt);
875
876 addrconf_forward_change(net, newf);
877 if ((!newf) ^ (!old))
878 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
879 NETCONFA_FORWARDING,
880 NETCONFA_IFINDEX_ALL,
881 net->ipv6.devconf_all);
882 } else if ((!newf) ^ (!old))
883 dev_forward_change((struct inet6_dev *)table->extra1);
884 rtnl_unlock();
885
886 if (newf)
887 rt6_purge_dflt_routers(net);
888 return 1;
889 }
890
891 static void addrconf_linkdown_change(struct net *net, __s32 newf)
892 {
893 struct net_device *dev;
894 struct inet6_dev *idev;
895
896 for_each_netdev(net, dev) {
897 idev = __in6_dev_get(dev);
898 if (idev) {
899 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
900
901 idev->cnf.ignore_routes_with_linkdown = newf;
902 if (changed)
903 inet6_netconf_notify_devconf(dev_net(dev),
904 RTM_NEWNETCONF,
905 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
906 dev->ifindex,
907 &idev->cnf);
908 }
909 }
910 }
911
912 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
913 {
914 struct net *net;
915 int old;
916
917 if (!rtnl_trylock())
918 return restart_syscall();
919
920 net = (struct net *)table->extra2;
921 old = *p;
922 *p = newf;
923
924 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
925 if ((!newf) ^ (!old))
926 inet6_netconf_notify_devconf(net,
927 RTM_NEWNETCONF,
928 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
929 NETCONFA_IFINDEX_DEFAULT,
930 net->ipv6.devconf_dflt);
931 rtnl_unlock();
932 return 0;
933 }
934
935 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
936 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
937 addrconf_linkdown_change(net, newf);
938 if ((!newf) ^ (!old))
939 inet6_netconf_notify_devconf(net,
940 RTM_NEWNETCONF,
941 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
942 NETCONFA_IFINDEX_ALL,
943 net->ipv6.devconf_all);
944 }
945 rtnl_unlock();
946
947 return 1;
948 }
949
950 #endif
951
952 /* Nobody refers to this ifaddr, destroy it */
953 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
954 {
955 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
956
957 #ifdef NET_REFCNT_DEBUG
958 pr_debug("%s\n", __func__);
959 #endif
960
961 in6_dev_put(ifp->idev);
962
963 if (cancel_delayed_work(&ifp->dad_work))
964 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
965 ifp);
966
967 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
968 pr_warn("Freeing alive inet6 address %p\n", ifp);
969 return;
970 }
971
972 kfree_rcu(ifp, rcu);
973 }
974
975 static void
976 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
977 {
978 struct list_head *p;
979 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
980
981 /*
982 * Each device address list is sorted in order of scope -
983 * global before linklocal.
984 */
985 list_for_each(p, &idev->addr_list) {
986 struct inet6_ifaddr *ifa
987 = list_entry(p, struct inet6_ifaddr, if_list);
988 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
989 break;
990 }
991
992 list_add_tail_rcu(&ifp->if_list, p);
993 }
994
995 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
996 {
997 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
998
999 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1000 }
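/* hash_32() with IN6_ADDR_HSIZE_SHIFT (8) yields a bucket index in
 * [0, IN6_ADDR_HSIZE - 1], i.e. one of the 256 inet6_addr_lst chains, and
 * mixing in net_hash_mix() keeps identical addresses from different network
 * namespaces from always landing in the same bucket.
 */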
1001
1002 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1003 struct net_device *dev, unsigned int hash)
1004 {
1005 struct inet6_ifaddr *ifp;
1006
1007 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
1008 if (!net_eq(dev_net(ifp->idev->dev), net))
1009 continue;
1010 if (ipv6_addr_equal(&ifp->addr, addr)) {
1011 if (!dev || ifp->idev->dev == dev)
1012 return true;
1013 }
1014 }
1015 return false;
1016 }
1017
1018 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1019 {
1020 unsigned int hash = inet6_addr_hash(dev_net(dev), &ifa->addr);
1021 int err = 0;
1022
1023 spin_lock(&addrconf_hash_lock);
1024
1025 /* Ignore adding duplicate addresses on an interface */
1026 if (ipv6_chk_same_addr(dev_net(dev), &ifa->addr, dev, hash)) {
1027 netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1028 err = -EEXIST;
1029 } else {
1030 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
1031 }
1032
1033 spin_unlock(&addrconf_hash_lock);
1034
1035 return err;
1036 }
1037
1038 /* On success it returns ifp with increased reference count */
1039
1040 static struct inet6_ifaddr *
1041 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1042 bool can_block, struct netlink_ext_ack *extack)
1043 {
1044 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1045 int addr_type = ipv6_addr_type(cfg->pfx);
1046 struct net *net = dev_net(idev->dev);
1047 struct inet6_ifaddr *ifa = NULL;
1048 struct fib6_info *f6i = NULL;
1049 int err = 0;
1050
1051 if (addr_type == IPV6_ADDR_ANY ||
1052 addr_type & IPV6_ADDR_MULTICAST ||
1053 (!(idev->dev->flags & IFF_LOOPBACK) &&
1054 !netif_is_l3_master(idev->dev) &&
1055 addr_type & IPV6_ADDR_LOOPBACK))
1056 return ERR_PTR(-EADDRNOTAVAIL);
1057
1058 if (idev->dead) {
1059 err = -ENODEV; /*XXX*/
1060 goto out;
1061 }
1062
1063 if (idev->cnf.disable_ipv6) {
1064 err = -EACCES;
1065 goto out;
1066 }
1067
1068 /* validator notifier needs to be blocking;
1069 * do not call in atomic context
1070 */
1071 if (can_block) {
1072 struct in6_validator_info i6vi = {
1073 .i6vi_addr = *cfg->pfx,
1074 .i6vi_dev = idev,
1075 .extack = extack,
1076 };
1077
1078 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1079 err = notifier_to_errno(err);
1080 if (err < 0)
1081 goto out;
1082 }
1083
1084 ifa = kzalloc(sizeof(*ifa), gfp_flags);
1085 if (!ifa) {
1086 err = -ENOBUFS;
1087 goto out;
1088 }
1089
1090 f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags);
1091 if (IS_ERR(f6i)) {
1092 err = PTR_ERR(f6i);
1093 f6i = NULL;
1094 goto out;
1095 }
1096
1097 if (net->ipv6.devconf_all->disable_policy ||
1098 idev->cnf.disable_policy)
1099 f6i->dst_nopolicy = true;
1100
1101 neigh_parms_data_state_setall(idev->nd_parms);
1102
1103 ifa->addr = *cfg->pfx;
1104 if (cfg->peer_pfx)
1105 ifa->peer_addr = *cfg->peer_pfx;
1106
1107 spin_lock_init(&ifa->lock);
1108 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1109 INIT_HLIST_NODE(&ifa->addr_lst);
1110 ifa->scope = cfg->scope;
1111 ifa->prefix_len = cfg->plen;
1112 ifa->rt_priority = cfg->rt_priority;
1113 ifa->flags = cfg->ifa_flags;
1114 /* No need to add the TENTATIVE flag for addresses with NODAD */
1115 if (!(cfg->ifa_flags & IFA_F_NODAD))
1116 ifa->flags |= IFA_F_TENTATIVE;
1117 ifa->valid_lft = cfg->valid_lft;
1118 ifa->prefered_lft = cfg->preferred_lft;
1119 ifa->cstamp = ifa->tstamp = jiffies;
1120 ifa->tokenized = false;
1121
1122 ifa->rt = f6i;
1123
1124 ifa->idev = idev;
1125 in6_dev_hold(idev);
1126
1127 /* For caller */
1128 refcount_set(&ifa->refcnt, 1);
1129
1130 rcu_read_lock_bh();
1131
1132 err = ipv6_add_addr_hash(idev->dev, ifa);
1133 if (err < 0) {
1134 rcu_read_unlock_bh();
1135 goto out;
1136 }
1137
1138 write_lock(&idev->lock);
1139
1140 /* Add to inet6_dev unicast addr list. */
1141 ipv6_link_dev_addr(idev, ifa);
1142
1143 if (ifa->flags&IFA_F_TEMPORARY) {
1144 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1145 in6_ifa_hold(ifa);
1146 }
1147
1148 in6_ifa_hold(ifa);
1149 write_unlock(&idev->lock);
1150
1151 rcu_read_unlock_bh();
1152
1153 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1154 out:
1155 if (unlikely(err < 0)) {
1156 fib6_info_release(f6i);
1157
1158 if (ifa) {
1159 if (ifa->idev)
1160 in6_dev_put(ifa->idev);
1161 kfree(ifa);
1162 }
1163 ifa = ERR_PTR(err);
1164 }
1165
1166 return ifa;
1167 }
1168
1169 enum cleanup_prefix_rt_t {
1170 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1171 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1172 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1173 };
1174
1175 /*
1176 * Check whether the prefix for ifp would still need a prefix route
1177 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1178 * constants.
1179 *
1180 * 1) we don't purge prefix if address was not permanent.
1181 * prefix is managed by its own lifetime.
1182 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1183 * 3) if there are no addresses, delete prefix.
1184 * 4) if there are still other permanent address(es),
1185 * corresponding prefix is still permanent.
1186 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1187 * don't purge the prefix, assume user space is managing it.
1188 * 6) otherwise, update prefix lifetime to the
1189 * longest valid lifetime among the corresponding
1190 * addresses on the device.
1191 * Note: subsequent RA will update lifetime.
1192 **/
1193 static enum cleanup_prefix_rt_t
1194 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1195 {
1196 struct inet6_ifaddr *ifa;
1197 struct inet6_dev *idev = ifp->idev;
1198 unsigned long lifetime;
1199 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1200
1201 *expires = jiffies;
1202
1203 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1204 if (ifa == ifp)
1205 continue;
1206 if (ifa->prefix_len != ifp->prefix_len ||
1207 !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1208 ifp->prefix_len))
1209 continue;
1210 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1211 return CLEANUP_PREFIX_RT_NOP;
1212
1213 action = CLEANUP_PREFIX_RT_EXPIRE;
1214
1215 spin_lock(&ifa->lock);
1216
1217 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1218 /*
1219 * Note: Because this address is
1220 * not permanent, lifetime <
1221 * LONG_MAX / HZ here.
1222 */
1223 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1224 *expires = ifa->tstamp + lifetime * HZ;
1225 spin_unlock(&ifa->lock);
1226 }
1227
1228 return action;
1229 }
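/* Concrete example of the rules above: if 2001:db8::1/64 is deleted while a
 * permanent (or IFA_F_NOPREFIXROUTE) 2001:db8::2/64 remains, the prefix
 * route is left alone (NOP); if only another non-permanent address in the
 * prefix remains, the route's expiry is pushed out to that address's
 * remaining valid lifetime (EXPIRE); with no other address in the prefix,
 * the prefix route is deleted (DEL).
 */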
1230
1231 static void
1232 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
1233 {
1234 struct fib6_info *f6i;
1235
1236 f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
1237 ifp->idev->dev, 0, RTF_DEFAULT, true);
1238 if (f6i) {
1239 if (del_rt)
1240 ip6_del_rt(dev_net(ifp->idev->dev), f6i);
1241 else {
1242 if (!(f6i->fib6_flags & RTF_EXPIRES))
1243 fib6_set_expires(f6i, expires);
1244 fib6_info_release(f6i);
1245 }
1246 }
1247 }
1248
1249
1250 /* This function takes a referenced ifp and releases that reference before returning */
1251
1252 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1253 {
1254 int state;
1255 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1256 unsigned long expires;
1257
1258 ASSERT_RTNL();
1259
1260 spin_lock_bh(&ifp->lock);
1261 state = ifp->state;
1262 ifp->state = INET6_IFADDR_STATE_DEAD;
1263 spin_unlock_bh(&ifp->lock);
1264
1265 if (state == INET6_IFADDR_STATE_DEAD)
1266 goto out;
1267
1268 spin_lock_bh(&addrconf_hash_lock);
1269 hlist_del_init_rcu(&ifp->addr_lst);
1270 spin_unlock_bh(&addrconf_hash_lock);
1271
1272 write_lock_bh(&ifp->idev->lock);
1273
1274 if (ifp->flags&IFA_F_TEMPORARY) {
1275 list_del(&ifp->tmp_list);
1276 if (ifp->ifpub) {
1277 in6_ifa_put(ifp->ifpub);
1278 ifp->ifpub = NULL;
1279 }
1280 __in6_ifa_put(ifp);
1281 }
1282
1283 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1284 action = check_cleanup_prefix_route(ifp, &expires);
1285
1286 list_del_rcu(&ifp->if_list);
1287 __in6_ifa_put(ifp);
1288
1289 write_unlock_bh(&ifp->idev->lock);
1290
1291 addrconf_del_dad_work(ifp);
1292
1293 ipv6_ifa_notify(RTM_DELADDR, ifp);
1294
1295 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1296
1297 if (action != CLEANUP_PREFIX_RT_NOP) {
1298 cleanup_prefix_route(ifp, expires,
1299 action == CLEANUP_PREFIX_RT_DEL);
1300 }
1301
1302 /* clean up prefsrc entries */
1303 rt6_remove_prefsrc(ifp);
1304 out:
1305 in6_ifa_put(ifp);
1306 }
1307
1308 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp,
1309 struct inet6_ifaddr *ift,
1310 bool block)
1311 {
1312 struct inet6_dev *idev = ifp->idev;
1313 struct in6_addr addr, *tmpaddr;
1314 unsigned long tmp_tstamp, age;
1315 unsigned long regen_advance;
1316 struct ifa6_config cfg;
1317 int ret = 0;
1318 unsigned long now = jiffies;
1319 long max_desync_factor;
1320 s32 cnf_temp_preferred_lft;
1321
1322 write_lock_bh(&idev->lock);
1323 if (ift) {
1324 spin_lock_bh(&ift->lock);
1325 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
1326 spin_unlock_bh(&ift->lock);
1327 tmpaddr = &addr;
1328 } else {
1329 tmpaddr = NULL;
1330 }
1331 retry:
1332 in6_dev_hold(idev);
1333 if (idev->cnf.use_tempaddr <= 0) {
1334 write_unlock_bh(&idev->lock);
1335 pr_info("%s: use_tempaddr is disabled\n", __func__);
1336 in6_dev_put(idev);
1337 ret = -1;
1338 goto out;
1339 }
1340 spin_lock_bh(&ifp->lock);
1341 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1342 idev->cnf.use_tempaddr = -1; /*XXX*/
1343 spin_unlock_bh(&ifp->lock);
1344 write_unlock_bh(&idev->lock);
1345 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1346 __func__);
1347 in6_dev_put(idev);
1348 ret = -1;
1349 goto out;
1350 }
1351 in6_ifa_hold(ifp);
1352 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1353 ipv6_try_regen_rndid(idev, tmpaddr);
1354 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1355 age = (now - ifp->tstamp) / HZ;
1356
1357 regen_advance = idev->cnf.regen_max_retry *
1358 idev->cnf.dad_transmits *
1359 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1360
1361 /* recalculate max_desync_factor each time and update
1362 * idev->desync_factor if it is larger than the new maximum
1363 */
1364 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1365 max_desync_factor = min_t(__u32,
1366 idev->cnf.max_desync_factor,
1367 cnf_temp_preferred_lft - regen_advance);
1368
1369 if (unlikely(idev->desync_factor > max_desync_factor)) {
1370 if (max_desync_factor > 0) {
1371 get_random_bytes(&idev->desync_factor,
1372 sizeof(idev->desync_factor));
1373 idev->desync_factor %= max_desync_factor;
1374 } else {
1375 idev->desync_factor = 0;
1376 }
1377 }
1378
1379 memset(&cfg, 0, sizeof(cfg));
1380 cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1381 idev->cnf.temp_valid_lft + age);
1382 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1383 cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
1384
1385 cfg.plen = ifp->prefix_len;
1386 tmp_tstamp = ifp->tstamp;
1387 spin_unlock_bh(&ifp->lock);
1388
1389 write_unlock_bh(&idev->lock);
1390
1391 /* A temporary address is created only if this calculated Preferred
1392 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1393 * an implementation must not create a temporary address with a zero
1394 * Preferred Lifetime.
1395 * Use age calculation as in addrconf_verify to avoid unnecessary
1396 * temporary addresses being generated.
1397 */
1398 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1399 if (cfg.preferred_lft <= regen_advance + age) {
1400 in6_ifa_put(ifp);
1401 in6_dev_put(idev);
1402 ret = -1;
1403 goto out;
1404 }
1405
1406 cfg.ifa_flags = IFA_F_TEMPORARY;
1407 /* set in addrconf_prefix_rcv() */
1408 if (ifp->flags & IFA_F_OPTIMISTIC)
1409 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1410
1411 cfg.pfx = &addr;
1412 cfg.scope = ipv6_addr_scope(cfg.pfx);
1413
1414 ift = ipv6_add_addr(idev, &cfg, block, NULL);
1415 if (IS_ERR(ift)) {
1416 in6_ifa_put(ifp);
1417 in6_dev_put(idev);
1418 pr_info("%s: retry temporary address regeneration\n", __func__);
1419 tmpaddr = &addr;
1420 write_lock_bh(&idev->lock);
1421 goto retry;
1422 }
1423
1424 spin_lock_bh(&ift->lock);
1425 ift->ifpub = ifp;
1426 ift->cstamp = now;
1427 ift->tstamp = tmp_tstamp;
1428 spin_unlock_bh(&ift->lock);
1429
1430 addrconf_dad_start(ift);
1431 in6_ifa_put(ift);
1432 in6_dev_put(idev);
1433 out:
1434 return ret;
1435 }
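/* Rough numbers, assuming the compiled-in defaults (REGEN_MAX_RETRY = 3,
 * dad_transmits = 1, a 1 s retransmit timer, TEMP_VALID_LIFETIME of 7 days,
 * TEMP_PREFERRED_LIFETIME of 1 day, MAX_DESYNC_FACTOR of 600 s): a fresh
 * temporary address gets a valid lifetime of about 7 days and a preferred
 * lifetime of 86400 s minus a random desync factor of at most 600 s, and
 * regeneration is skipped whenever that preferred lifetime would not exceed
 * regen_advance (3 s here) plus the address's current age.
 */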
1436
1437 /*
1438 * Choose an appropriate source address (RFC3484)
1439 */
1440 enum {
1441 IPV6_SADDR_RULE_INIT = 0,
1442 IPV6_SADDR_RULE_LOCAL,
1443 IPV6_SADDR_RULE_SCOPE,
1444 IPV6_SADDR_RULE_PREFERRED,
1445 #ifdef CONFIG_IPV6_MIP6
1446 IPV6_SADDR_RULE_HOA,
1447 #endif
1448 IPV6_SADDR_RULE_OIF,
1449 IPV6_SADDR_RULE_LABEL,
1450 IPV6_SADDR_RULE_PRIVACY,
1451 IPV6_SADDR_RULE_ORCHID,
1452 IPV6_SADDR_RULE_PREFIX,
1453 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1454 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1455 #endif
1456 IPV6_SADDR_RULE_MAX
1457 };
1458
1459 struct ipv6_saddr_score {
1460 int rule;
1461 int addr_type;
1462 struct inet6_ifaddr *ifa;
1463 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1464 int scopedist;
1465 int matchlen;
1466 };
1467
1468 struct ipv6_saddr_dst {
1469 const struct in6_addr *addr;
1470 int ifindex;
1471 int scope;
1472 int label;
1473 unsigned int prefs;
1474 };
1475
1476 static inline int ipv6_saddr_preferred(int type)
1477 {
1478 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1479 return 1;
1480 return 0;
1481 }
1482
1483 static bool ipv6_use_optimistic_addr(struct net *net,
1484 struct inet6_dev *idev)
1485 {
1486 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1487 if (!idev)
1488 return false;
1489 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1490 return false;
1491 if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1492 return false;
1493
1494 return true;
1495 #else
1496 return false;
1497 #endif
1498 }
1499
1500 static bool ipv6_allow_optimistic_dad(struct net *net,
1501 struct inet6_dev *idev)
1502 {
1503 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1504 if (!idev)
1505 return false;
1506 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1507 return false;
1508
1509 return true;
1510 #else
1511 return false;
1512 #endif
1513 }
1514
1515 static int ipv6_get_saddr_eval(struct net *net,
1516 struct ipv6_saddr_score *score,
1517 struct ipv6_saddr_dst *dst,
1518 int i)
1519 {
1520 int ret;
1521
1522 if (i <= score->rule) {
1523 switch (i) {
1524 case IPV6_SADDR_RULE_SCOPE:
1525 ret = score->scopedist;
1526 break;
1527 case IPV6_SADDR_RULE_PREFIX:
1528 ret = score->matchlen;
1529 break;
1530 default:
1531 ret = !!test_bit(i, score->scorebits);
1532 }
1533 goto out;
1534 }
1535
1536 switch (i) {
1537 case IPV6_SADDR_RULE_INIT:
1538 /* Rule 0: remember if hiscore is not ready yet */
1539 ret = !!score->ifa;
1540 break;
1541 case IPV6_SADDR_RULE_LOCAL:
1542 /* Rule 1: Prefer same address */
1543 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1544 break;
1545 case IPV6_SADDR_RULE_SCOPE:
1546 /* Rule 2: Prefer appropriate scope
1547 *
1548 * ret
1549 * ^
1550 * -1 | d 15
1551 * ---+--+-+---> scope
1552 * |
1553 * | d is scope of the destination.
1554 * B-d | \
1555 * | \ <- smaller scope is better
1556 * B-15 | \ if scope is enough for destination.
1557 * | ret = B - scope (-1 <= d <= scope <= 15).
1558 * d-C-1 | /
1559 * |/ <- greater is better
1560 * -C / if scope is not enough for destination.
1561 * /| ret = scope - C (-1 <= d < scope <= 15).
1562 *
1563 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1564 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1565 * Assume B = 0 and we get C > 29.
1566 */
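		/* Worked example with the actual scope constants: for a
		 * global destination (scope 0x0e) a global candidate scores
		 * -0x0e = -14 while a link-local candidate (scope 0x02)
		 * scores 0x02 - 128 = -126, so the global address wins; for
		 * a link-local destination the link-local candidate scores
		 * -2 and beats the global one at -14, i.e. the smallest
		 * still-sufficient scope is preferred.
		 */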
1567 ret = __ipv6_addr_src_scope(score->addr_type);
1568 if (ret >= dst->scope)
1569 ret = -ret;
1570 else
1571 ret -= 128; /* 30 is enough */
1572 score->scopedist = ret;
1573 break;
1574 case IPV6_SADDR_RULE_PREFERRED:
1575 {
1576 /* Rule 3: Avoid deprecated and optimistic addresses */
1577 u8 avoid = IFA_F_DEPRECATED;
1578
1579 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1580 avoid |= IFA_F_OPTIMISTIC;
1581 ret = ipv6_saddr_preferred(score->addr_type) ||
1582 !(score->ifa->flags & avoid);
1583 break;
1584 }
1585 #ifdef CONFIG_IPV6_MIP6
1586 case IPV6_SADDR_RULE_HOA:
1587 {
1588 /* Rule 4: Prefer home address */
1589 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1590 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1591 break;
1592 }
1593 #endif
1594 case IPV6_SADDR_RULE_OIF:
1595 /* Rule 5: Prefer outgoing interface */
1596 ret = (!dst->ifindex ||
1597 dst->ifindex == score->ifa->idev->dev->ifindex);
1598 break;
1599 case IPV6_SADDR_RULE_LABEL:
1600 /* Rule 6: Prefer matching label */
1601 ret = ipv6_addr_label(net,
1602 &score->ifa->addr, score->addr_type,
1603 score->ifa->idev->dev->ifindex) == dst->label;
1604 break;
1605 case IPV6_SADDR_RULE_PRIVACY:
1606 {
1607 /* Rule 7: Prefer public address
1608 * Note: prefer temporary address if use_tempaddr >= 2
1609 */
1610 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1611 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1612 score->ifa->idev->cnf.use_tempaddr >= 2;
1613 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1614 break;
1615 }
1616 case IPV6_SADDR_RULE_ORCHID:
1617 /* Rule 8-: Prefer ORCHID vs ORCHID or
1618 * non-ORCHID vs non-ORCHID
1619 */
1620 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1621 ipv6_addr_orchid(dst->addr));
1622 break;
1623 case IPV6_SADDR_RULE_PREFIX:
1624 /* Rule 8: Use longest matching prefix */
1625 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1626 if (ret > score->ifa->prefix_len)
1627 ret = score->ifa->prefix_len;
1628 score->matchlen = ret;
1629 break;
1630 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1631 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1632 /* Optimistic addresses still have lower precedence than other
1633 * preferred addresses.
1634 */
1635 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1636 break;
1637 #endif
1638 default:
1639 ret = 0;
1640 }
1641
1642 if (ret)
1643 __set_bit(i, score->scorebits);
1644 score->rule = i;
1645 out:
1646 return ret;
1647 }
1648
1649 static int __ipv6_dev_get_saddr(struct net *net,
1650 struct ipv6_saddr_dst *dst,
1651 struct inet6_dev *idev,
1652 struct ipv6_saddr_score *scores,
1653 int hiscore_idx)
1654 {
1655 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1656
1657 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1658 int i;
1659
1660 /*
1661 * - Tentative Address (RFC2462 section 5.4)
1662 * - A tentative address is not considered
1663 * "assigned to an interface" in the traditional
1664 * sense, unless it is also flagged as optimistic.
1665 * - Candidate Source Address (section 4)
1666 * - In any case, anycast addresses, multicast
1667 * addresses, and the unspecified address MUST
1668 * NOT be included in a candidate set.
1669 */
1670 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1671 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1672 continue;
1673
1674 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1675
1676 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1677 score->addr_type & IPV6_ADDR_MULTICAST)) {
1678 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1679 idev->dev->name);
1680 continue;
1681 }
1682
1683 score->rule = -1;
1684 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1685
1686 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1687 int minihiscore, miniscore;
1688
1689 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1690 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1691
1692 if (minihiscore > miniscore) {
1693 if (i == IPV6_SADDR_RULE_SCOPE &&
1694 score->scopedist > 0) {
1695 /*
1696 * special case:
1697 * each remaining entry
1698 * has too small (not enough)
1699 * scope, because ifa entries
1700 * are sorted by their scope
1701 * values.
1702 */
1703 goto out;
1704 }
1705 break;
1706 } else if (minihiscore < miniscore) {
1707 swap(hiscore, score);
1708 hiscore_idx = 1 - hiscore_idx;
1709
1710 /* restore our iterator */
1711 score->ifa = hiscore->ifa;
1712
1713 break;
1714 }
1715 }
1716 }
1717 out:
1718 return hiscore_idx;
1719 }
1720
1721 static int ipv6_get_saddr_master(struct net *net,
1722 const struct net_device *dst_dev,
1723 const struct net_device *master,
1724 struct ipv6_saddr_dst *dst,
1725 struct ipv6_saddr_score *scores,
1726 int hiscore_idx)
1727 {
1728 struct inet6_dev *idev;
1729
1730 idev = __in6_dev_get(dst_dev);
1731 if (idev)
1732 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1733 scores, hiscore_idx);
1734
1735 idev = __in6_dev_get(master);
1736 if (idev)
1737 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1738 scores, hiscore_idx);
1739
1740 return hiscore_idx;
1741 }
1742
1743 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1744 const struct in6_addr *daddr, unsigned int prefs,
1745 struct in6_addr *saddr)
1746 {
1747 struct ipv6_saddr_score scores[2], *hiscore;
1748 struct ipv6_saddr_dst dst;
1749 struct inet6_dev *idev;
1750 struct net_device *dev;
1751 int dst_type;
1752 bool use_oif_addr = false;
1753 int hiscore_idx = 0;
1754 int ret = 0;
1755
1756 dst_type = __ipv6_addr_type(daddr);
1757 dst.addr = daddr;
1758 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1759 dst.scope = __ipv6_addr_src_scope(dst_type);
1760 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1761 dst.prefs = prefs;
1762
1763 scores[hiscore_idx].rule = -1;
1764 scores[hiscore_idx].ifa = NULL;
1765
1766 rcu_read_lock();
1767
1768 /* Candidate Source Address (section 4)
1769 * - for multicast and link-local destination addresses,
1770 * the set of candidate source addresses MUST only
1771 * include addresses assigned to interfaces
1772 * belonging to the same link as the outgoing
1773 * interface.
1774 * (- For site-local destination addresses, the
1775 * set of candidate source addresses MUST only
1776 * include addresses assigned to interfaces
1777 * belonging to the same site as the outgoing
1778 * interface.)
1779 * - "It is RECOMMENDED that the candidate source addresses
1780 * be the set of unicast addresses assigned to the
1781 * interface that will be used to send to the destination
1782 * (the 'outgoing' interface)." (RFC 6724)
1783 */
1784 if (dst_dev) {
1785 idev = __in6_dev_get(dst_dev);
1786 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1787 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1788 (idev && idev->cnf.use_oif_addrs_only)) {
1789 use_oif_addr = true;
1790 }
1791 }
1792
1793 if (use_oif_addr) {
1794 if (idev)
1795 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1796 } else {
1797 const struct net_device *master;
1798 int master_idx = 0;
1799
1800 /* if dst_dev exists and is enslaved to an L3 device, then
1801 * prefer addresses from dst_dev and then the master over
1802 * any other enslaved devices in the L3 domain.
1803 */
1804 master = l3mdev_master_dev_rcu(dst_dev);
1805 if (master) {
1806 master_idx = master->ifindex;
1807
1808 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1809 master, &dst,
1810 scores, hiscore_idx);
1811
1812 if (scores[hiscore_idx].ifa)
1813 goto out;
1814 }
1815
1816 for_each_netdev_rcu(net, dev) {
1817 /* only consider addresses on devices in the
1818 * same L3 domain
1819 */
1820 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1821 continue;
1822 idev = __in6_dev_get(dev);
1823 if (!idev)
1824 continue;
1825 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1826 }
1827 }
1828
1829 out:
1830 hiscore = &scores[hiscore_idx];
1831 if (!hiscore->ifa)
1832 ret = -EADDRNOTAVAIL;
1833 else
1834 *saddr = hiscore->ifa->addr;
1835
1836 rcu_read_unlock();
1837 return ret;
1838 }
1839 EXPORT_SYMBOL(ipv6_dev_get_saddr);
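/* Hypothetical caller sketch (the surrounding variables are assumed, not
 * taken from this file):
 *
 *	struct in6_addr saddr;
 *
 *	if (!ipv6_dev_get_saddr(net, dst_dev, &fl6.daddr,
 *				IPV6_PREFER_SRC_PUBLIC, &saddr))
 *		fl6.saddr = saddr;
 *
 * A zero return means *saddr holds the chosen source address;
 * -EADDRNOTAVAIL means no usable candidate exists in the L3 domain.
 */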
1840
1841 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1842 u32 banned_flags)
1843 {
1844 struct inet6_ifaddr *ifp;
1845 int err = -EADDRNOTAVAIL;
1846
1847 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1848 if (ifp->scope > IFA_LINK)
1849 break;
1850 if (ifp->scope == IFA_LINK &&
1851 !(ifp->flags & banned_flags)) {
1852 *addr = ifp->addr;
1853 err = 0;
1854 break;
1855 }
1856 }
1857 return err;
1858 }
1859
1860 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1861 u32 banned_flags)
1862 {
1863 struct inet6_dev *idev;
1864 int err = -EADDRNOTAVAIL;
1865
1866 rcu_read_lock();
1867 idev = __in6_dev_get(dev);
1868 if (idev) {
1869 read_lock_bh(&idev->lock);
1870 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1871 read_unlock_bh(&idev->lock);
1872 }
1873 rcu_read_unlock();
1874 return err;
1875 }
1876
1877 static int ipv6_count_addresses(const struct inet6_dev *idev)
1878 {
1879 const struct inet6_ifaddr *ifp;
1880 int cnt = 0;
1881
1882 rcu_read_lock();
1883 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1884 cnt++;
1885 rcu_read_unlock();
1886 return cnt;
1887 }
1888
1889 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1890 const struct net_device *dev, int strict)
1891 {
1892 return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1893 strict, IFA_F_TENTATIVE);
1894 }
1895 EXPORT_SYMBOL(ipv6_chk_addr);
1896
1897 /* device argument is used to find the L3 domain of interest. If
1898 * skip_dev_check is set, then the ifp device is not checked against
1899 * the passed-in dev argument. So the two cases for address checks are:
1900 * 1. does the address exist in the L3 domain that dev is part of
1901 * (skip_dev_check = true), or
1902 *
1903 * 2. does the address exist on the specific device
1904 * (skip_dev_check = false)
1905 */
1906 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1907 const struct net_device *dev, bool skip_dev_check,
1908 int strict, u32 banned_flags)
1909 {
1910 unsigned int hash = inet6_addr_hash(net, addr);
1911 const struct net_device *l3mdev;
1912 struct inet6_ifaddr *ifp;
1913 u32 ifp_flags;
1914
1915 rcu_read_lock();
1916
1917 l3mdev = l3mdev_master_dev_rcu(dev);
1918 if (skip_dev_check)
1919 dev = NULL;
1920
1921 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1922 if (!net_eq(dev_net(ifp->idev->dev), net))
1923 continue;
1924
1925 if (l3mdev_master_dev_rcu(ifp->idev->dev) != l3mdev)
1926 continue;
1927
1928 /* Decouple optimistic from tentative for evaluation here.
1929 * Ban optimistic addresses explicitly, when required.
1930 */
1931 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1932 ? (ifp->flags&~IFA_F_TENTATIVE)
1933 : ifp->flags;
1934 if (ipv6_addr_equal(&ifp->addr, addr) &&
1935 !(ifp_flags&banned_flags) &&
1936 (!dev || ifp->idev->dev == dev ||
1937 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1938 rcu_read_unlock();
1939 return 1;
1940 }
1941 }
1942
1943 rcu_read_unlock();
1944 return 0;
1945 }
1946 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1947
1948
1949 /* Compares an address/prefix_len with addresses on device @dev.
1950 * If one is found it returns true.
1951 */
1952 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1953 const unsigned int prefix_len, struct net_device *dev)
1954 {
1955 const struct inet6_ifaddr *ifa;
1956 const struct inet6_dev *idev;
1957 bool ret = false;
1958
1959 rcu_read_lock();
1960 idev = __in6_dev_get(dev);
1961 if (idev) {
1962 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1963 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1964 if (ret)
1965 break;
1966 }
1967 }
1968 rcu_read_unlock();
1969
1970 return ret;
1971 }
1972 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1973
1974 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1975 {
1976 const struct inet6_ifaddr *ifa;
1977 const struct inet6_dev *idev;
1978 int onlink;
1979
1980 onlink = 0;
1981 rcu_read_lock();
1982 idev = __in6_dev_get(dev);
1983 if (idev) {
1984 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1985 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1986 ifa->prefix_len);
1987 if (onlink)
1988 break;
1989 }
1990 }
1991 rcu_read_unlock();
1992 return onlink;
1993 }
1994 EXPORT_SYMBOL(ipv6_chk_prefix);
1995
1996 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1997 struct net_device *dev, int strict)
1998 {
1999 unsigned int hash = inet6_addr_hash(net, addr);
2000 struct inet6_ifaddr *ifp, *result = NULL;
2001
2002 rcu_read_lock();
2003 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
2004 if (!net_eq(dev_net(ifp->idev->dev), net))
2005 continue;
2006 if (ipv6_addr_equal(&ifp->addr, addr)) {
2007 if (!dev || ifp->idev->dev == dev ||
2008 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2009 result = ifp;
2010 in6_ifa_hold(ifp);
2011 break;
2012 }
2013 }
2014 }
2015 rcu_read_unlock();
2016
2017 return result;
2018 }
2019
2020 /* Gets referenced address, destroys ifaddr */
2021
2022 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2023 {
2024 if (dad_failed)
2025 ifp->flags |= IFA_F_DADFAILED;
2026
2027 if (ifp->flags&IFA_F_TEMPORARY) {
2028 struct inet6_ifaddr *ifpub;
2029 spin_lock_bh(&ifp->lock);
2030 ifpub = ifp->ifpub;
2031 if (ifpub) {
2032 in6_ifa_hold(ifpub);
2033 spin_unlock_bh(&ifp->lock);
2034 ipv6_create_tempaddr(ifpub, ifp, true);
2035 in6_ifa_put(ifpub);
2036 } else {
2037 spin_unlock_bh(&ifp->lock);
2038 }
2039 ipv6_del_addr(ifp);
2040 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2041 spin_lock_bh(&ifp->lock);
2042 addrconf_del_dad_work(ifp);
2043 ifp->flags |= IFA_F_TENTATIVE;
2044 if (dad_failed)
2045 ifp->flags &= ~IFA_F_OPTIMISTIC;
2046 spin_unlock_bh(&ifp->lock);
2047 if (dad_failed)
2048 ipv6_ifa_notify(0, ifp);
2049 in6_ifa_put(ifp);
2050 } else {
2051 ipv6_del_addr(ifp);
2052 }
2053 }
2054
2055 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2056 {
2057 int err = -ENOENT;
2058
2059 spin_lock_bh(&ifp->lock);
2060 if (ifp->state == INET6_IFADDR_STATE_DAD) {
2061 ifp->state = INET6_IFADDR_STATE_POSTDAD;
2062 err = 0;
2063 }
2064 spin_unlock_bh(&ifp->lock);
2065
2066 return err;
2067 }
2068
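/* Called when a DAD conflict has been detected for @ifp (another node
 * answered our DAD probe). For stable-privacy addresses, try to generate
 * and add a replacement address with an incremented retry counter, up to
 * idgen_retries; in any case move the original address to ERRDAD and let
 * the DAD worker tear it down.
 */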
2069 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2070 {
2071 struct inet6_dev *idev = ifp->idev;
2072 struct net *net = dev_net(ifp->idev->dev);
2073
2074 if (addrconf_dad_end(ifp)) {
2075 in6_ifa_put(ifp);
2076 return;
2077 }
2078
2079 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2080 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2081
2082 spin_lock_bh(&ifp->lock);
2083
2084 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2085 struct in6_addr new_addr;
2086 struct inet6_ifaddr *ifp2;
2087 int retries = ifp->stable_privacy_retry + 1;
2088 struct ifa6_config cfg = {
2089 .pfx = &new_addr,
2090 .plen = ifp->prefix_len,
2091 .ifa_flags = ifp->flags,
2092 .valid_lft = ifp->valid_lft,
2093 .preferred_lft = ifp->prefered_lft,
2094 .scope = ifp->scope,
2095 };
2096
2097 if (retries > net->ipv6.sysctl.idgen_retries) {
2098 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2099 ifp->idev->dev->name);
2100 goto errdad;
2101 }
2102
2103 new_addr = ifp->addr;
2104 if (ipv6_generate_stable_address(&new_addr, retries,
2105 idev))
2106 goto errdad;
2107
2108 spin_unlock_bh(&ifp->lock);
2109
2110 if (idev->cnf.max_addresses &&
2111 ipv6_count_addresses(idev) >=
2112 idev->cnf.max_addresses)
2113 goto lock_errdad;
2114
2115 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2116 ifp->idev->dev->name);
2117
2118 ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2119 if (IS_ERR(ifp2))
2120 goto lock_errdad;
2121
2122 spin_lock_bh(&ifp2->lock);
2123 ifp2->stable_privacy_retry = retries;
2124 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2125 spin_unlock_bh(&ifp2->lock);
2126
2127 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2128 in6_ifa_put(ifp2);
2129 lock_errdad:
2130 spin_lock_bh(&ifp->lock);
2131 }
2132
2133 errdad:
2134 /* transition from _POSTDAD to _ERRDAD */
2135 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2136 spin_unlock_bh(&ifp->lock);
2137
2138 addrconf_mod_dad_work(ifp, 0);
2139 in6_ifa_put(ifp);
2140 }
2141
2142 /* Join the solicited-node multicast group for this address.
2143 * Caller must hold RTNL. */
2144 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2145 {
2146 struct in6_addr maddr;
2147
2148 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2149 return;
2150
2151 addrconf_addr_solict_mult(addr, &maddr);
2152 ipv6_dev_mc_inc(dev, &maddr);
2153 }
2154
2155 /* caller must hold RTNL */
2156 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2157 {
2158 struct in6_addr maddr;
2159
2160 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2161 return;
2162
2163 addrconf_addr_solict_mult(addr, &maddr);
2164 __ipv6_dev_mc_dec(idev, &maddr);
2165 }
2166
2167 /* caller must hold RTNL */
2168 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2169 {
2170 struct in6_addr addr;
2171
2172 if (ifp->prefix_len >= 127) /* RFC 6164 */
2173 return;
2174 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2175 if (ipv6_addr_any(&addr))
2176 return;
2177 __ipv6_dev_ac_inc(ifp->idev, &addr);
2178 }
2179
2180 /* caller must hold RTNL */
2181 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2182 {
2183 struct in6_addr addr;
2184
2185 if (ifp->prefix_len >= 127) /* RFC 6164 */
2186 return;
2187 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2188 if (ipv6_addr_any(&addr))
2189 return;
2190 __ipv6_dev_ac_dec(ifp->idev, &addr);
2191 }
2192
2193 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2194 {
2195 switch (dev->addr_len) {
2196 case ETH_ALEN:
2197 memcpy(eui, dev->dev_addr, 3);
2198 eui[3] = 0xFF;
2199 eui[4] = 0xFE;
2200 memcpy(eui + 5, dev->dev_addr + 3, 3);
2201 break;
2202 case EUI64_ADDR_LEN:
2203 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2204 eui[0] ^= 2;
2205 break;
2206 default:
2207 return -1;
2208 }
2209
2210 return 0;
2211 }
2212
2213 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2214 {
2215 union fwnet_hwaddr *ha;
2216
2217 if (dev->addr_len != FWNET_ALEN)
2218 return -1;
2219
2220 ha = (union fwnet_hwaddr *)dev->dev_addr;
2221
2222 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2223 eui[0] ^= 2;
2224 return 0;
2225 }
2226
2227 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2228 {
2229 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2230 if (dev->addr_len != ARCNET_ALEN)
2231 return -1;
2232 memset(eui, 0, 7);
2233 eui[7] = *(u8 *)dev->dev_addr;
2234 return 0;
2235 }
2236
2237 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2238 {
2239 if (dev->addr_len != INFINIBAND_ALEN)
2240 return -1;
2241 memcpy(eui, dev->dev_addr + 12, 8);
2242 eui[0] |= 2;
2243 return 0;
2244 }
2245
2246 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2247 {
2248 if (addr == 0)
2249 return -1;
2250 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2251 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2252 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2253 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2254 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2255 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2256 eui[1] = 0;
2257 eui[2] = 0x5E;
2258 eui[3] = 0xFE;
2259 memcpy(eui + 4, &addr, 4);
2260 return 0;
2261 }
2262
2263 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2264 {
2265 if (dev->priv_flags & IFF_ISATAP)
2266 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2267 return -1;
2268 }
2269
2270 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2271 {
2272 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2273 }
2274
2275 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2276 {
2277 memcpy(eui, dev->perm_addr, 3);
2278 memcpy(eui + 5, dev->perm_addr + 3, 3);
2279 eui[3] = 0xFF;
2280 eui[4] = 0xFE;
2281 eui[0] ^= 2;
2282 return 0;
2283 }
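/* Worked example of the modified EUI-64 construction above: a 48-bit
 * hardware address such as 00:11:22:33:44:55 is split around an inserted
 * ff:fe, and the universal/local bit (bit 1 of the first octet) is
 * inverted, giving the interface identifier 02:11:22:ff:fe:33:44:55,
 * i.e. an address suffix of ::0211:22ff:fe33:4455.
 */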
2284
2285 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2286 {
2287 switch (dev->type) {
2288 case ARPHRD_ETHER:
2289 case ARPHRD_FDDI:
2290 return addrconf_ifid_eui48(eui, dev);
2291 case ARPHRD_ARCNET:
2292 return addrconf_ifid_arcnet(eui, dev);
2293 case ARPHRD_INFINIBAND:
2294 return addrconf_ifid_infiniband(eui, dev);
2295 case ARPHRD_SIT:
2296 return addrconf_ifid_sit(eui, dev);
2297 case ARPHRD_IPGRE:
2298 case ARPHRD_TUNNEL:
2299 return addrconf_ifid_gre(eui, dev);
2300 case ARPHRD_6LOWPAN:
2301 return addrconf_ifid_6lowpan(eui, dev);
2302 case ARPHRD_IEEE1394:
2303 return addrconf_ifid_ieee1394(eui, dev);
2304 case ARPHRD_TUNNEL6:
2305 case ARPHRD_IP6GRE:
2306 case ARPHRD_RAWIP:
2307 return addrconf_ifid_ip6tnl(eui, dev);
2308 }
2309 return -1;
2310 }
2311
2312 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2313 {
2314 int err = -1;
2315 struct inet6_ifaddr *ifp;
2316
2317 read_lock_bh(&idev->lock);
2318 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2319 if (ifp->scope > IFA_LINK)
2320 break;
2321 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2322 memcpy(eui, ifp->addr.s6_addr+8, 8);
2323 err = 0;
2324 break;
2325 }
2326 }
2327 read_unlock_bh(&idev->lock);
2328 return err;
2329 }
2330
2331 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2332 static void ipv6_regen_rndid(struct inet6_dev *idev)
2333 {
2334 regen:
2335 get_random_bytes(idev->rndid, sizeof(idev->rndid));
2336 idev->rndid[0] &= ~0x02;
2337
2338 /*
2339 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
2340 * check that the generated address is not inappropriate:
2341 *
2342 * - Reserved subnet anycast (RFC 2526)
2343 * 11111101 11....11 1xxxxxxx
2344 * - ISATAP (RFC4214) 6.1
2345 * 00-00-5E-FE-xx-xx-xx-xx
2346 * - value 0
2347 * - XXX: already assigned to an address on the device
2348 */
2349 if (idev->rndid[0] == 0xfd &&
2350 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
2351 (idev->rndid[7]&0x80))
2352 goto regen;
2353 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
2354 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
2355 goto regen;
2356 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
2357 goto regen;
2358 }
2359 }
2360
2361 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2362 {
2363 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2364 ipv6_regen_rndid(idev);
2365 }
2366
2367 /*
2368 * Add prefix route.
2369 */
2370
2371 static void
2372 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2373 struct net_device *dev, unsigned long expires,
2374 u32 flags, gfp_t gfp_flags)
2375 {
2376 struct fib6_config cfg = {
2377 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2378 .fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2379 .fc_ifindex = dev->ifindex,
2380 .fc_expires = expires,
2381 .fc_dst_len = plen,
2382 .fc_flags = RTF_UP | flags,
2383 .fc_nlinfo.nl_net = dev_net(dev),
2384 .fc_protocol = RTPROT_KERNEL,
2385 .fc_type = RTN_UNICAST,
2386 };
2387
2388 cfg.fc_dst = *pfx;
2389
2390 /* Prevent useless cloning on PtP SIT.
2391 This is done here on the assumption that the whole
2392 class of non-broadcast devices does not need cloning.
2393 */
2394 #if IS_ENABLED(CONFIG_IPV6_SIT)
2395 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2396 cfg.fc_flags |= RTF_NONEXTHOP;
2397 #endif
2398
2399 ip6_route_add(&cfg, gfp_flags, NULL);
2400 }
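/* For example, a /64 prefix learned from a Router Advertisement on an
 * interface typically shows up via this helper as a kernel route like
 *	2001:db8:1::/64 dev eth0 proto kernel metric 256 expires ...
 * in the main table, unless the device is enslaved to an l3mdev, in which
 * case the route goes into that device's FIB table instead (the addresses
 * and device name above are only illustrative).
 */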
2401
2402
2403 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2404 int plen,
2405 const struct net_device *dev,
2406 u32 flags, u32 noflags,
2407 bool no_gw)
2408 {
2409 struct fib6_node *fn;
2410 struct fib6_info *rt = NULL;
2411 struct fib6_table *table;
2412 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2413
2414 table = fib6_get_table(dev_net(dev), tb_id);
2415 if (!table)
2416 return NULL;
2417
2418 rcu_read_lock();
2419 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2420 if (!fn)
2421 goto out;
2422
2423 for_each_fib6_node_rt_rcu(fn) {
2424 if (rt->fib6_nh.fib_nh_dev->ifindex != dev->ifindex)
2425 continue;
2426 if (no_gw && rt->fib6_nh.fib_nh_gw_family)
2427 continue;
2428 if ((rt->fib6_flags & flags) != flags)
2429 continue;
2430 if ((rt->fib6_flags & noflags) != 0)
2431 continue;
2432 if (!fib6_info_hold_safe(rt))
2433 continue;
2434 break;
2435 }
2436 out:
2437 rcu_read_unlock();
2438 return rt;
2439 }
2440
2441
2442 /* Create "default" multicast route to the interface */
2443
2444 static void addrconf_add_mroute(struct net_device *dev)
2445 {
2446 struct fib6_config cfg = {
2447 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2448 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2449 .fc_ifindex = dev->ifindex,
2450 .fc_dst_len = 8,
2451 .fc_flags = RTF_UP,
2452 .fc_type = RTN_UNICAST,
2453 .fc_nlinfo.nl_net = dev_net(dev),
2454 };
2455
2456 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2457
2458 ip6_route_add(&cfg, GFP_KERNEL, NULL);
2459 }
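/* The route installed above is the all-multicast prefix ff00::/8 on the
 * given device (metric IP6_RT_PRIO_ADDRCONF, local table unless the device
 * belongs to an l3mdev), so locally originated multicast can be routed out
 * of the interface.
 */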
2460
2461 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2462 {
2463 struct inet6_dev *idev;
2464
2465 ASSERT_RTNL();
2466
2467 idev = ipv6_find_idev(dev);
2468 if (!idev)
2469 return ERR_PTR(-ENOBUFS);
2470
2471 if (idev->cnf.disable_ipv6)
2472 return ERR_PTR(-EACCES);
2473
2474 /* Add default multicast route */
2475 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2476 addrconf_add_mroute(dev);
2477
2478 return idev;
2479 }
2480
2481 static void manage_tempaddrs(struct inet6_dev *idev,
2482 struct inet6_ifaddr *ifp,
2483 __u32 valid_lft, __u32 prefered_lft,
2484 bool create, unsigned long now)
2485 {
2486 u32 flags;
2487 struct inet6_ifaddr *ift;
2488
2489 read_lock_bh(&idev->lock);
2490 /* update all temporary addresses in the list */
2491 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2492 int age, max_valid, max_prefered;
2493
2494 if (ifp != ift->ifpub)
2495 continue;
2496
2497 /* RFC 4941 section 3.3:
2498 * If a received option will extend the lifetime of a public
2499 * address, the lifetimes of temporary addresses should
2500 * be extended, subject to the overall constraint that no
2501 * temporary addresses should ever remain "valid" or "preferred"
2502 * for a time longer than (TEMP_VALID_LIFETIME) or
2503 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2504 */
2505 age = (now - ift->cstamp) / HZ;
2506 max_valid = idev->cnf.temp_valid_lft - age;
2507 if (max_valid < 0)
2508 max_valid = 0;
2509
2510 max_prefered = idev->cnf.temp_prefered_lft -
2511 idev->desync_factor - age;
2512 if (max_prefered < 0)
2513 max_prefered = 0;
2514
2515 if (valid_lft > max_valid)
2516 valid_lft = max_valid;
2517
2518 if (prefered_lft > max_prefered)
2519 prefered_lft = max_prefered;
2520
2521 spin_lock(&ift->lock);
2522 flags = ift->flags;
2523 ift->valid_lft = valid_lft;
2524 ift->prefered_lft = prefered_lft;
2525 ift->tstamp = now;
2526 if (prefered_lft > 0)
2527 ift->flags &= ~IFA_F_DEPRECATED;
2528
2529 spin_unlock(&ift->lock);
2530 if (!(flags&IFA_F_TENTATIVE))
2531 ipv6_ifa_notify(0, ift);
2532 }
2533
2534 if ((create || list_empty(&idev->tempaddr_list)) &&
2535 idev->cnf.use_tempaddr > 0) {
2536 /* When a new public address is created as described
2537 * in [ADDRCONF], also create a new temporary address.
2538 * Also create a temporary address if temporary addresses
2539 * are enabled but none currently exists.
2540 */
2541 read_unlock_bh(&idev->lock);
2542 ipv6_create_tempaddr(ifp, NULL, false);
2543 } else {
2544 read_unlock_bh(&idev->lock);
2545 }
2546 }
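/* Example of the clamping above, assuming the default temp_valid_lft of
 * 7 days (604800s) and temp_prefered_lft of 1 day (86400s): for a
 * temporary address created two days ago, max_valid is
 * 604800 - 172800 = 432000s, so an RA advertising a 30-day valid lifetime
 * is clamped to 432000s; max_prefered is already negative, so the
 * preferred lifetime is clamped to 0 and the address stays deprecated.
 */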
2547
2548 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2549 {
2550 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2551 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2552 }
2553
2554 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2555 const struct prefix_info *pinfo,
2556 struct inet6_dev *in6_dev,
2557 const struct in6_addr *addr, int addr_type,
2558 u32 addr_flags, bool sllao, bool tokenized,
2559 __u32 valid_lft, u32 prefered_lft)
2560 {
2561 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2562 int create = 0, update_lft = 0;
2563
2564 if (!ifp && valid_lft) {
2565 int max_addresses = in6_dev->cnf.max_addresses;
2566 struct ifa6_config cfg = {
2567 .pfx = addr,
2568 .plen = pinfo->prefix_len,
2569 .ifa_flags = addr_flags,
2570 .valid_lft = valid_lft,
2571 .preferred_lft = prefered_lft,
2572 .scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2573 };
2574
2575 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2576 if ((net->ipv6.devconf_all->optimistic_dad ||
2577 in6_dev->cnf.optimistic_dad) &&
2578 !net->ipv6.devconf_all->forwarding && sllao)
2579 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2580 #endif
2581
2582 /* Do not allow the creation of too many autoconfigured
2583 * addresses; this would be too easy a way to crash the kernel.
2584 */
2585 if (!max_addresses ||
2586 ipv6_count_addresses(in6_dev) < max_addresses)
2587 ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2588
2589 if (IS_ERR_OR_NULL(ifp))
2590 return -1;
2591
2592 create = 1;
2593 spin_lock_bh(&ifp->lock);
2594 ifp->flags |= IFA_F_MANAGETEMPADDR;
2595 ifp->cstamp = jiffies;
2596 ifp->tokenized = tokenized;
2597 spin_unlock_bh(&ifp->lock);
2598 addrconf_dad_start(ifp);
2599 }
2600
2601 if (ifp) {
2602 u32 flags;
2603 unsigned long now;
2604 u32 stored_lft;
2605
2606 /* update lifetime (RFC2462 5.5.3 e) */
2607 spin_lock_bh(&ifp->lock);
2608 now = jiffies;
2609 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2610 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2611 else
2612 stored_lft = 0;
2613 if (!create && stored_lft) {
2614 const u32 minimum_lft = min_t(u32,
2615 stored_lft, MIN_VALID_LIFETIME);
2616 valid_lft = max(valid_lft, minimum_lft);
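/* The clamp above follows RFC 4862 5.5.3(e): an unauthenticated
 * RA cannot reduce the remaining valid lifetime below two hours
 * (MIN_VALID_LIFETIME), and cannot reduce it at all once it is
 * already below two hours.
 */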
2617
2618 /* RFC4862 Section 5.5.3e:
2619 * "Note that the preferred lifetime of the
2620 * corresponding address is always reset to
2621 * the Preferred Lifetime in the received
2622 * Prefix Information option, regardless of
2623 * whether the valid lifetime is also reset or
2624 * ignored."
2625 *
2626 * So we should always update prefered_lft here.
2627 */
2628 update_lft = 1;
2629 }
2630
2631 if (update_lft) {
2632 ifp->valid_lft = valid_lft;
2633 ifp->prefered_lft = prefered_lft;
2634 ifp->tstamp = now;
2635 flags = ifp->flags;
2636 ifp->flags &= ~IFA_F_DEPRECATED;
2637 spin_unlock_bh(&ifp->lock);
2638
2639 if (!(flags&IFA_F_TENTATIVE))
2640 ipv6_ifa_notify(0, ifp);
2641 } else
2642 spin_unlock_bh(&ifp->lock);
2643
2644 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2645 create, now);
2646
2647 in6_ifa_put(ifp);
2648 addrconf_verify();
2649 }
2650
2651 return 0;
2652 }
2653 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2654
2655 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2656 {
2657 struct prefix_info *pinfo;
2658 __u32 valid_lft;
2659 __u32 prefered_lft;
2660 int addr_type, err;
2661 u32 addr_flags = 0;
2662 struct inet6_dev *in6_dev;
2663 struct net *net = dev_net(dev);
2664
2665 pinfo = (struct prefix_info *) opt;
2666
2667 if (len < sizeof(struct prefix_info)) {
2668 netdev_dbg(dev, "addrconf: prefix option too short\n");
2669 return;
2670 }
2671
2672 /*
2673 * Validation checks ([ADDRCONF], page 19)
2674 */
2675
2676 addr_type = ipv6_addr_type(&pinfo->prefix);
2677
2678 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2679 return;
2680
2681 valid_lft = ntohl(pinfo->valid);
2682 prefered_lft = ntohl(pinfo->prefered);
2683
2684 if (prefered_lft > valid_lft) {
2685 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2686 return;
2687 }
2688
2689 in6_dev = in6_dev_get(dev);
2690
2691 if (!in6_dev) {
2692 net_dbg_ratelimited("addrconf: device %s not configured\n",
2693 dev->name);
2694 return;
2695 }
2696
2697 /*
2698 * Two things going on here:
2699 * 1) Add routes for on-link prefixes
2700 * 2) Configure prefixes with the auto flag set
2701 */
2702
2703 if (pinfo->onlink) {
2704 struct fib6_info *rt;
2705 unsigned long rt_expires;
2706
2707 /* Avoid arithmetic overflow. Really, we could
2708 * save rt_expires in seconds, likely valid_lft,
2709 * but it would require division in fib gc, which is
2710 * not good.
2711 */
2712 if (HZ > USER_HZ)
2713 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2714 else
2715 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2716
2717 if (addrconf_finite_timeout(rt_expires))
2718 rt_expires *= HZ;
2719
2720 rt = addrconf_get_prefix_route(&pinfo->prefix,
2721 pinfo->prefix_len,
2722 dev,
2723 RTF_ADDRCONF | RTF_PREFIX_RT,
2724 RTF_DEFAULT, true);
2725
2726 if (rt) {
2727 /* Autoconf prefix route */
2728 if (valid_lft == 0) {
2729 ip6_del_rt(net, rt);
2730 rt = NULL;
2731 } else if (addrconf_finite_timeout(rt_expires)) {
2732 /* not infinity */
2733 fib6_set_expires(rt, jiffies + rt_expires);
2734 } else {
2735 fib6_clean_expires(rt);
2736 }
2737 } else if (valid_lft) {
2738 clock_t expires = 0;
2739 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2740 if (addrconf_finite_timeout(rt_expires)) {
2741 /* not infinity */
2742 flags |= RTF_EXPIRES;
2743 expires = jiffies_to_clock_t(rt_expires);
2744 }
2745 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2746 0, dev, expires, flags,
2747 GFP_ATOMIC);
2748 }
2749 fib6_info_release(rt);
2750 }
2751
2752 /* Try to figure out our local address for this prefix */
2753
2754 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2755 struct in6_addr addr;
2756 bool tokenized = false, dev_addr_generated = false;
2757
2758 if (pinfo->prefix_len == 64) {
2759 memcpy(&addr, &pinfo->prefix, 8);
2760
2761 if (!ipv6_addr_any(&in6_dev->token)) {
2762 read_lock_bh(&in6_dev->lock);
2763 memcpy(addr.s6_addr + 8,
2764 in6_dev->token.s6_addr + 8, 8);
2765 read_unlock_bh(&in6_dev->lock);
2766 tokenized = true;
2767 } else if (is_addr_mode_generate_stable(in6_dev) &&
2768 !ipv6_generate_stable_address(&addr, 0,
2769 in6_dev)) {
2770 addr_flags |= IFA_F_STABLE_PRIVACY;
2771 goto ok;
2772 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2773 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2774 goto put;
2775 } else {
2776 dev_addr_generated = true;
2777 }
2778 goto ok;
2779 }
2780 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2781 pinfo->prefix_len);
2782 goto put;
2783
2784 ok:
2785 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2786 &addr, addr_type,
2787 addr_flags, sllao,
2788 tokenized, valid_lft,
2789 prefered_lft);
2790 if (err)
2791 goto put;
2792
2793 /* Ignore the error case here because the previous prefix address
2794 * add was successful, and that is what will be notified.
2795 */
2796 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2797 addr_type, addr_flags, sllao,
2798 tokenized, valid_lft,
2799 prefered_lft,
2800 dev_addr_generated);
2801 }
2802 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2803 put:
2804 in6_dev_put(in6_dev);
2805 }
2806
2807 /*
2808 * Set destination address.
2809 * Special case for SIT interfaces where we create a new "virtual"
2810 * device.
2811 */
2812 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2813 {
2814 struct in6_ifreq ireq;
2815 struct net_device *dev;
2816 int err = -EINVAL;
2817
2818 rtnl_lock();
2819
2820 err = -EFAULT;
2821 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2822 goto err_exit;
2823
2824 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2825
2826 err = -ENODEV;
2827 if (!dev)
2828 goto err_exit;
2829
2830 #if IS_ENABLED(CONFIG_IPV6_SIT)
2831 if (dev->type == ARPHRD_SIT) {
2832 const struct net_device_ops *ops = dev->netdev_ops;
2833 struct ifreq ifr;
2834 struct ip_tunnel_parm p;
2835
2836 err = -EADDRNOTAVAIL;
2837 if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
2838 goto err_exit;
2839
2840 memset(&p, 0, sizeof(p));
2841 p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
2842 p.iph.saddr = 0;
2843 p.iph.version = 4;
2844 p.iph.ihl = 5;
2845 p.iph.protocol = IPPROTO_IPV6;
2846 p.iph.ttl = 64;
2847 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
2848
2849 if (ops->ndo_do_ioctl) {
2850 mm_segment_t oldfs = get_fs();
2851
2852 set_fs(KERNEL_DS);
2853 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
2854 set_fs(oldfs);
2855 } else
2856 err = -EOPNOTSUPP;
2857
2858 if (err == 0) {
2859 err = -ENOBUFS;
2860 dev = __dev_get_by_name(net, p.name);
2861 if (!dev)
2862 goto err_exit;
2863 err = dev_open(dev, NULL);
2864 }
2865 }
2866 #endif
2867
2868 err_exit:
2869 rtnl_unlock();
2870 return err;
2871 }
2872
2873 static int ipv6_mc_config(struct sock *sk, bool join,
2874 const struct in6_addr *addr, int ifindex)
2875 {
2876 int ret;
2877
2878 ASSERT_RTNL();
2879
2880 lock_sock(sk);
2881 if (join)
2882 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2883 else
2884 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2885 release_sock(sk);
2886
2887 return ret;
2888 }
2889
2890 /*
2891 * Manual configuration of address on an interface
2892 */
2893 static int inet6_addr_add(struct net *net, int ifindex,
2894 struct ifa6_config *cfg,
2895 struct netlink_ext_ack *extack)
2896 {
2897 struct inet6_ifaddr *ifp;
2898 struct inet6_dev *idev;
2899 struct net_device *dev;
2900 unsigned long timeout;
2901 clock_t expires;
2902 u32 flags;
2903
2904 ASSERT_RTNL();
2905
2906 if (cfg->plen > 128)
2907 return -EINVAL;
2908
2909 /* check the lifetime */
2910 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
2911 return -EINVAL;
2912
2913 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64)
2914 return -EINVAL;
2915
2916 dev = __dev_get_by_index(net, ifindex);
2917 if (!dev)
2918 return -ENODEV;
2919
2920 idev = addrconf_add_dev(dev);
2921 if (IS_ERR(idev))
2922 return PTR_ERR(idev);
2923
2924 if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2925 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2926 true, cfg->pfx, ifindex);
2927
2928 if (ret < 0)
2929 return ret;
2930 }
2931
2932 cfg->scope = ipv6_addr_scope(cfg->pfx);
2933
2934 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
2935 if (addrconf_finite_timeout(timeout)) {
2936 expires = jiffies_to_clock_t(timeout * HZ);
2937 cfg->valid_lft = timeout;
2938 flags = RTF_EXPIRES;
2939 } else {
2940 expires = 0;
2941 flags = 0;
2942 cfg->ifa_flags |= IFA_F_PERMANENT;
2943 }
2944
2945 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
2946 if (addrconf_finite_timeout(timeout)) {
2947 if (timeout == 0)
2948 cfg->ifa_flags |= IFA_F_DEPRECATED;
2949 cfg->preferred_lft = timeout;
2950 }
2951
2952 ifp = ipv6_add_addr(idev, cfg, true, extack);
2953 if (!IS_ERR(ifp)) {
2954 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
2955 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
2956 ifp->rt_priority, dev, expires,
2957 flags, GFP_KERNEL);
2958 }
2959
2960 /* Send a netlink notification if DAD is enabled and
2961 * optimistic flag is not set
2962 */
2963 if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
2964 ipv6_ifa_notify(0, ifp);
2965 /*
2966 * Note that section 3.1 of RFC 4429 indicates
2967 * that the Optimistic flag should not be set for
2968 * manually configured addresses
2969 */
2970 addrconf_dad_start(ifp);
2971 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
2972 manage_tempaddrs(idev, ifp, cfg->valid_lft,
2973 cfg->preferred_lft, true, jiffies);
2974 in6_ifa_put(ifp);
2975 addrconf_verify_rtnl();
2976 return 0;
2977 } else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2978 ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
2979 cfg->pfx, ifindex);
2980 }
2981
2982 return PTR_ERR(ifp);
2983 }
2984
2985 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2986 const struct in6_addr *pfx, unsigned int plen)
2987 {
2988 struct inet6_ifaddr *ifp;
2989 struct inet6_dev *idev;
2990 struct net_device *dev;
2991
2992 if (plen > 128)
2993 return -EINVAL;
2994
2995 dev = __dev_get_by_index(net, ifindex);
2996 if (!dev)
2997 return -ENODEV;
2998
2999 idev = __in6_dev_get(dev);
3000 if (!idev)
3001 return -ENXIO;
3002
3003 read_lock_bh(&idev->lock);
3004 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3005 if (ifp->prefix_len == plen &&
3006 ipv6_addr_equal(pfx, &ifp->addr)) {
3007 in6_ifa_hold(ifp);
3008 read_unlock_bh(&idev->lock);
3009
3010 if (!(ifp->flags & IFA_F_TEMPORARY) &&
3011 (ifa_flags & IFA_F_MANAGETEMPADDR))
3012 manage_tempaddrs(idev, ifp, 0, 0, false,
3013 jiffies);
3014 ipv6_del_addr(ifp);
3015 addrconf_verify_rtnl();
3016 if (ipv6_addr_is_multicast(pfx)) {
3017 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3018 false, pfx, dev->ifindex);
3019 }
3020 return 0;
3021 }
3022 }
3023 read_unlock_bh(&idev->lock);
3024 return -EADDRNOTAVAIL;
3025 }
3026
3027
3028 int addrconf_add_ifaddr(struct net *net, void __user *arg)
3029 {
3030 struct ifa6_config cfg = {
3031 .ifa_flags = IFA_F_PERMANENT,
3032 .preferred_lft = INFINITY_LIFE_TIME,
3033 .valid_lft = INFINITY_LIFE_TIME,
3034 };
3035 struct in6_ifreq ireq;
3036 int err;
3037
3038 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3039 return -EPERM;
3040
3041 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3042 return -EFAULT;
3043
3044 cfg.pfx = &ireq.ifr6_addr;
3045 cfg.plen = ireq.ifr6_prefixlen;
3046
3047 rtnl_lock();
3048 err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
3049 rtnl_unlock();
3050 return err;
3051 }
3052
3053 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3054 {
3055 struct in6_ifreq ireq;
3056 int err;
3057
3058 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3059 return -EPERM;
3060
3061 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3062 return -EFAULT;
3063
3064 rtnl_lock();
3065 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3066 ireq.ifr6_prefixlen);
3067 rtnl_unlock();
3068 return err;
3069 }
3070
3071 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3072 int plen, int scope)
3073 {
3074 struct inet6_ifaddr *ifp;
3075 struct ifa6_config cfg = {
3076 .pfx = addr,
3077 .plen = plen,
3078 .ifa_flags = IFA_F_PERMANENT,
3079 .valid_lft = INFINITY_LIFE_TIME,
3080 .preferred_lft = INFINITY_LIFE_TIME,
3081 .scope = scope
3082 };
3083
3084 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3085 if (!IS_ERR(ifp)) {
3086 spin_lock_bh(&ifp->lock);
3087 ifp->flags &= ~IFA_F_TENTATIVE;
3088 spin_unlock_bh(&ifp->lock);
3089 rt_genid_bump_ipv6(dev_net(idev->dev));
3090 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3091 in6_ifa_put(ifp);
3092 }
3093 }
3094
3095 #if IS_ENABLED(CONFIG_IPV6_SIT)
3096 static void sit_add_v4_addrs(struct inet6_dev *idev)
3097 {
3098 struct in6_addr addr;
3099 struct net_device *dev;
3100 struct net *net = dev_net(idev->dev);
3101 int scope, plen;
3102 u32 pflags = 0;
3103
3104 ASSERT_RTNL();
3105
3106 memset(&addr, 0, sizeof(struct in6_addr));
3107 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
3108
3109 if (idev->dev->flags&IFF_POINTOPOINT) {
3110 addr.s6_addr32[0] = htonl(0xfe800000);
3111 scope = IFA_LINK;
3112 plen = 64;
3113 } else {
3114 scope = IPV6_ADDR_COMPATv4;
3115 plen = 96;
3116 pflags |= RTF_NONEXTHOP;
3117 }
3118
3119 if (addr.s6_addr32[3]) {
3120 add_addr(idev, &addr, plen, scope);
3121 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3122 GFP_KERNEL);
3123 return;
3124 }
3125
3126 for_each_netdev(net, dev) {
3127 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3128 if (in_dev && (dev->flags & IFF_UP)) {
3129 struct in_ifaddr *ifa;
3130
3131 int flag = scope;
3132
3133 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
3134
3135 addr.s6_addr32[3] = ifa->ifa_local;
3136
3137 if (ifa->ifa_scope == RT_SCOPE_LINK)
3138 continue;
3139 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3140 if (idev->dev->flags&IFF_POINTOPOINT)
3141 continue;
3142 flag |= IFA_HOST;
3143 }
3144
3145 add_addr(idev, &addr, plen, flag);
3146 addrconf_prefix_route(&addr, plen, 0, idev->dev,
3147 0, pflags, GFP_KERNEL);
3148 }
3149 }
3150 }
3151 }
3152 #endif
3153
3154 static void init_loopback(struct net_device *dev)
3155 {
3156 struct inet6_dev *idev;
3157
3158 /* ::1 */
3159
3160 ASSERT_RTNL();
3161
3162 idev = ipv6_find_idev(dev);
3163 if (!idev) {
3164 pr_debug("%s: add_dev failed\n", __func__);
3165 return;
3166 }
3167
3168 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3169 }
3170
3171 void addrconf_add_linklocal(struct inet6_dev *idev,
3172 const struct in6_addr *addr, u32 flags)
3173 {
3174 struct ifa6_config cfg = {
3175 .pfx = addr,
3176 .plen = 64,
3177 .ifa_flags = flags | IFA_F_PERMANENT,
3178 .valid_lft = INFINITY_LIFE_TIME,
3179 .preferred_lft = INFINITY_LIFE_TIME,
3180 .scope = IFA_LINK
3181 };
3182 struct inet6_ifaddr *ifp;
3183
3184 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3185 if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3186 idev->cnf.optimistic_dad) &&
3187 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3188 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3189 #endif
3190
3191 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3192 if (!IS_ERR(ifp)) {
3193 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3194 0, 0, GFP_ATOMIC);
3195 addrconf_dad_start(ifp);
3196 in6_ifa_put(ifp);
3197 }
3198 }
3199 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3200
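/* Reject interface identifiers that RFC 4291 / RFC 2526 reserve: the
 * all-zero subnet-router anycast IID, the 0200:5EFF:FE00:0000 -
 * 0200:5EFF:FEFF:FFFF block reserved for IANA/ISATAP use, and the highest
 * 128 IIDs (FDFF:FFFF:FFFF:FF80 - FDFF:FFFF:FFFF:FFFF) reserved for subnet
 * anycast addresses.
 */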
3201 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3202 {
3203 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3204 return true;
3205
3206 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3207 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3208 return true;
3209
3210 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3211 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3212 return true;
3213
3214 return false;
3215 }
3216
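/* RFC 7217-style stable privacy interface identifiers: the IID is a SHA-1
 * digest of (secret, prefix, hardware address, dad_count), so it is stable
 * for a given prefix/interface/secret but opaque to outside observers.
 * dad_count is bumped and the hash recomputed when the result collides
 * with a reserved IID, and by the caller after a DAD conflict.
 */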
3217 static int ipv6_generate_stable_address(struct in6_addr *address,
3218 u8 dad_count,
3219 const struct inet6_dev *idev)
3220 {
3221 static DEFINE_SPINLOCK(lock);
3222 static __u32 digest[SHA_DIGEST_WORDS];
3223 static __u32 workspace[SHA_WORKSPACE_WORDS];
3224
3225 static union {
3226 char __data[SHA_MESSAGE_BYTES];
3227 struct {
3228 struct in6_addr secret;
3229 __be32 prefix[2];
3230 unsigned char hwaddr[MAX_ADDR_LEN];
3231 u8 dad_count;
3232 } __packed;
3233 } data;
3234
3235 struct in6_addr secret;
3236 struct in6_addr temp;
3237 struct net *net = dev_net(idev->dev);
3238
3239 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3240
3241 if (idev->cnf.stable_secret.initialized)
3242 secret = idev->cnf.stable_secret.secret;
3243 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3244 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3245 else
3246 return -1;
3247
3248 retry:
3249 spin_lock_bh(&lock);
3250
3251 sha_init(digest);
3252 memset(&data, 0, sizeof(data));
3253 memset(workspace, 0, sizeof(workspace));
3254 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3255 data.prefix[0] = address->s6_addr32[0];
3256 data.prefix[1] = address->s6_addr32[1];
3257 data.secret = secret;
3258 data.dad_count = dad_count;
3259
3260 sha_transform(digest, data.__data, workspace);
3261
3262 temp = *address;
3263 temp.s6_addr32[2] = (__force __be32)digest[0];
3264 temp.s6_addr32[3] = (__force __be32)digest[1];
3265
3266 spin_unlock_bh(&lock);
3267
3268 if (ipv6_reserved_interfaceid(temp)) {
3269 dad_count++;
3270 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3271 return -1;
3272 goto retry;
3273 }
3274
3275 *address = temp;
3276 return 0;
3277 }
3278
3279 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3280 {
3281 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3282
3283 if (s->initialized)
3284 return;
3285 s = &idev->cnf.stable_secret;
3286 get_random_bytes(&s->secret, sizeof(s->secret));
3287 s->initialized = true;
3288 }
3289
3290 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3291 {
3292 struct in6_addr addr;
3293
3294 /* no link local addresses on L3 master devices */
3295 if (netif_is_l3_master(idev->dev))
3296 return;
3297
3298 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3299
3300 switch (idev->cnf.addr_gen_mode) {
3301 case IN6_ADDR_GEN_MODE_RANDOM:
3302 ipv6_gen_mode_random_init(idev);
3303 /* fallthrough */
3304 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3305 if (!ipv6_generate_stable_address(&addr, 0, idev))
3306 addrconf_add_linklocal(idev, &addr,
3307 IFA_F_STABLE_PRIVACY);
3308 else if (prefix_route)
3309 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3310 0, 0, GFP_KERNEL);
3311 break;
3312 case IN6_ADDR_GEN_MODE_EUI64:
3313 /* addrconf_add_linklocal also adds a prefix_route and we
3314 * only need to care about prefix routes if ipv6_generate_eui64
3315 * couldn't generate one.
3316 */
3317 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3318 addrconf_add_linklocal(idev, &addr, 0);
3319 else if (prefix_route)
3320 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3321 0, 0, GFP_KERNEL);
3322 break;
3323 case IN6_ADDR_GEN_MODE_NONE:
3324 default:
3325 /* will not add any link local address */
3326 break;
3327 }
3328 }
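/* The generation mode comes from the per-device addr_gen_mode sysctl (or
 * the IFLA_INET6_ADDR_GEN_MODE netlink attribute); for instance something
 * like
 *	sysctl net.ipv6.conf.eth0.addr_gen_mode=2
 * selects IN6_ADDR_GEN_MODE_STABLE_PRIVACY (0 = EUI-64, 1 = none,
 * 3 = random), with "eth0" standing in for the interface of interest.
 */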
3329
3330 static void addrconf_dev_config(struct net_device *dev)
3331 {
3332 struct inet6_dev *idev;
3333
3334 ASSERT_RTNL();
3335
3336 if ((dev->type != ARPHRD_ETHER) &&
3337 (dev->type != ARPHRD_FDDI) &&
3338 (dev->type != ARPHRD_ARCNET) &&
3339 (dev->type != ARPHRD_INFINIBAND) &&
3340 (dev->type != ARPHRD_IEEE1394) &&
3341 (dev->type != ARPHRD_TUNNEL6) &&
3342 (dev->type != ARPHRD_6LOWPAN) &&
3343 (dev->type != ARPHRD_IP6GRE) &&
3344 (dev->type != ARPHRD_IPGRE) &&
3345 (dev->type != ARPHRD_TUNNEL) &&
3346 (dev->type != ARPHRD_NONE) &&
3347 (dev->type != ARPHRD_RAWIP)) {
3348 /* Alas, we support only Ethernet autoconfiguration. */
3349 return;
3350 }
3351
3352 idev = addrconf_add_dev(dev);
3353 if (IS_ERR(idev))
3354 return;
3355
3356 /* this device type has no EUI support */
3357 if (dev->type == ARPHRD_NONE &&
3358 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3359 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3360
3361 addrconf_addr_gen(idev, false);
3362 }
3363
3364 #if IS_ENABLED(CONFIG_IPV6_SIT)
3365 static void addrconf_sit_config(struct net_device *dev)
3366 {
3367 struct inet6_dev *idev;
3368
3369 ASSERT_RTNL();
3370
3371 /*
3372 * Configure the tunnel with one of our IPv4
3373 * addresses... we should configure all of
3374 * our v4 addrs in the tunnel
3375 */
3376
3377 idev = ipv6_find_idev(dev);
3378 if (!idev) {
3379 pr_debug("%s: add_dev failed\n", __func__);
3380 return;
3381 }
3382
3383 if (dev->priv_flags & IFF_ISATAP) {
3384 addrconf_addr_gen(idev, false);
3385 return;
3386 }
3387
3388 sit_add_v4_addrs(idev);
3389
3390 if (dev->flags&IFF_POINTOPOINT)
3391 addrconf_add_mroute(dev);
3392 }
3393 #endif
3394
3395 #if IS_ENABLED(CONFIG_NET_IPGRE)
3396 static void addrconf_gre_config(struct net_device *dev)
3397 {
3398 struct inet6_dev *idev;
3399
3400 ASSERT_RTNL();
3401
3402 idev = ipv6_find_idev(dev);
3403 if (!idev) {
3404 pr_debug("%s: add_dev failed\n", __func__);
3405 return;
3406 }
3407
3408 addrconf_addr_gen(idev, true);
3409 if (dev->flags & IFF_POINTOPOINT)
3410 addrconf_add_mroute(dev);
3411 }
3412 #endif
3413
3414 static int fixup_permanent_addr(struct net *net,
3415 struct inet6_dev *idev,
3416 struct inet6_ifaddr *ifp)
3417 {
3418 /* !fib6_node means the host route was removed from the
3419 * FIB, for example, if the 'lo' device is taken down. In that
3420 * case, regenerate the host route.
3421 */
3422 if (!ifp->rt || !ifp->rt->fib6_node) {
3423 struct fib6_info *f6i, *prev;
3424
3425 f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3426 GFP_ATOMIC);
3427 if (IS_ERR(f6i))
3428 return PTR_ERR(f6i);
3429
3430 /* ifp->rt can be accessed outside of rtnl */
3431 spin_lock(&ifp->lock);
3432 prev = ifp->rt;
3433 ifp->rt = f6i;
3434 spin_unlock(&ifp->lock);
3435
3436 fib6_info_release(prev);
3437 }
3438
3439 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3440 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3441 ifp->rt_priority, idev->dev, 0, 0,
3442 GFP_ATOMIC);
3443 }
3444
3445 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3446 addrconf_dad_start(ifp);
3447
3448 return 0;
3449 }
3450
3451 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3452 {
3453 struct inet6_ifaddr *ifp, *tmp;
3454 struct inet6_dev *idev;
3455
3456 idev = __in6_dev_get(dev);
3457 if (!idev)
3458 return;
3459
3460 write_lock_bh(&idev->lock);
3461
3462 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3463 if ((ifp->flags & IFA_F_PERMANENT) &&
3464 fixup_permanent_addr(net, idev, ifp) < 0) {
3465 write_unlock_bh(&idev->lock);
3466 in6_ifa_hold(ifp);
3467 ipv6_del_addr(ifp);
3468 write_lock_bh(&idev->lock);
3469
3470 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3471 idev->dev->name, &ifp->addr);
3472 }
3473 }
3474
3475 write_unlock_bh(&idev->lock);
3476 }
3477
3478 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3479 void *ptr)
3480 {
3481 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3482 struct netdev_notifier_change_info *change_info;
3483 struct netdev_notifier_changeupper_info *info;
3484 struct inet6_dev *idev = __in6_dev_get(dev);
3485 struct net *net = dev_net(dev);
3486 int run_pending = 0;
3487 int err;
3488
3489 switch (event) {
3490 case NETDEV_REGISTER:
3491 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3492 idev = ipv6_add_dev(dev);
3493 if (IS_ERR(idev))
3494 return notifier_from_errno(PTR_ERR(idev));
3495 }
3496 break;
3497
3498 case NETDEV_CHANGEMTU:
3499 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3500 if (dev->mtu < IPV6_MIN_MTU) {
3501 addrconf_ifdown(dev, dev != net->loopback_dev);
3502 break;
3503 }
3504
3505 if (idev) {
3506 rt6_mtu_change(dev, dev->mtu);
3507 idev->cnf.mtu6 = dev->mtu;
3508 break;
3509 }
3510
3511 /* allocate new idev */
3512 idev = ipv6_add_dev(dev);
3513 if (IS_ERR(idev))
3514 break;
3515
3516 /* device is still not ready */
3517 if (!(idev->if_flags & IF_READY))
3518 break;
3519
3520 run_pending = 1;
3521
3522 /* fall through */
3523
3524 case NETDEV_UP:
3525 case NETDEV_CHANGE:
3526 if (dev->flags & IFF_SLAVE)
3527 break;
3528
3529 if (idev && idev->cnf.disable_ipv6)
3530 break;
3531
3532 if (event == NETDEV_UP) {
3533 /* restore routes for permanent addresses */
3534 addrconf_permanent_addr(net, dev);
3535
3536 if (!addrconf_link_ready(dev)) {
3537 /* device is not ready yet. */
3538 pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3539 dev->name);
3540 break;
3541 }
3542
3543 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3544 idev = ipv6_add_dev(dev);
3545
3546 if (!IS_ERR_OR_NULL(idev)) {
3547 idev->if_flags |= IF_READY;
3548 run_pending = 1;
3549 }
3550 } else if (event == NETDEV_CHANGE) {
3551 if (!addrconf_link_ready(dev)) {
3552 /* device is still not ready. */
3553 rt6_sync_down_dev(dev, event);
3554 break;
3555 }
3556
3557 if (!IS_ERR_OR_NULL(idev)) {
3558 if (idev->if_flags & IF_READY) {
3559 /* device is already configured -
3560 * but resend MLD reports, we might
3561 * have roamed and need to update
3562 * multicast snooping switches
3563 */
3564 ipv6_mc_up(idev);
3565 change_info = ptr;
3566 if (change_info->flags_changed & IFF_NOARP)
3567 addrconf_dad_run(idev, true);
3568 rt6_sync_up(dev, RTNH_F_LINKDOWN);
3569 break;
3570 }
3571 idev->if_flags |= IF_READY;
3572 }
3573
3574 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3575 dev->name);
3576
3577 run_pending = 1;
3578 }
3579
3580 switch (dev->type) {
3581 #if IS_ENABLED(CONFIG_IPV6_SIT)
3582 case ARPHRD_SIT:
3583 addrconf_sit_config(dev);
3584 break;
3585 #endif
3586 #if IS_ENABLED(CONFIG_NET_IPGRE)
3587 case ARPHRD_IPGRE:
3588 addrconf_gre_config(dev);
3589 break;
3590 #endif
3591 case ARPHRD_LOOPBACK:
3592 init_loopback(dev);
3593 break;
3594
3595 default:
3596 addrconf_dev_config(dev);
3597 break;
3598 }
3599
3600 if (!IS_ERR_OR_NULL(idev)) {
3601 if (run_pending)
3602 addrconf_dad_run(idev, false);
3603
3604 /* Device has an address by now */
3605 rt6_sync_up(dev, RTNH_F_DEAD);
3606
3607 /*
3608 * If the MTU changed while the interface was down,
3609 * the new MTU must be reflected in the idev as well
3610 * as in the routes when the interface comes back up.
3611 */
3612 if (idev->cnf.mtu6 != dev->mtu &&
3613 dev->mtu >= IPV6_MIN_MTU) {
3614 rt6_mtu_change(dev, dev->mtu);
3615 idev->cnf.mtu6 = dev->mtu;
3616 }
3617 idev->tstamp = jiffies;
3618 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3619
3620 /*
3621 * If the MTU that changed while the interface was down is
3622 * lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3623 */
3624 if (dev->mtu < IPV6_MIN_MTU)
3625 addrconf_ifdown(dev, dev != net->loopback_dev);
3626 }
3627 break;
3628
3629 case NETDEV_DOWN:
3630 case NETDEV_UNREGISTER:
3631 /*
3632 * Remove all addresses from this interface.
3633 */
3634 addrconf_ifdown(dev, event != NETDEV_DOWN);
3635 break;
3636
3637 case NETDEV_CHANGENAME:
3638 if (idev) {
3639 snmp6_unregister_dev(idev);
3640 addrconf_sysctl_unregister(idev);
3641 err = addrconf_sysctl_register(idev);
3642 if (err)
3643 return notifier_from_errno(err);
3644 err = snmp6_register_dev(idev);
3645 if (err) {
3646 addrconf_sysctl_unregister(idev);
3647 return notifier_from_errno(err);
3648 }
3649 }
3650 break;
3651
3652 case NETDEV_PRE_TYPE_CHANGE:
3653 case NETDEV_POST_TYPE_CHANGE:
3654 if (idev)
3655 addrconf_type_change(dev, event);
3656 break;
3657
3658 case NETDEV_CHANGEUPPER:
3659 info = ptr;
3660
3661 /* flush all routes if dev is linked to or unlinked from
3662 * an L3 master device (e.g., VRF)
3663 */
3664 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3665 addrconf_ifdown(dev, 0);
3666 }
3667
3668 return NOTIFY_OK;
3669 }
3670
3671 /*
3672 * addrconf module should be notified of a device going up
3673 */
3674 static struct notifier_block ipv6_dev_notf = {
3675 .notifier_call = addrconf_notify,
3676 .priority = ADDRCONF_NOTIFY_PRIORITY,
3677 };
3678
3679 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3680 {
3681 struct inet6_dev *idev;
3682 ASSERT_RTNL();
3683
3684 idev = __in6_dev_get(dev);
3685
3686 if (event == NETDEV_POST_TYPE_CHANGE)
3687 ipv6_mc_remap(idev);
3688 else if (event == NETDEV_PRE_TYPE_CHANGE)
3689 ipv6_mc_unmap(idev);
3690 }
3691
3692 static bool addr_is_local(const struct in6_addr *addr)
3693 {
3694 return ipv6_addr_type(addr) &
3695 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3696 }
3697
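/* Bring IPv6 down on @dev. @how selects between a link-down event
 * (how == 0: addresses may be kept if keep_addr_on_down says so, and only
 * stateless addrconf state is cleared) and an unregister (how != 0: the
 * inet6_dev is detached from the device and fully destroyed).
 */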
3698 static int addrconf_ifdown(struct net_device *dev, int how)
3699 {
3700 unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN;
3701 struct net *net = dev_net(dev);
3702 struct inet6_dev *idev;
3703 struct inet6_ifaddr *ifa, *tmp;
3704 bool keep_addr = false;
3705 int state, i;
3706
3707 ASSERT_RTNL();
3708
3709 rt6_disable_ip(dev, event);
3710
3711 idev = __in6_dev_get(dev);
3712 if (!idev)
3713 return -ENODEV;
3714
3715 /*
3716 * Step 1: remove reference to ipv6 device from parent device.
3717 * Do not dev_put!
3718 */
3719 if (how) {
3720 idev->dead = 1;
3721
3722 /* protected by rtnl_lock */
3723 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3724
3725 /* Step 1.5: remove snmp6 entry */
3726 snmp6_unregister_dev(idev);
3727
3728 }
3729
3730 /* combine the user config with event to determine if permanent
3731 * addresses are to be removed from address hash table
3732 */
3733 if (!how && !idev->cnf.disable_ipv6) {
3734 /* aggregate the system setting and interface setting */
3735 int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3736
3737 if (!_keep_addr)
3738 _keep_addr = idev->cnf.keep_addr_on_down;
3739
3740 keep_addr = (_keep_addr > 0);
3741 }
3742
3743 /* Step 2: clear hash table */
3744 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3745 struct hlist_head *h = &inet6_addr_lst[i];
3746
3747 spin_lock_bh(&addrconf_hash_lock);
3748 restart:
3749 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3750 if (ifa->idev == idev) {
3751 addrconf_del_dad_work(ifa);
3752 /* combined flag + permanent flag decide if
3753 * address is retained on a down event
3754 */
3755 if (!keep_addr ||
3756 !(ifa->flags & IFA_F_PERMANENT) ||
3757 addr_is_local(&ifa->addr)) {
3758 hlist_del_init_rcu(&ifa->addr_lst);
3759 goto restart;
3760 }
3761 }
3762 }
3763 spin_unlock_bh(&addrconf_hash_lock);
3764 }
3765
3766 write_lock_bh(&idev->lock);
3767
3768 addrconf_del_rs_timer(idev);
3769
3770 /* Step 3: clear flags for stateless addrconf */
3771 if (!how)
3772 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3773
3774 /* Step 4: clear tempaddr list */
3775 while (!list_empty(&idev->tempaddr_list)) {
3776 ifa = list_first_entry(&idev->tempaddr_list,
3777 struct inet6_ifaddr, tmp_list);
3778 list_del(&ifa->tmp_list);
3779 write_unlock_bh(&idev->lock);
3780 spin_lock_bh(&ifa->lock);
3781
3782 if (ifa->ifpub) {
3783 in6_ifa_put(ifa->ifpub);
3784 ifa->ifpub = NULL;
3785 }
3786 spin_unlock_bh(&ifa->lock);
3787 in6_ifa_put(ifa);
3788 write_lock_bh(&idev->lock);
3789 }
3790
3791 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3792 struct fib6_info *rt = NULL;
3793 bool keep;
3794
3795 addrconf_del_dad_work(ifa);
3796
3797 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3798 !addr_is_local(&ifa->addr);
3799
3800 write_unlock_bh(&idev->lock);
3801 spin_lock_bh(&ifa->lock);
3802
3803 if (keep) {
3804 /* set state to skip the notifier below */
3805 state = INET6_IFADDR_STATE_DEAD;
3806 ifa->state = INET6_IFADDR_STATE_PREDAD;
3807 if (!(ifa->flags & IFA_F_NODAD))
3808 ifa->flags |= IFA_F_TENTATIVE;
3809
3810 rt = ifa->rt;
3811 ifa->rt = NULL;
3812 } else {
3813 state = ifa->state;
3814 ifa->state = INET6_IFADDR_STATE_DEAD;
3815 }
3816
3817 spin_unlock_bh(&ifa->lock);
3818
3819 if (rt)
3820 ip6_del_rt(net, rt);
3821
3822 if (state != INET6_IFADDR_STATE_DEAD) {
3823 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3824 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3825 } else {
3826 if (idev->cnf.forwarding)
3827 addrconf_leave_anycast(ifa);
3828 addrconf_leave_solict(ifa->idev, &ifa->addr);
3829 }
3830
3831 write_lock_bh(&idev->lock);
3832 if (!keep) {
3833 list_del_rcu(&ifa->if_list);
3834 in6_ifa_put(ifa);
3835 }
3836 }
3837
3838 write_unlock_bh(&idev->lock);
3839
3840 /* Step 5: Discard anycast and multicast list */
3841 if (how) {
3842 ipv6_ac_destroy_dev(idev);
3843 ipv6_mc_destroy_dev(idev);
3844 } else {
3845 ipv6_mc_down(idev);
3846 }
3847
3848 idev->tstamp = jiffies;
3849
3850 /* Last: Shut down the device (if unregistered) */
3851 if (how) {
3852 addrconf_sysctl_unregister(idev);
3853 neigh_parms_release(&nd_tbl, idev->nd_parms);
3854 neigh_ifdown(&nd_tbl, dev);
3855 in6_dev_put(idev);
3856 }
3857 return 0;
3858 }
3859
3860 static void addrconf_rs_timer(struct timer_list *t)
3861 {
3862 struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3863 struct net_device *dev = idev->dev;
3864 struct in6_addr lladdr;
3865
3866 write_lock(&idev->lock);
3867 if (idev->dead || !(idev->if_flags & IF_READY))
3868 goto out;
3869
3870 if (!ipv6_accept_ra(idev))
3871 goto out;
3872
3873 /* Announcement received after solicitation was sent */
3874 if (idev->if_flags & IF_RA_RCVD)
3875 goto out;
3876
3877 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3878 write_unlock(&idev->lock);
3879 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3880 ndisc_send_rs(dev, &lladdr,
3881 &in6addr_linklocal_allrouters);
3882 else
3883 goto put;
3884
3885 write_lock(&idev->lock);
3886 idev->rs_interval = rfc3315_s14_backoff_update(
3887 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3888 /* The wait after the last probe can be shorter */
3889 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3890 idev->cnf.rtr_solicits) ?
3891 idev->cnf.rtr_solicit_delay :
3892 idev->rs_interval);
3893 } else {
3894 /*
3895 * Note: we no longer support the deprecated "all on-link"
3896 * assumption.
3897 */
3898 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3899 }
3900
3901 out:
3902 write_unlock(&idev->lock);
3903 put:
3904 in6_dev_put(idev);
3905 }
3906
3907 /*
3908 * Duplicate Address Detection
3909 */
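/* Kick off DAD for one address: pick a random delay before the first
 * probe (zero for optimistic addresses, which may be used immediately),
 * and, when enhanced DAD (RFC 7527) is enabled, generate a random 48-bit
 * nonce that is carried in the DAD neighbour solicitations so a
 * looped-back probe can be told apart from a genuine conflict.
 */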
3910 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3911 {
3912 unsigned long rand_num;
3913 struct inet6_dev *idev = ifp->idev;
3914 u64 nonce;
3915
3916 if (ifp->flags & IFA_F_OPTIMISTIC)
3917 rand_num = 0;
3918 else
3919 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3920
3921 nonce = 0;
3922 if (idev->cnf.enhanced_dad ||
3923 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3924 do
3925 get_random_bytes(&nonce, 6);
3926 while (nonce == 0);
3927 }
3928 ifp->dad_nonce = nonce;
3929 ifp->dad_probes = idev->cnf.dad_transmits;
3930 addrconf_mod_dad_work(ifp, rand_num);
3931 }
3932
3933 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3934 {
3935 struct inet6_dev *idev = ifp->idev;
3936 struct net_device *dev = idev->dev;
3937 bool bump_id, notify = false;
3938 struct net *net;
3939
3940 addrconf_join_solict(dev, &ifp->addr);
3941
3942 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3943
3944 read_lock_bh(&idev->lock);
3945 spin_lock(&ifp->lock);
3946 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3947 goto out;
3948
3949 net = dev_net(dev);
3950 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3951 (net->ipv6.devconf_all->accept_dad < 1 &&
3952 idev->cnf.accept_dad < 1) ||
3953 !(ifp->flags&IFA_F_TENTATIVE) ||
3954 ifp->flags & IFA_F_NODAD) {
3955 bool send_na = false;
3956
3957 if (ifp->flags & IFA_F_TENTATIVE &&
3958 !(ifp->flags & IFA_F_OPTIMISTIC))
3959 send_na = true;
3960 bump_id = ifp->flags & IFA_F_TENTATIVE;
3961 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3962 spin_unlock(&ifp->lock);
3963 read_unlock_bh(&idev->lock);
3964
3965 addrconf_dad_completed(ifp, bump_id, send_na);
3966 return;
3967 }
3968
3969 if (!(idev->if_flags & IF_READY)) {
3970 spin_unlock(&ifp->lock);
3971 read_unlock_bh(&idev->lock);
3972 /*
3973 * If the device is not ready:
3974 * - keep it tentative if it is a permanent address.
3975 * - otherwise, kill it.
3976 */
3977 in6_ifa_hold(ifp);
3978 addrconf_dad_stop(ifp, 0);
3979 return;
3980 }
3981
3982 /*
3983 * Optimistic nodes can start receiving
3984 * frames right away.
3985 */
3986 if (ifp->flags & IFA_F_OPTIMISTIC) {
3987 ip6_ins_rt(net, ifp->rt);
3988 if (ipv6_use_optimistic_addr(net, idev)) {
3989 /* Because optimistic nodes can use this address,
3990 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3991 */
3992 notify = true;
3993 }
3994 }
3995
3996 addrconf_dad_kick(ifp);
3997 out:
3998 spin_unlock(&ifp->lock);
3999 read_unlock_bh(&idev->lock);
4000 if (notify)
4001 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4002 }
4003
4004 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4005 {
4006 bool begin_dad = false;
4007
4008 spin_lock_bh(&ifp->lock);
4009 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4010 ifp->state = INET6_IFADDR_STATE_PREDAD;
4011 begin_dad = true;
4012 }
4013 spin_unlock_bh(&ifp->lock);
4014
4015 if (begin_dad)
4016 addrconf_mod_dad_work(ifp, 0);
4017 }
4018
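/* DAD worker. Runs under RTNL and drives the per-address state machine:
 * PREDAD begins DAD (join the solicited-node group and send the first
 * probe), ERRDAD aborts it after a detected duplicate, and otherwise each
 * invocation sends one more neighbour solicitation until dad_probes
 * reaches zero, at which point the address is marked usable via
 * addrconf_dad_completed().
 */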
4019 static void addrconf_dad_work(struct work_struct *w)
4020 {
4021 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4022 struct inet6_ifaddr,
4023 dad_work);
4024 struct inet6_dev *idev = ifp->idev;
4025 bool bump_id, disable_ipv6 = false;
4026 struct in6_addr mcaddr;
4027
4028 enum {
4029 DAD_PROCESS,
4030 DAD_BEGIN,
4031 DAD_ABORT,
4032 } action = DAD_PROCESS;
4033
4034 rtnl_lock();
4035
4036 spin_lock_bh(&ifp->lock);
4037 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4038 action = DAD_BEGIN;
4039 ifp->state = INET6_IFADDR_STATE_DAD;
4040 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4041 action = DAD_ABORT;
4042 ifp->state = INET6_IFADDR_STATE_POSTDAD;
4043
4044 if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
4045 idev->cnf.accept_dad > 1) &&
4046 !idev->cnf.disable_ipv6 &&
4047 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4048 struct in6_addr addr;
4049
4050 addr.s6_addr32[0] = htonl(0xfe800000);
4051 addr.s6_addr32[1] = 0;
4052
4053 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4054 ipv6_addr_equal(&ifp->addr, &addr)) {
4055 /* DAD failed for link-local based on MAC */
4056 idev->cnf.disable_ipv6 = 1;
4057
4058 pr_info("%s: IPv6 being disabled!\n",
4059 ifp->idev->dev->name);
4060 disable_ipv6 = true;
4061 }
4062 }
4063 }
4064 spin_unlock_bh(&ifp->lock);
4065
4066 if (action == DAD_BEGIN) {
4067 addrconf_dad_begin(ifp);
4068 goto out;
4069 } else if (action == DAD_ABORT) {
4070 in6_ifa_hold(ifp);
4071 addrconf_dad_stop(ifp, 1);
4072 if (disable_ipv6)
4073 addrconf_ifdown(idev->dev, 0);
4074 goto out;
4075 }
4076
4077 if (!ifp->dad_probes && addrconf_dad_end(ifp))
4078 goto out;
4079
4080 write_lock_bh(&idev->lock);
4081 if (idev->dead || !(idev->if_flags & IF_READY)) {
4082 write_unlock_bh(&idev->lock);
4083 goto out;
4084 }
4085
4086 spin_lock(&ifp->lock);
4087 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4088 spin_unlock(&ifp->lock);
4089 write_unlock_bh(&idev->lock);
4090 goto out;
4091 }
4092
4093 if (ifp->dad_probes == 0) {
4094 bool send_na = false;
4095
4096 /*
4097 * DAD was successful
4098 */
4099
4100 if (ifp->flags & IFA_F_TENTATIVE &&
4101 !(ifp->flags & IFA_F_OPTIMISTIC))
4102 send_na = true;
4103 bump_id = ifp->flags & IFA_F_TENTATIVE;
4104 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4105 spin_unlock(&ifp->lock);
4106 write_unlock_bh(&idev->lock);
4107
4108 addrconf_dad_completed(ifp, bump_id, send_na);
4109
4110 goto out;
4111 }
4112
4113 ifp->dad_probes--;
4114 addrconf_mod_dad_work(ifp,
4115 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
4116 spin_unlock(&ifp->lock);
4117 write_unlock_bh(&idev->lock);
4118
4119 /* send a neighbour solicitation for our addr */
4120 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4121 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4122 ifp->dad_nonce);
4123 out:
4124 in6_ifa_put(ifp);
4125 rtnl_unlock();
4126 }
4127
4128 /* ifp->idev must be at least read locked */
4129 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4130 {
4131 struct inet6_ifaddr *ifpiter;
4132 struct inet6_dev *idev = ifp->idev;
4133
4134 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4135 if (ifpiter->scope > IFA_LINK)
4136 break;
4137 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4138 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4139 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4140 IFA_F_PERMANENT)
4141 return false;
4142 }
4143 return true;
4144 }
4145
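/*
 * Runs once an address has passed DAD.  The address is announced via
 * RTM_NEWADDR; if it is the device's only usable link-local, the MLD
 * report is re-sent with a proper source and, provided router
 * advertisements are accepted, router solicitations begin with an
 * RFC 3315 section 14 randomized interval.  An unsolicited NA is also
 * sent when ndisc_notify requests it.
 */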
4146 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4147 bool send_na)
4148 {
4149 struct net_device *dev = ifp->idev->dev;
4150 struct in6_addr lladdr;
4151 bool send_rs, send_mld;
4152
4153 addrconf_del_dad_work(ifp);
4154
4155 /*
4156 * Configure the address for reception. Now it is valid.
4157 */
4158
4159 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4160
4161 /* If the added prefix is link-local and we are prepared to process
4162 router advertisements, start sending router solicitations.
4163 */
4164
4165 read_lock_bh(&ifp->idev->lock);
4166 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4167 send_rs = send_mld &&
4168 ipv6_accept_ra(ifp->idev) &&
4169 ifp->idev->cnf.rtr_solicits != 0 &&
4170 (dev->flags&IFF_LOOPBACK) == 0;
4171 read_unlock_bh(&ifp->idev->lock);
4172
4173 /* While DAD is in progress the MLD report's source address is in6addr_any.
4174 * Resend it with the proper link-local address now.
4175 */
4176 if (send_mld)
4177 ipv6_mc_dad_complete(ifp->idev);
4178
4179 /* send unsolicited NA if enabled */
4180 if (send_na &&
4181 (ifp->idev->cnf.ndisc_notify ||
4182 dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4183 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4184 /*router=*/ !!ifp->idev->cnf.forwarding,
4185 /*solicited=*/ false, /*override=*/ true,
4186 /*inc_opt=*/ true);
4187 }
4188
4189 if (send_rs) {
4190 /*
4191 * If a host has already performed a random delay
4192 * [...] as part of DAD [...] there is no need
4193 * to delay again before sending the first RS.
4194 */
4195 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4196 return;
4197 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4198
4199 write_lock_bh(&ifp->idev->lock);
4200 spin_lock(&ifp->lock);
4201 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4202 ifp->idev->cnf.rtr_solicit_interval);
4203 ifp->idev->rs_probes = 1;
4204 ifp->idev->if_flags |= IF_RS_SENT;
4205 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4206 spin_unlock(&ifp->lock);
4207 write_unlock_bh(&ifp->idev->lock);
4208 }
4209
4210 if (bump_id)
4211 rt_genid_bump_ipv6(dev_net(dev));
4212
4213 /* Make sure that a new temporary address will be created
4214 * before this temporary address becomes deprecated.
4215 */
4216 if (ifp->flags & IFA_F_TEMPORARY)
4217 addrconf_verify_rtnl();
4218 }
4219
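/*
 * Kick DAD for every address that is still tentative and in the DAD
 * state; with @restart all addresses are first forced back to PREDAD so
 * the whole cycle reruns.
 */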
4220 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4221 {
4222 struct inet6_ifaddr *ifp;
4223
4224 read_lock_bh(&idev->lock);
4225 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4226 spin_lock(&ifp->lock);
4227 if ((ifp->flags & IFA_F_TENTATIVE &&
4228 ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4229 if (restart)
4230 ifp->state = INET6_IFADDR_STATE_PREDAD;
4231 addrconf_dad_kick(ifp);
4232 }
4233 spin_unlock(&ifp->lock);
4234 }
4235 read_unlock_bh(&idev->lock);
4236 }
4237
4238 #ifdef CONFIG_PROC_FS
4239 struct if6_iter_state {
4240 struct seq_net_private p;
4241 int bucket;
4242 int offset;
4243 };
4244
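/*
 * /proc/net/if_inet6 walks the global inet6_addr_lst hash: state->bucket
 * remembers the chain and state->offset the entry within it, so a dump
 * can resume where the previous read() stopped.
 */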
4245 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4246 {
4247 struct if6_iter_state *state = seq->private;
4248 struct net *net = seq_file_net(seq);
4249 struct inet6_ifaddr *ifa = NULL;
4250 int p = 0;
4251
4252 /* initial bucket if pos is 0 */
4253 if (pos == 0) {
4254 state->bucket = 0;
4255 state->offset = 0;
4256 }
4257
4258 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4259 hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket],
4260 addr_lst) {
4261 if (!net_eq(dev_net(ifa->idev->dev), net))
4262 continue;
4263 /* sync with offset */
4264 if (p < state->offset) {
4265 p++;
4266 continue;
4267 }
4268 return ifa;
4269 }
4270
4271 /* prepare for next bucket */
4272 state->offset = 0;
4273 p = 0;
4274 }
4275 return NULL;
4276 }
4277
4278 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4279 struct inet6_ifaddr *ifa)
4280 {
4281 struct if6_iter_state *state = seq->private;
4282 struct net *net = seq_file_net(seq);
4283
4284 hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4285 if (!net_eq(dev_net(ifa->idev->dev), net))
4286 continue;
4287 state->offset++;
4288 return ifa;
4289 }
4290
4291 state->offset = 0;
4292 while (++state->bucket < IN6_ADDR_HSIZE) {
4293 hlist_for_each_entry_rcu(ifa,
4294 &inet6_addr_lst[state->bucket], addr_lst) {
4295 if (!net_eq(dev_net(ifa->idev->dev), net))
4296 continue;
4297 return ifa;
4298 }
4299 }
4300
4301 return NULL;
4302 }
4303
4304 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4305 __acquires(rcu)
4306 {
4307 rcu_read_lock();
4308 return if6_get_first(seq, *pos);
4309 }
4310
4311 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4312 {
4313 struct inet6_ifaddr *ifa;
4314
4315 ifa = if6_get_next(seq, v);
4316 ++*pos;
4317 return ifa;
4318 }
4319
4320 static void if6_seq_stop(struct seq_file *seq, void *v)
4321 __releases(rcu)
4322 {
4323 rcu_read_unlock();
4324 }
4325
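/*
 * One line per address: the address without colons, then ifindex, prefix
 * length, scope and flags in hex, and the device name.  The loopback
 * entry, for instance, typically reads
 * "00000000000000000000000000000001 01 80 10 80 lo".
 */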
4326 static int if6_seq_show(struct seq_file *seq, void *v)
4327 {
4328 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4329 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4330 &ifp->addr,
4331 ifp->idev->dev->ifindex,
4332 ifp->prefix_len,
4333 ifp->scope,
4334 (u8) ifp->flags,
4335 ifp->idev->dev->name);
4336 return 0;
4337 }
4338
4339 static const struct seq_operations if6_seq_ops = {
4340 .start = if6_seq_start,
4341 .next = if6_seq_next,
4342 .show = if6_seq_show,
4343 .stop = if6_seq_stop,
4344 };
4345
4346 static int __net_init if6_proc_net_init(struct net *net)
4347 {
4348 if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4349 sizeof(struct if6_iter_state)))
4350 return -ENOMEM;
4351 return 0;
4352 }
4353
4354 static void __net_exit if6_proc_net_exit(struct net *net)
4355 {
4356 remove_proc_entry("if_inet6", net->proc_net);
4357 }
4358
4359 static struct pernet_operations if6_proc_net_ops = {
4360 .init = if6_proc_net_init,
4361 .exit = if6_proc_net_exit,
4362 };
4363
4364 int __init if6_proc_init(void)
4365 {
4366 return register_pernet_subsys(&if6_proc_net_ops);
4367 }
4368
4369 void if6_proc_exit(void)
4370 {
4371 unregister_pernet_subsys(&if6_proc_net_ops);
4372 }
4373 #endif /* CONFIG_PROC_FS */
4374
4375 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4376 /* Check if address is a home address configured on any interface. */
4377 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4378 {
4379 unsigned int hash = inet6_addr_hash(net, addr);
4380 struct inet6_ifaddr *ifp = NULL;
4381 int ret = 0;
4382
4383 rcu_read_lock();
4384 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
4385 if (!net_eq(dev_net(ifp->idev->dev), net))
4386 continue;
4387 if (ipv6_addr_equal(&ifp->addr, addr) &&
4388 (ifp->flags & IFA_F_HOMEADDRESS)) {
4389 ret = 1;
4390 break;
4391 }
4392 }
4393 rcu_read_unlock();
4394 return ret;
4395 }
4396 #endif
4397
4398 /*
4399 * Periodic address status verification
4400 */
4401
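/*
 * Walk every hash bucket and age each address: delete it once valid_lft
 * has passed, mark it IFA_F_DEPRECATED once prefered_lft has passed, and
 * regenerate a temporary address regen_advance seconds before it would
 * be deprecated (with the usual defaults of regen_max_retry=3,
 * dad_transmits=1 and a one second retransmit timer this works out to
 * roughly three seconds; the exact formula is below).  The earliest
 * deadline seen becomes the next wakeup, rounded where that is accurate
 * enough and never scheduled sooner than ADDRCONF_TIMER_FUZZ_MAX from now.
 */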
4402 static void addrconf_verify_rtnl(void)
4403 {
4404 unsigned long now, next, next_sec, next_sched;
4405 struct inet6_ifaddr *ifp;
4406 int i;
4407
4408 ASSERT_RTNL();
4409
4410 rcu_read_lock_bh();
4411 now = jiffies;
4412 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4413
4414 cancel_delayed_work(&addr_chk_work);
4415
4416 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4417 restart:
4418 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4419 unsigned long age;
4420
4421 /* An IFA_F_PERMANENT address can still have a finite lifetime:
4422 * preferred_lft set to a value that is neither zero nor infinity
4423 * while valid_lft is infinity.  Only skip the truly infinite ones.
4424 */
4425 if ((ifp->flags & IFA_F_PERMANENT) &&
4426 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4427 continue;
4428
4429 spin_lock(&ifp->lock);
4430 /* We try to batch several events at once. */
4431 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4432
4433 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4434 age >= ifp->valid_lft) {
4435 spin_unlock(&ifp->lock);
4436 in6_ifa_hold(ifp);
4437 ipv6_del_addr(ifp);
4438 goto restart;
4439 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4440 spin_unlock(&ifp->lock);
4441 continue;
4442 } else if (age >= ifp->prefered_lft) {
4443 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4444 int deprecate = 0;
4445
4446 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4447 deprecate = 1;
4448 ifp->flags |= IFA_F_DEPRECATED;
4449 }
4450
4451 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4452 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4453 next = ifp->tstamp + ifp->valid_lft * HZ;
4454
4455 spin_unlock(&ifp->lock);
4456
4457 if (deprecate) {
4458 in6_ifa_hold(ifp);
4459
4460 ipv6_ifa_notify(0, ifp);
4461 in6_ifa_put(ifp);
4462 goto restart;
4463 }
4464 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4465 !(ifp->flags&IFA_F_TENTATIVE)) {
4466 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4467 ifp->idev->cnf.dad_transmits *
4468 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
4469
4470 if (age >= ifp->prefered_lft - regen_advance) {
4471 struct inet6_ifaddr *ifpub = ifp->ifpub;
4472 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4473 next = ifp->tstamp + ifp->prefered_lft * HZ;
4474 if (!ifp->regen_count && ifpub) {
4475 ifp->regen_count++;
4476 in6_ifa_hold(ifp);
4477 in6_ifa_hold(ifpub);
4478 spin_unlock(&ifp->lock);
4479
4480 spin_lock(&ifpub->lock);
4481 ifpub->regen_count = 0;
4482 spin_unlock(&ifpub->lock);
4483 rcu_read_unlock_bh();
4484 ipv6_create_tempaddr(ifpub, ifp, true);
4485 in6_ifa_put(ifpub);
4486 in6_ifa_put(ifp);
4487 rcu_read_lock_bh();
4488 goto restart;
4489 }
4490 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4491 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4492 spin_unlock(&ifp->lock);
4493 } else {
4494 /* ifp->prefered_lft <= ifp->valid_lft */
4495 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4496 next = ifp->tstamp + ifp->prefered_lft * HZ;
4497 spin_unlock(&ifp->lock);
4498 }
4499 }
4500 }
4501
4502 next_sec = round_jiffies_up(next);
4503 next_sched = next;
4504
4505 /* If rounded timeout is accurate enough, accept it. */
4506 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4507 next_sched = next_sec;
4508
4509 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4510 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4511 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4512
4513 pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4514 now, next, next_sec, next_sched);
4515 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4516 rcu_read_unlock_bh();
4517 }
4518
4519 static void addrconf_verify_work(struct work_struct *w)
4520 {
4521 rtnl_lock();
4522 addrconf_verify_rtnl();
4523 rtnl_unlock();
4524 }
4525
4526 static void addrconf_verify(void)
4527 {
4528 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4529 }
4530
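/*
 * Netlink carries a plain address in either IFA_ADDRESS or IFA_LOCAL.
 * When both are present and differ, IFA_LOCAL is the address to
 * configure and IFA_ADDRESS is the point-to-point peer, returned via
 * @peer_pfx.
 */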
4531 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4532 struct in6_addr **peer_pfx)
4533 {
4534 struct in6_addr *pfx = NULL;
4535
4536 *peer_pfx = NULL;
4537
4538 if (addr)
4539 pfx = nla_data(addr);
4540
4541 if (local) {
4542 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4543 *peer_pfx = pfx;
4544 pfx = nla_data(local);
4545 }
4546
4547 return pfx;
4548 }
4549
4550 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4551 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4552 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4553 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4554 [IFA_FLAGS] = { .len = sizeof(u32) },
4555 [IFA_RT_PRIORITY] = { .len = sizeof(u32) },
4556 [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
4557 };
4558
4559 static int
4560 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4561 struct netlink_ext_ack *extack)
4562 {
4563 struct net *net = sock_net(skb->sk);
4564 struct ifaddrmsg *ifm;
4565 struct nlattr *tb[IFA_MAX+1];
4566 struct in6_addr *pfx, *peer_pfx;
4567 u32 ifa_flags;
4568 int err;
4569
4570 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4571 ifa_ipv6_policy, extack);
4572 if (err < 0)
4573 return err;
4574
4575 ifm = nlmsg_data(nlh);
4576 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4577 if (!pfx)
4578 return -EINVAL;
4579
4580 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4581
4582 /* We ignore other flags so far. */
4583 ifa_flags &= IFA_F_MANAGETEMPADDR;
4584
4585 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4586 ifm->ifa_prefixlen);
4587 }
4588
4589 static int modify_prefix_route(struct inet6_ifaddr *ifp,
4590 unsigned long expires, u32 flags)
4591 {
4592 struct fib6_info *f6i;
4593 u32 prio;
4594
4595 f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
4596 ifp->idev->dev, 0, RTF_DEFAULT, true);
4597 if (!f6i)
4598 return -ENOENT;
4599
4600 prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4601 if (f6i->fib6_metric != prio) {
4602 /* delete old one */
4603 ip6_del_rt(dev_net(ifp->idev->dev), f6i);
4604
4605 /* add new one */
4606 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4607 ifp->rt_priority, ifp->idev->dev,
4608 expires, flags, GFP_KERNEL);
4609 } else {
4610 if (!expires)
4611 fib6_clean_expires(f6i);
4612 else
4613 fib6_set_expires(f6i, expires);
4614
4615 fib6_info_release(f6i);
4616 }
4617
4618 return 0;
4619 }
4620
4621 static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
4622 {
4623 u32 flags;
4624 clock_t expires;
4625 unsigned long timeout;
4626 bool was_managetempaddr;
4627 bool had_prefixroute;
4628
4629 ASSERT_RTNL();
4630
4631 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4632 return -EINVAL;
4633
4634 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4635 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4636 return -EINVAL;
4637
4638 if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4639 cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4640
4641 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4642 if (addrconf_finite_timeout(timeout)) {
4643 expires = jiffies_to_clock_t(timeout * HZ);
4644 cfg->valid_lft = timeout;
4645 flags = RTF_EXPIRES;
4646 } else {
4647 expires = 0;
4648 flags = 0;
4649 cfg->ifa_flags |= IFA_F_PERMANENT;
4650 }
4651
4652 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4653 if (addrconf_finite_timeout(timeout)) {
4654 if (timeout == 0)
4655 cfg->ifa_flags |= IFA_F_DEPRECATED;
4656 cfg->preferred_lft = timeout;
4657 }
4658
4659 spin_lock_bh(&ifp->lock);
4660 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4661 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4662 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4663 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4664 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4665 IFA_F_NOPREFIXROUTE);
4666 ifp->flags |= cfg->ifa_flags;
4667 ifp->tstamp = jiffies;
4668 ifp->valid_lft = cfg->valid_lft;
4669 ifp->prefered_lft = cfg->preferred_lft;
4670
4671 if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4672 ifp->rt_priority = cfg->rt_priority;
4673
4674 spin_unlock_bh(&ifp->lock);
4675 if (!(ifp->flags&IFA_F_TENTATIVE))
4676 ipv6_ifa_notify(0, ifp);
4677
4678 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4679 int rc = -ENOENT;
4680
4681 if (had_prefixroute)
4682 rc = modify_prefix_route(ifp, expires, flags);
4683
4684 /* prefix route could have been deleted; if so restore it */
4685 if (rc == -ENOENT) {
4686 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4687 ifp->rt_priority, ifp->idev->dev,
4688 expires, flags, GFP_KERNEL);
4689 }
4690 } else if (had_prefixroute) {
4691 enum cleanup_prefix_rt_t action;
4692 unsigned long rt_expires;
4693
4694 write_lock_bh(&ifp->idev->lock);
4695 action = check_cleanup_prefix_route(ifp, &rt_expires);
4696 write_unlock_bh(&ifp->idev->lock);
4697
4698 if (action != CLEANUP_PREFIX_RT_NOP) {
4699 cleanup_prefix_route(ifp, rt_expires,
4700 action == CLEANUP_PREFIX_RT_DEL);
4701 }
4702 }
4703
4704 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4705 if (was_managetempaddr &&
4706 !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
4707 cfg->valid_lft = 0;
4708 cfg->preferred_lft = 0;
4709 }
4710 manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4711 cfg->preferred_lft, !was_managetempaddr,
4712 jiffies);
4713 }
4714
4715 addrconf_verify_rtnl();
4716
4717 return 0;
4718 }
4719
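/*
 * RTM_NEWADDR handler.  A new address is simply added; an existing one
 * is modified only when NLM_F_REPLACE is set and NLM_F_EXCL is not,
 * otherwise -EEXIST is returned.  In iproute2 terms (an illustration,
 * not taken from this file) "ip -6 addr replace 2001:db8::1/64 dev eth0
 * preferred_lft 300 valid_lft 600" ends up here with IFA_CACHEINFO
 * carrying the two lifetimes.
 */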
4720 static int
4721 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4722 struct netlink_ext_ack *extack)
4723 {
4724 struct net *net = sock_net(skb->sk);
4725 struct ifaddrmsg *ifm;
4726 struct nlattr *tb[IFA_MAX+1];
4727 struct in6_addr *peer_pfx;
4728 struct inet6_ifaddr *ifa;
4729 struct net_device *dev;
4730 struct inet6_dev *idev;
4731 struct ifa6_config cfg;
4732 int err;
4733
4734 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4735 ifa_ipv6_policy, extack);
4736 if (err < 0)
4737 return err;
4738
4739 memset(&cfg, 0, sizeof(cfg));
4740
4741 ifm = nlmsg_data(nlh);
4742 cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4743 if (!cfg.pfx)
4744 return -EINVAL;
4745
4746 cfg.peer_pfx = peer_pfx;
4747 cfg.plen = ifm->ifa_prefixlen;
4748 if (tb[IFA_RT_PRIORITY])
4749 cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4750
4751 cfg.valid_lft = INFINITY_LIFE_TIME;
4752 cfg.preferred_lft = INFINITY_LIFE_TIME;
4753
4754 if (tb[IFA_CACHEINFO]) {
4755 struct ifa_cacheinfo *ci;
4756
4757 ci = nla_data(tb[IFA_CACHEINFO]);
4758 cfg.valid_lft = ci->ifa_valid;
4759 cfg.preferred_lft = ci->ifa_prefered;
4760 }
4761
4762 dev = __dev_get_by_index(net, ifm->ifa_index);
4763 if (!dev)
4764 return -ENODEV;
4765
4766 if (tb[IFA_FLAGS])
4767 cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
4768 else
4769 cfg.ifa_flags = ifm->ifa_flags;
4770
4771 /* We ignore other flags so far. */
4772 cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4773 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4774 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4775
4776 idev = ipv6_find_idev(dev);
4777 if (!idev)
4778 return -ENOBUFS;
4779
4780 if (!ipv6_allow_optimistic_dad(net, idev))
4781 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
4782
4783 if (cfg.ifa_flags & IFA_F_NODAD &&
4784 cfg.ifa_flags & IFA_F_OPTIMISTIC) {
4785 NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
4786 return -EINVAL;
4787 }
4788
4789 ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
4790 if (!ifa) {
4791 /*
4792 * It would be best to check for !NLM_F_CREATE here but
4793 * userspace already relies on not having to provide this.
4794 */
4795 return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
4796 }
4797
4798 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4799 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4800 err = -EEXIST;
4801 else
4802 err = inet6_addr_modify(ifa, &cfg);
4803
4804 in6_ifa_put(ifa);
4805
4806 return err;
4807 }
4808
4809 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4810 u8 scope, int ifindex)
4811 {
4812 struct ifaddrmsg *ifm;
4813
4814 ifm = nlmsg_data(nlh);
4815 ifm->ifa_family = AF_INET6;
4816 ifm->ifa_prefixlen = prefixlen;
4817 ifm->ifa_flags = flags;
4818 ifm->ifa_scope = scope;
4819 ifm->ifa_index = ifindex;
4820 }
4821
4822 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4823 unsigned long tstamp, u32 preferred, u32 valid)
4824 {
4825 struct ifa_cacheinfo ci;
4826
4827 ci.cstamp = cstamp_delta(cstamp);
4828 ci.tstamp = cstamp_delta(tstamp);
4829 ci.ifa_prefered = preferred;
4830 ci.ifa_valid = valid;
4831
4832 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4833 }
4834
4835 static inline int rt_scope(int ifa_scope)
4836 {
4837 if (ifa_scope & IFA_HOST)
4838 return RT_SCOPE_HOST;
4839 else if (ifa_scope & IFA_LINK)
4840 return RT_SCOPE_LINK;
4841 else if (ifa_scope & IFA_SITE)
4842 return RT_SCOPE_SITE;
4843 else
4844 return RT_SCOPE_UNIVERSE;
4845 }
4846
4847 static inline int inet6_ifaddr_msgsize(void)
4848 {
4849 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4850 + nla_total_size(16) /* IFA_LOCAL */
4851 + nla_total_size(16) /* IFA_ADDRESS */
4852 + nla_total_size(sizeof(struct ifa_cacheinfo))
4853 + nla_total_size(4) /* IFA_FLAGS */
4854 + nla_total_size(4) /* IFA_RT_PRIORITY */;
4855 }
4856
4857 enum addr_type_t {
4858 UNICAST_ADDR,
4859 MULTICAST_ADDR,
4860 ANYCAST_ADDR,
4861 };
4862
4863 struct inet6_fill_args {
4864 u32 portid;
4865 u32 seq;
4866 int event;
4867 unsigned int flags;
4868 int netnsid;
4869 int ifindex;
4870 enum addr_type_t type;
4871 };
4872
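/*
 * Fill one RTM_NEWADDR message.  Lifetimes are reported as time
 * remaining (the age since tstamp is subtracted), except for permanent
 * addresses with an infinite preferred lifetime, which stay infinite.
 */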
4873 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4874 struct inet6_fill_args *args)
4875 {
4876 struct nlmsghdr *nlh;
4877 u32 preferred, valid;
4878
4879 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
4880 sizeof(struct ifaddrmsg), args->flags);
4881 if (!nlh)
4882 return -EMSGSIZE;
4883
4884 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4885 ifa->idev->dev->ifindex);
4886
4887 if (args->netnsid >= 0 &&
4888 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
4889 goto error;
4890
4891 if (!((ifa->flags&IFA_F_PERMANENT) &&
4892 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
4893 preferred = ifa->prefered_lft;
4894 valid = ifa->valid_lft;
4895 if (preferred != INFINITY_LIFE_TIME) {
4896 long tval = (jiffies - ifa->tstamp)/HZ;
4897 if (preferred > tval)
4898 preferred -= tval;
4899 else
4900 preferred = 0;
4901 if (valid != INFINITY_LIFE_TIME) {
4902 if (valid > tval)
4903 valid -= tval;
4904 else
4905 valid = 0;
4906 }
4907 }
4908 } else {
4909 preferred = INFINITY_LIFE_TIME;
4910 valid = INFINITY_LIFE_TIME;
4911 }
4912
4913 if (!ipv6_addr_any(&ifa->peer_addr)) {
4914 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
4915 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
4916 goto error;
4917 } else
4918 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
4919 goto error;
4920
4921 if (ifa->rt_priority &&
4922 nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
4923 goto error;
4924
4925 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
4926 goto error;
4927
4928 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
4929 goto error;
4930
4931 nlmsg_end(skb, nlh);
4932 return 0;
4933
4934 error:
4935 nlmsg_cancel(skb, nlh);
4936 return -EMSGSIZE;
4937 }
4938
4939 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
4940 struct inet6_fill_args *args)
4941 {
4942 struct nlmsghdr *nlh;
4943 u8 scope = RT_SCOPE_UNIVERSE;
4944 int ifindex = ifmca->idev->dev->ifindex;
4945
4946 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
4947 scope = RT_SCOPE_SITE;
4948
4949 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
4950 sizeof(struct ifaddrmsg), args->flags);
4951 if (!nlh)
4952 return -EMSGSIZE;
4953
4954 if (args->netnsid >= 0 &&
4955 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
4956 return -EMSGSIZE;
4957
4958 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4959 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
4960 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
4961 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4962 nlmsg_cancel(skb, nlh);
4963 return -EMSGSIZE;
4964 }
4965
4966 nlmsg_end(skb, nlh);
4967 return 0;
4968 }
4969
4970 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
4971 struct inet6_fill_args *args)
4972 {
4973 struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
4974 int ifindex = dev ? dev->ifindex : 1;
4975 struct nlmsghdr *nlh;
4976 u8 scope = RT_SCOPE_UNIVERSE;
4977
4978 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
4979 scope = RT_SCOPE_SITE;
4980
4981 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
4982 sizeof(struct ifaddrmsg), args->flags);
4983 if (!nlh)
4984 return -EMSGSIZE;
4985
4986 if (args->netnsid >= 0 &&
4987 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
4988 return -EMSGSIZE;
4989
4990 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4991 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
4992 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
4993 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4994 nlmsg_cancel(skb, nlh);
4995 return -EMSGSIZE;
4996 }
4997
4998 nlmsg_end(skb, nlh);
4999 return 0;
5000 }
5001
5002 /* called with rcu_read_lock() */
5003 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
5004 struct netlink_callback *cb, int s_ip_idx,
5005 struct inet6_fill_args *fillargs)
5006 {
5007 struct ifmcaddr6 *ifmca;
5008 struct ifacaddr6 *ifaca;
5009 int ip_idx = 0;
5010 int err = 1;
5011
5012 read_lock_bh(&idev->lock);
5013 switch (fillargs->type) {
5014 case UNICAST_ADDR: {
5015 struct inet6_ifaddr *ifa;
5016 fillargs->event = RTM_NEWADDR;
5017
5018 /* unicast address incl. temp addr */
5019 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5020 if (ip_idx < s_ip_idx)
5021 goto next;
5022 err = inet6_fill_ifaddr(skb, ifa, fillargs);
5023 if (err < 0)
5024 break;
5025 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5026 next:
5027 ip_idx++;
5028 }
5029 break;
5030 }
5031 case MULTICAST_ADDR:
5032 fillargs->event = RTM_GETMULTICAST;
5033
5034 /* multicast address */
5035 for (ifmca = idev->mc_list; ifmca;
5036 ifmca = ifmca->next, ip_idx++) {
5037 if (ip_idx < s_ip_idx)
5038 continue;
5039 err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5040 if (err < 0)
5041 break;
5042 }
5043 break;
5044 case ANYCAST_ADDR:
5045 fillargs->event = RTM_GETANYCAST;
5046 /* anycast address */
5047 for (ifaca = idev->ac_list; ifaca;
5048 ifaca = ifaca->aca_next, ip_idx++) {
5049 if (ip_idx < s_ip_idx)
5050 continue;
5051 err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5052 if (err < 0)
5053 break;
5054 }
5055 break;
5056 default:
5057 break;
5058 }
5059 read_unlock_bh(&idev->lock);
5060 cb->args[2] = ip_idx;
5061 return err;
5062 }
5063
5064 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5065 struct inet6_fill_args *fillargs,
5066 struct net **tgt_net, struct sock *sk,
5067 struct netlink_callback *cb)
5068 {
5069 struct netlink_ext_ack *extack = cb->extack;
5070 struct nlattr *tb[IFA_MAX+1];
5071 struct ifaddrmsg *ifm;
5072 int err, i;
5073
5074 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5075 NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5076 return -EINVAL;
5077 }
5078
5079 ifm = nlmsg_data(nlh);
5080 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5081 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5082 return -EINVAL;
5083 }
5084
5085 fillargs->ifindex = ifm->ifa_index;
5086 if (fillargs->ifindex) {
5087 cb->answer_flags |= NLM_F_DUMP_FILTERED;
5088 fillargs->flags |= NLM_F_DUMP_FILTERED;
5089 }
5090
5091 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5092 ifa_ipv6_policy, extack);
5093 if (err < 0)
5094 return err;
5095
5096 for (i = 0; i <= IFA_MAX; ++i) {
5097 if (!tb[i])
5098 continue;
5099
5100 if (i == IFA_TARGET_NETNSID) {
5101 struct net *net;
5102
5103 fillargs->netnsid = nla_get_s32(tb[i]);
5104 net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5105 if (IS_ERR(net)) {
5106 fillargs->netnsid = -1;
5107 NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5108 return PTR_ERR(net);
5109 }
5110 *tgt_net = net;
5111 } else {
5112 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
5113 return -EINVAL;
5114 }
5115 }
5116
5117 return 0;
5118 }
5119
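/*
 * Address dump.  Resume state lives in cb->args: [0] is the device hash
 * bucket, [1] the device index within that bucket and [2] the address
 * index on the device, so an interrupted dump continues where it stopped.
 */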
5120 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5121 enum addr_type_t type)
5122 {
5123 const struct nlmsghdr *nlh = cb->nlh;
5124 struct inet6_fill_args fillargs = {
5125 .portid = NETLINK_CB(cb->skb).portid,
5126 .seq = cb->nlh->nlmsg_seq,
5127 .flags = NLM_F_MULTI,
5128 .netnsid = -1,
5129 .type = type,
5130 };
5131 struct net *net = sock_net(skb->sk);
5132 struct net *tgt_net = net;
5133 int idx, s_idx, s_ip_idx;
5134 int h, s_h;
5135 struct net_device *dev;
5136 struct inet6_dev *idev;
5137 struct hlist_head *head;
5138 int err = 0;
5139
5140 s_h = cb->args[0];
5141 s_idx = idx = cb->args[1];
5142 s_ip_idx = cb->args[2];
5143
5144 if (cb->strict_check) {
5145 err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5146 skb->sk, cb);
5147 if (err < 0)
5148 goto put_tgt_net;
5149
5150 err = 0;
5151 if (fillargs.ifindex) {
5152 dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
5153 if (!dev) {
5154 err = -ENODEV;
5155 goto put_tgt_net;
5156 }
5157 idev = __in6_dev_get(dev);
5158 if (idev) {
5159 err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5160 &fillargs);
5161 if (err > 0)
5162 err = 0;
5163 }
5164 goto put_tgt_net;
5165 }
5166 }
5167
5168 rcu_read_lock();
5169 cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
5170 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5171 idx = 0;
5172 head = &tgt_net->dev_index_head[h];
5173 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5174 if (idx < s_idx)
5175 goto cont;
5176 if (h > s_h || idx > s_idx)
5177 s_ip_idx = 0;
5178 idev = __in6_dev_get(dev);
5179 if (!idev)
5180 goto cont;
5181
5182 if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
5183 &fillargs) < 0)
5184 goto done;
5185 cont:
5186 idx++;
5187 }
5188 }
5189 done:
5190 rcu_read_unlock();
5191 cb->args[0] = h;
5192 cb->args[1] = idx;
5193 put_tgt_net:
5194 if (fillargs.netnsid >= 0)
5195 put_net(tgt_net);
5196
5197 return skb->len ? : err;
5198 }
5199
5200 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5201 {
5202 enum addr_type_t type = UNICAST_ADDR;
5203
5204 return inet6_dump_addr(skb, cb, type);
5205 }
5206
5207 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5208 {
5209 enum addr_type_t type = MULTICAST_ADDR;
5210
5211 return inet6_dump_addr(skb, cb, type);
5212 }
5213
5214
5215 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5216 {
5217 enum addr_type_t type = ANYCAST_ADDR;
5218
5219 return inet6_dump_addr(skb, cb, type);
5220 }
5221
5222 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5223 const struct nlmsghdr *nlh,
5224 struct nlattr **tb,
5225 struct netlink_ext_ack *extack)
5226 {
5227 struct ifaddrmsg *ifm;
5228 int i, err;
5229
5230 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5231 NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5232 return -EINVAL;
5233 }
5234
5235 ifm = nlmsg_data(nlh);
5236 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5237 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5238 return -EINVAL;
5239 }
5240
5241 if (!netlink_strict_get_check(skb))
5242 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5243 ifa_ipv6_policy, extack);
5244
5245 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5246 ifa_ipv6_policy, extack);
5247 if (err)
5248 return err;
5249
5250 for (i = 0; i <= IFA_MAX; i++) {
5251 if (!tb[i])
5252 continue;
5253
5254 switch (i) {
5255 case IFA_TARGET_NETNSID:
5256 case IFA_ADDRESS:
5257 case IFA_LOCAL:
5258 break;
5259 default:
5260 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
5261 return -EINVAL;
5262 }
5263 }
5264
5265 return 0;
5266 }
5267
5268 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5269 struct netlink_ext_ack *extack)
5270 {
5271 struct net *net = sock_net(in_skb->sk);
5272 struct inet6_fill_args fillargs = {
5273 .portid = NETLINK_CB(in_skb).portid,
5274 .seq = nlh->nlmsg_seq,
5275 .event = RTM_NEWADDR,
5276 .flags = 0,
5277 .netnsid = -1,
5278 };
5279 struct net *tgt_net = net;
5280 struct ifaddrmsg *ifm;
5281 struct nlattr *tb[IFA_MAX+1];
5282 struct in6_addr *addr = NULL, *peer;
5283 struct net_device *dev = NULL;
5284 struct inet6_ifaddr *ifa;
5285 struct sk_buff *skb;
5286 int err;
5287
5288 err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5289 if (err < 0)
5290 return err;
5291
5292 if (tb[IFA_TARGET_NETNSID]) {
5293 fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5294
5295 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5296 fillargs.netnsid);
5297 if (IS_ERR(tgt_net))
5298 return PTR_ERR(tgt_net);
5299 }
5300
5301 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5302 if (!addr)
5303 return -EINVAL;
5304
5305 ifm = nlmsg_data(nlh);
5306 if (ifm->ifa_index)
5307 dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5308
5309 ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5310 if (!ifa) {
5311 err = -EADDRNOTAVAIL;
5312 goto errout;
5313 }
5314
5315 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5316 if (!skb) {
5317 err = -ENOBUFS;
5318 goto errout_ifa;
5319 }
5320
5321 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5322 if (err < 0) {
5323 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5324 WARN_ON(err == -EMSGSIZE);
5325 kfree_skb(skb);
5326 goto errout_ifa;
5327 }
5328 err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5329 errout_ifa:
5330 in6_ifa_put(ifa);
5331 errout:
5332 if (dev)
5333 dev_put(dev);
5334 if (fillargs.netnsid >= 0)
5335 put_net(tgt_net);
5336
5337 return err;
5338 }
5339
5340 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5341 {
5342 struct sk_buff *skb;
5343 struct net *net = dev_net(ifa->idev->dev);
5344 struct inet6_fill_args fillargs = {
5345 .portid = 0,
5346 .seq = 0,
5347 .event = event,
5348 .flags = 0,
5349 .netnsid = -1,
5350 };
5351 int err = -ENOBUFS;
5352
5353 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5354 if (!skb)
5355 goto errout;
5356
5357 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5358 if (err < 0) {
5359 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5360 WARN_ON(err == -EMSGSIZE);
5361 kfree_skb(skb);
5362 goto errout;
5363 }
5364 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5365 return;
5366 errout:
5367 if (err < 0)
5368 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5369 }
5370
5371 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5372 __s32 *array, int bytes)
5373 {
5374 BUG_ON(bytes < (DEVCONF_MAX * 4));
5375
5376 memset(array, 0, bytes);
5377 array[DEVCONF_FORWARDING] = cnf->forwarding;
5378 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5379 array[DEVCONF_MTU6] = cnf->mtu6;
5380 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5381 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5382 array[DEVCONF_AUTOCONF] = cnf->autoconf;
5383 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5384 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5385 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5386 jiffies_to_msecs(cnf->rtr_solicit_interval);
5387 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5388 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5389 array[DEVCONF_RTR_SOLICIT_DELAY] =
5390 jiffies_to_msecs(cnf->rtr_solicit_delay);
5391 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5392 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5393 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5394 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5395 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5396 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5397 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5398 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5399 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5400 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5401 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5402 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5403 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5404 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5405 #ifdef CONFIG_IPV6_ROUTER_PREF
5406 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5407 array[DEVCONF_RTR_PROBE_INTERVAL] =
5408 jiffies_to_msecs(cnf->rtr_probe_interval);
5409 #ifdef CONFIG_IPV6_ROUTE_INFO
5410 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5411 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5412 #endif
5413 #endif
5414 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5415 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5416 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5417 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5418 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5419 #endif
5420 #ifdef CONFIG_IPV6_MROUTE
5421 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
5422 #endif
5423 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5424 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5425 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5426 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5427 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5428 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5429 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5430 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5431 /* we omit DEVCONF_STABLE_SECRET for now */
5432 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5433 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5434 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5435 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5436 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5437 #ifdef CONFIG_IPV6_SEG6_HMAC
5438 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5439 #endif
5440 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5441 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5442 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5443 array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5444 }
5445
5446 static inline size_t inet6_ifla6_size(void)
5447 {
5448 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5449 + nla_total_size(sizeof(struct ifla_cacheinfo))
5450 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5451 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5452 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5453 + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5454 + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5455 + 0;
5456 }
5457
5458 static inline size_t inet6_if_nlmsg_size(void)
5459 {
5460 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5461 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5462 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5463 + nla_total_size(4) /* IFLA_MTU */
5464 + nla_total_size(4) /* IFLA_LINK */
5465 + nla_total_size(1) /* IFLA_OPERSTATE */
5466 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5467 }
5468
5469 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5470 int bytes)
5471 {
5472 int i;
5473 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5474 BUG_ON(pad < 0);
5475
5476 /* Use put_unaligned() because stats may not be aligned for u64. */
5477 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5478 for (i = 1; i < ICMP6_MIB_MAX; i++)
5479 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5480
5481 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5482 }
5483
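/*
 * Sum the per-cpu IPSTATS counters into one array for netlink; slot 0
 * carries the number of counters rather than a statistic, matching the
 * icmp6 variant above.
 */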
5484 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5485 int bytes, size_t syncpoff)
5486 {
5487 int i, c;
5488 u64 buff[IPSTATS_MIB_MAX];
5489 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5490
5491 BUG_ON(pad < 0);
5492
5493 memset(buff, 0, sizeof(buff));
5494 buff[0] = IPSTATS_MIB_MAX;
5495
5496 for_each_possible_cpu(c) {
5497 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5498 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5499 }
5500
5501 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5502 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5503 }
5504
5505 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5506 int bytes)
5507 {
5508 switch (attrtype) {
5509 case IFLA_INET6_STATS:
5510 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5511 offsetof(struct ipstats_mib, syncp));
5512 break;
5513 case IFLA_INET6_ICMP6STATS:
5514 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5515 break;
5516 }
5517 }
5518
5519 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5520 u32 ext_filter_mask)
5521 {
5522 struct nlattr *nla;
5523 struct ifla_cacheinfo ci;
5524
5525 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5526 goto nla_put_failure;
5527 ci.max_reasm_len = IPV6_MAXPLEN;
5528 ci.tstamp = cstamp_delta(idev->tstamp);
5529 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5530 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5531 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5532 goto nla_put_failure;
5533 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5534 if (!nla)
5535 goto nla_put_failure;
5536 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5537
5538 /* XXX - MC not implemented */
5539
5540 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5541 return 0;
5542
5543 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5544 if (!nla)
5545 goto nla_put_failure;
5546 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5547
5548 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5549 if (!nla)
5550 goto nla_put_failure;
5551 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5552
5553 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5554 if (!nla)
5555 goto nla_put_failure;
5556
5557 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5558 goto nla_put_failure;
5559
5560 read_lock_bh(&idev->lock);
5561 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5562 read_unlock_bh(&idev->lock);
5563
5564 return 0;
5565
5566 nla_put_failure:
5567 return -EMSGSIZE;
5568 }
5569
5570 static size_t inet6_get_link_af_size(const struct net_device *dev,
5571 u32 ext_filter_mask)
5572 {
5573 if (!__in6_dev_get(dev))
5574 return 0;
5575
5576 return inet6_ifla6_size();
5577 }
5578
5579 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5580 u32 ext_filter_mask)
5581 {
5582 struct inet6_dev *idev = __in6_dev_get(dev);
5583
5584 if (!idev)
5585 return -ENODATA;
5586
5587 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5588 return -EMSGSIZE;
5589
5590 return 0;
5591 }
5592
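/*
 * Install a tokenized interface identifier: the lower 64 bits of @token
 * replace the interface identifier used for RA-derived addresses.
 * Addresses built from the old token have their lifetimes zeroed so
 * addrconf_verify_rtnl() retires them, and a fresh router solicitation
 * is sent so replacements appear quickly.  Typically driven by something
 * like "ip token set ::2 dev eth0" (an iproute2 illustration, not part
 * of this file).
 */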
5593 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5594 {
5595 struct inet6_ifaddr *ifp;
5596 struct net_device *dev = idev->dev;
5597 bool clear_token, update_rs = false;
5598 struct in6_addr ll_addr;
5599
5600 ASSERT_RTNL();
5601
5602 if (!token)
5603 return -EINVAL;
5604 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5605 return -EINVAL;
5606 if (!ipv6_accept_ra(idev))
5607 return -EINVAL;
5608 if (idev->cnf.rtr_solicits == 0)
5609 return -EINVAL;
5610
5611 write_lock_bh(&idev->lock);
5612
5613 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5614 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5615
5616 write_unlock_bh(&idev->lock);
5617
5618 clear_token = ipv6_addr_any(token);
5619 if (clear_token)
5620 goto update_lft;
5621
5622 if (!idev->dead && (idev->if_flags & IF_READY) &&
5623 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5624 IFA_F_OPTIMISTIC)) {
5625 /* If we're not ready, then normal ifup will take care
5626 * of this. Otherwise, we need to send our RS here.
5627 */
5628 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5629 update_rs = true;
5630 }
5631
5632 update_lft:
5633 write_lock_bh(&idev->lock);
5634
5635 if (update_rs) {
5636 idev->if_flags |= IF_RS_SENT;
5637 idev->rs_interval = rfc3315_s14_backoff_init(
5638 idev->cnf.rtr_solicit_interval);
5639 idev->rs_probes = 1;
5640 addrconf_mod_rs_timer(idev, idev->rs_interval);
5641 }
5642
5643 /* Well, that's kinda nasty: expire addresses built from the old token so they are regenerated with the new one. */
5644 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5645 spin_lock(&ifp->lock);
5646 if (ifp->tokenized) {
5647 ifp->valid_lft = 0;
5648 ifp->prefered_lft = 0;
5649 }
5650 spin_unlock(&ifp->lock);
5651 }
5652
5653 write_unlock_bh(&idev->lock);
5654 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5655 addrconf_verify_rtnl();
5656 return 0;
5657 }
5658
5659 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5660 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5661 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5662 };
5663
5664 static int inet6_validate_link_af(const struct net_device *dev,
5665 const struct nlattr *nla)
5666 {
5667 struct nlattr *tb[IFLA_INET6_MAX + 1];
5668
5669 if (dev && !__in6_dev_get(dev))
5670 return -EAFNOSUPPORT;
5671
5672 return nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
5673 inet6_af_policy, NULL);
5674 }
5675
5676 static int check_addr_gen_mode(int mode)
5677 {
5678 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5679 mode != IN6_ADDR_GEN_MODE_NONE &&
5680 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5681 mode != IN6_ADDR_GEN_MODE_RANDOM)
5682 return -EINVAL;
5683 return 1;
5684 }
5685
5686 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5687 int mode)
5688 {
5689 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5690 !idev->cnf.stable_secret.initialized &&
5691 !net->ipv6.devconf_dflt->stable_secret.initialized)
5692 return -EINVAL;
5693 return 1;
5694 }
5695
5696 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5697 {
5698 int err = -EINVAL;
5699 struct inet6_dev *idev = __in6_dev_get(dev);
5700 struct nlattr *tb[IFLA_INET6_MAX + 1];
5701
5702 if (!idev)
5703 return -EAFNOSUPPORT;
5704
5705 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5706 BUG();
5707
5708 if (tb[IFLA_INET6_TOKEN]) {
5709 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5710 if (err)
5711 return err;
5712 }
5713
5714 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5715 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5716
5717 if (check_addr_gen_mode(mode) < 0 ||
5718 check_stable_privacy(idev, dev_net(dev), mode) < 0)
5719 return -EINVAL;
5720
5721 idev->cnf.addr_gen_mode = mode;
5722 err = 0;
5723 }
5724
5725 return err;
5726 }
5727
5728 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5729 u32 portid, u32 seq, int event, unsigned int flags)
5730 {
5731 struct net_device *dev = idev->dev;
5732 struct ifinfomsg *hdr;
5733 struct nlmsghdr *nlh;
5734 void *protoinfo;
5735
5736 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5737 if (!nlh)
5738 return -EMSGSIZE;
5739
5740 hdr = nlmsg_data(nlh);
5741 hdr->ifi_family = AF_INET6;
5742 hdr->__ifi_pad = 0;
5743 hdr->ifi_type = dev->type;
5744 hdr->ifi_index = dev->ifindex;
5745 hdr->ifi_flags = dev_get_flags(dev);
5746 hdr->ifi_change = 0;
5747
5748 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5749 (dev->addr_len &&
5750 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5751 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5752 (dev->ifindex != dev_get_iflink(dev) &&
5753 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5754 nla_put_u8(skb, IFLA_OPERSTATE,
5755 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5756 goto nla_put_failure;
5757 protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
5758 if (!protoinfo)
5759 goto nla_put_failure;
5760
5761 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5762 goto nla_put_failure;
5763
5764 nla_nest_end(skb, protoinfo);
5765 nlmsg_end(skb, nlh);
5766 return 0;
5767
5768 nla_put_failure:
5769 nlmsg_cancel(skb, nlh);
5770 return -EMSGSIZE;
5771 }
5772
5773 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
5774 struct netlink_ext_ack *extack)
5775 {
5776 struct ifinfomsg *ifm;
5777
5778 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5779 NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
5780 return -EINVAL;
5781 }
5782
5783 if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
5784 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
5785 return -EINVAL;
5786 }
5787
5788 ifm = nlmsg_data(nlh);
5789 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5790 ifm->ifi_change || ifm->ifi_index) {
5791 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
5792 return -EINVAL;
5793 }
5794
5795 return 0;
5796 }
5797
5798 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5799 {
5800 struct net *net = sock_net(skb->sk);
5801 int h, s_h;
5802 int idx = 0, s_idx;
5803 struct net_device *dev;
5804 struct inet6_dev *idev;
5805 struct hlist_head *head;
5806
5807 /* only requests using strict checking can pass data to
5808 * influence the dump
5809 */
5810 if (cb->strict_check) {
5811 int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
5812
5813 if (err < 0)
5814 return err;
5815 }
5816
5817 s_h = cb->args[0];
5818 s_idx = cb->args[1];
5819
5820 rcu_read_lock();
5821 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5822 idx = 0;
5823 head = &net->dev_index_head[h];
5824 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5825 if (idx < s_idx)
5826 goto cont;
5827 idev = __in6_dev_get(dev);
5828 if (!idev)
5829 goto cont;
5830 if (inet6_fill_ifinfo(skb, idev,
5831 NETLINK_CB(cb->skb).portid,
5832 cb->nlh->nlmsg_seq,
5833 RTM_NEWLINK, NLM_F_MULTI) < 0)
5834 goto out;
5835 cont:
5836 idx++;
5837 }
5838 }
5839 out:
5840 rcu_read_unlock();
5841 cb->args[1] = idx;
5842 cb->args[0] = h;
5843
5844 return skb->len;
5845 }
5846
5847 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5848 {
5849 struct sk_buff *skb;
5850 struct net *net = dev_net(idev->dev);
5851 int err = -ENOBUFS;
5852
5853 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5854 if (!skb)
5855 goto errout;
5856
5857 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5858 if (err < 0) {
5859 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
5860 WARN_ON(err == -EMSGSIZE);
5861 kfree_skb(skb);
5862 goto errout;
5863 }
5864 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
5865 return;
5866 errout:
5867 if (err < 0)
5868 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
5869 }
5870
5871 static inline size_t inet6_prefix_nlmsg_size(void)
5872 {
5873 return NLMSG_ALIGN(sizeof(struct prefixmsg))
5874 + nla_total_size(sizeof(struct in6_addr))
5875 + nla_total_size(sizeof(struct prefix_cacheinfo));
5876 }
5877
5878 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
5879 struct prefix_info *pinfo, u32 portid, u32 seq,
5880 int event, unsigned int flags)
5881 {
5882 struct prefixmsg *pmsg;
5883 struct nlmsghdr *nlh;
5884 struct prefix_cacheinfo ci;
5885
5886 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
5887 if (!nlh)
5888 return -EMSGSIZE;
5889
5890 pmsg = nlmsg_data(nlh);
5891 pmsg->prefix_family = AF_INET6;
5892 pmsg->prefix_pad1 = 0;
5893 pmsg->prefix_pad2 = 0;
5894 pmsg->prefix_ifindex = idev->dev->ifindex;
5895 pmsg->prefix_len = pinfo->prefix_len;
5896 pmsg->prefix_type = pinfo->type;
5897 pmsg->prefix_pad3 = 0;
5898 pmsg->prefix_flags = 0;
5899 if (pinfo->onlink)
5900 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
5901 if (pinfo->autoconf)
5902 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
5903
5904 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
5905 goto nla_put_failure;
5906 ci.preferred_time = ntohl(pinfo->prefered);
5907 ci.valid_time = ntohl(pinfo->valid);
5908 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
5909 goto nla_put_failure;
5910 nlmsg_end(skb, nlh);
5911 return 0;
5912
5913 nla_put_failure:
5914 nlmsg_cancel(skb, nlh);
5915 return -EMSGSIZE;
5916 }
5917
5918 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
5919 struct prefix_info *pinfo)
5920 {
5921 struct sk_buff *skb;
5922 struct net *net = dev_net(idev->dev);
5923 int err = -ENOBUFS;
5924
5925 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
5926 if (!skb)
5927 goto errout;
5928
5929 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
5930 if (err < 0) {
5931 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
5932 WARN_ON(err == -EMSGSIZE);
5933 kfree_skb(skb);
5934 goto errout;
5935 }
5936 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
5937 return;
5938 errout:
5939 if (err < 0)
5940 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
5941 }
5942
5943 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5944 {
5945 struct net *net = dev_net(ifp->idev->dev);
5946
5947 if (event)
5948 ASSERT_RTNL();
5949
5950 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
5951
5952 switch (event) {
5953 case RTM_NEWADDR:
5954 /*
5955 * If the address was optimistic
5956 * we inserted the route at the start of
5957 * our DAD process, so we don't need
5958 * to do it again
5959 */
5960 if (!rcu_access_pointer(ifp->rt->fib6_node))
5961 ip6_ins_rt(net, ifp->rt);
5962 if (ifp->idev->cnf.forwarding)
5963 addrconf_join_anycast(ifp);
5964 if (!ipv6_addr_any(&ifp->peer_addr))
5965 addrconf_prefix_route(&ifp->peer_addr, 128, 0,
5966 ifp->idev->dev, 0, 0,
5967 GFP_ATOMIC);
5968 break;
5969 case RTM_DELADDR:
5970 if (ifp->idev->cnf.forwarding)
5971 addrconf_leave_anycast(ifp);
5972 addrconf_leave_solict(ifp->idev, &ifp->addr);
5973 if (!ipv6_addr_any(&ifp->peer_addr)) {
5974 struct fib6_info *rt;
5975
5976 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5977 ifp->idev->dev, 0, 0,
5978 false);
5979 if (rt)
5980 ip6_del_rt(net, rt);
5981 }
5982 if (ifp->rt) {
5983 ip6_del_rt(net, ifp->rt);
5984 ifp->rt = NULL;
5985 }
5986 rt_genid_bump_ipv6(net);
5987 break;
5988 }
5989 atomic_inc(&net->ipv6.dev_addr_genid);
5990 }
5991
5992 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5993 {
5994 rcu_read_lock_bh();
5995 if (likely(ifp->idev->dead == 0))
5996 __ipv6_ifa_notify(event, ifp);
5997 rcu_read_unlock_bh();
5998 }
5999
6000 #ifdef CONFIG_SYSCTL
6001
6002 static
6003 int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
6004 void __user *buffer, size_t *lenp, loff_t *ppos)
6005 {
6006 int *valp = ctl->data;
6007 int val = *valp;
6008 loff_t pos = *ppos;
6009 struct ctl_table lctl;
6010 int ret;
6011
6012 /*
6013 * ctl->data points to idev->cnf.forwarding; we must
6014 * not modify it until we hold the RTNL lock.
6015 */
6016 lctl = *ctl;
6017 lctl.data = &val;
6018
6019 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6020
6021 if (write)
6022 ret = addrconf_fixup_forwarding(ctl, valp, val);
6023 if (ret)
6024 *ppos = pos;
6025 return ret;
6026 }
6027
6028 static
6029 int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
6030 void __user *buffer, size_t *lenp, loff_t *ppos)
6031 {
6032 struct inet6_dev *idev = ctl->extra1;
6033 int min_mtu = IPV6_MIN_MTU;
6034 struct ctl_table lctl;
6035
6036 lctl = *ctl;
6037 lctl.extra1 = &min_mtu;
6038 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6039
6040 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6041 }
6042
6043 static void dev_disable_change(struct inet6_dev *idev)
6044 {
6045 struct netdev_notifier_info info;
6046
6047 if (!idev || !idev->dev)
6048 return;
6049
6050 netdev_notifier_info_init(&info, idev->dev);
6051 if (idev->cnf.disable_ipv6)
6052 addrconf_notify(NULL, NETDEV_DOWN, &info);
6053 else
6054 addrconf_notify(NULL, NETDEV_UP, &info);
6055 }
6056
6057 static void addrconf_disable_change(struct net *net, __s32 newf)
6058 {
6059 struct net_device *dev;
6060 struct inet6_dev *idev;
6061
6062 for_each_netdev(net, dev) {
6063 idev = __in6_dev_get(dev);
6064 if (idev) {
6065 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6066 idev->cnf.disable_ipv6 = newf;
6067 if (changed)
6068 dev_disable_change(idev);
6069 }
6070 }
6071 }
6072
6073 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
6074 {
6075 struct net *net;
6076 int old;
6077
6078 if (!rtnl_trylock())
6079 return restart_syscall();
6080
6081 net = (struct net *)table->extra2;
6082 old = *p;
6083 *p = newf;
6084
6085 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6086 rtnl_unlock();
6087 return 0;
6088 }
6089
6090 if (p == &net->ipv6.devconf_all->disable_ipv6) {
6091 net->ipv6.devconf_dflt->disable_ipv6 = newf;
6092 addrconf_disable_change(net, newf);
6093 } else if ((!newf) ^ (!old))
6094 dev_disable_change((struct inet6_dev *)table->extra1);
6095
6096 rtnl_unlock();
6097 return 0;
6098 }
6099
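/*
 * sysctl handler for disable_ipv6.  Writing through conf/all also
 * updates the default and toggles every device, so, for example
 * (illustration only), "sysctl -w net.ipv6.conf.all.disable_ipv6=1"
 * brings IPv6 down on all interfaces via the NETDEV_DOWN path in
 * dev_disable_change().
 */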
6100 static
6101 int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
6102 void __user *buffer, size_t *lenp, loff_t *ppos)
6103 {
6104 int *valp = ctl->data;
6105 int val = *valp;
6106 loff_t pos = *ppos;
6107 struct ctl_table lctl;
6108 int ret;
6109
6110 /*
6111 * ctl->data points to idev->cnf.disable_ipv6; we must
6112 * not modify it until we hold the RTNL lock.
6113 */
6114 lctl = *ctl;
6115 lctl.data = &val;
6116
6117 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6118
6119 if (write)
6120 ret = addrconf_disable_ipv6(ctl, valp, val);
6121 if (ret)
6122 *ppos = pos;
6123 return ret;
6124 }
6125
6126 static
6127 int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
6128 void __user *buffer, size_t *lenp, loff_t *ppos)
6129 {
6130 int *valp = ctl->data;
6131 int ret;
6132 int old, new;
6133
6134 old = *valp;
6135 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6136 new = *valp;
6137
6138 if (write && old != new) {
6139 struct net *net = ctl->extra2;
6140
6141 if (!rtnl_trylock())
6142 return restart_syscall();
6143
6144 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
6145 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6146 NETCONFA_PROXY_NEIGH,
6147 NETCONFA_IFINDEX_DEFAULT,
6148 net->ipv6.devconf_dflt);
6149 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
6150 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6151 NETCONFA_PROXY_NEIGH,
6152 NETCONFA_IFINDEX_ALL,
6153 net->ipv6.devconf_all);
6154 else {
6155 struct inet6_dev *idev = ctl->extra1;
6156
6157 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6158 NETCONFA_PROXY_NEIGH,
6159 idev->dev->ifindex,
6160 &idev->cnf);
6161 }
6162 rtnl_unlock();
6163 }
6164
6165 return ret;
6166 }
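/* Unlike the plain proc_dointvec knobs, a change to proxy_ndp is also
 * announced over rtnetlink: an RTM_NEWNETCONF message carrying
 * NETCONFA_PROXY_NEIGH is sent for whichever scope was written (default,
 * "all" or a single device), so netconf listeners (for example
 * "ip monitor netconf" from iproute2) can track the setting without polling
 * /proc.
 */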
6167
6168 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
6169 void __user *buffer, size_t *lenp,
6170 loff_t *ppos)
6171 {
6172 int ret = 0;
6173 u32 new_val;
6174 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6175 struct net *net = (struct net *)ctl->extra2;
6176 struct ctl_table tmp = {
6177 .data = &new_val,
6178 .maxlen = sizeof(new_val),
6179 .mode = ctl->mode,
6180 };
6181
6182 if (!rtnl_trylock())
6183 return restart_syscall();
6184
6185 new_val = *((u32 *)ctl->data);
6186
6187 ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6188 if (ret != 0)
6189 goto out;
6190
6191 if (write) {
6192 if (check_addr_gen_mode(new_val) < 0) {
6193 ret = -EINVAL;
6194 goto out;
6195 }
6196
6197 if (idev) {
6198 if (check_stable_privacy(idev, net, new_val) < 0) {
6199 ret = -EINVAL;
6200 goto out;
6201 }
6202
6203 if (idev->cnf.addr_gen_mode != new_val) {
6204 idev->cnf.addr_gen_mode = new_val;
6205 addrconf_dev_config(idev->dev);
6206 }
6207 } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6208 struct net_device *dev;
6209
6210 net->ipv6.devconf_dflt->addr_gen_mode = new_val;
6211 for_each_netdev(net, dev) {
6212 idev = __in6_dev_get(dev);
6213 if (idev &&
6214 idev->cnf.addr_gen_mode != new_val) {
6215 idev->cnf.addr_gen_mode = new_val;
6216 addrconf_dev_config(idev->dev);
6217 }
6218 }
6219 }
6220
6221 *((u32 *)ctl->data) = new_val;
6222 }
6223
6224 out:
6225 rtnl_unlock();
6226
6227 return ret;
6228 }
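/* The accepted values are the IN6_ADDR_GEN_MODE_* constants from
 * include/uapi/linux/if_link.h (EUI64, NONE, STABLE_PRIVACY, RANDOM);
 * check_addr_gen_mode() rejects anything else, and check_stable_privacy()
 * refuses stable-privacy mode unless a stable secret is already configured.
 * A minimal userspace sketch (the device name and mode are only examples):
 *
 *	#include <stdio.h>
 *
 *	static int set_addr_gen_mode(const char *dev, int mode)
 *	{
 *		char path[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/proc/sys/net/ipv6/conf/%s/addr_gen_mode", dev);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fprintf(f, "%d", mode);
 *		return fclose(f);
 *	}
 */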
6229
6230 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
6231 void __user *buffer, size_t *lenp,
6232 loff_t *ppos)
6233 {
6234 int err;
6235 struct in6_addr addr;
6236 char str[IPV6_MAX_STRLEN];
6237 struct ctl_table lctl = *ctl;
6238 struct net *net = ctl->extra2;
6239 struct ipv6_stable_secret *secret = ctl->data;
6240
6241 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6242 return -EIO;
6243
6244 lctl.maxlen = IPV6_MAX_STRLEN;
6245 lctl.data = str;
6246
6247 if (!rtnl_trylock())
6248 return restart_syscall();
6249
6250 if (!write && !secret->initialized) {
6251 err = -EIO;
6252 goto out;
6253 }
6254
6255 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6256 if (err >= sizeof(str)) {
6257 err = -EIO;
6258 goto out;
6259 }
6260
6261 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6262 if (err || !write)
6263 goto out;
6264
6265 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6266 err = -EIO;
6267 goto out;
6268 }
6269
6270 secret->initialized = true;
6271 secret->secret = addr;
6272
6273 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6274 struct net_device *dev;
6275
6276 for_each_netdev(net, dev) {
6277 struct inet6_dev *idev = __in6_dev_get(dev);
6278
6279 if (idev) {
6280 idev->cnf.addr_gen_mode =
6281 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6282 }
6283 }
6284 } else {
6285 struct inet6_dev *idev = ctl->extra1;
6286
6287 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6288 }
6289
6290 out:
6291 rtnl_unlock();
6292
6293 return err;
6294 }
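/* As the handler above shows, the secret crosses the proc boundary as an
 * IPv6-address-formatted string: reads print the 128-bit value with %pI6,
 * writes are parsed with in6_pton(), reading back fails with -EIO until a
 * secret has been configured, and the "all" entry rejects both reads and
 * writes.  Writing the "default" secret also switches every existing device
 * to stable-privacy address generation.  A minimal sketch (the secret below
 * is an arbitrary example value, not a recommendation):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int set_default_stable_secret(void)
 *	{
 *		const char *secret = "0123:4567:89ab:cdef:0123:4567:89ab:cdef";
 *		int fd = open("/proc/sys/net/ipv6/conf/default/stable_secret",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, secret, strlen(secret)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */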
6295
6296 static
6297 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
6298 int write,
6299 void __user *buffer,
6300 size_t *lenp,
6301 loff_t *ppos)
6302 {
6303 int *valp = ctl->data;
6304 int val = *valp;
6305 loff_t pos = *ppos;
6306 struct ctl_table lctl;
6307 int ret;
6308
6309 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
6310 * we should not modify it until we get the rtnl lock.
6311 */
6312 lctl = *ctl;
6313 lctl.data = &val;
6314
6315 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6316
6317 if (write)
6318 ret = addrconf_fixup_linkdown(ctl, valp, val);
6319 if (ret)
6320 *ppos = pos;
6321 return ret;
6322 }
6323
6324 static
6325 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6326 {
6327 if (rt) {
6328 if (action)
6329 rt->dst.flags |= DST_NOPOLICY;
6330 else
6331 rt->dst.flags &= ~DST_NOPOLICY;
6332 }
6333 }
6334
6335 static
6336 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6337 {
6338 struct inet6_ifaddr *ifa;
6339
6340 read_lock_bh(&idev->lock);
6341 list_for_each_entry(ifa, &idev->addr_list, if_list) {
6342 spin_lock(&ifa->lock);
6343 if (ifa->rt) {
6344 struct fib6_info *rt = ifa->rt;
6345 int cpu;
6346
6347 rcu_read_lock();
6348 ifa->rt->dst_nopolicy = val ? true : false;
6349 if (rt->rt6i_pcpu) {
6350 for_each_possible_cpu(cpu) {
6351 struct rt6_info **rtp;
6352
6353 rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
6354 addrconf_set_nopolicy(*rtp, val);
6355 }
6356 }
6357 rcu_read_unlock();
6358 }
6359 spin_unlock(&ifa->lock);
6360 }
6361 read_unlock_bh(&idev->lock);
6362 }
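/* Note that toggling the policy bit updates both the fib6_info's
 * dst_nopolicy flag and the DST_NOPOLICY flag of any cached per-cpu
 * rt6_info copies, walking rt6i_pcpu under rcu so the dst entries cannot
 * disappear underneath us.
 */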
6363
6364 static
6365 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
6366 {
6367 struct inet6_dev *idev;
6368 struct net *net;
6369
6370 if (!rtnl_trylock())
6371 return restart_syscall();
6372
6373 *valp = val;
6374
6375 net = (struct net *)ctl->extra2;
6376 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6377 rtnl_unlock();
6378 return 0;
6379 }
6380
6381 if (valp == &net->ipv6.devconf_all->disable_policy) {
6382 struct net_device *dev;
6383
6384 for_each_netdev(net, dev) {
6385 idev = __in6_dev_get(dev);
6386 if (idev)
6387 addrconf_disable_policy_idev(idev, val);
6388 }
6389 } else {
6390 idev = (struct inet6_dev *)ctl->extra1;
6391 addrconf_disable_policy_idev(idev, val);
6392 }
6393
6394 rtnl_unlock();
6395 return 0;
6396 }
6397
6398 static
6399 int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
6400 void __user *buffer, size_t *lenp,
6401 loff_t *ppos)
6402 {
6403 int *valp = ctl->data;
6404 int val = *valp;
6405 loff_t pos = *ppos;
6406 struct ctl_table lctl;
6407 int ret;
6408
6409 lctl = *ctl;
6410 lctl.data = &val;
6411 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6412
6413 if (write && (*valp != val))
6414 ret = addrconf_disable_policy(ctl, valp, val);
6415
6416 if (ret)
6417 *ppos = pos;
6418
6419 return ret;
6420 }
6421
6422 static int minus_one = -1;
6423 static const int zero = 0;
6424 static const int one = 1;
6425 static const int two_five_five = 255;
6426
6427 static const struct ctl_table addrconf_sysctl[] = {
6428 {
6429 .procname = "forwarding",
6430 .data = &ipv6_devconf.forwarding,
6431 .maxlen = sizeof(int),
6432 .mode = 0644,
6433 .proc_handler = addrconf_sysctl_forward,
6434 },
6435 {
6436 .procname = "hop_limit",
6437 .data = &ipv6_devconf.hop_limit,
6438 .maxlen = sizeof(int),
6439 .mode = 0644,
6440 .proc_handler = proc_dointvec_minmax,
6441 .extra1 = (void *)&one,
6442 .extra2 = (void *)&two_five_five,
6443 },
6444 {
6445 .procname = "mtu",
6446 .data = &ipv6_devconf.mtu6,
6447 .maxlen = sizeof(int),
6448 .mode = 0644,
6449 .proc_handler = addrconf_sysctl_mtu,
6450 },
6451 {
6452 .procname = "accept_ra",
6453 .data = &ipv6_devconf.accept_ra,
6454 .maxlen = sizeof(int),
6455 .mode = 0644,
6456 .proc_handler = proc_dointvec,
6457 },
6458 {
6459 .procname = "accept_redirects",
6460 .data = &ipv6_devconf.accept_redirects,
6461 .maxlen = sizeof(int),
6462 .mode = 0644,
6463 .proc_handler = proc_dointvec,
6464 },
6465 {
6466 .procname = "autoconf",
6467 .data = &ipv6_devconf.autoconf,
6468 .maxlen = sizeof(int),
6469 .mode = 0644,
6470 .proc_handler = proc_dointvec,
6471 },
6472 {
6473 .procname = "dad_transmits",
6474 .data = &ipv6_devconf.dad_transmits,
6475 .maxlen = sizeof(int),
6476 .mode = 0644,
6477 .proc_handler = proc_dointvec,
6478 },
6479 {
6480 .procname = "router_solicitations",
6481 .data = &ipv6_devconf.rtr_solicits,
6482 .maxlen = sizeof(int),
6483 .mode = 0644,
6484 .proc_handler = proc_dointvec_minmax,
6485 .extra1 = &minus_one,
6486 },
6487 {
6488 .procname = "router_solicitation_interval",
6489 .data = &ipv6_devconf.rtr_solicit_interval,
6490 .maxlen = sizeof(int),
6491 .mode = 0644,
6492 .proc_handler = proc_dointvec_jiffies,
6493 },
6494 {
6495 .procname = "router_solicitation_max_interval",
6496 .data = &ipv6_devconf.rtr_solicit_max_interval,
6497 .maxlen = sizeof(int),
6498 .mode = 0644,
6499 .proc_handler = proc_dointvec_jiffies,
6500 },
6501 {
6502 .procname = "router_solicitation_delay",
6503 .data = &ipv6_devconf.rtr_solicit_delay,
6504 .maxlen = sizeof(int),
6505 .mode = 0644,
6506 .proc_handler = proc_dointvec_jiffies,
6507 },
6508 {
6509 .procname = "force_mld_version",
6510 .data = &ipv6_devconf.force_mld_version,
6511 .maxlen = sizeof(int),
6512 .mode = 0644,
6513 .proc_handler = proc_dointvec,
6514 },
6515 {
6516 .procname = "mldv1_unsolicited_report_interval",
6517 .data =
6518 &ipv6_devconf.mldv1_unsolicited_report_interval,
6519 .maxlen = sizeof(int),
6520 .mode = 0644,
6521 .proc_handler = proc_dointvec_ms_jiffies,
6522 },
6523 {
6524 .procname = "mldv2_unsolicited_report_interval",
6525 .data =
6526 &ipv6_devconf.mldv2_unsolicited_report_interval,
6527 .maxlen = sizeof(int),
6528 .mode = 0644,
6529 .proc_handler = proc_dointvec_ms_jiffies,
6530 },
6531 {
6532 .procname = "use_tempaddr",
6533 .data = &ipv6_devconf.use_tempaddr,
6534 .maxlen = sizeof(int),
6535 .mode = 0644,
6536 .proc_handler = proc_dointvec,
6537 },
6538 {
6539 .procname = "temp_valid_lft",
6540 .data = &ipv6_devconf.temp_valid_lft,
6541 .maxlen = sizeof(int),
6542 .mode = 0644,
6543 .proc_handler = proc_dointvec,
6544 },
6545 {
6546 .procname = "temp_prefered_lft",
6547 .data = &ipv6_devconf.temp_prefered_lft,
6548 .maxlen = sizeof(int),
6549 .mode = 0644,
6550 .proc_handler = proc_dointvec,
6551 },
6552 {
6553 .procname = "regen_max_retry",
6554 .data = &ipv6_devconf.regen_max_retry,
6555 .maxlen = sizeof(int),
6556 .mode = 0644,
6557 .proc_handler = proc_dointvec,
6558 },
6559 {
6560 .procname = "max_desync_factor",
6561 .data = &ipv6_devconf.max_desync_factor,
6562 .maxlen = sizeof(int),
6563 .mode = 0644,
6564 .proc_handler = proc_dointvec,
6565 },
6566 {
6567 .procname = "max_addresses",
6568 .data = &ipv6_devconf.max_addresses,
6569 .maxlen = sizeof(int),
6570 .mode = 0644,
6571 .proc_handler = proc_dointvec,
6572 },
6573 {
6574 .procname = "accept_ra_defrtr",
6575 .data = &ipv6_devconf.accept_ra_defrtr,
6576 .maxlen = sizeof(int),
6577 .mode = 0644,
6578 .proc_handler = proc_dointvec,
6579 },
6580 {
6581 .procname = "accept_ra_min_hop_limit",
6582 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6583 .maxlen = sizeof(int),
6584 .mode = 0644,
6585 .proc_handler = proc_dointvec,
6586 },
6587 {
6588 .procname = "accept_ra_pinfo",
6589 .data = &ipv6_devconf.accept_ra_pinfo,
6590 .maxlen = sizeof(int),
6591 .mode = 0644,
6592 .proc_handler = proc_dointvec,
6593 },
6594 #ifdef CONFIG_IPV6_ROUTER_PREF
6595 {
6596 .procname = "accept_ra_rtr_pref",
6597 .data = &ipv6_devconf.accept_ra_rtr_pref,
6598 .maxlen = sizeof(int),
6599 .mode = 0644,
6600 .proc_handler = proc_dointvec,
6601 },
6602 {
6603 .procname = "router_probe_interval",
6604 .data = &ipv6_devconf.rtr_probe_interval,
6605 .maxlen = sizeof(int),
6606 .mode = 0644,
6607 .proc_handler = proc_dointvec_jiffies,
6608 },
6609 #ifdef CONFIG_IPV6_ROUTE_INFO
6610 {
6611 .procname = "accept_ra_rt_info_min_plen",
6612 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6613 .maxlen = sizeof(int),
6614 .mode = 0644,
6615 .proc_handler = proc_dointvec,
6616 },
6617 {
6618 .procname = "accept_ra_rt_info_max_plen",
6619 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6620 .maxlen = sizeof(int),
6621 .mode = 0644,
6622 .proc_handler = proc_dointvec,
6623 },
6624 #endif
6625 #endif
6626 {
6627 .procname = "proxy_ndp",
6628 .data = &ipv6_devconf.proxy_ndp,
6629 .maxlen = sizeof(int),
6630 .mode = 0644,
6631 .proc_handler = addrconf_sysctl_proxy_ndp,
6632 },
6633 {
6634 .procname = "accept_source_route",
6635 .data = &ipv6_devconf.accept_source_route,
6636 .maxlen = sizeof(int),
6637 .mode = 0644,
6638 .proc_handler = proc_dointvec,
6639 },
6640 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6641 {
6642 .procname = "optimistic_dad",
6643 .data = &ipv6_devconf.optimistic_dad,
6644 .maxlen = sizeof(int),
6645 .mode = 0644,
6646 .proc_handler = proc_dointvec,
6647 },
6648 {
6649 .procname = "use_optimistic",
6650 .data = &ipv6_devconf.use_optimistic,
6651 .maxlen = sizeof(int),
6652 .mode = 0644,
6653 .proc_handler = proc_dointvec,
6654 },
6655 #endif
6656 #ifdef CONFIG_IPV6_MROUTE
6657 {
6658 .procname = "mc_forwarding",
6659 .data = &ipv6_devconf.mc_forwarding,
6660 .maxlen = sizeof(int),
6661 .mode = 0444,
6662 .proc_handler = proc_dointvec,
6663 },
6664 #endif
6665 {
6666 .procname = "disable_ipv6",
6667 .data = &ipv6_devconf.disable_ipv6,
6668 .maxlen = sizeof(int),
6669 .mode = 0644,
6670 .proc_handler = addrconf_sysctl_disable,
6671 },
6672 {
6673 .procname = "accept_dad",
6674 .data = &ipv6_devconf.accept_dad,
6675 .maxlen = sizeof(int),
6676 .mode = 0644,
6677 .proc_handler = proc_dointvec,
6678 },
6679 {
6680 .procname = "force_tllao",
6681 .data = &ipv6_devconf.force_tllao,
6682 .maxlen = sizeof(int),
6683 .mode = 0644,
6684 .proc_handler = proc_dointvec
6685 },
6686 {
6687 .procname = "ndisc_notify",
6688 .data = &ipv6_devconf.ndisc_notify,
6689 .maxlen = sizeof(int),
6690 .mode = 0644,
6691 .proc_handler = proc_dointvec
6692 },
6693 {
6694 .procname = "suppress_frag_ndisc",
6695 .data = &ipv6_devconf.suppress_frag_ndisc,
6696 .maxlen = sizeof(int),
6697 .mode = 0644,
6698 .proc_handler = proc_dointvec
6699 },
6700 {
6701 .procname = "accept_ra_from_local",
6702 .data = &ipv6_devconf.accept_ra_from_local,
6703 .maxlen = sizeof(int),
6704 .mode = 0644,
6705 .proc_handler = proc_dointvec,
6706 },
6707 {
6708 .procname = "accept_ra_mtu",
6709 .data = &ipv6_devconf.accept_ra_mtu,
6710 .maxlen = sizeof(int),
6711 .mode = 0644,
6712 .proc_handler = proc_dointvec,
6713 },
6714 {
6715 .procname = "stable_secret",
6716 .data = &ipv6_devconf.stable_secret,
6717 .maxlen = IPV6_MAX_STRLEN,
6718 .mode = 0600,
6719 .proc_handler = addrconf_sysctl_stable_secret,
6720 },
6721 {
6722 .procname = "use_oif_addrs_only",
6723 .data = &ipv6_devconf.use_oif_addrs_only,
6724 .maxlen = sizeof(int),
6725 .mode = 0644,
6726 .proc_handler = proc_dointvec,
6727 },
6728 {
6729 .procname = "ignore_routes_with_linkdown",
6730 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6731 .maxlen = sizeof(int),
6732 .mode = 0644,
6733 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6734 },
6735 {
6736 .procname = "drop_unicast_in_l2_multicast",
6737 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6738 .maxlen = sizeof(int),
6739 .mode = 0644,
6740 .proc_handler = proc_dointvec,
6741 },
6742 {
6743 .procname = "drop_unsolicited_na",
6744 .data = &ipv6_devconf.drop_unsolicited_na,
6745 .maxlen = sizeof(int),
6746 .mode = 0644,
6747 .proc_handler = proc_dointvec,
6748 },
6749 {
6750 .procname = "keep_addr_on_down",
6751 .data = &ipv6_devconf.keep_addr_on_down,
6752 .maxlen = sizeof(int),
6753 .mode = 0644,
6754 .proc_handler = proc_dointvec,
6755
6756 },
6757 {
6758 .procname = "seg6_enabled",
6759 .data = &ipv6_devconf.seg6_enabled,
6760 .maxlen = sizeof(int),
6761 .mode = 0644,
6762 .proc_handler = proc_dointvec,
6763 },
6764 #ifdef CONFIG_IPV6_SEG6_HMAC
6765 {
6766 .procname = "seg6_require_hmac",
6767 .data = &ipv6_devconf.seg6_require_hmac,
6768 .maxlen = sizeof(int),
6769 .mode = 0644,
6770 .proc_handler = proc_dointvec,
6771 },
6772 #endif
6773 {
6774 .procname = "enhanced_dad",
6775 .data = &ipv6_devconf.enhanced_dad,
6776 .maxlen = sizeof(int),
6777 .mode = 0644,
6778 .proc_handler = proc_dointvec,
6779 },
6780 {
6781 .procname = "addr_gen_mode",
6782 .data = &ipv6_devconf.addr_gen_mode,
6783 .maxlen = sizeof(int),
6784 .mode = 0644,
6785 .proc_handler = addrconf_sysctl_addr_gen_mode,
6786 },
6787 {
6788 .procname = "disable_policy",
6789 .data = &ipv6_devconf.disable_policy,
6790 .maxlen = sizeof(int),
6791 .mode = 0644,
6792 .proc_handler = addrconf_sysctl_disable_policy,
6793 },
6794 {
6795 .procname = "ndisc_tclass",
6796 .data = &ipv6_devconf.ndisc_tclass,
6797 .maxlen = sizeof(int),
6798 .mode = 0644,
6799 .proc_handler = proc_dointvec_minmax,
6800 .extra1 = (void *)&zero,
6801 .extra2 = (void *)&two_five_five,
6802 },
6803 {
6804 /* sentinel */
6805 }
6806 };
6807
6808 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6809 struct inet6_dev *idev, struct ipv6_devconf *p)
6810 {
6811 int i, ifindex;
6812 struct ctl_table *table;
6813 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6814
6815 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6816 if (!table)
6817 goto out;
6818
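/* Rebase each entry's data pointer from the global ipv6_devconf template
 * onto the per-netns/per-device copy 'p', so every registered file reads
 * and writes the right instance of the configuration.
 */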
6819 for (i = 0; table[i].data; i++) {
6820 table[i].data += (char *)p - (char *)&ipv6_devconf;
6821 /* If one of these is already set, then it is not safe to
6822 * overwrite either of them: this makes proc_dointvec_minmax
6823 * usable.
6824 */
6825 if (!table[i].extra1 && !table[i].extra2) {
6826 table[i].extra1 = idev; /* embedded; no ref */
6827 table[i].extra2 = net;
6828 }
6829 }
6830
6831 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6832
6833 p->sysctl_header = register_net_sysctl(net, path, table);
6834 if (!p->sysctl_header)
6835 goto free;
6836
6837 if (!strcmp(dev_name, "all"))
6838 ifindex = NETCONFA_IFINDEX_ALL;
6839 else if (!strcmp(dev_name, "default"))
6840 ifindex = NETCONFA_IFINDEX_DEFAULT;
6841 else
6842 ifindex = idev->dev->ifindex;
6843 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
6844 ifindex, p);
6845 return 0;
6846
6847 free:
6848 kfree(table);
6849 out:
6850 return -ENOBUFS;
6851 }
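/* The table registered above surfaces as /proc/sys/net/ipv6/conf/<dev_name>/
 * (with "all" and "default" as the two special directory names), and the
 * final RTM_NEWNETCONF notification with NETCONFA_ALL lets netlink listeners
 * learn the complete initial configuration for that ifindex without having
 * to read /proc.
 */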
6852
6853 static void __addrconf_sysctl_unregister(struct net *net,
6854 struct ipv6_devconf *p, int ifindex)
6855 {
6856 struct ctl_table *table;
6857
6858 if (!p->sysctl_header)
6859 return;
6860
6861 table = p->sysctl_header->ctl_table_arg;
6862 unregister_net_sysctl_table(p->sysctl_header);
6863 p->sysctl_header = NULL;
6864 kfree(table);
6865
6866 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
6867 }
6868
6869 static int addrconf_sysctl_register(struct inet6_dev *idev)
6870 {
6871 int err;
6872
6873 if (!sysctl_dev_name_is_allowed(idev->dev->name))
6874 return -EINVAL;
6875
6876 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
6877 &ndisc_ifinfo_sysctl_change);
6878 if (err)
6879 return err;
6880 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
6881 idev, &idev->cnf);
6882 if (err)
6883 neigh_sysctl_unregister(idev->nd_parms);
6884
6885 return err;
6886 }
6887
6888 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
6889 {
6890 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
6891 idev->dev->ifindex);
6892 neigh_sysctl_unregister(idev->nd_parms);
6893 }
6894
6895
6896 #endif
6897
6898 static int __net_init addrconf_init_net(struct net *net)
6899 {
6900 int err = -ENOMEM;
6901 struct ipv6_devconf *all, *dflt;
6902
6903 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
6904 if (!all)
6905 goto err_alloc_all;
6906
6907 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
6908 if (!dflt)
6909 goto err_alloc_dflt;
6910
6911 if (IS_ENABLED(CONFIG_SYSCTL) &&
6912 sysctl_devconf_inherit_init_net == 1 && !net_eq(net, &init_net)) {
6913 memcpy(all, init_net.ipv6.devconf_all, sizeof(ipv6_devconf));
6914 memcpy(dflt, init_net.ipv6.devconf_dflt, sizeof(ipv6_devconf_dflt));
6915 }
6916
6917 /* these will be inherited by all namespaces */
6918 dflt->autoconf = ipv6_defaults.autoconf;
6919 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
6920
6921 dflt->stable_secret.initialized = false;
6922 all->stable_secret.initialized = false;
6923
6924 net->ipv6.devconf_all = all;
6925 net->ipv6.devconf_dflt = dflt;
6926
6927 #ifdef CONFIG_SYSCTL
6928 err = __addrconf_sysctl_register(net, "all", NULL, all);
6929 if (err < 0)
6930 goto err_reg_all;
6931
6932 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
6933 if (err < 0)
6934 goto err_reg_dflt;
6935 #endif
6936 return 0;
6937
6938 #ifdef CONFIG_SYSCTL
6939 err_reg_dflt:
6940 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
6941 err_reg_all:
6942 kfree(dflt);
6943 #endif
6944 err_alloc_dflt:
6945 kfree(all);
6946 err_alloc_all:
6947 return err;
6948 }
6949
6950 static void __net_exit addrconf_exit_net(struct net *net)
6951 {
6952 #ifdef CONFIG_SYSCTL
6953 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
6954 NETCONFA_IFINDEX_DEFAULT);
6955 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
6956 NETCONFA_IFINDEX_ALL);
6957 #endif
6958 kfree(net->ipv6.devconf_dflt);
6959 kfree(net->ipv6.devconf_all);
6960 }
6961
6962 static struct pernet_operations addrconf_ops = {
6963 .init = addrconf_init_net,
6964 .exit = addrconf_exit_net,
6965 };
6966
6967 static struct rtnl_af_ops inet6_ops __read_mostly = {
6968 .family = AF_INET6,
6969 .fill_link_af = inet6_fill_link_af,
6970 .get_link_af_size = inet6_get_link_af_size,
6971 .validate_link_af = inet6_validate_link_af,
6972 .set_link_af = inet6_set_link_af,
6973 };
6974
6975 /*
6976 * Init / cleanup code
6977 */
6978
6979 int __init addrconf_init(void)
6980 {
6981 struct inet6_dev *idev;
6982 int i, err;
6983
6984 err = ipv6_addr_label_init();
6985 if (err < 0) {
6986 pr_crit("%s: cannot initialize default policy table: %d\n",
6987 __func__, err);
6988 goto out;
6989 }
6990
6991 err = register_pernet_subsys(&addrconf_ops);
6992 if (err < 0)
6993 goto out_addrlabel;
6994
6995 addrconf_wq = create_workqueue("ipv6_addrconf");
6996 if (!addrconf_wq) {
6997 err = -ENOMEM;
6998 goto out_nowq;
6999 }
7000
7001 /* The addrconf netdev notifier requires that loopback_dev
7002 * has its ipv6 private information allocated and set up
7003 * before it can bring up and give link-local addresses
7004 * to other devices which are up.
7005 *
7006 * Unfortunately, loopback_dev is not necessarily the first
7007 * entry in the global dev_base list of net devices. In fact,
7008 * it is likely to be the very last entry on that list.
7009 * So this causes the notifier registration below to try and
7010 * give link-local addresses to all devices besides loopback_dev
7011 * first, then loopback_dev, which causes all the non-loopback_dev
7012 * devices to fail to get a link-local address.
7013 *
7014 * So, as a temporary fix, allocate the ipv6 structure for
7015 * loopback_dev first by hand.
7016 * Longer term, all of the dependencies ipv6 has upon the loopback
7017 * device and it being up should be removed.
7018 */
7019 rtnl_lock();
7020 idev = ipv6_add_dev(init_net.loopback_dev);
7021 rtnl_unlock();
7022 if (IS_ERR(idev)) {
7023 err = PTR_ERR(idev);
7024 goto errlo;
7025 }
7026
7027 ip6_route_init_special_entries();
7028
7029 for (i = 0; i < IN6_ADDR_HSIZE; i++)
7030 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
7031
7032 register_netdevice_notifier(&ipv6_dev_notf);
7033
7034 addrconf_verify();
7035
7036 rtnl_af_register(&inet6_ops);
7037
7038 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
7039 NULL, inet6_dump_ifinfo, 0);
7040 if (err < 0)
7041 goto errout;
7042
7043 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
7044 inet6_rtm_newaddr, NULL, 0);
7045 if (err < 0)
7046 goto errout;
7047 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
7048 inet6_rtm_deladdr, NULL, 0);
7049 if (err < 0)
7050 goto errout;
7051 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
7052 inet6_rtm_getaddr, inet6_dump_ifaddr,
7053 RTNL_FLAG_DOIT_UNLOCKED);
7054 if (err < 0)
7055 goto errout;
7056 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
7057 NULL, inet6_dump_ifmcaddr, 0);
7058 if (err < 0)
7059 goto errout;
7060 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
7061 NULL, inet6_dump_ifacaddr, 0);
7062 if (err < 0)
7063 goto errout;
7064 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
7065 inet6_netconf_get_devconf,
7066 inet6_netconf_dump_devconf,
7067 RTNL_FLAG_DOIT_UNLOCKED);
7068 if (err < 0)
7069 goto errout;
7070 err = ipv6_addr_label_rtnl_register();
7071 if (err < 0)
7072 goto errout;
7073
7074 return 0;
7075 errout:
7076 rtnl_unregister_all(PF_INET6);
7077 rtnl_af_unregister(&inet6_ops);
7078 unregister_netdevice_notifier(&ipv6_dev_notf);
7079 errlo:
7080 destroy_workqueue(addrconf_wq);
7081 out_nowq:
7082 unregister_pernet_subsys(&addrconf_ops);
7083 out_addrlabel:
7084 ipv6_addr_label_cleanup();
7085 out:
7086 return err;
7087 }
7088
7089 void addrconf_cleanup(void)
7090 {
7091 struct net_device *dev;
7092 int i;
7093
7094 unregister_netdevice_notifier(&ipv6_dev_notf);
7095 unregister_pernet_subsys(&addrconf_ops);
7096 ipv6_addr_label_cleanup();
7097
7098 rtnl_af_unregister(&inet6_ops);
7099
7100 rtnl_lock();
7101
7102 /* clean dev list */
7103 for_each_netdev(&init_net, dev) {
7104 if (__in6_dev_get(dev) == NULL)
7105 continue;
7106 addrconf_ifdown(dev, 1);
7107 }
7108 addrconf_ifdown(init_net.loopback_dev, 2);
7109
7110 /*
7111 * Check hash table.
7112 */
7113 spin_lock_bh(&addrconf_hash_lock);
7114 for (i = 0; i < IN6_ADDR_HSIZE; i++)
7115 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
7116 spin_unlock_bh(&addrconf_hash_lock);
7117 cancel_delayed_work(&addr_chk_work);
7118 rtnl_unlock();
7119
7120 destroy_workqueue(addrconf_wq);
7121 }