net/ipv6/addrconf.c
1 /*
2 * IPv6 Address [auto]configuration
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 /*
16 * Changes:
17 *
18 * Janos Farkas : delete timer on ifdown
19 * <chexum@bankinf.banki.hu>
20 * Andi Kleen : kill double kfree on module
21 * unload.
22 * Maciej W. Rozycki : FDDI support
23 * sekiya@USAGI : Don't send too many RS
24 * packets.
25 * yoshfuji@USAGI : Fixed interval between DAD
26 * packets.
27 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
28 * address validation timer.
29 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
30 * support.
31 * Yuji SEKIYA @USAGI : Don't assign the same IPv6
32 * address on the same interface.
33 * YOSHIFUJI Hideaki @USAGI : ARCnet support
34 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
35 * seq_file.
36 * YOSHIFUJI Hideaki @USAGI : improved source address
37 * selection; consider scope,
38 * status etc.
39 */
40
41 #define pr_fmt(fmt) "IPv6: " fmt
42
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched/signal.h>
47 #include <linux/socket.h>
48 #include <linux/sockios.h>
49 #include <linux/net.h>
50 #include <linux/inet.h>
51 #include <linux/in6.h>
52 #include <linux/netdevice.h>
53 #include <linux/if_addr.h>
54 #include <linux/if_arp.h>
55 #include <linux/if_arcnet.h>
56 #include <linux/if_infiniband.h>
57 #include <linux/route.h>
58 #include <linux/inetdevice.h>
59 #include <linux/init.h>
60 #include <linux/slab.h>
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64 #include <linux/capability.h>
65 #include <linux/delay.h>
66 #include <linux/notifier.h>
67 #include <linux/string.h>
68 #include <linux/hash.h>
69
70 #include <net/net_namespace.h>
71 #include <net/sock.h>
72 #include <net/snmp.h>
73
74 #include <net/6lowpan.h>
75 #include <net/firewire.h>
76 #include <net/ipv6.h>
77 #include <net/protocol.h>
78 #include <net/ndisc.h>
79 #include <net/ip6_route.h>
80 #include <net/addrconf.h>
81 #include <net/tcp.h>
82 #include <net/ip.h>
83 #include <net/netlink.h>
84 #include <net/pkt_sched.h>
85 #include <net/l3mdev.h>
86 #include <linux/if_tunnel.h>
87 #include <linux/rtnetlink.h>
88 #include <linux/netconf.h>
89 #include <linux/random.h>
90 #include <linux/uaccess.h>
91 #include <asm/unaligned.h>
92
93 #include <linux/proc_fs.h>
94 #include <linux/seq_file.h>
95 #include <linux/export.h>
96
97 /* Set to 3 to get tracing... */
98 #define ACONF_DEBUG 2
99
100 #if ACONF_DEBUG >= 3
101 #define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
102 #else
103 #define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
104 #endif
105
106 #define INFINITY_LIFE_TIME 0xFFFFFFFF
107
108 #define IPV6_MAX_STRLEN \
109 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
110
111 static inline u32 cstamp_delta(unsigned long cstamp)
112 {
113 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
114 }
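/* Note (illustrative): cstamp_delta() converts a jiffies timestamp into
 * hundredths of a second since boot, the unit used for the cstamp/tstamp
 * fields reported to userspace (struct ifa_cacheinfo). For example, with
 * HZ == 250, a timestamp taken 500 jiffies after INITIAL_JIFFIES yields
 * 500 * 100 / 250 == 200, i.e. 2.00 seconds.
 */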
115
116 static inline s32 rfc3315_s14_backoff_init(s32 irt)
117 {
118 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
119 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
120 do_div(tmp, 1000000);
121 return (s32)tmp;
122 }
123
124 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
125 {
126 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
127 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
128 do_div(tmp, 1000000);
129 if ((s32)tmp > mrt) {
130 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
131 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
132 do_div(tmp, 1000000);
133 }
134 return (s32)tmp;
135 }
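/* Worked example (hypothetical values): with irt = 4 * HZ,
 * rfc3315_s14_backoff_init() returns a jittered value in roughly
 * [3.6 * HZ, 4.4 * HZ]. Each subsequent rfc3315_s14_backoff_update(rt, mrt)
 * multiplies the previous timeout by ~1.9..2.1, and once that would exceed
 * mrt it re-randomizes around mrt instead (0.9..1.1 * mrt), e.g.:
 *
 *	s32 rt = rfc3315_s14_backoff_init(4 * HZ);
 *	rt = rfc3315_s14_backoff_update(rt, 3600 * HZ);
 */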
136
137 #ifdef CONFIG_SYSCTL
138 static int addrconf_sysctl_register(struct inet6_dev *idev);
139 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
140 #else
141 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
142 {
143 return 0;
144 }
145
146 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
147 {
148 }
149 #endif
150
151 static void ipv6_regen_rndid(struct inet6_dev *idev);
152 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
153
154 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
155 static int ipv6_count_addresses(const struct inet6_dev *idev);
156 static int ipv6_generate_stable_address(struct in6_addr *addr,
157 u8 dad_count,
158 const struct inet6_dev *idev);
159
160 /*
161 * Configured unicast address hash table
162 */
163 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
164 static DEFINE_SPINLOCK(addrconf_hash_lock);
165
166 static void addrconf_verify(void);
167 static void addrconf_verify_rtnl(void);
168 static void addrconf_verify_work(struct work_struct *);
169
170 static struct workqueue_struct *addrconf_wq;
171 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
172
173 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
174 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
175
176 static void addrconf_type_change(struct net_device *dev,
177 unsigned long event);
178 static int addrconf_ifdown(struct net_device *dev, int how);
179
180 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
181 int plen,
182 const struct net_device *dev,
183 u32 flags, u32 noflags);
184
185 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
186 static void addrconf_dad_work(struct work_struct *w);
187 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
188 static void addrconf_dad_run(struct inet6_dev *idev);
189 static void addrconf_rs_timer(unsigned long data);
190 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
191 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
192
193 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
194 struct prefix_info *pinfo);
195
196 static struct ipv6_devconf ipv6_devconf __read_mostly = {
197 .forwarding = 0,
198 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
199 .mtu6 = IPV6_MIN_MTU,
200 .accept_ra = 1,
201 .accept_redirects = 1,
202 .autoconf = 1,
203 .force_mld_version = 0,
204 .mldv1_unsolicited_report_interval = 10 * HZ,
205 .mldv2_unsolicited_report_interval = HZ,
206 .dad_transmits = 1,
207 .rtr_solicits = MAX_RTR_SOLICITATIONS,
208 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
209 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
210 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
211 .use_tempaddr = 0,
212 .temp_valid_lft = TEMP_VALID_LIFETIME,
213 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
214 .regen_max_retry = REGEN_MAX_RETRY,
215 .max_desync_factor = MAX_DESYNC_FACTOR,
216 .max_addresses = IPV6_MAX_ADDRESSES,
217 .accept_ra_defrtr = 1,
218 .accept_ra_from_local = 0,
219 .accept_ra_min_hop_limit= 1,
220 .accept_ra_pinfo = 1,
221 #ifdef CONFIG_IPV6_ROUTER_PREF
222 .accept_ra_rtr_pref = 1,
223 .rtr_probe_interval = 60 * HZ,
224 #ifdef CONFIG_IPV6_ROUTE_INFO
225 .accept_ra_rt_info_min_plen = 0,
226 .accept_ra_rt_info_max_plen = 0,
227 #endif
228 #endif
229 .proxy_ndp = 0,
230 .accept_source_route = 0, /* we do not accept RH0 by default. */
231 .disable_ipv6 = 0,
232 .accept_dad = 1,
233 .suppress_frag_ndisc = 1,
234 .accept_ra_mtu = 1,
235 .stable_secret = {
236 .initialized = false,
237 },
238 .use_oif_addrs_only = 0,
239 .ignore_routes_with_linkdown = 0,
240 .keep_addr_on_down = 0,
241 .seg6_enabled = 0,
242 #ifdef CONFIG_IPV6_SEG6_HMAC
243 .seg6_require_hmac = 0,
244 #endif
245 .enhanced_dad = 1,
246 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
247 .disable_policy = 0,
248 };
249
250 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
251 .forwarding = 0,
252 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
253 .mtu6 = IPV6_MIN_MTU,
254 .accept_ra = 1,
255 .accept_redirects = 1,
256 .autoconf = 1,
257 .force_mld_version = 0,
258 .mldv1_unsolicited_report_interval = 10 * HZ,
259 .mldv2_unsolicited_report_interval = HZ,
260 .dad_transmits = 1,
261 .rtr_solicits = MAX_RTR_SOLICITATIONS,
262 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
263 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
264 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
265 .use_tempaddr = 0,
266 .temp_valid_lft = TEMP_VALID_LIFETIME,
267 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
268 .regen_max_retry = REGEN_MAX_RETRY,
269 .max_desync_factor = MAX_DESYNC_FACTOR,
270 .max_addresses = IPV6_MAX_ADDRESSES,
271 .accept_ra_defrtr = 1,
272 .accept_ra_from_local = 0,
273 .accept_ra_min_hop_limit= 1,
274 .accept_ra_pinfo = 1,
275 #ifdef CONFIG_IPV6_ROUTER_PREF
276 .accept_ra_rtr_pref = 1,
277 .rtr_probe_interval = 60 * HZ,
278 #ifdef CONFIG_IPV6_ROUTE_INFO
279 .accept_ra_rt_info_min_plen = 0,
280 .accept_ra_rt_info_max_plen = 0,
281 #endif
282 #endif
283 .proxy_ndp = 0,
284 .accept_source_route = 0, /* we do not accept RH0 by default. */
285 .disable_ipv6 = 0,
286 .accept_dad = 1,
287 .suppress_frag_ndisc = 1,
288 .accept_ra_mtu = 1,
289 .stable_secret = {
290 .initialized = false,
291 },
292 .use_oif_addrs_only = 0,
293 .ignore_routes_with_linkdown = 0,
294 .keep_addr_on_down = 0,
295 .seg6_enabled = 0,
296 #ifdef CONFIG_IPV6_SEG6_HMAC
297 .seg6_require_hmac = 0,
298 #endif
299 .enhanced_dad = 1,
300 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
301 .disable_policy = 0,
302 };
303
304 /* Check if link is ready: is it up and is a valid qdisc available */
305 static inline bool addrconf_link_ready(const struct net_device *dev)
306 {
307 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
308 }
309
310 static void addrconf_del_rs_timer(struct inet6_dev *idev)
311 {
312 if (del_timer(&idev->rs_timer))
313 __in6_dev_put(idev);
314 }
315
316 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
317 {
318 if (cancel_delayed_work(&ifp->dad_work))
319 __in6_ifa_put(ifp);
320 }
321
322 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
323 unsigned long when)
324 {
325 if (!timer_pending(&idev->rs_timer))
326 in6_dev_hold(idev);
327 mod_timer(&idev->rs_timer, jiffies + when);
328 }
329
330 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
331 unsigned long delay)
332 {
333 in6_ifa_hold(ifp);
334 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
335 in6_ifa_put(ifp);
336 }
337
338 static int snmp6_alloc_dev(struct inet6_dev *idev)
339 {
340 int i;
341
342 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
343 if (!idev->stats.ipv6)
344 goto err_ip;
345
346 for_each_possible_cpu(i) {
347 struct ipstats_mib *addrconf_stats;
348 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
349 u64_stats_init(&addrconf_stats->syncp);
350 }
351
352
353 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
354 GFP_KERNEL);
355 if (!idev->stats.icmpv6dev)
356 goto err_icmp;
357 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
358 GFP_KERNEL);
359 if (!idev->stats.icmpv6msgdev)
360 goto err_icmpmsg;
361
362 return 0;
363
364 err_icmpmsg:
365 kfree(idev->stats.icmpv6dev);
366 err_icmp:
367 free_percpu(idev->stats.ipv6);
368 err_ip:
369 return -ENOMEM;
370 }
371
372 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
373 {
374 struct inet6_dev *ndev;
375 int err = -ENOMEM;
376
377 ASSERT_RTNL();
378
379 if (dev->mtu < IPV6_MIN_MTU)
380 return ERR_PTR(-EINVAL);
381
382 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
383 if (!ndev)
384 return ERR_PTR(err);
385
386 rwlock_init(&ndev->lock);
387 ndev->dev = dev;
388 INIT_LIST_HEAD(&ndev->addr_list);
389 setup_timer(&ndev->rs_timer, addrconf_rs_timer,
390 (unsigned long)ndev);
391 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
392
393 if (ndev->cnf.stable_secret.initialized)
394 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
395 else
396 ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
397
398 ndev->cnf.mtu6 = dev->mtu;
399 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
400 if (!ndev->nd_parms) {
401 kfree(ndev);
402 return ERR_PTR(err);
403 }
404 if (ndev->cnf.forwarding)
405 dev_disable_lro(dev);
406 /* We refer to the device */
407 dev_hold(dev);
408
409 if (snmp6_alloc_dev(ndev) < 0) {
410 ADBG(KERN_WARNING
411 "%s: cannot allocate memory for statistics; dev=%s.\n",
412 __func__, dev->name);
413 neigh_parms_release(&nd_tbl, ndev->nd_parms);
414 dev_put(dev);
415 kfree(ndev);
416 return ERR_PTR(err);
417 }
418
419 if (snmp6_register_dev(ndev) < 0) {
420 ADBG(KERN_WARNING
421 "%s: cannot create /proc/net/dev_snmp6/%s\n",
422 __func__, dev->name);
423 goto err_release;
424 }
425
426 /* One reference from device. */
427 refcount_set(&ndev->refcnt, 1);
428
429 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
430 ndev->cnf.accept_dad = -1;
431
432 #if IS_ENABLED(CONFIG_IPV6_SIT)
433 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
434 pr_info("%s: Disabled Multicast RS\n", dev->name);
435 ndev->cnf.rtr_solicits = 0;
436 }
437 #endif
438
439 INIT_LIST_HEAD(&ndev->tempaddr_list);
440 ndev->desync_factor = U32_MAX;
441 if ((dev->flags&IFF_LOOPBACK) ||
442 dev->type == ARPHRD_TUNNEL ||
443 dev->type == ARPHRD_TUNNEL6 ||
444 dev->type == ARPHRD_SIT ||
445 dev->type == ARPHRD_NONE) {
446 ndev->cnf.use_tempaddr = -1;
447 } else
448 ipv6_regen_rndid(ndev);
449
450 ndev->token = in6addr_any;
451
452 if (netif_running(dev) && addrconf_link_ready(dev))
453 ndev->if_flags |= IF_READY;
454
455 ipv6_mc_init_dev(ndev);
456 ndev->tstamp = jiffies;
457 err = addrconf_sysctl_register(ndev);
458 if (err) {
459 ipv6_mc_destroy_dev(ndev);
460 snmp6_unregister_dev(ndev);
461 goto err_release;
462 }
463 /* protected by rtnl_lock */
464 rcu_assign_pointer(dev->ip6_ptr, ndev);
465
466 /* Join interface-local all-node multicast group */
467 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
468
469 /* Join all-node multicast group */
470 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
471
472 /* Join all-router multicast group if forwarding is set */
473 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
474 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
475
476 return ndev;
477
478 err_release:
479 neigh_parms_release(&nd_tbl, ndev->nd_parms);
480 ndev->dead = 1;
481 in6_dev_finish_destroy(ndev);
482 return ERR_PTR(err);
483 }
484
485 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
486 {
487 struct inet6_dev *idev;
488
489 ASSERT_RTNL();
490
491 idev = __in6_dev_get(dev);
492 if (!idev) {
493 idev = ipv6_add_dev(dev);
494 if (IS_ERR(idev))
495 return NULL;
496 }
497
498 if (dev->flags&IFF_UP)
499 ipv6_mc_up(idev);
500 return idev;
501 }
502
503 static int inet6_netconf_msgsize_devconf(int type)
504 {
505 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
506 + nla_total_size(4); /* NETCONFA_IFINDEX */
507 bool all = false;
508
509 if (type == NETCONFA_ALL)
510 all = true;
511
512 if (all || type == NETCONFA_FORWARDING)
513 size += nla_total_size(4);
514 #ifdef CONFIG_IPV6_MROUTE
515 if (all || type == NETCONFA_MC_FORWARDING)
516 size += nla_total_size(4);
517 #endif
518 if (all || type == NETCONFA_PROXY_NEIGH)
519 size += nla_total_size(4);
520
521 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
522 size += nla_total_size(4);
523
524 return size;
525 }
526
527 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
528 struct ipv6_devconf *devconf, u32 portid,
529 u32 seq, int event, unsigned int flags,
530 int type)
531 {
532 struct nlmsghdr *nlh;
533 struct netconfmsg *ncm;
534 bool all = false;
535
536 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
537 flags);
538 if (!nlh)
539 return -EMSGSIZE;
540
541 if (type == NETCONFA_ALL)
542 all = true;
543
544 ncm = nlmsg_data(nlh);
545 ncm->ncm_family = AF_INET6;
546
547 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
548 goto nla_put_failure;
549
550 if (!devconf)
551 goto out;
552
553 if ((all || type == NETCONFA_FORWARDING) &&
554 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
555 goto nla_put_failure;
556 #ifdef CONFIG_IPV6_MROUTE
557 if ((all || type == NETCONFA_MC_FORWARDING) &&
558 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
559 devconf->mc_forwarding) < 0)
560 goto nla_put_failure;
561 #endif
562 if ((all || type == NETCONFA_PROXY_NEIGH) &&
563 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
564 goto nla_put_failure;
565
566 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
567 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
568 devconf->ignore_routes_with_linkdown) < 0)
569 goto nla_put_failure;
570
571 out:
572 nlmsg_end(skb, nlh);
573 return 0;
574
575 nla_put_failure:
576 nlmsg_cancel(skb, nlh);
577 return -EMSGSIZE;
578 }
579
580 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
581 int ifindex, struct ipv6_devconf *devconf)
582 {
583 struct sk_buff *skb;
584 int err = -ENOBUFS;
585
586 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
587 if (!skb)
588 goto errout;
589
590 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
591 event, 0, type);
592 if (err < 0) {
593 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
594 WARN_ON(err == -EMSGSIZE);
595 kfree_skb(skb);
596 goto errout;
597 }
598 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
599 return;
600 errout:
601 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
602 }
603
604 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
605 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
606 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
607 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
608 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
609 };
610
611 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
612 struct nlmsghdr *nlh,
613 struct netlink_ext_ack *extack)
614 {
615 struct net *net = sock_net(in_skb->sk);
616 struct nlattr *tb[NETCONFA_MAX+1];
617 struct inet6_dev *in6_dev = NULL;
618 struct net_device *dev = NULL;
619 struct netconfmsg *ncm;
620 struct sk_buff *skb;
621 struct ipv6_devconf *devconf;
622 int ifindex;
623 int err;
624
625 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
626 devconf_ipv6_policy, extack);
627 if (err < 0)
628 return err;
629
630 if (!tb[NETCONFA_IFINDEX])
631 return -EINVAL;
632
633 err = -EINVAL;
634 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
635 switch (ifindex) {
636 case NETCONFA_IFINDEX_ALL:
637 devconf = net->ipv6.devconf_all;
638 break;
639 case NETCONFA_IFINDEX_DEFAULT:
640 devconf = net->ipv6.devconf_dflt;
641 break;
642 default:
643 dev = dev_get_by_index(net, ifindex);
644 if (!dev)
645 return -EINVAL;
646 in6_dev = in6_dev_get(dev);
647 if (!in6_dev)
648 goto errout;
649 devconf = &in6_dev->cnf;
650 break;
651 }
652
653 err = -ENOBUFS;
654 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
655 if (!skb)
656 goto errout;
657
658 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
659 NETLINK_CB(in_skb).portid,
660 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
661 NETCONFA_ALL);
662 if (err < 0) {
663 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
664 WARN_ON(err == -EMSGSIZE);
665 kfree_skb(skb);
666 goto errout;
667 }
668 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
669 errout:
670 if (in6_dev)
671 in6_dev_put(in6_dev);
672 if (dev)
673 dev_put(dev);
674 return err;
675 }
676
677 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
678 struct netlink_callback *cb)
679 {
680 struct net *net = sock_net(skb->sk);
681 int h, s_h;
682 int idx, s_idx;
683 struct net_device *dev;
684 struct inet6_dev *idev;
685 struct hlist_head *head;
686
687 s_h = cb->args[0];
688 s_idx = idx = cb->args[1];
689
690 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
691 idx = 0;
692 head = &net->dev_index_head[h];
693 rcu_read_lock();
694 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
695 net->dev_base_seq;
696 hlist_for_each_entry_rcu(dev, head, index_hlist) {
697 if (idx < s_idx)
698 goto cont;
699 idev = __in6_dev_get(dev);
700 if (!idev)
701 goto cont;
702
703 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
704 &idev->cnf,
705 NETLINK_CB(cb->skb).portid,
706 cb->nlh->nlmsg_seq,
707 RTM_NEWNETCONF,
708 NLM_F_MULTI,
709 NETCONFA_ALL) < 0) {
710 rcu_read_unlock();
711 goto done;
712 }
713 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
714 cont:
715 idx++;
716 }
717 rcu_read_unlock();
718 }
719 if (h == NETDEV_HASHENTRIES) {
720 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
721 net->ipv6.devconf_all,
722 NETLINK_CB(cb->skb).portid,
723 cb->nlh->nlmsg_seq,
724 RTM_NEWNETCONF, NLM_F_MULTI,
725 NETCONFA_ALL) < 0)
726 goto done;
727 else
728 h++;
729 }
730 if (h == NETDEV_HASHENTRIES + 1) {
731 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
732 net->ipv6.devconf_dflt,
733 NETLINK_CB(cb->skb).portid,
734 cb->nlh->nlmsg_seq,
735 RTM_NEWNETCONF, NLM_F_MULTI,
736 NETCONFA_ALL) < 0)
737 goto done;
738 else
739 h++;
740 }
741 done:
742 cb->args[0] = h;
743 cb->args[1] = idx;
744
745 return skb->len;
746 }
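/* Userspace note (illustrative): the RTM_NEWNETCONF messages built above are
 * what a tool such as "ip -6 netconf show [dev <ifname>]" requests and
 * displays, i.e. the per-device forwarding, mc_forwarding, proxy_neigh and
 * ignore_routes_with_linkdown settings, plus the "all" and "default" entries.
 */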
747
748 #ifdef CONFIG_SYSCTL
749 static void dev_forward_change(struct inet6_dev *idev)
750 {
751 struct net_device *dev;
752 struct inet6_ifaddr *ifa;
753
754 if (!idev)
755 return;
756 dev = idev->dev;
757 if (idev->cnf.forwarding)
758 dev_disable_lro(dev);
759 if (dev->flags & IFF_MULTICAST) {
760 if (idev->cnf.forwarding) {
761 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
762 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
763 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
764 } else {
765 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
766 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
767 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
768 }
769 }
770
771 list_for_each_entry(ifa, &idev->addr_list, if_list) {
772 if (ifa->flags&IFA_F_TENTATIVE)
773 continue;
774 if (idev->cnf.forwarding)
775 addrconf_join_anycast(ifa);
776 else
777 addrconf_leave_anycast(ifa);
778 }
779 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
780 NETCONFA_FORWARDING,
781 dev->ifindex, &idev->cnf);
782 }
783
784
785 static void addrconf_forward_change(struct net *net, __s32 newf)
786 {
787 struct net_device *dev;
788 struct inet6_dev *idev;
789
790 for_each_netdev(net, dev) {
791 idev = __in6_dev_get(dev);
792 if (idev) {
793 int changed = (!idev->cnf.forwarding) ^ (!newf);
794 idev->cnf.forwarding = newf;
795 if (changed)
796 dev_forward_change(idev);
797 }
798 }
799 }
800
801 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
802 {
803 struct net *net;
804 int old;
805
806 if (!rtnl_trylock())
807 return restart_syscall();
808
809 net = (struct net *)table->extra2;
810 old = *p;
811 *p = newf;
812
813 if (p == &net->ipv6.devconf_dflt->forwarding) {
814 if ((!newf) ^ (!old))
815 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
816 NETCONFA_FORWARDING,
817 NETCONFA_IFINDEX_DEFAULT,
818 net->ipv6.devconf_dflt);
819 rtnl_unlock();
820 return 0;
821 }
822
823 if (p == &net->ipv6.devconf_all->forwarding) {
824 int old_dflt = net->ipv6.devconf_dflt->forwarding;
825
826 net->ipv6.devconf_dflt->forwarding = newf;
827 if ((!newf) ^ (!old_dflt))
828 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
829 NETCONFA_FORWARDING,
830 NETCONFA_IFINDEX_DEFAULT,
831 net->ipv6.devconf_dflt);
832
833 addrconf_forward_change(net, newf);
834 if ((!newf) ^ (!old))
835 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
836 NETCONFA_FORWARDING,
837 NETCONFA_IFINDEX_ALL,
838 net->ipv6.devconf_all);
839 } else if ((!newf) ^ (!old))
840 dev_forward_change((struct inet6_dev *)table->extra1);
841 rtnl_unlock();
842
843 if (newf)
844 rt6_purge_dflt_routers(net);
845 return 1;
846 }
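/* Usage note (illustrative): this is typically reached from the forwarding
 * sysctl handler when e.g. "sysctl -w net.ipv6.conf.all.forwarding=1" is
 * written. Toggling "all" also updates the "default" value and every existing
 * device; enabling forwarding additionally purges default routers learned
 * from RAs (rt6_purge_dflt_routers() above).
 */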
847
848 static void addrconf_linkdown_change(struct net *net, __s32 newf)
849 {
850 struct net_device *dev;
851 struct inet6_dev *idev;
852
853 for_each_netdev(net, dev) {
854 idev = __in6_dev_get(dev);
855 if (idev) {
856 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
857
858 idev->cnf.ignore_routes_with_linkdown = newf;
859 if (changed)
860 inet6_netconf_notify_devconf(dev_net(dev),
861 RTM_NEWNETCONF,
862 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
863 dev->ifindex,
864 &idev->cnf);
865 }
866 }
867 }
868
869 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
870 {
871 struct net *net;
872 int old;
873
874 if (!rtnl_trylock())
875 return restart_syscall();
876
877 net = (struct net *)table->extra2;
878 old = *p;
879 *p = newf;
880
881 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
882 if ((!newf) ^ (!old))
883 inet6_netconf_notify_devconf(net,
884 RTM_NEWNETCONF,
885 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
886 NETCONFA_IFINDEX_DEFAULT,
887 net->ipv6.devconf_dflt);
888 rtnl_unlock();
889 return 0;
890 }
891
892 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
893 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
894 addrconf_linkdown_change(net, newf);
895 if ((!newf) ^ (!old))
896 inet6_netconf_notify_devconf(net,
897 RTM_NEWNETCONF,
898 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
899 NETCONFA_IFINDEX_ALL,
900 net->ipv6.devconf_all);
901 }
902 rtnl_unlock();
903
904 return 1;
905 }
906
907 #endif
908
909 /* Nobody refers to this ifaddr, destroy it */
910 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
911 {
912 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
913
914 #ifdef NET_REFCNT_DEBUG
915 pr_debug("%s\n", __func__);
916 #endif
917
918 in6_dev_put(ifp->idev);
919
920 if (cancel_delayed_work(&ifp->dad_work))
921 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
922 ifp);
923
924 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
925 pr_warn("Freeing alive inet6 address %p\n", ifp);
926 return;
927 }
928 ip6_rt_put(ifp->rt);
929
930 kfree_rcu(ifp, rcu);
931 }
932
933 static void
934 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
935 {
936 struct list_head *p;
937 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
938
939 /*
940 * Each device address list is sorted in order of scope -
941 * global before linklocal.
942 */
943 list_for_each(p, &idev->addr_list) {
944 struct inet6_ifaddr *ifa
945 = list_entry(p, struct inet6_ifaddr, if_list);
946 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
947 break;
948 }
949
950 list_add_tail_rcu(&ifp->if_list, p);
951 }
952
953 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
954 {
955 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
956
957 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
958 }
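/* Usage sketch (illustrative): insertions and lookups must derive the bucket
 * with this same helper, as ipv6_add_addr_hash() and
 * ipv6_chk_addr_and_flags() below do, e.g.:
 *
 *	unsigned int hash = inet6_addr_hash(net, addr);
 *
 *	hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst)
 *		if (ipv6_addr_equal(&ifp->addr, addr))
 *			...
 */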
959
960 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
961 struct net_device *dev, unsigned int hash)
962 {
963 struct inet6_ifaddr *ifp;
964
965 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
966 if (!net_eq(dev_net(ifp->idev->dev), net))
967 continue;
968 if (ipv6_addr_equal(&ifp->addr, addr)) {
969 if (!dev || ifp->idev->dev == dev)
970 return true;
971 }
972 }
973 return false;
974 }
975
976 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
977 {
978 unsigned int hash = inet6_addr_hash(dev_net(dev), &ifa->addr);
979 int err = 0;
980
981 spin_lock(&addrconf_hash_lock);
982
983 /* Ignore adding duplicate addresses on an interface */
984 if (ipv6_chk_same_addr(dev_net(dev), &ifa->addr, dev, hash)) {
985 ADBG("ipv6_add_addr: already assigned\n");
986 err = -EEXIST;
987 } else {
988 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
989 }
990
991 spin_unlock(&addrconf_hash_lock);
992
993 return err;
994 }
995
996 /* On success it returns ifp with increased reference count */
997
998 static struct inet6_ifaddr *
999 ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
1000 const struct in6_addr *peer_addr, int pfxlen,
1001 int scope, u32 flags, u32 valid_lft, u32 prefered_lft,
1002 bool can_block, struct netlink_ext_ack *extack)
1003 {
1004 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1005 struct net *net = dev_net(idev->dev);
1006 struct inet6_ifaddr *ifa = NULL;
1007 struct rt6_info *rt = NULL;
1008 int err = 0;
1009 int addr_type = ipv6_addr_type(addr);
1010
1011 if (addr_type == IPV6_ADDR_ANY ||
1012 addr_type & IPV6_ADDR_MULTICAST ||
1013 (!(idev->dev->flags & IFF_LOOPBACK) &&
1014 addr_type & IPV6_ADDR_LOOPBACK))
1015 return ERR_PTR(-EADDRNOTAVAIL);
1016
1017 if (idev->dead) {
1018 err = -ENODEV; /*XXX*/
1019 goto out;
1020 }
1021
1022 if (idev->cnf.disable_ipv6) {
1023 err = -EACCES;
1024 goto out;
1025 }
1026
1027 /* validator notifier needs to be blocking;
1028 * do not call in atomic context
1029 */
1030 if (can_block) {
1031 struct in6_validator_info i6vi = {
1032 .i6vi_addr = *addr,
1033 .i6vi_dev = idev,
1034 .extack = extack,
1035 };
1036
1037 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1038 err = notifier_to_errno(err);
1039 if (err < 0)
1040 goto out;
1041 }
1042
1043 ifa = kzalloc(sizeof(*ifa), gfp_flags);
1044 if (!ifa) {
1045 ADBG("ipv6_add_addr: malloc failed\n");
1046 err = -ENOBUFS;
1047 goto out;
1048 }
1049
1050 rt = addrconf_dst_alloc(idev, addr, false);
1051 if (IS_ERR(rt)) {
1052 err = PTR_ERR(rt);
1053 rt = NULL;
1054 goto out;
1055 }
1056
1057 if (net->ipv6.devconf_all->disable_policy ||
1058 idev->cnf.disable_policy)
1059 rt->dst.flags |= DST_NOPOLICY;
1060
1061 neigh_parms_data_state_setall(idev->nd_parms);
1062
1063 ifa->addr = *addr;
1064 if (peer_addr)
1065 ifa->peer_addr = *peer_addr;
1066
1067 spin_lock_init(&ifa->lock);
1068 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1069 INIT_HLIST_NODE(&ifa->addr_lst);
1070 ifa->scope = scope;
1071 ifa->prefix_len = pfxlen;
1072 ifa->flags = flags;
1073 /* No need to add the TENTATIVE flag for addresses with NODAD */
1074 if (!(flags & IFA_F_NODAD))
1075 ifa->flags |= IFA_F_TENTATIVE;
1076 ifa->valid_lft = valid_lft;
1077 ifa->prefered_lft = prefered_lft;
1078 ifa->cstamp = ifa->tstamp = jiffies;
1079 ifa->tokenized = false;
1080
1081 ifa->rt = rt;
1082
1083 ifa->idev = idev;
1084 in6_dev_hold(idev);
1085
1086 /* For caller */
1087 refcount_set(&ifa->refcnt, 1);
1088
1089 rcu_read_lock_bh();
1090
1091 err = ipv6_add_addr_hash(idev->dev, ifa);
1092 if (err < 0) {
1093 rcu_read_unlock_bh();
1094 goto out;
1095 }
1096
1097 write_lock(&idev->lock);
1098
1099 /* Add to inet6_dev unicast addr list. */
1100 ipv6_link_dev_addr(idev, ifa);
1101
1102 if (ifa->flags&IFA_F_TEMPORARY) {
1103 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1104 in6_ifa_hold(ifa);
1105 }
1106
1107 in6_ifa_hold(ifa);
1108 write_unlock(&idev->lock);
1109
1110 rcu_read_unlock_bh();
1111
1112 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1113 out:
1114 if (unlikely(err < 0)) {
1115 if (rt)
1116 ip6_rt_put(rt);
1117 if (ifa) {
1118 if (ifa->idev)
1119 in6_dev_put(ifa->idev);
1120 kfree(ifa);
1121 }
1122 ifa = ERR_PTR(err);
1123 }
1124
1125 return ifa;
1126 }
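/* Usage sketch (simplified, illustrative): a typical caller adds an address,
 * kicks off DAD and drops its own reference, e.g.:
 *
 *	ifp = ipv6_add_addr(idev, &addr, NULL, 64, IFA_LINK, addr_flags,
 *			    INFINITY_LIFE_TIME, INFINITY_LIFE_TIME,
 *			    true, NULL);
 *	if (!IS_ERR(ifp)) {
 *		addrconf_dad_start(ifp);
 *		in6_ifa_put(ifp);
 *	}
 */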
1127
1128 enum cleanup_prefix_rt_t {
1129 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1130 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1131 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1132 };
1133
1134 /*
1135 * Check, whether the prefix for ifp would still need a prefix route
1136 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1137 * constants.
1138 *
1139 * 1) we don't purge prefix if address was not permanent.
1140 * prefix is managed by its own lifetime.
1141 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1142 * 3) if there are no addresses, delete prefix.
1143 * 4) if there are still other permanent address(es),
1144 * corresponding prefix is still permanent.
1145 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1146 * don't purge the prefix, assume user space is managing it.
1147 * 6) otherwise, update prefix lifetime to the
1148 * longest valid lifetime among the corresponding
1149 * addresses on the device.
1150 * Note: subsequent RA will update lifetime.
1151 **/
1152 static enum cleanup_prefix_rt_t
1153 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1154 {
1155 struct inet6_ifaddr *ifa;
1156 struct inet6_dev *idev = ifp->idev;
1157 unsigned long lifetime;
1158 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1159
1160 *expires = jiffies;
1161
1162 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1163 if (ifa == ifp)
1164 continue;
1165 if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1166 ifp->prefix_len))
1167 continue;
1168 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1169 return CLEANUP_PREFIX_RT_NOP;
1170
1171 action = CLEANUP_PREFIX_RT_EXPIRE;
1172
1173 spin_lock(&ifa->lock);
1174
1175 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1176 /*
1177 * Note: Because this address is
1178 * not permanent, lifetime <
1179 * LONG_MAX / HZ here.
1180 */
1181 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1182 *expires = ifa->tstamp + lifetime * HZ;
1183 spin_unlock(&ifa->lock);
1184 }
1185
1186 return action;
1187 }
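/* Example (illustrative): deleting an autoconfigured address while another
 * non-permanent address from the same /64 remains returns
 * CLEANUP_PREFIX_RT_EXPIRE, and *expires is set to that address's
 * tstamp + valid_lft * HZ; if no other address shares the prefix, the
 * prefix route is simply deleted (CLEANUP_PREFIX_RT_DEL).
 */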
1188
1189 static void
1190 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
1191 {
1192 struct rt6_info *rt;
1193
1194 rt = addrconf_get_prefix_route(&ifp->addr,
1195 ifp->prefix_len,
1196 ifp->idev->dev,
1197 0, RTF_GATEWAY | RTF_DEFAULT);
1198 if (rt) {
1199 if (del_rt)
1200 ip6_del_rt(rt);
1201 else {
1202 if (!(rt->rt6i_flags & RTF_EXPIRES))
1203 rt6_set_expires(rt, expires);
1204 ip6_rt_put(rt);
1205 }
1206 }
1207 }
1208
1209
1210 /* This function wants to get referenced ifp and releases it before return */
1211
1212 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1213 {
1214 int state;
1215 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1216 unsigned long expires;
1217
1218 ASSERT_RTNL();
1219
1220 spin_lock_bh(&ifp->lock);
1221 state = ifp->state;
1222 ifp->state = INET6_IFADDR_STATE_DEAD;
1223 spin_unlock_bh(&ifp->lock);
1224
1225 if (state == INET6_IFADDR_STATE_DEAD)
1226 goto out;
1227
1228 spin_lock_bh(&addrconf_hash_lock);
1229 hlist_del_init_rcu(&ifp->addr_lst);
1230 spin_unlock_bh(&addrconf_hash_lock);
1231
1232 write_lock_bh(&ifp->idev->lock);
1233
1234 if (ifp->flags&IFA_F_TEMPORARY) {
1235 list_del(&ifp->tmp_list);
1236 if (ifp->ifpub) {
1237 in6_ifa_put(ifp->ifpub);
1238 ifp->ifpub = NULL;
1239 }
1240 __in6_ifa_put(ifp);
1241 }
1242
1243 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1244 action = check_cleanup_prefix_route(ifp, &expires);
1245
1246 list_del_rcu(&ifp->if_list);
1247 __in6_ifa_put(ifp);
1248
1249 write_unlock_bh(&ifp->idev->lock);
1250
1251 addrconf_del_dad_work(ifp);
1252
1253 ipv6_ifa_notify(RTM_DELADDR, ifp);
1254
1255 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1256
1257 if (action != CLEANUP_PREFIX_RT_NOP) {
1258 cleanup_prefix_route(ifp, expires,
1259 action == CLEANUP_PREFIX_RT_DEL);
1260 }
1261
1262 /* clean up prefsrc entries */
1263 rt6_remove_prefsrc(ifp);
1264 out:
1265 in6_ifa_put(ifp);
1266 }
1267
1268 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *ift)
1269 {
1270 struct inet6_dev *idev = ifp->idev;
1271 struct in6_addr addr, *tmpaddr;
1272 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
1273 unsigned long regen_advance;
1274 int tmp_plen;
1275 int ret = 0;
1276 u32 addr_flags;
1277 unsigned long now = jiffies;
1278 long max_desync_factor;
1279 s32 cnf_temp_preferred_lft;
1280
1281 write_lock_bh(&idev->lock);
1282 if (ift) {
1283 spin_lock_bh(&ift->lock);
1284 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
1285 spin_unlock_bh(&ift->lock);
1286 tmpaddr = &addr;
1287 } else {
1288 tmpaddr = NULL;
1289 }
1290 retry:
1291 in6_dev_hold(idev);
1292 if (idev->cnf.use_tempaddr <= 0) {
1293 write_unlock_bh(&idev->lock);
1294 pr_info("%s: use_tempaddr is disabled\n", __func__);
1295 in6_dev_put(idev);
1296 ret = -1;
1297 goto out;
1298 }
1299 spin_lock_bh(&ifp->lock);
1300 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1301 idev->cnf.use_tempaddr = -1; /*XXX*/
1302 spin_unlock_bh(&ifp->lock);
1303 write_unlock_bh(&idev->lock);
1304 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1305 __func__);
1306 in6_dev_put(idev);
1307 ret = -1;
1308 goto out;
1309 }
1310 in6_ifa_hold(ifp);
1311 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1312 ipv6_try_regen_rndid(idev, tmpaddr);
1313 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1314 age = (now - ifp->tstamp) / HZ;
1315
1316 regen_advance = idev->cnf.regen_max_retry *
1317 idev->cnf.dad_transmits *
1318 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1319
1320 /* recalculate max_desync_factor each time and update
1321 * idev->desync_factor if it's larger
1322 */
1323 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1324 max_desync_factor = min_t(__u32,
1325 idev->cnf.max_desync_factor,
1326 cnf_temp_preferred_lft - regen_advance);
1327
1328 if (unlikely(idev->desync_factor > max_desync_factor)) {
1329 if (max_desync_factor > 0) {
1330 get_random_bytes(&idev->desync_factor,
1331 sizeof(idev->desync_factor));
1332 idev->desync_factor %= max_desync_factor;
1333 } else {
1334 idev->desync_factor = 0;
1335 }
1336 }
1337
1338 tmp_valid_lft = min_t(__u32,
1339 ifp->valid_lft,
1340 idev->cnf.temp_valid_lft + age);
1341 tmp_prefered_lft = cnf_temp_preferred_lft + age -
1342 idev->desync_factor;
1343 tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
1344 tmp_plen = ifp->prefix_len;
1345 tmp_tstamp = ifp->tstamp;
1346 spin_unlock_bh(&ifp->lock);
1347
1348 write_unlock_bh(&idev->lock);
1349
1350 /* A temporary address is created only if this calculated Preferred
1351 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1352 * an implementation must not create a temporary address with a zero
1353 * Preferred Lifetime.
1354 * Use age calculation as in addrconf_verify to avoid unnecessary
1355 * temporary addresses being generated.
1356 */
1357 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1358 if (tmp_prefered_lft <= regen_advance + age) {
1359 in6_ifa_put(ifp);
1360 in6_dev_put(idev);
1361 ret = -1;
1362 goto out;
1363 }
1364
1365 addr_flags = IFA_F_TEMPORARY;
1366 /* set in addrconf_prefix_rcv() */
1367 if (ifp->flags & IFA_F_OPTIMISTIC)
1368 addr_flags |= IFA_F_OPTIMISTIC;
1369
1370 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1371 ipv6_addr_scope(&addr), addr_flags,
1372 tmp_valid_lft, tmp_prefered_lft, true, NULL);
1373 if (IS_ERR(ift)) {
1374 in6_ifa_put(ifp);
1375 in6_dev_put(idev);
1376 pr_info("%s: retry temporary address regeneration\n", __func__);
1377 tmpaddr = &addr;
1378 write_lock_bh(&idev->lock);
1379 goto retry;
1380 }
1381
1382 spin_lock_bh(&ift->lock);
1383 ift->ifpub = ifp;
1384 ift->cstamp = now;
1385 ift->tstamp = tmp_tstamp;
1386 spin_unlock_bh(&ift->lock);
1387
1388 addrconf_dad_start(ift);
1389 in6_ifa_put(ift);
1390 in6_dev_put(idev);
1391 out:
1392 return ret;
1393 }
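/* Example (illustrative, using the compile-time defaults): the temporary
 * address keeps the public address's /64 prefix and takes its interface
 * identifier from idev->rndid. Its valid lifetime is clamped to
 * min(public valid_lft, temp_valid_lft + age) and its preferred lifetime to
 * min(public prefered_lft, temp_prefered_lft + age - desync_factor), so with
 * the default temp_valid_lft of 7 days and temp_prefered_lft of 1 day a
 * fresh temporary address stays preferred for slightly under a day.
 */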
1394
1395 /*
1396 * Choose an appropriate source address (RFC3484)
1397 */
1398 enum {
1399 IPV6_SADDR_RULE_INIT = 0,
1400 IPV6_SADDR_RULE_LOCAL,
1401 IPV6_SADDR_RULE_SCOPE,
1402 IPV6_SADDR_RULE_PREFERRED,
1403 #ifdef CONFIG_IPV6_MIP6
1404 IPV6_SADDR_RULE_HOA,
1405 #endif
1406 IPV6_SADDR_RULE_OIF,
1407 IPV6_SADDR_RULE_LABEL,
1408 IPV6_SADDR_RULE_PRIVACY,
1409 IPV6_SADDR_RULE_ORCHID,
1410 IPV6_SADDR_RULE_PREFIX,
1411 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1412 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1413 #endif
1414 IPV6_SADDR_RULE_MAX
1415 };
1416
1417 struct ipv6_saddr_score {
1418 int rule;
1419 int addr_type;
1420 struct inet6_ifaddr *ifa;
1421 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1422 int scopedist;
1423 int matchlen;
1424 };
1425
1426 struct ipv6_saddr_dst {
1427 const struct in6_addr *addr;
1428 int ifindex;
1429 int scope;
1430 int label;
1431 unsigned int prefs;
1432 };
1433
1434 static inline int ipv6_saddr_preferred(int type)
1435 {
1436 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1437 return 1;
1438 return 0;
1439 }
1440
1441 static bool ipv6_use_optimistic_addr(struct net *net,
1442 struct inet6_dev *idev)
1443 {
1444 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1445 if (!idev)
1446 return false;
1447 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1448 return false;
1449 if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1450 return false;
1451
1452 return true;
1453 #else
1454 return false;
1455 #endif
1456 }
1457
1458 static int ipv6_get_saddr_eval(struct net *net,
1459 struct ipv6_saddr_score *score,
1460 struct ipv6_saddr_dst *dst,
1461 int i)
1462 {
1463 int ret;
1464
1465 if (i <= score->rule) {
1466 switch (i) {
1467 case IPV6_SADDR_RULE_SCOPE:
1468 ret = score->scopedist;
1469 break;
1470 case IPV6_SADDR_RULE_PREFIX:
1471 ret = score->matchlen;
1472 break;
1473 default:
1474 ret = !!test_bit(i, score->scorebits);
1475 }
1476 goto out;
1477 }
1478
1479 switch (i) {
1480 case IPV6_SADDR_RULE_INIT:
1481 /* Rule 0: remember if hiscore is not ready yet */
1482 ret = !!score->ifa;
1483 break;
1484 case IPV6_SADDR_RULE_LOCAL:
1485 /* Rule 1: Prefer same address */
1486 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1487 break;
1488 case IPV6_SADDR_RULE_SCOPE:
1489 /* Rule 2: Prefer appropriate scope
1490 *
1491 * ret
1492 * ^
1493 * -1 | d 15
1494 * ---+--+-+---> scope
1495 * |
1496 * | d is scope of the destination.
1497 * B-d | \
1498 * | \ <- smaller scope is better
1499 * B-15 | \ if scope is enough for destination.
1500 * | ret = B - scope (-1 <= d <= scope <= 15).
1501 * d-C-1 | /
1502 * |/ <- greater is better
1503 * -C / if scope is not enough for destination.
1504 * /| ret = scope - C (-1 <= d < scope <= 15).
1505 *
1506 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1507 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1508 * Assume B = 0 and we get C > 29.
1509 */
1510 ret = __ipv6_addr_src_scope(score->addr_type);
1511 if (ret >= dst->scope)
1512 ret = -ret;
1513 else
1514 ret -= 128; /* 30 is enough */
1515 score->scopedist = ret;
1516 break;
1517 case IPV6_SADDR_RULE_PREFERRED:
1518 {
1519 /* Rule 3: Avoid deprecated and optimistic addresses */
1520 u8 avoid = IFA_F_DEPRECATED;
1521
1522 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1523 avoid |= IFA_F_OPTIMISTIC;
1524 ret = ipv6_saddr_preferred(score->addr_type) ||
1525 !(score->ifa->flags & avoid);
1526 break;
1527 }
1528 #ifdef CONFIG_IPV6_MIP6
1529 case IPV6_SADDR_RULE_HOA:
1530 {
1531 /* Rule 4: Prefer home address */
1532 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1533 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1534 break;
1535 }
1536 #endif
1537 case IPV6_SADDR_RULE_OIF:
1538 /* Rule 5: Prefer outgoing interface */
1539 ret = (!dst->ifindex ||
1540 dst->ifindex == score->ifa->idev->dev->ifindex);
1541 break;
1542 case IPV6_SADDR_RULE_LABEL:
1543 /* Rule 6: Prefer matching label */
1544 ret = ipv6_addr_label(net,
1545 &score->ifa->addr, score->addr_type,
1546 score->ifa->idev->dev->ifindex) == dst->label;
1547 break;
1548 case IPV6_SADDR_RULE_PRIVACY:
1549 {
1550 /* Rule 7: Prefer public address
1551 * Note: prefer temporary address if use_tempaddr >= 2
1552 */
1553 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1554 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1555 score->ifa->idev->cnf.use_tempaddr >= 2;
1556 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1557 break;
1558 }
1559 case IPV6_SADDR_RULE_ORCHID:
1560 /* Rule 8-: Prefer ORCHID vs ORCHID or
1561 * non-ORCHID vs non-ORCHID
1562 */
1563 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1564 ipv6_addr_orchid(dst->addr));
1565 break;
1566 case IPV6_SADDR_RULE_PREFIX:
1567 /* Rule 8: Use longest matching prefix */
1568 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1569 if (ret > score->ifa->prefix_len)
1570 ret = score->ifa->prefix_len;
1571 score->matchlen = ret;
1572 break;
1573 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1574 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1575 /* Optimistic addresses still have lower precedence than other
1576 * preferred addresses.
1577 */
1578 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1579 break;
1580 #endif
1581 default:
1582 ret = 0;
1583 }
1584
1585 if (ret)
1586 __set_bit(i, score->scorebits);
1587 score->rule = i;
1588 out:
1589 return ret;
1590 }
1591
1592 static int __ipv6_dev_get_saddr(struct net *net,
1593 struct ipv6_saddr_dst *dst,
1594 struct inet6_dev *idev,
1595 struct ipv6_saddr_score *scores,
1596 int hiscore_idx)
1597 {
1598 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1599
1600 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1601 int i;
1602
1603 /*
1604 * - Tentative Address (RFC2462 section 5.4)
1605 * - A tentative address is not considered
1606 * "assigned to an interface" in the traditional
1607 * sense, unless it is also flagged as optimistic.
1608 * - Candidate Source Address (section 4)
1609 * - In any case, anycast addresses, multicast
1610 * addresses, and the unspecified address MUST
1611 * NOT be included in a candidate set.
1612 */
1613 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1614 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1615 continue;
1616
1617 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1618
1619 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1620 score->addr_type & IPV6_ADDR_MULTICAST)) {
1621 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1622 idev->dev->name);
1623 continue;
1624 }
1625
1626 score->rule = -1;
1627 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1628
1629 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1630 int minihiscore, miniscore;
1631
1632 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1633 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1634
1635 if (minihiscore > miniscore) {
1636 if (i == IPV6_SADDR_RULE_SCOPE &&
1637 score->scopedist > 0) {
1638 /*
1639 * special case:
1640 * each remaining entry
1641 * has too small (not enough)
1642 * scope, because ifa entries
1643 * are sorted by their scope
1644 * values.
1645 */
1646 goto out;
1647 }
1648 break;
1649 } else if (minihiscore < miniscore) {
1650 swap(hiscore, score);
1651 hiscore_idx = 1 - hiscore_idx;
1652
1653 /* restore our iterator */
1654 score->ifa = hiscore->ifa;
1655
1656 break;
1657 }
1658 }
1659 }
1660 out:
1661 return hiscore_idx;
1662 }
1663
1664 static int ipv6_get_saddr_master(struct net *net,
1665 const struct net_device *dst_dev,
1666 const struct net_device *master,
1667 struct ipv6_saddr_dst *dst,
1668 struct ipv6_saddr_score *scores,
1669 int hiscore_idx)
1670 {
1671 struct inet6_dev *idev;
1672
1673 idev = __in6_dev_get(dst_dev);
1674 if (idev)
1675 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1676 scores, hiscore_idx);
1677
1678 idev = __in6_dev_get(master);
1679 if (idev)
1680 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1681 scores, hiscore_idx);
1682
1683 return hiscore_idx;
1684 }
1685
1686 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1687 const struct in6_addr *daddr, unsigned int prefs,
1688 struct in6_addr *saddr)
1689 {
1690 struct ipv6_saddr_score scores[2], *hiscore;
1691 struct ipv6_saddr_dst dst;
1692 struct inet6_dev *idev;
1693 struct net_device *dev;
1694 int dst_type;
1695 bool use_oif_addr = false;
1696 int hiscore_idx = 0;
1697 int ret = 0;
1698
1699 dst_type = __ipv6_addr_type(daddr);
1700 dst.addr = daddr;
1701 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1702 dst.scope = __ipv6_addr_src_scope(dst_type);
1703 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1704 dst.prefs = prefs;
1705
1706 scores[hiscore_idx].rule = -1;
1707 scores[hiscore_idx].ifa = NULL;
1708
1709 rcu_read_lock();
1710
1711 /* Candidate Source Address (section 4)
1712 * - multicast and link-local destination address,
1713 * the set of candidate source address MUST only
1714 * include addresses assigned to interfaces
1715 * belonging to the same link as the outgoing
1716 * interface.
1717 * (- For site-local destination addresses, the
1718 * set of candidate source addresses MUST only
1719 * include addresses assigned to interfaces
1720 * belonging to the same site as the outgoing
1721 * interface.)
1722 * - "It is RECOMMENDED that the candidate source addresses
1723 * be the set of unicast addresses assigned to the
1724 * interface that will be used to send to the destination
1725 * (the 'outgoing' interface)." (RFC 6724)
1726 */
1727 if (dst_dev) {
1728 idev = __in6_dev_get(dst_dev);
1729 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1730 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1731 (idev && idev->cnf.use_oif_addrs_only)) {
1732 use_oif_addr = true;
1733 }
1734 }
1735
1736 if (use_oif_addr) {
1737 if (idev)
1738 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1739 } else {
1740 const struct net_device *master;
1741 int master_idx = 0;
1742
1743 /* if dst_dev exists and is enslaved to an L3 device, then
1744 * prefer addresses from dst_dev and then the master over
1745 * any other enslaved devices in the L3 domain.
1746 */
1747 master = l3mdev_master_dev_rcu(dst_dev);
1748 if (master) {
1749 master_idx = master->ifindex;
1750
1751 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1752 master, &dst,
1753 scores, hiscore_idx);
1754
1755 if (scores[hiscore_idx].ifa)
1756 goto out;
1757 }
1758
1759 for_each_netdev_rcu(net, dev) {
1760 /* only consider addresses on devices in the
1761 * same L3 domain
1762 */
1763 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1764 continue;
1765 idev = __in6_dev_get(dev);
1766 if (!idev)
1767 continue;
1768 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1769 }
1770 }
1771
1772 out:
1773 hiscore = &scores[hiscore_idx];
1774 if (!hiscore->ifa)
1775 ret = -EADDRNOTAVAIL;
1776 else
1777 *saddr = hiscore->ifa->addr;
1778
1779 rcu_read_unlock();
1780 return ret;
1781 }
1782 EXPORT_SYMBOL(ipv6_dev_get_saddr);
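/* Usage sketch (simplified, illustrative): route lookup code resolves a
 * source address for an outgoing flow roughly like this:
 *
 *	struct in6_addr saddr;
 *
 *	if (!ipv6_dev_get_saddr(net, dst_dev, &fl6->daddr,
 *				sk ? inet6_sk(sk)->srcprefs : 0, &saddr))
 *		fl6->saddr = saddr;
 */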
1783
1784 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1785 u32 banned_flags)
1786 {
1787 struct inet6_ifaddr *ifp;
1788 int err = -EADDRNOTAVAIL;
1789
1790 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1791 if (ifp->scope > IFA_LINK)
1792 break;
1793 if (ifp->scope == IFA_LINK &&
1794 !(ifp->flags & banned_flags)) {
1795 *addr = ifp->addr;
1796 err = 0;
1797 break;
1798 }
1799 }
1800 return err;
1801 }
1802
1803 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1804 u32 banned_flags)
1805 {
1806 struct inet6_dev *idev;
1807 int err = -EADDRNOTAVAIL;
1808
1809 rcu_read_lock();
1810 idev = __in6_dev_get(dev);
1811 if (idev) {
1812 read_lock_bh(&idev->lock);
1813 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1814 read_unlock_bh(&idev->lock);
1815 }
1816 rcu_read_unlock();
1817 return err;
1818 }
1819
1820 static int ipv6_count_addresses(const struct inet6_dev *idev)
1821 {
1822 const struct inet6_ifaddr *ifp;
1823 int cnt = 0;
1824
1825 rcu_read_lock();
1826 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1827 cnt++;
1828 rcu_read_unlock();
1829 return cnt;
1830 }
1831
1832 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1833 const struct net_device *dev, int strict)
1834 {
1835 return ipv6_chk_addr_and_flags(net, addr, dev, strict, IFA_F_TENTATIVE);
1836 }
1837 EXPORT_SYMBOL(ipv6_chk_addr);
1838
1839 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1840 const struct net_device *dev, int strict,
1841 u32 banned_flags)
1842 {
1843 unsigned int hash = inet6_addr_hash(net, addr);
1844 struct inet6_ifaddr *ifp;
1845 u32 ifp_flags;
1846
1847 rcu_read_lock();
1848 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1849 if (!net_eq(dev_net(ifp->idev->dev), net))
1850 continue;
1851 /* Decouple optimistic from tentative for evaluation here.
1852 * Ban optimistic addresses explicitly, when required.
1853 */
1854 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1855 ? (ifp->flags&~IFA_F_TENTATIVE)
1856 : ifp->flags;
1857 if (ipv6_addr_equal(&ifp->addr, addr) &&
1858 !(ifp_flags&banned_flags) &&
1859 (!dev || ifp->idev->dev == dev ||
1860 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1861 rcu_read_unlock();
1862 return 1;
1863 }
1864 }
1865
1866 rcu_read_unlock();
1867 return 0;
1868 }
1869 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1870
1871
1872 /* Compares an address/prefix_len with addresses on device @dev.
1873 * If one is found it returns true.
1874 */
1875 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1876 const unsigned int prefix_len, struct net_device *dev)
1877 {
1878 const struct inet6_ifaddr *ifa;
1879 const struct inet6_dev *idev;
1880 bool ret = false;
1881
1882 rcu_read_lock();
1883 idev = __in6_dev_get(dev);
1884 if (idev) {
1885 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1886 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1887 if (ret)
1888 break;
1889 }
1890 }
1891 rcu_read_unlock();
1892
1893 return ret;
1894 }
1895 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1896
1897 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1898 {
1899 const struct inet6_ifaddr *ifa;
1900 const struct inet6_dev *idev;
1901 int onlink;
1902
1903 onlink = 0;
1904 rcu_read_lock();
1905 idev = __in6_dev_get(dev);
1906 if (idev) {
1907 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1908 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1909 ifa->prefix_len);
1910 if (onlink)
1911 break;
1912 }
1913 }
1914 rcu_read_unlock();
1915 return onlink;
1916 }
1917 EXPORT_SYMBOL(ipv6_chk_prefix);
1918
1919 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1920 struct net_device *dev, int strict)
1921 {
1922 unsigned int hash = inet6_addr_hash(net, addr);
1923 struct inet6_ifaddr *ifp, *result = NULL;
1924
1925 rcu_read_lock_bh();
1926 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
1927 if (!net_eq(dev_net(ifp->idev->dev), net))
1928 continue;
1929 if (ipv6_addr_equal(&ifp->addr, addr)) {
1930 if (!dev || ifp->idev->dev == dev ||
1931 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1932 result = ifp;
1933 in6_ifa_hold(ifp);
1934 break;
1935 }
1936 }
1937 }
1938 rcu_read_unlock_bh();
1939
1940 return result;
1941 }
1942
1943 /* Gets referenced address, destroys ifaddr */
1944
1945 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1946 {
1947 if (dad_failed)
1948 ifp->flags |= IFA_F_DADFAILED;
1949
1950 if (ifp->flags&IFA_F_TEMPORARY) {
1951 struct inet6_ifaddr *ifpub;
1952 spin_lock_bh(&ifp->lock);
1953 ifpub = ifp->ifpub;
1954 if (ifpub) {
1955 in6_ifa_hold(ifpub);
1956 spin_unlock_bh(&ifp->lock);
1957 ipv6_create_tempaddr(ifpub, ifp);
1958 in6_ifa_put(ifpub);
1959 } else {
1960 spin_unlock_bh(&ifp->lock);
1961 }
1962 ipv6_del_addr(ifp);
1963 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
1964 spin_lock_bh(&ifp->lock);
1965 addrconf_del_dad_work(ifp);
1966 ifp->flags |= IFA_F_TENTATIVE;
1967 spin_unlock_bh(&ifp->lock);
1968 if (dad_failed)
1969 ipv6_ifa_notify(0, ifp);
1970 in6_ifa_put(ifp);
1971 } else {
1972 ipv6_del_addr(ifp);
1973 }
1974 }
1975
1976 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
1977 {
1978 int err = -ENOENT;
1979
1980 spin_lock_bh(&ifp->lock);
1981 if (ifp->state == INET6_IFADDR_STATE_DAD) {
1982 ifp->state = INET6_IFADDR_STATE_POSTDAD;
1983 err = 0;
1984 }
1985 spin_unlock_bh(&ifp->lock);
1986
1987 return err;
1988 }
1989
1990 void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1991 {
1992 struct inet6_dev *idev = ifp->idev;
1993 struct net *net = dev_net(ifp->idev->dev);
1994
1995 if (addrconf_dad_end(ifp)) {
1996 in6_ifa_put(ifp);
1997 return;
1998 }
1999
2000 net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
2001 ifp->idev->dev->name, &ifp->addr);
2002
2003 spin_lock_bh(&ifp->lock);
2004
2005 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2006 int scope = ifp->scope;
2007 u32 flags = ifp->flags;
2008 struct in6_addr new_addr;
2009 struct inet6_ifaddr *ifp2;
2010 u32 valid_lft, preferred_lft;
2011 int pfxlen = ifp->prefix_len;
2012 int retries = ifp->stable_privacy_retry + 1;
2013
2014 if (retries > net->ipv6.sysctl.idgen_retries) {
2015 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2016 ifp->idev->dev->name);
2017 goto errdad;
2018 }
2019
2020 new_addr = ifp->addr;
2021 if (ipv6_generate_stable_address(&new_addr, retries,
2022 idev))
2023 goto errdad;
2024
2025 valid_lft = ifp->valid_lft;
2026 preferred_lft = ifp->prefered_lft;
2027
2028 spin_unlock_bh(&ifp->lock);
2029
2030 if (idev->cnf.max_addresses &&
2031 ipv6_count_addresses(idev) >=
2032 idev->cnf.max_addresses)
2033 goto lock_errdad;
2034
2035 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2036 ifp->idev->dev->name);
2037
2038 ifp2 = ipv6_add_addr(idev, &new_addr, NULL, pfxlen,
2039 scope, flags, valid_lft,
2040 preferred_lft, false, NULL);
2041 if (IS_ERR(ifp2))
2042 goto lock_errdad;
2043
2044 spin_lock_bh(&ifp2->lock);
2045 ifp2->stable_privacy_retry = retries;
2046 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2047 spin_unlock_bh(&ifp2->lock);
2048
2049 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2050 in6_ifa_put(ifp2);
2051 lock_errdad:
2052 spin_lock_bh(&ifp->lock);
2053 }
2054
2055 errdad:
2056 /* transition from _POSTDAD to _ERRDAD */
2057 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2058 spin_unlock_bh(&ifp->lock);
2059
2060 addrconf_mod_dad_work(ifp, 0);
2061 in6_ifa_put(ifp);
2062 }
2063
2064 /* Join the solicited-node multicast group for the given address.
2065 * Caller must hold RTNL. */
2066 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2067 {
2068 struct in6_addr maddr;
2069
2070 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2071 return;
2072
2073 addrconf_addr_solict_mult(addr, &maddr);
2074 ipv6_dev_mc_inc(dev, &maddr);
2075 }
2076
2077 /* caller must hold RTNL */
2078 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2079 {
2080 struct in6_addr maddr;
2081
2082 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2083 return;
2084
2085 addrconf_addr_solict_mult(addr, &maddr);
2086 __ipv6_dev_mc_dec(idev, &maddr);
2087 }
2088
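/* Join the subnet-router anycast address of this address's prefix (the
 * prefix with an all-zero interface identifier).  Skipped for prefixes
 * of /127 and longer, per RFC 6164.
 */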
2089 /* caller must hold RTNL */
2090 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2091 {
2092 struct in6_addr addr;
2093
2094 if (ifp->prefix_len >= 127) /* RFC 6164 */
2095 return;
2096 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2097 if (ipv6_addr_any(&addr))
2098 return;
2099 __ipv6_dev_ac_inc(ifp->idev, &addr);
2100 }
2101
2102 /* caller must hold RTNL */
2103 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2104 {
2105 struct in6_addr addr;
2106
2107 if (ifp->prefix_len >= 127) /* RFC 6164 */
2108 return;
2109 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2110 if (ipv6_addr_any(&addr))
2111 return;
2112 __ipv6_dev_ac_dec(ifp->idev, &addr);
2113 }
2114
2115 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2116 {
2117 switch (dev->addr_len) {
2118 case ETH_ALEN:
2119 memcpy(eui, dev->dev_addr, 3);
2120 eui[3] = 0xFF;
2121 eui[4] = 0xFE;
2122 memcpy(eui + 5, dev->dev_addr + 3, 3);
2123 break;
2124 case EUI64_ADDR_LEN:
2125 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2126 eui[0] ^= 2;
2127 break;
2128 default:
2129 return -1;
2130 }
2131
2132 return 0;
2133 }
2134
2135 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2136 {
2137 union fwnet_hwaddr *ha;
2138
2139 if (dev->addr_len != FWNET_ALEN)
2140 return -1;
2141
2142 ha = (union fwnet_hwaddr *)dev->dev_addr;
2143
2144 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2145 eui[0] ^= 2;
2146 return 0;
2147 }
2148
2149 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2150 {
2151 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2152 if (dev->addr_len != ARCNET_ALEN)
2153 return -1;
2154 memset(eui, 0, 7);
2155 eui[7] = *(u8 *)dev->dev_addr;
2156 return 0;
2157 }
2158
2159 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2160 {
2161 if (dev->addr_len != INFINIBAND_ALEN)
2162 return -1;
2163 memcpy(eui, dev->dev_addr + 12, 8);
2164 eui[0] |= 2;
2165 return 0;
2166 }
2167
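/*
 * Build an ISATAP (RFC 5214) interface identifier from an IPv4 address:
 * 0200:5EFE:<v4addr> when the IPv4 address is globally unique, and
 * 0000:5EFE:<v4addr> (u/l bit clear) when it comes from a private,
 * loopback, link-local, multicast or other non-global range.
 */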
2168 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2169 {
2170 if (addr == 0)
2171 return -1;
2172 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2173 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2174 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2175 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2176 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2177 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2178 eui[1] = 0;
2179 eui[2] = 0x5E;
2180 eui[3] = 0xFE;
2181 memcpy(eui + 4, &addr, 4);
2182 return 0;
2183 }
2184
2185 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2186 {
2187 if (dev->priv_flags & IFF_ISATAP)
2188 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2189 return -1;
2190 }
2191
2192 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2193 {
2194 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2195 }
2196
2197 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2198 {
2199 memcpy(eui, dev->perm_addr, 3);
2200 memcpy(eui + 5, dev->perm_addr + 3, 3);
2201 eui[3] = 0xFF;
2202 eui[4] = 0xFE;
2203 eui[0] ^= 2;
2204 return 0;
2205 }
2206
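/*
 * Derive a modified EUI-64 interface identifier from the link-layer
 * address, dispatching on the device type.  For Ethernet-like devices
 * (addrconf_ifid_eui48) this is the usual RFC 4291 mapping: split the
 * 48-bit MAC in half, insert 0xFFFE, and flip the universal/local bit,
 * e.g. 00:11:22:33:44:55 -> 0211:22ff:fe33:4455.
 */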
2207 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2208 {
2209 switch (dev->type) {
2210 case ARPHRD_ETHER:
2211 case ARPHRD_FDDI:
2212 return addrconf_ifid_eui48(eui, dev);
2213 case ARPHRD_ARCNET:
2214 return addrconf_ifid_arcnet(eui, dev);
2215 case ARPHRD_INFINIBAND:
2216 return addrconf_ifid_infiniband(eui, dev);
2217 case ARPHRD_SIT:
2218 return addrconf_ifid_sit(eui, dev);
2219 case ARPHRD_IPGRE:
2220 case ARPHRD_TUNNEL:
2221 return addrconf_ifid_gre(eui, dev);
2222 case ARPHRD_6LOWPAN:
2223 return addrconf_ifid_6lowpan(eui, dev);
2224 case ARPHRD_IEEE1394:
2225 return addrconf_ifid_ieee1394(eui, dev);
2226 case ARPHRD_TUNNEL6:
2227 case ARPHRD_IP6GRE:
2228 return addrconf_ifid_ip6tnl(eui, dev);
2229 }
2230 return -1;
2231 }
2232
2233 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2234 {
2235 int err = -1;
2236 struct inet6_ifaddr *ifp;
2237
2238 read_lock_bh(&idev->lock);
2239 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2240 if (ifp->scope > IFA_LINK)
2241 break;
2242 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2243 memcpy(eui, ifp->addr.s6_addr+8, 8);
2244 err = 0;
2245 break;
2246 }
2247 }
2248 read_unlock_bh(&idev->lock);
2249 return err;
2250 }
2251
2252 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2253 static void ipv6_regen_rndid(struct inet6_dev *idev)
2254 {
2255 regen:
2256 get_random_bytes(idev->rndid, sizeof(idev->rndid));
2257 idev->rndid[0] &= ~0x02;
2258
2259 /*
2260 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
2261 * check if generated address is not inappropriate
2262 *
2263 * - Reserved subnet anycast (RFC 2526)
2264 * 11111101 11....11 1xxxxxxx
2265 * - ISATAP (RFC4214) 6.1
2266 * 00-00-5E-FE-xx-xx-xx-xx
2267 * - value 0
2268 * - XXX: already assigned to an address on the device
2269 */
2270 if (idev->rndid[0] == 0xfd &&
2271 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
2272 (idev->rndid[7]&0x80))
2273 goto regen;
2274 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
2275 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
2276 goto regen;
2277 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
2278 goto regen;
2279 }
2280 }
2281
2282 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2283 {
2284 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2285 ipv6_regen_rndid(idev);
2286 }
2287
2288 /*
2289 * Add prefix route.
2290 */
2291
2292 static void
2293 addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
2294 unsigned long expires, u32 flags)
2295 {
2296 struct fib6_config cfg = {
2297 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2298 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2299 .fc_ifindex = dev->ifindex,
2300 .fc_expires = expires,
2301 .fc_dst_len = plen,
2302 .fc_flags = RTF_UP | flags,
2303 .fc_nlinfo.nl_net = dev_net(dev),
2304 .fc_protocol = RTPROT_KERNEL,
2305 };
2306
2307 cfg.fc_dst = *pfx;
2308
2309 /* Prevent useless cloning on PtP SIT.
2310 This is done here on the assumption that the whole
2311 class of non-broadcast devices does not need cloning.
2312 */
2313 #if IS_ENABLED(CONFIG_IPV6_SIT)
2314 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2315 cfg.fc_flags |= RTF_NONEXTHOP;
2316 #endif
2317
2318 ip6_route_add(&cfg, NULL);
2319 }
2320
2321
2322 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2323 int plen,
2324 const struct net_device *dev,
2325 u32 flags, u32 noflags)
2326 {
2327 struct fib6_node *fn;
2328 struct rt6_info *rt = NULL;
2329 struct fib6_table *table;
2330 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2331
2332 table = fib6_get_table(dev_net(dev), tb_id);
2333 if (!table)
2334 return NULL;
2335
2336 rcu_read_lock();
2337 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2338 if (!fn)
2339 goto out;
2340
2341 for_each_fib6_node_rt_rcu(fn) {
2342 if (rt->dst.dev->ifindex != dev->ifindex)
2343 continue;
2344 if ((rt->rt6i_flags & flags) != flags)
2345 continue;
2346 if ((rt->rt6i_flags & noflags) != 0)
2347 continue;
2348 if (!dst_hold_safe(&rt->dst))
2349 rt = NULL;
2350 break;
2351 }
2352 out:
2353 rcu_read_unlock();
2354 return rt;
2355 }
2356
2357
2358 /* Create "default" multicast route to the interface */
2359
2360 static void addrconf_add_mroute(struct net_device *dev)
2361 {
2362 struct fib6_config cfg = {
2363 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2364 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2365 .fc_ifindex = dev->ifindex,
2366 .fc_dst_len = 8,
2367 .fc_flags = RTF_UP,
2368 .fc_nlinfo.nl_net = dev_net(dev),
2369 };
2370
2371 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2372
2373 ip6_route_add(&cfg, NULL);
2374 }
2375
2376 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2377 {
2378 struct inet6_dev *idev;
2379
2380 ASSERT_RTNL();
2381
2382 idev = ipv6_find_idev(dev);
2383 if (!idev)
2384 return ERR_PTR(-ENOBUFS);
2385
2386 if (idev->cnf.disable_ipv6)
2387 return ERR_PTR(-EACCES);
2388
2389 /* Add default multicast route */
2390 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2391 addrconf_add_mroute(dev);
2392
2393 return idev;
2394 }
2395
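/*
 * Propagate new lifetimes from a received prefix to all temporary
 * (RFC 4941) addresses derived from public address @ifp.  Each temporary
 * address is clamped so that, counted from its creation, it never stays
 * valid longer than temp_valid_lft nor preferred longer than
 * temp_prefered_lft - desync_factor.  A new temporary address is created
 * when @create is set, or when none exists yet, provided use_tempaddr
 * allows it.
 */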
2396 static void manage_tempaddrs(struct inet6_dev *idev,
2397 struct inet6_ifaddr *ifp,
2398 __u32 valid_lft, __u32 prefered_lft,
2399 bool create, unsigned long now)
2400 {
2401 u32 flags;
2402 struct inet6_ifaddr *ift;
2403
2404 read_lock_bh(&idev->lock);
2405 /* update all temporary addresses in the list */
2406 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2407 int age, max_valid, max_prefered;
2408
2409 if (ifp != ift->ifpub)
2410 continue;
2411
2412 /* RFC 4941 section 3.3:
2413 * If a received option will extend the lifetime of a public
2414 * address, the lifetimes of temporary addresses should
2415 * be extended, subject to the overall constraint that no
2416 * temporary addresses should ever remain "valid" or "preferred"
2417 * for a time longer than (TEMP_VALID_LIFETIME) or
2418 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2419 */
2420 age = (now - ift->cstamp) / HZ;
2421 max_valid = idev->cnf.temp_valid_lft - age;
2422 if (max_valid < 0)
2423 max_valid = 0;
2424
2425 max_prefered = idev->cnf.temp_prefered_lft -
2426 idev->desync_factor - age;
2427 if (max_prefered < 0)
2428 max_prefered = 0;
2429
2430 if (valid_lft > max_valid)
2431 valid_lft = max_valid;
2432
2433 if (prefered_lft > max_prefered)
2434 prefered_lft = max_prefered;
2435
2436 spin_lock(&ift->lock);
2437 flags = ift->flags;
2438 ift->valid_lft = valid_lft;
2439 ift->prefered_lft = prefered_lft;
2440 ift->tstamp = now;
2441 if (prefered_lft > 0)
2442 ift->flags &= ~IFA_F_DEPRECATED;
2443
2444 spin_unlock(&ift->lock);
2445 if (!(flags&IFA_F_TENTATIVE))
2446 ipv6_ifa_notify(0, ift);
2447 }
2448
2449 if ((create || list_empty(&idev->tempaddr_list)) &&
2450 idev->cnf.use_tempaddr > 0) {
2451 /* When a new public address is created as described
2452 * in [ADDRCONF], also create a new temporary address.
2453 * Also create a temporary address if it's enabled but
2454 * no temporary address currently exists.
2455 */
2456 read_unlock_bh(&idev->lock);
2457 ipv6_create_tempaddr(ifp, NULL);
2458 } else {
2459 read_unlock_bh(&idev->lock);
2460 }
2461 }
2462
2463 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2464 {
2465 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2466 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2467 }
2468
2469 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2470 const struct prefix_info *pinfo,
2471 struct inet6_dev *in6_dev,
2472 const struct in6_addr *addr, int addr_type,
2473 u32 addr_flags, bool sllao, bool tokenized,
2474 __u32 valid_lft, u32 prefered_lft)
2475 {
2476 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2477 int create = 0, update_lft = 0;
2478
2479 if (!ifp && valid_lft) {
2480 int max_addresses = in6_dev->cnf.max_addresses;
2481
2482 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2483 if ((net->ipv6.devconf_all->optimistic_dad ||
2484 in6_dev->cnf.optimistic_dad) &&
2485 !net->ipv6.devconf_all->forwarding && sllao)
2486 addr_flags |= IFA_F_OPTIMISTIC;
2487 #endif
2488
2489 /* Do not allow creating too many autoconfigured
2490 * addresses; that would be an easy way to crash the kernel.
2491 */
2492 if (!max_addresses ||
2493 ipv6_count_addresses(in6_dev) < max_addresses)
2494 ifp = ipv6_add_addr(in6_dev, addr, NULL,
2495 pinfo->prefix_len,
2496 addr_type&IPV6_ADDR_SCOPE_MASK,
2497 addr_flags, valid_lft,
2498 prefered_lft, false, NULL);
2499
2500 if (IS_ERR_OR_NULL(ifp))
2501 return -1;
2502
2503 update_lft = 0;
2504 create = 1;
2505 spin_lock_bh(&ifp->lock);
2506 ifp->flags |= IFA_F_MANAGETEMPADDR;
2507 ifp->cstamp = jiffies;
2508 ifp->tokenized = tokenized;
2509 spin_unlock_bh(&ifp->lock);
2510 addrconf_dad_start(ifp);
2511 }
2512
2513 if (ifp) {
2514 u32 flags;
2515 unsigned long now;
2516 u32 stored_lft;
2517
2518 /* update lifetime (RFC2462 5.5.3 e) */
2519 spin_lock_bh(&ifp->lock);
2520 now = jiffies;
2521 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2522 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2523 else
2524 stored_lft = 0;
2525 if (!update_lft && !create && stored_lft) {
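/* RFC 4862 Section 5.5.3e ("two hours" rule): the remaining
 * valid lifetime is never reduced below
 * min(stored_lft, MIN_VALID_LIFETIME), so a (possibly spoofed)
 * RA cannot immediately invalidate an address still in use.
 */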
2526 const u32 minimum_lft = min_t(u32,
2527 stored_lft, MIN_VALID_LIFETIME);
2528 valid_lft = max(valid_lft, minimum_lft);
2529
2530 /* RFC4862 Section 5.5.3e:
2531 * "Note that the preferred lifetime of the
2532 * corresponding address is always reset to
2533 * the Preferred Lifetime in the received
2534 * Prefix Information option, regardless of
2535 * whether the valid lifetime is also reset or
2536 * ignored."
2537 *
2538 * So we should always update prefered_lft here.
2539 */
2540 update_lft = 1;
2541 }
2542
2543 if (update_lft) {
2544 ifp->valid_lft = valid_lft;
2545 ifp->prefered_lft = prefered_lft;
2546 ifp->tstamp = now;
2547 flags = ifp->flags;
2548 ifp->flags &= ~IFA_F_DEPRECATED;
2549 spin_unlock_bh(&ifp->lock);
2550
2551 if (!(flags&IFA_F_TENTATIVE))
2552 ipv6_ifa_notify(0, ifp);
2553 } else
2554 spin_unlock_bh(&ifp->lock);
2555
2556 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2557 create, now);
2558
2559 in6_ifa_put(ifp);
2560 addrconf_verify();
2561 }
2562
2563 return 0;
2564 }
2565 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2566
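/*
 * Process a Prefix Information option from a received Router
 * Advertisement (RFC 4861 / RFC 4862): when the on-link (L) flag is set,
 * add or refresh the prefix route; when the autonomous (A) flag is set,
 * autoconfigure an address for the prefix (SLAAC), using the interface
 * token, a stable-privacy IID or an EUI-64 based IID as appropriate.
 */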
2567 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2568 {
2569 struct prefix_info *pinfo;
2570 __u32 valid_lft;
2571 __u32 prefered_lft;
2572 int addr_type, err;
2573 u32 addr_flags = 0;
2574 struct inet6_dev *in6_dev;
2575 struct net *net = dev_net(dev);
2576
2577 pinfo = (struct prefix_info *) opt;
2578
2579 if (len < sizeof(struct prefix_info)) {
2580 ADBG("addrconf: prefix option too short\n");
2581 return;
2582 }
2583
2584 /*
2585 * Validation checks ([ADDRCONF], page 19)
2586 */
2587
2588 addr_type = ipv6_addr_type(&pinfo->prefix);
2589
2590 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2591 return;
2592
2593 valid_lft = ntohl(pinfo->valid);
2594 prefered_lft = ntohl(pinfo->prefered);
2595
2596 if (prefered_lft > valid_lft) {
2597 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2598 return;
2599 }
2600
2601 in6_dev = in6_dev_get(dev);
2602
2603 if (!in6_dev) {
2604 net_dbg_ratelimited("addrconf: device %s not configured\n",
2605 dev->name);
2606 return;
2607 }
2608
2609 /*
2610 * Two things going on here:
2611 * 1) Add routes for on-link prefixes
2612 * 2) Configure prefixes with the auto flag set
2613 */
2614
2615 if (pinfo->onlink) {
2616 struct rt6_info *rt;
2617 unsigned long rt_expires;
2618
2619 /* Avoid arithmetic overflow. Really, we could
2620 * save rt_expires in seconds, likely valid_lft,
2621 * but that would require division in fib gc, which is
2622 * not good.
2623 */
2624 if (HZ > USER_HZ)
2625 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2626 else
2627 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2628
2629 if (addrconf_finite_timeout(rt_expires))
2630 rt_expires *= HZ;
2631
2632 rt = addrconf_get_prefix_route(&pinfo->prefix,
2633 pinfo->prefix_len,
2634 dev,
2635 RTF_ADDRCONF | RTF_PREFIX_RT,
2636 RTF_GATEWAY | RTF_DEFAULT);
2637
2638 if (rt) {
2639 /* Autoconf prefix route */
2640 if (valid_lft == 0) {
2641 ip6_del_rt(rt);
2642 rt = NULL;
2643 } else if (addrconf_finite_timeout(rt_expires)) {
2644 /* not infinity */
2645 rt6_set_expires(rt, jiffies + rt_expires);
2646 } else {
2647 rt6_clean_expires(rt);
2648 }
2649 } else if (valid_lft) {
2650 clock_t expires = 0;
2651 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2652 if (addrconf_finite_timeout(rt_expires)) {
2653 /* not infinity */
2654 flags |= RTF_EXPIRES;
2655 expires = jiffies_to_clock_t(rt_expires);
2656 }
2657 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2658 dev, expires, flags);
2659 }
2660 ip6_rt_put(rt);
2661 }
2662
2663 /* Try to figure out our local address for this prefix */
2664
2665 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2666 struct in6_addr addr;
2667 bool tokenized = false, dev_addr_generated = false;
2668
2669 if (pinfo->prefix_len == 64) {
2670 memcpy(&addr, &pinfo->prefix, 8);
2671
2672 if (!ipv6_addr_any(&in6_dev->token)) {
2673 read_lock_bh(&in6_dev->lock);
2674 memcpy(addr.s6_addr + 8,
2675 in6_dev->token.s6_addr + 8, 8);
2676 read_unlock_bh(&in6_dev->lock);
2677 tokenized = true;
2678 } else if (is_addr_mode_generate_stable(in6_dev) &&
2679 !ipv6_generate_stable_address(&addr, 0,
2680 in6_dev)) {
2681 addr_flags |= IFA_F_STABLE_PRIVACY;
2682 goto ok;
2683 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2684 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2685 goto put;
2686 } else {
2687 dev_addr_generated = true;
2688 }
2689 goto ok;
2690 }
2691 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2692 pinfo->prefix_len);
2693 goto put;
2694
2695 ok:
2696 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2697 &addr, addr_type,
2698 addr_flags, sllao,
2699 tokenized, valid_lft,
2700 prefered_lft);
2701 if (err)
2702 goto put;
2703
2704 /* Ignore the error case here because the previous prefix
2705 * address add was successful and will be notified.
2706 */
2707 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2708 addr_type, addr_flags, sllao,
2709 tokenized, valid_lft,
2710 prefered_lft,
2711 dev_addr_generated);
2712 }
2713 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2714 put:
2715 in6_dev_put(in6_dev);
2716 }
2717
2718 /*
2719 * Set destination address.
2720 * Special case for SIT interfaces where we create a new "virtual"
2721 * device.
2722 */
2723 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2724 {
2725 struct in6_ifreq ireq;
2726 struct net_device *dev;
2727 int err = -EINVAL;
2728
2729 rtnl_lock();
2730
2731 err = -EFAULT;
2732 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2733 goto err_exit;
2734
2735 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2736
2737 err = -ENODEV;
2738 if (!dev)
2739 goto err_exit;
2740
2741 #if IS_ENABLED(CONFIG_IPV6_SIT)
2742 if (dev->type == ARPHRD_SIT) {
2743 const struct net_device_ops *ops = dev->netdev_ops;
2744 struct ifreq ifr;
2745 struct ip_tunnel_parm p;
2746
2747 err = -EADDRNOTAVAIL;
2748 if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
2749 goto err_exit;
2750
2751 memset(&p, 0, sizeof(p));
2752 p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
2753 p.iph.saddr = 0;
2754 p.iph.version = 4;
2755 p.iph.ihl = 5;
2756 p.iph.protocol = IPPROTO_IPV6;
2757 p.iph.ttl = 64;
2758 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
2759
2760 if (ops->ndo_do_ioctl) {
2761 mm_segment_t oldfs = get_fs();
2762
2763 set_fs(KERNEL_DS);
2764 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
2765 set_fs(oldfs);
2766 } else
2767 err = -EOPNOTSUPP;
2768
2769 if (err == 0) {
2770 err = -ENOBUFS;
2771 dev = __dev_get_by_name(net, p.name);
2772 if (!dev)
2773 goto err_exit;
2774 err = dev_open(dev);
2775 }
2776 }
2777 #endif
2778
2779 err_exit:
2780 rtnl_unlock();
2781 return err;
2782 }
2783
2784 static int ipv6_mc_config(struct sock *sk, bool join,
2785 const struct in6_addr *addr, int ifindex)
2786 {
2787 int ret;
2788
2789 ASSERT_RTNL();
2790
2791 lock_sock(sk);
2792 if (join)
2793 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2794 else
2795 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2796 release_sock(sk);
2797
2798 return ret;
2799 }
2800
2801 /*
2802 * Manual configuration of address on an interface
2803 */
2804 static int inet6_addr_add(struct net *net, int ifindex,
2805 const struct in6_addr *pfx,
2806 const struct in6_addr *peer_pfx,
2807 unsigned int plen, __u32 ifa_flags,
2808 __u32 prefered_lft, __u32 valid_lft,
2809 struct netlink_ext_ack *extack)
2810 {
2811 struct inet6_ifaddr *ifp;
2812 struct inet6_dev *idev;
2813 struct net_device *dev;
2814 unsigned long timeout;
2815 clock_t expires;
2816 int scope;
2817 u32 flags;
2818
2819 ASSERT_RTNL();
2820
2821 if (plen > 128)
2822 return -EINVAL;
2823
2824 /* check the lifetime */
2825 if (!valid_lft || prefered_lft > valid_lft)
2826 return -EINVAL;
2827
2828 if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
2829 return -EINVAL;
2830
2831 dev = __dev_get_by_index(net, ifindex);
2832 if (!dev)
2833 return -ENODEV;
2834
2835 idev = addrconf_add_dev(dev);
2836 if (IS_ERR(idev))
2837 return PTR_ERR(idev);
2838
2839 if (ifa_flags & IFA_F_MCAUTOJOIN) {
2840 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2841 true, pfx, ifindex);
2842
2843 if (ret < 0)
2844 return ret;
2845 }
2846
2847 scope = ipv6_addr_scope(pfx);
2848
2849 timeout = addrconf_timeout_fixup(valid_lft, HZ);
2850 if (addrconf_finite_timeout(timeout)) {
2851 expires = jiffies_to_clock_t(timeout * HZ);
2852 valid_lft = timeout;
2853 flags = RTF_EXPIRES;
2854 } else {
2855 expires = 0;
2856 flags = 0;
2857 ifa_flags |= IFA_F_PERMANENT;
2858 }
2859
2860 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
2861 if (addrconf_finite_timeout(timeout)) {
2862 if (timeout == 0)
2863 ifa_flags |= IFA_F_DEPRECATED;
2864 prefered_lft = timeout;
2865 }
2866
2867 ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
2868 valid_lft, prefered_lft, true, extack);
2869
2870 if (!IS_ERR(ifp)) {
2871 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
2872 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
2873 expires, flags);
2874 }
2875
2876 /*
2877 * Note that section 3.1 of RFC 4429 indicates
2878 * that the Optimistic flag should not be set for
2879 * manually configured addresses
2880 */
2881 addrconf_dad_start(ifp);
2882 if (ifa_flags & IFA_F_MANAGETEMPADDR)
2883 manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
2884 true, jiffies);
2885 in6_ifa_put(ifp);
2886 addrconf_verify_rtnl();
2887 return 0;
2888 } else if (ifa_flags & IFA_F_MCAUTOJOIN) {
2889 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2890 false, pfx, ifindex);
2891 }
2892
2893 return PTR_ERR(ifp);
2894 }
2895
2896 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2897 const struct in6_addr *pfx, unsigned int plen)
2898 {
2899 struct inet6_ifaddr *ifp;
2900 struct inet6_dev *idev;
2901 struct net_device *dev;
2902
2903 if (plen > 128)
2904 return -EINVAL;
2905
2906 dev = __dev_get_by_index(net, ifindex);
2907 if (!dev)
2908 return -ENODEV;
2909
2910 idev = __in6_dev_get(dev);
2911 if (!idev)
2912 return -ENXIO;
2913
2914 read_lock_bh(&idev->lock);
2915 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2916 if (ifp->prefix_len == plen &&
2917 ipv6_addr_equal(pfx, &ifp->addr)) {
2918 in6_ifa_hold(ifp);
2919 read_unlock_bh(&idev->lock);
2920
2921 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2922 (ifa_flags & IFA_F_MANAGETEMPADDR))
2923 manage_tempaddrs(idev, ifp, 0, 0, false,
2924 jiffies);
2925 ipv6_del_addr(ifp);
2926 addrconf_verify_rtnl();
2927 if (ipv6_addr_is_multicast(pfx)) {
2928 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2929 false, pfx, dev->ifindex);
2930 }
2931 return 0;
2932 }
2933 }
2934 read_unlock_bh(&idev->lock);
2935 return -EADDRNOTAVAIL;
2936 }
2937
2938
2939 int addrconf_add_ifaddr(struct net *net, void __user *arg)
2940 {
2941 struct in6_ifreq ireq;
2942 int err;
2943
2944 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2945 return -EPERM;
2946
2947 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2948 return -EFAULT;
2949
2950 rtnl_lock();
2951 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL,
2952 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2953 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME, NULL);
2954 rtnl_unlock();
2955 return err;
2956 }
2957
2958 int addrconf_del_ifaddr(struct net *net, void __user *arg)
2959 {
2960 struct in6_ifreq ireq;
2961 int err;
2962
2963 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2964 return -EPERM;
2965
2966 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2967 return -EFAULT;
2968
2969 rtnl_lock();
2970 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
2971 ireq.ifr6_prefixlen);
2972 rtnl_unlock();
2973 return err;
2974 }
2975
2976 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2977 int plen, int scope)
2978 {
2979 struct inet6_ifaddr *ifp;
2980
2981 ifp = ipv6_add_addr(idev, addr, NULL, plen,
2982 scope, IFA_F_PERMANENT,
2983 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME,
2984 true, NULL);
2985 if (!IS_ERR(ifp)) {
2986 spin_lock_bh(&ifp->lock);
2987 ifp->flags &= ~IFA_F_TENTATIVE;
2988 spin_unlock_bh(&ifp->lock);
2989 rt_genid_bump_ipv6(dev_net(idev->dev));
2990 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2991 in6_ifa_put(ifp);
2992 }
2993 }
2994
2995 #if IS_ENABLED(CONFIG_IPV6_SIT)
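/*
 * Mirror the device's IPv4 addresses into IPv6 on a SIT tunnel: on
 * point-to-point links a link-local fe80::<v4addr>/64 is added,
 * otherwise IPv4-compatible ::<v4addr>/96 addresses are created (with
 * RTF_NONEXTHOP prefix routes).  If the tunnel has no local IPv4
 * address, every IPv4 address on the host's UP interfaces is used
 * instead.
 */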
2996 static void sit_add_v4_addrs(struct inet6_dev *idev)
2997 {
2998 struct in6_addr addr;
2999 struct net_device *dev;
3000 struct net *net = dev_net(idev->dev);
3001 int scope, plen;
3002 u32 pflags = 0;
3003
3004 ASSERT_RTNL();
3005
3006 memset(&addr, 0, sizeof(struct in6_addr));
3007 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
3008
3009 if (idev->dev->flags&IFF_POINTOPOINT) {
3010 addr.s6_addr32[0] = htonl(0xfe800000);
3011 scope = IFA_LINK;
3012 plen = 64;
3013 } else {
3014 scope = IPV6_ADDR_COMPATv4;
3015 plen = 96;
3016 pflags |= RTF_NONEXTHOP;
3017 }
3018
3019 if (addr.s6_addr32[3]) {
3020 add_addr(idev, &addr, plen, scope);
3021 addrconf_prefix_route(&addr, plen, idev->dev, 0, pflags);
3022 return;
3023 }
3024
3025 for_each_netdev(net, dev) {
3026 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3027 if (in_dev && (dev->flags & IFF_UP)) {
3028 struct in_ifaddr *ifa;
3029
3030 int flag = scope;
3031
3032 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
3033
3034 addr.s6_addr32[3] = ifa->ifa_local;
3035
3036 if (ifa->ifa_scope == RT_SCOPE_LINK)
3037 continue;
3038 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3039 if (idev->dev->flags&IFF_POINTOPOINT)
3040 continue;
3041 flag |= IFA_HOST;
3042 }
3043
3044 add_addr(idev, &addr, plen, flag);
3045 addrconf_prefix_route(&addr, plen, idev->dev, 0,
3046 pflags);
3047 }
3048 }
3049 }
3050 }
3051 #endif
3052
3053 static void init_loopback(struct net_device *dev)
3054 {
3055 struct inet6_dev *idev;
3056
3057 /* ::1 */
3058
3059 ASSERT_RTNL();
3060
3061 idev = ipv6_find_idev(dev);
3062 if (!idev) {
3063 pr_debug("%s: add_dev failed\n", __func__);
3064 return;
3065 }
3066
3067 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3068 }
3069
3070 void addrconf_add_linklocal(struct inet6_dev *idev,
3071 const struct in6_addr *addr, u32 flags)
3072 {
3073 struct inet6_ifaddr *ifp;
3074 u32 addr_flags = flags | IFA_F_PERMANENT;
3075
3076 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3077 if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3078 idev->cnf.optimistic_dad) &&
3079 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3080 addr_flags |= IFA_F_OPTIMISTIC;
3081 #endif
3082
3083 ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
3084 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME, true, NULL);
3085 if (!IS_ERR(ifp)) {
3086 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
3087 addrconf_dad_start(ifp);
3088 in6_ifa_put(ifp);
3089 }
3090 }
3091 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3092
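/*
 * Reject interface identifiers that are reserved (RFC 5453): the
 * all-zero subnet-router anycast IID, IIDs in the reserved
 * ethernet-block range starting at 0200:5EFF:FE00:0000, and the
 * reserved subnet anycast IIDs FDFF:FFFF:FFFF:FF80 - FDFF:FFFF:FFFF:FFFF
 * (RFC 2526).
 */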
3093 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3094 {
3095 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3096 return true;
3097
3098 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3099 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3100 return true;
3101
3102 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3103 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3104 return true;
3105
3106 return false;
3107 }
3108
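/*
 * RFC 7217 style stable-privacy interface identifier: the IID is taken
 * from the first 64 bits of SHA-1(secret | prefix | hardware address |
 * dad_count), where the secret comes from the per-interface (or default)
 * stable_secret sysctl.  If the result is a reserved IID, dad_count is
 * bumped and the hash recomputed, up to idgen_retries times.
 */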
3109 static int ipv6_generate_stable_address(struct in6_addr *address,
3110 u8 dad_count,
3111 const struct inet6_dev *idev)
3112 {
3113 static DEFINE_SPINLOCK(lock);
3114 static __u32 digest[SHA_DIGEST_WORDS];
3115 static __u32 workspace[SHA_WORKSPACE_WORDS];
3116
3117 static union {
3118 char __data[SHA_MESSAGE_BYTES];
3119 struct {
3120 struct in6_addr secret;
3121 __be32 prefix[2];
3122 unsigned char hwaddr[MAX_ADDR_LEN];
3123 u8 dad_count;
3124 } __packed;
3125 } data;
3126
3127 struct in6_addr secret;
3128 struct in6_addr temp;
3129 struct net *net = dev_net(idev->dev);
3130
3131 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3132
3133 if (idev->cnf.stable_secret.initialized)
3134 secret = idev->cnf.stable_secret.secret;
3135 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3136 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3137 else
3138 return -1;
3139
3140 retry:
3141 spin_lock_bh(&lock);
3142
3143 sha_init(digest);
3144 memset(&data, 0, sizeof(data));
3145 memset(workspace, 0, sizeof(workspace));
3146 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3147 data.prefix[0] = address->s6_addr32[0];
3148 data.prefix[1] = address->s6_addr32[1];
3149 data.secret = secret;
3150 data.dad_count = dad_count;
3151
3152 sha_transform(digest, data.__data, workspace);
3153
3154 temp = *address;
3155 temp.s6_addr32[2] = (__force __be32)digest[0];
3156 temp.s6_addr32[3] = (__force __be32)digest[1];
3157
3158 spin_unlock_bh(&lock);
3159
3160 if (ipv6_reserved_interfaceid(temp)) {
3161 dad_count++;
3162 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3163 return -1;
3164 goto retry;
3165 }
3166
3167 *address = temp;
3168 return 0;
3169 }
3170
3171 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3172 {
3173 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3174
3175 if (s->initialized)
3176 return;
3177 s = &idev->cnf.stable_secret;
3178 get_random_bytes(&s->secret, sizeof(s->secret));
3179 s->initialized = true;
3180 }
3181
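/*
 * Generate the link-local (fe80::/64) address for @idev according to its
 * addr_gen_mode:
 *  - EUI64: IID derived from the link-layer address (ipv6_generate_eui64)
 *  - STABLE_PRIVACY: RFC 7217 IID from the configured stable secret
 *  - RANDOM: as STABLE_PRIVACY, with a secret generated on first use
 *  - NONE: no link-local address is added
 * If IID generation fails and @prefix_route is set, only the fe80::/64
 * prefix route is installed.
 */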
3182 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3183 {
3184 struct in6_addr addr;
3185
3186 /* no link local addresses on L3 master devices */
3187 if (netif_is_l3_master(idev->dev))
3188 return;
3189
3190 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3191
3192 switch (idev->cnf.addr_gen_mode) {
3193 case IN6_ADDR_GEN_MODE_RANDOM:
3194 ipv6_gen_mode_random_init(idev);
3195 /* fallthrough */
3196 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3197 if (!ipv6_generate_stable_address(&addr, 0, idev))
3198 addrconf_add_linklocal(idev, &addr,
3199 IFA_F_STABLE_PRIVACY);
3200 else if (prefix_route)
3201 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3202 break;
3203 case IN6_ADDR_GEN_MODE_EUI64:
3204 /* addrconf_add_linklocal also adds a prefix_route and we
3205 * only need to care about prefix routes if ipv6_generate_eui64
3206 * couldn't generate one.
3207 */
3208 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3209 addrconf_add_linklocal(idev, &addr, 0);
3210 else if (prefix_route)
3211 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3212 break;
3213 case IN6_ADDR_GEN_MODE_NONE:
3214 default:
3215 /* will not add any link local address */
3216 break;
3217 }
3218 }
3219
3220 static void addrconf_dev_config(struct net_device *dev)
3221 {
3222 struct inet6_dev *idev;
3223
3224 ASSERT_RTNL();
3225
3226 if ((dev->type != ARPHRD_ETHER) &&
3227 (dev->type != ARPHRD_FDDI) &&
3228 (dev->type != ARPHRD_ARCNET) &&
3229 (dev->type != ARPHRD_INFINIBAND) &&
3230 (dev->type != ARPHRD_IEEE1394) &&
3231 (dev->type != ARPHRD_TUNNEL6) &&
3232 (dev->type != ARPHRD_6LOWPAN) &&
3233 (dev->type != ARPHRD_IP6GRE) &&
3234 (dev->type != ARPHRD_IPGRE) &&
3235 (dev->type != ARPHRD_TUNNEL) &&
3236 (dev->type != ARPHRD_NONE)) {
3237 /* Alas, we support autoconfiguration only on the device types above. */
3238 return;
3239 }
3240
3241 idev = addrconf_add_dev(dev);
3242 if (IS_ERR(idev))
3243 return;
3244
3245 /* this device type has no EUI support */
3246 if (dev->type == ARPHRD_NONE &&
3247 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3248 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3249
3250 addrconf_addr_gen(idev, false);
3251 }
3252
3253 #if IS_ENABLED(CONFIG_IPV6_SIT)
3254 static void addrconf_sit_config(struct net_device *dev)
3255 {
3256 struct inet6_dev *idev;
3257
3258 ASSERT_RTNL();
3259
3260 /*
3261 * Configure the tunnel with one of our IPv4
3262 * addresses... we should configure all of
3263 * our v4 addrs in the tunnel
3264 */
3265
3266 idev = ipv6_find_idev(dev);
3267 if (!idev) {
3268 pr_debug("%s: add_dev failed\n", __func__);
3269 return;
3270 }
3271
3272 if (dev->priv_flags & IFF_ISATAP) {
3273 addrconf_addr_gen(idev, false);
3274 return;
3275 }
3276
3277 sit_add_v4_addrs(idev);
3278
3279 if (dev->flags&IFF_POINTOPOINT)
3280 addrconf_add_mroute(dev);
3281 }
3282 #endif
3283
3284 #if IS_ENABLED(CONFIG_NET_IPGRE)
3285 static void addrconf_gre_config(struct net_device *dev)
3286 {
3287 struct inet6_dev *idev;
3288
3289 ASSERT_RTNL();
3290
3291 idev = ipv6_find_idev(dev);
3292 if (!idev) {
3293 pr_debug("%s: add_dev failed\n", __func__);
3294 return;
3295 }
3296
3297 addrconf_addr_gen(idev, true);
3298 if (dev->flags & IFF_POINTOPOINT)
3299 addrconf_add_mroute(dev);
3300 }
3301 #endif
3302
3303 static int fixup_permanent_addr(struct inet6_dev *idev,
3304 struct inet6_ifaddr *ifp)
3305 {
3306 /* !rt6i_node means the host route was removed from the
3307 * FIB, for example, if 'lo' device is taken down. In that
3308 * case regenerate the host route.
3309 */
3310 if (!ifp->rt || !ifp->rt->rt6i_node) {
3311 struct rt6_info *rt, *prev;
3312
3313 rt = addrconf_dst_alloc(idev, &ifp->addr, false);
3314 if (IS_ERR(rt))
3315 return PTR_ERR(rt);
3316
3317 /* ifp->rt can be accessed outside of rtnl */
3318 spin_lock(&ifp->lock);
3319 prev = ifp->rt;
3320 ifp->rt = rt;
3321 spin_unlock(&ifp->lock);
3322
3323 ip6_rt_put(prev);
3324 }
3325
3326 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3327 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3328 idev->dev, 0, 0);
3329 }
3330
3331 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3332 addrconf_dad_start(ifp);
3333
3334 return 0;
3335 }
3336
3337 static void addrconf_permanent_addr(struct net_device *dev)
3338 {
3339 struct inet6_ifaddr *ifp, *tmp;
3340 struct inet6_dev *idev;
3341
3342 idev = __in6_dev_get(dev);
3343 if (!idev)
3344 return;
3345
3346 write_lock_bh(&idev->lock);
3347
3348 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3349 if ((ifp->flags & IFA_F_PERMANENT) &&
3350 fixup_permanent_addr(idev, ifp) < 0) {
3351 write_unlock_bh(&idev->lock);
3352 ipv6_del_addr(ifp);
3353 write_lock_bh(&idev->lock);
3354
3355 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3356 idev->dev->name, &ifp->addr);
3357 }
3358 }
3359
3360 write_unlock_bh(&idev->lock);
3361 }
3362
3363 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3364 void *ptr)
3365 {
3366 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3367 struct netdev_notifier_changeupper_info *info;
3368 struct inet6_dev *idev = __in6_dev_get(dev);
3369 struct net *net = dev_net(dev);
3370 int run_pending = 0;
3371 int err;
3372
3373 switch (event) {
3374 case NETDEV_REGISTER:
3375 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3376 idev = ipv6_add_dev(dev);
3377 if (IS_ERR(idev))
3378 return notifier_from_errno(PTR_ERR(idev));
3379 }
3380 break;
3381
3382 case NETDEV_CHANGEMTU:
3383 /* if the MTU falls below IPV6_MIN_MTU, stop IPv6 on this interface. */
3384 if (dev->mtu < IPV6_MIN_MTU) {
3385 addrconf_ifdown(dev, dev != net->loopback_dev);
3386 break;
3387 }
3388
3389 if (idev) {
3390 rt6_mtu_change(dev, dev->mtu);
3391 idev->cnf.mtu6 = dev->mtu;
3392 break;
3393 }
3394
3395 /* allocate new idev */
3396 idev = ipv6_add_dev(dev);
3397 if (IS_ERR(idev))
3398 break;
3399
3400 /* device is still not ready */
3401 if (!(idev->if_flags & IF_READY))
3402 break;
3403
3404 run_pending = 1;
3405
3406 /* fall through */
3407
3408 case NETDEV_UP:
3409 case NETDEV_CHANGE:
3410 if (dev->flags & IFF_SLAVE)
3411 break;
3412
3413 if (idev && idev->cnf.disable_ipv6)
3414 break;
3415
3416 if (event == NETDEV_UP) {
3417 /* restore routes for permanent addresses */
3418 addrconf_permanent_addr(dev);
3419
3420 if (!addrconf_link_ready(dev)) {
3421 /* device is not ready yet. */
3422 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3423 dev->name);
3424 break;
3425 }
3426
3427 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3428 idev = ipv6_add_dev(dev);
3429
3430 if (!IS_ERR_OR_NULL(idev)) {
3431 idev->if_flags |= IF_READY;
3432 run_pending = 1;
3433 }
3434 } else if (event == NETDEV_CHANGE) {
3435 if (!addrconf_link_ready(dev)) {
3436 /* device is still not ready. */
3437 break;
3438 }
3439
3440 if (idev) {
3441 if (idev->if_flags & IF_READY) {
3442 /* device is already configured -
3443 * but resend MLD reports, we might
3444 * have roamed and need to update
3445 * multicast snooping switches
3446 */
3447 ipv6_mc_up(idev);
3448 break;
3449 }
3450 idev->if_flags |= IF_READY;
3451 }
3452
3453 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3454 dev->name);
3455
3456 run_pending = 1;
3457 }
3458
3459 switch (dev->type) {
3460 #if IS_ENABLED(CONFIG_IPV6_SIT)
3461 case ARPHRD_SIT:
3462 addrconf_sit_config(dev);
3463 break;
3464 #endif
3465 #if IS_ENABLED(CONFIG_NET_IPGRE)
3466 case ARPHRD_IPGRE:
3467 addrconf_gre_config(dev);
3468 break;
3469 #endif
3470 case ARPHRD_LOOPBACK:
3471 init_loopback(dev);
3472 break;
3473
3474 default:
3475 addrconf_dev_config(dev);
3476 break;
3477 }
3478
3479 if (!IS_ERR_OR_NULL(idev)) {
3480 if (run_pending)
3481 addrconf_dad_run(idev);
3482
3483 /*
3484 * If the MTU changed while the interface was down, the
3485 * new MTU must be reflected in the idev as well as in
3486 * the routes when the interface comes back up.
3487 */
3488 if (idev->cnf.mtu6 != dev->mtu &&
3489 dev->mtu >= IPV6_MIN_MTU) {
3490 rt6_mtu_change(dev, dev->mtu);
3491 idev->cnf.mtu6 = dev->mtu;
3492 }
3493 idev->tstamp = jiffies;
3494 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3495
3496 /*
3497 * If the MTU changed while down and is now lower than
3498 * IPV6_MIN_MTU, stop IPv6 on this interface.
3499 */
3500 if (dev->mtu < IPV6_MIN_MTU)
3501 addrconf_ifdown(dev, dev != net->loopback_dev);
3502 }
3503 break;
3504
3505 case NETDEV_DOWN:
3506 case NETDEV_UNREGISTER:
3507 /*
3508 * Remove all addresses from this interface.
3509 */
3510 addrconf_ifdown(dev, event != NETDEV_DOWN);
3511 break;
3512
3513 case NETDEV_CHANGENAME:
3514 if (idev) {
3515 snmp6_unregister_dev(idev);
3516 addrconf_sysctl_unregister(idev);
3517 err = addrconf_sysctl_register(idev);
3518 if (err)
3519 return notifier_from_errno(err);
3520 err = snmp6_register_dev(idev);
3521 if (err) {
3522 addrconf_sysctl_unregister(idev);
3523 return notifier_from_errno(err);
3524 }
3525 }
3526 break;
3527
3528 case NETDEV_PRE_TYPE_CHANGE:
3529 case NETDEV_POST_TYPE_CHANGE:
3530 if (idev)
3531 addrconf_type_change(dev, event);
3532 break;
3533
3534 case NETDEV_CHANGEUPPER:
3535 info = ptr;
3536
3537 /* flush all routes if dev is linked to or unlinked from
3538 * an L3 master device (e.g., VRF)
3539 */
3540 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3541 addrconf_ifdown(dev, 0);
3542 }
3543
3544 return NOTIFY_OK;
3545 }
3546
3547 /*
3548 * addrconf module should be notified of a device going up
3549 */
3550 static struct notifier_block ipv6_dev_notf = {
3551 .notifier_call = addrconf_notify,
3552 .priority = ADDRCONF_NOTIFY_PRIORITY,
3553 };
3554
3555 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3556 {
3557 struct inet6_dev *idev;
3558 ASSERT_RTNL();
3559
3560 idev = __in6_dev_get(dev);
3561
3562 if (event == NETDEV_POST_TYPE_CHANGE)
3563 ipv6_mc_remap(idev);
3564 else if (event == NETDEV_PRE_TYPE_CHANGE)
3565 ipv6_mc_unmap(idev);
3566 }
3567
3568 static bool addr_is_local(const struct in6_addr *addr)
3569 {
3570 return ipv6_addr_type(addr) &
3571 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3572 }
3573
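/*
 * Bring IPv6 down on a device.  @how selects the severity: 0 means the
 * link went down (the inet6_dev is kept and permanent addresses may be
 * retained, subject to keep_addr_on_down), non-zero means the device is
 * being unregistered, so the inet6_dev is detached and released.
 */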
3574 static int addrconf_ifdown(struct net_device *dev, int how)
3575 {
3576 struct net *net = dev_net(dev);
3577 struct inet6_dev *idev;
3578 struct inet6_ifaddr *ifa, *tmp;
3579 int _keep_addr;
3580 bool keep_addr;
3581 int state, i;
3582
3583 ASSERT_RTNL();
3584
3585 rt6_ifdown(net, dev);
3586 neigh_ifdown(&nd_tbl, dev);
3587
3588 idev = __in6_dev_get(dev);
3589 if (!idev)
3590 return -ENODEV;
3591
3592 /*
3593 * Step 1: remove reference to ipv6 device from parent device.
3594 * Do not dev_put!
3595 */
3596 if (how) {
3597 idev->dead = 1;
3598
3599 /* protected by rtnl_lock */
3600 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3601
3602 /* Step 1.5: remove snmp6 entry */
3603 snmp6_unregister_dev(idev);
3604
3605 }
3606
3607 /* aggregate the system setting and interface setting */
3608 _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3609 if (!_keep_addr)
3610 _keep_addr = idev->cnf.keep_addr_on_down;
3611
3612 /* combine the user config with event to determine if permanent
3613 * addresses are to be removed from address hash table
3614 */
3615 keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
3616
3617 /* Step 2: clear hash table */
3618 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3619 struct hlist_head *h = &inet6_addr_lst[i];
3620
3621 spin_lock_bh(&addrconf_hash_lock);
3622 restart:
3623 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3624 if (ifa->idev == idev) {
3625 addrconf_del_dad_work(ifa);
3626 /* combined flag + permanent flag decide if
3627 * address is retained on a down event
3628 */
3629 if (!keep_addr ||
3630 !(ifa->flags & IFA_F_PERMANENT) ||
3631 addr_is_local(&ifa->addr)) {
3632 hlist_del_init_rcu(&ifa->addr_lst);
3633 goto restart;
3634 }
3635 }
3636 }
3637 spin_unlock_bh(&addrconf_hash_lock);
3638 }
3639
3640 write_lock_bh(&idev->lock);
3641
3642 addrconf_del_rs_timer(idev);
3643
3644 /* Step 3: clear flags for stateless addrconf */
3645 if (!how)
3646 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3647
3648 /* Step 4: clear tempaddr list */
3649 while (!list_empty(&idev->tempaddr_list)) {
3650 ifa = list_first_entry(&idev->tempaddr_list,
3651 struct inet6_ifaddr, tmp_list);
3652 list_del(&ifa->tmp_list);
3653 write_unlock_bh(&idev->lock);
3654 spin_lock_bh(&ifa->lock);
3655
3656 if (ifa->ifpub) {
3657 in6_ifa_put(ifa->ifpub);
3658 ifa->ifpub = NULL;
3659 }
3660 spin_unlock_bh(&ifa->lock);
3661 in6_ifa_put(ifa);
3662 write_lock_bh(&idev->lock);
3663 }
3664
3665 /* re-combine the user config with event to determine if permanent
3666 * addresses are to be removed from the interface list
3667 */
3668 keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
3669
3670 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3671 struct rt6_info *rt = NULL;
3672 bool keep;
3673
3674 addrconf_del_dad_work(ifa);
3675
3676 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3677 !addr_is_local(&ifa->addr);
3678
3679 write_unlock_bh(&idev->lock);
3680 spin_lock_bh(&ifa->lock);
3681
3682 if (keep) {
3683 /* set state to skip the notifier below */
3684 state = INET6_IFADDR_STATE_DEAD;
3685 ifa->state = INET6_IFADDR_STATE_PREDAD;
3686 if (!(ifa->flags & IFA_F_NODAD))
3687 ifa->flags |= IFA_F_TENTATIVE;
3688
3689 rt = ifa->rt;
3690 ifa->rt = NULL;
3691 } else {
3692 state = ifa->state;
3693 ifa->state = INET6_IFADDR_STATE_DEAD;
3694 }
3695
3696 spin_unlock_bh(&ifa->lock);
3697
3698 if (rt)
3699 ip6_del_rt(rt);
3700
3701 if (state != INET6_IFADDR_STATE_DEAD) {
3702 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3703 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3704 } else {
3705 if (idev->cnf.forwarding)
3706 addrconf_leave_anycast(ifa);
3707 addrconf_leave_solict(ifa->idev, &ifa->addr);
3708 }
3709
3710 write_lock_bh(&idev->lock);
3711 if (!keep) {
3712 list_del_rcu(&ifa->if_list);
3713 in6_ifa_put(ifa);
3714 }
3715 }
3716
3717 write_unlock_bh(&idev->lock);
3718
3719 /* Step 5: Discard anycast and multicast list */
3720 if (how) {
3721 ipv6_ac_destroy_dev(idev);
3722 ipv6_mc_destroy_dev(idev);
3723 } else {
3724 ipv6_mc_down(idev);
3725 }
3726
3727 idev->tstamp = jiffies;
3728
3729 /* Last: Shoot the device (if unregistered) */
3730 if (how) {
3731 addrconf_sysctl_unregister(idev);
3732 neigh_parms_release(&nd_tbl, idev->nd_parms);
3733 neigh_ifdown(&nd_tbl, dev);
3734 in6_dev_put(idev);
3735 }
3736 return 0;
3737 }
3738
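/*
 * Router solicitation retransmit timer: while no RA has been received
 * and fewer than rtr_solicits probes have been sent (a negative value
 * means solicit indefinitely), send another RS and grow the interval
 * with the RFC 3315 section 14 style backoff, capped at
 * rtr_solicit_max_interval.
 */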
3739 static void addrconf_rs_timer(unsigned long data)
3740 {
3741 struct inet6_dev *idev = (struct inet6_dev *)data;
3742 struct net_device *dev = idev->dev;
3743 struct in6_addr lladdr;
3744
3745 write_lock(&idev->lock);
3746 if (idev->dead || !(idev->if_flags & IF_READY))
3747 goto out;
3748
3749 if (!ipv6_accept_ra(idev))
3750 goto out;
3751
3752 /* Announcement received after solicitation was sent */
3753 if (idev->if_flags & IF_RA_RCVD)
3754 goto out;
3755
3756 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3757 write_unlock(&idev->lock);
3758 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3759 ndisc_send_rs(dev, &lladdr,
3760 &in6addr_linklocal_allrouters);
3761 else
3762 goto put;
3763
3764 write_lock(&idev->lock);
3765 idev->rs_interval = rfc3315_s14_backoff_update(
3766 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3767 /* The wait after the last probe can be shorter */
3768 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3769 idev->cnf.rtr_solicits) ?
3770 idev->cnf.rtr_solicit_delay :
3771 idev->rs_interval);
3772 } else {
3773 /*
3774 * Note: we no longer support the deprecated "all on-link"
3775 * assumption.
3776 */
3777 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3778 }
3779
3780 out:
3781 write_unlock(&idev->lock);
3782 put:
3783 in6_dev_put(idev);
3784 }
3785
3786 /*
3787 * Duplicate Address Detection
3788 */
3789 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3790 {
3791 unsigned long rand_num;
3792 struct inet6_dev *idev = ifp->idev;
3793 u64 nonce;
3794
3795 if (ifp->flags & IFA_F_OPTIMISTIC)
3796 rand_num = 0;
3797 else
3798 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3799
3800 nonce = 0;
3801 if (idev->cnf.enhanced_dad ||
3802 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3803 do
3804 get_random_bytes(&nonce, 6);
3805 while (nonce == 0);
3806 }
3807 ifp->dad_nonce = nonce;
3808 ifp->dad_probes = idev->cnf.dad_transmits;
3809 addrconf_mod_dad_work(ifp, rand_num);
3810 }
3811
3812 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3813 {
3814 struct inet6_dev *idev = ifp->idev;
3815 struct net_device *dev = idev->dev;
3816 bool bump_id, notify = false;
3817
3818 addrconf_join_solict(dev, &ifp->addr);
3819
3820 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3821
3822 read_lock_bh(&idev->lock);
3823 spin_lock(&ifp->lock);
3824 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3825 goto out;
3826
3827 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3828 (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
3829 idev->cnf.accept_dad < 1) ||
3830 !(ifp->flags&IFA_F_TENTATIVE) ||
3831 ifp->flags & IFA_F_NODAD) {
3832 bump_id = ifp->flags & IFA_F_TENTATIVE;
3833 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3834 spin_unlock(&ifp->lock);
3835 read_unlock_bh(&idev->lock);
3836
3837 addrconf_dad_completed(ifp, bump_id);
3838 return;
3839 }
3840
3841 if (!(idev->if_flags & IF_READY)) {
3842 spin_unlock(&ifp->lock);
3843 read_unlock_bh(&idev->lock);
3844 /*
3845 * If the device is not ready:
3846 * - keep it tentative if it is a permanent address.
3847 * - otherwise, kill it.
3848 */
3849 in6_ifa_hold(ifp);
3850 addrconf_dad_stop(ifp, 0);
3851 return;
3852 }
3853
3854 /*
3855 * Optimistic nodes can start receiving
3856 * frames right away.
3857 */
3858 if (ifp->flags & IFA_F_OPTIMISTIC) {
3859 ip6_ins_rt(ifp->rt);
3860 if (ipv6_use_optimistic_addr(dev_net(dev), idev)) {
3861 /* Because optimistic nodes can use this address,
3862 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3863 */
3864 notify = true;
3865 }
3866 }
3867
3868 addrconf_dad_kick(ifp);
3869 out:
3870 spin_unlock(&ifp->lock);
3871 read_unlock_bh(&idev->lock);
3872 if (notify)
3873 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3874 }
3875
3876 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
3877 {
3878 bool begin_dad = false;
3879
3880 spin_lock_bh(&ifp->lock);
3881 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
3882 ifp->state = INET6_IFADDR_STATE_PREDAD;
3883 begin_dad = true;
3884 }
3885 spin_unlock_bh(&ifp->lock);
3886
3887 if (begin_dad)
3888 addrconf_mod_dad_work(ifp, 0);
3889 }
3890
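/*
 * The DAD worker drives a small per-address state machine:
 *   _PREDAD: begin DAD (join the solicited-node group, start probing);
 *   _ERRDAD: abort, a duplicate was reported, tear the address down;
 *   otherwise: send one more NS for the address and re-arm the timer
 *   until dad_probes reaches zero, at which point the address leaves
 *   the tentative state (addrconf_dad_completed).
 */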
3891 static void addrconf_dad_work(struct work_struct *w)
3892 {
3893 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
3894 struct inet6_ifaddr,
3895 dad_work);
3896 struct inet6_dev *idev = ifp->idev;
3897 bool bump_id, disable_ipv6 = false;
3898 struct in6_addr mcaddr;
3899
3900 enum {
3901 DAD_PROCESS,
3902 DAD_BEGIN,
3903 DAD_ABORT,
3904 } action = DAD_PROCESS;
3905
3906 rtnl_lock();
3907
3908 spin_lock_bh(&ifp->lock);
3909 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
3910 action = DAD_BEGIN;
3911 ifp->state = INET6_IFADDR_STATE_DAD;
3912 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3913 action = DAD_ABORT;
3914 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3915
3916 if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
3917 idev->cnf.accept_dad > 1) &&
3918 !idev->cnf.disable_ipv6 &&
3919 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
3920 struct in6_addr addr;
3921
3922 addr.s6_addr32[0] = htonl(0xfe800000);
3923 addr.s6_addr32[1] = 0;
3924
3925 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
3926 ipv6_addr_equal(&ifp->addr, &addr)) {
3927 /* DAD failed for link-local based on MAC */
3928 idev->cnf.disable_ipv6 = 1;
3929
3930 pr_info("%s: IPv6 being disabled!\n",
3931 ifp->idev->dev->name);
3932 disable_ipv6 = true;
3933 }
3934 }
3935 }
3936 spin_unlock_bh(&ifp->lock);
3937
3938 if (action == DAD_BEGIN) {
3939 addrconf_dad_begin(ifp);
3940 goto out;
3941 } else if (action == DAD_ABORT) {
3942 in6_ifa_hold(ifp);
3943 addrconf_dad_stop(ifp, 1);
3944 if (disable_ipv6)
3945 addrconf_ifdown(idev->dev, 0);
3946 goto out;
3947 }
3948
3949 if (!ifp->dad_probes && addrconf_dad_end(ifp))
3950 goto out;
3951
3952 write_lock_bh(&idev->lock);
3953 if (idev->dead || !(idev->if_flags & IF_READY)) {
3954 write_unlock_bh(&idev->lock);
3955 goto out;
3956 }
3957
3958 spin_lock(&ifp->lock);
3959 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
3960 spin_unlock(&ifp->lock);
3961 write_unlock_bh(&idev->lock);
3962 goto out;
3963 }
3964
3965 if (ifp->dad_probes == 0) {
3966 /*
3967 * DAD was successful
3968 */
3969
3970 bump_id = ifp->flags & IFA_F_TENTATIVE;
3971 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3972 spin_unlock(&ifp->lock);
3973 write_unlock_bh(&idev->lock);
3974
3975 addrconf_dad_completed(ifp, bump_id);
3976
3977 goto out;
3978 }
3979
3980 ifp->dad_probes--;
3981 addrconf_mod_dad_work(ifp,
3982 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
3983 spin_unlock(&ifp->lock);
3984 write_unlock_bh(&idev->lock);
3985
3986 /* send a neighbour solicitation for our addr */
3987 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
3988 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
3989 ifp->dad_nonce);
3990 out:
3991 in6_ifa_put(ifp);
3992 rtnl_unlock();
3993 }
3994
3995 /* ifp->idev must be at least read locked */
3996 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
3997 {
3998 struct inet6_ifaddr *ifpiter;
3999 struct inet6_dev *idev = ifp->idev;
4000
4001 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4002 if (ifpiter->scope > IFA_LINK)
4003 break;
4004 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4005 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4006 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4007 IFA_F_PERMANENT)
4008 return false;
4009 }
4010 return true;
4011 }
4012
4013 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
4014 {
4015 struct net_device *dev = ifp->idev->dev;
4016 struct in6_addr lladdr;
4017 bool send_rs, send_mld;
4018
4019 addrconf_del_dad_work(ifp);
4020
4021 /*
4022 * Configure the address for reception. Now it is valid.
4023 */
4024
4025 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4026
4027 /* If the added address is link-local and we are prepared to process
4028 router advertisements, start sending router solicitations.
4029 */
4030
4031 read_lock_bh(&ifp->idev->lock);
4032 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4033 send_rs = send_mld &&
4034 ipv6_accept_ra(ifp->idev) &&
4035 ifp->idev->cnf.rtr_solicits != 0 &&
4036 (dev->flags&IFF_LOOPBACK) == 0;
4037 read_unlock_bh(&ifp->idev->lock);
4038
4039 /* While DAD is in progress the MLD report's source address is the
4040 * unspecified address. Resend it with the proper link-local address now.
4041 */
4042 if (send_mld)
4043 ipv6_mc_dad_complete(ifp->idev);
4044
4045 if (send_rs) {
4046 /*
4047 * If a host has already performed a random delay
4048 * [...] as part of DAD [...] there is no need
4049 * to delay again before sending the first RS
4050 */
4051 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4052 return;
4053 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4054
4055 write_lock_bh(&ifp->idev->lock);
4056 spin_lock(&ifp->lock);
4057 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4058 ifp->idev->cnf.rtr_solicit_interval);
4059 ifp->idev->rs_probes = 1;
4060 ifp->idev->if_flags |= IF_RS_SENT;
4061 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4062 spin_unlock(&ifp->lock);
4063 write_unlock_bh(&ifp->idev->lock);
4064 }
4065
4066 if (bump_id)
4067 rt_genid_bump_ipv6(dev_net(dev));
4068
4069 /* Make sure that a new temporary address will be created
4070 * before this temporary address becomes deprecated.
4071 */
4072 if (ifp->flags & IFA_F_TEMPORARY)
4073 addrconf_verify_rtnl();
4074 }
4075
4076 static void addrconf_dad_run(struct inet6_dev *idev)
4077 {
4078 struct inet6_ifaddr *ifp;
4079
4080 read_lock_bh(&idev->lock);
4081 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4082 spin_lock(&ifp->lock);
4083 if (ifp->flags & IFA_F_TENTATIVE &&
4084 ifp->state == INET6_IFADDR_STATE_DAD)
4085 addrconf_dad_kick(ifp);
4086 spin_unlock(&ifp->lock);
4087 }
4088 read_unlock_bh(&idev->lock);
4089 }
4090
4091 #ifdef CONFIG_PROC_FS
4092 struct if6_iter_state {
4093 struct seq_net_private p;
4094 int bucket;
4095 int offset;
4096 };
4097
4098 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4099 {
4100 struct inet6_ifaddr *ifa = NULL;
4101 struct if6_iter_state *state = seq->private;
4102 struct net *net = seq_file_net(seq);
4103 int p = 0;
4104
4105 /* initial bucket if pos is 0 */
4106 if (pos == 0) {
4107 state->bucket = 0;
4108 state->offset = 0;
4109 }
4110
4111 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4112 hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
4113 addr_lst) {
4114 if (!net_eq(dev_net(ifa->idev->dev), net))
4115 continue;
4116 /* sync with offset */
4117 if (p < state->offset) {
4118 p++;
4119 continue;
4120 }
4121 state->offset++;
4122 return ifa;
4123 }
4124
4125 /* prepare for next bucket */
4126 state->offset = 0;
4127 p = 0;
4128 }
4129 return NULL;
4130 }
4131
4132 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4133 struct inet6_ifaddr *ifa)
4134 {
4135 struct if6_iter_state *state = seq->private;
4136 struct net *net = seq_file_net(seq);
4137
4138 hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
4139 if (!net_eq(dev_net(ifa->idev->dev), net))
4140 continue;
4141 state->offset++;
4142 return ifa;
4143 }
4144
4145 while (++state->bucket < IN6_ADDR_HSIZE) {
4146 state->offset = 0;
4147 hlist_for_each_entry_rcu_bh(ifa,
4148 &inet6_addr_lst[state->bucket], addr_lst) {
4149 if (!net_eq(dev_net(ifa->idev->dev), net))
4150 continue;
4151 state->offset++;
4152 return ifa;
4153 }
4154 }
4155
4156 return NULL;
4157 }
4158
4159 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4160 __acquires(rcu_bh)
4161 {
4162 rcu_read_lock_bh();
4163 return if6_get_first(seq, *pos);
4164 }
4165
4166 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4167 {
4168 struct inet6_ifaddr *ifa;
4169
4170 ifa = if6_get_next(seq, v);
4171 ++*pos;
4172 return ifa;
4173 }
4174
4175 static void if6_seq_stop(struct seq_file *seq, void *v)
4176 __releases(rcu_bh)
4177 {
4178 rcu_read_unlock_bh();
4179 }
4180
4181 static int if6_seq_show(struct seq_file *seq, void *v)
4182 {
4183 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4184 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4185 &ifp->addr,
4186 ifp->idev->dev->ifindex,
4187 ifp->prefix_len,
4188 ifp->scope,
4189 (u8) ifp->flags,
4190 ifp->idev->dev->name);
4191 return 0;
4192 }
4193
4194 static const struct seq_operations if6_seq_ops = {
4195 .start = if6_seq_start,
4196 .next = if6_seq_next,
4197 .show = if6_seq_show,
4198 .stop = if6_seq_stop,
4199 };
4200
4201 static int if6_seq_open(struct inode *inode, struct file *file)
4202 {
4203 return seq_open_net(inode, file, &if6_seq_ops,
4204 sizeof(struct if6_iter_state));
4205 }
4206
4207 static const struct file_operations if6_fops = {
4208 .owner = THIS_MODULE,
4209 .open = if6_seq_open,
4210 .read = seq_read,
4211 .llseek = seq_lseek,
4212 .release = seq_release_net,
4213 };
4214
4215 static int __net_init if6_proc_net_init(struct net *net)
4216 {
4217 if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
4218 return -ENOMEM;
4219 return 0;
4220 }
4221
4222 static void __net_exit if6_proc_net_exit(struct net *net)
4223 {
4224 remove_proc_entry("if_inet6", net->proc_net);
4225 }
4226
4227 static struct pernet_operations if6_proc_net_ops = {
4228 .init = if6_proc_net_init,
4229 .exit = if6_proc_net_exit,
4230 };
4231
4232 int __init if6_proc_init(void)
4233 {
4234 return register_pernet_subsys(&if6_proc_net_ops);
4235 }
4236
4237 void if6_proc_exit(void)
4238 {
4239 unregister_pernet_subsys(&if6_proc_net_ops);
4240 }
4241 #endif /* CONFIG_PROC_FS */
4242
4243 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4244 /* Check if address is a home address configured on any interface. */
4245 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4246 {
4247 unsigned int hash = inet6_addr_hash(net, addr);
4248 struct inet6_ifaddr *ifp = NULL;
4249 int ret = 0;
4250
4251 rcu_read_lock_bh();
4252 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
4253 if (!net_eq(dev_net(ifp->idev->dev), net))
4254 continue;
4255 if (ipv6_addr_equal(&ifp->addr, addr) &&
4256 (ifp->flags & IFA_F_HOMEADDRESS)) {
4257 ret = 1;
4258 break;
4259 }
4260 }
4261 rcu_read_unlock_bh();
4262 return ret;
4263 }
4264 #endif
4265
4266 /*
4267 * Periodic address status verification
4268 */
4269
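/* Walk the whole address hash under RCU: delete addresses whose valid
 * lifetime has expired, deprecate those past their preferred lifetime,
 * regenerate temporary addresses that are close to deprecation, and
 * reschedule the delayed work for the earliest upcoming event.
 */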
4270 static void addrconf_verify_rtnl(void)
4271 {
4272 unsigned long now, next, next_sec, next_sched;
4273 struct inet6_ifaddr *ifp;
4274 int i;
4275
4276 ASSERT_RTNL();
4277
4278 rcu_read_lock_bh();
4279 now = jiffies;
4280 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4281
4282 cancel_delayed_work(&addr_chk_work);
4283
4284 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4285 restart:
4286 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4287 unsigned long age;
4288
4289 /* A permanent (IFA_F_PERMANENT) address can still have a finite
4290 * life time: preferred_lft may be set to a non-zero, non-infinity
4291 * value while valid_lft is infinity.
4292 */
4293 if ((ifp->flags & IFA_F_PERMANENT) &&
4294 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4295 continue;
4296
4297 spin_lock(&ifp->lock);
4298 /* We try to batch several events at once. */
4299 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4300
4301 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4302 age >= ifp->valid_lft) {
4303 spin_unlock(&ifp->lock);
4304 in6_ifa_hold(ifp);
4305 ipv6_del_addr(ifp);
4306 goto restart;
4307 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4308 spin_unlock(&ifp->lock);
4309 continue;
4310 } else if (age >= ifp->prefered_lft) {
4311 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4312 int deprecate = 0;
4313
4314 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4315 deprecate = 1;
4316 ifp->flags |= IFA_F_DEPRECATED;
4317 }
4318
4319 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4320 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4321 next = ifp->tstamp + ifp->valid_lft * HZ;
4322
4323 spin_unlock(&ifp->lock);
4324
4325 if (deprecate) {
4326 in6_ifa_hold(ifp);
4327
4328 ipv6_ifa_notify(0, ifp);
4329 in6_ifa_put(ifp);
4330 goto restart;
4331 }
4332 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4333 !(ifp->flags&IFA_F_TENTATIVE)) {
4334 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4335 ifp->idev->cnf.dad_transmits *
4336 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
4337
4338 if (age >= ifp->prefered_lft - regen_advance) {
4339 struct inet6_ifaddr *ifpub = ifp->ifpub;
4340 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4341 next = ifp->tstamp + ifp->prefered_lft * HZ;
4342 if (!ifp->regen_count && ifpub) {
4343 ifp->regen_count++;
4344 in6_ifa_hold(ifp);
4345 in6_ifa_hold(ifpub);
4346 spin_unlock(&ifp->lock);
4347
4348 spin_lock(&ifpub->lock);
4349 ifpub->regen_count = 0;
4350 spin_unlock(&ifpub->lock);
4351 ipv6_create_tempaddr(ifpub, ifp);
4352 in6_ifa_put(ifpub);
4353 in6_ifa_put(ifp);
4354 goto restart;
4355 }
4356 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4357 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4358 spin_unlock(&ifp->lock);
4359 } else {
4360 /* ifp->prefered_lft <= ifp->valid_lft */
4361 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4362 next = ifp->tstamp + ifp->prefered_lft * HZ;
4363 spin_unlock(&ifp->lock);
4364 }
4365 }
4366 }
4367
4368 next_sec = round_jiffies_up(next);
4369 next_sched = next;
4370
4371 /* If rounded timeout is accurate enough, accept it. */
4372 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4373 next_sched = next_sec;
4374
4375 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4376 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4377 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4378
4379 ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4380 now, next, next_sec, next_sched);
4381 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4382 rcu_read_unlock_bh();
4383 }
4384
4385 static void addrconf_verify_work(struct work_struct *w)
4386 {
4387 rtnl_lock();
4388 addrconf_verify_rtnl();
4389 rtnl_unlock();
4390 }
4391
4392 static void addrconf_verify(void)
4393 {
4394 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4395 }
4396
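/* Pull the address out of the IFA_ADDRESS/IFA_LOCAL attributes. When
 * both are supplied and differ, IFA_LOCAL is the local address and the
 * IFA_ADDRESS value is returned through *peer_pfx as the peer.
 */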
4397 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4398 struct in6_addr **peer_pfx)
4399 {
4400 struct in6_addr *pfx = NULL;
4401
4402 *peer_pfx = NULL;
4403
4404 if (addr)
4405 pfx = nla_data(addr);
4406
4407 if (local) {
4408 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4409 *peer_pfx = pfx;
4410 pfx = nla_data(local);
4411 }
4412
4413 return pfx;
4414 }
4415
4416 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4417 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4418 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4419 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4420 [IFA_FLAGS] = { .len = sizeof(u32) },
4421 };
4422
4423 static int
4424 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4425 struct netlink_ext_ack *extack)
4426 {
4427 struct net *net = sock_net(skb->sk);
4428 struct ifaddrmsg *ifm;
4429 struct nlattr *tb[IFA_MAX+1];
4430 struct in6_addr *pfx, *peer_pfx;
4431 u32 ifa_flags;
4432 int err;
4433
4434 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4435 extack);
4436 if (err < 0)
4437 return err;
4438
4439 ifm = nlmsg_data(nlh);
4440 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4441 if (!pfx)
4442 return -EINVAL;
4443
4444 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4445
4446 /* We ignore other flags so far. */
4447 ifa_flags &= IFA_F_MANAGETEMPADDR;
4448
4449 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4450 ifm->ifa_prefixlen);
4451 }
4452
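/* Modify an existing address in place (RTM_NEWADDR with NLM_F_REPLACE):
 * recompute flags and lifetimes, refresh or clean up the prefix route,
 * and resync managed temporary addresses.
 */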
4453 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
4454 u32 prefered_lft, u32 valid_lft)
4455 {
4456 u32 flags;
4457 clock_t expires;
4458 unsigned long timeout;
4459 bool was_managetempaddr;
4460 bool had_prefixroute;
4461
4462 ASSERT_RTNL();
4463
4464 if (!valid_lft || (prefered_lft > valid_lft))
4465 return -EINVAL;
4466
4467 if (ifa_flags & IFA_F_MANAGETEMPADDR &&
4468 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4469 return -EINVAL;
4470
4471 timeout = addrconf_timeout_fixup(valid_lft, HZ);
4472 if (addrconf_finite_timeout(timeout)) {
4473 expires = jiffies_to_clock_t(timeout * HZ);
4474 valid_lft = timeout;
4475 flags = RTF_EXPIRES;
4476 } else {
4477 expires = 0;
4478 flags = 0;
4479 ifa_flags |= IFA_F_PERMANENT;
4480 }
4481
4482 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
4483 if (addrconf_finite_timeout(timeout)) {
4484 if (timeout == 0)
4485 ifa_flags |= IFA_F_DEPRECATED;
4486 prefered_lft = timeout;
4487 }
4488
4489 spin_lock_bh(&ifp->lock);
4490 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4491 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4492 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4493 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4494 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4495 IFA_F_NOPREFIXROUTE);
4496 ifp->flags |= ifa_flags;
4497 ifp->tstamp = jiffies;
4498 ifp->valid_lft = valid_lft;
4499 ifp->prefered_lft = prefered_lft;
4500
4501 spin_unlock_bh(&ifp->lock);
4502 if (!(ifp->flags&IFA_F_TENTATIVE))
4503 ipv6_ifa_notify(0, ifp);
4504
4505 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
4506 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
4507 expires, flags);
4508 } else if (had_prefixroute) {
4509 enum cleanup_prefix_rt_t action;
4510 unsigned long rt_expires;
4511
4512 write_lock_bh(&ifp->idev->lock);
4513 action = check_cleanup_prefix_route(ifp, &rt_expires);
4514 write_unlock_bh(&ifp->idev->lock);
4515
4516 if (action != CLEANUP_PREFIX_RT_NOP) {
4517 cleanup_prefix_route(ifp, rt_expires,
4518 action == CLEANUP_PREFIX_RT_DEL);
4519 }
4520 }
4521
4522 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4523 if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4524 valid_lft = prefered_lft = 0;
4525 manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
4526 !was_managetempaddr, jiffies);
4527 }
4528
4529 addrconf_verify_rtnl();
4530
4531 return 0;
4532 }
4533
4534 static int
4535 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4536 struct netlink_ext_ack *extack)
4537 {
4538 struct net *net = sock_net(skb->sk);
4539 struct ifaddrmsg *ifm;
4540 struct nlattr *tb[IFA_MAX+1];
4541 struct in6_addr *pfx, *peer_pfx;
4542 struct inet6_ifaddr *ifa;
4543 struct net_device *dev;
4544 u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
4545 u32 ifa_flags;
4546 int err;
4547
4548 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4549 extack);
4550 if (err < 0)
4551 return err;
4552
4553 ifm = nlmsg_data(nlh);
4554 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4555 if (!pfx)
4556 return -EINVAL;
4557
4558 if (tb[IFA_CACHEINFO]) {
4559 struct ifa_cacheinfo *ci;
4560
4561 ci = nla_data(tb[IFA_CACHEINFO]);
4562 valid_lft = ci->ifa_valid;
4563 preferred_lft = ci->ifa_prefered;
4564 } else {
4565 preferred_lft = INFINITY_LIFE_TIME;
4566 valid_lft = INFINITY_LIFE_TIME;
4567 }
4568
4569 dev = __dev_get_by_index(net, ifm->ifa_index);
4570 if (!dev)
4571 return -ENODEV;
4572
4573 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4574
4575 /* We ignore other flags so far. */
4576 ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4577 IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
4578
4579 ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
4580 if (!ifa) {
4581 /*
4582 * It would be best to check for !NLM_F_CREATE here but
4583 * userspace already relies on not having to provide this.
4584 */
4585 return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx,
4586 ifm->ifa_prefixlen, ifa_flags,
4587 preferred_lft, valid_lft, extack);
4588 }
4589
4590 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4591 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4592 err = -EEXIST;
4593 else
4594 err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);
4595
4596 in6_ifa_put(ifa);
4597
4598 return err;
4599 }
4600
4601 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4602 u8 scope, int ifindex)
4603 {
4604 struct ifaddrmsg *ifm;
4605
4606 ifm = nlmsg_data(nlh);
4607 ifm->ifa_family = AF_INET6;
4608 ifm->ifa_prefixlen = prefixlen;
4609 ifm->ifa_flags = flags;
4610 ifm->ifa_scope = scope;
4611 ifm->ifa_index = ifindex;
4612 }
4613
4614 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4615 unsigned long tstamp, u32 preferred, u32 valid)
4616 {
4617 struct ifa_cacheinfo ci;
4618
4619 ci.cstamp = cstamp_delta(cstamp);
4620 ci.tstamp = cstamp_delta(tstamp);
4621 ci.ifa_prefered = preferred;
4622 ci.ifa_valid = valid;
4623
4624 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4625 }
4626
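/* Map an ifaddrmsg scope (IFA_HOST/IFA_LINK/IFA_SITE) onto the
 * corresponding RT_SCOPE_* value used in netlink dumps.
 */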
4627 static inline int rt_scope(int ifa_scope)
4628 {
4629 if (ifa_scope & IFA_HOST)
4630 return RT_SCOPE_HOST;
4631 else if (ifa_scope & IFA_LINK)
4632 return RT_SCOPE_LINK;
4633 else if (ifa_scope & IFA_SITE)
4634 return RT_SCOPE_SITE;
4635 else
4636 return RT_SCOPE_UNIVERSE;
4637 }
4638
4639 static inline int inet6_ifaddr_msgsize(void)
4640 {
4641 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4642 + nla_total_size(16) /* IFA_LOCAL */
4643 + nla_total_size(16) /* IFA_ADDRESS */
4644 + nla_total_size(sizeof(struct ifa_cacheinfo))
4645 + nla_total_size(4) /* IFA_FLAGS */;
4646 }
4647
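/* Fill one address message for a netlink dump or notification,
 * converting the stored lifetimes into remaining seconds unless the
 * address is permanent with an infinite preferred lifetime.
 */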
4648 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4649 u32 portid, u32 seq, int event, unsigned int flags)
4650 {
4651 struct nlmsghdr *nlh;
4652 u32 preferred, valid;
4653
4654 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4655 if (!nlh)
4656 return -EMSGSIZE;
4657
4658 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4659 ifa->idev->dev->ifindex);
4660
4661 if (!((ifa->flags&IFA_F_PERMANENT) &&
4662 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
4663 preferred = ifa->prefered_lft;
4664 valid = ifa->valid_lft;
4665 if (preferred != INFINITY_LIFE_TIME) {
4666 long tval = (jiffies - ifa->tstamp)/HZ;
4667 if (preferred > tval)
4668 preferred -= tval;
4669 else
4670 preferred = 0;
4671 if (valid != INFINITY_LIFE_TIME) {
4672 if (valid > tval)
4673 valid -= tval;
4674 else
4675 valid = 0;
4676 }
4677 }
4678 } else {
4679 preferred = INFINITY_LIFE_TIME;
4680 valid = INFINITY_LIFE_TIME;
4681 }
4682
4683 if (!ipv6_addr_any(&ifa->peer_addr)) {
4684 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
4685 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
4686 goto error;
4687 } else
4688 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
4689 goto error;
4690
4691 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
4692 goto error;
4693
4694 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
4695 goto error;
4696
4697 nlmsg_end(skb, nlh);
4698 return 0;
4699
4700 error:
4701 nlmsg_cancel(skb, nlh);
4702 return -EMSGSIZE;
4703 }
4704
4705 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
4706 u32 portid, u32 seq, int event, u16 flags)
4707 {
4708 struct nlmsghdr *nlh;
4709 u8 scope = RT_SCOPE_UNIVERSE;
4710 int ifindex = ifmca->idev->dev->ifindex;
4711
4712 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
4713 scope = RT_SCOPE_SITE;
4714
4715 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4716 if (!nlh)
4717 return -EMSGSIZE;
4718
4719 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4720 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
4721 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
4722 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4723 nlmsg_cancel(skb, nlh);
4724 return -EMSGSIZE;
4725 }
4726
4727 nlmsg_end(skb, nlh);
4728 return 0;
4729 }
4730
4731 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
4732 u32 portid, u32 seq, int event, unsigned int flags)
4733 {
4734 struct nlmsghdr *nlh;
4735 u8 scope = RT_SCOPE_UNIVERSE;
4736 int ifindex = ifaca->aca_idev->dev->ifindex;
4737
4738 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
4739 scope = RT_SCOPE_SITE;
4740
4741 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4742 if (!nlh)
4743 return -EMSGSIZE;
4744
4745 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4746 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
4747 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
4748 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4749 nlmsg_cancel(skb, nlh);
4750 return -EMSGSIZE;
4751 }
4752
4753 nlmsg_end(skb, nlh);
4754 return 0;
4755 }
4756
4757 enum addr_type_t {
4758 UNICAST_ADDR,
4759 MULTICAST_ADDR,
4760 ANYCAST_ADDR,
4761 };
4762
4763 /* called with rcu_read_lock() */
4764 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4765 struct netlink_callback *cb, enum addr_type_t type,
4766 int s_ip_idx, int *p_ip_idx)
4767 {
4768 struct ifmcaddr6 *ifmca;
4769 struct ifacaddr6 *ifaca;
4770 int err = 1;
4771 int ip_idx = *p_ip_idx;
4772
4773 read_lock_bh(&idev->lock);
4774 switch (type) {
4775 case UNICAST_ADDR: {
4776 struct inet6_ifaddr *ifa;
4777
4778 /* unicast address incl. temp addr */
4779 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4780 if (++ip_idx < s_ip_idx)
4781 continue;
4782 err = inet6_fill_ifaddr(skb, ifa,
4783 NETLINK_CB(cb->skb).portid,
4784 cb->nlh->nlmsg_seq,
4785 RTM_NEWADDR,
4786 NLM_F_MULTI);
4787 if (err < 0)
4788 break;
4789 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4790 }
4791 break;
4792 }
4793 case MULTICAST_ADDR:
4794 /* multicast address */
4795 for (ifmca = idev->mc_list; ifmca;
4796 ifmca = ifmca->next, ip_idx++) {
4797 if (ip_idx < s_ip_idx)
4798 continue;
4799 err = inet6_fill_ifmcaddr(skb, ifmca,
4800 NETLINK_CB(cb->skb).portid,
4801 cb->nlh->nlmsg_seq,
4802 RTM_GETMULTICAST,
4803 NLM_F_MULTI);
4804 if (err < 0)
4805 break;
4806 }
4807 break;
4808 case ANYCAST_ADDR:
4809 /* anycast address */
4810 for (ifaca = idev->ac_list; ifaca;
4811 ifaca = ifaca->aca_next, ip_idx++) {
4812 if (ip_idx < s_ip_idx)
4813 continue;
4814 err = inet6_fill_ifacaddr(skb, ifaca,
4815 NETLINK_CB(cb->skb).portid,
4816 cb->nlh->nlmsg_seq,
4817 RTM_GETANYCAST,
4818 NLM_F_MULTI);
4819 if (err < 0)
4820 break;
4821 }
4822 break;
4823 default:
4824 break;
4825 }
4826 read_unlock_bh(&idev->lock);
4827 *p_ip_idx = ip_idx;
4828 return err;
4829 }
4830
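/* Common dump loop for unicast, multicast and anycast requests: walk
 * the per-net device hash under RCU, resuming from the hash slot,
 * device index and address index saved in cb->args[] last time.
 */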
4831 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
4832 enum addr_type_t type)
4833 {
4834 struct net *net = sock_net(skb->sk);
4835 int h, s_h;
4836 int idx, ip_idx;
4837 int s_idx, s_ip_idx;
4838 struct net_device *dev;
4839 struct inet6_dev *idev;
4840 struct hlist_head *head;
4841
4842 s_h = cb->args[0];
4843 s_idx = idx = cb->args[1];
4844 s_ip_idx = ip_idx = cb->args[2];
4845
4846 rcu_read_lock();
4847 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
4848 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4849 idx = 0;
4850 head = &net->dev_index_head[h];
4851 hlist_for_each_entry_rcu(dev, head, index_hlist) {
4852 if (idx < s_idx)
4853 goto cont;
4854 if (h > s_h || idx > s_idx)
4855 s_ip_idx = 0;
4856 ip_idx = 0;
4857 idev = __in6_dev_get(dev);
4858 if (!idev)
4859 goto cont;
4860
4861 if (in6_dump_addrs(idev, skb, cb, type,
4862 s_ip_idx, &ip_idx) < 0)
4863 goto done;
4864 cont:
4865 idx++;
4866 }
4867 }
4868 done:
4869 rcu_read_unlock();
4870 cb->args[0] = h;
4871 cb->args[1] = idx;
4872 cb->args[2] = ip_idx;
4873
4874 return skb->len;
4875 }
4876
4877 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
4878 {
4879 enum addr_type_t type = UNICAST_ADDR;
4880
4881 return inet6_dump_addr(skb, cb, type);
4882 }
4883
4884 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
4885 {
4886 enum addr_type_t type = MULTICAST_ADDR;
4887
4888 return inet6_dump_addr(skb, cb, type);
4889 }
4890
4891
4892 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
4893 {
4894 enum addr_type_t type = ANYCAST_ADDR;
4895
4896 return inet6_dump_addr(skb, cb, type);
4897 }
4898
4899 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4900 struct netlink_ext_ack *extack)
4901 {
4902 struct net *net = sock_net(in_skb->sk);
4903 struct ifaddrmsg *ifm;
4904 struct nlattr *tb[IFA_MAX+1];
4905 struct in6_addr *addr = NULL, *peer;
4906 struct net_device *dev = NULL;
4907 struct inet6_ifaddr *ifa;
4908 struct sk_buff *skb;
4909 int err;
4910
4911 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4912 extack);
4913 if (err < 0)
4914 return err;
4915
4916 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
4917 if (!addr)
4918 return -EINVAL;
4919
4920 ifm = nlmsg_data(nlh);
4921 if (ifm->ifa_index)
4922 dev = dev_get_by_index(net, ifm->ifa_index);
4923
4924 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
4925 if (!ifa) {
4926 err = -EADDRNOTAVAIL;
4927 goto errout;
4928 }
4929
4930 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
4931 if (!skb) {
4932 err = -ENOBUFS;
4933 goto errout_ifa;
4934 }
4935
4936 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
4937 nlh->nlmsg_seq, RTM_NEWADDR, 0);
4938 if (err < 0) {
4939 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4940 WARN_ON(err == -EMSGSIZE);
4941 kfree_skb(skb);
4942 goto errout_ifa;
4943 }
4944 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4945 errout_ifa:
4946 in6_ifa_put(ifa);
4947 errout:
4948 if (dev)
4949 dev_put(dev);
4950 return err;
4951 }
4952
4953 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
4954 {
4955 struct sk_buff *skb;
4956 struct net *net = dev_net(ifa->idev->dev);
4957 int err = -ENOBUFS;
4958
4959 /* Don't send a DELADDR notification for a TENTATIVE address:
4960 * unless DAD has failed, the NEWADDR notification is only sent
4961 * after the TENTATIVE flag has been cleared.
4962 */
4963 if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) &&
4964 event == RTM_DELADDR)
4965 return;
4966
4967 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
4968 if (!skb)
4969 goto errout;
4970
4971 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
4972 if (err < 0) {
4973 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4974 WARN_ON(err == -EMSGSIZE);
4975 kfree_skb(skb);
4976 goto errout;
4977 }
4978 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
4979 return;
4980 errout:
4981 if (err < 0)
4982 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
4983 }
4984
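/* Flatten the per-device configuration into the DEVCONF_* indexed
 * array carried in IFLA_INET6_CONF; interval values are converted
 * from jiffies to milliseconds.
 */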
4985 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
4986 __s32 *array, int bytes)
4987 {
4988 BUG_ON(bytes < (DEVCONF_MAX * 4));
4989
4990 memset(array, 0, bytes);
4991 array[DEVCONF_FORWARDING] = cnf->forwarding;
4992 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
4993 array[DEVCONF_MTU6] = cnf->mtu6;
4994 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
4995 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
4996 array[DEVCONF_AUTOCONF] = cnf->autoconf;
4997 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
4998 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
4999 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5000 jiffies_to_msecs(cnf->rtr_solicit_interval);
5001 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5002 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5003 array[DEVCONF_RTR_SOLICIT_DELAY] =
5004 jiffies_to_msecs(cnf->rtr_solicit_delay);
5005 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5006 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5007 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5008 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5009 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5010 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5011 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5012 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5013 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5014 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5015 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5016 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5017 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5018 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5019 #ifdef CONFIG_IPV6_ROUTER_PREF
5020 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5021 array[DEVCONF_RTR_PROBE_INTERVAL] =
5022 jiffies_to_msecs(cnf->rtr_probe_interval);
5023 #ifdef CONFIG_IPV6_ROUTE_INFO
5024 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5025 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5026 #endif
5027 #endif
5028 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5029 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5030 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5031 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5032 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5033 #endif
5034 #ifdef CONFIG_IPV6_MROUTE
5035 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
5036 #endif
5037 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5038 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5039 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5040 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5041 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5042 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5043 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5044 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5045 /* we omit DEVCONF_STABLE_SECRET for now */
5046 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5047 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5048 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5049 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5050 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5051 #ifdef CONFIG_IPV6_SEG6_HMAC
5052 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5053 #endif
5054 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5055 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5056 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5057 }
5058
5059 static inline size_t inet6_ifla6_size(void)
5060 {
5061 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5062 + nla_total_size(sizeof(struct ifla_cacheinfo))
5063 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5064 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5065 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5066 + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
5067 }
5068
5069 static inline size_t inet6_if_nlmsg_size(void)
5070 {
5071 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5072 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5073 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5074 + nla_total_size(4) /* IFLA_MTU */
5075 + nla_total_size(4) /* IFLA_LINK */
5076 + nla_total_size(1) /* IFLA_OPERSTATE */
5077 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5078 }
5079
5080 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5081 int bytes)
5082 {
5083 int i;
5084 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5085 BUG_ON(pad < 0);
5086
5087 /* Use put_unaligned() because stats may not be aligned for u64. */
5088 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5089 for (i = 1; i < ICMP6_MIB_MAX; i++)
5090 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5091
5092 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5093 }
5094
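/* Sum the per-cpu 64-bit IP MIB counters into a local buffer and copy
 * the result out; slot 0 carries IPSTATS_MIB_MAX so userspace knows
 * how many counters follow.
 */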
5095 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5096 int bytes, size_t syncpoff)
5097 {
5098 int i, c;
5099 u64 buff[IPSTATS_MIB_MAX];
5100 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5101
5102 BUG_ON(pad < 0);
5103
5104 memset(buff, 0, sizeof(buff));
5105 buff[0] = IPSTATS_MIB_MAX;
5106
5107 for_each_possible_cpu(c) {
5108 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5109 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5110 }
5111
5112 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5113 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5114 }
5115
5116 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5117 int bytes)
5118 {
5119 switch (attrtype) {
5120 case IFLA_INET6_STATS:
5121 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5122 offsetof(struct ipstats_mib, syncp));
5123 break;
5124 case IFLA_INET6_ICMP6STATS:
5125 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5126 break;
5127 }
5128 }
5129
5130 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5131 u32 ext_filter_mask)
5132 {
5133 struct nlattr *nla;
5134 struct ifla_cacheinfo ci;
5135
5136 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5137 goto nla_put_failure;
5138 ci.max_reasm_len = IPV6_MAXPLEN;
5139 ci.tstamp = cstamp_delta(idev->tstamp);
5140 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5141 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5142 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5143 goto nla_put_failure;
5144 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5145 if (!nla)
5146 goto nla_put_failure;
5147 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5148
5149 /* XXX - MC not implemented */
5150
5151 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5152 return 0;
5153
5154 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5155 if (!nla)
5156 goto nla_put_failure;
5157 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5158
5159 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5160 if (!nla)
5161 goto nla_put_failure;
5162 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5163
5164 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5165 if (!nla)
5166 goto nla_put_failure;
5167
5168 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5169 goto nla_put_failure;
5170
5171 read_lock_bh(&idev->lock);
5172 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5173 read_unlock_bh(&idev->lock);
5174
5175 return 0;
5176
5177 nla_put_failure:
5178 return -EMSGSIZE;
5179 }
5180
5181 static size_t inet6_get_link_af_size(const struct net_device *dev,
5182 u32 ext_filter_mask)
5183 {
5184 if (!__in6_dev_get(dev))
5185 return 0;
5186
5187 return inet6_ifla6_size();
5188 }
5189
5190 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5191 u32 ext_filter_mask)
5192 {
5193 struct inet6_dev *idev = __in6_dev_get(dev);
5194
5195 if (!idev)
5196 return -ENODATA;
5197
5198 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5199 return -EMSGSIZE;
5200
5201 return 0;
5202 }
5203
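/* Install a new interface token (the low 64 bits used for tokenized
 * addresses): store it in the idev, re-send a router solicitation when
 * the interface is ready and has a usable link-local address, and zero
 * the lifetimes of existing tokenized addresses so addrconf_verify
 * removes them.
 */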
5204 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5205 {
5206 struct inet6_ifaddr *ifp;
5207 struct net_device *dev = idev->dev;
5208 bool clear_token, update_rs = false;
5209 struct in6_addr ll_addr;
5210
5211 ASSERT_RTNL();
5212
5213 if (!token)
5214 return -EINVAL;
5215 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5216 return -EINVAL;
5217 if (!ipv6_accept_ra(idev))
5218 return -EINVAL;
5219 if (idev->cnf.rtr_solicits == 0)
5220 return -EINVAL;
5221
5222 write_lock_bh(&idev->lock);
5223
5224 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5225 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5226
5227 write_unlock_bh(&idev->lock);
5228
5229 clear_token = ipv6_addr_any(token);
5230 if (clear_token)
5231 goto update_lft;
5232
5233 if (!idev->dead && (idev->if_flags & IF_READY) &&
5234 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5235 IFA_F_OPTIMISTIC)) {
5236 /* If we're not ready, normal ifup will take care of this.
5237 * Otherwise we need to send our router solicitation here.
5238 */
5239 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5240 update_rs = true;
5241 }
5242
5243 update_lft:
5244 write_lock_bh(&idev->lock);
5245
5246 if (update_rs) {
5247 idev->if_flags |= IF_RS_SENT;
5248 idev->rs_interval = rfc3315_s14_backoff_init(
5249 idev->cnf.rtr_solicit_interval);
5250 idev->rs_probes = 1;
5251 addrconf_mod_rs_timer(idev, idev->rs_interval);
5252 }
5253
5254 /* Well, that's kinda nasty: expire the existing tokenized addresses so they get regenerated with the new token. */
5255 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5256 spin_lock(&ifp->lock);
5257 if (ifp->tokenized) {
5258 ifp->valid_lft = 0;
5259 ifp->prefered_lft = 0;
5260 }
5261 spin_unlock(&ifp->lock);
5262 }
5263
5264 write_unlock_bh(&idev->lock);
5265 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5266 addrconf_verify_rtnl();
5267 return 0;
5268 }
5269
5270 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5271 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5272 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5273 };
5274
5275 static int inet6_validate_link_af(const struct net_device *dev,
5276 const struct nlattr *nla)
5277 {
5278 struct nlattr *tb[IFLA_INET6_MAX + 1];
5279
5280 if (dev && !__in6_dev_get(dev))
5281 return -EAFNOSUPPORT;
5282
5283 return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy,
5284 NULL);
5285 }
5286
5287 static int check_addr_gen_mode(int mode)
5288 {
5289 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5290 mode != IN6_ADDR_GEN_MODE_NONE &&
5291 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5292 mode != IN6_ADDR_GEN_MODE_RANDOM)
5293 return -EINVAL;
5294 return 1;
5295 }
5296
5297 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5298 int mode)
5299 {
5300 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5301 !idev->cnf.stable_secret.initialized &&
5302 !net->ipv6.devconf_dflt->stable_secret.initialized)
5303 return -EINVAL;
5304 return 1;
5305 }
5306
5307 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5308 {
5309 int err = -EINVAL;
5310 struct inet6_dev *idev = __in6_dev_get(dev);
5311 struct nlattr *tb[IFLA_INET6_MAX + 1];
5312
5313 if (!idev)
5314 return -EAFNOSUPPORT;
5315
5316 if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5317 BUG();
5318
5319 if (tb[IFLA_INET6_TOKEN]) {
5320 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5321 if (err)
5322 return err;
5323 }
5324
5325 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5326 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5327
5328 if (check_addr_gen_mode(mode) < 0 ||
5329 check_stable_privacy(idev, dev_net(dev), mode) < 0)
5330 return -EINVAL;
5331
5332 idev->cnf.addr_gen_mode = mode;
5333 err = 0;
5334 }
5335
5336 return err;
5337 }
5338
5339 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5340 u32 portid, u32 seq, int event, unsigned int flags)
5341 {
5342 struct net_device *dev = idev->dev;
5343 struct ifinfomsg *hdr;
5344 struct nlmsghdr *nlh;
5345 void *protoinfo;
5346
5347 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5348 if (!nlh)
5349 return -EMSGSIZE;
5350
5351 hdr = nlmsg_data(nlh);
5352 hdr->ifi_family = AF_INET6;
5353 hdr->__ifi_pad = 0;
5354 hdr->ifi_type = dev->type;
5355 hdr->ifi_index = dev->ifindex;
5356 hdr->ifi_flags = dev_get_flags(dev);
5357 hdr->ifi_change = 0;
5358
5359 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5360 (dev->addr_len &&
5361 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5362 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5363 (dev->ifindex != dev_get_iflink(dev) &&
5364 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5365 nla_put_u8(skb, IFLA_OPERSTATE,
5366 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5367 goto nla_put_failure;
5368 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
5369 if (!protoinfo)
5370 goto nla_put_failure;
5371
5372 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5373 goto nla_put_failure;
5374
5375 nla_nest_end(skb, protoinfo);
5376 nlmsg_end(skb, nlh);
5377 return 0;
5378
5379 nla_put_failure:
5380 nlmsg_cancel(skb, nlh);
5381 return -EMSGSIZE;
5382 }
5383
5384 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5385 {
5386 struct net *net = sock_net(skb->sk);
5387 int h, s_h;
5388 int idx = 0, s_idx;
5389 struct net_device *dev;
5390 struct inet6_dev *idev;
5391 struct hlist_head *head;
5392
5393 s_h = cb->args[0];
5394 s_idx = cb->args[1];
5395
5396 rcu_read_lock();
5397 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5398 idx = 0;
5399 head = &net->dev_index_head[h];
5400 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5401 if (idx < s_idx)
5402 goto cont;
5403 idev = __in6_dev_get(dev);
5404 if (!idev)
5405 goto cont;
5406 if (inet6_fill_ifinfo(skb, idev,
5407 NETLINK_CB(cb->skb).portid,
5408 cb->nlh->nlmsg_seq,
5409 RTM_NEWLINK, NLM_F_MULTI) < 0)
5410 goto out;
5411 cont:
5412 idx++;
5413 }
5414 }
5415 out:
5416 rcu_read_unlock();
5417 cb->args[1] = idx;
5418 cb->args[0] = h;
5419
5420 return skb->len;
5421 }
5422
5423 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5424 {
5425 struct sk_buff *skb;
5426 struct net *net = dev_net(idev->dev);
5427 int err = -ENOBUFS;
5428
5429 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5430 if (!skb)
5431 goto errout;
5432
5433 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5434 if (err < 0) {
5435 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
5436 WARN_ON(err == -EMSGSIZE);
5437 kfree_skb(skb);
5438 goto errout;
5439 }
5440 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
5441 return;
5442 errout:
5443 if (err < 0)
5444 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
5445 }
5446
5447 static inline size_t inet6_prefix_nlmsg_size(void)
5448 {
5449 return NLMSG_ALIGN(sizeof(struct prefixmsg))
5450 + nla_total_size(sizeof(struct in6_addr))
5451 + nla_total_size(sizeof(struct prefix_cacheinfo));
5452 }
5453
5454 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
5455 struct prefix_info *pinfo, u32 portid, u32 seq,
5456 int event, unsigned int flags)
5457 {
5458 struct prefixmsg *pmsg;
5459 struct nlmsghdr *nlh;
5460 struct prefix_cacheinfo ci;
5461
5462 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
5463 if (!nlh)
5464 return -EMSGSIZE;
5465
5466 pmsg = nlmsg_data(nlh);
5467 pmsg->prefix_family = AF_INET6;
5468 pmsg->prefix_pad1 = 0;
5469 pmsg->prefix_pad2 = 0;
5470 pmsg->prefix_ifindex = idev->dev->ifindex;
5471 pmsg->prefix_len = pinfo->prefix_len;
5472 pmsg->prefix_type = pinfo->type;
5473 pmsg->prefix_pad3 = 0;
5474 pmsg->prefix_flags = 0;
5475 if (pinfo->onlink)
5476 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
5477 if (pinfo->autoconf)
5478 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
5479
5480 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
5481 goto nla_put_failure;
5482 ci.preferred_time = ntohl(pinfo->prefered);
5483 ci.valid_time = ntohl(pinfo->valid);
5484 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
5485 goto nla_put_failure;
5486 nlmsg_end(skb, nlh);
5487 return 0;
5488
5489 nla_put_failure:
5490 nlmsg_cancel(skb, nlh);
5491 return -EMSGSIZE;
5492 }
5493
5494 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
5495 struct prefix_info *pinfo)
5496 {
5497 struct sk_buff *skb;
5498 struct net *net = dev_net(idev->dev);
5499 int err = -ENOBUFS;
5500
5501 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
5502 if (!skb)
5503 goto errout;
5504
5505 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
5506 if (err < 0) {
5507 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
5508 WARN_ON(err == -EMSGSIZE);
5509 kfree_skb(skb);
5510 goto errout;
5511 }
5512 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
5513 return;
5514 errout:
5515 if (err < 0)
5516 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
5517 }
5518
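/* Propagate an address event beyond netlink: on RTM_NEWADDR insert the
 * host route, peer route and (for forwarding interfaces) the anycast
 * entry; on RTM_DELADDR tear those down, leave the solicited-node
 * group and bump the IPv6 genid so cached routes are revalidated.
 */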
5519 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5520 {
5521 struct net *net = dev_net(ifp->idev->dev);
5522
5523 if (event)
5524 ASSERT_RTNL();
5525
5526 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
5527
5528 switch (event) {
5529 case RTM_NEWADDR:
5530 /*
5531 * If the address was optimistic
5532 * we inserted the route at the start of
5533 * our DAD process, so we don't need
5534 * to do it again
5535 */
5536 if (!rcu_access_pointer(ifp->rt->rt6i_node))
5537 ip6_ins_rt(ifp->rt);
5538 if (ifp->idev->cnf.forwarding)
5539 addrconf_join_anycast(ifp);
5540 if (!ipv6_addr_any(&ifp->peer_addr))
5541 addrconf_prefix_route(&ifp->peer_addr, 128,
5542 ifp->idev->dev, 0, 0);
5543 break;
5544 case RTM_DELADDR:
5545 if (ifp->idev->cnf.forwarding)
5546 addrconf_leave_anycast(ifp);
5547 addrconf_leave_solict(ifp->idev, &ifp->addr);
5548 if (!ipv6_addr_any(&ifp->peer_addr)) {
5549 struct rt6_info *rt;
5550
5551 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5552 ifp->idev->dev, 0, 0);
5553 if (rt)
5554 ip6_del_rt(rt);
5555 }
5556 if (ifp->rt) {
5557 if (dst_hold_safe(&ifp->rt->dst))
5558 ip6_del_rt(ifp->rt);
5559 }
5560 rt_genid_bump_ipv6(net);
5561 break;
5562 }
5563 atomic_inc(&net->ipv6.dev_addr_genid);
5564 }
5565
5566 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5567 {
5568 rcu_read_lock_bh();
5569 if (likely(ifp->idev->dead == 0))
5570 __ipv6_ifa_notify(event, ifp);
5571 rcu_read_unlock_bh();
5572 }
5573
5574 #ifdef CONFIG_SYSCTL
5575
5576 static
5577 int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
5578 void __user *buffer, size_t *lenp, loff_t *ppos)
5579 {
5580 int *valp = ctl->data;
5581 int val = *valp;
5582 loff_t pos = *ppos;
5583 struct ctl_table lctl;
5584 int ret;
5585
5586 /*
5587 * ctl->data points to idev->cnf.forwarding; we must not
5588 * modify it until we hold the rtnl lock.
5589 */
5590 lctl = *ctl;
5591 lctl.data = &val;
5592
5593 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5594
5595 if (write)
5596 ret = addrconf_fixup_forwarding(ctl, valp, val);
5597 if (ret)
5598 *ppos = pos;
5599 return ret;
5600 }
5601
5602 static
5603 int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
5604 void __user *buffer, size_t *lenp, loff_t *ppos)
5605 {
5606 struct inet6_dev *idev = ctl->extra1;
5607 int min_mtu = IPV6_MIN_MTU;
5608 struct ctl_table lctl;
5609
5610 lctl = *ctl;
5611 lctl.extra1 = &min_mtu;
5612 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
5613
5614 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
5615 }
5616
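/* Replay a NETDEV_DOWN or NETDEV_UP notification on the device so that
 * flipping disable_ipv6 tears down or rebuilds its IPv6 state.
 */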
5617 static void dev_disable_change(struct inet6_dev *idev)
5618 {
5619 struct netdev_notifier_info info;
5620
5621 if (!idev || !idev->dev)
5622 return;
5623
5624 netdev_notifier_info_init(&info, idev->dev);
5625 if (idev->cnf.disable_ipv6)
5626 addrconf_notify(NULL, NETDEV_DOWN, &info);
5627 else
5628 addrconf_notify(NULL, NETDEV_UP, &info);
5629 }
5630
5631 static void addrconf_disable_change(struct net *net, __s32 newf)
5632 {
5633 struct net_device *dev;
5634 struct inet6_dev *idev;
5635
5636 for_each_netdev(net, dev) {
5637 idev = __in6_dev_get(dev);
5638 if (idev) {
5639 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
5640 idev->cnf.disable_ipv6 = newf;
5641 if (changed)
5642 dev_disable_change(idev);
5643 }
5644 }
5645 }
5646
5647 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
5648 {
5649 struct net *net;
5650 int old;
5651
5652 if (!rtnl_trylock())
5653 return restart_syscall();
5654
5655 net = (struct net *)table->extra2;
5656 old = *p;
5657 *p = newf;
5658
5659 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
5660 rtnl_unlock();
5661 return 0;
5662 }
5663
5664 if (p == &net->ipv6.devconf_all->disable_ipv6) {
5665 net->ipv6.devconf_dflt->disable_ipv6 = newf;
5666 addrconf_disable_change(net, newf);
5667 } else if ((!newf) ^ (!old))
5668 dev_disable_change((struct inet6_dev *)table->extra1);
5669
5670 rtnl_unlock();
5671 return 0;
5672 }
5673
5674 static
5675 int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
5676 void __user *buffer, size_t *lenp, loff_t *ppos)
5677 {
5678 int *valp = ctl->data;
5679 int val = *valp;
5680 loff_t pos = *ppos;
5681 struct ctl_table lctl;
5682 int ret;
5683
5684 /*
5685 * ctl->data points to idev->cnf.disable_ipv6; we must not
5686 * modify it until we hold the rtnl lock.
5687 */
5688 lctl = *ctl;
5689 lctl.data = &val;
5690
5691 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5692
5693 if (write)
5694 ret = addrconf_disable_ipv6(ctl, valp, val);
5695 if (ret)
5696 *ppos = pos;
5697 return ret;
5698 }
5699
5700 static
5701 int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
5702 void __user *buffer, size_t *lenp, loff_t *ppos)
5703 {
5704 int *valp = ctl->data;
5705 int ret;
5706 int old, new;
5707
5708 old = *valp;
5709 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5710 new = *valp;
5711
5712 if (write && old != new) {
5713 struct net *net = ctl->extra2;
5714
5715 if (!rtnl_trylock())
5716 return restart_syscall();
5717
5718 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
5719 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5720 NETCONFA_PROXY_NEIGH,
5721 NETCONFA_IFINDEX_DEFAULT,
5722 net->ipv6.devconf_dflt);
5723 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
5724 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5725 NETCONFA_PROXY_NEIGH,
5726 NETCONFA_IFINDEX_ALL,
5727 net->ipv6.devconf_all);
5728 else {
5729 struct inet6_dev *idev = ctl->extra1;
5730
5731 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5732 NETCONFA_PROXY_NEIGH,
5733 idev->dev->ifindex,
5734 &idev->cnf);
5735 }
5736 rtnl_unlock();
5737 }
5738
5739 return ret;
5740 }
5741
5742 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
5743 void __user *buffer, size_t *lenp,
5744 loff_t *ppos)
5745 {
5746 int ret = 0;
5747 int new_val;
5748 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
5749 struct net *net = (struct net *)ctl->extra2;
5750
5751 if (!rtnl_trylock())
5752 return restart_syscall();
5753
5754 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5755
5756 if (write) {
5757 new_val = *((int *)ctl->data);
5758
5759 if (check_addr_gen_mode(new_val) < 0) {
5760 ret = -EINVAL;
5761 goto out;
5762 }
5763
5764 /* request for default */
5765 if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
5766 ipv6_devconf_dflt.addr_gen_mode = new_val;
5767
5768 /* request for individual net device */
5769 } else {
5770 if (!idev)
5771 goto out;
5772
5773 if (check_stable_privacy(idev, net, new_val) < 0) {
5774 ret = -EINVAL;
5775 goto out;
5776 }
5777
5778 if (idev->cnf.addr_gen_mode != new_val) {
5779 idev->cnf.addr_gen_mode = new_val;
5780 addrconf_dev_config(idev->dev);
5781 }
5782 }
5783 }
5784
5785 out:
5786 rtnl_unlock();
5787
5788 return ret;
5789 }
5790
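/* The stable_secret sysctl is read and written as an IPv6 address
 * string: format the current secret for reads, parse writes with
 * in6_pton(), and switch the affected devices to the stable-privacy
 * address generation mode.
 */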
5791 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
5792 void __user *buffer, size_t *lenp,
5793 loff_t *ppos)
5794 {
5795 int err;
5796 struct in6_addr addr;
5797 char str[IPV6_MAX_STRLEN];
5798 struct ctl_table lctl = *ctl;
5799 struct net *net = ctl->extra2;
5800 struct ipv6_stable_secret *secret = ctl->data;
5801
5802 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
5803 return -EIO;
5804
5805 lctl.maxlen = IPV6_MAX_STRLEN;
5806 lctl.data = str;
5807
5808 if (!rtnl_trylock())
5809 return restart_syscall();
5810
5811 if (!write && !secret->initialized) {
5812 err = -EIO;
5813 goto out;
5814 }
5815
5816 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
5817 if (err >= sizeof(str)) {
5818 err = -EIO;
5819 goto out;
5820 }
5821
5822 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
5823 if (err || !write)
5824 goto out;
5825
5826 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
5827 err = -EIO;
5828 goto out;
5829 }
5830
5831 secret->initialized = true;
5832 secret->secret = addr;
5833
5834 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
5835 struct net_device *dev;
5836
5837 for_each_netdev(net, dev) {
5838 struct inet6_dev *idev = __in6_dev_get(dev);
5839
5840 if (idev) {
5841 idev->cnf.addr_gen_mode =
5842 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5843 }
5844 }
5845 } else {
5846 struct inet6_dev *idev = ctl->extra1;
5847
5848 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5849 }
5850
5851 out:
5852 rtnl_unlock();
5853
5854 return err;
5855 }
5856
5857 static
5858 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
5859 int write,
5860 void __user *buffer,
5861 size_t *lenp,
5862 loff_t *ppos)
5863 {
5864 int *valp = ctl->data;
5865 int val = *valp;
5866 loff_t pos = *ppos;
5867 struct ctl_table lctl;
5868 int ret;
5869
5870 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
5871 * we must not modify it until we hold the rtnl lock.
5872 */
5873 lctl = *ctl;
5874 lctl.data = &val;
5875
5876 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5877
5878 if (write)
5879 ret = addrconf_fixup_linkdown(ctl, valp, val);
5880 if (ret)
5881 *ppos = pos;
5882 return ret;
5883 }
5884
5885 static
5886 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
5887 {
5888 if (rt) {
5889 if (action)
5890 rt->dst.flags |= DST_NOPOLICY;
5891 else
5892 rt->dst.flags &= ~DST_NOPOLICY;
5893 }
5894 }
5895
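/* Apply the disable_policy setting to every address route on the
 * device, including the per-cpu route copies, by toggling the
 * DST_NOPOLICY flag.
 */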
5896 static
5897 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
5898 {
5899 struct inet6_ifaddr *ifa;
5900
5901 read_lock_bh(&idev->lock);
5902 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5903 spin_lock(&ifa->lock);
5904 if (ifa->rt) {
5905 struct rt6_info *rt = ifa->rt;
5906 int cpu;
5907
5908 rcu_read_lock();
5909 addrconf_set_nopolicy(ifa->rt, val);
5910 if (rt->rt6i_pcpu) {
5911 for_each_possible_cpu(cpu) {
5912 struct rt6_info **rtp;
5913
5914 rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
5915 addrconf_set_nopolicy(*rtp, val);
5916 }
5917 }
5918 rcu_read_unlock();
5919 }
5920 spin_unlock(&ifa->lock);
5921 }
5922 read_unlock_bh(&idev->lock);
5923 }
5924
5925 static
5926 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
5927 {
5928 struct inet6_dev *idev;
5929 struct net *net;
5930
5931 if (!rtnl_trylock())
5932 return restart_syscall();
5933
5934 *valp = val;
5935
5936 net = (struct net *)ctl->extra2;
5937 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
5938 rtnl_unlock();
5939 return 0;
5940 }
5941
5942 if (valp == &net->ipv6.devconf_all->disable_policy) {
5943 struct net_device *dev;
5944
5945 for_each_netdev(net, dev) {
5946 idev = __in6_dev_get(dev);
5947 if (idev)
5948 addrconf_disable_policy_idev(idev, val);
5949 }
5950 } else {
5951 idev = (struct inet6_dev *)ctl->extra1;
5952 addrconf_disable_policy_idev(idev, val);
5953 }
5954
5955 rtnl_unlock();
5956 return 0;
5957 }
5958
5959 static
5960 int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
5961 void __user *buffer, size_t *lenp,
5962 loff_t *ppos)
5963 {
5964 int *valp = ctl->data;
5965 int val = *valp;
5966 loff_t pos = *ppos;
5967 struct ctl_table lctl;
5968 int ret;
5969
5970 lctl = *ctl;
5971 lctl.data = &val;
5972 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5973
5974 if (write && (*valp != val))
5975 ret = addrconf_disable_policy(ctl, valp, val);
5976
5977 if (ret)
5978 *ppos = pos;
5979
5980 return ret;
5981 }
5982
5983 static int minus_one = -1;
5984 static const int one = 1;
5985 static const int two_five_five = 255;
5986
5987 static const struct ctl_table addrconf_sysctl[] = {
5988 {
5989 .procname = "forwarding",
5990 .data = &ipv6_devconf.forwarding,
5991 .maxlen = sizeof(int),
5992 .mode = 0644,
5993 .proc_handler = addrconf_sysctl_forward,
5994 },
5995 {
5996 .procname = "hop_limit",
5997 .data = &ipv6_devconf.hop_limit,
5998 .maxlen = sizeof(int),
5999 .mode = 0644,
6000 .proc_handler = proc_dointvec_minmax,
6001 .extra1 = (void *)&one,
6002 .extra2 = (void *)&two_five_five,
6003 },
6004 {
6005 .procname = "mtu",
6006 .data = &ipv6_devconf.mtu6,
6007 .maxlen = sizeof(int),
6008 .mode = 0644,
6009 .proc_handler = addrconf_sysctl_mtu,
6010 },
6011 {
6012 .procname = "accept_ra",
6013 .data = &ipv6_devconf.accept_ra,
6014 .maxlen = sizeof(int),
6015 .mode = 0644,
6016 .proc_handler = proc_dointvec,
6017 },
6018 {
6019 .procname = "accept_redirects",
6020 .data = &ipv6_devconf.accept_redirects,
6021 .maxlen = sizeof(int),
6022 .mode = 0644,
6023 .proc_handler = proc_dointvec,
6024 },
6025 {
6026 .procname = "autoconf",
6027 .data = &ipv6_devconf.autoconf,
6028 .maxlen = sizeof(int),
6029 .mode = 0644,
6030 .proc_handler = proc_dointvec,
6031 },
6032 {
6033 .procname = "dad_transmits",
6034 .data = &ipv6_devconf.dad_transmits,
6035 .maxlen = sizeof(int),
6036 .mode = 0644,
6037 .proc_handler = proc_dointvec,
6038 },
6039 {
6040 .procname = "router_solicitations",
6041 .data = &ipv6_devconf.rtr_solicits,
6042 .maxlen = sizeof(int),
6043 .mode = 0644,
6044 .proc_handler = proc_dointvec_minmax,
6045 .extra1 = &minus_one,
6046 },
6047 {
6048 .procname = "router_solicitation_interval",
6049 .data = &ipv6_devconf.rtr_solicit_interval,
6050 .maxlen = sizeof(int),
6051 .mode = 0644,
6052 .proc_handler = proc_dointvec_jiffies,
6053 },
6054 {
6055 .procname = "router_solicitation_max_interval",
6056 .data = &ipv6_devconf.rtr_solicit_max_interval,
6057 .maxlen = sizeof(int),
6058 .mode = 0644,
6059 .proc_handler = proc_dointvec_jiffies,
6060 },
6061 {
6062 .procname = "router_solicitation_delay",
6063 .data = &ipv6_devconf.rtr_solicit_delay,
6064 .maxlen = sizeof(int),
6065 .mode = 0644,
6066 .proc_handler = proc_dointvec_jiffies,
6067 },
6068 {
6069 .procname = "force_mld_version",
6070 .data = &ipv6_devconf.force_mld_version,
6071 .maxlen = sizeof(int),
6072 .mode = 0644,
6073 .proc_handler = proc_dointvec,
6074 },
6075 {
6076 .procname = "mldv1_unsolicited_report_interval",
6077 .data =
6078 &ipv6_devconf.mldv1_unsolicited_report_interval,
6079 .maxlen = sizeof(int),
6080 .mode = 0644,
6081 .proc_handler = proc_dointvec_ms_jiffies,
6082 },
6083 {
6084 .procname = "mldv2_unsolicited_report_interval",
6085 .data =
6086 &ipv6_devconf.mldv2_unsolicited_report_interval,
6087 .maxlen = sizeof(int),
6088 .mode = 0644,
6089 .proc_handler = proc_dointvec_ms_jiffies,
6090 },
6091 {
6092 .procname = "use_tempaddr",
6093 .data = &ipv6_devconf.use_tempaddr,
6094 .maxlen = sizeof(int),
6095 .mode = 0644,
6096 .proc_handler = proc_dointvec,
6097 },
6098 {
6099 .procname = "temp_valid_lft",
6100 .data = &ipv6_devconf.temp_valid_lft,
6101 .maxlen = sizeof(int),
6102 .mode = 0644,
6103 .proc_handler = proc_dointvec,
6104 },
6105 {
6106 .procname = "temp_prefered_lft",
6107 .data = &ipv6_devconf.temp_prefered_lft,
6108 .maxlen = sizeof(int),
6109 .mode = 0644,
6110 .proc_handler = proc_dointvec,
6111 },
6112 {
6113 .procname = "regen_max_retry",
6114 .data = &ipv6_devconf.regen_max_retry,
6115 .maxlen = sizeof(int),
6116 .mode = 0644,
6117 .proc_handler = proc_dointvec,
6118 },
6119 {
6120 .procname = "max_desync_factor",
6121 .data = &ipv6_devconf.max_desync_factor,
6122 .maxlen = sizeof(int),
6123 .mode = 0644,
6124 .proc_handler = proc_dointvec,
6125 },
6126 {
6127 .procname = "max_addresses",
6128 .data = &ipv6_devconf.max_addresses,
6129 .maxlen = sizeof(int),
6130 .mode = 0644,
6131 .proc_handler = proc_dointvec,
6132 },
6133 {
6134 .procname = "accept_ra_defrtr",
6135 .data = &ipv6_devconf.accept_ra_defrtr,
6136 .maxlen = sizeof(int),
6137 .mode = 0644,
6138 .proc_handler = proc_dointvec,
6139 },
6140 {
6141 .procname = "accept_ra_min_hop_limit",
6142 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6143 .maxlen = sizeof(int),
6144 .mode = 0644,
6145 .proc_handler = proc_dointvec,
6146 },
6147 {
6148 .procname = "accept_ra_pinfo",
6149 .data = &ipv6_devconf.accept_ra_pinfo,
6150 .maxlen = sizeof(int),
6151 .mode = 0644,
6152 .proc_handler = proc_dointvec,
6153 },
6154 #ifdef CONFIG_IPV6_ROUTER_PREF
6155 {
6156 .procname = "accept_ra_rtr_pref",
6157 .data = &ipv6_devconf.accept_ra_rtr_pref,
6158 .maxlen = sizeof(int),
6159 .mode = 0644,
6160 .proc_handler = proc_dointvec,
6161 },
6162 {
6163 .procname = "router_probe_interval",
6164 .data = &ipv6_devconf.rtr_probe_interval,
6165 .maxlen = sizeof(int),
6166 .mode = 0644,
6167 .proc_handler = proc_dointvec_jiffies,
6168 },
6169 #ifdef CONFIG_IPV6_ROUTE_INFO
6170 {
6171 .procname = "accept_ra_rt_info_min_plen",
6172 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6173 .maxlen = sizeof(int),
6174 .mode = 0644,
6175 .proc_handler = proc_dointvec,
6176 },
6177 {
6178 .procname = "accept_ra_rt_info_max_plen",
6179 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6180 .maxlen = sizeof(int),
6181 .mode = 0644,
6182 .proc_handler = proc_dointvec,
6183 },
6184 #endif
6185 #endif
6186 {
6187 .procname = "proxy_ndp",
6188 .data = &ipv6_devconf.proxy_ndp,
6189 .maxlen = sizeof(int),
6190 .mode = 0644,
6191 .proc_handler = addrconf_sysctl_proxy_ndp,
6192 },
6193 {
6194 .procname = "accept_source_route",
6195 .data = &ipv6_devconf.accept_source_route,
6196 .maxlen = sizeof(int),
6197 .mode = 0644,
6198 .proc_handler = proc_dointvec,
6199 },
6200 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6201 {
6202 .procname = "optimistic_dad",
6203 .data = &ipv6_devconf.optimistic_dad,
6204 .maxlen = sizeof(int),
6205 .mode = 0644,
6206 .proc_handler = proc_dointvec,
6207 },
6208 {
6209 .procname = "use_optimistic",
6210 .data = &ipv6_devconf.use_optimistic,
6211 .maxlen = sizeof(int),
6212 .mode = 0644,
6213 .proc_handler = proc_dointvec,
6214 },
6215 #endif
6216 #ifdef CONFIG_IPV6_MROUTE
6217 {
6218 .procname = "mc_forwarding",
6219 .data = &ipv6_devconf.mc_forwarding,
6220 .maxlen = sizeof(int),
6221 .mode = 0444,
6222 .proc_handler = proc_dointvec,
6223 },
6224 #endif
6225 {
6226 .procname = "disable_ipv6",
6227 .data = &ipv6_devconf.disable_ipv6,
6228 .maxlen = sizeof(int),
6229 .mode = 0644,
6230 .proc_handler = addrconf_sysctl_disable,
6231 },
6232 {
6233 .procname = "accept_dad",
6234 .data = &ipv6_devconf.accept_dad,
6235 .maxlen = sizeof(int),
6236 .mode = 0644,
6237 .proc_handler = proc_dointvec,
6238 },
6239 {
6240 .procname = "force_tllao",
6241 .data = &ipv6_devconf.force_tllao,
6242 .maxlen = sizeof(int),
6243 .mode = 0644,
6244 .proc_handler = proc_dointvec,
6245 },
6246 {
6247 .procname = "ndisc_notify",
6248 .data = &ipv6_devconf.ndisc_notify,
6249 .maxlen = sizeof(int),
6250 .mode = 0644,
6251 .proc_handler = proc_dointvec,
6252 },
6253 {
6254 .procname = "suppress_frag_ndisc",
6255 .data = &ipv6_devconf.suppress_frag_ndisc,
6256 .maxlen = sizeof(int),
6257 .mode = 0644,
6258 .proc_handler = proc_dointvec,
6259 },
6260 {
6261 .procname = "accept_ra_from_local",
6262 .data = &ipv6_devconf.accept_ra_from_local,
6263 .maxlen = sizeof(int),
6264 .mode = 0644,
6265 .proc_handler = proc_dointvec,
6266 },
6267 {
6268 .procname = "accept_ra_mtu",
6269 .data = &ipv6_devconf.accept_ra_mtu,
6270 .maxlen = sizeof(int),
6271 .mode = 0644,
6272 .proc_handler = proc_dointvec,
6273 },
6274 {
6275 .procname = "stable_secret",
6276 .data = &ipv6_devconf.stable_secret,
6277 .maxlen = IPV6_MAX_STRLEN,
6278 .mode = 0600,
6279 .proc_handler = addrconf_sysctl_stable_secret,
6280 },
6281 {
6282 .procname = "use_oif_addrs_only",
6283 .data = &ipv6_devconf.use_oif_addrs_only,
6284 .maxlen = sizeof(int),
6285 .mode = 0644,
6286 .proc_handler = proc_dointvec,
6287 },
6288 {
6289 .procname = "ignore_routes_with_linkdown",
6290 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6291 .maxlen = sizeof(int),
6292 .mode = 0644,
6293 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6294 },
6295 {
6296 .procname = "drop_unicast_in_l2_multicast",
6297 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6298 .maxlen = sizeof(int),
6299 .mode = 0644,
6300 .proc_handler = proc_dointvec,
6301 },
6302 {
6303 .procname = "drop_unsolicited_na",
6304 .data = &ipv6_devconf.drop_unsolicited_na,
6305 .maxlen = sizeof(int),
6306 .mode = 0644,
6307 .proc_handler = proc_dointvec,
6308 },
6309 {
6310 .procname = "keep_addr_on_down",
6311 .data = &ipv6_devconf.keep_addr_on_down,
6312 .maxlen = sizeof(int),
6313 .mode = 0644,
6314 .proc_handler = proc_dointvec,
6315
6316 },
6317 {
6318 .procname = "seg6_enabled",
6319 .data = &ipv6_devconf.seg6_enabled,
6320 .maxlen = sizeof(int),
6321 .mode = 0644,
6322 .proc_handler = proc_dointvec,
6323 },
6324 #ifdef CONFIG_IPV6_SEG6_HMAC
6325 {
6326 .procname = "seg6_require_hmac",
6327 .data = &ipv6_devconf.seg6_require_hmac,
6328 .maxlen = sizeof(int),
6329 .mode = 0644,
6330 .proc_handler = proc_dointvec,
6331 },
6332 #endif
6333 {
6334 .procname = "enhanced_dad",
6335 .data = &ipv6_devconf.enhanced_dad,
6336 .maxlen = sizeof(int),
6337 .mode = 0644,
6338 .proc_handler = proc_dointvec,
6339 },
6340 {
6341 .procname = "addr_gen_mode",
6342 .data = &ipv6_devconf.addr_gen_mode,
6343 .maxlen = sizeof(int),
6344 .mode = 0644,
6345 .proc_handler = addrconf_sysctl_addr_gen_mode,
6346 },
6347 {
6348 .procname = "disable_policy",
6349 .data = &ipv6_devconf.disable_policy,
6350 .maxlen = sizeof(int),
6351 .mode = 0644,
6352 .proc_handler = addrconf_sysctl_disable_policy,
6353 },
6354 {
6355 /* sentinel */
6356 }
6357 };
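
/*
 * Illustrative sketch, not part of this file: every entry in the table
 * above surfaces to userspace as a file under /proc/sys/net/ipv6/conf/<dev>/
 * once __addrconf_sysctl_register() below registers the "net/ipv6/conf/%s"
 * path.  The small standalone program below reads one such knob; the choice
 * of "disable_ipv6" and the "all" pseudo-device are only examples.
 */
#include <stdio.h>

static int read_ipv6_devconf(const char *dev, const char *knob)
{
	char path[128];
	int val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/net/ipv6/conf/%s/%s",
		 dev, knob);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* "all" and "default" always exist; per-device directories appear
	 * as interfaces register (see addrconf_sysctl_register() below).
	 */
	printf("disable_ipv6(all) = %d\n",
	       read_ipv6_devconf("all", "disable_ipv6"));
	return 0;
}
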
6358
6359 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6360 struct inet6_dev *idev, struct ipv6_devconf *p)
6361 {
6362 int i, ifindex;
6363 struct ctl_table *table;
6364 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6365
6366 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6367 if (!table)
6368 goto out;
6369
6370 for (i = 0; table[i].data; i++) {
6371 table[i].data += (char *)p - (char *)&ipv6_devconf;
6372 /* If one of these is already set, then it is not safe to
6373 * overwrite either of them: this makes proc_dointvec_minmax
6374 * usable.
6375 */
6376 if (!table[i].extra1 && !table[i].extra2) {
6377 table[i].extra1 = idev; /* embedded; no ref */
6378 table[i].extra2 = net;
6379 }
6380 }
6381
6382 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6383
6384 p->sysctl_header = register_net_sysctl(net, path, table);
6385 if (!p->sysctl_header)
6386 goto free;
6387
6388 if (!strcmp(dev_name, "all"))
6389 ifindex = NETCONFA_IFINDEX_ALL;
6390 else if (!strcmp(dev_name, "default"))
6391 ifindex = NETCONFA_IFINDEX_DEFAULT;
6392 else
6393 ifindex = idev->dev->ifindex;
6394 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
6395 ifindex, p);
6396 return 0;
6397
6398 free:
6399 kfree(table);
6400 out:
6401 return -ENOBUFS;
6402 }
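
/*
 * Illustrative sketch, not part of this file: the .data rebasing idiom used
 * by __addrconf_sysctl_register() above, reduced to a toy userspace example.
 * A template table points into a template struct; after copying the table,
 * each data pointer is shifted by the byte offset between the template
 * struct and the per-instance struct, exactly as done with ipv6_devconf
 * above.  "toy_conf", "toy_entry" etc. are invented names for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_conf {
	int hop_limit;
	int mtu;
};

struct toy_entry {
	const char *name;
	void *data;		/* points into a struct toy_conf */
};

static struct toy_conf toy_template = { .hop_limit = 64, .mtu = 1280 };

static struct toy_entry toy_table[] = {
	{ "hop_limit", &toy_template.hop_limit },
	{ "mtu",       &toy_template.mtu },
	{ NULL, NULL }	/* sentinel, like the one ending addrconf_sysctl */
};

int main(void)
{
	struct toy_conf *p = malloc(sizeof(*p));
	struct toy_entry *table = malloc(sizeof(toy_table));
	int i;

	if (!p || !table)
		return 1;

	*p = toy_template;	/* per-instance copy of the template */
	memcpy(table, toy_table, sizeof(toy_table));

	/* Rebase every pointer from the template struct to the new instance. */
	for (i = 0; table[i].data; i++)
		table[i].data = (char *)table[i].data +
				((char *)p - (char *)&toy_template);

	p->mtu = 1500;	/* only the instance changes, not the template */
	for (i = 0; table[i].data; i++)
		printf("%s = %d\n", table[i].name, *(int *)table[i].data);

	free(table);
	free(p);
	return 0;
}
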
6403
6404 static void __addrconf_sysctl_unregister(struct net *net,
6405 struct ipv6_devconf *p, int ifindex)
6406 {
6407 struct ctl_table *table;
6408
6409 if (!p->sysctl_header)
6410 return;
6411
6412 table = p->sysctl_header->ctl_table_arg;
6413 unregister_net_sysctl_table(p->sysctl_header);
6414 p->sysctl_header = NULL;
6415 kfree(table);
6416
6417 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
6418 }
6419
6420 static int addrconf_sysctl_register(struct inet6_dev *idev)
6421 {
6422 int err;
6423
6424 if (!sysctl_dev_name_is_allowed(idev->dev->name))
6425 return -EINVAL;
6426
6427 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
6428 &ndisc_ifinfo_sysctl_change);
6429 if (err)
6430 return err;
6431 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
6432 idev, &idev->cnf);
6433 if (err)
6434 neigh_sysctl_unregister(idev->nd_parms);
6435
6436 return err;
6437 }
6438
6439 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
6440 {
6441 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
6442 idev->dev->ifindex);
6443 neigh_sysctl_unregister(idev->nd_parms);
6444 }
6445
6446
6447 #endif
6448
6449 static int __net_init addrconf_init_net(struct net *net)
6450 {
6451 int err = -ENOMEM;
6452 struct ipv6_devconf *all, *dflt;
6453
6454 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
6455 if (!all)
6456 goto err_alloc_all;
6457
6458 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
6459 if (!dflt)
6460 goto err_alloc_dflt;
6461
6462 /* these will be inherited by all namespaces */
6463 dflt->autoconf = ipv6_defaults.autoconf;
6464 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
6465
6466 dflt->stable_secret.initialized = false;
6467 all->stable_secret.initialized = false;
6468
6469 net->ipv6.devconf_all = all;
6470 net->ipv6.devconf_dflt = dflt;
6471
6472 #ifdef CONFIG_SYSCTL
6473 err = __addrconf_sysctl_register(net, "all", NULL, all);
6474 if (err < 0)
6475 goto err_reg_all;
6476
6477 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
6478 if (err < 0)
6479 goto err_reg_dflt;
6480 #endif
6481 return 0;
6482
6483 #ifdef CONFIG_SYSCTL
6484 err_reg_dflt:
6485 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
6486 err_reg_all:
6487 kfree(dflt);
6488 #endif
6489 err_alloc_dflt:
6490 kfree(all);
6491 err_alloc_all:
6492 return err;
6493 }
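
/*
 * Illustrative sketch, not part of this file: the goto-based unwinding shape
 * used by addrconf_init_net() above, reduced to two steps.  A failure at any
 * step jumps to a label that releases only what was acquired before it.
 * "demo_state", "demo_init" etc. are invented names for illustration.
 */
#include <stdlib.h>

struct demo_state {
	void *a;
	void *b;
};

static int demo_init(struct demo_state *s)
{
	int err = -1;	/* stands in for -ENOMEM */

	s->a = malloc(16);
	if (!s->a)
		goto err_alloc_a;

	s->b = malloc(16);
	if (!s->b)
		goto err_alloc_b;

	return 0;

err_alloc_b:
	free(s->a);
err_alloc_a:
	return err;
}

int main(void)
{
	struct demo_state s;

	return demo_init(&s) ? 1 : 0;
}
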
6494
6495 static void __net_exit addrconf_exit_net(struct net *net)
6496 {
6497 #ifdef CONFIG_SYSCTL
6498 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
6499 NETCONFA_IFINDEX_DEFAULT);
6500 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
6501 NETCONFA_IFINDEX_ALL);
6502 #endif
6503 kfree(net->ipv6.devconf_dflt);
6504 kfree(net->ipv6.devconf_all);
6505 }
6506
6507 static struct pernet_operations addrconf_ops = {
6508 .init = addrconf_init_net,
6509 .exit = addrconf_exit_net,
6510 };
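
/*
 * Illustrative sketch, not part of this file: the minimal shape of a
 * pernet_operations user such as addrconf_ops above.  .init runs for every
 * network namespace as it is created (and for init_net at registration
 * time), .exit runs as each namespace is torn down.  Assumes
 * <net/net_namespace.h>, which this file already includes; "demo_net_init"
 * etc. are invented names for illustration.
 */
static int __net_init demo_net_init(struct net *net)
{
	/* allocate and initialise per-namespace state here */
	return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
	/* release per-namespace state here */
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.exit = demo_net_exit,
};

/* paired with addrconf_init()/addrconf_cleanup() style calls:
 *	register_pernet_subsys(&demo_net_ops);
 *	unregister_pernet_subsys(&demo_net_ops);
 */
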
6511
6512 static struct rtnl_af_ops inet6_ops __read_mostly = {
6513 .family = AF_INET6,
6514 .fill_link_af = inet6_fill_link_af,
6515 .get_link_af_size = inet6_get_link_af_size,
6516 .validate_link_af = inet6_validate_link_af,
6517 .set_link_af = inet6_set_link_af,
6518 };
6519
6520 /*
6521 * Init / cleanup code
6522 */
6523
6524 int __init addrconf_init(void)
6525 {
6526 struct inet6_dev *idev;
6527 int i, err;
6528
6529 err = ipv6_addr_label_init();
6530 if (err < 0) {
6531 pr_crit("%s: cannot initialize default policy table: %d\n",
6532 __func__, err);
6533 goto out;
6534 }
6535
6536 err = register_pernet_subsys(&addrconf_ops);
6537 if (err < 0)
6538 goto out_addrlabel;
6539
6540 addrconf_wq = create_workqueue("ipv6_addrconf");
6541 if (!addrconf_wq) {
6542 err = -ENOMEM;
6543 goto out_nowq;
6544 }
6545
6546 /* The addrconf netdev notifier requires that loopback_dev
6547 * has its ipv6 private information allocated and set up
6548 * before it can bring up and give link-local addresses
6549 * to other devices which are up.
6550 *
6551 * Unfortunately, loopback_dev is not necessarily the first
6552 * entry in the global dev_base list of net devices. In fact,
6553 * it is likely to be the very last entry on that list.
6554 * So this causes the notifier registration below to try and
6555 * give link-local addresses to all devices besides loopback_dev
6556 * first, then loopback_dev, which causes all the non-loopback_dev
6557 * devices to fail to get a link-local address.
6558 *
6559 * So, as a temporary fix, allocate the ipv6 structure for
6560 * loopback_dev first by hand.
6561 * Longer term, all of the dependencies ipv6 has upon the loopback
6562 * device and it being up should be removed.
6563 */
6564 rtnl_lock();
6565 idev = ipv6_add_dev(init_net.loopback_dev);
6566 rtnl_unlock();
6567 if (IS_ERR(idev)) {
6568 err = PTR_ERR(idev);
6569 goto errlo;
6570 }
6571
6572 ip6_route_init_special_entries();
6573
6574 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6575 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
6576
6577 register_netdevice_notifier(&ipv6_dev_notf);
6578
6579 addrconf_verify();
6580
6581 rtnl_af_register(&inet6_ops);
6582
6583 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
6584 0);
6585 if (err < 0)
6586 goto errout;
6587
6588 /* Only the first call to __rtnl_register can fail */
6589 __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, 0);
6590 __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, 0);
6591 __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
6592 inet6_dump_ifaddr, RTNL_FLAG_DOIT_UNLOCKED);
6593 __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
6594 inet6_dump_ifmcaddr, 0);
6595 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
6596 inet6_dump_ifacaddr, 0);
6597 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
6598 inet6_netconf_dump_devconf, RTNL_FLAG_DOIT_UNLOCKED);
6599
6600 ipv6_addr_label_rtnl_register();
6601
6602 return 0;
6603 errout:
6604 rtnl_af_unregister(&inet6_ops);
6605 unregister_netdevice_notifier(&ipv6_dev_notf);
6606 errlo:
6607 destroy_workqueue(addrconf_wq);
6608 out_nowq:
6609 unregister_pernet_subsys(&addrconf_ops);
6610 out_addrlabel:
6611 ipv6_addr_label_cleanup();
6612 out:
6613 return err;
6614 }
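
/*
 * Illustrative sketch, not part of this file: the userspace side of the
 * RTM_GETADDR handler registered in addrconf_init() above.  The program
 * sends a dump request for AF_INET6 over an rtnetlink socket; the kernel
 * answers with a stream of RTM_NEWADDR messages produced by
 * inet6_dump_ifaddr().  Error handling and message parsing are trimmed.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct ifaddrmsg ifa;
	} req;
	char buf[8192];
	ssize_t len;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
	req.nlh.nlmsg_type = RTM_GETADDR;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.nlh.nlmsg_seq = 1;
	req.ifa.ifa_family = AF_INET6;

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return 1;

	/* One recv() is enough for a small address table; a real client
	 * loops until it sees NLMSG_DONE.
	 */
	len = recv(fd, buf, sizeof(buf), 0);
	printf("received %zd bytes of RTM_NEWADDR dump data\n", len);

	close(fd);
	return 0;
}
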
6615
6616 void addrconf_cleanup(void)
6617 {
6618 struct net_device *dev;
6619 int i;
6620
6621 unregister_netdevice_notifier(&ipv6_dev_notf);
6622 unregister_pernet_subsys(&addrconf_ops);
6623 ipv6_addr_label_cleanup();
6624
6625 rtnl_af_unregister(&inet6_ops);
6626
6627 rtnl_lock();
6628
6629 /* clean dev list */
6630 for_each_netdev(&init_net, dev) {
6631 if (__in6_dev_get(dev) == NULL)
6632 continue;
6633 addrconf_ifdown(dev, 1);
6634 }
6635 addrconf_ifdown(init_net.loopback_dev, 2);
6636
6637 /*
6638 * Check hash table.
6639 */
6640 spin_lock_bh(&addrconf_hash_lock);
6641 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6642 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
6643 spin_unlock_bh(&addrconf_hash_lock);
6644 cancel_delayed_work(&addr_chk_work);
6645 rtnl_unlock();
6646
6647 destroy_workqueue(addrconf_wq);
6648 }