]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/ipv6/addrconf.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[mirror_ubuntu-artful-kernel.git] / net / ipv6 / addrconf.c
1 /*
2 * IPv6 Address [auto]configuration
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 /*
16 * Changes:
17 *
18 * Janos Farkas : delete timer on ifdown
19 * <chexum@bankinf.banki.hu>
20 * Andi Kleen : kill double kfree on module
21 * unload.
22 * Maciej W. Rozycki : FDDI support
23 * sekiya@USAGI : Don't send too many RS
24 * packets.
25 * yoshfuji@USAGI : Fixed interval between DAD
26 * packets.
27 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
28 * address validation timer.
29 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
30 * support.
31 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
32 * address on a same interface.
33 * YOSHIFUJI Hideaki @USAGI : ARCnet support
34 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
35 * seq_file.
36 * YOSHIFUJI Hideaki @USAGI : improved source address
37 * selection; consider scope,
38 * status etc.
39 */
40
41 #define pr_fmt(fmt) "IPv6: " fmt
42
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched/signal.h>
47 #include <linux/socket.h>
48 #include <linux/sockios.h>
49 #include <linux/net.h>
50 #include <linux/inet.h>
51 #include <linux/in6.h>
52 #include <linux/netdevice.h>
53 #include <linux/if_addr.h>
54 #include <linux/if_arp.h>
55 #include <linux/if_arcnet.h>
56 #include <linux/if_infiniband.h>
57 #include <linux/route.h>
58 #include <linux/inetdevice.h>
59 #include <linux/init.h>
60 #include <linux/slab.h>
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64 #include <linux/capability.h>
65 #include <linux/delay.h>
66 #include <linux/notifier.h>
67 #include <linux/string.h>
68 #include <linux/hash.h>
69
70 #include <net/net_namespace.h>
71 #include <net/sock.h>
72 #include <net/snmp.h>
73
74 #include <net/6lowpan.h>
75 #include <net/firewire.h>
76 #include <net/ipv6.h>
77 #include <net/protocol.h>
78 #include <net/ndisc.h>
79 #include <net/ip6_route.h>
80 #include <net/addrconf.h>
81 #include <net/tcp.h>
82 #include <net/ip.h>
83 #include <net/netlink.h>
84 #include <net/pkt_sched.h>
85 #include <net/l3mdev.h>
86 #include <linux/if_tunnel.h>
87 #include <linux/rtnetlink.h>
88 #include <linux/netconf.h>
89 #include <linux/random.h>
90 #include <linux/uaccess.h>
91 #include <asm/unaligned.h>
92
93 #include <linux/proc_fs.h>
94 #include <linux/seq_file.h>
95 #include <linux/export.h>
96
97 /* Set to 3 to get tracing... */
98 #define ACONF_DEBUG 2
99
100 #if ACONF_DEBUG >= 3
101 #define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
102 #else
103 #define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
104 #endif
105
106 #define INFINITY_LIFE_TIME 0xFFFFFFFF
107
108 #define IPV6_MAX_STRLEN \
109 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
110
111 static inline u32 cstamp_delta(unsigned long cstamp)
112 {
113 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
114 }
115
116 static inline s32 rfc3315_s14_backoff_init(s32 irt)
117 {
118 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
119 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
120 do_div(tmp, 1000000);
121 return (s32)tmp;
122 }
123
124 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
125 {
126 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
127 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
128 do_div(tmp, 1000000);
129 if ((s32)tmp > mrt) {
130 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
131 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
132 do_div(tmp, 1000000);
133 }
134 return (s32)tmp;
135 }
136
137 #ifdef CONFIG_SYSCTL
138 static int addrconf_sysctl_register(struct inet6_dev *idev);
139 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
140 #else
141 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
142 {
143 return 0;
144 }
145
146 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
147 {
148 }
149 #endif
150
151 static void ipv6_regen_rndid(struct inet6_dev *idev);
152 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
153
154 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
155 static int ipv6_count_addresses(struct inet6_dev *idev);
156 static int ipv6_generate_stable_address(struct in6_addr *addr,
157 u8 dad_count,
158 const struct inet6_dev *idev);
159
160 /*
161 * Configured unicast address hash table
162 */
163 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
164 static DEFINE_SPINLOCK(addrconf_hash_lock);
165
166 static void addrconf_verify(void);
167 static void addrconf_verify_rtnl(void);
168 static void addrconf_verify_work(struct work_struct *);
169
170 static struct workqueue_struct *addrconf_wq;
171 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
172
173 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
174 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
175
176 static void addrconf_type_change(struct net_device *dev,
177 unsigned long event);
178 static int addrconf_ifdown(struct net_device *dev, int how);
179
180 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
181 int plen,
182 const struct net_device *dev,
183 u32 flags, u32 noflags);
184
185 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
186 static void addrconf_dad_work(struct work_struct *w);
187 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
188 static void addrconf_dad_run(struct inet6_dev *idev);
189 static void addrconf_rs_timer(unsigned long data);
190 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
191 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
192
193 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
194 struct prefix_info *pinfo);
195 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
196 struct net_device *dev);
197
198 static struct ipv6_devconf ipv6_devconf __read_mostly = {
199 .forwarding = 0,
200 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
201 .mtu6 = IPV6_MIN_MTU,
202 .accept_ra = 1,
203 .accept_redirects = 1,
204 .autoconf = 1,
205 .force_mld_version = 0,
206 .mldv1_unsolicited_report_interval = 10 * HZ,
207 .mldv2_unsolicited_report_interval = HZ,
208 .dad_transmits = 1,
209 .rtr_solicits = MAX_RTR_SOLICITATIONS,
210 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
211 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
212 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
213 .use_tempaddr = 0,
214 .temp_valid_lft = TEMP_VALID_LIFETIME,
215 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
216 .regen_max_retry = REGEN_MAX_RETRY,
217 .max_desync_factor = MAX_DESYNC_FACTOR,
218 .max_addresses = IPV6_MAX_ADDRESSES,
219 .accept_ra_defrtr = 1,
220 .accept_ra_from_local = 0,
221 .accept_ra_min_hop_limit= 1,
222 .accept_ra_pinfo = 1,
223 #ifdef CONFIG_IPV6_ROUTER_PREF
224 .accept_ra_rtr_pref = 1,
225 .rtr_probe_interval = 60 * HZ,
226 #ifdef CONFIG_IPV6_ROUTE_INFO
227 .accept_ra_rt_info_min_plen = 0,
228 .accept_ra_rt_info_max_plen = 0,
229 #endif
230 #endif
231 .proxy_ndp = 0,
232 .accept_source_route = 0, /* we do not accept RH0 by default. */
233 .disable_ipv6 = 0,
234 .accept_dad = 1,
235 .suppress_frag_ndisc = 1,
236 .accept_ra_mtu = 1,
237 .stable_secret = {
238 .initialized = false,
239 },
240 .use_oif_addrs_only = 0,
241 .ignore_routes_with_linkdown = 0,
242 .keep_addr_on_down = 0,
243 .seg6_enabled = 0,
244 #ifdef CONFIG_IPV6_SEG6_HMAC
245 .seg6_require_hmac = 0,
246 #endif
247 .enhanced_dad = 1,
248 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
249 .disable_policy = 0,
250 };
251
252 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
253 .forwarding = 0,
254 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
255 .mtu6 = IPV6_MIN_MTU,
256 .accept_ra = 1,
257 .accept_redirects = 1,
258 .autoconf = 1,
259 .force_mld_version = 0,
260 .mldv1_unsolicited_report_interval = 10 * HZ,
261 .mldv2_unsolicited_report_interval = HZ,
262 .dad_transmits = 1,
263 .rtr_solicits = MAX_RTR_SOLICITATIONS,
264 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
265 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
266 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
267 .use_tempaddr = 0,
268 .temp_valid_lft = TEMP_VALID_LIFETIME,
269 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
270 .regen_max_retry = REGEN_MAX_RETRY,
271 .max_desync_factor = MAX_DESYNC_FACTOR,
272 .max_addresses = IPV6_MAX_ADDRESSES,
273 .accept_ra_defrtr = 1,
274 .accept_ra_from_local = 0,
275 .accept_ra_min_hop_limit= 1,
276 .accept_ra_pinfo = 1,
277 #ifdef CONFIG_IPV6_ROUTER_PREF
278 .accept_ra_rtr_pref = 1,
279 .rtr_probe_interval = 60 * HZ,
280 #ifdef CONFIG_IPV6_ROUTE_INFO
281 .accept_ra_rt_info_min_plen = 0,
282 .accept_ra_rt_info_max_plen = 0,
283 #endif
284 #endif
285 .proxy_ndp = 0,
286 .accept_source_route = 0, /* we do not accept RH0 by default. */
287 .disable_ipv6 = 0,
288 .accept_dad = 1,
289 .suppress_frag_ndisc = 1,
290 .accept_ra_mtu = 1,
291 .stable_secret = {
292 .initialized = false,
293 },
294 .use_oif_addrs_only = 0,
295 .ignore_routes_with_linkdown = 0,
296 .keep_addr_on_down = 0,
297 .seg6_enabled = 0,
298 #ifdef CONFIG_IPV6_SEG6_HMAC
299 .seg6_require_hmac = 0,
300 #endif
301 .enhanced_dad = 1,
302 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
303 .disable_policy = 0,
304 };
305
306 /* Check if a valid qdisc is available */
307 static inline bool addrconf_qdisc_ok(const struct net_device *dev)
308 {
309 return !qdisc_tx_is_noop(dev);
310 }
311
312 static void addrconf_del_rs_timer(struct inet6_dev *idev)
313 {
314 if (del_timer(&idev->rs_timer))
315 __in6_dev_put(idev);
316 }
317
318 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
319 {
320 if (cancel_delayed_work(&ifp->dad_work))
321 __in6_ifa_put(ifp);
322 }
323
324 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
325 unsigned long when)
326 {
327 if (!timer_pending(&idev->rs_timer))
328 in6_dev_hold(idev);
329 mod_timer(&idev->rs_timer, jiffies + when);
330 }
331
332 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
333 unsigned long delay)
334 {
335 if (!delayed_work_pending(&ifp->dad_work))
336 in6_ifa_hold(ifp);
337 mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
338 }
339
340 static int snmp6_alloc_dev(struct inet6_dev *idev)
341 {
342 int i;
343
344 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
345 if (!idev->stats.ipv6)
346 goto err_ip;
347
348 for_each_possible_cpu(i) {
349 struct ipstats_mib *addrconf_stats;
350 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
351 u64_stats_init(&addrconf_stats->syncp);
352 }
353
354
355 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
356 GFP_KERNEL);
357 if (!idev->stats.icmpv6dev)
358 goto err_icmp;
359 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
360 GFP_KERNEL);
361 if (!idev->stats.icmpv6msgdev)
362 goto err_icmpmsg;
363
364 return 0;
365
366 err_icmpmsg:
367 kfree(idev->stats.icmpv6dev);
368 err_icmp:
369 free_percpu(idev->stats.ipv6);
370 err_ip:
371 return -ENOMEM;
372 }
373
374 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
375 {
376 struct inet6_dev *ndev;
377 int err = -ENOMEM;
378
379 ASSERT_RTNL();
380
381 if (dev->mtu < IPV6_MIN_MTU)
382 return ERR_PTR(-EINVAL);
383
384 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
385 if (!ndev)
386 return ERR_PTR(err);
387
388 rwlock_init(&ndev->lock);
389 ndev->dev = dev;
390 INIT_LIST_HEAD(&ndev->addr_list);
391 setup_timer(&ndev->rs_timer, addrconf_rs_timer,
392 (unsigned long)ndev);
393 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
394
395 if (ndev->cnf.stable_secret.initialized)
396 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
397 else
398 ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
399
400 ndev->cnf.mtu6 = dev->mtu;
401 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
402 if (!ndev->nd_parms) {
403 kfree(ndev);
404 return ERR_PTR(err);
405 }
406 if (ndev->cnf.forwarding)
407 dev_disable_lro(dev);
408 /* We refer to the device */
409 dev_hold(dev);
410
411 if (snmp6_alloc_dev(ndev) < 0) {
412 ADBG(KERN_WARNING
413 "%s: cannot allocate memory for statistics; dev=%s.\n",
414 __func__, dev->name);
415 neigh_parms_release(&nd_tbl, ndev->nd_parms);
416 dev_put(dev);
417 kfree(ndev);
418 return ERR_PTR(err);
419 }
420
421 if (snmp6_register_dev(ndev) < 0) {
422 ADBG(KERN_WARNING
423 "%s: cannot create /proc/net/dev_snmp6/%s\n",
424 __func__, dev->name);
425 goto err_release;
426 }
427
428 /* One reference from device. */
429 in6_dev_hold(ndev);
430
431 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
432 ndev->cnf.accept_dad = -1;
433
434 #if IS_ENABLED(CONFIG_IPV6_SIT)
435 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
436 pr_info("%s: Disabled Multicast RS\n", dev->name);
437 ndev->cnf.rtr_solicits = 0;
438 }
439 #endif
440
441 INIT_LIST_HEAD(&ndev->tempaddr_list);
442 ndev->desync_factor = U32_MAX;
443 if ((dev->flags&IFF_LOOPBACK) ||
444 dev->type == ARPHRD_TUNNEL ||
445 dev->type == ARPHRD_TUNNEL6 ||
446 dev->type == ARPHRD_SIT ||
447 dev->type == ARPHRD_NONE) {
448 ndev->cnf.use_tempaddr = -1;
449 } else
450 ipv6_regen_rndid(ndev);
451
452 ndev->token = in6addr_any;
453
454 if (netif_running(dev) && addrconf_qdisc_ok(dev))
455 ndev->if_flags |= IF_READY;
456
457 ipv6_mc_init_dev(ndev);
458 ndev->tstamp = jiffies;
459 err = addrconf_sysctl_register(ndev);
460 if (err) {
461 ipv6_mc_destroy_dev(ndev);
462 snmp6_unregister_dev(ndev);
463 goto err_release;
464 }
465 /* protected by rtnl_lock */
466 rcu_assign_pointer(dev->ip6_ptr, ndev);
467
468 /* Join interface-local all-node multicast group */
469 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
470
471 /* Join all-node multicast group */
472 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
473
474 /* Join all-router multicast group if forwarding is set */
475 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
476 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
477
478 return ndev;
479
480 err_release:
481 neigh_parms_release(&nd_tbl, ndev->nd_parms);
482 ndev->dead = 1;
483 in6_dev_finish_destroy(ndev);
484 return ERR_PTR(err);
485 }
486
487 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
488 {
489 struct inet6_dev *idev;
490
491 ASSERT_RTNL();
492
493 idev = __in6_dev_get(dev);
494 if (!idev) {
495 idev = ipv6_add_dev(dev);
496 if (IS_ERR(idev))
497 return NULL;
498 }
499
500 if (dev->flags&IFF_UP)
501 ipv6_mc_up(idev);
502 return idev;
503 }
504
505 static int inet6_netconf_msgsize_devconf(int type)
506 {
507 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
508 + nla_total_size(4); /* NETCONFA_IFINDEX */
509 bool all = false;
510
511 if (type == NETCONFA_ALL)
512 all = true;
513
514 if (all || type == NETCONFA_FORWARDING)
515 size += nla_total_size(4);
516 #ifdef CONFIG_IPV6_MROUTE
517 if (all || type == NETCONFA_MC_FORWARDING)
518 size += nla_total_size(4);
519 #endif
520 if (all || type == NETCONFA_PROXY_NEIGH)
521 size += nla_total_size(4);
522
523 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
524 size += nla_total_size(4);
525
526 return size;
527 }
528
529 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
530 struct ipv6_devconf *devconf, u32 portid,
531 u32 seq, int event, unsigned int flags,
532 int type)
533 {
534 struct nlmsghdr *nlh;
535 struct netconfmsg *ncm;
536 bool all = false;
537
538 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
539 flags);
540 if (!nlh)
541 return -EMSGSIZE;
542
543 if (type == NETCONFA_ALL)
544 all = true;
545
546 ncm = nlmsg_data(nlh);
547 ncm->ncm_family = AF_INET6;
548
549 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
550 goto nla_put_failure;
551
552 if (!devconf)
553 goto out;
554
555 if ((all || type == NETCONFA_FORWARDING) &&
556 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
557 goto nla_put_failure;
558 #ifdef CONFIG_IPV6_MROUTE
559 if ((all || type == NETCONFA_MC_FORWARDING) &&
560 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
561 devconf->mc_forwarding) < 0)
562 goto nla_put_failure;
563 #endif
564 if ((all || type == NETCONFA_PROXY_NEIGH) &&
565 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
566 goto nla_put_failure;
567
568 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
569 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
570 devconf->ignore_routes_with_linkdown) < 0)
571 goto nla_put_failure;
572
573 out:
574 nlmsg_end(skb, nlh);
575 return 0;
576
577 nla_put_failure:
578 nlmsg_cancel(skb, nlh);
579 return -EMSGSIZE;
580 }
581
582 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
583 int ifindex, struct ipv6_devconf *devconf)
584 {
585 struct sk_buff *skb;
586 int err = -ENOBUFS;
587
588 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
589 if (!skb)
590 goto errout;
591
592 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
593 event, 0, type);
594 if (err < 0) {
595 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
596 WARN_ON(err == -EMSGSIZE);
597 kfree_skb(skb);
598 goto errout;
599 }
600 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
601 return;
602 errout:
603 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
604 }
605
606 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
607 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
608 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
609 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
610 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
611 };
612
613 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
614 struct nlmsghdr *nlh)
615 {
616 struct net *net = sock_net(in_skb->sk);
617 struct nlattr *tb[NETCONFA_MAX+1];
618 struct netconfmsg *ncm;
619 struct sk_buff *skb;
620 struct ipv6_devconf *devconf;
621 struct inet6_dev *in6_dev;
622 struct net_device *dev;
623 int ifindex;
624 int err;
625
626 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
627 devconf_ipv6_policy, NULL);
628 if (err < 0)
629 goto errout;
630
631 err = -EINVAL;
632 if (!tb[NETCONFA_IFINDEX])
633 goto errout;
634
635 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
636 switch (ifindex) {
637 case NETCONFA_IFINDEX_ALL:
638 devconf = net->ipv6.devconf_all;
639 break;
640 case NETCONFA_IFINDEX_DEFAULT:
641 devconf = net->ipv6.devconf_dflt;
642 break;
643 default:
644 dev = __dev_get_by_index(net, ifindex);
645 if (!dev)
646 goto errout;
647 in6_dev = __in6_dev_get(dev);
648 if (!in6_dev)
649 goto errout;
650 devconf = &in6_dev->cnf;
651 break;
652 }
653
654 err = -ENOBUFS;
655 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_ATOMIC);
656 if (!skb)
657 goto errout;
658
659 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
660 NETLINK_CB(in_skb).portid,
661 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
662 NETCONFA_ALL);
663 if (err < 0) {
664 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
665 WARN_ON(err == -EMSGSIZE);
666 kfree_skb(skb);
667 goto errout;
668 }
669 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
670 errout:
671 return err;
672 }
673
674 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
675 struct netlink_callback *cb)
676 {
677 struct net *net = sock_net(skb->sk);
678 int h, s_h;
679 int idx, s_idx;
680 struct net_device *dev;
681 struct inet6_dev *idev;
682 struct hlist_head *head;
683
684 s_h = cb->args[0];
685 s_idx = idx = cb->args[1];
686
687 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
688 idx = 0;
689 head = &net->dev_index_head[h];
690 rcu_read_lock();
691 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
692 net->dev_base_seq;
693 hlist_for_each_entry_rcu(dev, head, index_hlist) {
694 if (idx < s_idx)
695 goto cont;
696 idev = __in6_dev_get(dev);
697 if (!idev)
698 goto cont;
699
700 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
701 &idev->cnf,
702 NETLINK_CB(cb->skb).portid,
703 cb->nlh->nlmsg_seq,
704 RTM_NEWNETCONF,
705 NLM_F_MULTI,
706 NETCONFA_ALL) < 0) {
707 rcu_read_unlock();
708 goto done;
709 }
710 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
711 cont:
712 idx++;
713 }
714 rcu_read_unlock();
715 }
716 if (h == NETDEV_HASHENTRIES) {
717 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
718 net->ipv6.devconf_all,
719 NETLINK_CB(cb->skb).portid,
720 cb->nlh->nlmsg_seq,
721 RTM_NEWNETCONF, NLM_F_MULTI,
722 NETCONFA_ALL) < 0)
723 goto done;
724 else
725 h++;
726 }
727 if (h == NETDEV_HASHENTRIES + 1) {
728 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
729 net->ipv6.devconf_dflt,
730 NETLINK_CB(cb->skb).portid,
731 cb->nlh->nlmsg_seq,
732 RTM_NEWNETCONF, NLM_F_MULTI,
733 NETCONFA_ALL) < 0)
734 goto done;
735 else
736 h++;
737 }
738 done:
739 cb->args[0] = h;
740 cb->args[1] = idx;
741
742 return skb->len;
743 }
744
745 #ifdef CONFIG_SYSCTL
746 static void dev_forward_change(struct inet6_dev *idev)
747 {
748 struct net_device *dev;
749 struct inet6_ifaddr *ifa;
750
751 if (!idev)
752 return;
753 dev = idev->dev;
754 if (idev->cnf.forwarding)
755 dev_disable_lro(dev);
756 if (dev->flags & IFF_MULTICAST) {
757 if (idev->cnf.forwarding) {
758 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
759 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
760 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
761 } else {
762 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
763 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
764 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
765 }
766 }
767
768 list_for_each_entry(ifa, &idev->addr_list, if_list) {
769 if (ifa->flags&IFA_F_TENTATIVE)
770 continue;
771 if (idev->cnf.forwarding)
772 addrconf_join_anycast(ifa);
773 else
774 addrconf_leave_anycast(ifa);
775 }
776 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
777 NETCONFA_FORWARDING,
778 dev->ifindex, &idev->cnf);
779 }
780
781
782 static void addrconf_forward_change(struct net *net, __s32 newf)
783 {
784 struct net_device *dev;
785 struct inet6_dev *idev;
786
787 for_each_netdev(net, dev) {
788 idev = __in6_dev_get(dev);
789 if (idev) {
790 int changed = (!idev->cnf.forwarding) ^ (!newf);
791 idev->cnf.forwarding = newf;
792 if (changed)
793 dev_forward_change(idev);
794 }
795 }
796 }
797
798 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
799 {
800 struct net *net;
801 int old;
802
803 if (!rtnl_trylock())
804 return restart_syscall();
805
806 net = (struct net *)table->extra2;
807 old = *p;
808 *p = newf;
809
810 if (p == &net->ipv6.devconf_dflt->forwarding) {
811 if ((!newf) ^ (!old))
812 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
813 NETCONFA_FORWARDING,
814 NETCONFA_IFINDEX_DEFAULT,
815 net->ipv6.devconf_dflt);
816 rtnl_unlock();
817 return 0;
818 }
819
820 if (p == &net->ipv6.devconf_all->forwarding) {
821 int old_dflt = net->ipv6.devconf_dflt->forwarding;
822
823 net->ipv6.devconf_dflt->forwarding = newf;
824 if ((!newf) ^ (!old_dflt))
825 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
826 NETCONFA_FORWARDING,
827 NETCONFA_IFINDEX_DEFAULT,
828 net->ipv6.devconf_dflt);
829
830 addrconf_forward_change(net, newf);
831 if ((!newf) ^ (!old))
832 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
833 NETCONFA_FORWARDING,
834 NETCONFA_IFINDEX_ALL,
835 net->ipv6.devconf_all);
836 } else if ((!newf) ^ (!old))
837 dev_forward_change((struct inet6_dev *)table->extra1);
838 rtnl_unlock();
839
840 if (newf)
841 rt6_purge_dflt_routers(net);
842 return 1;
843 }
844
845 static void addrconf_linkdown_change(struct net *net, __s32 newf)
846 {
847 struct net_device *dev;
848 struct inet6_dev *idev;
849
850 for_each_netdev(net, dev) {
851 idev = __in6_dev_get(dev);
852 if (idev) {
853 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
854
855 idev->cnf.ignore_routes_with_linkdown = newf;
856 if (changed)
857 inet6_netconf_notify_devconf(dev_net(dev),
858 RTM_NEWNETCONF,
859 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
860 dev->ifindex,
861 &idev->cnf);
862 }
863 }
864 }
865
866 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
867 {
868 struct net *net;
869 int old;
870
871 if (!rtnl_trylock())
872 return restart_syscall();
873
874 net = (struct net *)table->extra2;
875 old = *p;
876 *p = newf;
877
878 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
879 if ((!newf) ^ (!old))
880 inet6_netconf_notify_devconf(net,
881 RTM_NEWNETCONF,
882 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
883 NETCONFA_IFINDEX_DEFAULT,
884 net->ipv6.devconf_dflt);
885 rtnl_unlock();
886 return 0;
887 }
888
889 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
890 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
891 addrconf_linkdown_change(net, newf);
892 if ((!newf) ^ (!old))
893 inet6_netconf_notify_devconf(net,
894 RTM_NEWNETCONF,
895 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
896 NETCONFA_IFINDEX_ALL,
897 net->ipv6.devconf_all);
898 }
899 rtnl_unlock();
900
901 return 1;
902 }
903
904 #endif
905
906 /* Nobody refers to this ifaddr, destroy it */
907 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
908 {
909 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
910
911 #ifdef NET_REFCNT_DEBUG
912 pr_debug("%s\n", __func__);
913 #endif
914
915 in6_dev_put(ifp->idev);
916
917 if (cancel_delayed_work(&ifp->dad_work))
918 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
919 ifp);
920
921 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
922 pr_warn("Freeing alive inet6 address %p\n", ifp);
923 return;
924 }
925 ip6_rt_put(ifp->rt);
926
927 kfree_rcu(ifp, rcu);
928 }
929
930 static void
931 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
932 {
933 struct list_head *p;
934 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
935
936 /*
937 * Each device address list is sorted in order of scope -
938 * global before linklocal.
939 */
940 list_for_each(p, &idev->addr_list) {
941 struct inet6_ifaddr *ifa
942 = list_entry(p, struct inet6_ifaddr, if_list);
943 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
944 break;
945 }
946
947 list_add_tail(&ifp->if_list, p);
948 }
949
950 static u32 inet6_addr_hash(const struct in6_addr *addr)
951 {
952 return hash_32(ipv6_addr_hash(addr), IN6_ADDR_HSIZE_SHIFT);
953 }
954
955 /* On success it returns ifp with increased reference count */
956
957 static struct inet6_ifaddr *
958 ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
959 const struct in6_addr *peer_addr, int pfxlen,
960 int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
961 {
962 struct net *net = dev_net(idev->dev);
963 struct inet6_ifaddr *ifa = NULL;
964 struct rt6_info *rt;
965 unsigned int hash;
966 int err = 0;
967 int addr_type = ipv6_addr_type(addr);
968
969 if (addr_type == IPV6_ADDR_ANY ||
970 addr_type & IPV6_ADDR_MULTICAST ||
971 (!(idev->dev->flags & IFF_LOOPBACK) &&
972 addr_type & IPV6_ADDR_LOOPBACK))
973 return ERR_PTR(-EADDRNOTAVAIL);
974
975 rcu_read_lock_bh();
976 if (idev->dead) {
977 err = -ENODEV; /*XXX*/
978 goto out2;
979 }
980
981 if (idev->cnf.disable_ipv6) {
982 err = -EACCES;
983 goto out2;
984 }
985
986 spin_lock(&addrconf_hash_lock);
987
988 /* Ignore adding duplicate addresses on an interface */
989 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
990 ADBG("ipv6_add_addr: already assigned\n");
991 err = -EEXIST;
992 goto out;
993 }
994
995 ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
996
997 if (!ifa) {
998 ADBG("ipv6_add_addr: malloc failed\n");
999 err = -ENOBUFS;
1000 goto out;
1001 }
1002
1003 rt = addrconf_dst_alloc(idev, addr, false);
1004 if (IS_ERR(rt)) {
1005 err = PTR_ERR(rt);
1006 goto out;
1007 }
1008
1009 if (net->ipv6.devconf_all->disable_policy ||
1010 idev->cnf.disable_policy)
1011 rt->dst.flags |= DST_NOPOLICY;
1012
1013 neigh_parms_data_state_setall(idev->nd_parms);
1014
1015 ifa->addr = *addr;
1016 if (peer_addr)
1017 ifa->peer_addr = *peer_addr;
1018
1019 spin_lock_init(&ifa->lock);
1020 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1021 INIT_HLIST_NODE(&ifa->addr_lst);
1022 ifa->scope = scope;
1023 ifa->prefix_len = pfxlen;
1024 ifa->flags = flags | IFA_F_TENTATIVE;
1025 ifa->valid_lft = valid_lft;
1026 ifa->prefered_lft = prefered_lft;
1027 ifa->cstamp = ifa->tstamp = jiffies;
1028 ifa->tokenized = false;
1029
1030 ifa->rt = rt;
1031
1032 ifa->idev = idev;
1033 in6_dev_hold(idev);
1034 /* For caller */
1035 in6_ifa_hold(ifa);
1036
1037 /* Add to big hash table */
1038 hash = inet6_addr_hash(addr);
1039
1040 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
1041 spin_unlock(&addrconf_hash_lock);
1042
1043 write_lock(&idev->lock);
1044 /* Add to inet6_dev unicast addr list. */
1045 ipv6_link_dev_addr(idev, ifa);
1046
1047 if (ifa->flags&IFA_F_TEMPORARY) {
1048 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1049 in6_ifa_hold(ifa);
1050 }
1051
1052 in6_ifa_hold(ifa);
1053 write_unlock(&idev->lock);
1054 out2:
1055 rcu_read_unlock_bh();
1056
1057 if (likely(err == 0))
1058 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1059 else {
1060 kfree(ifa);
1061 ifa = ERR_PTR(err);
1062 }
1063
1064 return ifa;
1065 out:
1066 spin_unlock(&addrconf_hash_lock);
1067 goto out2;
1068 }
1069
1070 enum cleanup_prefix_rt_t {
1071 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1072 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1073 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1074 };
1075
1076 /*
1077 * Check, whether the prefix for ifp would still need a prefix route
1078 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1079 * constants.
1080 *
1081 * 1) we don't purge prefix if address was not permanent.
1082 * prefix is managed by its own lifetime.
1083 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1084 * 3) if there are no addresses, delete prefix.
1085 * 4) if there are still other permanent address(es),
1086 * corresponding prefix is still permanent.
1087 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1088 * don't purge the prefix, assume user space is managing it.
1089 * 6) otherwise, update prefix lifetime to the
1090 * longest valid lifetime among the corresponding
1091 * addresses on the device.
1092 * Note: subsequent RA will update lifetime.
1093 **/
1094 static enum cleanup_prefix_rt_t
1095 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1096 {
1097 struct inet6_ifaddr *ifa;
1098 struct inet6_dev *idev = ifp->idev;
1099 unsigned long lifetime;
1100 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1101
1102 *expires = jiffies;
1103
1104 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1105 if (ifa == ifp)
1106 continue;
1107 if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1108 ifp->prefix_len))
1109 continue;
1110 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1111 return CLEANUP_PREFIX_RT_NOP;
1112
1113 action = CLEANUP_PREFIX_RT_EXPIRE;
1114
1115 spin_lock(&ifa->lock);
1116
1117 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1118 /*
1119 * Note: Because this address is
1120 * not permanent, lifetime <
1121 * LONG_MAX / HZ here.
1122 */
1123 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1124 *expires = ifa->tstamp + lifetime * HZ;
1125 spin_unlock(&ifa->lock);
1126 }
1127
1128 return action;
1129 }
1130
1131 static void
1132 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
1133 {
1134 struct rt6_info *rt;
1135
1136 rt = addrconf_get_prefix_route(&ifp->addr,
1137 ifp->prefix_len,
1138 ifp->idev->dev,
1139 0, RTF_GATEWAY | RTF_DEFAULT);
1140 if (rt) {
1141 if (del_rt)
1142 ip6_del_rt(rt);
1143 else {
1144 if (!(rt->rt6i_flags & RTF_EXPIRES))
1145 rt6_set_expires(rt, expires);
1146 ip6_rt_put(rt);
1147 }
1148 }
1149 }
1150
1151
1152 /* This function wants to get referenced ifp and releases it before return */
1153
1154 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1155 {
1156 int state;
1157 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1158 unsigned long expires;
1159
1160 ASSERT_RTNL();
1161
1162 spin_lock_bh(&ifp->lock);
1163 state = ifp->state;
1164 ifp->state = INET6_IFADDR_STATE_DEAD;
1165 spin_unlock_bh(&ifp->lock);
1166
1167 if (state == INET6_IFADDR_STATE_DEAD)
1168 goto out;
1169
1170 spin_lock_bh(&addrconf_hash_lock);
1171 hlist_del_init_rcu(&ifp->addr_lst);
1172 spin_unlock_bh(&addrconf_hash_lock);
1173
1174 write_lock_bh(&ifp->idev->lock);
1175
1176 if (ifp->flags&IFA_F_TEMPORARY) {
1177 list_del(&ifp->tmp_list);
1178 if (ifp->ifpub) {
1179 in6_ifa_put(ifp->ifpub);
1180 ifp->ifpub = NULL;
1181 }
1182 __in6_ifa_put(ifp);
1183 }
1184
1185 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1186 action = check_cleanup_prefix_route(ifp, &expires);
1187
1188 list_del_init(&ifp->if_list);
1189 __in6_ifa_put(ifp);
1190
1191 write_unlock_bh(&ifp->idev->lock);
1192
1193 addrconf_del_dad_work(ifp);
1194
1195 ipv6_ifa_notify(RTM_DELADDR, ifp);
1196
1197 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1198
1199 if (action != CLEANUP_PREFIX_RT_NOP) {
1200 cleanup_prefix_route(ifp, expires,
1201 action == CLEANUP_PREFIX_RT_DEL);
1202 }
1203
1204 /* clean up prefsrc entries */
1205 rt6_remove_prefsrc(ifp);
1206 out:
1207 in6_ifa_put(ifp);
1208 }
1209
1210 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *ift)
1211 {
1212 struct inet6_dev *idev = ifp->idev;
1213 struct in6_addr addr, *tmpaddr;
1214 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
1215 unsigned long regen_advance;
1216 int tmp_plen;
1217 int ret = 0;
1218 u32 addr_flags;
1219 unsigned long now = jiffies;
1220 long max_desync_factor;
1221 s32 cnf_temp_preferred_lft;
1222
1223 write_lock_bh(&idev->lock);
1224 if (ift) {
1225 spin_lock_bh(&ift->lock);
1226 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
1227 spin_unlock_bh(&ift->lock);
1228 tmpaddr = &addr;
1229 } else {
1230 tmpaddr = NULL;
1231 }
1232 retry:
1233 in6_dev_hold(idev);
1234 if (idev->cnf.use_tempaddr <= 0) {
1235 write_unlock_bh(&idev->lock);
1236 pr_info("%s: use_tempaddr is disabled\n", __func__);
1237 in6_dev_put(idev);
1238 ret = -1;
1239 goto out;
1240 }
1241 spin_lock_bh(&ifp->lock);
1242 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1243 idev->cnf.use_tempaddr = -1; /*XXX*/
1244 spin_unlock_bh(&ifp->lock);
1245 write_unlock_bh(&idev->lock);
1246 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1247 __func__);
1248 in6_dev_put(idev);
1249 ret = -1;
1250 goto out;
1251 }
1252 in6_ifa_hold(ifp);
1253 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1254 ipv6_try_regen_rndid(idev, tmpaddr);
1255 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1256 age = (now - ifp->tstamp) / HZ;
1257
1258 regen_advance = idev->cnf.regen_max_retry *
1259 idev->cnf.dad_transmits *
1260 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1261
1262 /* recalculate max_desync_factor each time and update
1263 * idev->desync_factor if it's larger
1264 */
1265 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1266 max_desync_factor = min_t(__u32,
1267 idev->cnf.max_desync_factor,
1268 cnf_temp_preferred_lft - regen_advance);
1269
1270 if (unlikely(idev->desync_factor > max_desync_factor)) {
1271 if (max_desync_factor > 0) {
1272 get_random_bytes(&idev->desync_factor,
1273 sizeof(idev->desync_factor));
1274 idev->desync_factor %= max_desync_factor;
1275 } else {
1276 idev->desync_factor = 0;
1277 }
1278 }
1279
1280 tmp_valid_lft = min_t(__u32,
1281 ifp->valid_lft,
1282 idev->cnf.temp_valid_lft + age);
1283 tmp_prefered_lft = cnf_temp_preferred_lft + age -
1284 idev->desync_factor;
1285 tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
1286 tmp_plen = ifp->prefix_len;
1287 tmp_tstamp = ifp->tstamp;
1288 spin_unlock_bh(&ifp->lock);
1289
1290 write_unlock_bh(&idev->lock);
1291
1292 /* A temporary address is created only if this calculated Preferred
1293 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1294 * an implementation must not create a temporary address with a zero
1295 * Preferred Lifetime.
1296 * Use age calculation as in addrconf_verify to avoid unnecessary
1297 * temporary addresses being generated.
1298 */
1299 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1300 if (tmp_prefered_lft <= regen_advance + age) {
1301 in6_ifa_put(ifp);
1302 in6_dev_put(idev);
1303 ret = -1;
1304 goto out;
1305 }
1306
1307 addr_flags = IFA_F_TEMPORARY;
1308 /* set in addrconf_prefix_rcv() */
1309 if (ifp->flags & IFA_F_OPTIMISTIC)
1310 addr_flags |= IFA_F_OPTIMISTIC;
1311
1312 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1313 ipv6_addr_scope(&addr), addr_flags,
1314 tmp_valid_lft, tmp_prefered_lft);
1315 if (IS_ERR(ift)) {
1316 in6_ifa_put(ifp);
1317 in6_dev_put(idev);
1318 pr_info("%s: retry temporary address regeneration\n", __func__);
1319 tmpaddr = &addr;
1320 write_lock_bh(&idev->lock);
1321 goto retry;
1322 }
1323
1324 spin_lock_bh(&ift->lock);
1325 ift->ifpub = ifp;
1326 ift->cstamp = now;
1327 ift->tstamp = tmp_tstamp;
1328 spin_unlock_bh(&ift->lock);
1329
1330 addrconf_dad_start(ift);
1331 in6_ifa_put(ift);
1332 in6_dev_put(idev);
1333 out:
1334 return ret;
1335 }
1336
1337 /*
1338 * Choose an appropriate source address (RFC3484)
1339 */
1340 enum {
1341 IPV6_SADDR_RULE_INIT = 0,
1342 IPV6_SADDR_RULE_LOCAL,
1343 IPV6_SADDR_RULE_SCOPE,
1344 IPV6_SADDR_RULE_PREFERRED,
1345 #ifdef CONFIG_IPV6_MIP6
1346 IPV6_SADDR_RULE_HOA,
1347 #endif
1348 IPV6_SADDR_RULE_OIF,
1349 IPV6_SADDR_RULE_LABEL,
1350 IPV6_SADDR_RULE_PRIVACY,
1351 IPV6_SADDR_RULE_ORCHID,
1352 IPV6_SADDR_RULE_PREFIX,
1353 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1354 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1355 #endif
1356 IPV6_SADDR_RULE_MAX
1357 };
1358
1359 struct ipv6_saddr_score {
1360 int rule;
1361 int addr_type;
1362 struct inet6_ifaddr *ifa;
1363 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1364 int scopedist;
1365 int matchlen;
1366 };
1367
1368 struct ipv6_saddr_dst {
1369 const struct in6_addr *addr;
1370 int ifindex;
1371 int scope;
1372 int label;
1373 unsigned int prefs;
1374 };
1375
1376 static inline int ipv6_saddr_preferred(int type)
1377 {
1378 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1379 return 1;
1380 return 0;
1381 }
1382
1383 static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
1384 {
1385 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1386 return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
1387 #else
1388 return false;
1389 #endif
1390 }
1391
1392 static int ipv6_get_saddr_eval(struct net *net,
1393 struct ipv6_saddr_score *score,
1394 struct ipv6_saddr_dst *dst,
1395 int i)
1396 {
1397 int ret;
1398
1399 if (i <= score->rule) {
1400 switch (i) {
1401 case IPV6_SADDR_RULE_SCOPE:
1402 ret = score->scopedist;
1403 break;
1404 case IPV6_SADDR_RULE_PREFIX:
1405 ret = score->matchlen;
1406 break;
1407 default:
1408 ret = !!test_bit(i, score->scorebits);
1409 }
1410 goto out;
1411 }
1412
1413 switch (i) {
1414 case IPV6_SADDR_RULE_INIT:
1415 /* Rule 0: remember if hiscore is not ready yet */
1416 ret = !!score->ifa;
1417 break;
1418 case IPV6_SADDR_RULE_LOCAL:
1419 /* Rule 1: Prefer same address */
1420 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1421 break;
1422 case IPV6_SADDR_RULE_SCOPE:
1423 /* Rule 2: Prefer appropriate scope
1424 *
1425 * ret
1426 * ^
1427 * -1 | d 15
1428 * ---+--+-+---> scope
1429 * |
1430 * | d is scope of the destination.
1431 * B-d | \
1432 * | \ <- smaller scope is better if
1433 * B-15 | \ if scope is enough for destination.
1434 * | ret = B - scope (-1 <= scope >= d <= 15).
1435 * d-C-1 | /
1436 * |/ <- greater is better
1437 * -C / if scope is not enough for destination.
1438 * /| ret = scope - C (-1 <= d < scope <= 15).
1439 *
1440 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1441 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1442 * Assume B = 0 and we get C > 29.
1443 */
1444 ret = __ipv6_addr_src_scope(score->addr_type);
1445 if (ret >= dst->scope)
1446 ret = -ret;
1447 else
1448 ret -= 128; /* 30 is enough */
1449 score->scopedist = ret;
1450 break;
1451 case IPV6_SADDR_RULE_PREFERRED:
1452 {
1453 /* Rule 3: Avoid deprecated and optimistic addresses */
1454 u8 avoid = IFA_F_DEPRECATED;
1455
1456 if (!ipv6_use_optimistic_addr(score->ifa->idev))
1457 avoid |= IFA_F_OPTIMISTIC;
1458 ret = ipv6_saddr_preferred(score->addr_type) ||
1459 !(score->ifa->flags & avoid);
1460 break;
1461 }
1462 #ifdef CONFIG_IPV6_MIP6
1463 case IPV6_SADDR_RULE_HOA:
1464 {
1465 /* Rule 4: Prefer home address */
1466 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1467 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1468 break;
1469 }
1470 #endif
1471 case IPV6_SADDR_RULE_OIF:
1472 /* Rule 5: Prefer outgoing interface */
1473 ret = (!dst->ifindex ||
1474 dst->ifindex == score->ifa->idev->dev->ifindex);
1475 break;
1476 case IPV6_SADDR_RULE_LABEL:
1477 /* Rule 6: Prefer matching label */
1478 ret = ipv6_addr_label(net,
1479 &score->ifa->addr, score->addr_type,
1480 score->ifa->idev->dev->ifindex) == dst->label;
1481 break;
1482 case IPV6_SADDR_RULE_PRIVACY:
1483 {
1484 /* Rule 7: Prefer public address
1485 * Note: prefer temporary address if use_tempaddr >= 2
1486 */
1487 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1488 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1489 score->ifa->idev->cnf.use_tempaddr >= 2;
1490 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1491 break;
1492 }
1493 case IPV6_SADDR_RULE_ORCHID:
1494 /* Rule 8-: Prefer ORCHID vs ORCHID or
1495 * non-ORCHID vs non-ORCHID
1496 */
1497 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1498 ipv6_addr_orchid(dst->addr));
1499 break;
1500 case IPV6_SADDR_RULE_PREFIX:
1501 /* Rule 8: Use longest matching prefix */
1502 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1503 if (ret > score->ifa->prefix_len)
1504 ret = score->ifa->prefix_len;
1505 score->matchlen = ret;
1506 break;
1507 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1508 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1509 /* Optimistic addresses still have lower precedence than other
1510 * preferred addresses.
1511 */
1512 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1513 break;
1514 #endif
1515 default:
1516 ret = 0;
1517 }
1518
1519 if (ret)
1520 __set_bit(i, score->scorebits);
1521 score->rule = i;
1522 out:
1523 return ret;
1524 }
1525
1526 static int __ipv6_dev_get_saddr(struct net *net,
1527 struct ipv6_saddr_dst *dst,
1528 struct inet6_dev *idev,
1529 struct ipv6_saddr_score *scores,
1530 int hiscore_idx)
1531 {
1532 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1533
1534 read_lock_bh(&idev->lock);
1535 list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
1536 int i;
1537
1538 /*
1539 * - Tentative Address (RFC2462 section 5.4)
1540 * - A tentative address is not considered
1541 * "assigned to an interface" in the traditional
1542 * sense, unless it is also flagged as optimistic.
1543 * - Candidate Source Address (section 4)
1544 * - In any case, anycast addresses, multicast
1545 * addresses, and the unspecified address MUST
1546 * NOT be included in a candidate set.
1547 */
1548 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1549 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1550 continue;
1551
1552 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1553
1554 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1555 score->addr_type & IPV6_ADDR_MULTICAST)) {
1556 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1557 idev->dev->name);
1558 continue;
1559 }
1560
1561 score->rule = -1;
1562 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1563
1564 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1565 int minihiscore, miniscore;
1566
1567 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1568 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1569
1570 if (minihiscore > miniscore) {
1571 if (i == IPV6_SADDR_RULE_SCOPE &&
1572 score->scopedist > 0) {
1573 /*
1574 * special case:
1575 * each remaining entry
1576 * has too small (not enough)
1577 * scope, because ifa entries
1578 * are sorted by their scope
1579 * values.
1580 */
1581 goto out;
1582 }
1583 break;
1584 } else if (minihiscore < miniscore) {
1585 if (hiscore->ifa)
1586 in6_ifa_put(hiscore->ifa);
1587
1588 in6_ifa_hold(score->ifa);
1589
1590 swap(hiscore, score);
1591 hiscore_idx = 1 - hiscore_idx;
1592
1593 /* restore our iterator */
1594 score->ifa = hiscore->ifa;
1595
1596 break;
1597 }
1598 }
1599 }
1600 out:
1601 read_unlock_bh(&idev->lock);
1602 return hiscore_idx;
1603 }
1604
1605 static int ipv6_get_saddr_master(struct net *net,
1606 const struct net_device *dst_dev,
1607 const struct net_device *master,
1608 struct ipv6_saddr_dst *dst,
1609 struct ipv6_saddr_score *scores,
1610 int hiscore_idx)
1611 {
1612 struct inet6_dev *idev;
1613
1614 idev = __in6_dev_get(dst_dev);
1615 if (idev)
1616 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1617 scores, hiscore_idx);
1618
1619 idev = __in6_dev_get(master);
1620 if (idev)
1621 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1622 scores, hiscore_idx);
1623
1624 return hiscore_idx;
1625 }
1626
1627 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1628 const struct in6_addr *daddr, unsigned int prefs,
1629 struct in6_addr *saddr)
1630 {
1631 struct ipv6_saddr_score scores[2], *hiscore;
1632 struct ipv6_saddr_dst dst;
1633 struct inet6_dev *idev;
1634 struct net_device *dev;
1635 int dst_type;
1636 bool use_oif_addr = false;
1637 int hiscore_idx = 0;
1638
1639 dst_type = __ipv6_addr_type(daddr);
1640 dst.addr = daddr;
1641 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1642 dst.scope = __ipv6_addr_src_scope(dst_type);
1643 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1644 dst.prefs = prefs;
1645
1646 scores[hiscore_idx].rule = -1;
1647 scores[hiscore_idx].ifa = NULL;
1648
1649 rcu_read_lock();
1650
1651 /* Candidate Source Address (section 4)
1652 * - multicast and link-local destination address,
1653 * the set of candidate source address MUST only
1654 * include addresses assigned to interfaces
1655 * belonging to the same link as the outgoing
1656 * interface.
1657 * (- For site-local destination addresses, the
1658 * set of candidate source addresses MUST only
1659 * include addresses assigned to interfaces
1660 * belonging to the same site as the outgoing
1661 * interface.)
1662 * - "It is RECOMMENDED that the candidate source addresses
1663 * be the set of unicast addresses assigned to the
1664 * interface that will be used to send to the destination
1665 * (the 'outgoing' interface)." (RFC 6724)
1666 */
1667 if (dst_dev) {
1668 idev = __in6_dev_get(dst_dev);
1669 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1670 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1671 (idev && idev->cnf.use_oif_addrs_only)) {
1672 use_oif_addr = true;
1673 }
1674 }
1675
1676 if (use_oif_addr) {
1677 if (idev)
1678 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1679 } else {
1680 const struct net_device *master;
1681 int master_idx = 0;
1682
1683 /* if dst_dev exists and is enslaved to an L3 device, then
1684 * prefer addresses from dst_dev and then the master over
1685 * any other enslaved devices in the L3 domain.
1686 */
1687 master = l3mdev_master_dev_rcu(dst_dev);
1688 if (master) {
1689 master_idx = master->ifindex;
1690
1691 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1692 master, &dst,
1693 scores, hiscore_idx);
1694
1695 if (scores[hiscore_idx].ifa)
1696 goto out;
1697 }
1698
1699 for_each_netdev_rcu(net, dev) {
1700 /* only consider addresses on devices in the
1701 * same L3 domain
1702 */
1703 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1704 continue;
1705 idev = __in6_dev_get(dev);
1706 if (!idev)
1707 continue;
1708 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1709 }
1710 }
1711
1712 out:
1713 rcu_read_unlock();
1714
1715 hiscore = &scores[hiscore_idx];
1716 if (!hiscore->ifa)
1717 return -EADDRNOTAVAIL;
1718
1719 *saddr = hiscore->ifa->addr;
1720 in6_ifa_put(hiscore->ifa);
1721 return 0;
1722 }
1723 EXPORT_SYMBOL(ipv6_dev_get_saddr);
1724
1725 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1726 u32 banned_flags)
1727 {
1728 struct inet6_ifaddr *ifp;
1729 int err = -EADDRNOTAVAIL;
1730
1731 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1732 if (ifp->scope > IFA_LINK)
1733 break;
1734 if (ifp->scope == IFA_LINK &&
1735 !(ifp->flags & banned_flags)) {
1736 *addr = ifp->addr;
1737 err = 0;
1738 break;
1739 }
1740 }
1741 return err;
1742 }
1743
1744 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1745 u32 banned_flags)
1746 {
1747 struct inet6_dev *idev;
1748 int err = -EADDRNOTAVAIL;
1749
1750 rcu_read_lock();
1751 idev = __in6_dev_get(dev);
1752 if (idev) {
1753 read_lock_bh(&idev->lock);
1754 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1755 read_unlock_bh(&idev->lock);
1756 }
1757 rcu_read_unlock();
1758 return err;
1759 }
1760
1761 static int ipv6_count_addresses(struct inet6_dev *idev)
1762 {
1763 int cnt = 0;
1764 struct inet6_ifaddr *ifp;
1765
1766 read_lock_bh(&idev->lock);
1767 list_for_each_entry(ifp, &idev->addr_list, if_list)
1768 cnt++;
1769 read_unlock_bh(&idev->lock);
1770 return cnt;
1771 }
1772
1773 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1774 const struct net_device *dev, int strict)
1775 {
1776 return ipv6_chk_addr_and_flags(net, addr, dev, strict, IFA_F_TENTATIVE);
1777 }
1778 EXPORT_SYMBOL(ipv6_chk_addr);
1779
1780 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1781 const struct net_device *dev, int strict,
1782 u32 banned_flags)
1783 {
1784 struct inet6_ifaddr *ifp;
1785 unsigned int hash = inet6_addr_hash(addr);
1786 u32 ifp_flags;
1787
1788 rcu_read_lock_bh();
1789 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1790 if (!net_eq(dev_net(ifp->idev->dev), net))
1791 continue;
1792 /* Decouple optimistic from tentative for evaluation here.
1793 * Ban optimistic addresses explicitly, when required.
1794 */
1795 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1796 ? (ifp->flags&~IFA_F_TENTATIVE)
1797 : ifp->flags;
1798 if (ipv6_addr_equal(&ifp->addr, addr) &&
1799 !(ifp_flags&banned_flags) &&
1800 (!dev || ifp->idev->dev == dev ||
1801 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1802 rcu_read_unlock_bh();
1803 return 1;
1804 }
1805 }
1806
1807 rcu_read_unlock_bh();
1808 return 0;
1809 }
1810 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1811
1812 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1813 struct net_device *dev)
1814 {
1815 unsigned int hash = inet6_addr_hash(addr);
1816 struct inet6_ifaddr *ifp;
1817
1818 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
1819 if (!net_eq(dev_net(ifp->idev->dev), net))
1820 continue;
1821 if (ipv6_addr_equal(&ifp->addr, addr)) {
1822 if (!dev || ifp->idev->dev == dev)
1823 return true;
1824 }
1825 }
1826 return false;
1827 }
1828
1829 /* Compares an address/prefix_len with addresses on device @dev.
1830 * If one is found it returns true.
1831 */
1832 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1833 const unsigned int prefix_len, struct net_device *dev)
1834 {
1835 struct inet6_dev *idev;
1836 struct inet6_ifaddr *ifa;
1837 bool ret = false;
1838
1839 rcu_read_lock();
1840 idev = __in6_dev_get(dev);
1841 if (idev) {
1842 read_lock_bh(&idev->lock);
1843 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1844 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1845 if (ret)
1846 break;
1847 }
1848 read_unlock_bh(&idev->lock);
1849 }
1850 rcu_read_unlock();
1851
1852 return ret;
1853 }
1854 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1855
1856 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1857 {
1858 struct inet6_dev *idev;
1859 struct inet6_ifaddr *ifa;
1860 int onlink;
1861
1862 onlink = 0;
1863 rcu_read_lock();
1864 idev = __in6_dev_get(dev);
1865 if (idev) {
1866 read_lock_bh(&idev->lock);
1867 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1868 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1869 ifa->prefix_len);
1870 if (onlink)
1871 break;
1872 }
1873 read_unlock_bh(&idev->lock);
1874 }
1875 rcu_read_unlock();
1876 return onlink;
1877 }
1878 EXPORT_SYMBOL(ipv6_chk_prefix);
1879
1880 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1881 struct net_device *dev, int strict)
1882 {
1883 struct inet6_ifaddr *ifp, *result = NULL;
1884 unsigned int hash = inet6_addr_hash(addr);
1885
1886 rcu_read_lock_bh();
1887 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
1888 if (!net_eq(dev_net(ifp->idev->dev), net))
1889 continue;
1890 if (ipv6_addr_equal(&ifp->addr, addr)) {
1891 if (!dev || ifp->idev->dev == dev ||
1892 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1893 result = ifp;
1894 in6_ifa_hold(ifp);
1895 break;
1896 }
1897 }
1898 }
1899 rcu_read_unlock_bh();
1900
1901 return result;
1902 }
1903
1904 /* Gets referenced address, destroys ifaddr */
1905
1906 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1907 {
1908 if (dad_failed)
1909 ifp->flags |= IFA_F_DADFAILED;
1910
1911 if (ifp->flags&IFA_F_PERMANENT) {
1912 spin_lock_bh(&ifp->lock);
1913 addrconf_del_dad_work(ifp);
1914 ifp->flags |= IFA_F_TENTATIVE;
1915 spin_unlock_bh(&ifp->lock);
1916 if (dad_failed)
1917 ipv6_ifa_notify(0, ifp);
1918 in6_ifa_put(ifp);
1919 } else if (ifp->flags&IFA_F_TEMPORARY) {
1920 struct inet6_ifaddr *ifpub;
1921 spin_lock_bh(&ifp->lock);
1922 ifpub = ifp->ifpub;
1923 if (ifpub) {
1924 in6_ifa_hold(ifpub);
1925 spin_unlock_bh(&ifp->lock);
1926 ipv6_create_tempaddr(ifpub, ifp);
1927 in6_ifa_put(ifpub);
1928 } else {
1929 spin_unlock_bh(&ifp->lock);
1930 }
1931 ipv6_del_addr(ifp);
1932 } else {
1933 ipv6_del_addr(ifp);
1934 }
1935 }
1936
1937 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
1938 {
1939 int err = -ENOENT;
1940
1941 spin_lock_bh(&ifp->lock);
1942 if (ifp->state == INET6_IFADDR_STATE_DAD) {
1943 ifp->state = INET6_IFADDR_STATE_POSTDAD;
1944 err = 0;
1945 }
1946 spin_unlock_bh(&ifp->lock);
1947
1948 return err;
1949 }
1950
1951 void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1952 {
1953 struct inet6_dev *idev = ifp->idev;
1954 struct net *net = dev_net(ifp->idev->dev);
1955
1956 if (addrconf_dad_end(ifp)) {
1957 in6_ifa_put(ifp);
1958 return;
1959 }
1960
1961 net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
1962 ifp->idev->dev->name, &ifp->addr);
1963
1964 spin_lock_bh(&ifp->lock);
1965
1966 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
1967 int scope = ifp->scope;
1968 u32 flags = ifp->flags;
1969 struct in6_addr new_addr;
1970 struct inet6_ifaddr *ifp2;
1971 u32 valid_lft, preferred_lft;
1972 int pfxlen = ifp->prefix_len;
1973 int retries = ifp->stable_privacy_retry + 1;
1974
1975 if (retries > net->ipv6.sysctl.idgen_retries) {
1976 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
1977 ifp->idev->dev->name);
1978 goto errdad;
1979 }
1980
1981 new_addr = ifp->addr;
1982 if (ipv6_generate_stable_address(&new_addr, retries,
1983 idev))
1984 goto errdad;
1985
1986 valid_lft = ifp->valid_lft;
1987 preferred_lft = ifp->prefered_lft;
1988
1989 spin_unlock_bh(&ifp->lock);
1990
1991 if (idev->cnf.max_addresses &&
1992 ipv6_count_addresses(idev) >=
1993 idev->cnf.max_addresses)
1994 goto lock_errdad;
1995
1996 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
1997 ifp->idev->dev->name);
1998
1999 ifp2 = ipv6_add_addr(idev, &new_addr, NULL, pfxlen,
2000 scope, flags, valid_lft,
2001 preferred_lft);
2002 if (IS_ERR(ifp2))
2003 goto lock_errdad;
2004
2005 spin_lock_bh(&ifp2->lock);
2006 ifp2->stable_privacy_retry = retries;
2007 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2008 spin_unlock_bh(&ifp2->lock);
2009
2010 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2011 in6_ifa_put(ifp2);
2012 lock_errdad:
2013 spin_lock_bh(&ifp->lock);
2014 }
2015
2016 errdad:
2017 /* transition from _POSTDAD to _ERRDAD */
2018 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2019 spin_unlock_bh(&ifp->lock);
2020
2021 addrconf_mod_dad_work(ifp, 0);
2022 in6_ifa_put(ifp);
2023 }
2024
2025 /* Join to solicited addr multicast group.
2026 * caller must hold RTNL */
2027 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2028 {
2029 struct in6_addr maddr;
2030
2031 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2032 return;
2033
2034 addrconf_addr_solict_mult(addr, &maddr);
2035 ipv6_dev_mc_inc(dev, &maddr);
2036 }
2037
2038 /* caller must hold RTNL */
2039 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2040 {
2041 struct in6_addr maddr;
2042
2043 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2044 return;
2045
2046 addrconf_addr_solict_mult(addr, &maddr);
2047 __ipv6_dev_mc_dec(idev, &maddr);
2048 }
2049
2050 /* caller must hold RTNL */
2051 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2052 {
2053 struct in6_addr addr;
2054
2055 if (ifp->prefix_len >= 127) /* RFC 6164 */
2056 return;
2057 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2058 if (ipv6_addr_any(&addr))
2059 return;
2060 __ipv6_dev_ac_inc(ifp->idev, &addr);
2061 }
2062
2063 /* caller must hold RTNL */
2064 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2065 {
2066 struct in6_addr addr;
2067
2068 if (ifp->prefix_len >= 127) /* RFC 6164 */
2069 return;
2070 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2071 if (ipv6_addr_any(&addr))
2072 return;
2073 __ipv6_dev_ac_dec(ifp->idev, &addr);
2074 }
2075
2076 static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
2077 {
2078 if (dev->addr_len != EUI64_ADDR_LEN)
2079 return -1;
2080 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2081 eui[0] ^= 2;
2082 return 0;
2083 }
2084
2085 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2086 {
2087 union fwnet_hwaddr *ha;
2088
2089 if (dev->addr_len != FWNET_ALEN)
2090 return -1;
2091
2092 ha = (union fwnet_hwaddr *)dev->dev_addr;
2093
2094 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2095 eui[0] ^= 2;
2096 return 0;
2097 }
2098
2099 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2100 {
2101 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2102 if (dev->addr_len != ARCNET_ALEN)
2103 return -1;
2104 memset(eui, 0, 7);
2105 eui[7] = *(u8 *)dev->dev_addr;
2106 return 0;
2107 }
2108
2109 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2110 {
2111 if (dev->addr_len != INFINIBAND_ALEN)
2112 return -1;
2113 memcpy(eui, dev->dev_addr + 12, 8);
2114 eui[0] |= 2;
2115 return 0;
2116 }
2117
2118 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2119 {
2120 if (addr == 0)
2121 return -1;
2122 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2123 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2124 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2125 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2126 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2127 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2128 eui[1] = 0;
2129 eui[2] = 0x5E;
2130 eui[3] = 0xFE;
2131 memcpy(eui + 4, &addr, 4);
2132 return 0;
2133 }
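/*
 * __ipv6_isatap_ifid() builds the ISATAP interface identifier defined
 * in RFC 5214: 00-00-5E-FE followed by the embedded IPv4 address.  The
 * universal/local bit (0x02 in the first octet) is set only when the
 * IPv4 address is globally unique, i.e. not in any of the private,
 * loopback, link-local, test, multicast or broadcast ranges checked
 * above.
 */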
2134
2135 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2136 {
2137 if (dev->priv_flags & IFF_ISATAP)
2138 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2139 return -1;
2140 }
2141
2142 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2143 {
2144 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2145 }
2146
2147 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2148 {
2149 memcpy(eui, dev->perm_addr, 3);
2150 memcpy(eui + 5, dev->perm_addr + 3, 3);
2151 eui[3] = 0xFF;
2152 eui[4] = 0xFE;
2153 eui[0] ^= 2;
2154 return 0;
2155 }
2156
2157 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2158 {
2159 switch (dev->type) {
2160 case ARPHRD_ETHER:
2161 case ARPHRD_FDDI:
2162 return addrconf_ifid_eui48(eui, dev);
2163 case ARPHRD_ARCNET:
2164 return addrconf_ifid_arcnet(eui, dev);
2165 case ARPHRD_INFINIBAND:
2166 return addrconf_ifid_infiniband(eui, dev);
2167 case ARPHRD_SIT:
2168 return addrconf_ifid_sit(eui, dev);
2169 case ARPHRD_IPGRE:
2170 case ARPHRD_TUNNEL:
2171 return addrconf_ifid_gre(eui, dev);
2172 case ARPHRD_6LOWPAN:
2173 return addrconf_ifid_eui64(eui, dev);
2174 case ARPHRD_IEEE1394:
2175 return addrconf_ifid_ieee1394(eui, dev);
2176 case ARPHRD_TUNNEL6:
2177 case ARPHRD_IP6GRE:
2178 return addrconf_ifid_ip6tnl(eui, dev);
2179 }
2180 return -1;
2181 }
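/*
 * ipv6_generate_eui64() dispatches on the link type to build a 64-bit
 * interface identifier.  For EUI-48 hardware addresses (Ethernet/FDDI,
 * handled by addrconf_ifid_eui48) this is the modified EUI-64
 * construction from RFC 4291: flip the universal/local bit and insert
 * 0xfffe in the middle.  Purely illustrative example: MAC
 * 00:11:22:33:44:55 yields identifier 02:11:22:ff:fe:33:44:55, i.e. a
 * link-local address of fe80::211:22ff:fe33:4455.
 */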
2182
2183 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2184 {
2185 int err = -1;
2186 struct inet6_ifaddr *ifp;
2187
2188 read_lock_bh(&idev->lock);
2189 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2190 if (ifp->scope > IFA_LINK)
2191 break;
2192 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2193 memcpy(eui, ifp->addr.s6_addr+8, 8);
2194 err = 0;
2195 break;
2196 }
2197 }
2198 read_unlock_bh(&idev->lock);
2199 return err;
2200 }
2201
2202 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2203 static void ipv6_regen_rndid(struct inet6_dev *idev)
2204 {
2205 regen:
2206 get_random_bytes(idev->rndid, sizeof(idev->rndid));
2207 idev->rndid[0] &= ~0x02;
2208
2209 /*
2210 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
2211 * check that the generated identifier is not an inappropriate value:
2212 *
2213 * - Reserved subnet anycast (RFC 2526)
2214 * 11111101 11....11 1xxxxxxx
2215 * - ISATAP (RFC4214) 6.1
2216 * 00-00-5E-FE-xx-xx-xx-xx
2217 * - value 0
2218 * - XXX: already assigned to an address on the device
2219 */
2220 if (idev->rndid[0] == 0xfd &&
2221 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
2222 (idev->rndid[7]&0x80))
2223 goto regen;
2224 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
2225 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
2226 goto regen;
2227 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
2228 goto regen;
2229 }
2230 }
2231
2232 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2233 {
2234 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2235 ipv6_regen_rndid(idev);
2236 }
2237
2238 /*
2239 * Add prefix route.
2240 */
2241
2242 static void
2243 addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
2244 unsigned long expires, u32 flags)
2245 {
2246 struct fib6_config cfg = {
2247 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2248 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2249 .fc_ifindex = dev->ifindex,
2250 .fc_expires = expires,
2251 .fc_dst_len = plen,
2252 .fc_flags = RTF_UP | flags,
2253 .fc_nlinfo.nl_net = dev_net(dev),
2254 .fc_protocol = RTPROT_KERNEL,
2255 };
2256
2257 cfg.fc_dst = *pfx;
2258
2259 /* Prevent useless cloning on PtP SIT.
2260 This is done here on the assumption that the whole
2261 class of non-broadcast devices does not need cloning.
2262 */
2263 #if IS_ENABLED(CONFIG_IPV6_SIT)
2264 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2265 cfg.fc_flags |= RTF_NONEXTHOP;
2266 #endif
2267
2268 ip6_route_add(&cfg);
2269 }
2270
2271
2272 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2273 int plen,
2274 const struct net_device *dev,
2275 u32 flags, u32 noflags)
2276 {
2277 struct fib6_node *fn;
2278 struct rt6_info *rt = NULL;
2279 struct fib6_table *table;
2280 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2281
2282 table = fib6_get_table(dev_net(dev), tb_id);
2283 if (!table)
2284 return NULL;
2285
2286 read_lock_bh(&table->tb6_lock);
2287 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
2288 if (!fn)
2289 goto out;
2290
2291 noflags |= RTF_CACHE;
2292 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2293 if (rt->dst.dev->ifindex != dev->ifindex)
2294 continue;
2295 if ((rt->rt6i_flags & flags) != flags)
2296 continue;
2297 if ((rt->rt6i_flags & noflags) != 0)
2298 continue;
2299 dst_hold(&rt->dst);
2300 break;
2301 }
2302 out:
2303 read_unlock_bh(&table->tb6_lock);
2304 return rt;
2305 }
2306
2307
2308 /* Create "default" multicast route to the interface */
2309
2310 static void addrconf_add_mroute(struct net_device *dev)
2311 {
2312 struct fib6_config cfg = {
2313 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2314 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2315 .fc_ifindex = dev->ifindex,
2316 .fc_dst_len = 8,
2317 .fc_flags = RTF_UP,
2318 .fc_nlinfo.nl_net = dev_net(dev),
2319 };
2320
2321 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2322
2323 ip6_route_add(&cfg);
2324 }
2325
2326 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2327 {
2328 struct inet6_dev *idev;
2329
2330 ASSERT_RTNL();
2331
2332 idev = ipv6_find_idev(dev);
2333 if (!idev)
2334 return ERR_PTR(-ENOBUFS);
2335
2336 if (idev->cnf.disable_ipv6)
2337 return ERR_PTR(-EACCES);
2338
2339 /* Add default multicast route */
2340 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2341 addrconf_add_mroute(dev);
2342
2343 return idev;
2344 }
2345
2346 static void manage_tempaddrs(struct inet6_dev *idev,
2347 struct inet6_ifaddr *ifp,
2348 __u32 valid_lft, __u32 prefered_lft,
2349 bool create, unsigned long now)
2350 {
2351 u32 flags;
2352 struct inet6_ifaddr *ift;
2353
2354 read_lock_bh(&idev->lock);
2355 /* update all temporary addresses in the list */
2356 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2357 int age, max_valid, max_prefered;
2358
2359 if (ifp != ift->ifpub)
2360 continue;
2361
2362 /* RFC 4941 section 3.3:
2363 * If a received option will extend the lifetime of a public
2364 * address, the lifetimes of temporary addresses should
2365 * be extended, subject to the overall constraint that no
2366 * temporary addresses should ever remain "valid" or "preferred"
2367 * for a time longer than (TEMP_VALID_LIFETIME) or
2368 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2369 */
2370 age = (now - ift->cstamp) / HZ;
2371 max_valid = idev->cnf.temp_valid_lft - age;
2372 if (max_valid < 0)
2373 max_valid = 0;
2374
2375 max_prefered = idev->cnf.temp_prefered_lft -
2376 idev->desync_factor - age;
2377 if (max_prefered < 0)
2378 max_prefered = 0;
2379
2380 if (valid_lft > max_valid)
2381 valid_lft = max_valid;
2382
2383 if (prefered_lft > max_prefered)
2384 prefered_lft = max_prefered;
2385
2386 spin_lock(&ift->lock);
2387 flags = ift->flags;
2388 ift->valid_lft = valid_lft;
2389 ift->prefered_lft = prefered_lft;
2390 ift->tstamp = now;
2391 if (prefered_lft > 0)
2392 ift->flags &= ~IFA_F_DEPRECATED;
2393
2394 spin_unlock(&ift->lock);
2395 if (!(flags&IFA_F_TENTATIVE))
2396 ipv6_ifa_notify(0, ift);
2397 }
2398
2399 if ((create || list_empty(&idev->tempaddr_list)) &&
2400 idev->cnf.use_tempaddr > 0) {
2401 /* When a new public address is created as described
2402 * in [ADDRCONF], also create a new temporary address.
2403 * Also create one if temporary addresses are enabled but
2404 * none currently exists.
2405 */
2406 read_unlock_bh(&idev->lock);
2407 ipv6_create_tempaddr(ifp, NULL);
2408 } else {
2409 read_unlock_bh(&idev->lock);
2410 }
2411 }
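/*
 * manage_tempaddrs() caps the lifetimes it copies into each temporary
 * address so that, per RFC 4941 3.3, a temporary address never stays
 * valid longer than temp_valid_lft and never stays preferred longer
 * than temp_prefered_lft - desync_factor, both counted from the
 * address' creation time.  Illustrative example with the default
 * temp_valid_lft of 7 days: a temporary address that is already 2 days
 * old can have its valid lifetime extended to at most 5 more days, no
 * matter how large the advertised lifetime is.
 */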
2412
2413 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2414 {
2415 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2416 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2417 }
2418
2419 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2420 const struct prefix_info *pinfo,
2421 struct inet6_dev *in6_dev,
2422 const struct in6_addr *addr, int addr_type,
2423 u32 addr_flags, bool sllao, bool tokenized,
2424 __u32 valid_lft, u32 prefered_lft)
2425 {
2426 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2427 int create = 0, update_lft = 0;
2428
2429 if (!ifp && valid_lft) {
2430 int max_addresses = in6_dev->cnf.max_addresses;
2431
2432 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2433 if (in6_dev->cnf.optimistic_dad &&
2434 !net->ipv6.devconf_all->forwarding && sllao)
2435 addr_flags |= IFA_F_OPTIMISTIC;
2436 #endif
2437
2438 /* Do not allow the creation of too many autoconfigured
2439 * addresses; that would be too easy a way to crash the kernel.
2440 */
2441 if (!max_addresses ||
2442 ipv6_count_addresses(in6_dev) < max_addresses)
2443 ifp = ipv6_add_addr(in6_dev, addr, NULL,
2444 pinfo->prefix_len,
2445 addr_type&IPV6_ADDR_SCOPE_MASK,
2446 addr_flags, valid_lft,
2447 prefered_lft);
2448
2449 if (IS_ERR_OR_NULL(ifp))
2450 return -1;
2451
2452 update_lft = 0;
2453 create = 1;
2454 spin_lock_bh(&ifp->lock);
2455 ifp->flags |= IFA_F_MANAGETEMPADDR;
2456 ifp->cstamp = jiffies;
2457 ifp->tokenized = tokenized;
2458 spin_unlock_bh(&ifp->lock);
2459 addrconf_dad_start(ifp);
2460 }
2461
2462 if (ifp) {
2463 u32 flags;
2464 unsigned long now;
2465 u32 stored_lft;
2466
2467 /* update lifetime (RFC2462 5.5.3 e) */
2468 spin_lock_bh(&ifp->lock);
2469 now = jiffies;
2470 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2471 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2472 else
2473 stored_lft = 0;
2474 if (!update_lft && !create && stored_lft) {
2475 const u32 minimum_lft = min_t(u32,
2476 stored_lft, MIN_VALID_LIFETIME);
2477 valid_lft = max(valid_lft, minimum_lft);
2478
2479 /* RFC4862 Section 5.5.3e:
2480 * "Note that the preferred lifetime of the
2481 * corresponding address is always reset to
2482 * the Preferred Lifetime in the received
2483 * Prefix Information option, regardless of
2484 * whether the valid lifetime is also reset or
2485 * ignored."
2486 *
2487 * So we should always update prefered_lft here.
2488 */
2489 update_lft = 1;
2490 }
2491
2492 if (update_lft) {
2493 ifp->valid_lft = valid_lft;
2494 ifp->prefered_lft = prefered_lft;
2495 ifp->tstamp = now;
2496 flags = ifp->flags;
2497 ifp->flags &= ~IFA_F_DEPRECATED;
2498 spin_unlock_bh(&ifp->lock);
2499
2500 if (!(flags&IFA_F_TENTATIVE))
2501 ipv6_ifa_notify(0, ifp);
2502 } else
2503 spin_unlock_bh(&ifp->lock);
2504
2505 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2506 create, now);
2507
2508 in6_ifa_put(ifp);
2509 addrconf_verify();
2510 }
2511
2512 return 0;
2513 }
2514 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
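/*
 * The lifetime update above implements the RFC 4862 5.5.3(e) "two hour"
 * rule: the accepted valid lifetime is
 *     max(received_lft, min(remaining_lft, MIN_VALID_LIFETIME))
 * so an unauthenticated RA cannot reduce the remaining validity below
 * two hours (or below what is already left, if that is less).
 * Illustrative example: an address with ten hours left and an
 * advertised valid lifetime of 30 minutes ends up with a two hour
 * valid lifetime.
 */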
2515
2516 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2517 {
2518 struct prefix_info *pinfo;
2519 __u32 valid_lft;
2520 __u32 prefered_lft;
2521 int addr_type, err;
2522 u32 addr_flags = 0;
2523 struct inet6_dev *in6_dev;
2524 struct net *net = dev_net(dev);
2525
2526 pinfo = (struct prefix_info *) opt;
2527
2528 if (len < sizeof(struct prefix_info)) {
2529 ADBG("addrconf: prefix option too short\n");
2530 return;
2531 }
2532
2533 /*
2534 * Validation checks ([ADDRCONF], page 19)
2535 */
2536
2537 addr_type = ipv6_addr_type(&pinfo->prefix);
2538
2539 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2540 return;
2541
2542 valid_lft = ntohl(pinfo->valid);
2543 prefered_lft = ntohl(pinfo->prefered);
2544
2545 if (prefered_lft > valid_lft) {
2546 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2547 return;
2548 }
2549
2550 in6_dev = in6_dev_get(dev);
2551
2552 if (!in6_dev) {
2553 net_dbg_ratelimited("addrconf: device %s not configured\n",
2554 dev->name);
2555 return;
2556 }
2557
2558 /*
2559 * Two things going on here:
2560 * 1) Add routes for on-link prefixes
2561 * 2) Configure prefixes with the auto flag set
2562 */
2563
2564 if (pinfo->onlink) {
2565 struct rt6_info *rt;
2566 unsigned long rt_expires;
2567
2568 /* Avoid arithmetic overflow. Really, we could
2569 * save rt_expires in seconds, likely valid_lft,
2570 * but that would require a division in fib gc, which is
2571 * not good.
2572 */
2573 if (HZ > USER_HZ)
2574 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2575 else
2576 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2577
2578 if (addrconf_finite_timeout(rt_expires))
2579 rt_expires *= HZ;
2580
2581 rt = addrconf_get_prefix_route(&pinfo->prefix,
2582 pinfo->prefix_len,
2583 dev,
2584 RTF_ADDRCONF | RTF_PREFIX_RT,
2585 RTF_GATEWAY | RTF_DEFAULT);
2586
2587 if (rt) {
2588 /* Autoconf prefix route */
2589 if (valid_lft == 0) {
2590 ip6_del_rt(rt);
2591 rt = NULL;
2592 } else if (addrconf_finite_timeout(rt_expires)) {
2593 /* not infinity */
2594 rt6_set_expires(rt, jiffies + rt_expires);
2595 } else {
2596 rt6_clean_expires(rt);
2597 }
2598 } else if (valid_lft) {
2599 clock_t expires = 0;
2600 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2601 if (addrconf_finite_timeout(rt_expires)) {
2602 /* not infinity */
2603 flags |= RTF_EXPIRES;
2604 expires = jiffies_to_clock_t(rt_expires);
2605 }
2606 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2607 dev, expires, flags);
2608 }
2609 ip6_rt_put(rt);
2610 }
2611
2612 /* Try to figure out our local address for this prefix */
2613
2614 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2615 struct in6_addr addr;
2616 bool tokenized = false, dev_addr_generated = false;
2617
2618 if (pinfo->prefix_len == 64) {
2619 memcpy(&addr, &pinfo->prefix, 8);
2620
2621 if (!ipv6_addr_any(&in6_dev->token)) {
2622 read_lock_bh(&in6_dev->lock);
2623 memcpy(addr.s6_addr + 8,
2624 in6_dev->token.s6_addr + 8, 8);
2625 read_unlock_bh(&in6_dev->lock);
2626 tokenized = true;
2627 } else if (is_addr_mode_generate_stable(in6_dev) &&
2628 !ipv6_generate_stable_address(&addr, 0,
2629 in6_dev)) {
2630 addr_flags |= IFA_F_STABLE_PRIVACY;
2631 goto ok;
2632 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2633 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2634 goto put;
2635 } else {
2636 dev_addr_generated = true;
2637 }
2638 goto ok;
2639 }
2640 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2641 pinfo->prefix_len);
2642 goto put;
2643
2644 ok:
2645 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2646 &addr, addr_type,
2647 addr_flags, sllao,
2648 tokenized, valid_lft,
2649 prefered_lft);
2650 if (err)
2651 goto put;
2652
2653 /* Ignore the error case here because the previous prefix add-addr
2654 * was successful and will be notified.
2655 */
2656 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2657 addr_type, addr_flags, sllao,
2658 tokenized, valid_lft,
2659 prefered_lft,
2660 dev_addr_generated);
2661 }
2662 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2663 put:
2664 in6_dev_put(in6_dev);
2665 }
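/*
 * Summary of addrconf_prefix_rcv(): for an on-link prefix the kernel
 * installs or refreshes a prefix route whose expiry tracks the valid
 * lifetime (a zero lifetime deletes it); for an autonomous /64 prefix
 * it forms an address using, in order of preference, the per-device
 * token, a stable-privacy identifier, or an EUI-64 or inherited
 * identifier, and hands it to addrconf_prefix_rcv_add_addr() above.
 */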
2666
2667 /*
2668 * Set destination address.
2669 * Special case for SIT interfaces where we create a new "virtual"
2670 * device.
2671 */
2672 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2673 {
2674 struct in6_ifreq ireq;
2675 struct net_device *dev;
2676 int err = -EINVAL;
2677
2678 rtnl_lock();
2679
2680 err = -EFAULT;
2681 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2682 goto err_exit;
2683
2684 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2685
2686 err = -ENODEV;
2687 if (!dev)
2688 goto err_exit;
2689
2690 #if IS_ENABLED(CONFIG_IPV6_SIT)
2691 if (dev->type == ARPHRD_SIT) {
2692 const struct net_device_ops *ops = dev->netdev_ops;
2693 struct ifreq ifr;
2694 struct ip_tunnel_parm p;
2695
2696 err = -EADDRNOTAVAIL;
2697 if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
2698 goto err_exit;
2699
2700 memset(&p, 0, sizeof(p));
2701 p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
2702 p.iph.saddr = 0;
2703 p.iph.version = 4;
2704 p.iph.ihl = 5;
2705 p.iph.protocol = IPPROTO_IPV6;
2706 p.iph.ttl = 64;
2707 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
2708
2709 if (ops->ndo_do_ioctl) {
2710 mm_segment_t oldfs = get_fs();
2711
2712 set_fs(KERNEL_DS);
2713 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
2714 set_fs(oldfs);
2715 } else
2716 err = -EOPNOTSUPP;
2717
2718 if (err == 0) {
2719 err = -ENOBUFS;
2720 dev = __dev_get_by_name(net, p.name);
2721 if (!dev)
2722 goto err_exit;
2723 err = dev_open(dev);
2724 }
2725 }
2726 #endif
2727
2728 err_exit:
2729 rtnl_unlock();
2730 return err;
2731 }
2732
2733 static int ipv6_mc_config(struct sock *sk, bool join,
2734 const struct in6_addr *addr, int ifindex)
2735 {
2736 int ret;
2737
2738 ASSERT_RTNL();
2739
2740 lock_sock(sk);
2741 if (join)
2742 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2743 else
2744 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2745 release_sock(sk);
2746
2747 return ret;
2748 }
2749
2750 /*
2751 * Manual configuration of address on an interface
2752 */
2753 static int inet6_addr_add(struct net *net, int ifindex,
2754 const struct in6_addr *pfx,
2755 const struct in6_addr *peer_pfx,
2756 unsigned int plen, __u32 ifa_flags,
2757 __u32 prefered_lft, __u32 valid_lft)
2758 {
2759 struct inet6_ifaddr *ifp;
2760 struct inet6_dev *idev;
2761 struct net_device *dev;
2762 unsigned long timeout;
2763 clock_t expires;
2764 int scope;
2765 u32 flags;
2766
2767 ASSERT_RTNL();
2768
2769 if (plen > 128)
2770 return -EINVAL;
2771
2772 /* check the lifetime */
2773 if (!valid_lft || prefered_lft > valid_lft)
2774 return -EINVAL;
2775
2776 if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
2777 return -EINVAL;
2778
2779 dev = __dev_get_by_index(net, ifindex);
2780 if (!dev)
2781 return -ENODEV;
2782
2783 idev = addrconf_add_dev(dev);
2784 if (IS_ERR(idev))
2785 return PTR_ERR(idev);
2786
2787 if (ifa_flags & IFA_F_MCAUTOJOIN) {
2788 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2789 true, pfx, ifindex);
2790
2791 if (ret < 0)
2792 return ret;
2793 }
2794
2795 scope = ipv6_addr_scope(pfx);
2796
2797 timeout = addrconf_timeout_fixup(valid_lft, HZ);
2798 if (addrconf_finite_timeout(timeout)) {
2799 expires = jiffies_to_clock_t(timeout * HZ);
2800 valid_lft = timeout;
2801 flags = RTF_EXPIRES;
2802 } else {
2803 expires = 0;
2804 flags = 0;
2805 ifa_flags |= IFA_F_PERMANENT;
2806 }
2807
2808 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
2809 if (addrconf_finite_timeout(timeout)) {
2810 if (timeout == 0)
2811 ifa_flags |= IFA_F_DEPRECATED;
2812 prefered_lft = timeout;
2813 }
2814
2815 ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
2816 valid_lft, prefered_lft);
2817
2818 if (!IS_ERR(ifp)) {
2819 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
2820 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
2821 expires, flags);
2822 }
2823
2824 /*
2825 * Note that section 3.1 of RFC 4429 indicates
2826 * that the Optimistic flag should not be set for
2827 * manually configured addresses
2828 */
2829 addrconf_dad_start(ifp);
2830 if (ifa_flags & IFA_F_MANAGETEMPADDR)
2831 manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
2832 true, jiffies);
2833 in6_ifa_put(ifp);
2834 addrconf_verify_rtnl();
2835 return 0;
2836 } else if (ifa_flags & IFA_F_MCAUTOJOIN) {
2837 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2838 false, pfx, ifindex);
2839 }
2840
2841 return PTR_ERR(ifp);
2842 }
2843
2844 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2845 const struct in6_addr *pfx, unsigned int plen)
2846 {
2847 struct inet6_ifaddr *ifp;
2848 struct inet6_dev *idev;
2849 struct net_device *dev;
2850
2851 if (plen > 128)
2852 return -EINVAL;
2853
2854 dev = __dev_get_by_index(net, ifindex);
2855 if (!dev)
2856 return -ENODEV;
2857
2858 idev = __in6_dev_get(dev);
2859 if (!idev)
2860 return -ENXIO;
2861
2862 read_lock_bh(&idev->lock);
2863 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2864 if (ifp->prefix_len == plen &&
2865 ipv6_addr_equal(pfx, &ifp->addr)) {
2866 in6_ifa_hold(ifp);
2867 read_unlock_bh(&idev->lock);
2868
2869 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2870 (ifa_flags & IFA_F_MANAGETEMPADDR))
2871 manage_tempaddrs(idev, ifp, 0, 0, false,
2872 jiffies);
2873 ipv6_del_addr(ifp);
2874 addrconf_verify_rtnl();
2875 if (ipv6_addr_is_multicast(pfx)) {
2876 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2877 false, pfx, dev->ifindex);
2878 }
2879 return 0;
2880 }
2881 }
2882 read_unlock_bh(&idev->lock);
2883 return -EADDRNOTAVAIL;
2884 }
2885
2886
2887 int addrconf_add_ifaddr(struct net *net, void __user *arg)
2888 {
2889 struct in6_ifreq ireq;
2890 int err;
2891
2892 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2893 return -EPERM;
2894
2895 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2896 return -EFAULT;
2897
2898 rtnl_lock();
2899 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL,
2900 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2901 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2902 rtnl_unlock();
2903 return err;
2904 }
2905
2906 int addrconf_del_ifaddr(struct net *net, void __user *arg)
2907 {
2908 struct in6_ifreq ireq;
2909 int err;
2910
2911 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2912 return -EPERM;
2913
2914 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2915 return -EFAULT;
2916
2917 rtnl_lock();
2918 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
2919 ireq.ifr6_prefixlen);
2920 rtnl_unlock();
2921 return err;
2922 }
2923
2924 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2925 int plen, int scope)
2926 {
2927 struct inet6_ifaddr *ifp;
2928
2929 ifp = ipv6_add_addr(idev, addr, NULL, plen,
2930 scope, IFA_F_PERMANENT,
2931 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2932 if (!IS_ERR(ifp)) {
2933 spin_lock_bh(&ifp->lock);
2934 ifp->flags &= ~IFA_F_TENTATIVE;
2935 spin_unlock_bh(&ifp->lock);
2936 rt_genid_bump_ipv6(dev_net(idev->dev));
2937 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2938 in6_ifa_put(ifp);
2939 }
2940 }
2941
2942 #if IS_ENABLED(CONFIG_IPV6_SIT)
2943 static void sit_add_v4_addrs(struct inet6_dev *idev)
2944 {
2945 struct in6_addr addr;
2946 struct net_device *dev;
2947 struct net *net = dev_net(idev->dev);
2948 int scope, plen;
2949 u32 pflags = 0;
2950
2951 ASSERT_RTNL();
2952
2953 memset(&addr, 0, sizeof(struct in6_addr));
2954 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
2955
2956 if (idev->dev->flags&IFF_POINTOPOINT) {
2957 addr.s6_addr32[0] = htonl(0xfe800000);
2958 scope = IFA_LINK;
2959 plen = 64;
2960 } else {
2961 scope = IPV6_ADDR_COMPATv4;
2962 plen = 96;
2963 pflags |= RTF_NONEXTHOP;
2964 }
2965
2966 if (addr.s6_addr32[3]) {
2967 add_addr(idev, &addr, plen, scope);
2968 addrconf_prefix_route(&addr, plen, idev->dev, 0, pflags);
2969 return;
2970 }
2971
2972 for_each_netdev(net, dev) {
2973 struct in_device *in_dev = __in_dev_get_rtnl(dev);
2974 if (in_dev && (dev->flags & IFF_UP)) {
2975 struct in_ifaddr *ifa;
2976
2977 int flag = scope;
2978
2979 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
2980
2981 addr.s6_addr32[3] = ifa->ifa_local;
2982
2983 if (ifa->ifa_scope == RT_SCOPE_LINK)
2984 continue;
2985 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
2986 if (idev->dev->flags&IFF_POINTOPOINT)
2987 continue;
2988 flag |= IFA_HOST;
2989 }
2990
2991 add_addr(idev, &addr, plen, flag);
2992 addrconf_prefix_route(&addr, plen, idev->dev, 0,
2993 pflags);
2994 }
2995 }
2996 }
2997 }
2998 #endif
2999
3000 static void init_loopback(struct net_device *dev)
3001 {
3002 struct inet6_dev *idev;
3003 struct net_device *sp_dev;
3004 struct inet6_ifaddr *sp_ifa;
3005 struct rt6_info *sp_rt;
3006
3007 /* ::1 */
3008
3009 ASSERT_RTNL();
3010
3011 idev = ipv6_find_idev(dev);
3012 if (!idev) {
3013 pr_debug("%s: add_dev failed\n", __func__);
3014 return;
3015 }
3016
3017 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3018
3019 /* Add routes to other interfaces' IPv6 addresses */
3020 for_each_netdev(dev_net(dev), sp_dev) {
3021 if (!strcmp(sp_dev->name, dev->name))
3022 continue;
3023
3024 idev = __in6_dev_get(sp_dev);
3025 if (!idev)
3026 continue;
3027
3028 read_lock_bh(&idev->lock);
3029 list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
3030
3031 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
3032 continue;
3033
3034 if (sp_ifa->rt) {
3035 /* This dst was added to the garbage list when the
3036 * lo device went down; release the obsolete dst and
3037 * allocate a new route for the ifa.
3038 */
3039 if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
3040 ip6_rt_put(sp_ifa->rt);
3041 sp_ifa->rt = NULL;
3042 } else {
3043 continue;
3044 }
3045 }
3046
3047 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false);
3048
3049 /* Failure cases are ignored */
3050 if (!IS_ERR(sp_rt)) {
3051 sp_ifa->rt = sp_rt;
3052 ip6_ins_rt(sp_rt);
3053 }
3054 }
3055 read_unlock_bh(&idev->lock);
3056 }
3057 }
3058
3059 void addrconf_add_linklocal(struct inet6_dev *idev,
3060 const struct in6_addr *addr, u32 flags)
3061 {
3062 struct inet6_ifaddr *ifp;
3063 u32 addr_flags = flags | IFA_F_PERMANENT;
3064
3065 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3066 if (idev->cnf.optimistic_dad &&
3067 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3068 addr_flags |= IFA_F_OPTIMISTIC;
3069 #endif
3070
3071 ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
3072 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
3073 if (!IS_ERR(ifp)) {
3074 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
3075 addrconf_dad_start(ifp);
3076 in6_ifa_put(ifp);
3077 }
3078 }
3079 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3080
3081 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3082 {
3083 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3084 return true;
3085
3086 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3087 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3088 return true;
3089
3090 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3091 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3092 return true;
3093
3094 return false;
3095 }
3096
3097 static int ipv6_generate_stable_address(struct in6_addr *address,
3098 u8 dad_count,
3099 const struct inet6_dev *idev)
3100 {
3101 static DEFINE_SPINLOCK(lock);
3102 static __u32 digest[SHA_DIGEST_WORDS];
3103 static __u32 workspace[SHA_WORKSPACE_WORDS];
3104
3105 static union {
3106 char __data[SHA_MESSAGE_BYTES];
3107 struct {
3108 struct in6_addr secret;
3109 __be32 prefix[2];
3110 unsigned char hwaddr[MAX_ADDR_LEN];
3111 u8 dad_count;
3112 } __packed;
3113 } data;
3114
3115 struct in6_addr secret;
3116 struct in6_addr temp;
3117 struct net *net = dev_net(idev->dev);
3118
3119 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3120
3121 if (idev->cnf.stable_secret.initialized)
3122 secret = idev->cnf.stable_secret.secret;
3123 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3124 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3125 else
3126 return -1;
3127
3128 retry:
3129 spin_lock_bh(&lock);
3130
3131 sha_init(digest);
3132 memset(&data, 0, sizeof(data));
3133 memset(workspace, 0, sizeof(workspace));
3134 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3135 data.prefix[0] = address->s6_addr32[0];
3136 data.prefix[1] = address->s6_addr32[1];
3137 data.secret = secret;
3138 data.dad_count = dad_count;
3139
3140 sha_transform(digest, data.__data, workspace);
3141
3142 temp = *address;
3143 temp.s6_addr32[2] = (__force __be32)digest[0];
3144 temp.s6_addr32[3] = (__force __be32)digest[1];
3145
3146 spin_unlock_bh(&lock);
3147
3148 if (ipv6_reserved_interfaceid(temp)) {
3149 dad_count++;
3150 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3151 return -1;
3152 goto retry;
3153 }
3154
3155 *address = temp;
3156 return 0;
3157 }
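/*
 * ipv6_generate_stable_address() is the RFC 7217 scheme: a SHA-1 digest
 * over (stable secret, prefix, permanent hardware address, DAD counter)
 * supplies the low 64 bits of the address.  If the result collides with
 * a reserved interface identifier, the DAD counter is bumped and the
 * hash recomputed, up to the idgen_retries sysctl limit.
 */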
3158
3159 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3160 {
3161 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3162
3163 if (s->initialized)
3164 return;
3165 s = &idev->cnf.stable_secret;
3166 get_random_bytes(&s->secret, sizeof(s->secret));
3167 s->initialized = true;
3168 }
3169
3170 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3171 {
3172 struct in6_addr addr;
3173
3174 /* no link local addresses on L3 master devices */
3175 if (netif_is_l3_master(idev->dev))
3176 return;
3177
3178 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3179
3180 switch (idev->cnf.addr_gen_mode) {
3181 case IN6_ADDR_GEN_MODE_RANDOM:
3182 ipv6_gen_mode_random_init(idev);
3183 /* fallthrough */
3184 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3185 if (!ipv6_generate_stable_address(&addr, 0, idev))
3186 addrconf_add_linklocal(idev, &addr,
3187 IFA_F_STABLE_PRIVACY);
3188 else if (prefix_route)
3189 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3190 break;
3191 case IN6_ADDR_GEN_MODE_EUI64:
3192 /* addrconf_add_linklocal also adds a prefix_route and we
3193 * only need to care about prefix routes if ipv6_generate_eui64
3194 * couldn't generate one.
3195 */
3196 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3197 addrconf_add_linklocal(idev, &addr, 0);
3198 else if (prefix_route)
3199 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3200 break;
3201 case IN6_ADDR_GEN_MODE_NONE:
3202 default:
3203 /* will not add any link local address */
3204 break;
3205 }
3206 }
3207
3208 static void addrconf_dev_config(struct net_device *dev)
3209 {
3210 struct inet6_dev *idev;
3211
3212 ASSERT_RTNL();
3213
3214 if ((dev->type != ARPHRD_ETHER) &&
3215 (dev->type != ARPHRD_FDDI) &&
3216 (dev->type != ARPHRD_ARCNET) &&
3217 (dev->type != ARPHRD_INFINIBAND) &&
3218 (dev->type != ARPHRD_IEEE1394) &&
3219 (dev->type != ARPHRD_TUNNEL6) &&
3220 (dev->type != ARPHRD_6LOWPAN) &&
3221 (dev->type != ARPHRD_IP6GRE) &&
3222 (dev->type != ARPHRD_IPGRE) &&
3223 (dev->type != ARPHRD_TUNNEL) &&
3224 (dev->type != ARPHRD_NONE)) {
3225 /* Alas, we support autoconfiguration only for the link types listed above. */
3226 return;
3227 }
3228
3229 idev = addrconf_add_dev(dev);
3230 if (IS_ERR(idev))
3231 return;
3232
3233 /* this device type has no EUI support */
3234 if (dev->type == ARPHRD_NONE &&
3235 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3236 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3237
3238 addrconf_addr_gen(idev, false);
3239 }
3240
3241 #if IS_ENABLED(CONFIG_IPV6_SIT)
3242 static void addrconf_sit_config(struct net_device *dev)
3243 {
3244 struct inet6_dev *idev;
3245
3246 ASSERT_RTNL();
3247
3248 /*
3249 * Configure the tunnel with one of our IPv4
3250 * addresses... we should configure all of
3251 * our v4 addrs in the tunnel
3252 */
3253
3254 idev = ipv6_find_idev(dev);
3255 if (!idev) {
3256 pr_debug("%s: add_dev failed\n", __func__);
3257 return;
3258 }
3259
3260 if (dev->priv_flags & IFF_ISATAP) {
3261 addrconf_addr_gen(idev, false);
3262 return;
3263 }
3264
3265 sit_add_v4_addrs(idev);
3266
3267 if (dev->flags&IFF_POINTOPOINT)
3268 addrconf_add_mroute(dev);
3269 }
3270 #endif
3271
3272 #if IS_ENABLED(CONFIG_NET_IPGRE)
3273 static void addrconf_gre_config(struct net_device *dev)
3274 {
3275 struct inet6_dev *idev;
3276
3277 ASSERT_RTNL();
3278
3279 idev = ipv6_find_idev(dev);
3280 if (!idev) {
3281 pr_debug("%s: add_dev failed\n", __func__);
3282 return;
3283 }
3284
3285 addrconf_addr_gen(idev, true);
3286 if (dev->flags & IFF_POINTOPOINT)
3287 addrconf_add_mroute(dev);
3288 }
3289 #endif
3290
3291 static int fixup_permanent_addr(struct inet6_dev *idev,
3292 struct inet6_ifaddr *ifp)
3293 {
3294 if (!ifp->rt) {
3295 struct rt6_info *rt;
3296
3297 rt = addrconf_dst_alloc(idev, &ifp->addr, false);
3298 if (unlikely(IS_ERR(rt)))
3299 return PTR_ERR(rt);
3300
3301 ifp->rt = rt;
3302 }
3303
3304 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3305 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3306 idev->dev, 0, 0);
3307 }
3308
3309 addrconf_dad_start(ifp);
3310
3311 return 0;
3312 }
3313
3314 static void addrconf_permanent_addr(struct net_device *dev)
3315 {
3316 struct inet6_ifaddr *ifp, *tmp;
3317 struct inet6_dev *idev;
3318
3319 idev = __in6_dev_get(dev);
3320 if (!idev)
3321 return;
3322
3323 write_lock_bh(&idev->lock);
3324
3325 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3326 if ((ifp->flags & IFA_F_PERMANENT) &&
3327 fixup_permanent_addr(idev, ifp) < 0) {
3328 write_unlock_bh(&idev->lock);
3329 ipv6_del_addr(ifp);
3330 write_lock_bh(&idev->lock);
3331
3332 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3333 idev->dev->name, &ifp->addr);
3334 }
3335 }
3336
3337 write_unlock_bh(&idev->lock);
3338 }
3339
3340 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3341 void *ptr)
3342 {
3343 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3344 struct netdev_notifier_changeupper_info *info;
3345 struct inet6_dev *idev = __in6_dev_get(dev);
3346 int run_pending = 0;
3347 int err;
3348
3349 switch (event) {
3350 case NETDEV_REGISTER:
3351 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3352 idev = ipv6_add_dev(dev);
3353 if (IS_ERR(idev))
3354 return notifier_from_errno(PTR_ERR(idev));
3355 }
3356 break;
3357
3358 case NETDEV_CHANGEMTU:
3359 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3360 if (dev->mtu < IPV6_MIN_MTU) {
3361 addrconf_ifdown(dev, 1);
3362 break;
3363 }
3364
3365 if (idev) {
3366 rt6_mtu_change(dev, dev->mtu);
3367 idev->cnf.mtu6 = dev->mtu;
3368 break;
3369 }
3370
3371 /* allocate new idev */
3372 idev = ipv6_add_dev(dev);
3373 if (IS_ERR(idev))
3374 break;
3375
3376 /* device is still not ready */
3377 if (!(idev->if_flags & IF_READY))
3378 break;
3379
3380 run_pending = 1;
3381
3382 /* fall through */
3383
3384 case NETDEV_UP:
3385 case NETDEV_CHANGE:
3386 if (dev->flags & IFF_SLAVE)
3387 break;
3388
3389 if (idev && idev->cnf.disable_ipv6)
3390 break;
3391
3392 if (event == NETDEV_UP) {
3393 /* restore routes for permanent addresses */
3394 addrconf_permanent_addr(dev);
3395
3396 if (!addrconf_qdisc_ok(dev)) {
3397 /* device is not ready yet. */
3398 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3399 dev->name);
3400 break;
3401 }
3402
3403 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3404 idev = ipv6_add_dev(dev);
3405
3406 if (!IS_ERR_OR_NULL(idev)) {
3407 idev->if_flags |= IF_READY;
3408 run_pending = 1;
3409 }
3410 } else if (event == NETDEV_CHANGE) {
3411 if (!addrconf_qdisc_ok(dev)) {
3412 /* device is still not ready. */
3413 break;
3414 }
3415
3416 if (idev) {
3417 if (idev->if_flags & IF_READY) {
3418 /* device is already configured -
3419 * but resend MLD reports, we might
3420 * have roamed and need to update
3421 * multicast snooping switches
3422 */
3423 ipv6_mc_up(idev);
3424 break;
3425 }
3426 idev->if_flags |= IF_READY;
3427 }
3428
3429 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3430 dev->name);
3431
3432 run_pending = 1;
3433 }
3434
3435 switch (dev->type) {
3436 #if IS_ENABLED(CONFIG_IPV6_SIT)
3437 case ARPHRD_SIT:
3438 addrconf_sit_config(dev);
3439 break;
3440 #endif
3441 #if IS_ENABLED(CONFIG_NET_IPGRE)
3442 case ARPHRD_IPGRE:
3443 addrconf_gre_config(dev);
3444 break;
3445 #endif
3446 case ARPHRD_LOOPBACK:
3447 init_loopback(dev);
3448 break;
3449
3450 default:
3451 addrconf_dev_config(dev);
3452 break;
3453 }
3454
3455 if (!IS_ERR_OR_NULL(idev)) {
3456 if (run_pending)
3457 addrconf_dad_run(idev);
3458
3459 /*
3460 * If the MTU changed while the interface was down, the
3461 * new MTU must be reflected in the idev and in the routes
3462 * when the interface comes back up.
3463 */
3464 if (idev->cnf.mtu6 != dev->mtu &&
3465 dev->mtu >= IPV6_MIN_MTU) {
3466 rt6_mtu_change(dev, dev->mtu);
3467 idev->cnf.mtu6 = dev->mtu;
3468 }
3469 idev->tstamp = jiffies;
3470 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3471
3472 /*
3473 * If the MTU changed while the interface was down and is now
3474 * lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3475 */
3476 if (dev->mtu < IPV6_MIN_MTU)
3477 addrconf_ifdown(dev, 1);
3478 }
3479 break;
3480
3481 case NETDEV_DOWN:
3482 case NETDEV_UNREGISTER:
3483 /*
3484 * Remove all addresses from this interface.
3485 */
3486 addrconf_ifdown(dev, event != NETDEV_DOWN);
3487 break;
3488
3489 case NETDEV_CHANGENAME:
3490 if (idev) {
3491 snmp6_unregister_dev(idev);
3492 addrconf_sysctl_unregister(idev);
3493 err = addrconf_sysctl_register(idev);
3494 if (err)
3495 return notifier_from_errno(err);
3496 err = snmp6_register_dev(idev);
3497 if (err) {
3498 addrconf_sysctl_unregister(idev);
3499 return notifier_from_errno(err);
3500 }
3501 }
3502 break;
3503
3504 case NETDEV_PRE_TYPE_CHANGE:
3505 case NETDEV_POST_TYPE_CHANGE:
3506 if (idev)
3507 addrconf_type_change(dev, event);
3508 break;
3509
3510 case NETDEV_CHANGEUPPER:
3511 info = ptr;
3512
3513 /* flush all routes if dev is linked to or unlinked from
3514 * an L3 master device (e.g., VRF)
3515 */
3516 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3517 addrconf_ifdown(dev, 0);
3518 }
3519
3520 return NOTIFY_OK;
3521 }
3522
3523 /*
3524 * addrconf module should be notified of a device going up
3525 */
3526 static struct notifier_block ipv6_dev_notf = {
3527 .notifier_call = addrconf_notify,
3528 };
3529
3530 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3531 {
3532 struct inet6_dev *idev;
3533 ASSERT_RTNL();
3534
3535 idev = __in6_dev_get(dev);
3536
3537 if (event == NETDEV_POST_TYPE_CHANGE)
3538 ipv6_mc_remap(idev);
3539 else if (event == NETDEV_PRE_TYPE_CHANGE)
3540 ipv6_mc_unmap(idev);
3541 }
3542
3543 static bool addr_is_local(const struct in6_addr *addr)
3544 {
3545 return ipv6_addr_type(addr) &
3546 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3547 }
3548
3549 static int addrconf_ifdown(struct net_device *dev, int how)
3550 {
3551 struct net *net = dev_net(dev);
3552 struct inet6_dev *idev;
3553 struct inet6_ifaddr *ifa, *tmp;
3554 struct list_head del_list;
3555 int _keep_addr;
3556 bool keep_addr;
3557 int state, i;
3558
3559 ASSERT_RTNL();
3560
3561 rt6_ifdown(net, dev);
3562 neigh_ifdown(&nd_tbl, dev);
3563
3564 idev = __in6_dev_get(dev);
3565 if (!idev)
3566 return -ENODEV;
3567
3568 /*
3569 * Step 1: remove reference to ipv6 device from parent device.
3570 * Do not dev_put!
3571 */
3572 if (how) {
3573 idev->dead = 1;
3574
3575 /* protected by rtnl_lock */
3576 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3577
3578 /* Step 1.5: remove snmp6 entry */
3579 snmp6_unregister_dev(idev);
3580
3581 }
3582
3583 /* aggregate the system setting and interface setting */
3584 _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3585 if (!_keep_addr)
3586 _keep_addr = idev->cnf.keep_addr_on_down;
3587
3588 /* combine the user config with event to determine if permanent
3589 * addresses are to be removed from address hash table
3590 */
3591 keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
3592
3593 /* Step 2: clear hash table */
3594 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3595 struct hlist_head *h = &inet6_addr_lst[i];
3596
3597 spin_lock_bh(&addrconf_hash_lock);
3598 restart:
3599 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3600 if (ifa->idev == idev) {
3601 addrconf_del_dad_work(ifa);
3602 /* combined flag + permanent flag decide if
3603 * address is retained on a down event
3604 */
3605 if (!keep_addr ||
3606 !(ifa->flags & IFA_F_PERMANENT) ||
3607 addr_is_local(&ifa->addr)) {
3608 hlist_del_init_rcu(&ifa->addr_lst);
3609 goto restart;
3610 }
3611 }
3612 }
3613 spin_unlock_bh(&addrconf_hash_lock);
3614 }
3615
3616 write_lock_bh(&idev->lock);
3617
3618 addrconf_del_rs_timer(idev);
3619
3620 /* Step 3: clear flags for stateless addrconf */
3621 if (!how)
3622 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3623
3624 /* Step 4: clear tempaddr list */
3625 while (!list_empty(&idev->tempaddr_list)) {
3626 ifa = list_first_entry(&idev->tempaddr_list,
3627 struct inet6_ifaddr, tmp_list);
3628 list_del(&ifa->tmp_list);
3629 write_unlock_bh(&idev->lock);
3630 spin_lock_bh(&ifa->lock);
3631
3632 if (ifa->ifpub) {
3633 in6_ifa_put(ifa->ifpub);
3634 ifa->ifpub = NULL;
3635 }
3636 spin_unlock_bh(&ifa->lock);
3637 in6_ifa_put(ifa);
3638 write_lock_bh(&idev->lock);
3639 }
3640
3641 /* re-combine the user config with event to determine if permanent
3642 * addresses are to be removed from the interface list
3643 */
3644 keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
3645
3646 INIT_LIST_HEAD(&del_list);
3647 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3648 struct rt6_info *rt = NULL;
3649 bool keep;
3650
3651 addrconf_del_dad_work(ifa);
3652
3653 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3654 !addr_is_local(&ifa->addr);
3655 if (!keep)
3656 list_move(&ifa->if_list, &del_list);
3657
3658 write_unlock_bh(&idev->lock);
3659 spin_lock_bh(&ifa->lock);
3660
3661 if (keep) {
3662 /* set state to skip the notifier below */
3663 state = INET6_IFADDR_STATE_DEAD;
3664 ifa->state = 0;
3665 if (!(ifa->flags & IFA_F_NODAD))
3666 ifa->flags |= IFA_F_TENTATIVE;
3667
3668 rt = ifa->rt;
3669 ifa->rt = NULL;
3670 } else {
3671 state = ifa->state;
3672 ifa->state = INET6_IFADDR_STATE_DEAD;
3673 }
3674
3675 spin_unlock_bh(&ifa->lock);
3676
3677 if (rt)
3678 ip6_del_rt(rt);
3679
3680 if (state != INET6_IFADDR_STATE_DEAD) {
3681 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3682 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3683 } else {
3684 if (idev->cnf.forwarding)
3685 addrconf_leave_anycast(ifa);
3686 addrconf_leave_solict(ifa->idev, &ifa->addr);
3687 }
3688
3689 write_lock_bh(&idev->lock);
3690 }
3691
3692 write_unlock_bh(&idev->lock);
3693
3694 /* now clean up addresses to be removed */
3695 while (!list_empty(&del_list)) {
3696 ifa = list_first_entry(&del_list,
3697 struct inet6_ifaddr, if_list);
3698 list_del(&ifa->if_list);
3699
3700 in6_ifa_put(ifa);
3701 }
3702
3703 /* Step 5: Discard anycast and multicast list */
3704 if (how) {
3705 ipv6_ac_destroy_dev(idev);
3706 ipv6_mc_destroy_dev(idev);
3707 } else {
3708 ipv6_mc_down(idev);
3709 }
3710
3711 idev->tstamp = jiffies;
3712
3713 /* Last: shut down the device (if it is being unregistered) */
3714 if (how) {
3715 addrconf_sysctl_unregister(idev);
3716 neigh_parms_release(&nd_tbl, idev->nd_parms);
3717 neigh_ifdown(&nd_tbl, dev);
3718 in6_dev_put(idev);
3719 }
3720 return 0;
3721 }
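/*
 * addrconf_ifdown() note: "how" distinguishes an administrative down
 * (0) from an unregister (non-zero).  On a plain down, permanent,
 * non-link-local addresses are kept on the interface list when
 * keep_addr_on_down is enabled; they are flagged tentative again and
 * their host routes dropped so DAD reruns when the link comes back.
 * On unregister everything, including the anycast and multicast state,
 * is torn down and the idev reference is released.
 */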
3722
3723 static void addrconf_rs_timer(unsigned long data)
3724 {
3725 struct inet6_dev *idev = (struct inet6_dev *)data;
3726 struct net_device *dev = idev->dev;
3727 struct in6_addr lladdr;
3728
3729 write_lock(&idev->lock);
3730 if (idev->dead || !(idev->if_flags & IF_READY))
3731 goto out;
3732
3733 if (!ipv6_accept_ra(idev))
3734 goto out;
3735
3736 /* Announcement received after solicitation was sent */
3737 if (idev->if_flags & IF_RA_RCVD)
3738 goto out;
3739
3740 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3741 write_unlock(&idev->lock);
3742 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3743 ndisc_send_rs(dev, &lladdr,
3744 &in6addr_linklocal_allrouters);
3745 else
3746 goto put;
3747
3748 write_lock(&idev->lock);
3749 idev->rs_interval = rfc3315_s14_backoff_update(
3750 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3751 /* The wait after the last probe can be shorter */
3752 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3753 idev->cnf.rtr_solicits) ?
3754 idev->cnf.rtr_solicit_delay :
3755 idev->rs_interval);
3756 } else {
3757 /*
3758 * Note: we no longer support the deprecated "all destinations
3759 * on-link" assumption.
3760 */
3761 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3762 }
3763
3764 out:
3765 write_unlock(&idev->lock);
3766 put:
3767 in6_dev_put(idev);
3768 }
3769
3770 /*
3771 * Duplicate Address Detection
3772 */
3773 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3774 {
3775 unsigned long rand_num;
3776 struct inet6_dev *idev = ifp->idev;
3777 u64 nonce;
3778
3779 if (ifp->flags & IFA_F_OPTIMISTIC)
3780 rand_num = 0;
3781 else
3782 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3783
3784 nonce = 0;
3785 if (idev->cnf.enhanced_dad ||
3786 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3787 do
3788 get_random_bytes(&nonce, 6);
3789 while (nonce == 0);
3790 }
3791 ifp->dad_nonce = nonce;
3792 ifp->dad_probes = idev->cnf.dad_transmits;
3793 addrconf_mod_dad_work(ifp, rand_num);
3794 }
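/*
 * addrconf_dad_kick() also implements enhanced DAD (RFC 7527): when the
 * enhanced_dad sysctl is set, a random non-zero 48-bit nonce is carried
 * in every DAD neighbour solicitation so that a node can tell a looped
 * back probe of its own from a genuine conflict.
 */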
3795
3796 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3797 {
3798 struct inet6_dev *idev = ifp->idev;
3799 struct net_device *dev = idev->dev;
3800 bool bump_id, notify = false;
3801
3802 addrconf_join_solict(dev, &ifp->addr);
3803
3804 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3805
3806 read_lock_bh(&idev->lock);
3807 spin_lock(&ifp->lock);
3808 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3809 goto out;
3810
3811 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3812 idev->cnf.accept_dad < 1 ||
3813 !(ifp->flags&IFA_F_TENTATIVE) ||
3814 ifp->flags & IFA_F_NODAD) {
3815 bump_id = ifp->flags & IFA_F_TENTATIVE;
3816 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3817 spin_unlock(&ifp->lock);
3818 read_unlock_bh(&idev->lock);
3819
3820 addrconf_dad_completed(ifp, bump_id);
3821 return;
3822 }
3823
3824 if (!(idev->if_flags & IF_READY)) {
3825 spin_unlock(&ifp->lock);
3826 read_unlock_bh(&idev->lock);
3827 /*
3828 * If the device is not ready:
3829 * - keep it tentative if it is a permanent address.
3830 * - otherwise, kill it.
3831 */
3832 in6_ifa_hold(ifp);
3833 addrconf_dad_stop(ifp, 0);
3834 return;
3835 }
3836
3837 /*
3838 * Optimistic nodes can start receiving
3839 * Frames right away
3840 */
3841 if (ifp->flags & IFA_F_OPTIMISTIC) {
3842 ip6_ins_rt(ifp->rt);
3843 if (ipv6_use_optimistic_addr(idev)) {
3844 /* Because optimistic nodes can use this address,
3845 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3846 */
3847 notify = true;
3848 }
3849 }
3850
3851 addrconf_dad_kick(ifp);
3852 out:
3853 spin_unlock(&ifp->lock);
3854 read_unlock_bh(&idev->lock);
3855 if (notify)
3856 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3857 }
3858
3859 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
3860 {
3861 bool begin_dad = false;
3862
3863 spin_lock_bh(&ifp->lock);
3864 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
3865 ifp->state = INET6_IFADDR_STATE_PREDAD;
3866 begin_dad = true;
3867 }
3868 spin_unlock_bh(&ifp->lock);
3869
3870 if (begin_dad)
3871 addrconf_mod_dad_work(ifp, 0);
3872 }
3873
3874 static void addrconf_dad_work(struct work_struct *w)
3875 {
3876 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
3877 struct inet6_ifaddr,
3878 dad_work);
3879 struct inet6_dev *idev = ifp->idev;
3880 bool bump_id, disable_ipv6 = false;
3881 struct in6_addr mcaddr;
3882
3883 enum {
3884 DAD_PROCESS,
3885 DAD_BEGIN,
3886 DAD_ABORT,
3887 } action = DAD_PROCESS;
3888
3889 rtnl_lock();
3890
3891 spin_lock_bh(&ifp->lock);
3892 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
3893 action = DAD_BEGIN;
3894 ifp->state = INET6_IFADDR_STATE_DAD;
3895 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3896 action = DAD_ABORT;
3897 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3898
3899 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
3900 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
3901 struct in6_addr addr;
3902
3903 addr.s6_addr32[0] = htonl(0xfe800000);
3904 addr.s6_addr32[1] = 0;
3905
3906 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
3907 ipv6_addr_equal(&ifp->addr, &addr)) {
3908 /* DAD failed for link-local based on MAC */
3909 idev->cnf.disable_ipv6 = 1;
3910
3911 pr_info("%s: IPv6 being disabled!\n",
3912 ifp->idev->dev->name);
3913 disable_ipv6 = true;
3914 }
3915 }
3916 }
3917 spin_unlock_bh(&ifp->lock);
3918
3919 if (action == DAD_BEGIN) {
3920 addrconf_dad_begin(ifp);
3921 goto out;
3922 } else if (action == DAD_ABORT) {
3923 in6_ifa_hold(ifp);
3924 addrconf_dad_stop(ifp, 1);
3925 if (disable_ipv6)
3926 addrconf_ifdown(idev->dev, 0);
3927 goto out;
3928 }
3929
3930 if (!ifp->dad_probes && addrconf_dad_end(ifp))
3931 goto out;
3932
3933 write_lock_bh(&idev->lock);
3934 if (idev->dead || !(idev->if_flags & IF_READY)) {
3935 write_unlock_bh(&idev->lock);
3936 goto out;
3937 }
3938
3939 spin_lock(&ifp->lock);
3940 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
3941 spin_unlock(&ifp->lock);
3942 write_unlock_bh(&idev->lock);
3943 goto out;
3944 }
3945
3946 if (ifp->dad_probes == 0) {
3947 /*
3948 * DAD was successful
3949 */
3950
3951 bump_id = ifp->flags & IFA_F_TENTATIVE;
3952 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3953 spin_unlock(&ifp->lock);
3954 write_unlock_bh(&idev->lock);
3955
3956 addrconf_dad_completed(ifp, bump_id);
3957
3958 goto out;
3959 }
3960
3961 ifp->dad_probes--;
3962 addrconf_mod_dad_work(ifp,
3963 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
3964 spin_unlock(&ifp->lock);
3965 write_unlock_bh(&idev->lock);
3966
3967 /* send a neighbour solicitation for our addr */
3968 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
3969 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
3970 ifp->dad_nonce);
3971 out:
3972 in6_ifa_put(ifp);
3973 rtnl_unlock();
3974 }
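/*
 * The DAD worker above drives a small state machine: PREDAD starts DAD
 * (join the solicited-node group, pick the probe count and nonce),
 * ERRDAD aborts it after a reported duplicate, and in the normal case
 * one neighbour solicitation is sent per RETRANS_TIME until dad_probes
 * reaches zero, at which point the address loses IFA_F_TENTATIVE and
 * addrconf_dad_completed() announces it.
 */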
3975
3976 /* ifp->idev must be at least read locked */
3977 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
3978 {
3979 struct inet6_ifaddr *ifpiter;
3980 struct inet6_dev *idev = ifp->idev;
3981
3982 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
3983 if (ifpiter->scope > IFA_LINK)
3984 break;
3985 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
3986 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
3987 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
3988 IFA_F_PERMANENT)
3989 return false;
3990 }
3991 return true;
3992 }
3993
3994 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
3995 {
3996 struct net_device *dev = ifp->idev->dev;
3997 struct in6_addr lladdr;
3998 bool send_rs, send_mld;
3999
4000 addrconf_del_dad_work(ifp);
4001
4002 /*
4003 * Configure the address for reception. Now it is valid.
4004 */
4005
4006 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4007
4008 /* If added prefix is link local and we are prepared to process
4009 router advertisements, start sending router solicitations.
4010 */
4011
4012 read_lock_bh(&ifp->idev->lock);
4013 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4014 send_rs = send_mld &&
4015 ipv6_accept_ra(ifp->idev) &&
4016 ifp->idev->cnf.rtr_solicits != 0 &&
4017 (dev->flags&IFF_LOOPBACK) == 0;
4018 read_unlock_bh(&ifp->idev->lock);
4019
4020 /* While DAD is in progress, the MLD report's source address is the
4021 * unspecified address. Resend it with the proper link-local address now.
4022 */
4023 if (send_mld)
4024 ipv6_mc_dad_complete(ifp->idev);
4025
4026 if (send_rs) {
4027 /*
4028 * If a host has already performed a random delay
4029 * [...] as part of DAD [...] there is no need
4030 * to delay again before sending the first RS
4031 */
4032 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4033 return;
4034 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4035
4036 write_lock_bh(&ifp->idev->lock);
4037 spin_lock(&ifp->lock);
4038 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4039 ifp->idev->cnf.rtr_solicit_interval);
4040 ifp->idev->rs_probes = 1;
4041 ifp->idev->if_flags |= IF_RS_SENT;
4042 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4043 spin_unlock(&ifp->lock);
4044 write_unlock_bh(&ifp->idev->lock);
4045 }
4046
4047 if (bump_id)
4048 rt_genid_bump_ipv6(dev_net(dev));
4049
4050 /* Make sure that a new temporary address will be created
4051 * before this temporary address becomes deprecated.
4052 */
4053 if (ifp->flags & IFA_F_TEMPORARY)
4054 addrconf_verify_rtnl();
4055 }
4056
4057 static void addrconf_dad_run(struct inet6_dev *idev)
4058 {
4059 struct inet6_ifaddr *ifp;
4060
4061 read_lock_bh(&idev->lock);
4062 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4063 spin_lock(&ifp->lock);
4064 if (ifp->flags & IFA_F_TENTATIVE &&
4065 ifp->state == INET6_IFADDR_STATE_DAD)
4066 addrconf_dad_kick(ifp);
4067 spin_unlock(&ifp->lock);
4068 }
4069 read_unlock_bh(&idev->lock);
4070 }
4071
4072 #ifdef CONFIG_PROC_FS
4073 struct if6_iter_state {
4074 struct seq_net_private p;
4075 int bucket;
4076 int offset;
4077 };
4078
4079 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4080 {
4081 struct inet6_ifaddr *ifa = NULL;
4082 struct if6_iter_state *state = seq->private;
4083 struct net *net = seq_file_net(seq);
4084 int p = 0;
4085
4086 /* initial bucket if pos is 0 */
4087 if (pos == 0) {
4088 state->bucket = 0;
4089 state->offset = 0;
4090 }
4091
4092 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4093 hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
4094 addr_lst) {
4095 if (!net_eq(dev_net(ifa->idev->dev), net))
4096 continue;
4097 /* sync with offset */
4098 if (p < state->offset) {
4099 p++;
4100 continue;
4101 }
4102 state->offset++;
4103 return ifa;
4104 }
4105
4106 /* prepare for next bucket */
4107 state->offset = 0;
4108 p = 0;
4109 }
4110 return NULL;
4111 }
4112
4113 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4114 struct inet6_ifaddr *ifa)
4115 {
4116 struct if6_iter_state *state = seq->private;
4117 struct net *net = seq_file_net(seq);
4118
4119 hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
4120 if (!net_eq(dev_net(ifa->idev->dev), net))
4121 continue;
4122 state->offset++;
4123 return ifa;
4124 }
4125
4126 while (++state->bucket < IN6_ADDR_HSIZE) {
4127 state->offset = 0;
4128 hlist_for_each_entry_rcu_bh(ifa,
4129 &inet6_addr_lst[state->bucket], addr_lst) {
4130 if (!net_eq(dev_net(ifa->idev->dev), net))
4131 continue;
4132 state->offset++;
4133 return ifa;
4134 }
4135 }
4136
4137 return NULL;
4138 }
4139
4140 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4141 __acquires(rcu_bh)
4142 {
4143 rcu_read_lock_bh();
4144 return if6_get_first(seq, *pos);
4145 }
4146
4147 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4148 {
4149 struct inet6_ifaddr *ifa;
4150
4151 ifa = if6_get_next(seq, v);
4152 ++*pos;
4153 return ifa;
4154 }
4155
4156 static void if6_seq_stop(struct seq_file *seq, void *v)
4157 __releases(rcu_bh)
4158 {
4159 rcu_read_unlock_bh();
4160 }
4161
4162 static int if6_seq_show(struct seq_file *seq, void *v)
4163 {
4164 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4165 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4166 &ifp->addr,
4167 ifp->idev->dev->ifindex,
4168 ifp->prefix_len,
4169 ifp->scope,
4170 (u8) ifp->flags,
4171 ifp->idev->dev->name);
4172 return 0;
4173 }
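
/* A minimal user-space sketch (kept under "#if 0" since it is not kernel
 * code and is not meant to be built with this file) of parsing the
 * /proc/net/if_inet6 lines produced by if6_seq_show() above.  The field
 * order and widths follow the seq_printf() format string; buffer sizes
 * and the output layout are assumptions.
 */
#if 0
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	char addr_hex[33], devname[32], buf[128], pretty[INET6_ADDRSTRLEN];
	unsigned int ifindex, plen, scope, flags;
	struct in6_addr addr;
	FILE *f = fopen("/proc/net/if_inet6", "r");

	if (!f)
		return 1;
	while (fgets(buf, sizeof(buf), f)) {
		if (sscanf(buf, "%32s %x %x %x %x %31s",
			   addr_hex, &ifindex, &plen, &scope, &flags,
			   devname) != 6)
			continue;
		/* The address is 32 bare hex digits; re-insert the colons
		 * so inet_pton() can parse it.
		 */
		char colon[40];
		int i, j = 0;

		for (i = 0; i < 32; i++) {
			colon[j++] = addr_hex[i];
			if ((i & 3) == 3 && i != 31)
				colon[j++] = ':';
		}
		colon[j] = '\0';
		if (inet_pton(AF_INET6, colon, &addr) == 1 &&
		    inet_ntop(AF_INET6, &addr, pretty, sizeof(pretty)))
			printf("%s/%u dev %s scope %02x flags %02x\n",
			       pretty, plen, devname, scope, flags);
	}
	fclose(f);
	return 0;
}
#endif
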
4174
4175 static const struct seq_operations if6_seq_ops = {
4176 .start = if6_seq_start,
4177 .next = if6_seq_next,
4178 .show = if6_seq_show,
4179 .stop = if6_seq_stop,
4180 };
4181
4182 static int if6_seq_open(struct inode *inode, struct file *file)
4183 {
4184 return seq_open_net(inode, file, &if6_seq_ops,
4185 sizeof(struct if6_iter_state));
4186 }
4187
4188 static const struct file_operations if6_fops = {
4189 .owner = THIS_MODULE,
4190 .open = if6_seq_open,
4191 .read = seq_read,
4192 .llseek = seq_lseek,
4193 .release = seq_release_net,
4194 };
4195
4196 static int __net_init if6_proc_net_init(struct net *net)
4197 {
4198 if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
4199 return -ENOMEM;
4200 return 0;
4201 }
4202
4203 static void __net_exit if6_proc_net_exit(struct net *net)
4204 {
4205 remove_proc_entry("if_inet6", net->proc_net);
4206 }
4207
4208 static struct pernet_operations if6_proc_net_ops = {
4209 .init = if6_proc_net_init,
4210 .exit = if6_proc_net_exit,
4211 };
4212
4213 int __init if6_proc_init(void)
4214 {
4215 return register_pernet_subsys(&if6_proc_net_ops);
4216 }
4217
4218 void if6_proc_exit(void)
4219 {
4220 unregister_pernet_subsys(&if6_proc_net_ops);
4221 }
4222 #endif /* CONFIG_PROC_FS */
4223
4224 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4225 /* Check if address is a home address configured on any interface. */
4226 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4227 {
4228 int ret = 0;
4229 struct inet6_ifaddr *ifp = NULL;
4230 unsigned int hash = inet6_addr_hash(addr);
4231
4232 rcu_read_lock_bh();
4233 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
4234 if (!net_eq(dev_net(ifp->idev->dev), net))
4235 continue;
4236 if (ipv6_addr_equal(&ifp->addr, addr) &&
4237 (ifp->flags & IFA_F_HOMEADDRESS)) {
4238 ret = 1;
4239 break;
4240 }
4241 }
4242 rcu_read_unlock_bh();
4243 return ret;
4244 }
4245 #endif
4246
4247 /*
4248 * Periodic address status verification
4249 */
4250
4251 static void addrconf_verify_rtnl(void)
4252 {
4253 unsigned long now, next, next_sec, next_sched;
4254 struct inet6_ifaddr *ifp;
4255 int i;
4256
4257 ASSERT_RTNL();
4258
4259 rcu_read_lock_bh();
4260 now = jiffies;
4261 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4262
4263 cancel_delayed_work(&addr_chk_work);
4264
4265 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4266 restart:
4267 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4268 unsigned long age;
4269
4270 /* An IFA_F_PERMANENT address can still have a finite preferred
4271 * lifetime (preferred_lft set to neither zero nor infinity while
4272 * valid_lft is infinity), so only skip it when preferred_lft is infinite.
4273 */
4274 if ((ifp->flags & IFA_F_PERMANENT) &&
4275 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4276 continue;
4277
4278 spin_lock(&ifp->lock);
4279 /* We try to batch several events at once. */
4280 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4281
4282 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4283 age >= ifp->valid_lft) {
4284 spin_unlock(&ifp->lock);
4285 in6_ifa_hold(ifp);
4286 ipv6_del_addr(ifp);
4287 goto restart;
4288 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4289 spin_unlock(&ifp->lock);
4290 continue;
4291 } else if (age >= ifp->prefered_lft) {
4292 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4293 int deprecate = 0;
4294
4295 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4296 deprecate = 1;
4297 ifp->flags |= IFA_F_DEPRECATED;
4298 }
4299
4300 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4301 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4302 next = ifp->tstamp + ifp->valid_lft * HZ;
4303
4304 spin_unlock(&ifp->lock);
4305
4306 if (deprecate) {
4307 in6_ifa_hold(ifp);
4308
4309 ipv6_ifa_notify(0, ifp);
4310 in6_ifa_put(ifp);
4311 goto restart;
4312 }
4313 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4314 !(ifp->flags&IFA_F_TENTATIVE)) {
4315 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4316 ifp->idev->cnf.dad_transmits *
4317 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
4318
4319 if (age >= ifp->prefered_lft - regen_advance) {
4320 struct inet6_ifaddr *ifpub = ifp->ifpub;
4321 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4322 next = ifp->tstamp + ifp->prefered_lft * HZ;
4323 if (!ifp->regen_count && ifpub) {
4324 ifp->regen_count++;
4325 in6_ifa_hold(ifp);
4326 in6_ifa_hold(ifpub);
4327 spin_unlock(&ifp->lock);
4328
4329 spin_lock(&ifpub->lock);
4330 ifpub->regen_count = 0;
4331 spin_unlock(&ifpub->lock);
4332 ipv6_create_tempaddr(ifpub, ifp);
4333 in6_ifa_put(ifpub);
4334 in6_ifa_put(ifp);
4335 goto restart;
4336 }
4337 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4338 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4339 spin_unlock(&ifp->lock);
4340 } else {
4341 /* ifp->prefered_lft <= ifp->valid_lft */
4342 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4343 next = ifp->tstamp + ifp->prefered_lft * HZ;
4344 spin_unlock(&ifp->lock);
4345 }
4346 }
4347 }
4348
4349 next_sec = round_jiffies_up(next);
4350 next_sched = next;
4351
4352 /* If rounded timeout is accurate enough, accept it. */
4353 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4354 next_sched = next_sec;
4355
4356 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4357 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4358 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4359
4360 ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4361 now, next, next_sec, next_sched);
4362 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4363 rcu_read_unlock_bh();
4364 }
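
/* A small user-space sketch (under "#if 0", not part of the kernel build)
 * of the regen_advance arithmetic used above for temporary addresses: a
 * replacement is created regen_advance seconds before the current one
 * would be deprecated, leaving room for regen_max_retry rounds of
 * dad_transmits DAD probes.  The numeric values below are the usual
 * defaults and are assumptions, not taken from this file.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int regen_max_retry = 3;	/* net.ipv6.conf.*.regen_max_retry */
	unsigned int dad_transmits = 1;		/* net.ipv6.conf.*.dad_transmits */
	unsigned int retrans_time_s = 1;	/* RETRANS_TIME, here in seconds */
	unsigned int temp_prefered_lft = 86400;	/* seconds */

	unsigned int regen_advance = regen_max_retry * dad_transmits *
				     retrans_time_s;

	printf("regen_advance = %u s\n", regen_advance);
	printf("a new temporary address is created once the current one is\n"
	       "older than %u s (preferred lifetime minus the lead time)\n",
	       temp_prefered_lft - regen_advance);
	return 0;
}
#endif
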
4365
4366 static void addrconf_verify_work(struct work_struct *w)
4367 {
4368 rtnl_lock();
4369 addrconf_verify_rtnl();
4370 rtnl_unlock();
4371 }
4372
4373 static void addrconf_verify(void)
4374 {
4375 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4376 }
4377
4378 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4379 struct in6_addr **peer_pfx)
4380 {
4381 struct in6_addr *pfx = NULL;
4382
4383 *peer_pfx = NULL;
4384
4385 if (addr)
4386 pfx = nla_data(addr);
4387
4388 if (local) {
4389 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4390 *peer_pfx = pfx;
4391 pfx = nla_data(local);
4392 }
4393
4394 return pfx;
4395 }
4396
4397 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4398 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4399 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4400 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4401 [IFA_FLAGS] = { .len = sizeof(u32) },
4402 };
4403
4404 static int
4405 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
4406 {
4407 struct net *net = sock_net(skb->sk);
4408 struct ifaddrmsg *ifm;
4409 struct nlattr *tb[IFA_MAX+1];
4410 struct in6_addr *pfx, *peer_pfx;
4411 u32 ifa_flags;
4412 int err;
4413
4414 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4415 NULL);
4416 if (err < 0)
4417 return err;
4418
4419 ifm = nlmsg_data(nlh);
4420 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4421 if (!pfx)
4422 return -EINVAL;
4423
4424 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4425
4426 /* We ignore other flags so far. */
4427 ifa_flags &= IFA_F_MANAGETEMPADDR;
4428
4429 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4430 ifm->ifa_prefixlen);
4431 }
4432
4433 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
4434 u32 prefered_lft, u32 valid_lft)
4435 {
4436 u32 flags;
4437 clock_t expires;
4438 unsigned long timeout;
4439 bool was_managetempaddr;
4440 bool had_prefixroute;
4441
4442 ASSERT_RTNL();
4443
4444 if (!valid_lft || (prefered_lft > valid_lft))
4445 return -EINVAL;
4446
4447 if (ifa_flags & IFA_F_MANAGETEMPADDR &&
4448 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4449 return -EINVAL;
4450
4451 timeout = addrconf_timeout_fixup(valid_lft, HZ);
4452 if (addrconf_finite_timeout(timeout)) {
4453 expires = jiffies_to_clock_t(timeout * HZ);
4454 valid_lft = timeout;
4455 flags = RTF_EXPIRES;
4456 } else {
4457 expires = 0;
4458 flags = 0;
4459 ifa_flags |= IFA_F_PERMANENT;
4460 }
4461
4462 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
4463 if (addrconf_finite_timeout(timeout)) {
4464 if (timeout == 0)
4465 ifa_flags |= IFA_F_DEPRECATED;
4466 prefered_lft = timeout;
4467 }
4468
4469 spin_lock_bh(&ifp->lock);
4470 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4471 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4472 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4473 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4474 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4475 IFA_F_NOPREFIXROUTE);
4476 ifp->flags |= ifa_flags;
4477 ifp->tstamp = jiffies;
4478 ifp->valid_lft = valid_lft;
4479 ifp->prefered_lft = prefered_lft;
4480
4481 spin_unlock_bh(&ifp->lock);
4482 if (!(ifp->flags&IFA_F_TENTATIVE))
4483 ipv6_ifa_notify(0, ifp);
4484
4485 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
4486 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
4487 expires, flags);
4488 } else if (had_prefixroute) {
4489 enum cleanup_prefix_rt_t action;
4490 unsigned long rt_expires;
4491
4492 write_lock_bh(&ifp->idev->lock);
4493 action = check_cleanup_prefix_route(ifp, &rt_expires);
4494 write_unlock_bh(&ifp->idev->lock);
4495
4496 if (action != CLEANUP_PREFIX_RT_NOP) {
4497 cleanup_prefix_route(ifp, rt_expires,
4498 action == CLEANUP_PREFIX_RT_DEL);
4499 }
4500 }
4501
4502 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4503 if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4504 valid_lft = prefered_lft = 0;
4505 manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
4506 !was_managetempaddr, jiffies);
4507 }
4508
4509 addrconf_verify_rtnl();
4510
4511 return 0;
4512 }
4513
4514 static int
4515 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
4516 {
4517 struct net *net = sock_net(skb->sk);
4518 struct ifaddrmsg *ifm;
4519 struct nlattr *tb[IFA_MAX+1];
4520 struct in6_addr *pfx, *peer_pfx;
4521 struct inet6_ifaddr *ifa;
4522 struct net_device *dev;
4523 u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
4524 u32 ifa_flags;
4525 int err;
4526
4527 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4528 NULL);
4529 if (err < 0)
4530 return err;
4531
4532 ifm = nlmsg_data(nlh);
4533 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4534 if (!pfx)
4535 return -EINVAL;
4536
4537 if (tb[IFA_CACHEINFO]) {
4538 struct ifa_cacheinfo *ci;
4539
4540 ci = nla_data(tb[IFA_CACHEINFO]);
4541 valid_lft = ci->ifa_valid;
4542 preferred_lft = ci->ifa_prefered;
4543 } else {
4544 preferred_lft = INFINITY_LIFE_TIME;
4545 valid_lft = INFINITY_LIFE_TIME;
4546 }
4547
4548 dev = __dev_get_by_index(net, ifm->ifa_index);
4549 if (!dev)
4550 return -ENODEV;
4551
4552 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4553
4554 /* We ignore other flags so far. */
4555 ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4556 IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
4557
4558 ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
4559 if (!ifa) {
4560 /*
4561 * It would be best to check for !NLM_F_CREATE here but
4562 * userspace already relies on not having to provide this.
4563 */
4564 return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx,
4565 ifm->ifa_prefixlen, ifa_flags,
4566 preferred_lft, valid_lft);
4567 }
4568
4569 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4570 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4571 err = -EEXIST;
4572 else
4573 err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);
4574
4575 in6_ifa_put(ifa);
4576
4577 return err;
4578 }
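
/* A minimal user-space sketch (under "#if 0", not kernel code) of the
 * RTM_NEWADDR request that lands in inet6_rtm_newaddr() above, roughly
 * what "ip -6 addr add ... preferred_lft ... valid_lft ..." does.  The
 * device name "eth0", the address 2001:db8::1/64 and the lifetimes are
 * placeholders; error handling is reduced to the bare minimum.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>

int main(void)
{
	struct {
		struct nlmsghdr  nh;
		struct ifaddrmsg ifa;
		char             attrs[64];
	} req;
	struct ifa_cacheinfo ci = { .ifa_prefered = 600, .ifa_valid = 1200 };
	struct rtattr *rta;
	struct in6_addr addr;
	int fd;

	memset(&req, 0, sizeof(req));
	inet_pton(AF_INET6, "2001:db8::1", &addr);

	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifa));
	req.nh.nlmsg_type = RTM_NEWADDR;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	req.ifa.ifa_family = AF_INET6;
	req.ifa.ifa_prefixlen = 64;
	req.ifa.ifa_index = if_nametoindex("eth0");

	/* IFA_LOCAL: the address itself. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFA_LOCAL;
	rta->rta_len = RTA_LENGTH(sizeof(addr));
	memcpy(RTA_DATA(rta), &addr, sizeof(addr));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	/* IFA_CACHEINFO: preferred/valid lifetimes in seconds. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFA_CACHEINFO;
	rta->rta_len = RTA_LENGTH(sizeof(ci));
	memcpy(RTA_DATA(rta), &ci, sizeof(ci));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return 1;
	send(fd, &req, req.nh.nlmsg_len, 0);
	close(fd);
	return 0;
}
#endif
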
4579
4580 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4581 u8 scope, int ifindex)
4582 {
4583 struct ifaddrmsg *ifm;
4584
4585 ifm = nlmsg_data(nlh);
4586 ifm->ifa_family = AF_INET6;
4587 ifm->ifa_prefixlen = prefixlen;
4588 ifm->ifa_flags = flags;
4589 ifm->ifa_scope = scope;
4590 ifm->ifa_index = ifindex;
4591 }
4592
4593 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4594 unsigned long tstamp, u32 preferred, u32 valid)
4595 {
4596 struct ifa_cacheinfo ci;
4597
4598 ci.cstamp = cstamp_delta(cstamp);
4599 ci.tstamp = cstamp_delta(tstamp);
4600 ci.ifa_prefered = preferred;
4601 ci.ifa_valid = valid;
4602
4603 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4604 }
4605
4606 static inline int rt_scope(int ifa_scope)
4607 {
4608 if (ifa_scope & IFA_HOST)
4609 return RT_SCOPE_HOST;
4610 else if (ifa_scope & IFA_LINK)
4611 return RT_SCOPE_LINK;
4612 else if (ifa_scope & IFA_SITE)
4613 return RT_SCOPE_SITE;
4614 else
4615 return RT_SCOPE_UNIVERSE;
4616 }
4617
4618 static inline int inet6_ifaddr_msgsize(void)
4619 {
4620 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4621 + nla_total_size(16) /* IFA_LOCAL */
4622 + nla_total_size(16) /* IFA_ADDRESS */
4623 + nla_total_size(sizeof(struct ifa_cacheinfo))
4624 + nla_total_size(4) /* IFA_FLAGS */;
4625 }
4626
4627 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4628 u32 portid, u32 seq, int event, unsigned int flags)
4629 {
4630 struct nlmsghdr *nlh;
4631 u32 preferred, valid;
4632
4633 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4634 if (!nlh)
4635 return -EMSGSIZE;
4636
4637 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4638 ifa->idev->dev->ifindex);
4639
4640 if (!((ifa->flags&IFA_F_PERMANENT) &&
4641 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
4642 preferred = ifa->prefered_lft;
4643 valid = ifa->valid_lft;
4644 if (preferred != INFINITY_LIFE_TIME) {
4645 long tval = (jiffies - ifa->tstamp)/HZ;
4646 if (preferred > tval)
4647 preferred -= tval;
4648 else
4649 preferred = 0;
4650 if (valid != INFINITY_LIFE_TIME) {
4651 if (valid > tval)
4652 valid -= tval;
4653 else
4654 valid = 0;
4655 }
4656 }
4657 } else {
4658 preferred = INFINITY_LIFE_TIME;
4659 valid = INFINITY_LIFE_TIME;
4660 }
4661
4662 if (!ipv6_addr_any(&ifa->peer_addr)) {
4663 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
4664 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
4665 goto error;
4666 } else
4667 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
4668 goto error;
4669
4670 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
4671 goto error;
4672
4673 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
4674 goto error;
4675
4676 nlmsg_end(skb, nlh);
4677 return 0;
4678
4679 error:
4680 nlmsg_cancel(skb, nlh);
4681 return -EMSGSIZE;
4682 }
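
/* A small sketch (under "#if 0", not kernel code) of the remaining-lifetime
 * arithmetic above: IFA_CACHEINFO reports the configured lifetimes minus
 * the seconds elapsed since ifa->tstamp, clamped at zero, with infinite
 * lifetimes passed through.  (In the kernel the valid lifetime is only
 * adjusted when the preferred one is finite; this sketch ignores that
 * detail.)  The numbers are illustrative.
 */
#if 0
#include <stdio.h>

#define INFINITY_LIFE_TIME 0xFFFFFFFFu

static unsigned int remaining(unsigned int lft, unsigned int elapsed)
{
	if (lft == INFINITY_LIFE_TIME)
		return lft;
	return lft > elapsed ? lft - elapsed : 0;
}

int main(void)
{
	unsigned int elapsed = 250;	/* seconds since ifa->tstamp */

	printf("preferred: %u\n", remaining(600, elapsed));	/* 350 */
	printf("valid:     %u\n", remaining(1200, elapsed));	/* 950 */
	printf("infinite:  %#x\n", remaining(INFINITY_LIFE_TIME, elapsed));
	return 0;
}
#endif
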
4683
4684 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
4685 u32 portid, u32 seq, int event, u16 flags)
4686 {
4687 struct nlmsghdr *nlh;
4688 u8 scope = RT_SCOPE_UNIVERSE;
4689 int ifindex = ifmca->idev->dev->ifindex;
4690
4691 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
4692 scope = RT_SCOPE_SITE;
4693
4694 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4695 if (!nlh)
4696 return -EMSGSIZE;
4697
4698 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4699 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
4700 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
4701 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4702 nlmsg_cancel(skb, nlh);
4703 return -EMSGSIZE;
4704 }
4705
4706 nlmsg_end(skb, nlh);
4707 return 0;
4708 }
4709
4710 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
4711 u32 portid, u32 seq, int event, unsigned int flags)
4712 {
4713 struct nlmsghdr *nlh;
4714 u8 scope = RT_SCOPE_UNIVERSE;
4715 int ifindex = ifaca->aca_idev->dev->ifindex;
4716
4717 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
4718 scope = RT_SCOPE_SITE;
4719
4720 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4721 if (!nlh)
4722 return -EMSGSIZE;
4723
4724 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4725 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
4726 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
4727 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4728 nlmsg_cancel(skb, nlh);
4729 return -EMSGSIZE;
4730 }
4731
4732 nlmsg_end(skb, nlh);
4733 return 0;
4734 }
4735
4736 enum addr_type_t {
4737 UNICAST_ADDR,
4738 MULTICAST_ADDR,
4739 ANYCAST_ADDR,
4740 };
4741
4742 /* called with rcu_read_lock() */
4743 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4744 struct netlink_callback *cb, enum addr_type_t type,
4745 int s_ip_idx, int *p_ip_idx)
4746 {
4747 struct ifmcaddr6 *ifmca;
4748 struct ifacaddr6 *ifaca;
4749 int err = 1;
4750 int ip_idx = *p_ip_idx;
4751
4752 read_lock_bh(&idev->lock);
4753 switch (type) {
4754 case UNICAST_ADDR: {
4755 struct inet6_ifaddr *ifa;
4756
4757 /* unicast address incl. temp addr */
4758 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4759 if (++ip_idx < s_ip_idx)
4760 continue;
4761 err = inet6_fill_ifaddr(skb, ifa,
4762 NETLINK_CB(cb->skb).portid,
4763 cb->nlh->nlmsg_seq,
4764 RTM_NEWADDR,
4765 NLM_F_MULTI);
4766 if (err < 0)
4767 break;
4768 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4769 }
4770 break;
4771 }
4772 case MULTICAST_ADDR:
4773 /* multicast address */
4774 for (ifmca = idev->mc_list; ifmca;
4775 ifmca = ifmca->next, ip_idx++) {
4776 if (ip_idx < s_ip_idx)
4777 continue;
4778 err = inet6_fill_ifmcaddr(skb, ifmca,
4779 NETLINK_CB(cb->skb).portid,
4780 cb->nlh->nlmsg_seq,
4781 RTM_GETMULTICAST,
4782 NLM_F_MULTI);
4783 if (err < 0)
4784 break;
4785 }
4786 break;
4787 case ANYCAST_ADDR:
4788 /* anycast address */
4789 for (ifaca = idev->ac_list; ifaca;
4790 ifaca = ifaca->aca_next, ip_idx++) {
4791 if (ip_idx < s_ip_idx)
4792 continue;
4793 err = inet6_fill_ifacaddr(skb, ifaca,
4794 NETLINK_CB(cb->skb).portid,
4795 cb->nlh->nlmsg_seq,
4796 RTM_GETANYCAST,
4797 NLM_F_MULTI);
4798 if (err < 0)
4799 break;
4800 }
4801 break;
4802 default:
4803 break;
4804 }
4805 read_unlock_bh(&idev->lock);
4806 *p_ip_idx = ip_idx;
4807 return err;
4808 }
4809
4810 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
4811 enum addr_type_t type)
4812 {
4813 struct net *net = sock_net(skb->sk);
4814 int h, s_h;
4815 int idx, ip_idx;
4816 int s_idx, s_ip_idx;
4817 struct net_device *dev;
4818 struct inet6_dev *idev;
4819 struct hlist_head *head;
4820
4821 s_h = cb->args[0];
4822 s_idx = idx = cb->args[1];
4823 s_ip_idx = ip_idx = cb->args[2];
4824
4825 rcu_read_lock();
4826 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
4827 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4828 idx = 0;
4829 head = &net->dev_index_head[h];
4830 hlist_for_each_entry_rcu(dev, head, index_hlist) {
4831 if (idx < s_idx)
4832 goto cont;
4833 if (h > s_h || idx > s_idx)
4834 s_ip_idx = 0;
4835 ip_idx = 0;
4836 idev = __in6_dev_get(dev);
4837 if (!idev)
4838 goto cont;
4839
4840 if (in6_dump_addrs(idev, skb, cb, type,
4841 s_ip_idx, &ip_idx) < 0)
4842 goto done;
4843 cont:
4844 idx++;
4845 }
4846 }
4847 done:
4848 rcu_read_unlock();
4849 cb->args[0] = h;
4850 cb->args[1] = idx;
4851 cb->args[2] = ip_idx;
4852
4853 return skb->len;
4854 }
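
/* A user-space sketch (under "#if 0", not kernel code) of the dump path
 * served by inet6_dump_ifaddr()/in6_dump_addrs() above: an RTM_GETADDR
 * request with NLM_F_DUMP, followed by parsing the IFA_ADDRESS attribute
 * of each RTM_NEWADDR reply.  Buffer sizes and the output format are
 * assumptions; error and NLMSG_ERROR handling is omitted.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>

int main(void)
{
	struct {
		struct nlmsghdr  nh;
		struct ifaddrmsg ifa;
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
		.nh.nlmsg_type  = RTM_GETADDR,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.ifa.ifa_family = AF_INET6,
	};
	char buf[16384], str[INET6_ADDRSTRLEN];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0 || send(fd, &req, req.nh.nlmsg_len, 0) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		if (len <= 0)
			break;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			struct ifaddrmsg *ifa;
			struct rtattr *rta;
			int alen;

			if (nh->nlmsg_type == NLMSG_DONE)
				goto out;
			if (nh->nlmsg_type != RTM_NEWADDR)
				continue;
			ifa = NLMSG_DATA(nh);
			rta = IFA_RTA(ifa);
			alen = IFA_PAYLOAD(nh);
			for (; RTA_OK(rta, alen); rta = RTA_NEXT(rta, alen))
				if (rta->rta_type == IFA_ADDRESS &&
				    inet_ntop(AF_INET6, RTA_DATA(rta),
					      str, sizeof(str)))
					printf("%s/%u ifindex %u\n", str,
					       (unsigned)ifa->ifa_prefixlen,
					       (unsigned)ifa->ifa_index);
		}
	}
out:
	close(fd);
	return 0;
}
#endif
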
4855
4856 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
4857 {
4858 enum addr_type_t type = UNICAST_ADDR;
4859
4860 return inet6_dump_addr(skb, cb, type);
4861 }
4862
4863 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
4864 {
4865 enum addr_type_t type = MULTICAST_ADDR;
4866
4867 return inet6_dump_addr(skb, cb, type);
4868 }
4869
4870
4871 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
4872 {
4873 enum addr_type_t type = ANYCAST_ADDR;
4874
4875 return inet6_dump_addr(skb, cb, type);
4876 }
4877
4878 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
4879 {
4880 struct net *net = sock_net(in_skb->sk);
4881 struct ifaddrmsg *ifm;
4882 struct nlattr *tb[IFA_MAX+1];
4883 struct in6_addr *addr = NULL, *peer;
4884 struct net_device *dev = NULL;
4885 struct inet6_ifaddr *ifa;
4886 struct sk_buff *skb;
4887 int err;
4888
4889 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4890 NULL);
4891 if (err < 0)
4892 goto errout;
4893
4894 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
4895 if (!addr) {
4896 err = -EINVAL;
4897 goto errout;
4898 }
4899
4900 ifm = nlmsg_data(nlh);
4901 if (ifm->ifa_index)
4902 dev = __dev_get_by_index(net, ifm->ifa_index);
4903
4904 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
4905 if (!ifa) {
4906 err = -EADDRNOTAVAIL;
4907 goto errout;
4908 }
4909
4910 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
4911 if (!skb) {
4912 err = -ENOBUFS;
4913 goto errout_ifa;
4914 }
4915
4916 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
4917 nlh->nlmsg_seq, RTM_NEWADDR, 0);
4918 if (err < 0) {
4919 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4920 WARN_ON(err == -EMSGSIZE);
4921 kfree_skb(skb);
4922 goto errout_ifa;
4923 }
4924 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4925 errout_ifa:
4926 in6_ifa_put(ifa);
4927 errout:
4928 return err;
4929 }
4930
4931 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
4932 {
4933 struct sk_buff *skb;
4934 struct net *net = dev_net(ifa->idev->dev);
4935 int err = -ENOBUFS;
4936
4937 /* Don't send a DELADDR notification for a TENTATIVE address,
4938 * since a NEWADDR notification is sent only after the
4939 * TENTATIVE flag has been removed.
4940 */
4941 if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
4942 return;
4943
4944 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
4945 if (!skb)
4946 goto errout;
4947
4948 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
4949 if (err < 0) {
4950 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4951 WARN_ON(err == -EMSGSIZE);
4952 kfree_skb(skb);
4953 goto errout;
4954 }
4955 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
4956 return;
4957 errout:
4958 if (err < 0)
4959 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
4960 }
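
/* A user-space sketch (under "#if 0", not kernel code) of listening for
 * the RTNLGRP_IPV6_IFADDR notifications multicast by inet6_ifa_notify()
 * above, using the legacy group bitmask RTMGRP_IPV6_IFADDR.  Only the
 * message type is printed; attribute parsing is left out on purpose.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_groups = RTMGRP_IPV6_IFADDR,
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		if (len <= 0)
			break;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len))
			printf("%s\n", nh->nlmsg_type == RTM_NEWADDR ?
			       "RTM_NEWADDR" : nh->nlmsg_type == RTM_DELADDR ?
			       "RTM_DELADDR" : "other");
	}
	close(fd);
	return 0;
}
#endif
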
4961
4962 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
4963 __s32 *array, int bytes)
4964 {
4965 BUG_ON(bytes < (DEVCONF_MAX * 4));
4966
4967 memset(array, 0, bytes);
4968 array[DEVCONF_FORWARDING] = cnf->forwarding;
4969 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
4970 array[DEVCONF_MTU6] = cnf->mtu6;
4971 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
4972 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
4973 array[DEVCONF_AUTOCONF] = cnf->autoconf;
4974 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
4975 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
4976 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
4977 jiffies_to_msecs(cnf->rtr_solicit_interval);
4978 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
4979 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
4980 array[DEVCONF_RTR_SOLICIT_DELAY] =
4981 jiffies_to_msecs(cnf->rtr_solicit_delay);
4982 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
4983 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
4984 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
4985 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
4986 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
4987 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
4988 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
4989 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
4990 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
4991 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
4992 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
4993 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
4994 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
4995 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
4996 #ifdef CONFIG_IPV6_ROUTER_PREF
4997 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
4998 array[DEVCONF_RTR_PROBE_INTERVAL] =
4999 jiffies_to_msecs(cnf->rtr_probe_interval);
5000 #ifdef CONFIG_IPV6_ROUTE_INFO
5001 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5002 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5003 #endif
5004 #endif
5005 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5006 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5007 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5008 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5009 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5010 #endif
5011 #ifdef CONFIG_IPV6_MROUTE
5012 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
5013 #endif
5014 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5015 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5016 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5017 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5018 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5019 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5020 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5021 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5022 /* we omit DEVCONF_STABLE_SECRET for now */
5023 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5024 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5025 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5026 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5027 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5028 #ifdef CONFIG_IPV6_SEG6_HMAC
5029 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5030 #endif
5031 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5032 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5033 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5034 }
5035
5036 static inline size_t inet6_ifla6_size(void)
5037 {
5038 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5039 + nla_total_size(sizeof(struct ifla_cacheinfo))
5040 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5041 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5042 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5043 + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
5044 }
5045
5046 static inline size_t inet6_if_nlmsg_size(void)
5047 {
5048 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5049 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5050 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5051 + nla_total_size(4) /* IFLA_MTU */
5052 + nla_total_size(4) /* IFLA_LINK */
5053 + nla_total_size(1) /* IFLA_OPERSTATE */
5054 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5055 }
5056
5057 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5058 int bytes)
5059 {
5060 int i;
5061 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5062 BUG_ON(pad < 0);
5063
5064 /* Use put_unaligned() because stats may not be aligned for u64. */
5065 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5066 for (i = 1; i < ICMP6_MIB_MAX; i++)
5067 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5068
5069 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5070 }
5071
5072 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5073 int bytes, size_t syncpoff)
5074 {
5075 int i, c;
5076 u64 buff[IPSTATS_MIB_MAX];
5077 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5078
5079 BUG_ON(pad < 0);
5080
5081 memset(buff, 0, sizeof(buff));
5082 buff[0] = IPSTATS_MIB_MAX;
5083
5084 for_each_possible_cpu(c) {
5085 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5086 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5087 }
5088
5089 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5090 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5091 }
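
/* A tiny sketch (under "#if 0", not kernel code) of the aggregation done
 * by __snmp6_fill_stats64() above: slot 0 carries the number of counters
 * and every other slot is the sum of that counter over all CPUs.  NCPUS,
 * MIB_MAX and the sample values are placeholders.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define NCPUS	4	/* stand-in for for_each_possible_cpu() */
#define MIB_MAX	8	/* stand-in for IPSTATS_MIB_MAX */

int main(void)
{
	uint64_t percpu[NCPUS][MIB_MAX] = {
		[0][1] = 10, [1][1] = 5, [2][3] = 7, [3][3] = 1,
	};
	uint64_t out[MIB_MAX] = { [0] = MIB_MAX };
	int cpu, i;

	for (cpu = 0; cpu < NCPUS; cpu++)
		for (i = 1; i < MIB_MAX; i++)
			out[i] += percpu[cpu][i];

	for (i = 0; i < MIB_MAX; i++)
		printf("mib[%d] = %llu\n", i, (unsigned long long)out[i]);
	return 0;
}
#endif
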
5092
5093 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5094 int bytes)
5095 {
5096 switch (attrtype) {
5097 case IFLA_INET6_STATS:
5098 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5099 offsetof(struct ipstats_mib, syncp));
5100 break;
5101 case IFLA_INET6_ICMP6STATS:
5102 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5103 break;
5104 }
5105 }
5106
5107 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5108 u32 ext_filter_mask)
5109 {
5110 struct nlattr *nla;
5111 struct ifla_cacheinfo ci;
5112
5113 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5114 goto nla_put_failure;
5115 ci.max_reasm_len = IPV6_MAXPLEN;
5116 ci.tstamp = cstamp_delta(idev->tstamp);
5117 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5118 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5119 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5120 goto nla_put_failure;
5121 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5122 if (!nla)
5123 goto nla_put_failure;
5124 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5125
5126 /* XXX - MC not implemented */
5127
5128 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5129 return 0;
5130
5131 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5132 if (!nla)
5133 goto nla_put_failure;
5134 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5135
5136 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5137 if (!nla)
5138 goto nla_put_failure;
5139 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5140
5141 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5142 if (!nla)
5143 goto nla_put_failure;
5144
5145 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5146 goto nla_put_failure;
5147
5148 read_lock_bh(&idev->lock);
5149 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5150 read_unlock_bh(&idev->lock);
5151
5152 return 0;
5153
5154 nla_put_failure:
5155 return -EMSGSIZE;
5156 }
5157
5158 static size_t inet6_get_link_af_size(const struct net_device *dev,
5159 u32 ext_filter_mask)
5160 {
5161 if (!__in6_dev_get(dev))
5162 return 0;
5163
5164 return inet6_ifla6_size();
5165 }
5166
5167 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5168 u32 ext_filter_mask)
5169 {
5170 struct inet6_dev *idev = __in6_dev_get(dev);
5171
5172 if (!idev)
5173 return -ENODATA;
5174
5175 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5176 return -EMSGSIZE;
5177
5178 return 0;
5179 }
5180
5181 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5182 {
5183 struct inet6_ifaddr *ifp;
5184 struct net_device *dev = idev->dev;
5185 bool clear_token, update_rs = false;
5186 struct in6_addr ll_addr;
5187
5188 ASSERT_RTNL();
5189
5190 if (!token)
5191 return -EINVAL;
5192 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5193 return -EINVAL;
5194 if (!ipv6_accept_ra(idev))
5195 return -EINVAL;
5196 if (idev->cnf.rtr_solicits == 0)
5197 return -EINVAL;
5198
5199 write_lock_bh(&idev->lock);
5200
5201 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5202 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5203
5204 write_unlock_bh(&idev->lock);
5205
5206 clear_token = ipv6_addr_any(token);
5207 if (clear_token)
5208 goto update_lft;
5209
5210 if (!idev->dead && (idev->if_flags & IF_READY) &&
5211 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5212 IFA_F_OPTIMISTIC)) {
5213 /* If we're not ready, then normal ifup will take care
5214 * of this. Otherwise, we need to request our rs here.
5215 */
5216 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5217 update_rs = true;
5218 }
5219
5220 update_lft:
5221 write_lock_bh(&idev->lock);
5222
5223 if (update_rs) {
5224 idev->if_flags |= IF_RS_SENT;
5225 idev->rs_interval = rfc3315_s14_backoff_init(
5226 idev->cnf.rtr_solicit_interval);
5227 idev->rs_probes = 1;
5228 addrconf_mod_rs_timer(idev, idev->rs_interval);
5229 }
5230
5231 /* Expire addresses derived from the old token so they get re-created with the new one. */
5232 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5233 spin_lock(&ifp->lock);
5234 if (ifp->tokenized) {
5235 ifp->valid_lft = 0;
5236 ifp->prefered_lft = 0;
5237 }
5238 spin_unlock(&ifp->lock);
5239 }
5240
5241 write_unlock_bh(&idev->lock);
5242 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5243 addrconf_verify_rtnl();
5244 return 0;
5245 }
5246
5247 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5248 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5249 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5250 };
5251
5252 static int inet6_validate_link_af(const struct net_device *dev,
5253 const struct nlattr *nla)
5254 {
5255 struct nlattr *tb[IFLA_INET6_MAX + 1];
5256
5257 if (dev && !__in6_dev_get(dev))
5258 return -EAFNOSUPPORT;
5259
5260 return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy,
5261 NULL);
5262 }
5263
5264 static int check_addr_gen_mode(int mode)
5265 {
5266 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5267 mode != IN6_ADDR_GEN_MODE_NONE &&
5268 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5269 mode != IN6_ADDR_GEN_MODE_RANDOM)
5270 return -EINVAL;
5271 return 1;
5272 }
5273
5274 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5275 int mode)
5276 {
5277 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5278 !idev->cnf.stable_secret.initialized &&
5279 !net->ipv6.devconf_dflt->stable_secret.initialized)
5280 return -EINVAL;
5281 return 1;
5282 }
5283
5284 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5285 {
5286 int err = -EINVAL;
5287 struct inet6_dev *idev = __in6_dev_get(dev);
5288 struct nlattr *tb[IFLA_INET6_MAX + 1];
5289
5290 if (!idev)
5291 return -EAFNOSUPPORT;
5292
5293 if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5294 BUG();
5295
5296 if (tb[IFLA_INET6_TOKEN]) {
5297 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5298 if (err)
5299 return err;
5300 }
5301
5302 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5303 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5304
5305 if (check_addr_gen_mode(mode) < 0 ||
5306 check_stable_privacy(idev, dev_net(dev), mode) < 0)
5307 return -EINVAL;
5308
5309 idev->cnf.addr_gen_mode = mode;
5310 err = 0;
5311 }
5312
5313 return err;
5314 }
5315
5316 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5317 u32 portid, u32 seq, int event, unsigned int flags)
5318 {
5319 struct net_device *dev = idev->dev;
5320 struct ifinfomsg *hdr;
5321 struct nlmsghdr *nlh;
5322 void *protoinfo;
5323
5324 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5325 if (!nlh)
5326 return -EMSGSIZE;
5327
5328 hdr = nlmsg_data(nlh);
5329 hdr->ifi_family = AF_INET6;
5330 hdr->__ifi_pad = 0;
5331 hdr->ifi_type = dev->type;
5332 hdr->ifi_index = dev->ifindex;
5333 hdr->ifi_flags = dev_get_flags(dev);
5334 hdr->ifi_change = 0;
5335
5336 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5337 (dev->addr_len &&
5338 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5339 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5340 (dev->ifindex != dev_get_iflink(dev) &&
5341 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5342 nla_put_u8(skb, IFLA_OPERSTATE,
5343 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5344 goto nla_put_failure;
5345 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
5346 if (!protoinfo)
5347 goto nla_put_failure;
5348
5349 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5350 goto nla_put_failure;
5351
5352 nla_nest_end(skb, protoinfo);
5353 nlmsg_end(skb, nlh);
5354 return 0;
5355
5356 nla_put_failure:
5357 nlmsg_cancel(skb, nlh);
5358 return -EMSGSIZE;
5359 }
5360
5361 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5362 {
5363 struct net *net = sock_net(skb->sk);
5364 int h, s_h;
5365 int idx = 0, s_idx;
5366 struct net_device *dev;
5367 struct inet6_dev *idev;
5368 struct hlist_head *head;
5369
5370 s_h = cb->args[0];
5371 s_idx = cb->args[1];
5372
5373 rcu_read_lock();
5374 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5375 idx = 0;
5376 head = &net->dev_index_head[h];
5377 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5378 if (idx < s_idx)
5379 goto cont;
5380 idev = __in6_dev_get(dev);
5381 if (!idev)
5382 goto cont;
5383 if (inet6_fill_ifinfo(skb, idev,
5384 NETLINK_CB(cb->skb).portid,
5385 cb->nlh->nlmsg_seq,
5386 RTM_NEWLINK, NLM_F_MULTI) < 0)
5387 goto out;
5388 cont:
5389 idx++;
5390 }
5391 }
5392 out:
5393 rcu_read_unlock();
5394 cb->args[1] = idx;
5395 cb->args[0] = h;
5396
5397 return skb->len;
5398 }
5399
5400 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5401 {
5402 struct sk_buff *skb;
5403 struct net *net = dev_net(idev->dev);
5404 int err = -ENOBUFS;
5405
5406 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5407 if (!skb)
5408 goto errout;
5409
5410 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5411 if (err < 0) {
5412 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
5413 WARN_ON(err == -EMSGSIZE);
5414 kfree_skb(skb);
5415 goto errout;
5416 }
5417 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
5418 return;
5419 errout:
5420 if (err < 0)
5421 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
5422 }
5423
5424 static inline size_t inet6_prefix_nlmsg_size(void)
5425 {
5426 return NLMSG_ALIGN(sizeof(struct prefixmsg))
5427 + nla_total_size(sizeof(struct in6_addr))
5428 + nla_total_size(sizeof(struct prefix_cacheinfo));
5429 }
5430
5431 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
5432 struct prefix_info *pinfo, u32 portid, u32 seq,
5433 int event, unsigned int flags)
5434 {
5435 struct prefixmsg *pmsg;
5436 struct nlmsghdr *nlh;
5437 struct prefix_cacheinfo ci;
5438
5439 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
5440 if (!nlh)
5441 return -EMSGSIZE;
5442
5443 pmsg = nlmsg_data(nlh);
5444 pmsg->prefix_family = AF_INET6;
5445 pmsg->prefix_pad1 = 0;
5446 pmsg->prefix_pad2 = 0;
5447 pmsg->prefix_ifindex = idev->dev->ifindex;
5448 pmsg->prefix_len = pinfo->prefix_len;
5449 pmsg->prefix_type = pinfo->type;
5450 pmsg->prefix_pad3 = 0;
5451 pmsg->prefix_flags = 0;
5452 if (pinfo->onlink)
5453 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
5454 if (pinfo->autoconf)
5455 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
5456
5457 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
5458 goto nla_put_failure;
5459 ci.preferred_time = ntohl(pinfo->prefered);
5460 ci.valid_time = ntohl(pinfo->valid);
5461 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
5462 goto nla_put_failure;
5463 nlmsg_end(skb, nlh);
5464 return 0;
5465
5466 nla_put_failure:
5467 nlmsg_cancel(skb, nlh);
5468 return -EMSGSIZE;
5469 }
5470
5471 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
5472 struct prefix_info *pinfo)
5473 {
5474 struct sk_buff *skb;
5475 struct net *net = dev_net(idev->dev);
5476 int err = -ENOBUFS;
5477
5478 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
5479 if (!skb)
5480 goto errout;
5481
5482 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
5483 if (err < 0) {
5484 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
5485 WARN_ON(err == -EMSGSIZE);
5486 kfree_skb(skb);
5487 goto errout;
5488 }
5489 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
5490 return;
5491 errout:
5492 if (err < 0)
5493 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
5494 }
5495
5496 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5497 {
5498 struct net *net = dev_net(ifp->idev->dev);
5499
5500 if (event)
5501 ASSERT_RTNL();
5502
5503 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
5504
5505 switch (event) {
5506 case RTM_NEWADDR:
5507 /*
5508 * If the address was optimistic
5509 * we inserted the route at the start of
5510 * our DAD process, so we don't need
5511 * to do it again
5512 */
5513 if (!(ifp->rt->rt6i_node))
5514 ip6_ins_rt(ifp->rt);
5515 if (ifp->idev->cnf.forwarding)
5516 addrconf_join_anycast(ifp);
5517 if (!ipv6_addr_any(&ifp->peer_addr))
5518 addrconf_prefix_route(&ifp->peer_addr, 128,
5519 ifp->idev->dev, 0, 0);
5520 break;
5521 case RTM_DELADDR:
5522 if (ifp->idev->cnf.forwarding)
5523 addrconf_leave_anycast(ifp);
5524 addrconf_leave_solict(ifp->idev, &ifp->addr);
5525 if (!ipv6_addr_any(&ifp->peer_addr)) {
5526 struct rt6_info *rt;
5527
5528 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5529 ifp->idev->dev, 0, 0);
5530 if (rt)
5531 ip6_del_rt(rt);
5532 }
5533 if (ifp->rt) {
5534 dst_hold(&ifp->rt->dst);
5535 ip6_del_rt(ifp->rt);
5536 }
5537 rt_genid_bump_ipv6(net);
5538 break;
5539 }
5540 atomic_inc(&net->ipv6.dev_addr_genid);
5541 }
5542
5543 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5544 {
5545 rcu_read_lock_bh();
5546 if (likely(ifp->idev->dead == 0))
5547 __ipv6_ifa_notify(event, ifp);
5548 rcu_read_unlock_bh();
5549 }
5550
5551 #ifdef CONFIG_SYSCTL
5552
5553 static
5554 int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
5555 void __user *buffer, size_t *lenp, loff_t *ppos)
5556 {
5557 int *valp = ctl->data;
5558 int val = *valp;
5559 loff_t pos = *ppos;
5560 struct ctl_table lctl;
5561 int ret;
5562
5563 /*
5564 * ctl->data points to idev->cnf.forwarding; we must not
5565 * modify it until we hold the rtnl lock.
5566 */
5567 lctl = *ctl;
5568 lctl.data = &val;
5569
5570 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5571
5572 if (write)
5573 ret = addrconf_fixup_forwarding(ctl, valp, val);
5574 if (ret)
5575 *ppos = pos;
5576 return ret;
5577 }
5578
5579 static
5580 int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
5581 void __user *buffer, size_t *lenp, loff_t *ppos)
5582 {
5583 struct inet6_dev *idev = ctl->extra1;
5584 int min_mtu = IPV6_MIN_MTU;
5585 struct ctl_table lctl;
5586
5587 lctl = *ctl;
5588 lctl.extra1 = &min_mtu;
5589 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
5590
5591 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
5592 }
5593
5594 static void dev_disable_change(struct inet6_dev *idev)
5595 {
5596 struct netdev_notifier_info info;
5597
5598 if (!idev || !idev->dev)
5599 return;
5600
5601 netdev_notifier_info_init(&info, idev->dev);
5602 if (idev->cnf.disable_ipv6)
5603 addrconf_notify(NULL, NETDEV_DOWN, &info);
5604 else
5605 addrconf_notify(NULL, NETDEV_UP, &info);
5606 }
5607
5608 static void addrconf_disable_change(struct net *net, __s32 newf)
5609 {
5610 struct net_device *dev;
5611 struct inet6_dev *idev;
5612
5613 for_each_netdev(net, dev) {
5614 idev = __in6_dev_get(dev);
5615 if (idev) {
5616 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
5617 idev->cnf.disable_ipv6 = newf;
5618 if (changed)
5619 dev_disable_change(idev);
5620 }
5621 }
5622 }
5623
5624 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
5625 {
5626 struct net *net;
5627 int old;
5628
5629 if (!rtnl_trylock())
5630 return restart_syscall();
5631
5632 net = (struct net *)table->extra2;
5633 old = *p;
5634 *p = newf;
5635
5636 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
5637 rtnl_unlock();
5638 return 0;
5639 }
5640
5641 if (p == &net->ipv6.devconf_all->disable_ipv6) {
5642 net->ipv6.devconf_dflt->disable_ipv6 = newf;
5643 addrconf_disable_change(net, newf);
5644 } else if ((!newf) ^ (!old))
5645 dev_disable_change((struct inet6_dev *)table->extra1);
5646
5647 rtnl_unlock();
5648 return 0;
5649 }
5650
5651 static
5652 int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
5653 void __user *buffer, size_t *lenp, loff_t *ppos)
5654 {
5655 int *valp = ctl->data;
5656 int val = *valp;
5657 loff_t pos = *ppos;
5658 struct ctl_table lctl;
5659 int ret;
5660
5661 /*
5662 * ctl->data points to idev->cnf.disable_ipv6; we must not
5663 * modify it until we hold the rtnl lock.
5664 */
5665 lctl = *ctl;
5666 lctl.data = &val;
5667
5668 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5669
5670 if (write)
5671 ret = addrconf_disable_ipv6(ctl, valp, val);
5672 if (ret)
5673 *ppos = pos;
5674 return ret;
5675 }
5676
5677 static
5678 int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
5679 void __user *buffer, size_t *lenp, loff_t *ppos)
5680 {
5681 int *valp = ctl->data;
5682 int ret;
5683 int old, new;
5684
5685 old = *valp;
5686 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5687 new = *valp;
5688
5689 if (write && old != new) {
5690 struct net *net = ctl->extra2;
5691
5692 if (!rtnl_trylock())
5693 return restart_syscall();
5694
5695 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
5696 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5697 NETCONFA_PROXY_NEIGH,
5698 NETCONFA_IFINDEX_DEFAULT,
5699 net->ipv6.devconf_dflt);
5700 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
5701 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5702 NETCONFA_PROXY_NEIGH,
5703 NETCONFA_IFINDEX_ALL,
5704 net->ipv6.devconf_all);
5705 else {
5706 struct inet6_dev *idev = ctl->extra1;
5707
5708 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5709 NETCONFA_PROXY_NEIGH,
5710 idev->dev->ifindex,
5711 &idev->cnf);
5712 }
5713 rtnl_unlock();
5714 }
5715
5716 return ret;
5717 }
5718
5719 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
5720 void __user *buffer, size_t *lenp,
5721 loff_t *ppos)
5722 {
5723 int ret = 0;
5724 int new_val;
5725 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
5726 struct net *net = (struct net *)ctl->extra2;
5727
5728 if (!rtnl_trylock())
5729 return restart_syscall();
5730
5731 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5732
5733 if (write) {
5734 new_val = *((int *)ctl->data);
5735
5736 if (check_addr_gen_mode(new_val) < 0) {
5737 ret = -EINVAL;
5738 goto out;
5739 }
5740
5741 /* request for default */
5742 if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
5743 ipv6_devconf_dflt.addr_gen_mode = new_val;
5744
5745 /* request for individual net device */
5746 } else {
5747 if (!idev)
5748 goto out;
5749
5750 if (check_stable_privacy(idev, net, new_val) < 0) {
5751 ret = -EINVAL;
5752 goto out;
5753 }
5754
5755 if (idev->cnf.addr_gen_mode != new_val) {
5756 idev->cnf.addr_gen_mode = new_val;
5757 addrconf_dev_config(idev->dev);
5758 }
5759 }
5760 }
5761
5762 out:
5763 rtnl_unlock();
5764
5765 return ret;
5766 }
5767
5768 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
5769 void __user *buffer, size_t *lenp,
5770 loff_t *ppos)
5771 {
5772 int err;
5773 struct in6_addr addr;
5774 char str[IPV6_MAX_STRLEN];
5775 struct ctl_table lctl = *ctl;
5776 struct net *net = ctl->extra2;
5777 struct ipv6_stable_secret *secret = ctl->data;
5778
5779 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
5780 return -EIO;
5781
5782 lctl.maxlen = IPV6_MAX_STRLEN;
5783 lctl.data = str;
5784
5785 if (!rtnl_trylock())
5786 return restart_syscall();
5787
5788 if (!write && !secret->initialized) {
5789 err = -EIO;
5790 goto out;
5791 }
5792
5793 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
5794 if (err >= sizeof(str)) {
5795 err = -EIO;
5796 goto out;
5797 }
5798
5799 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
5800 if (err || !write)
5801 goto out;
5802
5803 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
5804 err = -EIO;
5805 goto out;
5806 }
5807
5808 secret->initialized = true;
5809 secret->secret = addr;
5810
5811 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
5812 struct net_device *dev;
5813
5814 for_each_netdev(net, dev) {
5815 struct inet6_dev *idev = __in6_dev_get(dev);
5816
5817 if (idev) {
5818 idev->cnf.addr_gen_mode =
5819 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5820 }
5821 }
5822 } else {
5823 struct inet6_dev *idev = ctl->extra1;
5824
5825 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5826 }
5827
5828 out:
5829 rtnl_unlock();
5830
5831 return err;
5832 }
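
/* A user-space sketch (under "#if 0", not kernel code) of feeding the
 * handler above through /proc/sys: the value must parse as an IPv6
 * address (see the in6_pton() check), and writing it also switches
 * addr_gen_mode to stable-privacy.  The "default" path component and
 * the secret value are placeholders; a real secret should be random.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv6/conf/default/stable_secret", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Any 128-bit value written in IPv6 notation is accepted as the secret. */
	fputs("2001:db8:1:2:3:4:5:6\n", f);
	fclose(f);
	return 0;
}
#endif
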
5833
5834 static
5835 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
5836 int write,
5837 void __user *buffer,
5838 size_t *lenp,
5839 loff_t *ppos)
5840 {
5841 int *valp = ctl->data;
5842 int val = *valp;
5843 loff_t pos = *ppos;
5844 struct ctl_table lctl;
5845 int ret;
5846
5847 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
5848 * we must not modify it until we hold the rtnl lock.
5849 */
5850 lctl = *ctl;
5851 lctl.data = &val;
5852
5853 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5854
5855 if (write)
5856 ret = addrconf_fixup_linkdown(ctl, valp, val);
5857 if (ret)
5858 *ppos = pos;
5859 return ret;
5860 }
5861
5862 static
5863 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
5864 {
5865 if (rt) {
5866 if (action)
5867 rt->dst.flags |= DST_NOPOLICY;
5868 else
5869 rt->dst.flags &= ~DST_NOPOLICY;
5870 }
5871 }
5872
5873 static
5874 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
5875 {
5876 struct inet6_ifaddr *ifa;
5877
5878 read_lock_bh(&idev->lock);
5879 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5880 spin_lock(&ifa->lock);
5881 if (ifa->rt) {
5882 struct rt6_info *rt = ifa->rt;
5883 struct fib6_table *table = rt->rt6i_table;
5884 int cpu;
5885
5886 read_lock(&table->tb6_lock);
5887 addrconf_set_nopolicy(ifa->rt, val);
5888 if (rt->rt6i_pcpu) {
5889 for_each_possible_cpu(cpu) {
5890 struct rt6_info **rtp;
5891
5892 rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
5893 addrconf_set_nopolicy(*rtp, val);
5894 }
5895 }
5896 read_unlock(&table->tb6_lock);
5897 }
5898 spin_unlock(&ifa->lock);
5899 }
5900 read_unlock_bh(&idev->lock);
5901 }
5902
5903 static
5904 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
5905 {
5906 struct inet6_dev *idev;
5907 struct net *net;
5908
5909 if (!rtnl_trylock())
5910 return restart_syscall();
5911
5912 *valp = val;
5913
5914 net = (struct net *)ctl->extra2;
5915 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
5916 rtnl_unlock();
5917 return 0;
5918 }
5919
5920 if (valp == &net->ipv6.devconf_all->disable_policy) {
5921 struct net_device *dev;
5922
5923 for_each_netdev(net, dev) {
5924 idev = __in6_dev_get(dev);
5925 if (idev)
5926 addrconf_disable_policy_idev(idev, val);
5927 }
5928 } else {
5929 idev = (struct inet6_dev *)ctl->extra1;
5930 addrconf_disable_policy_idev(idev, val);
5931 }
5932
5933 rtnl_unlock();
5934 return 0;
5935 }
5936
5937 static
5938 int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
5939 void __user *buffer, size_t *lenp,
5940 loff_t *ppos)
5941 {
5942 int *valp = ctl->data;
5943 int val = *valp;
5944 loff_t pos = *ppos;
5945 struct ctl_table lctl;
5946 int ret;
5947
5948 lctl = *ctl;
5949 lctl.data = &val;
5950 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5951
5952 if (write && (*valp != val))
5953 ret = addrconf_disable_policy(ctl, valp, val);
5954
5955 if (ret)
5956 *ppos = pos;
5957
5958 return ret;
5959 }
5960
5961 static int minus_one = -1;
5962 static const int one = 1;
5963 static const int two_five_five = 255;
5964
5965 static const struct ctl_table addrconf_sysctl[] = {
5966 {
5967 .procname = "forwarding",
5968 .data = &ipv6_devconf.forwarding,
5969 .maxlen = sizeof(int),
5970 .mode = 0644,
5971 .proc_handler = addrconf_sysctl_forward,
5972 },
5973 {
5974 .procname = "hop_limit",
5975 .data = &ipv6_devconf.hop_limit,
5976 .maxlen = sizeof(int),
5977 .mode = 0644,
5978 .proc_handler = proc_dointvec_minmax,
5979 .extra1 = (void *)&one,
5980 .extra2 = (void *)&two_five_five,
5981 },
5982 {
5983 .procname = "mtu",
5984 .data = &ipv6_devconf.mtu6,
5985 .maxlen = sizeof(int),
5986 .mode = 0644,
5987 .proc_handler = addrconf_sysctl_mtu,
5988 },
5989 {
5990 .procname = "accept_ra",
5991 .data = &ipv6_devconf.accept_ra,
5992 .maxlen = sizeof(int),
5993 .mode = 0644,
5994 .proc_handler = proc_dointvec,
5995 },
5996 {
5997 .procname = "accept_redirects",
5998 .data = &ipv6_devconf.accept_redirects,
5999 .maxlen = sizeof(int),
6000 .mode = 0644,
6001 .proc_handler = proc_dointvec,
6002 },
6003 {
6004 .procname = "autoconf",
6005 .data = &ipv6_devconf.autoconf,
6006 .maxlen = sizeof(int),
6007 .mode = 0644,
6008 .proc_handler = proc_dointvec,
6009 },
6010 {
6011 .procname = "dad_transmits",
6012 .data = &ipv6_devconf.dad_transmits,
6013 .maxlen = sizeof(int),
6014 .mode = 0644,
6015 .proc_handler = proc_dointvec,
6016 },
6017 {
6018 .procname = "router_solicitations",
6019 .data = &ipv6_devconf.rtr_solicits,
6020 .maxlen = sizeof(int),
6021 .mode = 0644,
6022 .proc_handler = proc_dointvec_minmax,
6023 .extra1 = &minus_one,
6024 },
6025 {
6026 .procname = "router_solicitation_interval",
6027 .data = &ipv6_devconf.rtr_solicit_interval,
6028 .maxlen = sizeof(int),
6029 .mode = 0644,
6030 .proc_handler = proc_dointvec_jiffies,
6031 },
6032 {
6033 .procname = "router_solicitation_max_interval",
6034 .data = &ipv6_devconf.rtr_solicit_max_interval,
6035 .maxlen = sizeof(int),
6036 .mode = 0644,
6037 .proc_handler = proc_dointvec_jiffies,
6038 },
6039 {
6040 .procname = "router_solicitation_delay",
6041 .data = &ipv6_devconf.rtr_solicit_delay,
6042 .maxlen = sizeof(int),
6043 .mode = 0644,
6044 .proc_handler = proc_dointvec_jiffies,
6045 },
6046 {
6047 .procname = "force_mld_version",
6048 .data = &ipv6_devconf.force_mld_version,
6049 .maxlen = sizeof(int),
6050 .mode = 0644,
6051 .proc_handler = proc_dointvec,
6052 },
6053 {
6054 .procname = "mldv1_unsolicited_report_interval",
6055 .data =
6056 &ipv6_devconf.mldv1_unsolicited_report_interval,
6057 .maxlen = sizeof(int),
6058 .mode = 0644,
6059 .proc_handler = proc_dointvec_ms_jiffies,
6060 },
6061 {
6062 .procname = "mldv2_unsolicited_report_interval",
6063 .data =
6064 &ipv6_devconf.mldv2_unsolicited_report_interval,
6065 .maxlen = sizeof(int),
6066 .mode = 0644,
6067 .proc_handler = proc_dointvec_ms_jiffies,
6068 },
6069 {
6070 .procname = "use_tempaddr",
6071 .data = &ipv6_devconf.use_tempaddr,
6072 .maxlen = sizeof(int),
6073 .mode = 0644,
6074 .proc_handler = proc_dointvec,
6075 },
6076 {
6077 .procname = "temp_valid_lft",
6078 .data = &ipv6_devconf.temp_valid_lft,
6079 .maxlen = sizeof(int),
6080 .mode = 0644,
6081 .proc_handler = proc_dointvec,
6082 },
6083 {
6084 .procname = "temp_prefered_lft",
6085 .data = &ipv6_devconf.temp_prefered_lft,
6086 .maxlen = sizeof(int),
6087 .mode = 0644,
6088 .proc_handler = proc_dointvec,
6089 },
6090 {
6091 .procname = "regen_max_retry",
6092 .data = &ipv6_devconf.regen_max_retry,
6093 .maxlen = sizeof(int),
6094 .mode = 0644,
6095 .proc_handler = proc_dointvec,
6096 },
6097 {
6098 .procname = "max_desync_factor",
6099 .data = &ipv6_devconf.max_desync_factor,
6100 .maxlen = sizeof(int),
6101 .mode = 0644,
6102 .proc_handler = proc_dointvec,
6103 },
6104 {
6105 .procname = "max_addresses",
6106 .data = &ipv6_devconf.max_addresses,
6107 .maxlen = sizeof(int),
6108 .mode = 0644,
6109 .proc_handler = proc_dointvec,
6110 },
6111 {
6112 .procname = "accept_ra_defrtr",
6113 .data = &ipv6_devconf.accept_ra_defrtr,
6114 .maxlen = sizeof(int),
6115 .mode = 0644,
6116 .proc_handler = proc_dointvec,
6117 },
6118 {
6119 .procname = "accept_ra_min_hop_limit",
6120 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6121 .maxlen = sizeof(int),
6122 .mode = 0644,
6123 .proc_handler = proc_dointvec,
6124 },
6125 {
6126 .procname = "accept_ra_pinfo",
6127 .data = &ipv6_devconf.accept_ra_pinfo,
6128 .maxlen = sizeof(int),
6129 .mode = 0644,
6130 .proc_handler = proc_dointvec,
6131 },
6132 #ifdef CONFIG_IPV6_ROUTER_PREF
6133 {
6134 .procname = "accept_ra_rtr_pref",
6135 .data = &ipv6_devconf.accept_ra_rtr_pref,
6136 .maxlen = sizeof(int),
6137 .mode = 0644,
6138 .proc_handler = proc_dointvec,
6139 },
6140 {
6141 .procname = "router_probe_interval",
6142 .data = &ipv6_devconf.rtr_probe_interval,
6143 .maxlen = sizeof(int),
6144 .mode = 0644,
6145 .proc_handler = proc_dointvec_jiffies,
6146 },
6147 #ifdef CONFIG_IPV6_ROUTE_INFO
6148 {
6149 .procname = "accept_ra_rt_info_min_plen",
6150 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6151 .maxlen = sizeof(int),
6152 .mode = 0644,
6153 .proc_handler = proc_dointvec,
6154 },
6155 {
6156 .procname = "accept_ra_rt_info_max_plen",
6157 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6158 .maxlen = sizeof(int),
6159 .mode = 0644,
6160 .proc_handler = proc_dointvec,
6161 },
6162 #endif
6163 #endif
6164 {
6165 .procname = "proxy_ndp",
6166 .data = &ipv6_devconf.proxy_ndp,
6167 .maxlen = sizeof(int),
6168 .mode = 0644,
6169 .proc_handler = addrconf_sysctl_proxy_ndp,
6170 },
6171 {
6172 .procname = "accept_source_route",
6173 .data = &ipv6_devconf.accept_source_route,
6174 .maxlen = sizeof(int),
6175 .mode = 0644,
6176 .proc_handler = proc_dointvec,
6177 },
6178 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6179 {
6180 .procname = "optimistic_dad",
6181 .data = &ipv6_devconf.optimistic_dad,
6182 .maxlen = sizeof(int),
6183 .mode = 0644,
6184 .proc_handler = proc_dointvec,
6185 },
6186 {
6187 .procname = "use_optimistic",
6188 .data = &ipv6_devconf.use_optimistic,
6189 .maxlen = sizeof(int),
6190 .mode = 0644,
6191 .proc_handler = proc_dointvec,
6192 },
6193 #endif
6194 #ifdef CONFIG_IPV6_MROUTE
6195 {
6196 .procname = "mc_forwarding",
6197 .data = &ipv6_devconf.mc_forwarding,
6198 .maxlen = sizeof(int),
6199 .mode = 0444,
6200 .proc_handler = proc_dointvec,
6201 },
6202 #endif
6203 {
6204 .procname = "disable_ipv6",
6205 .data = &ipv6_devconf.disable_ipv6,
6206 .maxlen = sizeof(int),
6207 .mode = 0644,
6208 .proc_handler = addrconf_sysctl_disable,
6209 },
6210 {
6211 .procname = "accept_dad",
6212 .data = &ipv6_devconf.accept_dad,
6213 .maxlen = sizeof(int),
6214 .mode = 0644,
6215 .proc_handler = proc_dointvec,
6216 },
6217 {
6218 .procname = "force_tllao",
6219 .data = &ipv6_devconf.force_tllao,
6220 .maxlen = sizeof(int),
6221 .mode = 0644,
6222 .proc_handler = proc_dointvec,
6223 },
6224 {
6225 .procname = "ndisc_notify",
6226 .data = &ipv6_devconf.ndisc_notify,
6227 .maxlen = sizeof(int),
6228 .mode = 0644,
6229 .proc_handler = proc_dointvec,
6230 },
6231 {
6232 .procname = "suppress_frag_ndisc",
6233 .data = &ipv6_devconf.suppress_frag_ndisc,
6234 .maxlen = sizeof(int),
6235 .mode = 0644,
6236 .proc_handler = proc_dointvec,
6237 },
6238 {
6239 .procname = "accept_ra_from_local",
6240 .data = &ipv6_devconf.accept_ra_from_local,
6241 .maxlen = sizeof(int),
6242 .mode = 0644,
6243 .proc_handler = proc_dointvec,
6244 },
6245 {
6246 .procname = "accept_ra_mtu",
6247 .data = &ipv6_devconf.accept_ra_mtu,
6248 .maxlen = sizeof(int),
6249 .mode = 0644,
6250 .proc_handler = proc_dointvec,
6251 },
6252 {
6253 .procname = "stable_secret",
6254 .data = &ipv6_devconf.stable_secret,
6255 .maxlen = IPV6_MAX_STRLEN,
6256 .mode = 0600,
6257 .proc_handler = addrconf_sysctl_stable_secret,
6258 },
6259 {
6260 .procname = "use_oif_addrs_only",
6261 .data = &ipv6_devconf.use_oif_addrs_only,
6262 .maxlen = sizeof(int),
6263 .mode = 0644,
6264 .proc_handler = proc_dointvec,
6265 },
6266 {
6267 .procname = "ignore_routes_with_linkdown",
6268 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6269 .maxlen = sizeof(int),
6270 .mode = 0644,
6271 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6272 },
6273 {
6274 .procname = "drop_unicast_in_l2_multicast",
6275 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6276 .maxlen = sizeof(int),
6277 .mode = 0644,
6278 .proc_handler = proc_dointvec,
6279 },
6280 {
6281 .procname = "drop_unsolicited_na",
6282 .data = &ipv6_devconf.drop_unsolicited_na,
6283 .maxlen = sizeof(int),
6284 .mode = 0644,
6285 .proc_handler = proc_dointvec,
6286 },
6287 {
6288 .procname = "keep_addr_on_down",
6289 .data = &ipv6_devconf.keep_addr_on_down,
6290 .maxlen = sizeof(int),
6291 .mode = 0644,
6292 .proc_handler = proc_dointvec,
6294 },
6295 {
6296 .procname = "seg6_enabled",
6297 .data = &ipv6_devconf.seg6_enabled,
6298 .maxlen = sizeof(int),
6299 .mode = 0644,
6300 .proc_handler = proc_dointvec,
6301 },
6302 #ifdef CONFIG_IPV6_SEG6_HMAC
6303 {
6304 .procname = "seg6_require_hmac",
6305 .data = &ipv6_devconf.seg6_require_hmac,
6306 .maxlen = sizeof(int),
6307 .mode = 0644,
6308 .proc_handler = proc_dointvec,
6309 },
6310 #endif
6311 {
6312 .procname = "enhanced_dad",
6313 .data = &ipv6_devconf.enhanced_dad,
6314 .maxlen = sizeof(int),
6315 .mode = 0644,
6316 .proc_handler = proc_dointvec,
6317 },
6318 {
6319 .procname = "addr_gen_mode",
6320 .data = &ipv6_devconf.addr_gen_mode,
6321 .maxlen = sizeof(int),
6322 .mode = 0644,
6323 .proc_handler = addrconf_sysctl_addr_gen_mode,
6324 },
6325 {
6326 .procname = "disable_policy",
6327 .data = &ipv6_devconf.disable_policy,
6328 .maxlen = sizeof(int),
6329 .mode = 0644,
6330 .proc_handler = addrconf_sysctl_disable_policy,
6331 },
6332 {
6333 /* sentinel */
6334 }
6335 };
6336
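/*
 * The table above is only a template: __addrconf_sysctl_register()
 * below duplicates it for every interface and for the "all" and
 * "default" pseudo-devices, so each knob shows up under
 * /proc/sys/net/ipv6/conf/<dev>/ - for instance (taking "eth0" purely
 * as an example interface name):
 *
 *	/proc/sys/net/ipv6/conf/all/disable_ipv6
 *	/proc/sys/net/ipv6/conf/eth0/use_tempaddr
 */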
6337 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6338 struct inet6_dev *idev, struct ipv6_devconf *p)
6339 {
6340 int i, ifindex;
6341 struct ctl_table *table;
6342 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6343
6344 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6345 if (!table)
6346 goto out;
6347
6348 for (i = 0; table[i].data; i++) {
6349 table[i].data += (char *)p - (char *)&ipv6_devconf;
6350 /* Entries that already use extra1/extra2 (e.g. as bounds for
6351 * proc_dointvec_minmax) must not have them overwritten; only
6352 * unused slots get the idev and net pointers for the handlers.
6353 */
6354 if (!table[i].extra1 && !table[i].extra2) {
6355 table[i].extra1 = idev; /* embedded; no ref */
6356 table[i].extra2 = net;
6357 }
6358 }
6359
6360 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6361
6362 p->sysctl_header = register_net_sysctl(net, path, table);
6363 if (!p->sysctl_header)
6364 goto free;
6365
6366 if (!strcmp(dev_name, "all"))
6367 ifindex = NETCONFA_IFINDEX_ALL;
6368 else if (!strcmp(dev_name, "default"))
6369 ifindex = NETCONFA_IFINDEX_DEFAULT;
6370 else
6371 ifindex = idev->dev->ifindex;
6372 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
6373 ifindex, p);
6374 return 0;
6375
6376 free:
6377 kfree(table);
6378 out:
6379 return -ENOBUFS;
6380 }
6381
6382 static void __addrconf_sysctl_unregister(struct net *net,
6383 struct ipv6_devconf *p, int ifindex)
6384 {
6385 struct ctl_table *table;
6386
6387 if (!p->sysctl_header)
6388 return;
6389
6390 table = p->sysctl_header->ctl_table_arg;
6391 unregister_net_sysctl_table(p->sysctl_header);
6392 p->sysctl_header = NULL;
6393 kfree(table);
6394
6395 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
6396 }
6397
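/*
 * Per-device wrappers: registration hooks up the neighbour (ndisc)
 * sysctls first and this addrconf table second, unwinding the ndisc
 * part again if the second step fails; unregistration simply reverses
 * the order.
 */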
6398 static int addrconf_sysctl_register(struct inet6_dev *idev)
6399 {
6400 int err;
6401
6402 if (!sysctl_dev_name_is_allowed(idev->dev->name))
6403 return -EINVAL;
6404
6405 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
6406 &ndisc_ifinfo_sysctl_change);
6407 if (err)
6408 return err;
6409 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
6410 idev, &idev->cnf);
6411 if (err)
6412 neigh_sysctl_unregister(idev->nd_parms);
6413
6414 return err;
6415 }
6416
6417 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
6418 {
6419 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
6420 idev->dev->ifindex);
6421 neigh_sysctl_unregister(idev->nd_parms);
6422 }
6423
6424
6425 #endif
6426
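/*
 * Per-namespace setup: every new netns gets its own writable copies of
 * the global "all" and "default" devconf templates and, with
 * CONFIG_SYSCTL, its own sysctl trees for them; addrconf_exit_net()
 * below tears all of this down again.
 */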
6427 static int __net_init addrconf_init_net(struct net *net)
6428 {
6429 int err = -ENOMEM;
6430 struct ipv6_devconf *all, *dflt;
6431
6432 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
6433 if (!all)
6434 goto err_alloc_all;
6435
6436 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
6437 if (!dflt)
6438 goto err_alloc_dflt;
6439
6440 /* these will be inherited by all namespaces */
6441 dflt->autoconf = ipv6_defaults.autoconf;
6442 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
6443
6444 dflt->stable_secret.initialized = false;
6445 all->stable_secret.initialized = false;
6446
6447 net->ipv6.devconf_all = all;
6448 net->ipv6.devconf_dflt = dflt;
6449
6450 #ifdef CONFIG_SYSCTL
6451 err = __addrconf_sysctl_register(net, "all", NULL, all);
6452 if (err < 0)
6453 goto err_reg_all;
6454
6455 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
6456 if (err < 0)
6457 goto err_reg_dflt;
6458 #endif
6459 return 0;
6460
6461 #ifdef CONFIG_SYSCTL
6462 err_reg_dflt:
6463 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
6464 err_reg_all:
6465 kfree(dflt);
6466 #endif
6467 err_alloc_dflt:
6468 kfree(all);
6469 err_alloc_all:
6470 return err;
6471 }
6472
6473 static void __net_exit addrconf_exit_net(struct net *net)
6474 {
6475 #ifdef CONFIG_SYSCTL
6476 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
6477 NETCONFA_IFINDEX_DEFAULT);
6478 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
6479 NETCONFA_IFINDEX_ALL);
6480 #endif
6481 kfree(net->ipv6.devconf_dflt);
6482 kfree(net->ipv6.devconf_all);
6483 }
6484
6485 static struct pernet_operations addrconf_ops = {
6486 .init = addrconf_init_net,
6487 .exit = addrconf_exit_net,
6488 };
6489
6490 static struct rtnl_af_ops inet6_ops __read_mostly = {
6491 .family = AF_INET6,
6492 .fill_link_af = inet6_fill_link_af,
6493 .get_link_af_size = inet6_get_link_af_size,
6494 .validate_link_af = inet6_validate_link_af,
6495 .set_link_af = inet6_set_link_af,
6496 };
6497
6498 /*
6499 * Init / cleanup code
6500 */
6501
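/*
 * addrconf_init() wires the subsystem together: the address-label
 * table, the pernet ops above, the "ipv6_addrconf" workqueue, a
 * hand-allocated idev for loopback (see the comment further down),
 * the netdevice notifier and the rtnetlink handlers.
 * addrconf_cleanup() below undoes this in roughly reverse order.
 */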
6502 int __init addrconf_init(void)
6503 {
6504 struct inet6_dev *idev;
6505 int i, err;
6506
6507 err = ipv6_addr_label_init();
6508 if (err < 0) {
6509 pr_crit("%s: cannot initialize default policy table: %d\n",
6510 __func__, err);
6511 goto out;
6512 }
6513
6514 err = register_pernet_subsys(&addrconf_ops);
6515 if (err < 0)
6516 goto out_addrlabel;
6517
6518 addrconf_wq = create_workqueue("ipv6_addrconf");
6519 if (!addrconf_wq) {
6520 err = -ENOMEM;
6521 goto out_nowq;
6522 }
6523
6524 /* The addrconf netdev notifier requires that loopback_dev
6525 * has its ipv6 private information allocated and set up
6526 * before it can bring up and give link-local addresses
6527 * to other devices which are up.
6528 *
6529 * Unfortunately, loopback_dev is not necessarily the first
6530 * entry in the global dev_base list of net devices. In fact,
6531 * it is likely to be the very last entry on that list.
6532 * So this causes the notifier registration below to try and
6533 * give link-local addresses to all devices besides loopback_dev
6534 * first, then loopback_dev, which causes all the non-loopback_dev
6535 * devices to fail to get a link-local address.
6536 *
6537 * So, as a temporary fix, allocate the ipv6 structure for
6538 * loopback_dev first by hand.
6539 * Longer term, all of the dependencies ipv6 has upon the loopback
6540 * device, and on its being up, should be removed.
6541 */
6542 rtnl_lock();
6543 idev = ipv6_add_dev(init_net.loopback_dev);
6544 rtnl_unlock();
6545 if (IS_ERR(idev)) {
6546 err = PTR_ERR(idev);
6547 goto errlo;
6548 }
6549
6550 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6551 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
6552
6553 register_netdevice_notifier(&ipv6_dev_notf);
6554
6555 addrconf_verify();
6556
6557 rtnl_af_register(&inet6_ops);
6558
6559 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
6560 NULL);
6561 if (err < 0)
6562 goto errout;
6563
6564 /* Only the first call to __rtnl_register can fail */
6565 __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, NULL);
6566 __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, NULL);
6567 __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
6568 inet6_dump_ifaddr, NULL);
6569 __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
6570 inet6_dump_ifmcaddr, NULL);
6571 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
6572 inet6_dump_ifacaddr, NULL);
6573 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
6574 inet6_netconf_dump_devconf, NULL);
6575
6576 ipv6_addr_label_rtnl_register();
6577
6578 return 0;
6579 errout:
6580 rtnl_af_unregister(&inet6_ops);
6581 unregister_netdevice_notifier(&ipv6_dev_notf);
6582 errlo:
6583 destroy_workqueue(addrconf_wq);
6584 out_nowq:
6585 unregister_pernet_subsys(&addrconf_ops);
6586 out_addrlabel:
6587 ipv6_addr_label_cleanup();
6588 out:
6589 return err;
6590 }
6591
6592 void addrconf_cleanup(void)
6593 {
6594 struct net_device *dev;
6595 int i;
6596
6597 unregister_netdevice_notifier(&ipv6_dev_notf);
6598 unregister_pernet_subsys(&addrconf_ops);
6599 ipv6_addr_label_cleanup();
6600
6601 rtnl_lock();
6602
6603 __rtnl_af_unregister(&inet6_ops);
6604
6605 /* clean dev list */
6606 for_each_netdev(&init_net, dev) {
6607 if (__in6_dev_get(dev) == NULL)
6608 continue;
6609 addrconf_ifdown(dev, 1);
6610 }
6611 addrconf_ifdown(init_net.loopback_dev, 2);
6612
6613 /*
6614 * Check hash table.
6615 */
6616 spin_lock_bh(&addrconf_hash_lock);
6617 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6618 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
6619 spin_unlock_bh(&addrconf_hash_lock);
6620 cancel_delayed_work(&addr_chk_work);
6621 rtnl_unlock();
6622
6623 destroy_workqueue(addrconf_wq);
6624 }