net/ipv6/addrconf.c
1 /*
2 * IPv6 Address [auto]configuration
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 /*
16 * Changes:
17 *
18 * Janos Farkas : delete timer on ifdown
19 * <chexum@bankinf.banki.hu>
20 * Andi Kleen : kill double kfree on module
21 * unload.
22 * Maciej W. Rozycki : FDDI support
23 * sekiya@USAGI : Don't send too many RS
24 * packets.
25 * yoshfuji@USAGI : Fixed interval between DAD
26 * packets.
27 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
28 * address validation timer.
29 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
30 * support.
31 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
32 * address on a same interface.
33 * YOSHIFUJI Hideaki @USAGI : ARCnet support
34 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
35 * seq_file.
36 * YOSHIFUJI Hideaki @USAGI : improved source address
37 * selection; consider scope,
38 * status etc.
39 */
40
41 #define pr_fmt(fmt) "IPv6: " fmt
42
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched/signal.h>
47 #include <linux/socket.h>
48 #include <linux/sockios.h>
49 #include <linux/net.h>
50 #include <linux/inet.h>
51 #include <linux/in6.h>
52 #include <linux/netdevice.h>
53 #include <linux/if_addr.h>
54 #include <linux/if_arp.h>
55 #include <linux/if_arcnet.h>
56 #include <linux/if_infiniband.h>
57 #include <linux/route.h>
58 #include <linux/inetdevice.h>
59 #include <linux/init.h>
60 #include <linux/slab.h>
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64 #include <linux/capability.h>
65 #include <linux/delay.h>
66 #include <linux/notifier.h>
67 #include <linux/string.h>
68 #include <linux/hash.h>
69
70 #include <net/net_namespace.h>
71 #include <net/sock.h>
72 #include <net/snmp.h>
73
74 #include <net/6lowpan.h>
75 #include <net/firewire.h>
76 #include <net/ipv6.h>
77 #include <net/protocol.h>
78 #include <net/ndisc.h>
79 #include <net/ip6_route.h>
80 #include <net/addrconf.h>
81 #include <net/tcp.h>
82 #include <net/ip.h>
83 #include <net/netlink.h>
84 #include <net/pkt_sched.h>
85 #include <net/l3mdev.h>
86 #include <linux/if_tunnel.h>
87 #include <linux/rtnetlink.h>
88 #include <linux/netconf.h>
89 #include <linux/random.h>
90 #include <linux/uaccess.h>
91 #include <asm/unaligned.h>
92
93 #include <linux/proc_fs.h>
94 #include <linux/seq_file.h>
95 #include <linux/export.h>
96
97 /* Set to 3 to get tracing... */
98 #define ACONF_DEBUG 2
99
100 #if ACONF_DEBUG >= 3
101 #define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
102 #else
103 #define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
104 #endif
105
106 #define INFINITY_LIFE_TIME 0xFFFFFFFF
107
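/* Length of the longest textual IPv6 address (the IPv4-mapped form),
 * including the terminating NUL.
 */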
108 #define IPV6_MAX_STRLEN \
109 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
110
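/* Convert a jiffies timestamp into hundredths of a second since boot,
 * the unit used for the cstamp/tstamp fields reported over netlink.
 */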
111 static inline u32 cstamp_delta(unsigned long cstamp)
112 {
113 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
114 }
115
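/* Router solicitation backoff (RFC 7559) reuses the RFC 3315 section 14
 * algorithm: the initial timeout is randomized to 0.9..1.1 * IRT, each
 * retransmission roughly doubles it (1.9..2.1 * RT), and the result is
 * re-randomized to 0.9..1.1 * MRT once it would exceed the maximum.
 * The math is done in millionths to stay in integer arithmetic.
 */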
116 static inline s32 rfc3315_s14_backoff_init(s32 irt)
117 {
118 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
119 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
120 do_div(tmp, 1000000);
121 return (s32)tmp;
122 }
123
124 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
125 {
126 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
127 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
128 do_div(tmp, 1000000);
129 if ((s32)tmp > mrt) {
130 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
131 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
132 do_div(tmp, 1000000);
133 }
134 return (s32)tmp;
135 }
136
137 #ifdef CONFIG_SYSCTL
138 static int addrconf_sysctl_register(struct inet6_dev *idev);
139 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
140 #else
141 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
142 {
143 return 0;
144 }
145
146 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
147 {
148 }
149 #endif
150
151 static void ipv6_regen_rndid(struct inet6_dev *idev);
152 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
153
154 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
155 static int ipv6_count_addresses(struct inet6_dev *idev);
156 static int ipv6_generate_stable_address(struct in6_addr *addr,
157 u8 dad_count,
158 const struct inet6_dev *idev);
159
160 /*
161 * Configured unicast address hash table
162 */
163 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
164 static DEFINE_SPINLOCK(addrconf_hash_lock);
165
166 static void addrconf_verify(void);
167 static void addrconf_verify_rtnl(void);
168 static void addrconf_verify_work(struct work_struct *);
169
170 static struct workqueue_struct *addrconf_wq;
171 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
172
173 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
174 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
175
176 static void addrconf_type_change(struct net_device *dev,
177 unsigned long event);
178 static int addrconf_ifdown(struct net_device *dev, int how);
179
180 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
181 int plen,
182 const struct net_device *dev,
183 u32 flags, u32 noflags);
184
185 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
186 static void addrconf_dad_work(struct work_struct *w);
187 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
188 static void addrconf_dad_run(struct inet6_dev *idev);
189 static void addrconf_rs_timer(unsigned long data);
190 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
191 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
192
193 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
194 struct prefix_info *pinfo);
195 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
196 struct net_device *dev);
197
198 static struct ipv6_devconf ipv6_devconf __read_mostly = {
199 .forwarding = 0,
200 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
201 .mtu6 = IPV6_MIN_MTU,
202 .accept_ra = 1,
203 .accept_redirects = 1,
204 .autoconf = 1,
205 .force_mld_version = 0,
206 .mldv1_unsolicited_report_interval = 10 * HZ,
207 .mldv2_unsolicited_report_interval = HZ,
208 .dad_transmits = 1,
209 .rtr_solicits = MAX_RTR_SOLICITATIONS,
210 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
211 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
212 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
213 .use_tempaddr = 0,
214 .temp_valid_lft = TEMP_VALID_LIFETIME,
215 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
216 .regen_max_retry = REGEN_MAX_RETRY,
217 .max_desync_factor = MAX_DESYNC_FACTOR,
218 .max_addresses = IPV6_MAX_ADDRESSES,
219 .accept_ra_defrtr = 1,
220 .accept_ra_from_local = 0,
221 .accept_ra_min_hop_limit= 1,
222 .accept_ra_pinfo = 1,
223 #ifdef CONFIG_IPV6_ROUTER_PREF
224 .accept_ra_rtr_pref = 1,
225 .rtr_probe_interval = 60 * HZ,
226 #ifdef CONFIG_IPV6_ROUTE_INFO
227 .accept_ra_rt_info_min_plen = 0,
228 .accept_ra_rt_info_max_plen = 0,
229 #endif
230 #endif
231 .proxy_ndp = 0,
232 .accept_source_route = 0, /* we do not accept RH0 by default. */
233 .disable_ipv6 = 0,
234 .accept_dad = 1,
235 .suppress_frag_ndisc = 1,
236 .accept_ra_mtu = 1,
237 .stable_secret = {
238 .initialized = false,
239 },
240 .use_oif_addrs_only = 0,
241 .ignore_routes_with_linkdown = 0,
242 .keep_addr_on_down = 0,
243 .seg6_enabled = 0,
244 #ifdef CONFIG_IPV6_SEG6_HMAC
245 .seg6_require_hmac = 0,
246 #endif
247 .enhanced_dad = 1,
248 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
249 .disable_policy = 0,
250 };
251
252 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
253 .forwarding = 0,
254 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
255 .mtu6 = IPV6_MIN_MTU,
256 .accept_ra = 1,
257 .accept_redirects = 1,
258 .autoconf = 1,
259 .force_mld_version = 0,
260 .mldv1_unsolicited_report_interval = 10 * HZ,
261 .mldv2_unsolicited_report_interval = HZ,
262 .dad_transmits = 1,
263 .rtr_solicits = MAX_RTR_SOLICITATIONS,
264 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
265 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
266 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
267 .use_tempaddr = 0,
268 .temp_valid_lft = TEMP_VALID_LIFETIME,
269 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
270 .regen_max_retry = REGEN_MAX_RETRY,
271 .max_desync_factor = MAX_DESYNC_FACTOR,
272 .max_addresses = IPV6_MAX_ADDRESSES,
273 .accept_ra_defrtr = 1,
274 .accept_ra_from_local = 0,
275 .accept_ra_min_hop_limit= 1,
276 .accept_ra_pinfo = 1,
277 #ifdef CONFIG_IPV6_ROUTER_PREF
278 .accept_ra_rtr_pref = 1,
279 .rtr_probe_interval = 60 * HZ,
280 #ifdef CONFIG_IPV6_ROUTE_INFO
281 .accept_ra_rt_info_min_plen = 0,
282 .accept_ra_rt_info_max_plen = 0,
283 #endif
284 #endif
285 .proxy_ndp = 0,
286 .accept_source_route = 0, /* we do not accept RH0 by default. */
287 .disable_ipv6 = 0,
288 .accept_dad = 1,
289 .suppress_frag_ndisc = 1,
290 .accept_ra_mtu = 1,
291 .stable_secret = {
292 .initialized = false,
293 },
294 .use_oif_addrs_only = 0,
295 .ignore_routes_with_linkdown = 0,
296 .keep_addr_on_down = 0,
297 .seg6_enabled = 0,
298 #ifdef CONFIG_IPV6_SEG6_HMAC
299 .seg6_require_hmac = 0,
300 #endif
301 .enhanced_dad = 1,
302 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
303 .disable_policy = 0,
304 };
305
306 /* Check if a valid qdisc is available */
307 static inline bool addrconf_qdisc_ok(const struct net_device *dev)
308 {
309 return !qdisc_tx_is_noop(dev);
310 }
311
312 static void addrconf_del_rs_timer(struct inet6_dev *idev)
313 {
314 if (del_timer(&idev->rs_timer))
315 __in6_dev_put(idev);
316 }
317
318 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
319 {
320 if (cancel_delayed_work(&ifp->dad_work))
321 __in6_ifa_put(ifp);
322 }
323
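/* A pending rs_timer or dad_work owns a reference on its idev/ifp:
 * addrconf_mod_*() takes the reference when arming a timer that is not
 * already pending, and addrconf_del_*() above drops it when a pending
 * timer is successfully cancelled.
 */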
324 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
325 unsigned long when)
326 {
327 if (!timer_pending(&idev->rs_timer))
328 in6_dev_hold(idev);
329 mod_timer(&idev->rs_timer, jiffies + when);
330 }
331
332 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
333 unsigned long delay)
334 {
335 if (!delayed_work_pending(&ifp->dad_work))
336 in6_ifa_hold(ifp);
337 mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
338 }
339
340 static int snmp6_alloc_dev(struct inet6_dev *idev)
341 {
342 int i;
343
344 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
345 if (!idev->stats.ipv6)
346 goto err_ip;
347
348 for_each_possible_cpu(i) {
349 struct ipstats_mib *addrconf_stats;
350 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
351 u64_stats_init(&addrconf_stats->syncp);
352 }
353
354
355 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
356 GFP_KERNEL);
357 if (!idev->stats.icmpv6dev)
358 goto err_icmp;
359 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
360 GFP_KERNEL);
361 if (!idev->stats.icmpv6msgdev)
362 goto err_icmpmsg;
363
364 return 0;
365
366 err_icmpmsg:
367 kfree(idev->stats.icmpv6dev);
368 err_icmp:
369 free_percpu(idev->stats.ipv6);
370 err_ip:
371 return -ENOMEM;
372 }
373
374 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
375 {
376 struct inet6_dev *ndev;
377 int err = -ENOMEM;
378
379 ASSERT_RTNL();
380
381 if (dev->mtu < IPV6_MIN_MTU)
382 return ERR_PTR(-EINVAL);
383
384 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
385 if (!ndev)
386 return ERR_PTR(err);
387
388 rwlock_init(&ndev->lock);
389 ndev->dev = dev;
390 INIT_LIST_HEAD(&ndev->addr_list);
391 setup_timer(&ndev->rs_timer, addrconf_rs_timer,
392 (unsigned long)ndev);
393 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
394
395 if (ndev->cnf.stable_secret.initialized)
396 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
397 else
398 ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
399
400 ndev->cnf.mtu6 = dev->mtu;
401 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
402 if (!ndev->nd_parms) {
403 kfree(ndev);
404 return ERR_PTR(err);
405 }
406 if (ndev->cnf.forwarding)
407 dev_disable_lro(dev);
408 /* We refer to the device */
409 dev_hold(dev);
410
411 if (snmp6_alloc_dev(ndev) < 0) {
412 ADBG(KERN_WARNING
413 "%s: cannot allocate memory for statistics; dev=%s.\n",
414 __func__, dev->name);
415 neigh_parms_release(&nd_tbl, ndev->nd_parms);
416 dev_put(dev);
417 kfree(ndev);
418 return ERR_PTR(err);
419 }
420
421 if (snmp6_register_dev(ndev) < 0) {
422 ADBG(KERN_WARNING
423 "%s: cannot create /proc/net/dev_snmp6/%s\n",
424 __func__, dev->name);
425 goto err_release;
426 }
427
428 /* One reference from device. */
429 in6_dev_hold(ndev);
430
431 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
432 ndev->cnf.accept_dad = -1;
433
434 #if IS_ENABLED(CONFIG_IPV6_SIT)
435 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
436 pr_info("%s: Disabled Multicast RS\n", dev->name);
437 ndev->cnf.rtr_solicits = 0;
438 }
439 #endif
440
441 INIT_LIST_HEAD(&ndev->tempaddr_list);
442 ndev->desync_factor = U32_MAX;
443 if ((dev->flags&IFF_LOOPBACK) ||
444 dev->type == ARPHRD_TUNNEL ||
445 dev->type == ARPHRD_TUNNEL6 ||
446 dev->type == ARPHRD_SIT ||
447 dev->type == ARPHRD_NONE) {
448 ndev->cnf.use_tempaddr = -1;
449 } else
450 ipv6_regen_rndid(ndev);
451
452 ndev->token = in6addr_any;
453
454 if (netif_running(dev) && addrconf_qdisc_ok(dev))
455 ndev->if_flags |= IF_READY;
456
457 ipv6_mc_init_dev(ndev);
458 ndev->tstamp = jiffies;
459 err = addrconf_sysctl_register(ndev);
460 if (err) {
461 ipv6_mc_destroy_dev(ndev);
462 snmp6_unregister_dev(ndev);
463 goto err_release;
464 }
465 /* protected by rtnl_lock */
466 rcu_assign_pointer(dev->ip6_ptr, ndev);
467
468 /* Join interface-local all-node multicast group */
469 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
470
471 /* Join all-node multicast group */
472 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
473
474 /* Join all-router multicast group if forwarding is set */
475 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
476 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
477
478 return ndev;
479
480 err_release:
481 neigh_parms_release(&nd_tbl, ndev->nd_parms);
482 ndev->dead = 1;
483 in6_dev_finish_destroy(ndev);
484 return ERR_PTR(err);
485 }
486
487 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
488 {
489 struct inet6_dev *idev;
490
491 ASSERT_RTNL();
492
493 idev = __in6_dev_get(dev);
494 if (!idev) {
495 idev = ipv6_add_dev(dev);
496 if (IS_ERR(idev))
497 return NULL;
498 }
499
500 if (dev->flags&IFF_UP)
501 ipv6_mc_up(idev);
502 return idev;
503 }
504
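/* Size of an RTM_NEWNETCONF message carrying the attribute 'type'
 * (NETCONFA_ALL counts every attribute), used to size the skb the
 * caller allocates.
 */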
505 static int inet6_netconf_msgsize_devconf(int type)
506 {
507 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
508 + nla_total_size(4); /* NETCONFA_IFINDEX */
509 bool all = false;
510
511 if (type == NETCONFA_ALL)
512 all = true;
513
514 if (all || type == NETCONFA_FORWARDING)
515 size += nla_total_size(4);
516 #ifdef CONFIG_IPV6_MROUTE
517 if (all || type == NETCONFA_MC_FORWARDING)
518 size += nla_total_size(4);
519 #endif
520 if (all || type == NETCONFA_PROXY_NEIGH)
521 size += nla_total_size(4);
522
523 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
524 size += nla_total_size(4);
525
526 return size;
527 }
528
529 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
530 struct ipv6_devconf *devconf, u32 portid,
531 u32 seq, int event, unsigned int flags,
532 int type)
533 {
534 struct nlmsghdr *nlh;
535 struct netconfmsg *ncm;
536 bool all = false;
537
538 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
539 flags);
540 if (!nlh)
541 return -EMSGSIZE;
542
543 if (type == NETCONFA_ALL)
544 all = true;
545
546 ncm = nlmsg_data(nlh);
547 ncm->ncm_family = AF_INET6;
548
549 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
550 goto nla_put_failure;
551
552 if (!devconf)
553 goto out;
554
555 if ((all || type == NETCONFA_FORWARDING) &&
556 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
557 goto nla_put_failure;
558 #ifdef CONFIG_IPV6_MROUTE
559 if ((all || type == NETCONFA_MC_FORWARDING) &&
560 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
561 devconf->mc_forwarding) < 0)
562 goto nla_put_failure;
563 #endif
564 if ((all || type == NETCONFA_PROXY_NEIGH) &&
565 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
566 goto nla_put_failure;
567
568 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
569 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
570 devconf->ignore_routes_with_linkdown) < 0)
571 goto nla_put_failure;
572
573 out:
574 nlmsg_end(skb, nlh);
575 return 0;
576
577 nla_put_failure:
578 nlmsg_cancel(skb, nlh);
579 return -EMSGSIZE;
580 }
581
582 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
583 int ifindex, struct ipv6_devconf *devconf)
584 {
585 struct sk_buff *skb;
586 int err = -ENOBUFS;
587
588 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
589 if (!skb)
590 goto errout;
591
592 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
593 event, 0, type);
594 if (err < 0) {
595 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
596 WARN_ON(err == -EMSGSIZE);
597 kfree_skb(skb);
598 goto errout;
599 }
600 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
601 return;
602 errout:
603 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
604 }
605
606 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
607 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
608 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
609 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
610 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
611 };
612
613 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
614 struct nlmsghdr *nlh)
615 {
616 struct net *net = sock_net(in_skb->sk);
617 struct nlattr *tb[NETCONFA_MAX+1];
618 struct netconfmsg *ncm;
619 struct sk_buff *skb;
620 struct ipv6_devconf *devconf;
621 struct inet6_dev *in6_dev;
622 struct net_device *dev;
623 int ifindex;
624 int err;
625
626 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
627 devconf_ipv6_policy, NULL);
628 if (err < 0)
629 goto errout;
630
631 err = -EINVAL;
632 if (!tb[NETCONFA_IFINDEX])
633 goto errout;
634
635 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
636 switch (ifindex) {
637 case NETCONFA_IFINDEX_ALL:
638 devconf = net->ipv6.devconf_all;
639 break;
640 case NETCONFA_IFINDEX_DEFAULT:
641 devconf = net->ipv6.devconf_dflt;
642 break;
643 default:
644 dev = __dev_get_by_index(net, ifindex);
645 if (!dev)
646 goto errout;
647 in6_dev = __in6_dev_get(dev);
648 if (!in6_dev)
649 goto errout;
650 devconf = &in6_dev->cnf;
651 break;
652 }
653
654 err = -ENOBUFS;
655 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_ATOMIC);
656 if (!skb)
657 goto errout;
658
659 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
660 NETLINK_CB(in_skb).portid,
661 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
662 NETCONFA_ALL);
663 if (err < 0) {
664 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
665 WARN_ON(err == -EMSGSIZE);
666 kfree_skb(skb);
667 goto errout;
668 }
669 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
670 errout:
671 return err;
672 }
673
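/* Netlink dump of per-device IPv6 netconf state.  cb->args[0] and
 * cb->args[1] record the device hash bucket and in-bucket index reached
 * so far, so an interrupted dump can resume; the two pseudo buckets past
 * NETDEV_HASHENTRIES carry the "all" and "default" entries.
 */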
674 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
675 struct netlink_callback *cb)
676 {
677 struct net *net = sock_net(skb->sk);
678 int h, s_h;
679 int idx, s_idx;
680 struct net_device *dev;
681 struct inet6_dev *idev;
682 struct hlist_head *head;
683
684 s_h = cb->args[0];
685 s_idx = idx = cb->args[1];
686
687 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
688 idx = 0;
689 head = &net->dev_index_head[h];
690 rcu_read_lock();
691 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
692 net->dev_base_seq;
693 hlist_for_each_entry_rcu(dev, head, index_hlist) {
694 if (idx < s_idx)
695 goto cont;
696 idev = __in6_dev_get(dev);
697 if (!idev)
698 goto cont;
699
700 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
701 &idev->cnf,
702 NETLINK_CB(cb->skb).portid,
703 cb->nlh->nlmsg_seq,
704 RTM_NEWNETCONF,
705 NLM_F_MULTI,
706 NETCONFA_ALL) < 0) {
707 rcu_read_unlock();
708 goto done;
709 }
710 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
711 cont:
712 idx++;
713 }
714 rcu_read_unlock();
715 }
716 if (h == NETDEV_HASHENTRIES) {
717 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
718 net->ipv6.devconf_all,
719 NETLINK_CB(cb->skb).portid,
720 cb->nlh->nlmsg_seq,
721 RTM_NEWNETCONF, NLM_F_MULTI,
722 NETCONFA_ALL) < 0)
723 goto done;
724 else
725 h++;
726 }
727 if (h == NETDEV_HASHENTRIES + 1) {
728 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
729 net->ipv6.devconf_dflt,
730 NETLINK_CB(cb->skb).portid,
731 cb->nlh->nlmsg_seq,
732 RTM_NEWNETCONF, NLM_F_MULTI,
733 NETCONFA_ALL) < 0)
734 goto done;
735 else
736 h++;
737 }
738 done:
739 cb->args[0] = h;
740 cb->args[1] = idx;
741
742 return skb->len;
743 }
744
745 #ifdef CONFIG_SYSCTL
746 static void dev_forward_change(struct inet6_dev *idev)
747 {
748 struct net_device *dev;
749 struct inet6_ifaddr *ifa;
750
751 if (!idev)
752 return;
753 dev = idev->dev;
754 if (idev->cnf.forwarding)
755 dev_disable_lro(dev);
756 if (dev->flags & IFF_MULTICAST) {
757 if (idev->cnf.forwarding) {
758 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
759 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
760 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
761 } else {
762 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
763 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
764 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
765 }
766 }
767
768 list_for_each_entry(ifa, &idev->addr_list, if_list) {
769 if (ifa->flags&IFA_F_TENTATIVE)
770 continue;
771 if (idev->cnf.forwarding)
772 addrconf_join_anycast(ifa);
773 else
774 addrconf_leave_anycast(ifa);
775 }
776 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
777 NETCONFA_FORWARDING,
778 dev->ifindex, &idev->cnf);
779 }
780
781
782 static void addrconf_forward_change(struct net *net, __s32 newf)
783 {
784 struct net_device *dev;
785 struct inet6_dev *idev;
786
787 for_each_netdev(net, dev) {
788 idev = __in6_dev_get(dev);
789 if (idev) {
790 int changed = (!idev->cnf.forwarding) ^ (!newf);
791 idev->cnf.forwarding = newf;
792 if (changed)
793 dev_forward_change(idev);
794 }
795 }
796 }
797
798 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
799 {
800 struct net *net;
801 int old;
802
803 if (!rtnl_trylock())
804 return restart_syscall();
805
806 net = (struct net *)table->extra2;
807 old = *p;
808 *p = newf;
809
810 if (p == &net->ipv6.devconf_dflt->forwarding) {
811 if ((!newf) ^ (!old))
812 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
813 NETCONFA_FORWARDING,
814 NETCONFA_IFINDEX_DEFAULT,
815 net->ipv6.devconf_dflt);
816 rtnl_unlock();
817 return 0;
818 }
819
820 if (p == &net->ipv6.devconf_all->forwarding) {
821 int old_dflt = net->ipv6.devconf_dflt->forwarding;
822
823 net->ipv6.devconf_dflt->forwarding = newf;
824 if ((!newf) ^ (!old_dflt))
825 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
826 NETCONFA_FORWARDING,
827 NETCONFA_IFINDEX_DEFAULT,
828 net->ipv6.devconf_dflt);
829
830 addrconf_forward_change(net, newf);
831 if ((!newf) ^ (!old))
832 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
833 NETCONFA_FORWARDING,
834 NETCONFA_IFINDEX_ALL,
835 net->ipv6.devconf_all);
836 } else if ((!newf) ^ (!old))
837 dev_forward_change((struct inet6_dev *)table->extra1);
838 rtnl_unlock();
839
840 if (newf)
841 rt6_purge_dflt_routers(net);
842 return 1;
843 }
844
845 static void addrconf_linkdown_change(struct net *net, __s32 newf)
846 {
847 struct net_device *dev;
848 struct inet6_dev *idev;
849
850 for_each_netdev(net, dev) {
851 idev = __in6_dev_get(dev);
852 if (idev) {
853 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
854
855 idev->cnf.ignore_routes_with_linkdown = newf;
856 if (changed)
857 inet6_netconf_notify_devconf(dev_net(dev),
858 RTM_NEWNETCONF,
859 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
860 dev->ifindex,
861 &idev->cnf);
862 }
863 }
864 }
865
866 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
867 {
868 struct net *net;
869 int old;
870
871 if (!rtnl_trylock())
872 return restart_syscall();
873
874 net = (struct net *)table->extra2;
875 old = *p;
876 *p = newf;
877
878 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
879 if ((!newf) ^ (!old))
880 inet6_netconf_notify_devconf(net,
881 RTM_NEWNETCONF,
882 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
883 NETCONFA_IFINDEX_DEFAULT,
884 net->ipv6.devconf_dflt);
885 rtnl_unlock();
886 return 0;
887 }
888
889 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
890 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
891 addrconf_linkdown_change(net, newf);
892 if ((!newf) ^ (!old))
893 inet6_netconf_notify_devconf(net,
894 RTM_NEWNETCONF,
895 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
896 NETCONFA_IFINDEX_ALL,
897 net->ipv6.devconf_all);
898 }
899 rtnl_unlock();
900
901 return 1;
902 }
903
904 #endif
905
906 /* Nobody refers to this ifaddr, destroy it */
907 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
908 {
909 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
910
911 #ifdef NET_REFCNT_DEBUG
912 pr_debug("%s\n", __func__);
913 #endif
914
915 in6_dev_put(ifp->idev);
916
917 if (cancel_delayed_work(&ifp->dad_work))
918 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
919 ifp);
920
921 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
922 pr_warn("Freeing alive inet6 address %p\n", ifp);
923 return;
924 }
925 ip6_rt_put(ifp->rt);
926
927 kfree_rcu(ifp, rcu);
928 }
929
930 static void
931 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
932 {
933 struct list_head *p;
934 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
935
936 /*
937 * Each device address list is sorted in order of scope -
938 * global before linklocal.
939 */
940 list_for_each(p, &idev->addr_list) {
941 struct inet6_ifaddr *ifa
942 = list_entry(p, struct inet6_ifaddr, if_list);
943 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
944 break;
945 }
946
947 list_add_tail(&ifp->if_list, p);
948 }
949
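/* Bucket index for the global inet6_addr_lst[] table: ipv6_addr_hash()
 * folds the 128-bit address into 32 bits, hash_32() reduces that to
 * IN6_ADDR_HSIZE_SHIFT bits.
 */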
950 static u32 inet6_addr_hash(const struct in6_addr *addr)
951 {
952 return hash_32(ipv6_addr_hash(addr), IN6_ADDR_HSIZE_SHIFT);
953 }
954
955 /* On success it returns ifp with increased reference count */
956
957 static struct inet6_ifaddr *
958 ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
959 const struct in6_addr *peer_addr, int pfxlen,
960 int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
961 {
962 struct net *net = dev_net(idev->dev);
963 struct inet6_ifaddr *ifa = NULL;
964 struct rt6_info *rt;
965 unsigned int hash;
966 int err = 0;
967 int addr_type = ipv6_addr_type(addr);
968
969 if (addr_type == IPV6_ADDR_ANY ||
970 addr_type & IPV6_ADDR_MULTICAST ||
971 (!(idev->dev->flags & IFF_LOOPBACK) &&
972 addr_type & IPV6_ADDR_LOOPBACK))
973 return ERR_PTR(-EADDRNOTAVAIL);
974
975 rcu_read_lock_bh();
976 if (idev->dead) {
977 err = -ENODEV; /*XXX*/
978 goto out2;
979 }
980
981 if (idev->cnf.disable_ipv6) {
982 err = -EACCES;
983 goto out2;
984 }
985
986 spin_lock(&addrconf_hash_lock);
987
988 /* Ignore adding duplicate addresses on an interface */
989 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
990 ADBG("ipv6_add_addr: already assigned\n");
991 err = -EEXIST;
992 goto out;
993 }
994
995 ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
996
997 if (!ifa) {
998 ADBG("ipv6_add_addr: malloc failed\n");
999 err = -ENOBUFS;
1000 goto out;
1001 }
1002
1003 rt = addrconf_dst_alloc(idev, addr, false);
1004 if (IS_ERR(rt)) {
1005 err = PTR_ERR(rt);
1006 goto out;
1007 }
1008
1009 if (net->ipv6.devconf_all->disable_policy ||
1010 idev->cnf.disable_policy)
1011 rt->dst.flags |= DST_NOPOLICY;
1012
1013 neigh_parms_data_state_setall(idev->nd_parms);
1014
1015 ifa->addr = *addr;
1016 if (peer_addr)
1017 ifa->peer_addr = *peer_addr;
1018
1019 spin_lock_init(&ifa->lock);
1020 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1021 INIT_HLIST_NODE(&ifa->addr_lst);
1022 ifa->scope = scope;
1023 ifa->prefix_len = pfxlen;
1024 ifa->flags = flags | IFA_F_TENTATIVE;
1025 ifa->valid_lft = valid_lft;
1026 ifa->prefered_lft = prefered_lft;
1027 ifa->cstamp = ifa->tstamp = jiffies;
1028 ifa->tokenized = false;
1029
1030 ifa->rt = rt;
1031
1032 ifa->idev = idev;
1033 in6_dev_hold(idev);
1034 /* For caller */
1035 in6_ifa_hold(ifa);
1036
1037 /* Add to big hash table */
1038 hash = inet6_addr_hash(addr);
1039
1040 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
1041 spin_unlock(&addrconf_hash_lock);
1042
1043 write_lock(&idev->lock);
1044 /* Add to inet6_dev unicast addr list. */
1045 ipv6_link_dev_addr(idev, ifa);
1046
1047 if (ifa->flags&IFA_F_TEMPORARY) {
1048 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1049 in6_ifa_hold(ifa);
1050 }
1051
1052 in6_ifa_hold(ifa);
1053 write_unlock(&idev->lock);
1054 out2:
1055 rcu_read_unlock_bh();
1056
1057 if (likely(err == 0))
1058 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1059 else {
1060 kfree(ifa);
1061 ifa = ERR_PTR(err);
1062 }
1063
1064 return ifa;
1065 out:
1066 spin_unlock(&addrconf_hash_lock);
1067 goto out2;
1068 }
1069
1070 enum cleanup_prefix_rt_t {
1071 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1072 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1073 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1074 };
1075
1076 /*
1077 * Check, whether the prefix for ifp would still need a prefix route
1078 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1079 * constants.
1080 *
1081 * 1) we don't purge prefix if address was not permanent.
1082 * prefix is managed by its own lifetime.
1083 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1084 * 3) if there are no addresses, delete prefix.
1085 * 4) if there are still other permanent address(es),
1086 * corresponding prefix is still permanent.
1087 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1088 * don't purge the prefix, assume user space is managing it.
1089 * 6) otherwise, update prefix lifetime to the
1090 * longest valid lifetime among the corresponding
1091 * addresses on the device.
1092 * Note: subsequent RA will update lifetime.
1093 **/
1094 static enum cleanup_prefix_rt_t
1095 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1096 {
1097 struct inet6_ifaddr *ifa;
1098 struct inet6_dev *idev = ifp->idev;
1099 unsigned long lifetime;
1100 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1101
1102 *expires = jiffies;
1103
1104 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1105 if (ifa == ifp)
1106 continue;
1107 if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1108 ifp->prefix_len))
1109 continue;
1110 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1111 return CLEANUP_PREFIX_RT_NOP;
1112
1113 action = CLEANUP_PREFIX_RT_EXPIRE;
1114
1115 spin_lock(&ifa->lock);
1116
1117 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1118 /*
1119 * Note: Because this address is
1120 * not permanent, lifetime <
1121 * LONG_MAX / HZ here.
1122 */
1123 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1124 *expires = ifa->tstamp + lifetime * HZ;
1125 spin_unlock(&ifa->lock);
1126 }
1127
1128 return action;
1129 }
1130
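/* Delete the on-link prefix route that was installed for ifp, or just
 * update its expiry.  RTF_GATEWAY | RTF_DEFAULT is passed as "noflags"
 * so that only the interface route for the prefix can match, never a
 * default or gateway route.
 */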
1131 static void
1132 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
1133 {
1134 struct rt6_info *rt;
1135
1136 rt = addrconf_get_prefix_route(&ifp->addr,
1137 ifp->prefix_len,
1138 ifp->idev->dev,
1139 0, RTF_GATEWAY | RTF_DEFAULT);
1140 if (rt) {
1141 if (del_rt)
1142 ip6_del_rt(rt);
1143 else {
1144 if (!(rt->rt6i_flags & RTF_EXPIRES))
1145 rt6_set_expires(rt, expires);
1146 ip6_rt_put(rt);
1147 }
1148 }
1149 }
1150
1151
1152 /* This function wants to get referenced ifp and releases it before return */
1153
1154 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1155 {
1156 int state;
1157 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1158 unsigned long expires;
1159
1160 ASSERT_RTNL();
1161
1162 spin_lock_bh(&ifp->lock);
1163 state = ifp->state;
1164 ifp->state = INET6_IFADDR_STATE_DEAD;
1165 spin_unlock_bh(&ifp->lock);
1166
1167 if (state == INET6_IFADDR_STATE_DEAD)
1168 goto out;
1169
1170 spin_lock_bh(&addrconf_hash_lock);
1171 hlist_del_init_rcu(&ifp->addr_lst);
1172 spin_unlock_bh(&addrconf_hash_lock);
1173
1174 write_lock_bh(&ifp->idev->lock);
1175
1176 if (ifp->flags&IFA_F_TEMPORARY) {
1177 list_del(&ifp->tmp_list);
1178 if (ifp->ifpub) {
1179 in6_ifa_put(ifp->ifpub);
1180 ifp->ifpub = NULL;
1181 }
1182 __in6_ifa_put(ifp);
1183 }
1184
1185 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1186 action = check_cleanup_prefix_route(ifp, &expires);
1187
1188 list_del_init(&ifp->if_list);
1189 __in6_ifa_put(ifp);
1190
1191 write_unlock_bh(&ifp->idev->lock);
1192
1193 addrconf_del_dad_work(ifp);
1194
1195 ipv6_ifa_notify(RTM_DELADDR, ifp);
1196
1197 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1198
1199 if (action != CLEANUP_PREFIX_RT_NOP) {
1200 cleanup_prefix_route(ifp, expires,
1201 action == CLEANUP_PREFIX_RT_DEL);
1202 }
1203
1204 /* clean up prefsrc entries */
1205 rt6_remove_prefsrc(ifp);
1206 out:
1207 in6_ifa_put(ifp);
1208 }
1209
1210 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *ift)
1211 {
1212 struct inet6_dev *idev = ifp->idev;
1213 struct in6_addr addr, *tmpaddr;
1214 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
1215 unsigned long regen_advance;
1216 int tmp_plen;
1217 int ret = 0;
1218 u32 addr_flags;
1219 unsigned long now = jiffies;
1220 long max_desync_factor;
1221 s32 cnf_temp_preferred_lft;
1222
1223 write_lock_bh(&idev->lock);
1224 if (ift) {
1225 spin_lock_bh(&ift->lock);
1226 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
1227 spin_unlock_bh(&ift->lock);
1228 tmpaddr = &addr;
1229 } else {
1230 tmpaddr = NULL;
1231 }
1232 retry:
1233 in6_dev_hold(idev);
1234 if (idev->cnf.use_tempaddr <= 0) {
1235 write_unlock_bh(&idev->lock);
1236 pr_info("%s: use_tempaddr is disabled\n", __func__);
1237 in6_dev_put(idev);
1238 ret = -1;
1239 goto out;
1240 }
1241 spin_lock_bh(&ifp->lock);
1242 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1243 idev->cnf.use_tempaddr = -1; /*XXX*/
1244 spin_unlock_bh(&ifp->lock);
1245 write_unlock_bh(&idev->lock);
1246 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1247 __func__);
1248 in6_dev_put(idev);
1249 ret = -1;
1250 goto out;
1251 }
1252 in6_ifa_hold(ifp);
1253 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1254 ipv6_try_regen_rndid(idev, tmpaddr);
1255 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1256 age = (now - ifp->tstamp) / HZ;
1257
1258 regen_advance = idev->cnf.regen_max_retry *
1259 idev->cnf.dad_transmits *
1260 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1261
1262 /* recalculate max_desync_factor each time and update
1263 * idev->desync_factor if it's larger
1264 */
1265 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1266 max_desync_factor = min_t(__u32,
1267 idev->cnf.max_desync_factor,
1268 cnf_temp_preferred_lft - regen_advance);
1269
1270 if (unlikely(idev->desync_factor > max_desync_factor)) {
1271 if (max_desync_factor > 0) {
1272 get_random_bytes(&idev->desync_factor,
1273 sizeof(idev->desync_factor));
1274 idev->desync_factor %= max_desync_factor;
1275 } else {
1276 idev->desync_factor = 0;
1277 }
1278 }
1279
1280 tmp_valid_lft = min_t(__u32,
1281 ifp->valid_lft,
1282 idev->cnf.temp_valid_lft + age);
1283 tmp_prefered_lft = cnf_temp_preferred_lft + age -
1284 idev->desync_factor;
1285 tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
1286 tmp_plen = ifp->prefix_len;
1287 tmp_tstamp = ifp->tstamp;
1288 spin_unlock_bh(&ifp->lock);
1289
1290 write_unlock_bh(&idev->lock);
1291
1292 /* A temporary address is created only if this calculated Preferred
1293 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1294 * an implementation must not create a temporary address with a zero
1295 * Preferred Lifetime.
1296 * Use age calculation as in addrconf_verify to avoid unnecessary
1297 * temporary addresses being generated.
1298 */
1299 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1300 if (tmp_prefered_lft <= regen_advance + age) {
1301 in6_ifa_put(ifp);
1302 in6_dev_put(idev);
1303 ret = -1;
1304 goto out;
1305 }
1306
1307 addr_flags = IFA_F_TEMPORARY;
1308 /* set in addrconf_prefix_rcv() */
1309 if (ifp->flags & IFA_F_OPTIMISTIC)
1310 addr_flags |= IFA_F_OPTIMISTIC;
1311
1312 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1313 ipv6_addr_scope(&addr), addr_flags,
1314 tmp_valid_lft, tmp_prefered_lft);
1315 if (IS_ERR(ift)) {
1316 in6_ifa_put(ifp);
1317 in6_dev_put(idev);
1318 pr_info("%s: retry temporary address regeneration\n", __func__);
1319 tmpaddr = &addr;
1320 write_lock_bh(&idev->lock);
1321 goto retry;
1322 }
1323
1324 spin_lock_bh(&ift->lock);
1325 ift->ifpub = ifp;
1326 ift->cstamp = now;
1327 ift->tstamp = tmp_tstamp;
1328 spin_unlock_bh(&ift->lock);
1329
1330 addrconf_dad_start(ift);
1331 in6_ifa_put(ift);
1332 in6_dev_put(idev);
1333 out:
1334 return ret;
1335 }
1336
1337 /*
1338 * Choose an appropriate source address (RFC3484)
1339 */
1340 enum {
1341 IPV6_SADDR_RULE_INIT = 0,
1342 IPV6_SADDR_RULE_LOCAL,
1343 IPV6_SADDR_RULE_SCOPE,
1344 IPV6_SADDR_RULE_PREFERRED,
1345 #ifdef CONFIG_IPV6_MIP6
1346 IPV6_SADDR_RULE_HOA,
1347 #endif
1348 IPV6_SADDR_RULE_OIF,
1349 IPV6_SADDR_RULE_LABEL,
1350 IPV6_SADDR_RULE_PRIVACY,
1351 IPV6_SADDR_RULE_ORCHID,
1352 IPV6_SADDR_RULE_PREFIX,
1353 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1354 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1355 #endif
1356 IPV6_SADDR_RULE_MAX
1357 };
1358
1359 struct ipv6_saddr_score {
1360 int rule;
1361 int addr_type;
1362 struct inet6_ifaddr *ifa;
1363 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1364 int scopedist;
1365 int matchlen;
1366 };
1367
1368 struct ipv6_saddr_dst {
1369 const struct in6_addr *addr;
1370 int ifindex;
1371 int scope;
1372 int label;
1373 unsigned int prefs;
1374 };
1375
1376 static inline int ipv6_saddr_preferred(int type)
1377 {
1378 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1379 return 1;
1380 return 0;
1381 }
1382
1383 static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
1384 {
1385 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1386 return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
1387 #else
1388 return false;
1389 #endif
1390 }
1391
1392 static int ipv6_get_saddr_eval(struct net *net,
1393 struct ipv6_saddr_score *score,
1394 struct ipv6_saddr_dst *dst,
1395 int i)
1396 {
1397 int ret;
1398
1399 if (i <= score->rule) {
1400 switch (i) {
1401 case IPV6_SADDR_RULE_SCOPE:
1402 ret = score->scopedist;
1403 break;
1404 case IPV6_SADDR_RULE_PREFIX:
1405 ret = score->matchlen;
1406 break;
1407 default:
1408 ret = !!test_bit(i, score->scorebits);
1409 }
1410 goto out;
1411 }
1412
1413 switch (i) {
1414 case IPV6_SADDR_RULE_INIT:
1415 /* Rule 0: remember if hiscore is not ready yet */
1416 ret = !!score->ifa;
1417 break;
1418 case IPV6_SADDR_RULE_LOCAL:
1419 /* Rule 1: Prefer same address */
1420 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1421 break;
1422 case IPV6_SADDR_RULE_SCOPE:
1423 /* Rule 2: Prefer appropriate scope
1424 *
1425 * ret
1426 * ^
1427 * -1 | d 15
1428 * ---+--+-+---> scope
1429 * |
1430 * | d is scope of the destination.
1431 * B-d | \
1432 * | \ <- smaller scope is better if
1433 * B-15 | \ if scope is enough for destination.
1434 * | ret = B - scope (-1 <= d <= scope <= 15).
1435 * d-C-1 | /
1436 * |/ <- greater is better
1437 * -C / if scope is not enough for destination.
1438 * /| ret = scope - C (-1 <= scope < d <= 15).
1439 *
1440 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1441 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1442 * Assume B = 0 and we get C > 29.
1443 */
1444 ret = __ipv6_addr_src_scope(score->addr_type);
1445 if (ret >= dst->scope)
1446 ret = -ret;
1447 else
1448 ret -= 128; /* 30 is enough */
1449 score->scopedist = ret;
1450 break;
1451 case IPV6_SADDR_RULE_PREFERRED:
1452 {
1453 /* Rule 3: Avoid deprecated and optimistic addresses */
1454 u8 avoid = IFA_F_DEPRECATED;
1455
1456 if (!ipv6_use_optimistic_addr(score->ifa->idev))
1457 avoid |= IFA_F_OPTIMISTIC;
1458 ret = ipv6_saddr_preferred(score->addr_type) ||
1459 !(score->ifa->flags & avoid);
1460 break;
1461 }
1462 #ifdef CONFIG_IPV6_MIP6
1463 case IPV6_SADDR_RULE_HOA:
1464 {
1465 /* Rule 4: Prefer home address */
1466 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1467 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1468 break;
1469 }
1470 #endif
1471 case IPV6_SADDR_RULE_OIF:
1472 /* Rule 5: Prefer outgoing interface */
1473 ret = (!dst->ifindex ||
1474 dst->ifindex == score->ifa->idev->dev->ifindex);
1475 break;
1476 case IPV6_SADDR_RULE_LABEL:
1477 /* Rule 6: Prefer matching label */
1478 ret = ipv6_addr_label(net,
1479 &score->ifa->addr, score->addr_type,
1480 score->ifa->idev->dev->ifindex) == dst->label;
1481 break;
1482 case IPV6_SADDR_RULE_PRIVACY:
1483 {
1484 /* Rule 7: Prefer public address
1485 * Note: prefer temporary address if use_tempaddr >= 2
1486 */
1487 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1488 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1489 score->ifa->idev->cnf.use_tempaddr >= 2;
1490 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1491 break;
1492 }
1493 case IPV6_SADDR_RULE_ORCHID:
1494 /* Rule 8-: Prefer ORCHID vs ORCHID or
1495 * non-ORCHID vs non-ORCHID
1496 */
1497 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1498 ipv6_addr_orchid(dst->addr));
1499 break;
1500 case IPV6_SADDR_RULE_PREFIX:
1501 /* Rule 8: Use longest matching prefix */
1502 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1503 if (ret > score->ifa->prefix_len)
1504 ret = score->ifa->prefix_len;
1505 score->matchlen = ret;
1506 break;
1507 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1508 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1509 /* Optimistic addresses still have lower precedence than other
1510 * preferred addresses.
1511 */
1512 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1513 break;
1514 #endif
1515 default:
1516 ret = 0;
1517 }
1518
1519 if (ret)
1520 __set_bit(i, score->scorebits);
1521 score->rule = i;
1522 out:
1523 return ret;
1524 }
1525
1526 static int __ipv6_dev_get_saddr(struct net *net,
1527 struct ipv6_saddr_dst *dst,
1528 struct inet6_dev *idev,
1529 struct ipv6_saddr_score *scores,
1530 int hiscore_idx)
1531 {
1532 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1533
1534 read_lock_bh(&idev->lock);
1535 list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
1536 int i;
1537
1538 /*
1539 * - Tentative Address (RFC2462 section 5.4)
1540 * - A tentative address is not considered
1541 * "assigned to an interface" in the traditional
1542 * sense, unless it is also flagged as optimistic.
1543 * - Candidate Source Address (section 4)
1544 * - In any case, anycast addresses, multicast
1545 * addresses, and the unspecified address MUST
1546 * NOT be included in a candidate set.
1547 */
1548 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1549 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1550 continue;
1551
1552 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1553
1554 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1555 score->addr_type & IPV6_ADDR_MULTICAST)) {
1556 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1557 idev->dev->name);
1558 continue;
1559 }
1560
1561 score->rule = -1;
1562 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1563
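/* Walk the source address selection rules (RFC 3484/6724) in order.
 * The first rule on which the candidate and the current best address
 * score differently decides the comparison; equal scores fall through
 * to the next rule.
 */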
1564 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1565 int minihiscore, miniscore;
1566
1567 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1568 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1569
1570 if (minihiscore > miniscore) {
1571 if (i == IPV6_SADDR_RULE_SCOPE &&
1572 score->scopedist > 0) {
1573 /*
1574 * special case:
1575 * each remaining entry
1576 * has too small (not enough)
1577 * scope, because ifa entries
1578 * are sorted by their scope
1579 * values.
1580 */
1581 goto out;
1582 }
1583 break;
1584 } else if (minihiscore < miniscore) {
1585 if (hiscore->ifa)
1586 in6_ifa_put(hiscore->ifa);
1587
1588 in6_ifa_hold(score->ifa);
1589
1590 swap(hiscore, score);
1591 hiscore_idx = 1 - hiscore_idx;
1592
1593 /* restore our iterator */
1594 score->ifa = hiscore->ifa;
1595
1596 break;
1597 }
1598 }
1599 }
1600 out:
1601 read_unlock_bh(&idev->lock);
1602 return hiscore_idx;
1603 }
1604
1605 static int ipv6_get_saddr_master(struct net *net,
1606 const struct net_device *dst_dev,
1607 const struct net_device *master,
1608 struct ipv6_saddr_dst *dst,
1609 struct ipv6_saddr_score *scores,
1610 int hiscore_idx)
1611 {
1612 struct inet6_dev *idev;
1613
1614 idev = __in6_dev_get(dst_dev);
1615 if (idev)
1616 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1617 scores, hiscore_idx);
1618
1619 idev = __in6_dev_get(master);
1620 if (idev)
1621 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1622 scores, hiscore_idx);
1623
1624 return hiscore_idx;
1625 }
1626
1627 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1628 const struct in6_addr *daddr, unsigned int prefs,
1629 struct in6_addr *saddr)
1630 {
1631 struct ipv6_saddr_score scores[2], *hiscore;
1632 struct ipv6_saddr_dst dst;
1633 struct inet6_dev *idev;
1634 struct net_device *dev;
1635 int dst_type;
1636 bool use_oif_addr = false;
1637 int hiscore_idx = 0;
1638
1639 dst_type = __ipv6_addr_type(daddr);
1640 dst.addr = daddr;
1641 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1642 dst.scope = __ipv6_addr_src_scope(dst_type);
1643 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1644 dst.prefs = prefs;
1645
1646 scores[hiscore_idx].rule = -1;
1647 scores[hiscore_idx].ifa = NULL;
1648
1649 rcu_read_lock();
1650
1651 /* Candidate Source Address (section 4)
1652 * - multicast and link-local destination address,
1653 * the set of candidate source address MUST only
1654 * include addresses assigned to interfaces
1655 * belonging to the same link as the outgoing
1656 * interface.
1657 * (- For site-local destination addresses, the
1658 * set of candidate source addresses MUST only
1659 * include addresses assigned to interfaces
1660 * belonging to the same site as the outgoing
1661 * interface.)
1662 * - "It is RECOMMENDED that the candidate source addresses
1663 * be the set of unicast addresses assigned to the
1664 * interface that will be used to send to the destination
1665 * (the 'outgoing' interface)." (RFC 6724)
1666 */
1667 if (dst_dev) {
1668 idev = __in6_dev_get(dst_dev);
1669 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1670 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1671 (idev && idev->cnf.use_oif_addrs_only)) {
1672 use_oif_addr = true;
1673 }
1674 }
1675
1676 if (use_oif_addr) {
1677 if (idev)
1678 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1679 } else {
1680 const struct net_device *master;
1681 int master_idx = 0;
1682
1683 /* if dst_dev exists and is enslaved to an L3 device, then
1684 * prefer addresses from dst_dev and then the master over
1685 * any other enslaved devices in the L3 domain.
1686 */
1687 master = l3mdev_master_dev_rcu(dst_dev);
1688 if (master) {
1689 master_idx = master->ifindex;
1690
1691 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1692 master, &dst,
1693 scores, hiscore_idx);
1694
1695 if (scores[hiscore_idx].ifa)
1696 goto out;
1697 }
1698
1699 for_each_netdev_rcu(net, dev) {
1700 /* only consider addresses on devices in the
1701 * same L3 domain
1702 */
1703 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1704 continue;
1705 idev = __in6_dev_get(dev);
1706 if (!idev)
1707 continue;
1708 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1709 }
1710 }
1711
1712 out:
1713 rcu_read_unlock();
1714
1715 hiscore = &scores[hiscore_idx];
1716 if (!hiscore->ifa)
1717 return -EADDRNOTAVAIL;
1718
1719 *saddr = hiscore->ifa->addr;
1720 in6_ifa_put(hiscore->ifa);
1721 return 0;
1722 }
1723 EXPORT_SYMBOL(ipv6_dev_get_saddr);
1724
1725 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1726 u32 banned_flags)
1727 {
1728 struct inet6_ifaddr *ifp;
1729 int err = -EADDRNOTAVAIL;
1730
1731 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1732 if (ifp->scope > IFA_LINK)
1733 break;
1734 if (ifp->scope == IFA_LINK &&
1735 !(ifp->flags & banned_flags)) {
1736 *addr = ifp->addr;
1737 err = 0;
1738 break;
1739 }
1740 }
1741 return err;
1742 }
1743
1744 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1745 u32 banned_flags)
1746 {
1747 struct inet6_dev *idev;
1748 int err = -EADDRNOTAVAIL;
1749
1750 rcu_read_lock();
1751 idev = __in6_dev_get(dev);
1752 if (idev) {
1753 read_lock_bh(&idev->lock);
1754 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1755 read_unlock_bh(&idev->lock);
1756 }
1757 rcu_read_unlock();
1758 return err;
1759 }
1760
1761 static int ipv6_count_addresses(struct inet6_dev *idev)
1762 {
1763 int cnt = 0;
1764 struct inet6_ifaddr *ifp;
1765
1766 read_lock_bh(&idev->lock);
1767 list_for_each_entry(ifp, &idev->addr_list, if_list)
1768 cnt++;
1769 read_unlock_bh(&idev->lock);
1770 return cnt;
1771 }
1772
1773 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1774 const struct net_device *dev, int strict)
1775 {
1776 return ipv6_chk_addr_and_flags(net, addr, dev, strict, IFA_F_TENTATIVE);
1777 }
1778 EXPORT_SYMBOL(ipv6_chk_addr);
1779
1780 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1781 const struct net_device *dev, int strict,
1782 u32 banned_flags)
1783 {
1784 struct inet6_ifaddr *ifp;
1785 unsigned int hash = inet6_addr_hash(addr);
1786 u32 ifp_flags;
1787
1788 rcu_read_lock_bh();
1789 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1790 if (!net_eq(dev_net(ifp->idev->dev), net))
1791 continue;
1792 /* Decouple optimistic from tentative for evaluation here.
1793 * Ban optimistic addresses explicitly, when required.
1794 */
1795 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1796 ? (ifp->flags&~IFA_F_TENTATIVE)
1797 : ifp->flags;
1798 if (ipv6_addr_equal(&ifp->addr, addr) &&
1799 !(ifp_flags&banned_flags) &&
1800 (!dev || ifp->idev->dev == dev ||
1801 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1802 rcu_read_unlock_bh();
1803 return 1;
1804 }
1805 }
1806
1807 rcu_read_unlock_bh();
1808 return 0;
1809 }
1810 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1811
1812 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1813 struct net_device *dev)
1814 {
1815 unsigned int hash = inet6_addr_hash(addr);
1816 struct inet6_ifaddr *ifp;
1817
1818 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
1819 if (!net_eq(dev_net(ifp->idev->dev), net))
1820 continue;
1821 if (ipv6_addr_equal(&ifp->addr, addr)) {
1822 if (!dev || ifp->idev->dev == dev)
1823 return true;
1824 }
1825 }
1826 return false;
1827 }
1828
1829 /* Compares an address/prefix_len with addresses on device @dev.
1830 * If one is found it returns true.
1831 */
1832 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1833 const unsigned int prefix_len, struct net_device *dev)
1834 {
1835 struct inet6_dev *idev;
1836 struct inet6_ifaddr *ifa;
1837 bool ret = false;
1838
1839 rcu_read_lock();
1840 idev = __in6_dev_get(dev);
1841 if (idev) {
1842 read_lock_bh(&idev->lock);
1843 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1844 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1845 if (ret)
1846 break;
1847 }
1848 read_unlock_bh(&idev->lock);
1849 }
1850 rcu_read_unlock();
1851
1852 return ret;
1853 }
1854 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1855
1856 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1857 {
1858 struct inet6_dev *idev;
1859 struct inet6_ifaddr *ifa;
1860 int onlink;
1861
1862 onlink = 0;
1863 rcu_read_lock();
1864 idev = __in6_dev_get(dev);
1865 if (idev) {
1866 read_lock_bh(&idev->lock);
1867 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1868 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1869 ifa->prefix_len);
1870 if (onlink)
1871 break;
1872 }
1873 read_unlock_bh(&idev->lock);
1874 }
1875 rcu_read_unlock();
1876 return onlink;
1877 }
1878 EXPORT_SYMBOL(ipv6_chk_prefix);
1879
1880 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1881 struct net_device *dev, int strict)
1882 {
1883 struct inet6_ifaddr *ifp, *result = NULL;
1884 unsigned int hash = inet6_addr_hash(addr);
1885
1886 rcu_read_lock_bh();
1887 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
1888 if (!net_eq(dev_net(ifp->idev->dev), net))
1889 continue;
1890 if (ipv6_addr_equal(&ifp->addr, addr)) {
1891 if (!dev || ifp->idev->dev == dev ||
1892 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1893 result = ifp;
1894 in6_ifa_hold(ifp);
1895 break;
1896 }
1897 }
1898 }
1899 rcu_read_unlock_bh();
1900
1901 return result;
1902 }
1903
1904 /* Gets referenced address, destroys ifaddr */
1905
1906 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1907 {
1908 if (dad_failed)
1909 ifp->flags |= IFA_F_DADFAILED;
1910
1911 if (ifp->flags&IFA_F_PERMANENT) {
1912 spin_lock_bh(&ifp->lock);
1913 addrconf_del_dad_work(ifp);
1914 ifp->flags |= IFA_F_TENTATIVE;
1915 spin_unlock_bh(&ifp->lock);
1916 if (dad_failed)
1917 ipv6_ifa_notify(0, ifp);
1918 in6_ifa_put(ifp);
1919 } else if (ifp->flags&IFA_F_TEMPORARY) {
1920 struct inet6_ifaddr *ifpub;
1921 spin_lock_bh(&ifp->lock);
1922 ifpub = ifp->ifpub;
1923 if (ifpub) {
1924 in6_ifa_hold(ifpub);
1925 spin_unlock_bh(&ifp->lock);
1926 ipv6_create_tempaddr(ifpub, ifp);
1927 in6_ifa_put(ifpub);
1928 } else {
1929 spin_unlock_bh(&ifp->lock);
1930 }
1931 ipv6_del_addr(ifp);
1932 } else {
1933 ipv6_del_addr(ifp);
1934 }
1935 }
1936
1937 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
1938 {
1939 int err = -ENOENT;
1940
1941 spin_lock_bh(&ifp->lock);
1942 if (ifp->state == INET6_IFADDR_STATE_DAD) {
1943 ifp->state = INET6_IFADDR_STATE_POSTDAD;
1944 err = 0;
1945 }
1946 spin_unlock_bh(&ifp->lock);
1947
1948 return err;
1949 }
1950
1951 void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1952 {
1953 struct inet6_dev *idev = ifp->idev;
1954 struct net *net = dev_net(ifp->idev->dev);
1955
1956 if (addrconf_dad_end(ifp)) {
1957 in6_ifa_put(ifp);
1958 return;
1959 }
1960
1961 net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
1962 ifp->idev->dev->name, &ifp->addr);
1963
1964 spin_lock_bh(&ifp->lock);
1965
1966 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
1967 int scope = ifp->scope;
1968 u32 flags = ifp->flags;
1969 struct in6_addr new_addr;
1970 struct inet6_ifaddr *ifp2;
1971 u32 valid_lft, preferred_lft;
1972 int pfxlen = ifp->prefix_len;
1973 int retries = ifp->stable_privacy_retry + 1;
1974
1975 if (retries > net->ipv6.sysctl.idgen_retries) {
1976 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
1977 ifp->idev->dev->name);
1978 goto errdad;
1979 }
1980
1981 new_addr = ifp->addr;
1982 if (ipv6_generate_stable_address(&new_addr, retries,
1983 idev))
1984 goto errdad;
1985
1986 valid_lft = ifp->valid_lft;
1987 preferred_lft = ifp->prefered_lft;
1988
1989 spin_unlock_bh(&ifp->lock);
1990
1991 if (idev->cnf.max_addresses &&
1992 ipv6_count_addresses(idev) >=
1993 idev->cnf.max_addresses)
1994 goto lock_errdad;
1995
1996 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
1997 ifp->idev->dev->name);
1998
1999 ifp2 = ipv6_add_addr(idev, &new_addr, NULL, pfxlen,
2000 scope, flags, valid_lft,
2001 preferred_lft);
2002 if (IS_ERR(ifp2))
2003 goto lock_errdad;
2004
2005 spin_lock_bh(&ifp2->lock);
2006 ifp2->stable_privacy_retry = retries;
2007 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2008 spin_unlock_bh(&ifp2->lock);
2009
2010 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2011 in6_ifa_put(ifp2);
2012 lock_errdad:
2013 spin_lock_bh(&ifp->lock);
2014 }
2015
2016 errdad:
2017 /* transition from _POSTDAD to _ERRDAD */
2018 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2019 spin_unlock_bh(&ifp->lock);
2020
2021 addrconf_mod_dad_work(ifp, 0);
2022 in6_ifa_put(ifp);
2023 }
2024
2025 /* Join to solicited addr multicast group.
2026 * caller must hold RTNL */
2027 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2028 {
2029 struct in6_addr maddr;
2030
2031 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2032 return;
2033
2034 addrconf_addr_solict_mult(addr, &maddr);
2035 ipv6_dev_mc_inc(dev, &maddr);
2036 }
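/* Illustration (not part of the upstream file): the solicited-node group
 * joined above is ff02::1:ff00:0/104 plus the low 24 bits of the unicast
 * address, so e.g. 2001:db8::1 joins ff02::1:ff00:1.  A minimal sketch of
 * the mapping, equivalent to what addrconf_addr_solict_mult() is expected
 * to do:
 *
 *	static void example_solicited_node(const struct in6_addr *uni,
 *					   struct in6_addr *grp)
 *	{
 *		ipv6_addr_set(grp, htonl(0xFF020000), 0, htonl(0x1),
 *			      htonl(0xFF000000) | uni->s6_addr32[3]);
 *	}
 */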
2037
2038 /* caller must hold RTNL */
2039 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2040 {
2041 struct in6_addr maddr;
2042
2043 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2044 return;
2045
2046 addrconf_addr_solict_mult(addr, &maddr);
2047 __ipv6_dev_mc_dec(idev, &maddr);
2048 }
2049
2050 /* caller must hold RTNL */
2051 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2052 {
2053 struct in6_addr addr;
2054
2055 if (ifp->prefix_len >= 127) /* RFC 6164 */
2056 return;
2057 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2058 if (ipv6_addr_any(&addr))
2059 return;
2060 __ipv6_dev_ac_inc(ifp->idev, &addr);
2061 }
2062
2063 /* caller must hold RTNL */
2064 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2065 {
2066 struct in6_addr addr;
2067
2068 if (ifp->prefix_len >= 127) /* RFC 6164 */
2069 return;
2070 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2071 if (ipv6_addr_any(&addr))
2072 return;
2073 __ipv6_dev_ac_dec(ifp->idev, &addr);
2074 }
2075
2076 static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
2077 {
2078 if (dev->addr_len != EUI64_ADDR_LEN)
2079 return -1;
2080 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2081 eui[0] ^= 2;
2082 return 0;
2083 }
2084
2085 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2086 {
2087 union fwnet_hwaddr *ha;
2088
2089 if (dev->addr_len != FWNET_ALEN)
2090 return -1;
2091
2092 ha = (union fwnet_hwaddr *)dev->dev_addr;
2093
2094 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2095 eui[0] ^= 2;
2096 return 0;
2097 }
2098
2099 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2100 {
2101 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2102 if (dev->addr_len != ARCNET_ALEN)
2103 return -1;
2104 memset(eui, 0, 7);
2105 eui[7] = *(u8 *)dev->dev_addr;
2106 return 0;
2107 }
2108
2109 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2110 {
2111 if (dev->addr_len != INFINIBAND_ALEN)
2112 return -1;
2113 memcpy(eui, dev->dev_addr + 12, 8);
2114 eui[0] |= 2;
2115 return 0;
2116 }
2117
2118 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2119 {
2120 if (addr == 0)
2121 return -1;
2122 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2123 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2124 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2125 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2126 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2127 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2128 eui[1] = 0;
2129 eui[2] = 0x5E;
2130 eui[3] = 0xFE;
2131 memcpy(eui + 4, &addr, 4);
2132 return 0;
2133 }
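/* Worked example (illustration only): the ISATAP interface identifier is
 * 00-00-5E-FE followed by the embedded IPv4 address, with the "u" bit set
 * when that address is not matched by any of the special-range checks
 * above.  So 203.0.113.5 yields the identifier 0200:5efe:cb00:7105 and the
 * link-local address fe80::200:5efe:cb00:7105, while the private address
 * 192.168.1.1 yields 0000:5efe:c0a8:0101, i.e. fe80::5efe:c0a8:101.
 */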
2134
2135 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2136 {
2137 if (dev->priv_flags & IFF_ISATAP)
2138 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2139 return -1;
2140 }
2141
2142 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2143 {
2144 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2145 }
2146
2147 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2148 {
2149 memcpy(eui, dev->perm_addr, 3);
2150 memcpy(eui + 5, dev->perm_addr + 3, 3);
2151 eui[3] = 0xFF;
2152 eui[4] = 0xFE;
2153 eui[0] ^= 2;
2154 return 0;
2155 }
2156
2157 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2158 {
2159 switch (dev->type) {
2160 case ARPHRD_ETHER:
2161 case ARPHRD_FDDI:
2162 return addrconf_ifid_eui48(eui, dev);
2163 case ARPHRD_ARCNET:
2164 return addrconf_ifid_arcnet(eui, dev);
2165 case ARPHRD_INFINIBAND:
2166 return addrconf_ifid_infiniband(eui, dev);
2167 case ARPHRD_SIT:
2168 return addrconf_ifid_sit(eui, dev);
2169 case ARPHRD_IPGRE:
2170 case ARPHRD_TUNNEL:
2171 return addrconf_ifid_gre(eui, dev);
2172 case ARPHRD_6LOWPAN:
2173 return addrconf_ifid_eui64(eui, dev);
2174 case ARPHRD_IEEE1394:
2175 return addrconf_ifid_ieee1394(eui, dev);
2176 case ARPHRD_TUNNEL6:
2177 case ARPHRD_IP6GRE:
2178 return addrconf_ifid_ip6tnl(eui, dev);
2179 }
2180 return -1;
2181 }
2182
2183 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2184 {
2185 int err = -1;
2186 struct inet6_ifaddr *ifp;
2187
2188 read_lock_bh(&idev->lock);
2189 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2190 if (ifp->scope > IFA_LINK)
2191 break;
2192 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2193 memcpy(eui, ifp->addr.s6_addr+8, 8);
2194 err = 0;
2195 break;
2196 }
2197 }
2198 read_unlock_bh(&idev->lock);
2199 return err;
2200 }
2201
2202 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2203 static void ipv6_regen_rndid(struct inet6_dev *idev)
2204 {
2205 regen:
2206 get_random_bytes(idev->rndid, sizeof(idev->rndid));
2207 idev->rndid[0] &= ~0x02;
2208
2209 /*
2210 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
2211 * check if generated address is not inappropriate
2212 *
2213 * - Reserved subnet anycast (RFC 2526)
2214 * 11111101 11....11 1xxxxxxx
2215 * - ISATAP (RFC4214) 6.1
2216 * 00-00-5E-FE-xx-xx-xx-xx
2217 * - value 0
2218 * - XXX: already assigned to an address on the device
2219 */
2220 if (idev->rndid[0] == 0xfd &&
2221 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
2222 (idev->rndid[7]&0x80))
2223 goto regen;
2224 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
2225 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
2226 goto regen;
2227 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
2228 goto regen;
2229 }
2230 }
2231
2232 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2233 {
2234 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2235 ipv6_regen_rndid(idev);
2236 }
2237
2238 /*
2239 * Add prefix route.
2240 */
2241
2242 static void
2243 addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
2244 unsigned long expires, u32 flags)
2245 {
2246 struct fib6_config cfg = {
2247 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2248 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2249 .fc_ifindex = dev->ifindex,
2250 .fc_expires = expires,
2251 .fc_dst_len = plen,
2252 .fc_flags = RTF_UP | flags,
2253 .fc_nlinfo.nl_net = dev_net(dev),
2254 .fc_protocol = RTPROT_KERNEL,
2255 };
2256
2257 cfg.fc_dst = *pfx;
2258
2259 /* Prevent useless cloning on PtP SIT.
2260 This is done here on the assumption that the whole
2261 class of non-broadcast devices needs no cloning.
2262 */
2263 #if IS_ENABLED(CONFIG_IPV6_SIT)
2264 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2265 cfg.fc_flags |= RTF_NONEXTHOP;
2266 #endif
2267
2268 ip6_route_add(&cfg);
2269 }
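/* Usage sketch (illustration): the link-local prefix route added later in
 * this file uses
 *
 *	addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
 *
 * i.e. no expiry and no extra RTF_* flags beyond RTF_UP.
 */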
2270
2271
2272 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2273 int plen,
2274 const struct net_device *dev,
2275 u32 flags, u32 noflags)
2276 {
2277 struct fib6_node *fn;
2278 struct rt6_info *rt = NULL;
2279 struct fib6_table *table;
2280 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2281
2282 table = fib6_get_table(dev_net(dev), tb_id);
2283 if (!table)
2284 return NULL;
2285
2286 read_lock_bh(&table->tb6_lock);
2287 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
2288 if (!fn)
2289 goto out;
2290
2291 noflags |= RTF_CACHE;
2292 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2293 if (rt->dst.dev->ifindex != dev->ifindex)
2294 continue;
2295 if ((rt->rt6i_flags & flags) != flags)
2296 continue;
2297 if ((rt->rt6i_flags & noflags) != 0)
2298 continue;
2299 dst_hold(&rt->dst);
2300 break;
2301 }
2302 out:
2303 read_unlock_bh(&table->tb6_lock);
2304 return rt;
2305 }
2306
2307
2308 /* Create "default" multicast route to the interface */
2309
2310 static void addrconf_add_mroute(struct net_device *dev)
2311 {
2312 struct fib6_config cfg = {
2313 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2314 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2315 .fc_ifindex = dev->ifindex,
2316 .fc_dst_len = 8,
2317 .fc_flags = RTF_UP,
2318 .fc_nlinfo.nl_net = dev_net(dev),
2319 };
2320
2321 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2322
2323 ip6_route_add(&cfg);
2324 }
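/* Illustration: the cfg above describes ff00::/8 (fc_dst = ff00:: with
 * fc_dst_len = 8), i.e. a catch-all multicast route through this device.
 */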
2325
2326 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2327 {
2328 struct inet6_dev *idev;
2329
2330 ASSERT_RTNL();
2331
2332 idev = ipv6_find_idev(dev);
2333 if (!idev)
2334 return ERR_PTR(-ENOBUFS);
2335
2336 if (idev->cnf.disable_ipv6)
2337 return ERR_PTR(-EACCES);
2338
2339 /* Add default multicast route */
2340 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2341 addrconf_add_mroute(dev);
2342
2343 return idev;
2344 }
2345
2346 static void manage_tempaddrs(struct inet6_dev *idev,
2347 struct inet6_ifaddr *ifp,
2348 __u32 valid_lft, __u32 prefered_lft,
2349 bool create, unsigned long now)
2350 {
2351 u32 flags;
2352 struct inet6_ifaddr *ift;
2353
2354 read_lock_bh(&idev->lock);
2355 /* update all temporary addresses in the list */
2356 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2357 int age, max_valid, max_prefered;
2358
2359 if (ifp != ift->ifpub)
2360 continue;
2361
2362 /* RFC 4941 section 3.3:
2363 * If a received option will extend the lifetime of a public
2364 * address, the lifetimes of temporary addresses should
2365 * be extended, subject to the overall constraint that no
2366 * temporary addresses should ever remain "valid" or "preferred"
2367 * for a time longer than (TEMP_VALID_LIFETIME) or
2368 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2369 */
2370 age = (now - ift->cstamp) / HZ;
2371 max_valid = idev->cnf.temp_valid_lft - age;
2372 if (max_valid < 0)
2373 max_valid = 0;
2374
2375 max_prefered = idev->cnf.temp_prefered_lft -
2376 idev->desync_factor - age;
2377 if (max_prefered < 0)
2378 max_prefered = 0;
2379
2380 if (valid_lft > max_valid)
2381 valid_lft = max_valid;
2382
2383 if (prefered_lft > max_prefered)
2384 prefered_lft = max_prefered;
2385
2386 spin_lock(&ift->lock);
2387 flags = ift->flags;
2388 ift->valid_lft = valid_lft;
2389 ift->prefered_lft = prefered_lft;
2390 ift->tstamp = now;
2391 if (prefered_lft > 0)
2392 ift->flags &= ~IFA_F_DEPRECATED;
2393
2394 spin_unlock(&ift->lock);
2395 if (!(flags&IFA_F_TENTATIVE))
2396 ipv6_ifa_notify(0, ift);
2397 }
2398
2399 if ((create || list_empty(&idev->tempaddr_list)) &&
2400 idev->cnf.use_tempaddr > 0) {
2401 /* When a new public address is created as described
2402 * in [ADDRCONF], also create a new temporary address.
2403 * Also create a temporary address if it's enabled but
2404 * no temporary address currently exists.
2405 */
2406 read_unlock_bh(&idev->lock);
2407 ipv6_create_tempaddr(ifp, NULL);
2408 } else {
2409 read_unlock_bh(&idev->lock);
2410 }
2411 }
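/* Worked example of the clamping above (illustration only, assuming the
 * usual defaults temp_valid_lft = 604800s, temp_prefered_lft = 86400s and
 * a desync_factor of 300s): for a temporary address that is already
 * 43200s old, max_valid = 604800 - 43200 = 561600s and max_prefered =
 * 86400 - 300 - 43200 = 42900s, so an advertised valid lifetime of 30
 * days is cut to 561600s and an advertised preferred lifetime of 7 days
 * to 42900s.
 */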
2412
2413 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2414 {
2415 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2416 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2417 }
2418
2419 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2420 const struct prefix_info *pinfo,
2421 struct inet6_dev *in6_dev,
2422 const struct in6_addr *addr, int addr_type,
2423 u32 addr_flags, bool sllao, bool tokenized,
2424 __u32 valid_lft, u32 prefered_lft)
2425 {
2426 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2427 int create = 0, update_lft = 0;
2428
2429 if (!ifp && valid_lft) {
2430 int max_addresses = in6_dev->cnf.max_addresses;
2431
2432 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2433 if (in6_dev->cnf.optimistic_dad &&
2434 !net->ipv6.devconf_all->forwarding && sllao)
2435 addr_flags |= IFA_F_OPTIMISTIC;
2436 #endif
2437
2438 /* Do not allow the creation of too many autoconfigured
2439 * addresses; that would be too easy a way to crash the kernel.
2440 */
2441 if (!max_addresses ||
2442 ipv6_count_addresses(in6_dev) < max_addresses)
2443 ifp = ipv6_add_addr(in6_dev, addr, NULL,
2444 pinfo->prefix_len,
2445 addr_type&IPV6_ADDR_SCOPE_MASK,
2446 addr_flags, valid_lft,
2447 prefered_lft);
2448
2449 if (IS_ERR_OR_NULL(ifp))
2450 return -1;
2451
2452 update_lft = 0;
2453 create = 1;
2454 spin_lock_bh(&ifp->lock);
2455 ifp->flags |= IFA_F_MANAGETEMPADDR;
2456 ifp->cstamp = jiffies;
2457 ifp->tokenized = tokenized;
2458 spin_unlock_bh(&ifp->lock);
2459 addrconf_dad_start(ifp);
2460 }
2461
2462 if (ifp) {
2463 u32 flags;
2464 unsigned long now;
2465 u32 stored_lft;
2466
2467 /* update lifetime (RFC2462 5.5.3 e) */
2468 spin_lock_bh(&ifp->lock);
2469 now = jiffies;
2470 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2471 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2472 else
2473 stored_lft = 0;
2474 if (!update_lft && !create && stored_lft) {
2475 const u32 minimum_lft = min_t(u32,
2476 stored_lft, MIN_VALID_LIFETIME);
2477 valid_lft = max(valid_lft, minimum_lft);
2478
2479 /* RFC4862 Section 5.5.3e:
2480 * "Note that the preferred lifetime of the
2481 * corresponding address is always reset to
2482 * the Preferred Lifetime in the received
2483 * Prefix Information option, regardless of
2484 * whether the valid lifetime is also reset or
2485 * ignored."
2486 *
2487 * So we should always update prefered_lft here.
2488 */
2489 update_lft = 1;
2490 }
2491
2492 if (update_lft) {
2493 ifp->valid_lft = valid_lft;
2494 ifp->prefered_lft = prefered_lft;
2495 ifp->tstamp = now;
2496 flags = ifp->flags;
2497 ifp->flags &= ~IFA_F_DEPRECATED;
2498 spin_unlock_bh(&ifp->lock);
2499
2500 if (!(flags&IFA_F_TENTATIVE))
2501 ipv6_ifa_notify(0, ifp);
2502 } else
2503 spin_unlock_bh(&ifp->lock);
2504
2505 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2506 create, now);
2507
2508 in6_ifa_put(ifp);
2509 addrconf_verify();
2510 }
2511
2512 return 0;
2513 }
2514 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
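/* Worked example of the lifetime update above (illustration only):
 * MIN_VALID_LIFETIME is two hours (7200s).  With stored_lft = 5000s
 * remaining and an RA advertising valid_lft = 10s, minimum_lft =
 * min(5000, 7200) = 5000 and the address keeps valid_lft =
 * max(10, 5000) = 5000s; an unauthenticated RA can therefore never push
 * the remaining valid lifetime below two hours (or below whatever is
 * currently left, if that is already shorter).
 */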
2515
2516 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2517 {
2518 struct prefix_info *pinfo;
2519 __u32 valid_lft;
2520 __u32 prefered_lft;
2521 int addr_type, err;
2522 u32 addr_flags = 0;
2523 struct inet6_dev *in6_dev;
2524 struct net *net = dev_net(dev);
2525
2526 pinfo = (struct prefix_info *) opt;
2527
2528 if (len < sizeof(struct prefix_info)) {
2529 ADBG("addrconf: prefix option too short\n");
2530 return;
2531 }
2532
2533 /*
2534 * Validation checks ([ADDRCONF], page 19)
2535 */
2536
2537 addr_type = ipv6_addr_type(&pinfo->prefix);
2538
2539 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2540 return;
2541
2542 valid_lft = ntohl(pinfo->valid);
2543 prefered_lft = ntohl(pinfo->prefered);
2544
2545 if (prefered_lft > valid_lft) {
2546 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2547 return;
2548 }
2549
2550 in6_dev = in6_dev_get(dev);
2551
2552 if (!in6_dev) {
2553 net_dbg_ratelimited("addrconf: device %s not configured\n",
2554 dev->name);
2555 return;
2556 }
2557
2558 /*
2559 * Two things going on here:
2560 * 1) Add routes for on-link prefixes
2561 * 2) Configure prefixes with the auto flag set
2562 */
2563
2564 if (pinfo->onlink) {
2565 struct rt6_info *rt;
2566 unsigned long rt_expires;
2567
2568 /* Avoid arithmetic overflow. Really, we could
2569 * save rt_expires in seconds, likely valid_lft,
2570 * but it would require division in fib gc, which is
2571 * not good.
2572 */
2573 if (HZ > USER_HZ)
2574 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2575 else
2576 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2577
2578 if (addrconf_finite_timeout(rt_expires))
2579 rt_expires *= HZ;
2580
2581 rt = addrconf_get_prefix_route(&pinfo->prefix,
2582 pinfo->prefix_len,
2583 dev,
2584 RTF_ADDRCONF | RTF_PREFIX_RT,
2585 RTF_GATEWAY | RTF_DEFAULT);
2586
2587 if (rt) {
2588 /* Autoconf prefix route */
2589 if (valid_lft == 0) {
2590 ip6_del_rt(rt);
2591 rt = NULL;
2592 } else if (addrconf_finite_timeout(rt_expires)) {
2593 /* not infinity */
2594 rt6_set_expires(rt, jiffies + rt_expires);
2595 } else {
2596 rt6_clean_expires(rt);
2597 }
2598 } else if (valid_lft) {
2599 clock_t expires = 0;
2600 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2601 if (addrconf_finite_timeout(rt_expires)) {
2602 /* not infinity */
2603 flags |= RTF_EXPIRES;
2604 expires = jiffies_to_clock_t(rt_expires);
2605 }
2606 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2607 dev, expires, flags);
2608 }
2609 ip6_rt_put(rt);
2610 }
2611
2612 /* Try to figure out our local address for this prefix */
2613
2614 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2615 struct in6_addr addr;
2616 bool tokenized = false, dev_addr_generated = false;
2617
2618 if (pinfo->prefix_len == 64) {
2619 memcpy(&addr, &pinfo->prefix, 8);
2620
2621 if (!ipv6_addr_any(&in6_dev->token)) {
2622 read_lock_bh(&in6_dev->lock);
2623 memcpy(addr.s6_addr + 8,
2624 in6_dev->token.s6_addr + 8, 8);
2625 read_unlock_bh(&in6_dev->lock);
2626 tokenized = true;
2627 } else if (is_addr_mode_generate_stable(in6_dev) &&
2628 !ipv6_generate_stable_address(&addr, 0,
2629 in6_dev)) {
2630 addr_flags |= IFA_F_STABLE_PRIVACY;
2631 goto ok;
2632 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2633 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2634 goto put;
2635 } else {
2636 dev_addr_generated = true;
2637 }
2638 goto ok;
2639 }
2640 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2641 pinfo->prefix_len);
2642 goto put;
2643
2644 ok:
2645 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2646 &addr, addr_type,
2647 addr_flags, sllao,
2648 tokenized, valid_lft,
2649 prefered_lft);
2650 if (err)
2651 goto put;
2652
2653 /* Ignore the error case here because the preceding prefix address
2654 * add succeeded and will be notified.
2655 */
2656 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2657 addr_type, addr_flags, sllao,
2658 tokenized, valid_lft,
2659 prefered_lft,
2660 dev_addr_generated);
2661 }
2662 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2663 put:
2664 in6_dev_put(in6_dev);
2665 }
2666
2667 /*
2668 * Set destination address.
2669 * Special case for SIT interfaces where we create a new "virtual"
2670 * device.
2671 */
2672 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2673 {
2674 struct in6_ifreq ireq;
2675 struct net_device *dev;
2676 int err = -EINVAL;
2677
2678 rtnl_lock();
2679
2680 err = -EFAULT;
2681 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2682 goto err_exit;
2683
2684 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2685
2686 err = -ENODEV;
2687 if (!dev)
2688 goto err_exit;
2689
2690 #if IS_ENABLED(CONFIG_IPV6_SIT)
2691 if (dev->type == ARPHRD_SIT) {
2692 const struct net_device_ops *ops = dev->netdev_ops;
2693 struct ifreq ifr;
2694 struct ip_tunnel_parm p;
2695
2696 err = -EADDRNOTAVAIL;
2697 if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
2698 goto err_exit;
2699
2700 memset(&p, 0, sizeof(p));
2701 p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
2702 p.iph.saddr = 0;
2703 p.iph.version = 4;
2704 p.iph.ihl = 5;
2705 p.iph.protocol = IPPROTO_IPV6;
2706 p.iph.ttl = 64;
2707 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
2708
2709 if (ops->ndo_do_ioctl) {
2710 mm_segment_t oldfs = get_fs();
2711
2712 set_fs(KERNEL_DS);
2713 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
2714 set_fs(oldfs);
2715 } else
2716 err = -EOPNOTSUPP;
2717
2718 if (err == 0) {
2719 err = -ENOBUFS;
2720 dev = __dev_get_by_name(net, p.name);
2721 if (!dev)
2722 goto err_exit;
2723 err = dev_open(dev);
2724 }
2725 }
2726 #endif
2727
2728 err_exit:
2729 rtnl_unlock();
2730 return err;
2731 }
2732
2733 static int ipv6_mc_config(struct sock *sk, bool join,
2734 const struct in6_addr *addr, int ifindex)
2735 {
2736 int ret;
2737
2738 ASSERT_RTNL();
2739
2740 lock_sock(sk);
2741 if (join)
2742 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2743 else
2744 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2745 release_sock(sk);
2746
2747 return ret;
2748 }
2749
2750 /*
2751 * Manual configuration of address on an interface
2752 */
2753 static int inet6_addr_add(struct net *net, int ifindex,
2754 const struct in6_addr *pfx,
2755 const struct in6_addr *peer_pfx,
2756 unsigned int plen, __u32 ifa_flags,
2757 __u32 prefered_lft, __u32 valid_lft)
2758 {
2759 struct inet6_ifaddr *ifp;
2760 struct inet6_dev *idev;
2761 struct net_device *dev;
2762 unsigned long timeout;
2763 clock_t expires;
2764 int scope;
2765 u32 flags;
2766
2767 ASSERT_RTNL();
2768
2769 if (plen > 128)
2770 return -EINVAL;
2771
2772 /* check the lifetime */
2773 if (!valid_lft || prefered_lft > valid_lft)
2774 return -EINVAL;
2775
2776 if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
2777 return -EINVAL;
2778
2779 dev = __dev_get_by_index(net, ifindex);
2780 if (!dev)
2781 return -ENODEV;
2782
2783 idev = addrconf_add_dev(dev);
2784 if (IS_ERR(idev))
2785 return PTR_ERR(idev);
2786
2787 if (ifa_flags & IFA_F_MCAUTOJOIN) {
2788 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2789 true, pfx, ifindex);
2790
2791 if (ret < 0)
2792 return ret;
2793 }
2794
2795 scope = ipv6_addr_scope(pfx);
2796
2797 timeout = addrconf_timeout_fixup(valid_lft, HZ);
2798 if (addrconf_finite_timeout(timeout)) {
2799 expires = jiffies_to_clock_t(timeout * HZ);
2800 valid_lft = timeout;
2801 flags = RTF_EXPIRES;
2802 } else {
2803 expires = 0;
2804 flags = 0;
2805 ifa_flags |= IFA_F_PERMANENT;
2806 }
2807
2808 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
2809 if (addrconf_finite_timeout(timeout)) {
2810 if (timeout == 0)
2811 ifa_flags |= IFA_F_DEPRECATED;
2812 prefered_lft = timeout;
2813 }
2814
2815 ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
2816 valid_lft, prefered_lft);
2817
2818 if (!IS_ERR(ifp)) {
2819 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
2820 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
2821 expires, flags);
2822 }
2823
2824 /*
2825 * Note that section 3.1 of RFC 4429 indicates
2826 * that the Optimistic flag should not be set for
2827 * manually configured addresses
2828 */
2829 addrconf_dad_start(ifp);
2830 if (ifa_flags & IFA_F_MANAGETEMPADDR)
2831 manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
2832 true, jiffies);
2833 in6_ifa_put(ifp);
2834 addrconf_verify_rtnl();
2835 return 0;
2836 } else if (ifa_flags & IFA_F_MCAUTOJOIN) {
2837 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2838 false, pfx, ifindex);
2839 }
2840
2841 return PTR_ERR(ifp);
2842 }
2843
2844 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2845 const struct in6_addr *pfx, unsigned int plen)
2846 {
2847 struct inet6_ifaddr *ifp;
2848 struct inet6_dev *idev;
2849 struct net_device *dev;
2850
2851 if (plen > 128)
2852 return -EINVAL;
2853
2854 dev = __dev_get_by_index(net, ifindex);
2855 if (!dev)
2856 return -ENODEV;
2857
2858 idev = __in6_dev_get(dev);
2859 if (!idev)
2860 return -ENXIO;
2861
2862 read_lock_bh(&idev->lock);
2863 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2864 if (ifp->prefix_len == plen &&
2865 ipv6_addr_equal(pfx, &ifp->addr)) {
2866 in6_ifa_hold(ifp);
2867 read_unlock_bh(&idev->lock);
2868
2869 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2870 (ifa_flags & IFA_F_MANAGETEMPADDR))
2871 manage_tempaddrs(idev, ifp, 0, 0, false,
2872 jiffies);
2873 ipv6_del_addr(ifp);
2874 addrconf_verify_rtnl();
2875 if (ipv6_addr_is_multicast(pfx)) {
2876 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2877 false, pfx, dev->ifindex);
2878 }
2879 return 0;
2880 }
2881 }
2882 read_unlock_bh(&idev->lock);
2883 return -EADDRNOTAVAIL;
2884 }
2885
2886
2887 int addrconf_add_ifaddr(struct net *net, void __user *arg)
2888 {
2889 struct in6_ifreq ireq;
2890 int err;
2891
2892 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2893 return -EPERM;
2894
2895 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2896 return -EFAULT;
2897
2898 rtnl_lock();
2899 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL,
2900 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2901 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2902 rtnl_unlock();
2903 return err;
2904 }
2905
2906 int addrconf_del_ifaddr(struct net *net, void __user *arg)
2907 {
2908 struct in6_ifreq ireq;
2909 int err;
2910
2911 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2912 return -EPERM;
2913
2914 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2915 return -EFAULT;
2916
2917 rtnl_lock();
2918 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
2919 ireq.ifr6_prefixlen);
2920 rtnl_unlock();
2921 return err;
2922 }
2923
2924 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2925 int plen, int scope)
2926 {
2927 struct inet6_ifaddr *ifp;
2928
2929 ifp = ipv6_add_addr(idev, addr, NULL, plen,
2930 scope, IFA_F_PERMANENT,
2931 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2932 if (!IS_ERR(ifp)) {
2933 spin_lock_bh(&ifp->lock);
2934 ifp->flags &= ~IFA_F_TENTATIVE;
2935 spin_unlock_bh(&ifp->lock);
2936 rt_genid_bump_ipv6(dev_net(idev->dev));
2937 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2938 in6_ifa_put(ifp);
2939 }
2940 }
2941
2942 #if IS_ENABLED(CONFIG_IPV6_SIT)
2943 static void sit_add_v4_addrs(struct inet6_dev *idev)
2944 {
2945 struct in6_addr addr;
2946 struct net_device *dev;
2947 struct net *net = dev_net(idev->dev);
2948 int scope, plen;
2949 u32 pflags = 0;
2950
2951 ASSERT_RTNL();
2952
2953 memset(&addr, 0, sizeof(struct in6_addr));
2954 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
2955
2956 if (idev->dev->flags&IFF_POINTOPOINT) {
2957 addr.s6_addr32[0] = htonl(0xfe800000);
2958 scope = IFA_LINK;
2959 plen = 64;
2960 } else {
2961 scope = IPV6_ADDR_COMPATv4;
2962 plen = 96;
2963 pflags |= RTF_NONEXTHOP;
2964 }
2965
2966 if (addr.s6_addr32[3]) {
2967 add_addr(idev, &addr, plen, scope);
2968 addrconf_prefix_route(&addr, plen, idev->dev, 0, pflags);
2969 return;
2970 }
2971
2972 for_each_netdev(net, dev) {
2973 struct in_device *in_dev = __in_dev_get_rtnl(dev);
2974 if (in_dev && (dev->flags & IFF_UP)) {
2975 struct in_ifaddr *ifa;
2976
2977 int flag = scope;
2978
2979 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
2980
2981 addr.s6_addr32[3] = ifa->ifa_local;
2982
2983 if (ifa->ifa_scope == RT_SCOPE_LINK)
2984 continue;
2985 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
2986 if (idev->dev->flags&IFF_POINTOPOINT)
2987 continue;
2988 flag |= IFA_HOST;
2989 }
2990
2991 add_addr(idev, &addr, plen, flag);
2992 addrconf_prefix_route(&addr, plen, idev->dev, 0,
2993 pflags);
2994 }
2995 }
2996 }
2997 }
2998 #endif
2999
3000 static void init_loopback(struct net_device *dev)
3001 {
3002 struct inet6_dev *idev;
3003 struct net_device *sp_dev;
3004 struct inet6_ifaddr *sp_ifa;
3005 struct rt6_info *sp_rt;
3006
3007 /* ::1 */
3008
3009 ASSERT_RTNL();
3010
3011 idev = ipv6_find_idev(dev);
3012 if (!idev) {
3013 pr_debug("%s: add_dev failed\n", __func__);
3014 return;
3015 }
3016
3017 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3018
3019 /* Add routes to the other interfaces' IPv6 addresses */
3020 for_each_netdev(dev_net(dev), sp_dev) {
3021 if (!strcmp(sp_dev->name, dev->name))
3022 continue;
3023
3024 idev = __in6_dev_get(sp_dev);
3025 if (!idev)
3026 continue;
3027
3028 read_lock_bh(&idev->lock);
3029 list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
3030
3031 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
3032 continue;
3033
3034 if (sp_ifa->rt) {
3035 /* This dst was added to the garbage list when the
3036 * lo device went down; release the obsolete dst and
3037 * allocate a new route for the ifa.
3038 */
3039 if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
3040 ip6_rt_put(sp_ifa->rt);
3041 sp_ifa->rt = NULL;
3042 } else {
3043 continue;
3044 }
3045 }
3046
3047 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false);
3048
3049 /* Failure cases are ignored */
3050 if (!IS_ERR(sp_rt)) {
3051 sp_ifa->rt = sp_rt;
3052 ip6_ins_rt(sp_rt);
3053 }
3054 }
3055 read_unlock_bh(&idev->lock);
3056 }
3057 }
3058
3059 void addrconf_add_linklocal(struct inet6_dev *idev,
3060 const struct in6_addr *addr, u32 flags)
3061 {
3062 struct inet6_ifaddr *ifp;
3063 u32 addr_flags = flags | IFA_F_PERMANENT;
3064
3065 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3066 if (idev->cnf.optimistic_dad &&
3067 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3068 addr_flags |= IFA_F_OPTIMISTIC;
3069 #endif
3070
3071 ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
3072 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
3073 if (!IS_ERR(ifp)) {
3074 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
3075 addrconf_dad_start(ifp);
3076 in6_ifa_put(ifp);
3077 }
3078 }
3079 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3080
3081 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3082 {
3083 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3084 return true;
3085
3086 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3087 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3088 return true;
3089
3090 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3091 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3092 return true;
3093
3094 return false;
3095 }
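/* For reference (illustration only), the checks above reject
 * - the all-zero identifier (subnet-router anycast),
 * - 0200:5eff:feXX:XXXX / 0200:5eff:ffXX:XXXX, i.e. EUI-64 style
 *   identifiers built from the IANA OUI 00-00-5E, and
 * - fdff:ffff:ffff:ff80 through fdff:ffff:ffff:ffff, the reserved
 *   subnet anycast range of RFC 2526.
 */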
3096
3097 static int ipv6_generate_stable_address(struct in6_addr *address,
3098 u8 dad_count,
3099 const struct inet6_dev *idev)
3100 {
3101 static DEFINE_SPINLOCK(lock);
3102 static __u32 digest[SHA_DIGEST_WORDS];
3103 static __u32 workspace[SHA_WORKSPACE_WORDS];
3104
3105 static union {
3106 char __data[SHA_MESSAGE_BYTES];
3107 struct {
3108 struct in6_addr secret;
3109 __be32 prefix[2];
3110 unsigned char hwaddr[MAX_ADDR_LEN];
3111 u8 dad_count;
3112 } __packed;
3113 } data;
3114
3115 struct in6_addr secret;
3116 struct in6_addr temp;
3117 struct net *net = dev_net(idev->dev);
3118
3119 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3120
3121 if (idev->cnf.stable_secret.initialized)
3122 secret = idev->cnf.stable_secret.secret;
3123 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3124 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3125 else
3126 return -1;
3127
3128 retry:
3129 spin_lock_bh(&lock);
3130
3131 sha_init(digest);
3132 memset(&data, 0, sizeof(data));
3133 memset(workspace, 0, sizeof(workspace));
3134 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3135 data.prefix[0] = address->s6_addr32[0];
3136 data.prefix[1] = address->s6_addr32[1];
3137 data.secret = secret;
3138 data.dad_count = dad_count;
3139
3140 sha_transform(digest, data.__data, workspace);
3141
3142 temp = *address;
3143 temp.s6_addr32[2] = (__force __be32)digest[0];
3144 temp.s6_addr32[3] = (__force __be32)digest[1];
3145
3146 spin_unlock_bh(&lock);
3147
3148 if (ipv6_reserved_interfaceid(temp)) {
3149 dad_count++;
3150 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3151 return -1;
3152 goto retry;
3153 }
3154
3155 *address = temp;
3156 return 0;
3157 }
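/* In short (illustration only): the stable-privacy identifier is the
 * first 64 bits of a SHA-1 transform over (secret, prefix, hwaddr,
 * dad_count), along the lines of RFC 7217; if the result collides with a
 * reserved identifier, dad_count is bumped and the hash recomputed, up to
 * idgen_retries times.
 */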
3158
3159 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3160 {
3161 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3162
3163 if (s->initialized)
3164 return;
3165 s = &idev->cnf.stable_secret;
3166 get_random_bytes(&s->secret, sizeof(s->secret));
3167 s->initialized = true;
3168 }
3169
3170 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3171 {
3172 struct in6_addr addr;
3173
3174 /* no link local addresses on L3 master devices */
3175 if (netif_is_l3_master(idev->dev))
3176 return;
3177
3178 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3179
3180 switch (idev->cnf.addr_gen_mode) {
3181 case IN6_ADDR_GEN_MODE_RANDOM:
3182 ipv6_gen_mode_random_init(idev);
3183 /* fallthrough */
3184 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3185 if (!ipv6_generate_stable_address(&addr, 0, idev))
3186 addrconf_add_linklocal(idev, &addr,
3187 IFA_F_STABLE_PRIVACY);
3188 else if (prefix_route)
3189 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3190 break;
3191 case IN6_ADDR_GEN_MODE_EUI64:
3192 /* addrconf_add_linklocal also adds a prefix_route and we
3193 * only need to care about prefix routes if ipv6_generate_eui64
3194 * couldn't generate one.
3195 */
3196 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3197 addrconf_add_linklocal(idev, &addr, 0);
3198 else if (prefix_route)
3199 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3200 break;
3201 case IN6_ADDR_GEN_MODE_NONE:
3202 default:
3203 /* will not add any link local address */
3204 break;
3205 }
3206 }
3207
3208 static void addrconf_dev_config(struct net_device *dev)
3209 {
3210 struct inet6_dev *idev;
3211
3212 ASSERT_RTNL();
3213
3214 if ((dev->type != ARPHRD_ETHER) &&
3215 (dev->type != ARPHRD_FDDI) &&
3216 (dev->type != ARPHRD_ARCNET) &&
3217 (dev->type != ARPHRD_INFINIBAND) &&
3218 (dev->type != ARPHRD_IEEE1394) &&
3219 (dev->type != ARPHRD_TUNNEL6) &&
3220 (dev->type != ARPHRD_6LOWPAN) &&
3221 (dev->type != ARPHRD_IP6GRE) &&
3222 (dev->type != ARPHRD_IPGRE) &&
3223 (dev->type != ARPHRD_TUNNEL) &&
3224 (dev->type != ARPHRD_NONE)) {
3225 /* Alas, we support autoconfiguration only for the device types above. */
3226 return;
3227 }
3228
3229 idev = addrconf_add_dev(dev);
3230 if (IS_ERR(idev))
3231 return;
3232
3233 /* this device type has no EUI support */
3234 if (dev->type == ARPHRD_NONE &&
3235 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3236 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3237
3238 addrconf_addr_gen(idev, false);
3239 }
3240
3241 #if IS_ENABLED(CONFIG_IPV6_SIT)
3242 static void addrconf_sit_config(struct net_device *dev)
3243 {
3244 struct inet6_dev *idev;
3245
3246 ASSERT_RTNL();
3247
3248 /*
3249 * Configure the tunnel with one of our IPv4
3250 * addresses... we should configure all of
3251 * our v4 addrs in the tunnel
3252 */
3253
3254 idev = ipv6_find_idev(dev);
3255 if (!idev) {
3256 pr_debug("%s: add_dev failed\n", __func__);
3257 return;
3258 }
3259
3260 if (dev->priv_flags & IFF_ISATAP) {
3261 addrconf_addr_gen(idev, false);
3262 return;
3263 }
3264
3265 sit_add_v4_addrs(idev);
3266
3267 if (dev->flags&IFF_POINTOPOINT)
3268 addrconf_add_mroute(dev);
3269 }
3270 #endif
3271
3272 #if IS_ENABLED(CONFIG_NET_IPGRE)
3273 static void addrconf_gre_config(struct net_device *dev)
3274 {
3275 struct inet6_dev *idev;
3276
3277 ASSERT_RTNL();
3278
3279 idev = ipv6_find_idev(dev);
3280 if (!idev) {
3281 pr_debug("%s: add_dev failed\n", __func__);
3282 return;
3283 }
3284
3285 addrconf_addr_gen(idev, true);
3286 if (dev->flags & IFF_POINTOPOINT)
3287 addrconf_add_mroute(dev);
3288 }
3289 #endif
3290
3291 static int fixup_permanent_addr(struct inet6_dev *idev,
3292 struct inet6_ifaddr *ifp)
3293 {
3294 if (!ifp->rt) {
3295 struct rt6_info *rt;
3296
3297 rt = addrconf_dst_alloc(idev, &ifp->addr, false);
3298 if (unlikely(IS_ERR(rt)))
3299 return PTR_ERR(rt);
3300
3301 ifp->rt = rt;
3302 }
3303
3304 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3305 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3306 idev->dev, 0, 0);
3307 }
3308
3309 addrconf_dad_start(ifp);
3310
3311 return 0;
3312 }
3313
3314 static void addrconf_permanent_addr(struct net_device *dev)
3315 {
3316 struct inet6_ifaddr *ifp, *tmp;
3317 struct inet6_dev *idev;
3318
3319 idev = __in6_dev_get(dev);
3320 if (!idev)
3321 return;
3322
3323 write_lock_bh(&idev->lock);
3324
3325 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3326 if ((ifp->flags & IFA_F_PERMANENT) &&
3327 fixup_permanent_addr(idev, ifp) < 0) {
3328 write_unlock_bh(&idev->lock);
3329 ipv6_del_addr(ifp);
3330 write_lock_bh(&idev->lock);
3331
3332 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3333 idev->dev->name, &ifp->addr);
3334 }
3335 }
3336
3337 write_unlock_bh(&idev->lock);
3338 }
3339
3340 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3341 void *ptr)
3342 {
3343 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3344 struct netdev_notifier_changeupper_info *info;
3345 struct inet6_dev *idev = __in6_dev_get(dev);
3346 int run_pending = 0;
3347 int err;
3348
3349 switch (event) {
3350 case NETDEV_REGISTER:
3351 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3352 idev = ipv6_add_dev(dev);
3353 if (IS_ERR(idev))
3354 return notifier_from_errno(PTR_ERR(idev));
3355 }
3356 break;
3357
3358 case NETDEV_CHANGEMTU:
3359 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3360 if (dev->mtu < IPV6_MIN_MTU) {
3361 addrconf_ifdown(dev, 1);
3362 break;
3363 }
3364
3365 if (idev) {
3366 rt6_mtu_change(dev, dev->mtu);
3367 idev->cnf.mtu6 = dev->mtu;
3368 break;
3369 }
3370
3371 /* allocate new idev */
3372 idev = ipv6_add_dev(dev);
3373 if (IS_ERR(idev))
3374 break;
3375
3376 /* device is still not ready */
3377 if (!(idev->if_flags & IF_READY))
3378 break;
3379
3380 run_pending = 1;
3381
3382 /* fall through */
3383
3384 case NETDEV_UP:
3385 case NETDEV_CHANGE:
3386 if (dev->flags & IFF_SLAVE)
3387 break;
3388
3389 if (idev && idev->cnf.disable_ipv6)
3390 break;
3391
3392 if (event == NETDEV_UP) {
3393 /* restore routes for permanent addresses */
3394 addrconf_permanent_addr(dev);
3395
3396 if (!addrconf_qdisc_ok(dev)) {
3397 /* device is not ready yet. */
3398 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3399 dev->name);
3400 break;
3401 }
3402
3403 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3404 idev = ipv6_add_dev(dev);
3405
3406 if (!IS_ERR_OR_NULL(idev)) {
3407 idev->if_flags |= IF_READY;
3408 run_pending = 1;
3409 }
3410 } else if (event == NETDEV_CHANGE) {
3411 if (!addrconf_qdisc_ok(dev)) {
3412 /* device is still not ready. */
3413 break;
3414 }
3415
3416 if (idev) {
3417 if (idev->if_flags & IF_READY) {
3418 /* device is already configured -
3419 * but resend MLD reports, we might
3420 * have roamed and need to update
3421 * multicast snooping switches
3422 */
3423 ipv6_mc_up(idev);
3424 break;
3425 }
3426 idev->if_flags |= IF_READY;
3427 }
3428
3429 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3430 dev->name);
3431
3432 run_pending = 1;
3433 }
3434
3435 switch (dev->type) {
3436 #if IS_ENABLED(CONFIG_IPV6_SIT)
3437 case ARPHRD_SIT:
3438 addrconf_sit_config(dev);
3439 break;
3440 #endif
3441 #if IS_ENABLED(CONFIG_NET_IPGRE)
3442 case ARPHRD_IPGRE:
3443 addrconf_gre_config(dev);
3444 break;
3445 #endif
3446 case ARPHRD_LOOPBACK:
3447 init_loopback(dev);
3448 break;
3449
3450 default:
3451 addrconf_dev_config(dev);
3452 break;
3453 }
3454
3455 if (!IS_ERR_OR_NULL(idev)) {
3456 if (run_pending)
3457 addrconf_dad_run(idev);
3458
3459 /*
3460 * If the MTU changed while the interface was down, the
3461 * new MTU must be reflected in the idev as well as in
3462 * the routes when the interface comes back up.
3463 */
3464 if (idev->cnf.mtu6 != dev->mtu &&
3465 dev->mtu >= IPV6_MIN_MTU) {
3466 rt6_mtu_change(dev, dev->mtu);
3467 idev->cnf.mtu6 = dev->mtu;
3468 }
3469 idev->tstamp = jiffies;
3470 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3471
3472 /*
3473 * If the MTU that changed while down is lower than
3474 * IPV6_MIN_MTU, stop IPv6 on this interface.
3475 */
3476 if (dev->mtu < IPV6_MIN_MTU)
3477 addrconf_ifdown(dev, 1);
3478 }
3479 break;
3480
3481 case NETDEV_DOWN:
3482 case NETDEV_UNREGISTER:
3483 /*
3484 * Remove all addresses from this interface.
3485 */
3486 addrconf_ifdown(dev, event != NETDEV_DOWN);
3487 break;
3488
3489 case NETDEV_CHANGENAME:
3490 if (idev) {
3491 snmp6_unregister_dev(idev);
3492 addrconf_sysctl_unregister(idev);
3493 err = addrconf_sysctl_register(idev);
3494 if (err)
3495 return notifier_from_errno(err);
3496 err = snmp6_register_dev(idev);
3497 if (err) {
3498 addrconf_sysctl_unregister(idev);
3499 return notifier_from_errno(err);
3500 }
3501 }
3502 break;
3503
3504 case NETDEV_PRE_TYPE_CHANGE:
3505 case NETDEV_POST_TYPE_CHANGE:
3506 if (idev)
3507 addrconf_type_change(dev, event);
3508 break;
3509
3510 case NETDEV_CHANGEUPPER:
3511 info = ptr;
3512
3513 /* flush all routes if dev is linked to or unlinked from
3514 * an L3 master device (e.g., VRF)
3515 */
3516 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3517 addrconf_ifdown(dev, 0);
3518 }
3519
3520 return NOTIFY_OK;
3521 }
3522
3523 /*
3524 * addrconf module should be notified of a device going up
3525 */
3526 static struct notifier_block ipv6_dev_notf = {
3527 .notifier_call = addrconf_notify,
3528 };
3529
3530 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3531 {
3532 struct inet6_dev *idev;
3533 ASSERT_RTNL();
3534
3535 idev = __in6_dev_get(dev);
3536
3537 if (event == NETDEV_POST_TYPE_CHANGE)
3538 ipv6_mc_remap(idev);
3539 else if (event == NETDEV_PRE_TYPE_CHANGE)
3540 ipv6_mc_unmap(idev);
3541 }
3542
3543 static bool addr_is_local(const struct in6_addr *addr)
3544 {
3545 return ipv6_addr_type(addr) &
3546 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3547 }
3548
3549 static int addrconf_ifdown(struct net_device *dev, int how)
3550 {
3551 struct net *net = dev_net(dev);
3552 struct inet6_dev *idev;
3553 struct inet6_ifaddr *ifa, *tmp;
3554 struct list_head del_list;
3555 int _keep_addr;
3556 bool keep_addr;
3557 int state, i;
3558
3559 ASSERT_RTNL();
3560
3561 rt6_ifdown(net, dev);
3562 neigh_ifdown(&nd_tbl, dev);
3563
3564 idev = __in6_dev_get(dev);
3565 if (!idev)
3566 return -ENODEV;
3567
3568 /*
3569 * Step 1: remove reference to ipv6 device from parent device.
3570 * Do not dev_put!
3571 */
3572 if (how) {
3573 idev->dead = 1;
3574
3575 /* protected by rtnl_lock */
3576 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3577
3578 /* Step 1.5: remove snmp6 entry */
3579 snmp6_unregister_dev(idev);
3580
3581 }
3582
3583 /* aggregate the system setting and interface setting */
3584 _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3585 if (!_keep_addr)
3586 _keep_addr = idev->cnf.keep_addr_on_down;
3587
3588 /* combine the user config with event to determine if permanent
3589 * addresses are to be removed from address hash table
3590 */
3591 keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
3592
3593 /* Step 2: clear hash table */
3594 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3595 struct hlist_head *h = &inet6_addr_lst[i];
3596
3597 spin_lock_bh(&addrconf_hash_lock);
3598 restart:
3599 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3600 if (ifa->idev == idev) {
3601 addrconf_del_dad_work(ifa);
3602 /* combined flag + permanent flag decide if
3603 * address is retained on a down event
3604 */
3605 if (!keep_addr ||
3606 !(ifa->flags & IFA_F_PERMANENT) ||
3607 addr_is_local(&ifa->addr)) {
3608 hlist_del_init_rcu(&ifa->addr_lst);
3609 goto restart;
3610 }
3611 }
3612 }
3613 spin_unlock_bh(&addrconf_hash_lock);
3614 }
3615
3616 write_lock_bh(&idev->lock);
3617
3618 addrconf_del_rs_timer(idev);
3619
3620 /* Step 2: clear flags for stateless addrconf */
3621 if (!how)
3622 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3623
3624 /* Step 3: clear tempaddr list */
3625 while (!list_empty(&idev->tempaddr_list)) {
3626 ifa = list_first_entry(&idev->tempaddr_list,
3627 struct inet6_ifaddr, tmp_list);
3628 list_del(&ifa->tmp_list);
3629 write_unlock_bh(&idev->lock);
3630 spin_lock_bh(&ifa->lock);
3631
3632 if (ifa->ifpub) {
3633 in6_ifa_put(ifa->ifpub);
3634 ifa->ifpub = NULL;
3635 }
3636 spin_unlock_bh(&ifa->lock);
3637 in6_ifa_put(ifa);
3638 write_lock_bh(&idev->lock);
3639 }
3640
3641 /* re-combine the user config with event to determine if permanent
3642 * addresses are to be removed from the interface list
3643 */
3644 keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
3645
3646 INIT_LIST_HEAD(&del_list);
3647 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3648 struct rt6_info *rt = NULL;
3649
3650 addrconf_del_dad_work(ifa);
3651
3652 write_unlock_bh(&idev->lock);
3653 spin_lock_bh(&ifa->lock);
3654
3655 if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3656 !addr_is_local(&ifa->addr)) {
3657 /* set state to skip the notifier below */
3658 state = INET6_IFADDR_STATE_DEAD;
3659 ifa->state = 0;
3660 if (!(ifa->flags & IFA_F_NODAD))
3661 ifa->flags |= IFA_F_TENTATIVE;
3662
3663 rt = ifa->rt;
3664 ifa->rt = NULL;
3665 } else {
3666 state = ifa->state;
3667 ifa->state = INET6_IFADDR_STATE_DEAD;
3668
3669 list_move(&ifa->if_list, &del_list);
3670 }
3671
3672 spin_unlock_bh(&ifa->lock);
3673
3674 if (rt)
3675 ip6_del_rt(rt);
3676
3677 if (state != INET6_IFADDR_STATE_DEAD) {
3678 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3679 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3680 } else {
3681 if (idev->cnf.forwarding)
3682 addrconf_leave_anycast(ifa);
3683 addrconf_leave_solict(ifa->idev, &ifa->addr);
3684 }
3685
3686 write_lock_bh(&idev->lock);
3687 }
3688
3689 write_unlock_bh(&idev->lock);
3690
3691 /* now clean up addresses to be removed */
3692 while (!list_empty(&del_list)) {
3693 ifa = list_first_entry(&del_list,
3694 struct inet6_ifaddr, if_list);
3695 list_del(&ifa->if_list);
3696
3697 in6_ifa_put(ifa);
3698 }
3699
3700 /* Step 5: Discard anycast and multicast list */
3701 if (how) {
3702 ipv6_ac_destroy_dev(idev);
3703 ipv6_mc_destroy_dev(idev);
3704 } else {
3705 ipv6_mc_down(idev);
3706 }
3707
3708 idev->tstamp = jiffies;
3709
3710 /* Last: shut the device down (if it is being unregistered) */
3711 if (how) {
3712 addrconf_sysctl_unregister(idev);
3713 neigh_parms_release(&nd_tbl, idev->nd_parms);
3714 neigh_ifdown(&nd_tbl, dev);
3715 in6_dev_put(idev);
3716 }
3717 return 0;
3718 }
3719
3720 static void addrconf_rs_timer(unsigned long data)
3721 {
3722 struct inet6_dev *idev = (struct inet6_dev *)data;
3723 struct net_device *dev = idev->dev;
3724 struct in6_addr lladdr;
3725
3726 write_lock(&idev->lock);
3727 if (idev->dead || !(idev->if_flags & IF_READY))
3728 goto out;
3729
3730 if (!ipv6_accept_ra(idev))
3731 goto out;
3732
3733 /* Announcement received after solicitation was sent */
3734 if (idev->if_flags & IF_RA_RCVD)
3735 goto out;
3736
3737 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3738 write_unlock(&idev->lock);
3739 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3740 ndisc_send_rs(dev, &lladdr,
3741 &in6addr_linklocal_allrouters);
3742 else
3743 goto put;
3744
3745 write_lock(&idev->lock);
3746 idev->rs_interval = rfc3315_s14_backoff_update(
3747 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3748 /* The wait after the last probe can be shorter */
3749 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3750 idev->cnf.rtr_solicits) ?
3751 idev->cnf.rtr_solicit_delay :
3752 idev->rs_interval);
3753 } else {
3754 /*
3755 * Note: we no longer support the deprecated "all destinations
3756 * on-link" assumption.
3757 */
3758 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3759 }
3760
3761 out:
3762 write_unlock(&idev->lock);
3763 put:
3764 in6_dev_put(idev);
3765 }
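/* Note (assumption, for illustration): rfc3315_s14_backoff_update() is
 * expected to roughly double rs_interval with +/-10% jitter, clamped to
 * rtr_solicit_max_interval (RFC 3315 section 14 style backoff), so with
 * the default 4s initial interval the solicitations go out after roughly
 * 4, 8, 16, 32, ... seconds up to the configured cap.
 */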
3766
3767 /*
3768 * Duplicate Address Detection
3769 */
3770 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3771 {
3772 unsigned long rand_num;
3773 struct inet6_dev *idev = ifp->idev;
3774 u64 nonce;
3775
3776 if (ifp->flags & IFA_F_OPTIMISTIC)
3777 rand_num = 0;
3778 else
3779 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3780
3781 nonce = 0;
3782 if (idev->cnf.enhanced_dad ||
3783 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3784 do
3785 get_random_bytes(&nonce, 6);
3786 while (nonce == 0);
3787 }
3788 ifp->dad_nonce = nonce;
3789 ifp->dad_probes = idev->cnf.dad_transmits;
3790 addrconf_mod_dad_work(ifp, rand_num);
3791 }
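/* Note (illustration only): the non-zero 6-byte nonce generated above is
 * used for enhanced DAD (RFC 7527); it is carried in a Nonce option in
 * the DAD neighbour solicitations so a node can tell a looped-back copy
 * of its own probe from a genuine duplicate.
 */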
3792
3793 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3794 {
3795 struct inet6_dev *idev = ifp->idev;
3796 struct net_device *dev = idev->dev;
3797 bool bump_id, notify = false;
3798
3799 addrconf_join_solict(dev, &ifp->addr);
3800
3801 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3802
3803 read_lock_bh(&idev->lock);
3804 spin_lock(&ifp->lock);
3805 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3806 goto out;
3807
3808 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3809 idev->cnf.accept_dad < 1 ||
3810 !(ifp->flags&IFA_F_TENTATIVE) ||
3811 ifp->flags & IFA_F_NODAD) {
3812 bump_id = ifp->flags & IFA_F_TENTATIVE;
3813 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3814 spin_unlock(&ifp->lock);
3815 read_unlock_bh(&idev->lock);
3816
3817 addrconf_dad_completed(ifp, bump_id);
3818 return;
3819 }
3820
3821 if (!(idev->if_flags & IF_READY)) {
3822 spin_unlock(&ifp->lock);
3823 read_unlock_bh(&idev->lock);
3824 /*
3825 * If the device is not ready:
3826 * - keep it tentative if it is a permanent address.
3827 * - otherwise, kill it.
3828 */
3829 in6_ifa_hold(ifp);
3830 addrconf_dad_stop(ifp, 0);
3831 return;
3832 }
3833
3834 /*
3835 * Optimistic nodes can start receiving
3836 * Frames right away
3837 */
3838 if (ifp->flags & IFA_F_OPTIMISTIC) {
3839 ip6_ins_rt(ifp->rt);
3840 if (ipv6_use_optimistic_addr(idev)) {
3841 /* Because optimistic nodes can use this address,
3842 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3843 */
3844 notify = true;
3845 }
3846 }
3847
3848 addrconf_dad_kick(ifp);
3849 out:
3850 spin_unlock(&ifp->lock);
3851 read_unlock_bh(&idev->lock);
3852 if (notify)
3853 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3854 }
3855
3856 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
3857 {
3858 bool begin_dad = false;
3859
3860 spin_lock_bh(&ifp->lock);
3861 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
3862 ifp->state = INET6_IFADDR_STATE_PREDAD;
3863 begin_dad = true;
3864 }
3865 spin_unlock_bh(&ifp->lock);
3866
3867 if (begin_dad)
3868 addrconf_mod_dad_work(ifp, 0);
3869 }
3870
3871 static void addrconf_dad_work(struct work_struct *w)
3872 {
3873 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
3874 struct inet6_ifaddr,
3875 dad_work);
3876 struct inet6_dev *idev = ifp->idev;
3877 bool bump_id, disable_ipv6 = false;
3878 struct in6_addr mcaddr;
3879
3880 enum {
3881 DAD_PROCESS,
3882 DAD_BEGIN,
3883 DAD_ABORT,
3884 } action = DAD_PROCESS;
3885
3886 rtnl_lock();
3887
3888 spin_lock_bh(&ifp->lock);
3889 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
3890 action = DAD_BEGIN;
3891 ifp->state = INET6_IFADDR_STATE_DAD;
3892 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3893 action = DAD_ABORT;
3894 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3895
3896 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
3897 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
3898 struct in6_addr addr;
3899
3900 addr.s6_addr32[0] = htonl(0xfe800000);
3901 addr.s6_addr32[1] = 0;
3902
3903 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
3904 ipv6_addr_equal(&ifp->addr, &addr)) {
3905 /* DAD failed for link-local based on MAC */
3906 idev->cnf.disable_ipv6 = 1;
3907
3908 pr_info("%s: IPv6 being disabled!\n",
3909 ifp->idev->dev->name);
3910 disable_ipv6 = true;
3911 }
3912 }
3913 }
3914 spin_unlock_bh(&ifp->lock);
3915
3916 if (action == DAD_BEGIN) {
3917 addrconf_dad_begin(ifp);
3918 goto out;
3919 } else if (action == DAD_ABORT) {
3920 in6_ifa_hold(ifp);
3921 addrconf_dad_stop(ifp, 1);
3922 if (disable_ipv6)
3923 addrconf_ifdown(idev->dev, 0);
3924 goto out;
3925 }
3926
3927 if (!ifp->dad_probes && addrconf_dad_end(ifp))
3928 goto out;
3929
3930 write_lock_bh(&idev->lock);
3931 if (idev->dead || !(idev->if_flags & IF_READY)) {
3932 write_unlock_bh(&idev->lock);
3933 goto out;
3934 }
3935
3936 spin_lock(&ifp->lock);
3937 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
3938 spin_unlock(&ifp->lock);
3939 write_unlock_bh(&idev->lock);
3940 goto out;
3941 }
3942
3943 if (ifp->dad_probes == 0) {
3944 /*
3945 * DAD was successful
3946 */
3947
3948 bump_id = ifp->flags & IFA_F_TENTATIVE;
3949 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3950 spin_unlock(&ifp->lock);
3951 write_unlock_bh(&idev->lock);
3952
3953 addrconf_dad_completed(ifp, bump_id);
3954
3955 goto out;
3956 }
3957
3958 ifp->dad_probes--;
3959 addrconf_mod_dad_work(ifp,
3960 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
3961 spin_unlock(&ifp->lock);
3962 write_unlock_bh(&idev->lock);
3963
3964 /* send a neighbour solicitation for our addr */
3965 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
3966 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
3967 ifp->dad_nonce);
3968 out:
3969 in6_ifa_put(ifp);
3970 rtnl_unlock();
3971 }
3972
3973 /* ifp->idev must be at least read locked */
3974 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
3975 {
3976 struct inet6_ifaddr *ifpiter;
3977 struct inet6_dev *idev = ifp->idev;
3978
3979 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
3980 if (ifpiter->scope > IFA_LINK)
3981 break;
3982 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
3983 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
3984 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
3985 IFA_F_PERMANENT)
3986 return false;
3987 }
3988 return true;
3989 }
3990
3991 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
3992 {
3993 struct net_device *dev = ifp->idev->dev;
3994 struct in6_addr lladdr;
3995 bool send_rs, send_mld;
3996
3997 addrconf_del_dad_work(ifp);
3998
3999 /*
4000 * Configure the address for reception. Now it is valid.
4001 */
4002
4003 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4004
4005 /* If added prefix is link local and we are prepared to process
4006 router advertisements, start sending router solicitations.
4007 */
4008
4009 read_lock_bh(&ifp->idev->lock);
4010 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4011 send_rs = send_mld &&
4012 ipv6_accept_ra(ifp->idev) &&
4013 ifp->idev->cnf.rtr_solicits != 0 &&
4014 (dev->flags&IFF_LOOPBACK) == 0;
4015 read_unlock_bh(&ifp->idev->lock);
4016
4017 /* While DAD is in progress, the MLD report's source address is the
4018 * unspecified address; resend it with the proper link-local address now.
4019 */
4020 if (send_mld)
4021 ipv6_mc_dad_complete(ifp->idev);
4022
4023 if (send_rs) {
4024 /*
4025 * If a host has already performed a random delay
4026 * [...] as part of DAD [...] there is no need
4027 * to delay again before sending the first RS
4028 */
4029 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4030 return;
4031 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4032
4033 write_lock_bh(&ifp->idev->lock);
4034 spin_lock(&ifp->lock);
4035 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4036 ifp->idev->cnf.rtr_solicit_interval);
4037 ifp->idev->rs_probes = 1;
4038 ifp->idev->if_flags |= IF_RS_SENT;
4039 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4040 spin_unlock(&ifp->lock);
4041 write_unlock_bh(&ifp->idev->lock);
4042 }
4043
4044 if (bump_id)
4045 rt_genid_bump_ipv6(dev_net(dev));
4046
4047 /* Make sure that a new temporary address will be created
4048 * before this temporary address becomes deprecated.
4049 */
4050 if (ifp->flags & IFA_F_TEMPORARY)
4051 addrconf_verify_rtnl();
4052 }
4053
4054 static void addrconf_dad_run(struct inet6_dev *idev)
4055 {
4056 struct inet6_ifaddr *ifp;
4057
4058 read_lock_bh(&idev->lock);
4059 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4060 spin_lock(&ifp->lock);
4061 if (ifp->flags & IFA_F_TENTATIVE &&
4062 ifp->state == INET6_IFADDR_STATE_DAD)
4063 addrconf_dad_kick(ifp);
4064 spin_unlock(&ifp->lock);
4065 }
4066 read_unlock_bh(&idev->lock);
4067 }
4068
4069 #ifdef CONFIG_PROC_FS
4070 struct if6_iter_state {
4071 struct seq_net_private p;
4072 int bucket;
4073 int offset;
4074 };
4075
4076 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4077 {
4078 struct inet6_ifaddr *ifa = NULL;
4079 struct if6_iter_state *state = seq->private;
4080 struct net *net = seq_file_net(seq);
4081 int p = 0;
4082
4083 /* initial bucket if pos is 0 */
4084 if (pos == 0) {
4085 state->bucket = 0;
4086 state->offset = 0;
4087 }
4088
4089 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4090 hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
4091 addr_lst) {
4092 if (!net_eq(dev_net(ifa->idev->dev), net))
4093 continue;
4094 /* sync with offset */
4095 if (p < state->offset) {
4096 p++;
4097 continue;
4098 }
4099 state->offset++;
4100 return ifa;
4101 }
4102
4103 /* prepare for next bucket */
4104 state->offset = 0;
4105 p = 0;
4106 }
4107 return NULL;
4108 }
4109
4110 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4111 struct inet6_ifaddr *ifa)
4112 {
4113 struct if6_iter_state *state = seq->private;
4114 struct net *net = seq_file_net(seq);
4115
4116 hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
4117 if (!net_eq(dev_net(ifa->idev->dev), net))
4118 continue;
4119 state->offset++;
4120 return ifa;
4121 }
4122
4123 while (++state->bucket < IN6_ADDR_HSIZE) {
4124 state->offset = 0;
4125 hlist_for_each_entry_rcu_bh(ifa,
4126 &inet6_addr_lst[state->bucket], addr_lst) {
4127 if (!net_eq(dev_net(ifa->idev->dev), net))
4128 continue;
4129 state->offset++;
4130 return ifa;
4131 }
4132 }
4133
4134 return NULL;
4135 }
4136
4137 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4138 __acquires(rcu_bh)
4139 {
4140 rcu_read_lock_bh();
4141 return if6_get_first(seq, *pos);
4142 }
4143
4144 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4145 {
4146 struct inet6_ifaddr *ifa;
4147
4148 ifa = if6_get_next(seq, v);
4149 ++*pos;
4150 return ifa;
4151 }
4152
4153 static void if6_seq_stop(struct seq_file *seq, void *v)
4154 __releases(rcu_bh)
4155 {
4156 rcu_read_unlock_bh();
4157 }
4158
4159 static int if6_seq_show(struct seq_file *seq, void *v)
4160 {
4161 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4162 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4163 &ifp->addr,
4164 ifp->idev->dev->ifindex,
4165 ifp->prefix_len,
4166 ifp->scope,
4167 (u8) ifp->flags,
4168 ifp->idev->dev->name);
4169 return 0;
4170 }
4171
4172 static const struct seq_operations if6_seq_ops = {
4173 .start = if6_seq_start,
4174 .next = if6_seq_next,
4175 .show = if6_seq_show,
4176 .stop = if6_seq_stop,
4177 };
4178
4179 static int if6_seq_open(struct inode *inode, struct file *file)
4180 {
4181 return seq_open_net(inode, file, &if6_seq_ops,
4182 sizeof(struct if6_iter_state));
4183 }
4184
4185 static const struct file_operations if6_fops = {
4186 .owner = THIS_MODULE,
4187 .open = if6_seq_open,
4188 .read = seq_read,
4189 .llseek = seq_lseek,
4190 .release = seq_release_net,
4191 };
4192
4193 static int __net_init if6_proc_net_init(struct net *net)
4194 {
4195 if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
4196 return -ENOMEM;
4197 return 0;
4198 }
4199
4200 static void __net_exit if6_proc_net_exit(struct net *net)
4201 {
4202 remove_proc_entry("if_inet6", net->proc_net);
4203 }
4204
4205 static struct pernet_operations if6_proc_net_ops = {
4206 .init = if6_proc_net_init,
4207 .exit = if6_proc_net_exit,
4208 };
4209
4210 int __init if6_proc_init(void)
4211 {
4212 return register_pernet_subsys(&if6_proc_net_ops);
4213 }
4214
4215 void if6_proc_exit(void)
4216 {
4217 unregister_pernet_subsys(&if6_proc_net_ops);
4218 }
4219 #endif /* CONFIG_PROC_FS */
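For reference, if6_seq_show() above emits one line per address: the IPv6 address as 32 contiguous hex digits (the "%pi6" form), then ifindex, prefix length, scope and flags as two-digit hex fields, then the device name. The sketch below is a hedged userspace illustration of consuming that output; the program and its variable names are the editor's own and are not part of this source.

/* Hypothetical userspace sketch: parse /proc/net/if_inet6 (not kernel code). */
#include <stdio.h>

int main(void)
{
	char hexaddr[33], ifname[32];
	unsigned int ifindex, plen, scope, flags;
	FILE *f = fopen("/proc/net/if_inet6", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Matches the "%pi6 %02x %02x %02x %02x %8s" format used above. */
	while (fscanf(f, "%32s %x %x %x %x %31s",
		      hexaddr, &ifindex, &plen, &scope, &flags, ifname) == 6)
		printf("dev %-8s addr %s/%u scope %02x flags %02x\n",
		       ifname, hexaddr, plen, scope, flags);
	fclose(f);
	return 0;
}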
4220
4221 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4222 /* Check if address is a home address configured on any interface. */
4223 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4224 {
4225 int ret = 0;
4226 struct inet6_ifaddr *ifp = NULL;
4227 unsigned int hash = inet6_addr_hash(addr);
4228
4229 rcu_read_lock_bh();
4230 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
4231 if (!net_eq(dev_net(ifp->idev->dev), net))
4232 continue;
4233 if (ipv6_addr_equal(&ifp->addr, addr) &&
4234 (ifp->flags & IFA_F_HOMEADDRESS)) {
4235 ret = 1;
4236 break;
4237 }
4238 }
4239 rcu_read_unlock_bh();
4240 return ret;
4241 }
4242 #endif
4243
4244 /*
4245 * Periodic address status verification
4246 */
4247
4248 static void addrconf_verify_rtnl(void)
4249 {
4250 unsigned long now, next, next_sec, next_sched;
4251 struct inet6_ifaddr *ifp;
4252 int i;
4253
4254 ASSERT_RTNL();
4255
4256 rcu_read_lock_bh();
4257 now = jiffies;
4258 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4259
4260 cancel_delayed_work(&addr_chk_work);
4261
4262 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4263 restart:
4264 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4265 unsigned long age;
4266
4267 /* Even with IFA_F_PERMANENT set, preferred_lft may have been
4268 * given a finite, non-zero value while valid_lft is infinity,
4269 * so only addresses whose preferred lifetime is infinite are skipped.
4270 */
4271 if ((ifp->flags & IFA_F_PERMANENT) &&
4272 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4273 continue;
4274
4275 spin_lock(&ifp->lock);
4276 /* We try to batch several events at once. */
4277 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4278
4279 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4280 age >= ifp->valid_lft) {
4281 spin_unlock(&ifp->lock);
4282 in6_ifa_hold(ifp);
4283 ipv6_del_addr(ifp);
4284 goto restart;
4285 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4286 spin_unlock(&ifp->lock);
4287 continue;
4288 } else if (age >= ifp->prefered_lft) {
4289 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4290 int deprecate = 0;
4291
4292 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4293 deprecate = 1;
4294 ifp->flags |= IFA_F_DEPRECATED;
4295 }
4296
4297 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4298 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4299 next = ifp->tstamp + ifp->valid_lft * HZ;
4300
4301 spin_unlock(&ifp->lock);
4302
4303 if (deprecate) {
4304 in6_ifa_hold(ifp);
4305
4306 ipv6_ifa_notify(0, ifp);
4307 in6_ifa_put(ifp);
4308 goto restart;
4309 }
4310 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4311 !(ifp->flags&IFA_F_TENTATIVE)) {
4312 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4313 ifp->idev->cnf.dad_transmits *
4314 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
4315
4316 if (age >= ifp->prefered_lft - regen_advance) {
4317 struct inet6_ifaddr *ifpub = ifp->ifpub;
4318 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4319 next = ifp->tstamp + ifp->prefered_lft * HZ;
4320 if (!ifp->regen_count && ifpub) {
4321 ifp->regen_count++;
4322 in6_ifa_hold(ifp);
4323 in6_ifa_hold(ifpub);
4324 spin_unlock(&ifp->lock);
4325
4326 spin_lock(&ifpub->lock);
4327 ifpub->regen_count = 0;
4328 spin_unlock(&ifpub->lock);
4329 ipv6_create_tempaddr(ifpub, ifp);
4330 in6_ifa_put(ifpub);
4331 in6_ifa_put(ifp);
4332 goto restart;
4333 }
4334 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4335 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4336 spin_unlock(&ifp->lock);
4337 } else {
4338 /* ifp->prefered_lft <= ifp->valid_lft */
4339 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4340 next = ifp->tstamp + ifp->prefered_lft * HZ;
4341 spin_unlock(&ifp->lock);
4342 }
4343 }
4344 }
4345
4346 next_sec = round_jiffies_up(next);
4347 next_sched = next;
4348
4349 /* If rounded timeout is accurate enough, accept it. */
4350 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4351 next_sched = next_sec;
4352
4353 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4354 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4355 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4356
4357 ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4358 now, next, next_sec, next_sched);
4359 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4360 rcu_read_unlock_bh();
4361 }
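One detail of the temporary-address branch in addrconf_verify_rtnl() that is easy to miss: regen_advance is the lead time, in seconds, by which a replacement temporary address is created before the current one stops being preferred. As a rough worked example, assuming regen_max_retry = 3, dad_transmits = 1 and a 1000 ms retransmit timer (assumed example values, not taken from this file), the sketch below reproduces the arithmetic.

/* Hypothetical sketch of the regen_advance arithmetic used above. */
#include <stdio.h>

int main(void)
{
	/* Assumed example values; the kernel reads them per-device. */
	unsigned long regen_max_retry = 3;	/* cnf.regen_max_retry */
	unsigned long dad_transmits = 1;	/* cnf.dad_transmits */
	unsigned long retrans_time_ms = 1000;	/* NEIGH_VAR(..., RETRANS_TIME) */
	unsigned long prefered_lft = 86400;	/* example preferred lifetime, seconds */

	unsigned long regen_advance =
		regen_max_retry * dad_transmits * retrans_time_ms / 1000;

	printf("a replacement temporary address is generated once the current\n"
	       "one has been preferred for %lu of its %lu seconds\n",
	       prefered_lft - regen_advance, prefered_lft);
	return 0;
}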
4362
4363 static void addrconf_verify_work(struct work_struct *w)
4364 {
4365 rtnl_lock();
4366 addrconf_verify_rtnl();
4367 rtnl_unlock();
4368 }
4369
4370 static void addrconf_verify(void)
4371 {
4372 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4373 }
4374
4375 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4376 struct in6_addr **peer_pfx)
4377 {
4378 struct in6_addr *pfx = NULL;
4379
4380 *peer_pfx = NULL;
4381
4382 if (addr)
4383 pfx = nla_data(addr);
4384
4385 if (local) {
4386 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4387 *peer_pfx = pfx;
4388 pfx = nla_data(local);
4389 }
4390
4391 return pfx;
4392 }
4393
4394 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4395 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4396 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4397 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4398 [IFA_FLAGS] = { .len = sizeof(u32) },
4399 };
4400
4401 static int
4402 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
4403 {
4404 struct net *net = sock_net(skb->sk);
4405 struct ifaddrmsg *ifm;
4406 struct nlattr *tb[IFA_MAX+1];
4407 struct in6_addr *pfx, *peer_pfx;
4408 u32 ifa_flags;
4409 int err;
4410
4411 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4412 NULL);
4413 if (err < 0)
4414 return err;
4415
4416 ifm = nlmsg_data(nlh);
4417 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4418 if (!pfx)
4419 return -EINVAL;
4420
4421 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4422
4423 /* We ignore other flags so far. */
4424 ifa_flags &= IFA_F_MANAGETEMPADDR;
4425
4426 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4427 ifm->ifa_prefixlen);
4428 }
4429
4430 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
4431 u32 prefered_lft, u32 valid_lft)
4432 {
4433 u32 flags;
4434 clock_t expires;
4435 unsigned long timeout;
4436 bool was_managetempaddr;
4437 bool had_prefixroute;
4438
4439 ASSERT_RTNL();
4440
4441 if (!valid_lft || (prefered_lft > valid_lft))
4442 return -EINVAL;
4443
4444 if (ifa_flags & IFA_F_MANAGETEMPADDR &&
4445 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4446 return -EINVAL;
4447
4448 timeout = addrconf_timeout_fixup(valid_lft, HZ);
4449 if (addrconf_finite_timeout(timeout)) {
4450 expires = jiffies_to_clock_t(timeout * HZ);
4451 valid_lft = timeout;
4452 flags = RTF_EXPIRES;
4453 } else {
4454 expires = 0;
4455 flags = 0;
4456 ifa_flags |= IFA_F_PERMANENT;
4457 }
4458
4459 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
4460 if (addrconf_finite_timeout(timeout)) {
4461 if (timeout == 0)
4462 ifa_flags |= IFA_F_DEPRECATED;
4463 prefered_lft = timeout;
4464 }
4465
4466 spin_lock_bh(&ifp->lock);
4467 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4468 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4469 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4470 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4471 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4472 IFA_F_NOPREFIXROUTE);
4473 ifp->flags |= ifa_flags;
4474 ifp->tstamp = jiffies;
4475 ifp->valid_lft = valid_lft;
4476 ifp->prefered_lft = prefered_lft;
4477
4478 spin_unlock_bh(&ifp->lock);
4479 if (!(ifp->flags&IFA_F_TENTATIVE))
4480 ipv6_ifa_notify(0, ifp);
4481
4482 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
4483 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
4484 expires, flags);
4485 } else if (had_prefixroute) {
4486 enum cleanup_prefix_rt_t action;
4487 unsigned long rt_expires;
4488
4489 write_lock_bh(&ifp->idev->lock);
4490 action = check_cleanup_prefix_route(ifp, &rt_expires);
4491 write_unlock_bh(&ifp->idev->lock);
4492
4493 if (action != CLEANUP_PREFIX_RT_NOP) {
4494 cleanup_prefix_route(ifp, rt_expires,
4495 action == CLEANUP_PREFIX_RT_DEL);
4496 }
4497 }
4498
4499 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4500 if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4501 valid_lft = prefered_lft = 0;
4502 manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
4503 !was_managetempaddr, jiffies);
4504 }
4505
4506 addrconf_verify_rtnl();
4507
4508 return 0;
4509 }
4510
4511 static int
4512 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
4513 {
4514 struct net *net = sock_net(skb->sk);
4515 struct ifaddrmsg *ifm;
4516 struct nlattr *tb[IFA_MAX+1];
4517 struct in6_addr *pfx, *peer_pfx;
4518 struct inet6_ifaddr *ifa;
4519 struct net_device *dev;
4520 u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
4521 u32 ifa_flags;
4522 int err;
4523
4524 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4525 NULL);
4526 if (err < 0)
4527 return err;
4528
4529 ifm = nlmsg_data(nlh);
4530 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4531 if (!pfx)
4532 return -EINVAL;
4533
4534 if (tb[IFA_CACHEINFO]) {
4535 struct ifa_cacheinfo *ci;
4536
4537 ci = nla_data(tb[IFA_CACHEINFO]);
4538 valid_lft = ci->ifa_valid;
4539 preferred_lft = ci->ifa_prefered;
4540 } else {
4541 preferred_lft = INFINITY_LIFE_TIME;
4542 valid_lft = INFINITY_LIFE_TIME;
4543 }
4544
4545 dev = __dev_get_by_index(net, ifm->ifa_index);
4546 if (!dev)
4547 return -ENODEV;
4548
4549 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4550
4551 /* We ignore other flags so far. */
4552 ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4553 IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
4554
4555 ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
4556 if (!ifa) {
4557 /*
4558 * It would be best to check for !NLM_F_CREATE here but
4559 * userspace already relies on not having to provide this.
4560 */
4561 return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx,
4562 ifm->ifa_prefixlen, ifa_flags,
4563 preferred_lft, valid_lft);
4564 }
4565
4566 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4567 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4568 err = -EEXIST;
4569 else
4570 err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);
4571
4572 in6_ifa_put(ifa);
4573
4574 return err;
4575 }
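inet6_rtm_newaddr() above is what receives an RTM_NEWADDR request from userspace, parses it against ifa_ipv6_policy and either adds a new address or modifies an existing one. A minimal, hedged sketch of such a request over a raw NETLINK_ROUTE socket follows; the interface index (2) and the 2001:db8::1/64 address are placeholders chosen for illustration, and error handling is abbreviated.

/* Hypothetical userspace sketch: add an IPv6 address via RTM_NEWADDR. */
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>

int main(void)
{
	struct {
		struct nlmsghdr nh;
		struct ifaddrmsg ifa;
		char attrs[64];
	} req;
	struct rtattr *rta;
	struct in6_addr addr;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifa));
	req.nh.nlmsg_type = RTM_NEWADDR;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL;
	req.ifa.ifa_family = AF_INET6;
	req.ifa.ifa_prefixlen = 64;
	req.ifa.ifa_index = 2;				/* placeholder ifindex */

	inet_pton(AF_INET6, "2001:db8::1", &addr);	/* documentation prefix */

	/* IFA_LOCAL (or IFA_ADDRESS) carries the address itself; see
	 * extract_addr() above for how the kernel interprets the pair. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFA_LOCAL;
	rta->rta_len = RTA_LENGTH(sizeof(addr));
	memcpy(RTA_DATA(rta), &addr, sizeof(addr));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	if (send(fd, &req, req.nh.nlmsg_len, 0) < 0)
		return 1;
	close(fd);
	return 0;
}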
4576
4577 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4578 u8 scope, int ifindex)
4579 {
4580 struct ifaddrmsg *ifm;
4581
4582 ifm = nlmsg_data(nlh);
4583 ifm->ifa_family = AF_INET6;
4584 ifm->ifa_prefixlen = prefixlen;
4585 ifm->ifa_flags = flags;
4586 ifm->ifa_scope = scope;
4587 ifm->ifa_index = ifindex;
4588 }
4589
4590 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4591 unsigned long tstamp, u32 preferred, u32 valid)
4592 {
4593 struct ifa_cacheinfo ci;
4594
4595 ci.cstamp = cstamp_delta(cstamp);
4596 ci.tstamp = cstamp_delta(tstamp);
4597 ci.ifa_prefered = preferred;
4598 ci.ifa_valid = valid;
4599
4600 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4601 }
4602
4603 static inline int rt_scope(int ifa_scope)
4604 {
4605 if (ifa_scope & IFA_HOST)
4606 return RT_SCOPE_HOST;
4607 else if (ifa_scope & IFA_LINK)
4608 return RT_SCOPE_LINK;
4609 else if (ifa_scope & IFA_SITE)
4610 return RT_SCOPE_SITE;
4611 else
4612 return RT_SCOPE_UNIVERSE;
4613 }
4614
4615 static inline int inet6_ifaddr_msgsize(void)
4616 {
4617 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4618 + nla_total_size(16) /* IFA_LOCAL */
4619 + nla_total_size(16) /* IFA_ADDRESS */
4620 + nla_total_size(sizeof(struct ifa_cacheinfo))
4621 + nla_total_size(4) /* IFA_FLAGS */;
4622 }
4623
4624 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4625 u32 portid, u32 seq, int event, unsigned int flags)
4626 {
4627 struct nlmsghdr *nlh;
4628 u32 preferred, valid;
4629
4630 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4631 if (!nlh)
4632 return -EMSGSIZE;
4633
4634 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4635 ifa->idev->dev->ifindex);
4636
4637 if (!((ifa->flags&IFA_F_PERMANENT) &&
4638 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
4639 preferred = ifa->prefered_lft;
4640 valid = ifa->valid_lft;
4641 if (preferred != INFINITY_LIFE_TIME) {
4642 long tval = (jiffies - ifa->tstamp)/HZ;
4643 if (preferred > tval)
4644 preferred -= tval;
4645 else
4646 preferred = 0;
4647 if (valid != INFINITY_LIFE_TIME) {
4648 if (valid > tval)
4649 valid -= tval;
4650 else
4651 valid = 0;
4652 }
4653 }
4654 } else {
4655 preferred = INFINITY_LIFE_TIME;
4656 valid = INFINITY_LIFE_TIME;
4657 }
4658
4659 if (!ipv6_addr_any(&ifa->peer_addr)) {
4660 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
4661 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
4662 goto error;
4663 } else
4664 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
4665 goto error;
4666
4667 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
4668 goto error;
4669
4670 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
4671 goto error;
4672
4673 nlmsg_end(skb, nlh);
4674 return 0;
4675
4676 error:
4677 nlmsg_cancel(skb, nlh);
4678 return -EMSGSIZE;
4679 }
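The lifetime block in inet6_fill_ifaddr() above reports the time remaining rather than the configured value: the seconds elapsed since ifa->tstamp are subtracted from the stored preferred/valid lifetimes and clamped at zero, while permanent addresses with an infinite preferred lifetime report infinity for both. A small standalone sketch of that clamp, with example numbers of the editor's choosing, is shown below.

/* Hypothetical sketch of the remaining-lifetime computation above. */
#include <stdio.h>

#define INFINITY_LIFE_TIME 0xFFFFFFFFu

static unsigned int remaining(unsigned int lft, long elapsed)
{
	if (lft == INFINITY_LIFE_TIME)
		return lft;
	return lft > (unsigned long)elapsed ? lft - elapsed : 0;
}

int main(void)
{
	long elapsed = 100;		/* (jiffies - ifa->tstamp) / HZ */

	/* e.g. prefered_lft = 600s, valid_lft = 3600s, 100s after tstamp */
	printf("preferred %u valid %u\n",
	       remaining(600, elapsed), remaining(3600, elapsed));
	return 0;
}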
4680
4681 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
4682 u32 portid, u32 seq, int event, u16 flags)
4683 {
4684 struct nlmsghdr *nlh;
4685 u8 scope = RT_SCOPE_UNIVERSE;
4686 int ifindex = ifmca->idev->dev->ifindex;
4687
4688 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
4689 scope = RT_SCOPE_SITE;
4690
4691 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4692 if (!nlh)
4693 return -EMSGSIZE;
4694
4695 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4696 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
4697 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
4698 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4699 nlmsg_cancel(skb, nlh);
4700 return -EMSGSIZE;
4701 }
4702
4703 nlmsg_end(skb, nlh);
4704 return 0;
4705 }
4706
4707 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
4708 u32 portid, u32 seq, int event, unsigned int flags)
4709 {
4710 struct nlmsghdr *nlh;
4711 u8 scope = RT_SCOPE_UNIVERSE;
4712 int ifindex = ifaca->aca_idev->dev->ifindex;
4713
4714 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
4715 scope = RT_SCOPE_SITE;
4716
4717 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4718 if (!nlh)
4719 return -EMSGSIZE;
4720
4721 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4722 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
4723 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
4724 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4725 nlmsg_cancel(skb, nlh);
4726 return -EMSGSIZE;
4727 }
4728
4729 nlmsg_end(skb, nlh);
4730 return 0;
4731 }
4732
4733 enum addr_type_t {
4734 UNICAST_ADDR,
4735 MULTICAST_ADDR,
4736 ANYCAST_ADDR,
4737 };
4738
4739 /* called with rcu_read_lock() */
4740 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4741 struct netlink_callback *cb, enum addr_type_t type,
4742 int s_ip_idx, int *p_ip_idx)
4743 {
4744 struct ifmcaddr6 *ifmca;
4745 struct ifacaddr6 *ifaca;
4746 int err = 1;
4747 int ip_idx = *p_ip_idx;
4748
4749 read_lock_bh(&idev->lock);
4750 switch (type) {
4751 case UNICAST_ADDR: {
4752 struct inet6_ifaddr *ifa;
4753
4754 /* unicast address incl. temp addr */
4755 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4756 if (++ip_idx < s_ip_idx)
4757 continue;
4758 err = inet6_fill_ifaddr(skb, ifa,
4759 NETLINK_CB(cb->skb).portid,
4760 cb->nlh->nlmsg_seq,
4761 RTM_NEWADDR,
4762 NLM_F_MULTI);
4763 if (err < 0)
4764 break;
4765 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4766 }
4767 break;
4768 }
4769 case MULTICAST_ADDR:
4770 /* multicast address */
4771 for (ifmca = idev->mc_list; ifmca;
4772 ifmca = ifmca->next, ip_idx++) {
4773 if (ip_idx < s_ip_idx)
4774 continue;
4775 err = inet6_fill_ifmcaddr(skb, ifmca,
4776 NETLINK_CB(cb->skb).portid,
4777 cb->nlh->nlmsg_seq,
4778 RTM_GETMULTICAST,
4779 NLM_F_MULTI);
4780 if (err < 0)
4781 break;
4782 }
4783 break;
4784 case ANYCAST_ADDR:
4785 /* anycast address */
4786 for (ifaca = idev->ac_list; ifaca;
4787 ifaca = ifaca->aca_next, ip_idx++) {
4788 if (ip_idx < s_ip_idx)
4789 continue;
4790 err = inet6_fill_ifacaddr(skb, ifaca,
4791 NETLINK_CB(cb->skb).portid,
4792 cb->nlh->nlmsg_seq,
4793 RTM_GETANYCAST,
4794 NLM_F_MULTI);
4795 if (err < 0)
4796 break;
4797 }
4798 break;
4799 default:
4800 break;
4801 }
4802 read_unlock_bh(&idev->lock);
4803 *p_ip_idx = ip_idx;
4804 return err;
4805 }
4806
4807 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
4808 enum addr_type_t type)
4809 {
4810 struct net *net = sock_net(skb->sk);
4811 int h, s_h;
4812 int idx, ip_idx;
4813 int s_idx, s_ip_idx;
4814 struct net_device *dev;
4815 struct inet6_dev *idev;
4816 struct hlist_head *head;
4817
4818 s_h = cb->args[0];
4819 s_idx = idx = cb->args[1];
4820 s_ip_idx = ip_idx = cb->args[2];
4821
4822 rcu_read_lock();
4823 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
4824 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4825 idx = 0;
4826 head = &net->dev_index_head[h];
4827 hlist_for_each_entry_rcu(dev, head, index_hlist) {
4828 if (idx < s_idx)
4829 goto cont;
4830 if (h > s_h || idx > s_idx)
4831 s_ip_idx = 0;
4832 ip_idx = 0;
4833 idev = __in6_dev_get(dev);
4834 if (!idev)
4835 goto cont;
4836
4837 if (in6_dump_addrs(idev, skb, cb, type,
4838 s_ip_idx, &ip_idx) < 0)
4839 goto done;
4840 cont:
4841 idx++;
4842 }
4843 }
4844 done:
4845 rcu_read_unlock();
4846 cb->args[0] = h;
4847 cb->args[1] = idx;
4848 cb->args[2] = ip_idx;
4849
4850 return skb->len;
4851 }
4852
4853 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
4854 {
4855 enum addr_type_t type = UNICAST_ADDR;
4856
4857 return inet6_dump_addr(skb, cb, type);
4858 }
4859
4860 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
4861 {
4862 enum addr_type_t type = MULTICAST_ADDR;
4863
4864 return inet6_dump_addr(skb, cb, type);
4865 }
4866
4867
4868 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
4869 {
4870 enum addr_type_t type = ANYCAST_ADDR;
4871
4872 return inet6_dump_addr(skb, cb, type);
4873 }
4874
4875 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
4876 {
4877 struct net *net = sock_net(in_skb->sk);
4878 struct ifaddrmsg *ifm;
4879 struct nlattr *tb[IFA_MAX+1];
4880 struct in6_addr *addr = NULL, *peer;
4881 struct net_device *dev = NULL;
4882 struct inet6_ifaddr *ifa;
4883 struct sk_buff *skb;
4884 int err;
4885
4886 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4887 NULL);
4888 if (err < 0)
4889 goto errout;
4890
4891 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
4892 if (!addr) {
4893 err = -EINVAL;
4894 goto errout;
4895 }
4896
4897 ifm = nlmsg_data(nlh);
4898 if (ifm->ifa_index)
4899 dev = __dev_get_by_index(net, ifm->ifa_index);
4900
4901 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
4902 if (!ifa) {
4903 err = -EADDRNOTAVAIL;
4904 goto errout;
4905 }
4906
4907 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
4908 if (!skb) {
4909 err = -ENOBUFS;
4910 goto errout_ifa;
4911 }
4912
4913 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
4914 nlh->nlmsg_seq, RTM_NEWADDR, 0);
4915 if (err < 0) {
4916 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4917 WARN_ON(err == -EMSGSIZE);
4918 kfree_skb(skb);
4919 goto errout_ifa;
4920 }
4921 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4922 errout_ifa:
4923 in6_ifa_put(ifa);
4924 errout:
4925 return err;
4926 }
4927
4928 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
4929 {
4930 struct sk_buff *skb;
4931 struct net *net = dev_net(ifa->idev->dev);
4932 int err = -ENOBUFS;
4933
4934 /* Don't send DELADDR notification for TENTATIVE address,
4935 * since NEWADDR notification is sent only after removing
4936 * TENTATIVE flag.
4937 */
4938 if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
4939 return;
4940
4941 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
4942 if (!skb)
4943 goto errout;
4944
4945 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
4946 if (err < 0) {
4947 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4948 WARN_ON(err == -EMSGSIZE);
4949 kfree_skb(skb);
4950 goto errout;
4951 }
4952 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
4953 return;
4954 errout:
4955 if (err < 0)
4956 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
4957 }
4958
4959 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
4960 __s32 *array, int bytes)
4961 {
4962 BUG_ON(bytes < (DEVCONF_MAX * 4));
4963
4964 memset(array, 0, bytes);
4965 array[DEVCONF_FORWARDING] = cnf->forwarding;
4966 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
4967 array[DEVCONF_MTU6] = cnf->mtu6;
4968 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
4969 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
4970 array[DEVCONF_AUTOCONF] = cnf->autoconf;
4971 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
4972 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
4973 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
4974 jiffies_to_msecs(cnf->rtr_solicit_interval);
4975 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
4976 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
4977 array[DEVCONF_RTR_SOLICIT_DELAY] =
4978 jiffies_to_msecs(cnf->rtr_solicit_delay);
4979 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
4980 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
4981 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
4982 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
4983 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
4984 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
4985 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
4986 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
4987 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
4988 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
4989 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
4990 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
4991 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
4992 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
4993 #ifdef CONFIG_IPV6_ROUTER_PREF
4994 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
4995 array[DEVCONF_RTR_PROBE_INTERVAL] =
4996 jiffies_to_msecs(cnf->rtr_probe_interval);
4997 #ifdef CONFIG_IPV6_ROUTE_INFO
4998 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
4999 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5000 #endif
5001 #endif
5002 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5003 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5004 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5005 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5006 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5007 #endif
5008 #ifdef CONFIG_IPV6_MROUTE
5009 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
5010 #endif
5011 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5012 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5013 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5014 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5015 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5016 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5017 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5018 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5019 /* we omit DEVCONF_STABLE_SECRET for now */
5020 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5021 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5022 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5023 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5024 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5025 #ifdef CONFIG_IPV6_SEG6_HMAC
5026 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5027 #endif
5028 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5029 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5030 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5031 }
5032
5033 static inline size_t inet6_ifla6_size(void)
5034 {
5035 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5036 + nla_total_size(sizeof(struct ifla_cacheinfo))
5037 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5038 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5039 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5040 + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
5041 }
5042
5043 static inline size_t inet6_if_nlmsg_size(void)
5044 {
5045 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5046 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5047 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5048 + nla_total_size(4) /* IFLA_MTU */
5049 + nla_total_size(4) /* IFLA_LINK */
5050 + nla_total_size(1) /* IFLA_OPERSTATE */
5051 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5052 }
5053
5054 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5055 int bytes)
5056 {
5057 int i;
5058 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5059 BUG_ON(pad < 0);
5060
5061 /* Use put_unaligned() because stats may not be aligned for u64. */
5062 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5063 for (i = 1; i < ICMP6_MIB_MAX; i++)
5064 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5065
5066 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5067 }
5068
5069 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5070 int bytes, size_t syncpoff)
5071 {
5072 int i, c;
5073 u64 buff[IPSTATS_MIB_MAX];
5074 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5075
5076 BUG_ON(pad < 0);
5077
5078 memset(buff, 0, sizeof(buff));
5079 buff[0] = IPSTATS_MIB_MAX;
5080
5081 for_each_possible_cpu(c) {
5082 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5083 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5084 }
5085
5086 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5087 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5088 }
5089
5090 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5091 int bytes)
5092 {
5093 switch (attrtype) {
5094 case IFLA_INET6_STATS:
5095 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5096 offsetof(struct ipstats_mib, syncp));
5097 break;
5098 case IFLA_INET6_ICMP6STATS:
5099 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5100 break;
5101 }
5102 }
5103
5104 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5105 u32 ext_filter_mask)
5106 {
5107 struct nlattr *nla;
5108 struct ifla_cacheinfo ci;
5109
5110 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5111 goto nla_put_failure;
5112 ci.max_reasm_len = IPV6_MAXPLEN;
5113 ci.tstamp = cstamp_delta(idev->tstamp);
5114 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5115 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5116 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5117 goto nla_put_failure;
5118 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5119 if (!nla)
5120 goto nla_put_failure;
5121 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5122
5123 /* XXX - MC not implemented */
5124
5125 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5126 return 0;
5127
5128 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5129 if (!nla)
5130 goto nla_put_failure;
5131 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5132
5133 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5134 if (!nla)
5135 goto nla_put_failure;
5136 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5137
5138 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5139 if (!nla)
5140 goto nla_put_failure;
5141
5142 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5143 goto nla_put_failure;
5144
5145 read_lock_bh(&idev->lock);
5146 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5147 read_unlock_bh(&idev->lock);
5148
5149 return 0;
5150
5151 nla_put_failure:
5152 return -EMSGSIZE;
5153 }
5154
5155 static size_t inet6_get_link_af_size(const struct net_device *dev,
5156 u32 ext_filter_mask)
5157 {
5158 if (!__in6_dev_get(dev))
5159 return 0;
5160
5161 return inet6_ifla6_size();
5162 }
5163
5164 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5165 u32 ext_filter_mask)
5166 {
5167 struct inet6_dev *idev = __in6_dev_get(dev);
5168
5169 if (!idev)
5170 return -ENODATA;
5171
5172 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5173 return -EMSGSIZE;
5174
5175 return 0;
5176 }
5177
5178 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5179 {
5180 struct inet6_ifaddr *ifp;
5181 struct net_device *dev = idev->dev;
5182 bool clear_token, update_rs = false;
5183 struct in6_addr ll_addr;
5184
5185 ASSERT_RTNL();
5186
5187 if (!token)
5188 return -EINVAL;
5189 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5190 return -EINVAL;
5191 if (!ipv6_accept_ra(idev))
5192 return -EINVAL;
5193 if (idev->cnf.rtr_solicits == 0)
5194 return -EINVAL;
5195
5196 write_lock_bh(&idev->lock);
5197
5198 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5199 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5200
5201 write_unlock_bh(&idev->lock);
5202
5203 clear_token = ipv6_addr_any(token);
5204 if (clear_token)
5205 goto update_lft;
5206
5207 if (!idev->dead && (idev->if_flags & IF_READY) &&
5208 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5209 IFA_F_OPTIMISTIC)) {
5210 /* If we're not ready, then normal ifup will take care
5211 * of this. Otherwise, we need to request our rs here.
5212 */
5213 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5214 update_rs = true;
5215 }
5216
5217 update_lft:
5218 write_lock_bh(&idev->lock);
5219
5220 if (update_rs) {
5221 idev->if_flags |= IF_RS_SENT;
5222 idev->rs_interval = rfc3315_s14_backoff_init(
5223 idev->cnf.rtr_solicit_interval);
5224 idev->rs_probes = 1;
5225 addrconf_mod_rs_timer(idev, idev->rs_interval);
5226 }
5227
5228 /* Well, that's kinda nasty ... */
5229 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5230 spin_lock(&ifp->lock);
5231 if (ifp->tokenized) {
5232 ifp->valid_lft = 0;
5233 ifp->prefered_lft = 0;
5234 }
5235 spin_unlock(&ifp->lock);
5236 }
5237
5238 write_unlock_bh(&idev->lock);
5239 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5240 addrconf_verify_rtnl();
5241 return 0;
5242 }
5243
5244 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5245 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5246 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5247 };
5248
5249 static int inet6_validate_link_af(const struct net_device *dev,
5250 const struct nlattr *nla)
5251 {
5252 struct nlattr *tb[IFLA_INET6_MAX + 1];
5253
5254 if (dev && !__in6_dev_get(dev))
5255 return -EAFNOSUPPORT;
5256
5257 return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy,
5258 NULL);
5259 }
5260
5261 static int check_addr_gen_mode(int mode)
5262 {
5263 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5264 mode != IN6_ADDR_GEN_MODE_NONE &&
5265 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5266 mode != IN6_ADDR_GEN_MODE_RANDOM)
5267 return -EINVAL;
5268 return 1;
5269 }
5270
5271 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5272 int mode)
5273 {
5274 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5275 !idev->cnf.stable_secret.initialized &&
5276 !net->ipv6.devconf_dflt->stable_secret.initialized)
5277 return -EINVAL;
5278 return 1;
5279 }
5280
5281 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5282 {
5283 int err = -EINVAL;
5284 struct inet6_dev *idev = __in6_dev_get(dev);
5285 struct nlattr *tb[IFLA_INET6_MAX + 1];
5286
5287 if (!idev)
5288 return -EAFNOSUPPORT;
5289
5290 if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5291 BUG();
5292
5293 if (tb[IFLA_INET6_TOKEN]) {
5294 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5295 if (err)
5296 return err;
5297 }
5298
5299 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5300 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5301
5302 if (check_addr_gen_mode(mode) < 0 ||
5303 check_stable_privacy(idev, dev_net(dev), mode) < 0)
5304 return -EINVAL;
5305
5306 idev->cnf.addr_gen_mode = mode;
5307 err = 0;
5308 }
5309
5310 return err;
5311 }
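inet6_set_link_af() above is reached from a netlink link request whose IFLA_AF_SPEC attribute carries a per-family nest keyed by AF_INET6; inside that nest, IFLA_INET6_TOKEN and IFLA_INET6_ADDR_GEN_MODE are what the function acts on. The fragment below is a hedged sketch of laying out only the token nesting with uapi macros; socket setup, the enclosing ifinfomsg and the addr_gen_mode attribute are omitted, the helper names and buffer size are the editor's own, and the token value is arbitrary.

/* Hypothetical sketch: build IFLA_AF_SPEC > AF_INET6 > IFLA_INET6_TOKEN. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

static struct rtattr *nest_begin(char *buf, int *len, unsigned short type)
{
	struct rtattr *rta = (struct rtattr *)(buf + RTA_ALIGN(*len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(0);
	*len = RTA_ALIGN(*len) + rta->rta_len;
	return rta;
}

static void put_attr(char *buf, int *len, unsigned short type,
		     const void *data, int dlen)
{
	struct rtattr *rta = (struct rtattr *)(buf + RTA_ALIGN(*len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(dlen);
	memcpy(RTA_DATA(rta), data, dlen);
	*len = RTA_ALIGN(*len) + rta->rta_len;
}

int main(void)
{
	char buf[128] = { 0 };
	int len = 0;
	struct in6_addr token;
	struct rtattr *af_spec, *af_inet6;

	inet_pton(AF_INET6, "::1:2:3:4", &token);	/* example token */

	af_spec = nest_begin(buf, &len, IFLA_AF_SPEC);
	af_inet6 = nest_begin(buf, &len, AF_INET6);
	put_attr(buf, &len, IFLA_INET6_TOKEN, &token, sizeof(token));
	/* close the nests by patching their lengths */
	af_inet6->rta_len = (unsigned short)(buf + len - (char *)af_inet6);
	af_spec->rta_len = (unsigned short)(buf + len - (char *)af_spec);

	printf("attribute block of %d bytes ready to append after ifinfomsg\n", len);
	return 0;
}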
5312
5313 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5314 u32 portid, u32 seq, int event, unsigned int flags)
5315 {
5316 struct net_device *dev = idev->dev;
5317 struct ifinfomsg *hdr;
5318 struct nlmsghdr *nlh;
5319 void *protoinfo;
5320
5321 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5322 if (!nlh)
5323 return -EMSGSIZE;
5324
5325 hdr = nlmsg_data(nlh);
5326 hdr->ifi_family = AF_INET6;
5327 hdr->__ifi_pad = 0;
5328 hdr->ifi_type = dev->type;
5329 hdr->ifi_index = dev->ifindex;
5330 hdr->ifi_flags = dev_get_flags(dev);
5331 hdr->ifi_change = 0;
5332
5333 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5334 (dev->addr_len &&
5335 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5336 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5337 (dev->ifindex != dev_get_iflink(dev) &&
5338 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5339 nla_put_u8(skb, IFLA_OPERSTATE,
5340 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5341 goto nla_put_failure;
5342 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
5343 if (!protoinfo)
5344 goto nla_put_failure;
5345
5346 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5347 goto nla_put_failure;
5348
5349 nla_nest_end(skb, protoinfo);
5350 nlmsg_end(skb, nlh);
5351 return 0;
5352
5353 nla_put_failure:
5354 nlmsg_cancel(skb, nlh);
5355 return -EMSGSIZE;
5356 }
5357
5358 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5359 {
5360 struct net *net = sock_net(skb->sk);
5361 int h, s_h;
5362 int idx = 0, s_idx;
5363 struct net_device *dev;
5364 struct inet6_dev *idev;
5365 struct hlist_head *head;
5366
5367 s_h = cb->args[0];
5368 s_idx = cb->args[1];
5369
5370 rcu_read_lock();
5371 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5372 idx = 0;
5373 head = &net->dev_index_head[h];
5374 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5375 if (idx < s_idx)
5376 goto cont;
5377 idev = __in6_dev_get(dev);
5378 if (!idev)
5379 goto cont;
5380 if (inet6_fill_ifinfo(skb, idev,
5381 NETLINK_CB(cb->skb).portid,
5382 cb->nlh->nlmsg_seq,
5383 RTM_NEWLINK, NLM_F_MULTI) < 0)
5384 goto out;
5385 cont:
5386 idx++;
5387 }
5388 }
5389 out:
5390 rcu_read_unlock();
5391 cb->args[1] = idx;
5392 cb->args[0] = h;
5393
5394 return skb->len;
5395 }
5396
5397 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5398 {
5399 struct sk_buff *skb;
5400 struct net *net = dev_net(idev->dev);
5401 int err = -ENOBUFS;
5402
5403 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5404 if (!skb)
5405 goto errout;
5406
5407 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5408 if (err < 0) {
5409 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
5410 WARN_ON(err == -EMSGSIZE);
5411 kfree_skb(skb);
5412 goto errout;
5413 }
5414 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
5415 return;
5416 errout:
5417 if (err < 0)
5418 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
5419 }
5420
5421 static inline size_t inet6_prefix_nlmsg_size(void)
5422 {
5423 return NLMSG_ALIGN(sizeof(struct prefixmsg))
5424 + nla_total_size(sizeof(struct in6_addr))
5425 + nla_total_size(sizeof(struct prefix_cacheinfo));
5426 }
5427
5428 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
5429 struct prefix_info *pinfo, u32 portid, u32 seq,
5430 int event, unsigned int flags)
5431 {
5432 struct prefixmsg *pmsg;
5433 struct nlmsghdr *nlh;
5434 struct prefix_cacheinfo ci;
5435
5436 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
5437 if (!nlh)
5438 return -EMSGSIZE;
5439
5440 pmsg = nlmsg_data(nlh);
5441 pmsg->prefix_family = AF_INET6;
5442 pmsg->prefix_pad1 = 0;
5443 pmsg->prefix_pad2 = 0;
5444 pmsg->prefix_ifindex = idev->dev->ifindex;
5445 pmsg->prefix_len = pinfo->prefix_len;
5446 pmsg->prefix_type = pinfo->type;
5447 pmsg->prefix_pad3 = 0;
5448 pmsg->prefix_flags = 0;
5449 if (pinfo->onlink)
5450 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
5451 if (pinfo->autoconf)
5452 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
5453
5454 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
5455 goto nla_put_failure;
5456 ci.preferred_time = ntohl(pinfo->prefered);
5457 ci.valid_time = ntohl(pinfo->valid);
5458 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
5459 goto nla_put_failure;
5460 nlmsg_end(skb, nlh);
5461 return 0;
5462
5463 nla_put_failure:
5464 nlmsg_cancel(skb, nlh);
5465 return -EMSGSIZE;
5466 }
5467
5468 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
5469 struct prefix_info *pinfo)
5470 {
5471 struct sk_buff *skb;
5472 struct net *net = dev_net(idev->dev);
5473 int err = -ENOBUFS;
5474
5475 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
5476 if (!skb)
5477 goto errout;
5478
5479 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
5480 if (err < 0) {
5481 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
5482 WARN_ON(err == -EMSGSIZE);
5483 kfree_skb(skb);
5484 goto errout;
5485 }
5486 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
5487 return;
5488 errout:
5489 if (err < 0)
5490 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
5491 }
5492
5493 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5494 {
5495 struct net *net = dev_net(ifp->idev->dev);
5496
5497 if (event)
5498 ASSERT_RTNL();
5499
5500 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
5501
5502 switch (event) {
5503 case RTM_NEWADDR:
5504 /*
5505 * If the address was optimistic
5506 * we inserted the route at the start of
5507 * our DAD process, so we don't need
5508 * to do it again
5509 */
5510 if (!(ifp->rt->rt6i_node))
5511 ip6_ins_rt(ifp->rt);
5512 if (ifp->idev->cnf.forwarding)
5513 addrconf_join_anycast(ifp);
5514 if (!ipv6_addr_any(&ifp->peer_addr))
5515 addrconf_prefix_route(&ifp->peer_addr, 128,
5516 ifp->idev->dev, 0, 0);
5517 break;
5518 case RTM_DELADDR:
5519 if (ifp->idev->cnf.forwarding)
5520 addrconf_leave_anycast(ifp);
5521 addrconf_leave_solict(ifp->idev, &ifp->addr);
5522 if (!ipv6_addr_any(&ifp->peer_addr)) {
5523 struct rt6_info *rt;
5524
5525 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5526 ifp->idev->dev, 0, 0);
5527 if (rt)
5528 ip6_del_rt(rt);
5529 }
5530 if (ifp->rt) {
5531 dst_hold(&ifp->rt->dst);
5532 ip6_del_rt(ifp->rt);
5533 }
5534 rt_genid_bump_ipv6(net);
5535 break;
5536 }
5537 atomic_inc(&net->ipv6.dev_addr_genid);
5538 }
5539
5540 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5541 {
5542 rcu_read_lock_bh();
5543 if (likely(ifp->idev->dead == 0))
5544 __ipv6_ifa_notify(event, ifp);
5545 rcu_read_unlock_bh();
5546 }
5547
5548 #ifdef CONFIG_SYSCTL
5549
5550 static
5551 int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
5552 void __user *buffer, size_t *lenp, loff_t *ppos)
5553 {
5554 int *valp = ctl->data;
5555 int val = *valp;
5556 loff_t pos = *ppos;
5557 struct ctl_table lctl;
5558 int ret;
5559
5560 /*
5561 * ctl->data points to idev->cnf.forwarding, we should
5562 * not modify it until we get the rtnl lock.
5563 */
5564 lctl = *ctl;
5565 lctl.data = &val;
5566
5567 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5568
5569 if (write)
5570 ret = addrconf_fixup_forwarding(ctl, valp, val);
5571 if (ret)
5572 *ppos = pos;
5573 return ret;
5574 }
5575
5576 static
5577 int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
5578 void __user *buffer, size_t *lenp, loff_t *ppos)
5579 {
5580 struct inet6_dev *idev = ctl->extra1;
5581 int min_mtu = IPV6_MIN_MTU;
5582 struct ctl_table lctl;
5583
5584 lctl = *ctl;
5585 lctl.extra1 = &min_mtu;
5586 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
5587
5588 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
5589 }
5590
5591 static void dev_disable_change(struct inet6_dev *idev)
5592 {
5593 struct netdev_notifier_info info;
5594
5595 if (!idev || !idev->dev)
5596 return;
5597
5598 netdev_notifier_info_init(&info, idev->dev);
5599 if (idev->cnf.disable_ipv6)
5600 addrconf_notify(NULL, NETDEV_DOWN, &info);
5601 else
5602 addrconf_notify(NULL, NETDEV_UP, &info);
5603 }
5604
5605 static void addrconf_disable_change(struct net *net, __s32 newf)
5606 {
5607 struct net_device *dev;
5608 struct inet6_dev *idev;
5609
5610 for_each_netdev(net, dev) {
5611 idev = __in6_dev_get(dev);
5612 if (idev) {
5613 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
5614 idev->cnf.disable_ipv6 = newf;
5615 if (changed)
5616 dev_disable_change(idev);
5617 }
5618 }
5619 }
5620
5621 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
5622 {
5623 struct net *net;
5624 int old;
5625
5626 if (!rtnl_trylock())
5627 return restart_syscall();
5628
5629 net = (struct net *)table->extra2;
5630 old = *p;
5631 *p = newf;
5632
5633 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
5634 rtnl_unlock();
5635 return 0;
5636 }
5637
5638 if (p == &net->ipv6.devconf_all->disable_ipv6) {
5639 net->ipv6.devconf_dflt->disable_ipv6 = newf;
5640 addrconf_disable_change(net, newf);
5641 } else if ((!newf) ^ (!old))
5642 dev_disable_change((struct inet6_dev *)table->extra1);
5643
5644 rtnl_unlock();
5645 return 0;
5646 }
5647
5648 static
5649 int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
5650 void __user *buffer, size_t *lenp, loff_t *ppos)
5651 {
5652 int *valp = ctl->data;
5653 int val = *valp;
5654 loff_t pos = *ppos;
5655 struct ctl_table lctl;
5656 int ret;
5657
5658 /*
5659 * ctl->data points to idev->cnf.disable_ipv6, we should
5660 * not modify it until we get the rtnl lock.
5661 */
5662 lctl = *ctl;
5663 lctl.data = &val;
5664
5665 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5666
5667 if (write)
5668 ret = addrconf_disable_ipv6(ctl, valp, val);
5669 if (ret)
5670 *ppos = pos;
5671 return ret;
5672 }
5673
5674 static
5675 int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
5676 void __user *buffer, size_t *lenp, loff_t *ppos)
5677 {
5678 int *valp = ctl->data;
5679 int ret;
5680 int old, new;
5681
5682 old = *valp;
5683 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5684 new = *valp;
5685
5686 if (write && old != new) {
5687 struct net *net = ctl->extra2;
5688
5689 if (!rtnl_trylock())
5690 return restart_syscall();
5691
5692 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
5693 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5694 NETCONFA_PROXY_NEIGH,
5695 NETCONFA_IFINDEX_DEFAULT,
5696 net->ipv6.devconf_dflt);
5697 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
5698 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5699 NETCONFA_PROXY_NEIGH,
5700 NETCONFA_IFINDEX_ALL,
5701 net->ipv6.devconf_all);
5702 else {
5703 struct inet6_dev *idev = ctl->extra1;
5704
5705 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5706 NETCONFA_PROXY_NEIGH,
5707 idev->dev->ifindex,
5708 &idev->cnf);
5709 }
5710 rtnl_unlock();
5711 }
5712
5713 return ret;
5714 }
5715
5716 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
5717 void __user *buffer, size_t *lenp,
5718 loff_t *ppos)
5719 {
5720 int ret = 0;
5721 int new_val;
5722 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
5723 struct net *net = (struct net *)ctl->extra2;
5724
5725 if (!rtnl_trylock())
5726 return restart_syscall();
5727
5728 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5729
5730 if (write) {
5731 new_val = *((int *)ctl->data);
5732
5733 if (check_addr_gen_mode(new_val) < 0) {
5734 ret = -EINVAL;
5735 goto out;
5736 }
5737
5738 /* request for default */
5739 if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
5740 ipv6_devconf_dflt.addr_gen_mode = new_val;
5741
5742 /* request for individual net device */
5743 } else {
5744 if (!idev)
5745 goto out;
5746
5747 if (check_stable_privacy(idev, net, new_val) < 0) {
5748 ret = -EINVAL;
5749 goto out;
5750 }
5751
5752 if (idev->cnf.addr_gen_mode != new_val) {
5753 idev->cnf.addr_gen_mode = new_val;
5754 addrconf_dev_config(idev->dev);
5755 }
5756 }
5757 }
5758
5759 out:
5760 rtnl_unlock();
5761
5762 return ret;
5763 }
5764
5765 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
5766 void __user *buffer, size_t *lenp,
5767 loff_t *ppos)
5768 {
5769 int err;
5770 struct in6_addr addr;
5771 char str[IPV6_MAX_STRLEN];
5772 struct ctl_table lctl = *ctl;
5773 struct net *net = ctl->extra2;
5774 struct ipv6_stable_secret *secret = ctl->data;
5775
5776 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
5777 return -EIO;
5778
5779 lctl.maxlen = IPV6_MAX_STRLEN;
5780 lctl.data = str;
5781
5782 if (!rtnl_trylock())
5783 return restart_syscall();
5784
5785 if (!write && !secret->initialized) {
5786 err = -EIO;
5787 goto out;
5788 }
5789
5790 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
5791 if (err >= sizeof(str)) {
5792 err = -EIO;
5793 goto out;
5794 }
5795
5796 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
5797 if (err || !write)
5798 goto out;
5799
5800 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
5801 err = -EIO;
5802 goto out;
5803 }
5804
5805 secret->initialized = true;
5806 secret->secret = addr;
5807
5808 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
5809 struct net_device *dev;
5810
5811 for_each_netdev(net, dev) {
5812 struct inet6_dev *idev = __in6_dev_get(dev);
5813
5814 if (idev) {
5815 idev->cnf.addr_gen_mode =
5816 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5817 }
5818 }
5819 } else {
5820 struct inet6_dev *idev = ctl->extra1;
5821
5822 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5823 }
5824
5825 out:
5826 rtnl_unlock();
5827
5828 return err;
5829 }
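addrconf_sysctl_stable_secret() above accepts the secret as a textual IPv6 address (it is parsed with in6_pton()), rejects the conf/all entry outright, and, when the default devconf is written, switches existing devices to stable-privacy address generation. A hedged userspace sketch of writing that sysctl is shown below; the proc path assumes the usual /proc/sys layout and the secret value is an arbitrary example, not a recommendation.

/* Hypothetical userspace sketch: set the stable_secret sysctl (not kernel code). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* The handler expects a textual IPv6 address of at most IPV6_MAX_STRLEN. */
	const char *secret = "2001:db8:1:2:3:4:5:6";
	int fd = open("/proc/sys/net/ipv6/conf/default/stable_secret", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, secret, strlen(secret)) < 0)
		perror("write");
	close(fd);
	return 0;
}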
5830
5831 static
5832 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
5833 int write,
5834 void __user *buffer,
5835 size_t *lenp,
5836 loff_t *ppos)
5837 {
5838 int *valp = ctl->data;
5839 int val = *valp;
5840 loff_t pos = *ppos;
5841 struct ctl_table lctl;
5842 int ret;
5843
5844 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
5845 * we should not modify it until we get the rtnl lock.
5846 */
5847 lctl = *ctl;
5848 lctl.data = &val;
5849
5850 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5851
5852 if (write)
5853 ret = addrconf_fixup_linkdown(ctl, valp, val);
5854 if (ret)
5855 *ppos = pos;
5856 return ret;
5857 }
5858
5859 static
5860 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
5861 {
5862 if (rt) {
5863 if (action)
5864 rt->dst.flags |= DST_NOPOLICY;
5865 else
5866 rt->dst.flags &= ~DST_NOPOLICY;
5867 }
5868 }
5869
5870 static
5871 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
5872 {
5873 struct inet6_ifaddr *ifa;
5874
5875 read_lock_bh(&idev->lock);
5876 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5877 spin_lock(&ifa->lock);
5878 if (ifa->rt) {
5879 struct rt6_info *rt = ifa->rt;
5880 struct fib6_table *table = rt->rt6i_table;
5881 int cpu;
5882
5883 read_lock(&table->tb6_lock);
5884 addrconf_set_nopolicy(ifa->rt, val);
5885 if (rt->rt6i_pcpu) {
5886 for_each_possible_cpu(cpu) {
5887 struct rt6_info **rtp;
5888
5889 rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
5890 addrconf_set_nopolicy(*rtp, val);
5891 }
5892 }
5893 read_unlock(&table->tb6_lock);
5894 }
5895 spin_unlock(&ifa->lock);
5896 }
5897 read_unlock_bh(&idev->lock);
5898 }
5899
5900 static
5901 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
5902 {
5903 struct inet6_dev *idev;
5904 struct net *net;
5905
5906 if (!rtnl_trylock())
5907 return restart_syscall();
5908
5909 *valp = val;
5910
5911 net = (struct net *)ctl->extra2;
5912 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
5913 rtnl_unlock();
5914 return 0;
5915 }
5916
5917 if (valp == &net->ipv6.devconf_all->disable_policy) {
5918 struct net_device *dev;
5919
5920 for_each_netdev(net, dev) {
5921 idev = __in6_dev_get(dev);
5922 if (idev)
5923 addrconf_disable_policy_idev(idev, val);
5924 }
5925 } else {
5926 idev = (struct inet6_dev *)ctl->extra1;
5927 addrconf_disable_policy_idev(idev, val);
5928 }
5929
5930 rtnl_unlock();
5931 return 0;
5932 }
5933
5934 static
5935 int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
5936 void __user *buffer, size_t *lenp,
5937 loff_t *ppos)
5938 {
5939 int *valp = ctl->data;
5940 int val = *valp;
5941 loff_t pos = *ppos;
5942 struct ctl_table lctl;
5943 int ret;
5944
5945 lctl = *ctl;
5946 lctl.data = &val;
5947 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5948
5949 if (write && (*valp != val))
5950 ret = addrconf_disable_policy(ctl, valp, val);
5951
5952 if (ret)
5953 *ppos = pos;
5954
5955 return ret;
5956 }
5957
5958 static int minus_one = -1;
5959 static const int one = 1;
5960 static const int two_five_five = 255;
5961
5962 static const struct ctl_table addrconf_sysctl[] = {
5963 {
5964 .procname = "forwarding",
5965 .data = &ipv6_devconf.forwarding,
5966 .maxlen = sizeof(int),
5967 .mode = 0644,
5968 .proc_handler = addrconf_sysctl_forward,
5969 },
5970 {
5971 .procname = "hop_limit",
5972 .data = &ipv6_devconf.hop_limit,
5973 .maxlen = sizeof(int),
5974 .mode = 0644,
5975 .proc_handler = proc_dointvec_minmax,
5976 .extra1 = (void *)&one,
5977 .extra2 = (void *)&two_five_five,
5978 },
5979 {
5980 .procname = "mtu",
5981 .data = &ipv6_devconf.mtu6,
5982 .maxlen = sizeof(int),
5983 .mode = 0644,
5984 .proc_handler = addrconf_sysctl_mtu,
5985 },
5986 {
5987 .procname = "accept_ra",
5988 .data = &ipv6_devconf.accept_ra,
5989 .maxlen = sizeof(int),
5990 .mode = 0644,
5991 .proc_handler = proc_dointvec,
5992 },
5993 {
5994 .procname = "accept_redirects",
5995 .data = &ipv6_devconf.accept_redirects,
5996 .maxlen = sizeof(int),
5997 .mode = 0644,
5998 .proc_handler = proc_dointvec,
5999 },
6000 {
6001 .procname = "autoconf",
6002 .data = &ipv6_devconf.autoconf,
6003 .maxlen = sizeof(int),
6004 .mode = 0644,
6005 .proc_handler = proc_dointvec,
6006 },
6007 {
6008 .procname = "dad_transmits",
6009 .data = &ipv6_devconf.dad_transmits,
6010 .maxlen = sizeof(int),
6011 .mode = 0644,
6012 .proc_handler = proc_dointvec,
6013 },
6014 {
6015 .procname = "router_solicitations",
6016 .data = &ipv6_devconf.rtr_solicits,
6017 .maxlen = sizeof(int),
6018 .mode = 0644,
6019 .proc_handler = proc_dointvec_minmax,
6020 .extra1 = &minus_one,
6021 },
6022 {
6023 .procname = "router_solicitation_interval",
6024 .data = &ipv6_devconf.rtr_solicit_interval,
6025 .maxlen = sizeof(int),
6026 .mode = 0644,
6027 .proc_handler = proc_dointvec_jiffies,
6028 },
6029 {
6030 .procname = "router_solicitation_max_interval",
6031 .data = &ipv6_devconf.rtr_solicit_max_interval,
6032 .maxlen = sizeof(int),
6033 .mode = 0644,
6034 .proc_handler = proc_dointvec_jiffies,
6035 },
6036 {
6037 .procname = "router_solicitation_delay",
6038 .data = &ipv6_devconf.rtr_solicit_delay,
6039 .maxlen = sizeof(int),
6040 .mode = 0644,
6041 .proc_handler = proc_dointvec_jiffies,
6042 },
6043 {
6044 .procname = "force_mld_version",
6045 .data = &ipv6_devconf.force_mld_version,
6046 .maxlen = sizeof(int),
6047 .mode = 0644,
6048 .proc_handler = proc_dointvec,
6049 },
6050 {
6051 .procname = "mldv1_unsolicited_report_interval",
6052 .data =
6053 &ipv6_devconf.mldv1_unsolicited_report_interval,
6054 .maxlen = sizeof(int),
6055 .mode = 0644,
6056 .proc_handler = proc_dointvec_ms_jiffies,
6057 },
6058 {
6059 .procname = "mldv2_unsolicited_report_interval",
6060 .data =
6061 &ipv6_devconf.mldv2_unsolicited_report_interval,
6062 .maxlen = sizeof(int),
6063 .mode = 0644,
6064 .proc_handler = proc_dointvec_ms_jiffies,
6065 },
6066 {
6067 .procname = "use_tempaddr",
6068 .data = &ipv6_devconf.use_tempaddr,
6069 .maxlen = sizeof(int),
6070 .mode = 0644,
6071 .proc_handler = proc_dointvec,
6072 },
6073 {
6074 .procname = "temp_valid_lft",
6075 .data = &ipv6_devconf.temp_valid_lft,
6076 .maxlen = sizeof(int),
6077 .mode = 0644,
6078 .proc_handler = proc_dointvec,
6079 },
6080 {
6081 .procname = "temp_prefered_lft",
6082 .data = &ipv6_devconf.temp_prefered_lft,
6083 .maxlen = sizeof(int),
6084 .mode = 0644,
6085 .proc_handler = proc_dointvec,
6086 },
6087 {
6088 .procname = "regen_max_retry",
6089 .data = &ipv6_devconf.regen_max_retry,
6090 .maxlen = sizeof(int),
6091 .mode = 0644,
6092 .proc_handler = proc_dointvec,
6093 },
6094 {
6095 .procname = "max_desync_factor",
6096 .data = &ipv6_devconf.max_desync_factor,
6097 .maxlen = sizeof(int),
6098 .mode = 0644,
6099 .proc_handler = proc_dointvec,
6100 },
6101 {
6102 .procname = "max_addresses",
6103 .data = &ipv6_devconf.max_addresses,
6104 .maxlen = sizeof(int),
6105 .mode = 0644,
6106 .proc_handler = proc_dointvec,
6107 },
6108 {
6109 .procname = "accept_ra_defrtr",
6110 .data = &ipv6_devconf.accept_ra_defrtr,
6111 .maxlen = sizeof(int),
6112 .mode = 0644,
6113 .proc_handler = proc_dointvec,
6114 },
6115 {
6116 .procname = "accept_ra_min_hop_limit",
6117 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6118 .maxlen = sizeof(int),
6119 .mode = 0644,
6120 .proc_handler = proc_dointvec,
6121 },
6122 {
6123 .procname = "accept_ra_pinfo",
6124 .data = &ipv6_devconf.accept_ra_pinfo,
6125 .maxlen = sizeof(int),
6126 .mode = 0644,
6127 .proc_handler = proc_dointvec,
6128 },
6129 #ifdef CONFIG_IPV6_ROUTER_PREF
6130 {
6131 .procname = "accept_ra_rtr_pref",
6132 .data = &ipv6_devconf.accept_ra_rtr_pref,
6133 .maxlen = sizeof(int),
6134 .mode = 0644,
6135 .proc_handler = proc_dointvec,
6136 },
6137 {
6138 .procname = "router_probe_interval",
6139 .data = &ipv6_devconf.rtr_probe_interval,
6140 .maxlen = sizeof(int),
6141 .mode = 0644,
6142 .proc_handler = proc_dointvec_jiffies,
6143 },
6144 #ifdef CONFIG_IPV6_ROUTE_INFO
6145 {
6146 .procname = "accept_ra_rt_info_min_plen",
6147 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6148 .maxlen = sizeof(int),
6149 .mode = 0644,
6150 .proc_handler = proc_dointvec,
6151 },
6152 {
6153 .procname = "accept_ra_rt_info_max_plen",
6154 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6155 .maxlen = sizeof(int),
6156 .mode = 0644,
6157 .proc_handler = proc_dointvec,
6158 },
6159 #endif
6160 #endif
6161 {
6162 .procname = "proxy_ndp",
6163 .data = &ipv6_devconf.proxy_ndp,
6164 .maxlen = sizeof(int),
6165 .mode = 0644,
6166 .proc_handler = addrconf_sysctl_proxy_ndp,
6167 },
6168 {
6169 .procname = "accept_source_route",
6170 .data = &ipv6_devconf.accept_source_route,
6171 .maxlen = sizeof(int),
6172 .mode = 0644,
6173 .proc_handler = proc_dointvec,
6174 },
6175 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6176 {
6177 .procname = "optimistic_dad",
6178 .data = &ipv6_devconf.optimistic_dad,
6179 .maxlen = sizeof(int),
6180 .mode = 0644,
6181 .proc_handler = proc_dointvec,
6182 },
6183 {
6184 .procname = "use_optimistic",
6185 .data = &ipv6_devconf.use_optimistic,
6186 .maxlen = sizeof(int),
6187 .mode = 0644,
6188 .proc_handler = proc_dointvec,
6189 },
6190 #endif
6191 #ifdef CONFIG_IPV6_MROUTE
6192 {
6193 .procname = "mc_forwarding",
6194 .data = &ipv6_devconf.mc_forwarding,
6195 .maxlen = sizeof(int),
6196 .mode = 0444,
6197 .proc_handler = proc_dointvec,
6198 },
6199 #endif
6200 {
6201 .procname = "disable_ipv6",
6202 .data = &ipv6_devconf.disable_ipv6,
6203 .maxlen = sizeof(int),
6204 .mode = 0644,
6205 .proc_handler = addrconf_sysctl_disable,
6206 },
6207 {
6208 .procname = "accept_dad",
6209 .data = &ipv6_devconf.accept_dad,
6210 .maxlen = sizeof(int),
6211 .mode = 0644,
6212 .proc_handler = proc_dointvec,
6213 },
6214 {
6215 .procname = "force_tllao",
6216 .data = &ipv6_devconf.force_tllao,
6217 .maxlen = sizeof(int),
6218 .mode = 0644,
6219 .proc_handler = proc_dointvec,
6220 },
6221 {
6222 .procname = "ndisc_notify",
6223 .data = &ipv6_devconf.ndisc_notify,
6224 .maxlen = sizeof(int),
6225 .mode = 0644,
6226 .proc_handler = proc_dointvec,
6227 },
6228 {
6229 .procname = "suppress_frag_ndisc",
6230 .data = &ipv6_devconf.suppress_frag_ndisc,
6231 .maxlen = sizeof(int),
6232 .mode = 0644,
6233 .proc_handler = proc_dointvec,
6234 },
6235 {
6236 .procname = "accept_ra_from_local",
6237 .data = &ipv6_devconf.accept_ra_from_local,
6238 .maxlen = sizeof(int),
6239 .mode = 0644,
6240 .proc_handler = proc_dointvec,
6241 },
6242 {
6243 .procname = "accept_ra_mtu",
6244 .data = &ipv6_devconf.accept_ra_mtu,
6245 .maxlen = sizeof(int),
6246 .mode = 0644,
6247 .proc_handler = proc_dointvec,
6248 },
6249 {
6250 .procname = "stable_secret",
6251 .data = &ipv6_devconf.stable_secret,
6252 .maxlen = IPV6_MAX_STRLEN,
6253 .mode = 0600,
6254 .proc_handler = addrconf_sysctl_stable_secret,
6255 },
6256 {
6257 .procname = "use_oif_addrs_only",
6258 .data = &ipv6_devconf.use_oif_addrs_only,
6259 .maxlen = sizeof(int),
6260 .mode = 0644,
6261 .proc_handler = proc_dointvec,
6262 },
6263 {
6264 .procname = "ignore_routes_with_linkdown",
6265 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6266 .maxlen = sizeof(int),
6267 .mode = 0644,
6268 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6269 },
6270 {
6271 .procname = "drop_unicast_in_l2_multicast",
6272 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6273 .maxlen = sizeof(int),
6274 .mode = 0644,
6275 .proc_handler = proc_dointvec,
6276 },
6277 {
6278 .procname = "drop_unsolicited_na",
6279 .data = &ipv6_devconf.drop_unsolicited_na,
6280 .maxlen = sizeof(int),
6281 .mode = 0644,
6282 .proc_handler = proc_dointvec,
6283 },
6284 {
6285 .procname = "keep_addr_on_down",
6286 .data = &ipv6_devconf.keep_addr_on_down,
6287 .maxlen = sizeof(int),
6288 .mode = 0644,
6289 .proc_handler = proc_dointvec,
6291 },
6292 {
6293 .procname = "seg6_enabled",
6294 .data = &ipv6_devconf.seg6_enabled,
6295 .maxlen = sizeof(int),
6296 .mode = 0644,
6297 .proc_handler = proc_dointvec,
6298 },
6299 #ifdef CONFIG_IPV6_SEG6_HMAC
6300 {
6301 .procname = "seg6_require_hmac",
6302 .data = &ipv6_devconf.seg6_require_hmac,
6303 .maxlen = sizeof(int),
6304 .mode = 0644,
6305 .proc_handler = proc_dointvec,
6306 },
6307 #endif
6308 {
6309 .procname = "enhanced_dad",
6310 .data = &ipv6_devconf.enhanced_dad,
6311 .maxlen = sizeof(int),
6312 .mode = 0644,
6313 .proc_handler = proc_dointvec,
6314 },
6315 {
6316 .procname = "addr_gen_mode",
6317 .data = &ipv6_devconf.addr_gen_mode,
6318 .maxlen = sizeof(int),
6319 .mode = 0644,
6320 .proc_handler = addrconf_sysctl_addr_gen_mode,
6321 },
6322 {
6323 .procname = "disable_policy",
6324 .data = &ipv6_devconf.disable_policy,
6325 .maxlen = sizeof(int),
6326 .mode = 0644,
6327 .proc_handler = addrconf_sysctl_disable_policy,
6328 },
6329 {
6330 /* sentinel */
6331 }
6332 };
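/* Editor's note (illustrative, not part of the original source): every entry
 * in the addrconf_sysctl[] template above surfaces as a file under
 * /proc/sys/net/ipv6/conf/{all,default,<ifname>}/ once the table is
 * registered below.  Assuming a hypothetical interface "eth0", the per-device
 * knobs can then be read and written from userspace, for example:
 *
 *	cat /proc/sys/net/ipv6/conf/eth0/use_tempaddr
 *	echo 2 > /proc/sys/net/ipv6/conf/eth0/use_tempaddr
 *	sysctl -w net.ipv6.conf.all.disable_ipv6=1
 */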
6333
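/* Register the addrconf sysctl table for @dev_name.
 *
 * A private copy of the addrconf_sysctl[] template is kmemdup()'d and each
 * entry's ->data pointer is rebased from the static ipv6_devconf template
 * onto the matching field of @p (e.g. when @p is &idev->cnf, the
 * "use_tempaddr" entry ends up pointing at idev->cnf.use_tempaddr).  Entries
 * whose extra1/extra2 are still unused additionally get the inet6_dev and
 * struct net stashed there for the proc handlers' benefit.  The copy is then
 * registered under "net/ipv6/conf/<dev_name>" and an RTM_NEWNETCONF
 * notification is sent for the corresponding ifindex.
 */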
6334 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6335 struct inet6_dev *idev, struct ipv6_devconf *p)
6336 {
6337 int i, ifindex;
6338 struct ctl_table *table;
6339 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6340
6341 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6342 if (!table)
6343 goto out;
6344
6345 for (i = 0; table[i].data; i++) {
6346 table[i].data += (char *)p - (char *)&ipv6_devconf;
6347 /* extra1/extra2 are used by handlers such as
6348 * proc_dointvec_minmax to hold min/max bounds, so it is only
6349 * safe to repurpose them for idev/net when both are still unset.
6350 */
6351 if (!table[i].extra1 && !table[i].extra2) {
6352 table[i].extra1 = idev; /* embedded; no ref */
6353 table[i].extra2 = net;
6354 }
6355 }
6356
6357 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6358
6359 p->sysctl_header = register_net_sysctl(net, path, table);
6360 if (!p->sysctl_header)
6361 goto free;
6362
6363 if (!strcmp(dev_name, "all"))
6364 ifindex = NETCONFA_IFINDEX_ALL;
6365 else if (!strcmp(dev_name, "default"))
6366 ifindex = NETCONFA_IFINDEX_DEFAULT;
6367 else
6368 ifindex = idev->dev->ifindex;
6369 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
6370 ifindex, p);
6371 return 0;
6372
6373 free:
6374 kfree(table);
6375 out:
6376 return -ENOBUFS;
6377 }
6378
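/* Tear down what __addrconf_sysctl_register() set up: recover the duplicated
 * table via ctl_table_arg, unregister and free it, and announce the removal
 * with an RTM_DELNETCONF notification.
 */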
6379 static void __addrconf_sysctl_unregister(struct net *net,
6380 struct ipv6_devconf *p, int ifindex)
6381 {
6382 struct ctl_table *table;
6383
6384 if (!p->sysctl_header)
6385 return;
6386
6387 table = p->sysctl_header->ctl_table_arg;
6388 unregister_net_sysctl_table(p->sysctl_header);
6389 p->sysctl_header = NULL;
6390 kfree(table);
6391
6392 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
6393 }
6394
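/* Per-device sysctl registration: reject device names that would collide
 * with the reserved "all"/"default" directories, register the neighbour
 * (ndisc) sysctls first, then the addrconf table; unwind the ndisc
 * registration if the latter fails.
 */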
6395 static int addrconf_sysctl_register(struct inet6_dev *idev)
6396 {
6397 int err;
6398
6399 if (!sysctl_dev_name_is_allowed(idev->dev->name))
6400 return -EINVAL;
6401
6402 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
6403 &ndisc_ifinfo_sysctl_change);
6404 if (err)
6405 return err;
6406 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
6407 idev, &idev->cnf);
6408 if (err)
6409 neigh_sysctl_unregister(idev->nd_parms);
6410
6411 return err;
6412 }
6413
6414 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
6415 {
6416 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
6417 idev->dev->ifindex);
6418 neigh_sysctl_unregister(idev->nd_parms);
6419 }
6420
6421
6422 #endif
6423
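/* Per-namespace initialisation: clone the "all" and "default" devconf
 * templates for this netns, inherit the module-level autoconf/disable_ipv6
 * defaults, invalidate any inherited stable_secret, and (with CONFIG_SYSCTL)
 * register the "all" and "default" sysctl trees.
 */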
6424 static int __net_init addrconf_init_net(struct net *net)
6425 {
6426 int err = -ENOMEM;
6427 struct ipv6_devconf *all, *dflt;
6428
6429 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
6430 if (!all)
6431 goto err_alloc_all;
6432
6433 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
6434 if (!dflt)
6435 goto err_alloc_dflt;
6436
6437 /* these will be inherited by all namespaces */
6438 dflt->autoconf = ipv6_defaults.autoconf;
6439 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
6440
6441 dflt->stable_secret.initialized = false;
6442 all->stable_secret.initialized = false;
6443
6444 net->ipv6.devconf_all = all;
6445 net->ipv6.devconf_dflt = dflt;
6446
6447 #ifdef CONFIG_SYSCTL
6448 err = __addrconf_sysctl_register(net, "all", NULL, all);
6449 if (err < 0)
6450 goto err_reg_all;
6451
6452 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
6453 if (err < 0)
6454 goto err_reg_dflt;
6455 #endif
6456 return 0;
6457
6458 #ifdef CONFIG_SYSCTL
6459 err_reg_dflt:
6460 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
6461 err_reg_all:
6462 kfree(dflt);
6463 #endif
6464 err_alloc_dflt:
6465 kfree(all);
6466 err_alloc_all:
6467 return err;
6468 }
6469
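/* Per-namespace teardown: unregister the "default" and "all" sysctl trees
 * (if built in) and free both devconf copies.
 */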
6470 static void __net_exit addrconf_exit_net(struct net *net)
6471 {
6472 #ifdef CONFIG_SYSCTL
6473 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
6474 NETCONFA_IFINDEX_DEFAULT);
6475 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
6476 NETCONFA_IFINDEX_ALL);
6477 #endif
6478 kfree(net->ipv6.devconf_dflt);
6479 kfree(net->ipv6.devconf_all);
6480 }
6481
6482 static struct pernet_operations addrconf_ops = {
6483 .init = addrconf_init_net,
6484 .exit = addrconf_exit_net,
6485 };
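/* addrconf_ops ties the two functions above into namespace lifetime:
 * register_pernet_subsys() (called from addrconf_init() below) makes
 * addrconf_init_net()/addrconf_exit_net() run for every struct net.
 */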
6486
6487 static struct rtnl_af_ops inet6_ops __read_mostly = {
6488 .family = AF_INET6,
6489 .fill_link_af = inet6_fill_link_af,
6490 .get_link_af_size = inet6_get_link_af_size,
6491 .validate_link_af = inet6_validate_link_af,
6492 .set_link_af = inet6_set_link_af,
6493 };
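/* inet6_ops provides the AF_INET6 callbacks rtnetlink uses to fill, size,
 * validate and apply the IPv6 portion of IFLA_AF_SPEC in RTM_GETLINK /
 * RTM_SETLINK messages.
 */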
6494
6495 /*
6496 * Init / cleanup code
6497 */
6498
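/* Module init: set up the address-label policy table, the pernet subsystem,
 * the addrconf workqueue, the loopback inet6_dev (see the comment below),
 * the address hash table, the netdev notifier, the periodic address
 * validation work and the rtnetlink handlers, in that order; the error
 * paths unwind in reverse.
 */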
6499 int __init addrconf_init(void)
6500 {
6501 struct inet6_dev *idev;
6502 int i, err;
6503
6504 err = ipv6_addr_label_init();
6505 if (err < 0) {
6506 pr_crit("%s: cannot initialize default policy table: %d\n",
6507 __func__, err);
6508 goto out;
6509 }
6510
6511 err = register_pernet_subsys(&addrconf_ops);
6512 if (err < 0)
6513 goto out_addrlabel;
6514
6515 addrconf_wq = create_workqueue("ipv6_addrconf");
6516 if (!addrconf_wq) {
6517 err = -ENOMEM;
6518 goto out_nowq;
6519 }
6520
6521 /* The addrconf netdev notifier requires that loopback_dev
6522 * has its ipv6 private information allocated and set up
6523 * before it can bring up and give link-local addresses
6524 * to other devices which are up.
6525 *
6526 * Unfortunately, loopback_dev is not necessarily the first
6527 * entry in the global dev_base list of net devices. In fact,
6528 * it is likely to be the very last entry on that list.
6529 * So this causes the notifier registration below to try and
6530 * give link-local addresses to all devices besides loopback_dev
6531 * first, then loopback_dev, which causes all the non-loopback_dev
6532 * devices to fail to get a link-local address.
6533 *
6534 * So, as a temporary fix, allocate the ipv6 structure for
6535 * loopback_dev first by hand.
6536 * Longer term, all of the dependencies ipv6 has upon the loopback
6537 * device and it being up should be removed.
6538 */
6539 rtnl_lock();
6540 idev = ipv6_add_dev(init_net.loopback_dev);
6541 rtnl_unlock();
6542 if (IS_ERR(idev)) {
6543 err = PTR_ERR(idev);
6544 goto errlo;
6545 }
6546
6547 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6548 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
6549
6550 register_netdevice_notifier(&ipv6_dev_notf);
6551
6552 addrconf_verify();
6553
6554 rtnl_af_register(&inet6_ops);
6555
6556 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
6557 NULL);
6558 if (err < 0)
6559 goto errout;
6560
6561 /* Only the first call to __rtnl_register can fail: it allocates the per-family handler table for PF_INET6, after which the remaining registrations below cannot return -ENOBUFS */
6562 __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, NULL);
6563 __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, NULL);
6564 __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
6565 inet6_dump_ifaddr, NULL);
6566 __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
6567 inet6_dump_ifmcaddr, NULL);
6568 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
6569 inet6_dump_ifacaddr, NULL);
6570 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
6571 inet6_netconf_dump_devconf, NULL);
6572
6573 ipv6_addr_label_rtnl_register();
6574
6575 return 0;
6576 errout:
6577 rtnl_af_unregister(&inet6_ops);
6578 unregister_netdevice_notifier(&ipv6_dev_notf);
6579 errlo:
6580 destroy_workqueue(addrconf_wq);
6581 out_nowq:
6582 unregister_pernet_subsys(&addrconf_ops);
6583 out_addrlabel:
6584 ipv6_addr_label_cleanup();
6585 out:
6586 return err;
6587 }
6588
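/* Module unload: unregister the notifier, pernet subsystem and address
 * labels, drop the AF_INET6 rtnl_af ops under RTNL, bring down every
 * device's IPv6 state via addrconf_ifdown() (loopback last), verify that the
 * address hash table is empty, cancel the pending verification work and
 * destroy the workqueue.
 */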
6589 void addrconf_cleanup(void)
6590 {
6591 struct net_device *dev;
6592 int i;
6593
6594 unregister_netdevice_notifier(&ipv6_dev_notf);
6595 unregister_pernet_subsys(&addrconf_ops);
6596 ipv6_addr_label_cleanup();
6597
6598 rtnl_lock();
6599
6600 __rtnl_af_unregister(&inet6_ops);
6601
6602 /* clean dev list */
6603 for_each_netdev(&init_net, dev) {
6604 if (__in6_dev_get(dev) == NULL)
6605 continue;
6606 addrconf_ifdown(dev, 1);
6607 }
6608 addrconf_ifdown(init_net.loopback_dev, 2);
6609
6610 /*
6611 * Check hash table.
6612 */
6613 spin_lock_bh(&addrconf_hash_lock);
6614 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6615 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
6616 spin_unlock_bh(&addrconf_hash_lock);
6617 cancel_delayed_work(&addr_chk_work);
6618 rtnl_unlock();
6619
6620 destroy_workqueue(addrconf_wq);
6621 }