net/ipv6/addrconf.c (mirror_ubuntu-bionic-kernel.git)
1 /*
2 * IPv6 Address [auto]configuration
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 /*
16 * Changes:
17 *
18 * Janos Farkas : delete timer on ifdown
19 * <chexum@bankinf.banki.hu>
20 * Andi Kleen : kill double kfree on module
21 * unload.
22 * Maciej W. Rozycki : FDDI support
23 * sekiya@USAGI : Don't send too many RS
24 * packets.
25 * yoshfuji@USAGI : Fixed interval between DAD
26 * packets.
27 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
28 * address validation timer.
29 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
30 * support.
31 * Yuji SEKIYA @USAGI : Don't assign the same IPv6
32 * address on the same interface.
33 * YOSHIFUJI Hideaki @USAGI : ARCnet support
34 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
35 * seq_file.
36 * YOSHIFUJI Hideaki @USAGI : improved source address
37 * selection; consider scope,
38 * status etc.
39 */
40
41 #define pr_fmt(fmt) "IPv6: " fmt
42
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched/signal.h>
47 #include <linux/socket.h>
48 #include <linux/sockios.h>
49 #include <linux/net.h>
50 #include <linux/inet.h>
51 #include <linux/in6.h>
52 #include <linux/netdevice.h>
53 #include <linux/if_addr.h>
54 #include <linux/if_arp.h>
55 #include <linux/if_arcnet.h>
56 #include <linux/if_infiniband.h>
57 #include <linux/route.h>
58 #include <linux/inetdevice.h>
59 #include <linux/init.h>
60 #include <linux/slab.h>
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64 #include <linux/capability.h>
65 #include <linux/delay.h>
66 #include <linux/notifier.h>
67 #include <linux/string.h>
68 #include <linux/hash.h>
69
70 #include <net/net_namespace.h>
71 #include <net/sock.h>
72 #include <net/snmp.h>
73
74 #include <net/6lowpan.h>
75 #include <net/firewire.h>
76 #include <net/ipv6.h>
77 #include <net/protocol.h>
78 #include <net/ndisc.h>
79 #include <net/ip6_route.h>
80 #include <net/addrconf.h>
81 #include <net/tcp.h>
82 #include <net/ip.h>
83 #include <net/netlink.h>
84 #include <net/pkt_sched.h>
85 #include <net/l3mdev.h>
86 #include <linux/if_tunnel.h>
87 #include <linux/rtnetlink.h>
88 #include <linux/netconf.h>
89 #include <linux/random.h>
90 #include <linux/uaccess.h>
91 #include <asm/unaligned.h>
92
93 #include <linux/proc_fs.h>
94 #include <linux/seq_file.h>
95 #include <linux/export.h>
96
97 /* Set to 3 to get tracing... */
98 #define ACONF_DEBUG 2
99
100 #if ACONF_DEBUG >= 3
101 #define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
102 #else
103 #define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
104 #endif
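/*
 * With ACONF_DEBUG < 3 the "if (0)" form above compiles away to nothing,
 * while the format string and arguments are still type-checked.
 */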
105
106 #define INFINITY_LIFE_TIME 0xFFFFFFFF
107
108 #define IPV6_MAX_STRLEN \
109 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
110
111 static inline u32 cstamp_delta(unsigned long cstamp)
112 {
113 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
114 }
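/*
 * cstamp_delta() converts a jiffies timestamp into hundredths of a second
 * since boot (INITIAL_JIFFIES is subtracted to remove the jiffies start
 * offset); this is the unit used for the cstamp/tstamp fields of
 * struct ifa_cacheinfo when addresses are dumped over netlink.
 */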
115
116 static inline s32 rfc3315_s14_backoff_init(s32 irt)
117 {
118 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
119 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
120 do_div(tmp, 1000000);
121 return (s32)tmp;
122 }
123
124 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
125 {
126 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
127 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
128 do_div(tmp, 1000000);
129 if ((s32)tmp > mrt) {
130 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
131 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
132 do_div(tmp, 1000000);
133 }
134 return (s32)tmp;
135 }
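/*
 * Example of the resulting schedule, assuming the default router
 * solicitation settings (4 s initial interval, 3600 s maximum): the first
 * timeout is drawn from 3.6..4.4 s, each retransmission roughly doubles it
 * with +/-10% jitter, and once doubling would exceed the maximum the
 * timeout settles in the 3240..3960 s range.
 */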
136
137 #ifdef CONFIG_SYSCTL
138 static int addrconf_sysctl_register(struct inet6_dev *idev);
139 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
140 #else
141 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
142 {
143 return 0;
144 }
145
146 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
147 {
148 }
149 #endif
150
151 static void ipv6_regen_rndid(struct inet6_dev *idev);
152 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
153
154 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
155 static int ipv6_count_addresses(const struct inet6_dev *idev);
156 static int ipv6_generate_stable_address(struct in6_addr *addr,
157 u8 dad_count,
158 const struct inet6_dev *idev);
159
160 #define IN6_ADDR_HSIZE_SHIFT 8
161 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
162 /*
163 * Configured unicast address hash table
164 */
165 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
166 static DEFINE_SPINLOCK(addrconf_hash_lock);
167
168 static void addrconf_verify(void);
169 static void addrconf_verify_rtnl(void);
170 static void addrconf_verify_work(struct work_struct *);
171
172 static struct workqueue_struct *addrconf_wq;
173 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
174
175 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
176 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
177
178 static void addrconf_type_change(struct net_device *dev,
179 unsigned long event);
180 static int addrconf_ifdown(struct net_device *dev, int how);
181
182 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
183 int plen,
184 const struct net_device *dev,
185 u32 flags, u32 noflags);
186
187 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
188 static void addrconf_dad_work(struct work_struct *w);
189 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
190 bool send_na);
191 static void addrconf_dad_run(struct inet6_dev *idev);
192 static void addrconf_rs_timer(struct timer_list *t);
193 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
194 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
195
196 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
197 struct prefix_info *pinfo);
198
199 static struct ipv6_devconf ipv6_devconf __read_mostly = {
200 .forwarding = 0,
201 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
202 .mtu6 = IPV6_MIN_MTU,
203 .accept_ra = 1,
204 .accept_redirects = 1,
205 .autoconf = 1,
206 .force_mld_version = 0,
207 .mldv1_unsolicited_report_interval = 10 * HZ,
208 .mldv2_unsolicited_report_interval = HZ,
209 .dad_transmits = 1,
210 .rtr_solicits = MAX_RTR_SOLICITATIONS,
211 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
212 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
213 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
214 .use_tempaddr = 0,
215 .temp_valid_lft = TEMP_VALID_LIFETIME,
216 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
217 .regen_max_retry = REGEN_MAX_RETRY,
218 .max_desync_factor = MAX_DESYNC_FACTOR,
219 .max_addresses = IPV6_MAX_ADDRESSES,
220 .accept_ra_defrtr = 1,
221 .accept_ra_from_local = 0,
222 .accept_ra_min_hop_limit= 1,
223 .accept_ra_pinfo = 1,
224 #ifdef CONFIG_IPV6_ROUTER_PREF
225 .accept_ra_rtr_pref = 1,
226 .rtr_probe_interval = 60 * HZ,
227 #ifdef CONFIG_IPV6_ROUTE_INFO
228 .accept_ra_rt_info_min_plen = 0,
229 .accept_ra_rt_info_max_plen = 0,
230 #endif
231 #endif
232 .proxy_ndp = 0,
233 .accept_source_route = 0, /* we do not accept RH0 by default. */
234 .disable_ipv6 = 0,
235 .accept_dad = 0,
236 .suppress_frag_ndisc = 1,
237 .accept_ra_mtu = 1,
238 .stable_secret = {
239 .initialized = false,
240 },
241 .use_oif_addrs_only = 0,
242 .ignore_routes_with_linkdown = 0,
243 .keep_addr_on_down = 0,
244 .seg6_enabled = 0,
245 #ifdef CONFIG_IPV6_SEG6_HMAC
246 .seg6_require_hmac = 0,
247 #endif
248 .enhanced_dad = 1,
249 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
250 .disable_policy = 0,
251 };
252
253 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
254 .forwarding = 0,
255 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
256 .mtu6 = IPV6_MIN_MTU,
257 .accept_ra = 1,
258 .accept_redirects = 1,
259 .autoconf = 1,
260 .force_mld_version = 0,
261 .mldv1_unsolicited_report_interval = 10 * HZ,
262 .mldv2_unsolicited_report_interval = HZ,
263 .dad_transmits = 1,
264 .rtr_solicits = MAX_RTR_SOLICITATIONS,
265 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
266 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
267 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
268 .use_tempaddr = 0,
269 .temp_valid_lft = TEMP_VALID_LIFETIME,
270 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
271 .regen_max_retry = REGEN_MAX_RETRY,
272 .max_desync_factor = MAX_DESYNC_FACTOR,
273 .max_addresses = IPV6_MAX_ADDRESSES,
274 .accept_ra_defrtr = 1,
275 .accept_ra_from_local = 0,
276 .accept_ra_min_hop_limit= 1,
277 .accept_ra_pinfo = 1,
278 #ifdef CONFIG_IPV6_ROUTER_PREF
279 .accept_ra_rtr_pref = 1,
280 .rtr_probe_interval = 60 * HZ,
281 #ifdef CONFIG_IPV6_ROUTE_INFO
282 .accept_ra_rt_info_min_plen = 0,
283 .accept_ra_rt_info_max_plen = 0,
284 #endif
285 #endif
286 .proxy_ndp = 0,
287 .accept_source_route = 0, /* we do not accept RH0 by default. */
288 .disable_ipv6 = 0,
289 .accept_dad = 1,
290 .suppress_frag_ndisc = 1,
291 .accept_ra_mtu = 1,
292 .stable_secret = {
293 .initialized = false,
294 },
295 .use_oif_addrs_only = 0,
296 .ignore_routes_with_linkdown = 0,
297 .keep_addr_on_down = 0,
298 .seg6_enabled = 0,
299 #ifdef CONFIG_IPV6_SEG6_HMAC
300 .seg6_require_hmac = 0,
301 #endif
302 .enhanced_dad = 1,
303 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
304 .disable_policy = 0,
305 };
306
307 /* Check if link is ready: is it up and is a valid qdisc available */
308 static inline bool addrconf_link_ready(const struct net_device *dev)
309 {
310 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
311 }
312
313 static void addrconf_del_rs_timer(struct inet6_dev *idev)
314 {
315 if (del_timer(&idev->rs_timer))
316 __in6_dev_put(idev);
317 }
318
319 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
320 {
321 if (cancel_delayed_work(&ifp->dad_work))
322 __in6_ifa_put(ifp);
323 }
324
325 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
326 unsigned long when)
327 {
328 if (!timer_pending(&idev->rs_timer))
329 in6_dev_hold(idev);
330 mod_timer(&idev->rs_timer, jiffies + when);
331 }
332
333 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
334 unsigned long delay)
335 {
336 in6_ifa_hold(ifp);
337 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
338 in6_ifa_put(ifp);
339 }
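/*
 * Both helpers above keep exactly one reference per armed timer/work item:
 * addrconf_mod_rs_timer() only takes a new idev reference when the timer
 * was not already pending, and addrconf_mod_dad_work() drops its extra ifa
 * reference when mod_delayed_work() reports that dad_work was already
 * queued (the earlier queuing still holds one).
 */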
340
341 static int snmp6_alloc_dev(struct inet6_dev *idev)
342 {
343 int i;
344
345 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
346 if (!idev->stats.ipv6)
347 goto err_ip;
348
349 for_each_possible_cpu(i) {
350 struct ipstats_mib *addrconf_stats;
351 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
352 u64_stats_init(&addrconf_stats->syncp);
353 }
354
355
356 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
357 GFP_KERNEL);
358 if (!idev->stats.icmpv6dev)
359 goto err_icmp;
360 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
361 GFP_KERNEL);
362 if (!idev->stats.icmpv6msgdev)
363 goto err_icmpmsg;
364
365 return 0;
366
367 err_icmpmsg:
368 kfree(idev->stats.icmpv6dev);
369 err_icmp:
370 free_percpu(idev->stats.ipv6);
371 err_ip:
372 return -ENOMEM;
373 }
374
375 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
376 {
377 struct inet6_dev *ndev;
378 int err = -ENOMEM;
379
380 ASSERT_RTNL();
381
382 if (dev->mtu < IPV6_MIN_MTU)
383 return ERR_PTR(-EINVAL);
384
385 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
386 if (!ndev)
387 return ERR_PTR(err);
388
389 rwlock_init(&ndev->lock);
390 ndev->dev = dev;
391 INIT_LIST_HEAD(&ndev->addr_list);
392 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
393 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
394
395 if (ndev->cnf.stable_secret.initialized)
396 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
397 else
398 ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
399
400 ndev->cnf.mtu6 = dev->mtu;
401 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
402 if (!ndev->nd_parms) {
403 kfree(ndev);
404 return ERR_PTR(err);
405 }
406 if (ndev->cnf.forwarding)
407 dev_disable_lro(dev);
408 /* We refer to the device */
409 dev_hold(dev);
410
411 if (snmp6_alloc_dev(ndev) < 0) {
412 ADBG(KERN_WARNING
413 "%s: cannot allocate memory for statistics; dev=%s.\n",
414 __func__, dev->name);
415 neigh_parms_release(&nd_tbl, ndev->nd_parms);
416 dev_put(dev);
417 kfree(ndev);
418 return ERR_PTR(err);
419 }
420
421 if (snmp6_register_dev(ndev) < 0) {
422 ADBG(KERN_WARNING
423 "%s: cannot create /proc/net/dev_snmp6/%s\n",
424 __func__, dev->name);
425 goto err_release;
426 }
427
428 /* One reference from device. */
429 refcount_set(&ndev->refcnt, 1);
430
431 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
432 ndev->cnf.accept_dad = -1;
433
434 #if IS_ENABLED(CONFIG_IPV6_SIT)
435 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
436 pr_info("%s: Disabled Multicast RS\n", dev->name);
437 ndev->cnf.rtr_solicits = 0;
438 }
439 #endif
440
441 INIT_LIST_HEAD(&ndev->tempaddr_list);
442 ndev->desync_factor = U32_MAX;
443 if ((dev->flags&IFF_LOOPBACK) ||
444 dev->type == ARPHRD_TUNNEL ||
445 dev->type == ARPHRD_TUNNEL6 ||
446 dev->type == ARPHRD_SIT ||
447 dev->type == ARPHRD_NONE) {
448 ndev->cnf.use_tempaddr = -1;
449 } else
450 ipv6_regen_rndid(ndev);
451
452 ndev->token = in6addr_any;
453
454 if (netif_running(dev) && addrconf_link_ready(dev))
455 ndev->if_flags |= IF_READY;
456
457 ipv6_mc_init_dev(ndev);
458 ndev->tstamp = jiffies;
459 err = addrconf_sysctl_register(ndev);
460 if (err) {
461 ipv6_mc_destroy_dev(ndev);
462 snmp6_unregister_dev(ndev);
463 goto err_release;
464 }
465 /* protected by rtnl_lock */
466 rcu_assign_pointer(dev->ip6_ptr, ndev);
467
468 /* Join interface-local all-node multicast group */
469 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
470
471 /* Join all-node multicast group */
472 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
473
474 /* Join all-router multicast group if forwarding is set */
475 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
476 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
477
478 return ndev;
479
480 err_release:
481 neigh_parms_release(&nd_tbl, ndev->nd_parms);
482 ndev->dead = 1;
483 in6_dev_finish_destroy(ndev);
484 return ERR_PTR(err);
485 }
486
487 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
488 {
489 struct inet6_dev *idev;
490
491 ASSERT_RTNL();
492
493 idev = __in6_dev_get(dev);
494 if (!idev) {
495 idev = ipv6_add_dev(dev);
496 if (IS_ERR(idev))
497 return NULL;
498 }
499
500 if (dev->flags&IFF_UP)
501 ipv6_mc_up(idev);
502 return idev;
503 }
504
505 static int inet6_netconf_msgsize_devconf(int type)
506 {
507 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
508 + nla_total_size(4); /* NETCONFA_IFINDEX */
509 bool all = false;
510
511 if (type == NETCONFA_ALL)
512 all = true;
513
514 if (all || type == NETCONFA_FORWARDING)
515 size += nla_total_size(4);
516 #ifdef CONFIG_IPV6_MROUTE
517 if (all || type == NETCONFA_MC_FORWARDING)
518 size += nla_total_size(4);
519 #endif
520 if (all || type == NETCONFA_PROXY_NEIGH)
521 size += nla_total_size(4);
522
523 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
524 size += nla_total_size(4);
525
526 return size;
527 }
528
529 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
530 struct ipv6_devconf *devconf, u32 portid,
531 u32 seq, int event, unsigned int flags,
532 int type)
533 {
534 struct nlmsghdr *nlh;
535 struct netconfmsg *ncm;
536 bool all = false;
537
538 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
539 flags);
540 if (!nlh)
541 return -EMSGSIZE;
542
543 if (type == NETCONFA_ALL)
544 all = true;
545
546 ncm = nlmsg_data(nlh);
547 ncm->ncm_family = AF_INET6;
548
549 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
550 goto nla_put_failure;
551
552 if (!devconf)
553 goto out;
554
555 if ((all || type == NETCONFA_FORWARDING) &&
556 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
557 goto nla_put_failure;
558 #ifdef CONFIG_IPV6_MROUTE
559 if ((all || type == NETCONFA_MC_FORWARDING) &&
560 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
561 devconf->mc_forwarding) < 0)
562 goto nla_put_failure;
563 #endif
564 if ((all || type == NETCONFA_PROXY_NEIGH) &&
565 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
566 goto nla_put_failure;
567
568 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
569 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
570 devconf->ignore_routes_with_linkdown) < 0)
571 goto nla_put_failure;
572
573 out:
574 nlmsg_end(skb, nlh);
575 return 0;
576
577 nla_put_failure:
578 nlmsg_cancel(skb, nlh);
579 return -EMSGSIZE;
580 }
581
582 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
583 int ifindex, struct ipv6_devconf *devconf)
584 {
585 struct sk_buff *skb;
586 int err = -ENOBUFS;
587
588 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
589 if (!skb)
590 goto errout;
591
592 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
593 event, 0, type);
594 if (err < 0) {
595 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
596 WARN_ON(err == -EMSGSIZE);
597 kfree_skb(skb);
598 goto errout;
599 }
600 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
601 return;
602 errout:
603 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
604 }
605
606 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
607 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
608 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
609 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
610 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
611 };
612
613 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
614 struct nlmsghdr *nlh,
615 struct netlink_ext_ack *extack)
616 {
617 struct net *net = sock_net(in_skb->sk);
618 struct nlattr *tb[NETCONFA_MAX+1];
619 struct inet6_dev *in6_dev = NULL;
620 struct net_device *dev = NULL;
621 struct netconfmsg *ncm;
622 struct sk_buff *skb;
623 struct ipv6_devconf *devconf;
624 int ifindex;
625 int err;
626
627 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
628 devconf_ipv6_policy, extack);
629 if (err < 0)
630 return err;
631
632 if (!tb[NETCONFA_IFINDEX])
633 return -EINVAL;
634
635 err = -EINVAL;
636 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
637 switch (ifindex) {
638 case NETCONFA_IFINDEX_ALL:
639 devconf = net->ipv6.devconf_all;
640 break;
641 case NETCONFA_IFINDEX_DEFAULT:
642 devconf = net->ipv6.devconf_dflt;
643 break;
644 default:
645 dev = dev_get_by_index(net, ifindex);
646 if (!dev)
647 return -EINVAL;
648 in6_dev = in6_dev_get(dev);
649 if (!in6_dev)
650 goto errout;
651 devconf = &in6_dev->cnf;
652 break;
653 }
654
655 err = -ENOBUFS;
656 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
657 if (!skb)
658 goto errout;
659
660 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
661 NETLINK_CB(in_skb).portid,
662 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
663 NETCONFA_ALL);
664 if (err < 0) {
665 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
666 WARN_ON(err == -EMSGSIZE);
667 kfree_skb(skb);
668 goto errout;
669 }
670 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
671 errout:
672 if (in6_dev)
673 in6_dev_put(in6_dev);
674 if (dev)
675 dev_put(dev);
676 return err;
677 }
678
679 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
680 struct netlink_callback *cb)
681 {
682 struct net *net = sock_net(skb->sk);
683 int h, s_h;
684 int idx, s_idx;
685 struct net_device *dev;
686 struct inet6_dev *idev;
687 struct hlist_head *head;
688
689 s_h = cb->args[0];
690 s_idx = idx = cb->args[1];
691
692 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
693 idx = 0;
694 head = &net->dev_index_head[h];
695 rcu_read_lock();
696 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
697 net->dev_base_seq;
698 hlist_for_each_entry_rcu(dev, head, index_hlist) {
699 if (idx < s_idx)
700 goto cont;
701 idev = __in6_dev_get(dev);
702 if (!idev)
703 goto cont;
704
705 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
706 &idev->cnf,
707 NETLINK_CB(cb->skb).portid,
708 cb->nlh->nlmsg_seq,
709 RTM_NEWNETCONF,
710 NLM_F_MULTI,
711 NETCONFA_ALL) < 0) {
712 rcu_read_unlock();
713 goto done;
714 }
715 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
716 cont:
717 idx++;
718 }
719 rcu_read_unlock();
720 }
721 if (h == NETDEV_HASHENTRIES) {
722 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
723 net->ipv6.devconf_all,
724 NETLINK_CB(cb->skb).portid,
725 cb->nlh->nlmsg_seq,
726 RTM_NEWNETCONF, NLM_F_MULTI,
727 NETCONFA_ALL) < 0)
728 goto done;
729 else
730 h++;
731 }
732 if (h == NETDEV_HASHENTRIES + 1) {
733 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
734 net->ipv6.devconf_dflt,
735 NETLINK_CB(cb->skb).portid,
736 cb->nlh->nlmsg_seq,
737 RTM_NEWNETCONF, NLM_F_MULTI,
738 NETCONFA_ALL) < 0)
739 goto done;
740 else
741 h++;
742 }
743 done:
744 cb->args[0] = h;
745 cb->args[1] = idx;
746
747 return skb->len;
748 }
749
750 #ifdef CONFIG_SYSCTL
751 static void dev_forward_change(struct inet6_dev *idev)
752 {
753 struct net_device *dev;
754 struct inet6_ifaddr *ifa;
755
756 if (!idev)
757 return;
758 dev = idev->dev;
759 if (idev->cnf.forwarding)
760 dev_disable_lro(dev);
761 if (dev->flags & IFF_MULTICAST) {
762 if (idev->cnf.forwarding) {
763 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
764 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
765 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
766 } else {
767 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
768 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
769 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
770 }
771 }
772
773 list_for_each_entry(ifa, &idev->addr_list, if_list) {
774 if (ifa->flags&IFA_F_TENTATIVE)
775 continue;
776 if (idev->cnf.forwarding)
777 addrconf_join_anycast(ifa);
778 else
779 addrconf_leave_anycast(ifa);
780 }
781 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
782 NETCONFA_FORWARDING,
783 dev->ifindex, &idev->cnf);
784 }
785
786
787 static void addrconf_forward_change(struct net *net, __s32 newf)
788 {
789 struct net_device *dev;
790 struct inet6_dev *idev;
791
792 for_each_netdev(net, dev) {
793 idev = __in6_dev_get(dev);
794 if (idev) {
795 int changed = (!idev->cnf.forwarding) ^ (!newf);
796 idev->cnf.forwarding = newf;
797 if (changed)
798 dev_forward_change(idev);
799 }
800 }
801 }
802
803 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
804 {
805 struct net *net;
806 int old;
807
808 if (!rtnl_trylock())
809 return restart_syscall();
810
811 net = (struct net *)table->extra2;
812 old = *p;
813 *p = newf;
814
815 if (p == &net->ipv6.devconf_dflt->forwarding) {
816 if ((!newf) ^ (!old))
817 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
818 NETCONFA_FORWARDING,
819 NETCONFA_IFINDEX_DEFAULT,
820 net->ipv6.devconf_dflt);
821 rtnl_unlock();
822 return 0;
823 }
824
825 if (p == &net->ipv6.devconf_all->forwarding) {
826 int old_dflt = net->ipv6.devconf_dflt->forwarding;
827
828 net->ipv6.devconf_dflt->forwarding = newf;
829 if ((!newf) ^ (!old_dflt))
830 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
831 NETCONFA_FORWARDING,
832 NETCONFA_IFINDEX_DEFAULT,
833 net->ipv6.devconf_dflt);
834
835 addrconf_forward_change(net, newf);
836 if ((!newf) ^ (!old))
837 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
838 NETCONFA_FORWARDING,
839 NETCONFA_IFINDEX_ALL,
840 net->ipv6.devconf_all);
841 } else if ((!newf) ^ (!old))
842 dev_forward_change((struct inet6_dev *)table->extra1);
843 rtnl_unlock();
844
845 if (newf)
846 rt6_purge_dflt_routers(net);
847 return 1;
848 }
849
850 static void addrconf_linkdown_change(struct net *net, __s32 newf)
851 {
852 struct net_device *dev;
853 struct inet6_dev *idev;
854
855 for_each_netdev(net, dev) {
856 idev = __in6_dev_get(dev);
857 if (idev) {
858 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
859
860 idev->cnf.ignore_routes_with_linkdown = newf;
861 if (changed)
862 inet6_netconf_notify_devconf(dev_net(dev),
863 RTM_NEWNETCONF,
864 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
865 dev->ifindex,
866 &idev->cnf);
867 }
868 }
869 }
870
871 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
872 {
873 struct net *net;
874 int old;
875
876 if (!rtnl_trylock())
877 return restart_syscall();
878
879 net = (struct net *)table->extra2;
880 old = *p;
881 *p = newf;
882
883 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
884 if ((!newf) ^ (!old))
885 inet6_netconf_notify_devconf(net,
886 RTM_NEWNETCONF,
887 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
888 NETCONFA_IFINDEX_DEFAULT,
889 net->ipv6.devconf_dflt);
890 rtnl_unlock();
891 return 0;
892 }
893
894 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
895 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
896 addrconf_linkdown_change(net, newf);
897 if ((!newf) ^ (!old))
898 inet6_netconf_notify_devconf(net,
899 RTM_NEWNETCONF,
900 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
901 NETCONFA_IFINDEX_ALL,
902 net->ipv6.devconf_all);
903 }
904 rtnl_unlock();
905
906 return 1;
907 }
908
909 #endif
910
911 /* Nobody refers to this ifaddr, destroy it */
912 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
913 {
914 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
915
916 #ifdef NET_REFCNT_DEBUG
917 pr_debug("%s\n", __func__);
918 #endif
919
920 in6_dev_put(ifp->idev);
921
922 if (cancel_delayed_work(&ifp->dad_work))
923 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
924 ifp);
925
926 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
927 pr_warn("Freeing alive inet6 address %p\n", ifp);
928 return;
929 }
930 ip6_rt_put(ifp->rt);
931
932 kfree_rcu(ifp, rcu);
933 }
934
935 static void
936 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
937 {
938 struct list_head *p;
939 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
940
941 /*
942 * Each device address list is sorted in order of scope -
943 * global before linklocal.
944 */
945 list_for_each(p, &idev->addr_list) {
946 struct inet6_ifaddr *ifa
947 = list_entry(p, struct inet6_ifaddr, if_list);
948 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
949 break;
950 }
951
952 list_add_tail_rcu(&ifp->if_list, p);
953 }
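/*
 * Because of this ordering a device's link-local address ends up near the
 * tail of addr_list; __ipv6_get_lladdr() relies on that and walks the list
 * in reverse when looking for a link-local source address.
 */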
954
955 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
956 {
957 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
958
959 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
960 }
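/*
 * hash_32() folds the mixed value down to IN6_ADDR_HSIZE_SHIFT (8) bits,
 * i.e. one of the 256 inet6_addr_lst buckets; net_hash_mix() perturbs the
 * value per namespace so identical addresses in different netns do not all
 * land in the same bucket.
 */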
961
962 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
963 struct net_device *dev, unsigned int hash)
964 {
965 struct inet6_ifaddr *ifp;
966
967 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
968 if (!net_eq(dev_net(ifp->idev->dev), net))
969 continue;
970 if (ipv6_addr_equal(&ifp->addr, addr)) {
971 if (!dev || ifp->idev->dev == dev)
972 return true;
973 }
974 }
975 return false;
976 }
977
978 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
979 {
980 unsigned int hash = inet6_addr_hash(dev_net(dev), &ifa->addr);
981 int err = 0;
982
983 spin_lock(&addrconf_hash_lock);
984
985 /* Ignore adding duplicate addresses on an interface */
986 if (ipv6_chk_same_addr(dev_net(dev), &ifa->addr, dev, hash)) {
987 ADBG("ipv6_add_addr: already assigned\n");
988 err = -EEXIST;
989 } else {
990 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
991 }
992
993 spin_unlock(&addrconf_hash_lock);
994
995 return err;
996 }
997
998 /* On success it returns ifp with increased reference count */
999
1000 static struct inet6_ifaddr *
1001 ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
1002 const struct in6_addr *peer_addr, int pfxlen,
1003 int scope, u32 flags, u32 valid_lft, u32 prefered_lft,
1004 bool can_block, struct netlink_ext_ack *extack)
1005 {
1006 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1007 struct net *net = dev_net(idev->dev);
1008 struct inet6_ifaddr *ifa = NULL;
1009 struct rt6_info *rt = NULL;
1010 int err = 0;
1011 int addr_type = ipv6_addr_type(addr);
1012
1013 if (addr_type == IPV6_ADDR_ANY ||
1014 addr_type & IPV6_ADDR_MULTICAST ||
1015 (!(idev->dev->flags & IFF_LOOPBACK) &&
1016 addr_type & IPV6_ADDR_LOOPBACK))
1017 return ERR_PTR(-EADDRNOTAVAIL);
1018
1019 if (idev->dead) {
1020 err = -ENODEV; /*XXX*/
1021 goto out;
1022 }
1023
1024 if (idev->cnf.disable_ipv6) {
1025 err = -EACCES;
1026 goto out;
1027 }
1028
1029 /* validator notifier needs to be blocking;
1030 * do not call in atomic context
1031 */
1032 if (can_block) {
1033 struct in6_validator_info i6vi = {
1034 .i6vi_addr = *addr,
1035 .i6vi_dev = idev,
1036 .extack = extack,
1037 };
1038
1039 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1040 err = notifier_to_errno(err);
1041 if (err < 0)
1042 goto out;
1043 }
1044
1045 ifa = kzalloc(sizeof(*ifa), gfp_flags);
1046 if (!ifa) {
1047 ADBG("ipv6_add_addr: malloc failed\n");
1048 err = -ENOBUFS;
1049 goto out;
1050 }
1051
1052 rt = addrconf_dst_alloc(idev, addr, false);
1053 if (IS_ERR(rt)) {
1054 err = PTR_ERR(rt);
1055 rt = NULL;
1056 goto out;
1057 }
1058
1059 if (net->ipv6.devconf_all->disable_policy ||
1060 idev->cnf.disable_policy)
1061 rt->dst.flags |= DST_NOPOLICY;
1062
1063 neigh_parms_data_state_setall(idev->nd_parms);
1064
1065 ifa->addr = *addr;
1066 if (peer_addr)
1067 ifa->peer_addr = *peer_addr;
1068
1069 spin_lock_init(&ifa->lock);
1070 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1071 INIT_HLIST_NODE(&ifa->addr_lst);
1072 ifa->scope = scope;
1073 ifa->prefix_len = pfxlen;
1074 ifa->flags = flags;
1075 /* No need to add the TENTATIVE flag for addresses with NODAD */
1076 if (!(flags & IFA_F_NODAD))
1077 ifa->flags |= IFA_F_TENTATIVE;
1078 ifa->valid_lft = valid_lft;
1079 ifa->prefered_lft = prefered_lft;
1080 ifa->cstamp = ifa->tstamp = jiffies;
1081 ifa->tokenized = false;
1082
1083 ifa->rt = rt;
1084
1085 ifa->idev = idev;
1086 in6_dev_hold(idev);
1087
1088 /* For caller */
1089 refcount_set(&ifa->refcnt, 1);
1090
1091 rcu_read_lock_bh();
1092
1093 err = ipv6_add_addr_hash(idev->dev, ifa);
1094 if (err < 0) {
1095 rcu_read_unlock_bh();
1096 goto out;
1097 }
1098
1099 write_lock(&idev->lock);
1100
1101 /* Add to inet6_dev unicast addr list. */
1102 ipv6_link_dev_addr(idev, ifa);
1103
1104 if (ifa->flags&IFA_F_TEMPORARY) {
1105 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1106 in6_ifa_hold(ifa);
1107 }
1108
1109 in6_ifa_hold(ifa);
1110 write_unlock(&idev->lock);
1111
1112 rcu_read_unlock_bh();
1113
1114 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1115 out:
1116 if (unlikely(err < 0)) {
1117 if (rt)
1118 ip6_rt_put(rt);
1119 if (ifa) {
1120 if (ifa->idev)
1121 in6_dev_put(ifa->idev);
1122 kfree(ifa);
1123 }
1124 ifa = ERR_PTR(err);
1125 }
1126
1127 return ifa;
1128 }
1129
1130 enum cleanup_prefix_rt_t {
1131 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1132 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1133 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1134 };
1135
1136 /*
1137 * Check, whether the prefix for ifp would still need a prefix route
1138 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1139 * constants.
1140 *
1141 * 1) we don't purge prefix if address was not permanent.
1142 * prefix is managed by its own lifetime.
1143 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1144 * 3) if there are no addresses, delete prefix.
1145 * 4) if there are still other permanent address(es),
1146 * corresponding prefix is still permanent.
1147 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1148 * don't purge the prefix, assume user space is managing it.
1149 * 6) otherwise, update prefix lifetime to the
1150 * longest valid lifetime among the corresponding
1151 * addresses on the device.
1152 * Note: subsequent RA will update lifetime.
1153 **/
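/*
 * Example: a router advertises 2001:db8:1::/64 and the interface holds one
 * autoconfigured address plus one temporary address from that prefix.
 * Deleting either of them hits case 6 above: the /64 prefix route is kept
 * and its expiry is clamped to the longest valid lifetime still held by the
 * remaining address.  If instead a permanent or IFA_F_NOPREFIXROUTE address
 * from the same prefix remains, cases 4/5 apply and the route is untouched.
 */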
1154 static enum cleanup_prefix_rt_t
1155 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1156 {
1157 struct inet6_ifaddr *ifa;
1158 struct inet6_dev *idev = ifp->idev;
1159 unsigned long lifetime;
1160 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1161
1162 *expires = jiffies;
1163
1164 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1165 if (ifa == ifp)
1166 continue;
1167 if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1168 ifp->prefix_len))
1169 continue;
1170 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1171 return CLEANUP_PREFIX_RT_NOP;
1172
1173 action = CLEANUP_PREFIX_RT_EXPIRE;
1174
1175 spin_lock(&ifa->lock);
1176
1177 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1178 /*
1179 * Note: Because this address is
1180 * not permanent, lifetime <
1181 * LONG_MAX / HZ here.
1182 */
1183 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1184 *expires = ifa->tstamp + lifetime * HZ;
1185 spin_unlock(&ifa->lock);
1186 }
1187
1188 return action;
1189 }
1190
1191 static void
1192 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
1193 {
1194 struct rt6_info *rt;
1195
1196 rt = addrconf_get_prefix_route(&ifp->addr,
1197 ifp->prefix_len,
1198 ifp->idev->dev,
1199 0, RTF_GATEWAY | RTF_DEFAULT);
1200 if (rt) {
1201 if (del_rt)
1202 ip6_del_rt(rt);
1203 else {
1204 if (!(rt->rt6i_flags & RTF_EXPIRES))
1205 rt6_set_expires(rt, expires);
1206 ip6_rt_put(rt);
1207 }
1208 }
1209 }
1210
1211
1212 /* This function wants to get referenced ifp and releases it before return */
1213
1214 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1215 {
1216 int state;
1217 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1218 unsigned long expires;
1219
1220 ASSERT_RTNL();
1221
1222 spin_lock_bh(&ifp->lock);
1223 state = ifp->state;
1224 ifp->state = INET6_IFADDR_STATE_DEAD;
1225 spin_unlock_bh(&ifp->lock);
1226
1227 if (state == INET6_IFADDR_STATE_DEAD)
1228 goto out;
1229
1230 spin_lock_bh(&addrconf_hash_lock);
1231 hlist_del_init_rcu(&ifp->addr_lst);
1232 spin_unlock_bh(&addrconf_hash_lock);
1233
1234 write_lock_bh(&ifp->idev->lock);
1235
1236 if (ifp->flags&IFA_F_TEMPORARY) {
1237 list_del(&ifp->tmp_list);
1238 if (ifp->ifpub) {
1239 in6_ifa_put(ifp->ifpub);
1240 ifp->ifpub = NULL;
1241 }
1242 __in6_ifa_put(ifp);
1243 }
1244
1245 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1246 action = check_cleanup_prefix_route(ifp, &expires);
1247
1248 list_del_rcu(&ifp->if_list);
1249 __in6_ifa_put(ifp);
1250
1251 write_unlock_bh(&ifp->idev->lock);
1252
1253 addrconf_del_dad_work(ifp);
1254
1255 ipv6_ifa_notify(RTM_DELADDR, ifp);
1256
1257 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1258
1259 if (action != CLEANUP_PREFIX_RT_NOP) {
1260 cleanup_prefix_route(ifp, expires,
1261 action == CLEANUP_PREFIX_RT_DEL);
1262 }
1263
1264 /* clean up prefsrc entries */
1265 rt6_remove_prefsrc(ifp);
1266 out:
1267 in6_ifa_put(ifp);
1268 }
1269
1270 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp,
1271 struct inet6_ifaddr *ift,
1272 bool block)
1273 {
1274 struct inet6_dev *idev = ifp->idev;
1275 struct in6_addr addr, *tmpaddr;
1276 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
1277 unsigned long regen_advance;
1278 int tmp_plen;
1279 int ret = 0;
1280 u32 addr_flags;
1281 unsigned long now = jiffies;
1282 long max_desync_factor;
1283 s32 cnf_temp_preferred_lft;
1284
1285 write_lock_bh(&idev->lock);
1286 if (ift) {
1287 spin_lock_bh(&ift->lock);
1288 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
1289 spin_unlock_bh(&ift->lock);
1290 tmpaddr = &addr;
1291 } else {
1292 tmpaddr = NULL;
1293 }
1294 retry:
1295 in6_dev_hold(idev);
1296 if (idev->cnf.use_tempaddr <= 0) {
1297 write_unlock_bh(&idev->lock);
1298 pr_info("%s: use_tempaddr is disabled\n", __func__);
1299 in6_dev_put(idev);
1300 ret = -1;
1301 goto out;
1302 }
1303 spin_lock_bh(&ifp->lock);
1304 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1305 idev->cnf.use_tempaddr = -1; /*XXX*/
1306 spin_unlock_bh(&ifp->lock);
1307 write_unlock_bh(&idev->lock);
1308 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1309 __func__);
1310 in6_dev_put(idev);
1311 ret = -1;
1312 goto out;
1313 }
1314 in6_ifa_hold(ifp);
1315 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1316 ipv6_try_regen_rndid(idev, tmpaddr);
1317 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1318 age = (now - ifp->tstamp) / HZ;
1319
1320 regen_advance = idev->cnf.regen_max_retry *
1321 idev->cnf.dad_transmits *
1322 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1323
1324 /* recalculate max_desync_factor each time and update
1325 * idev->desync_factor if it's larger
1326 */
1327 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1328 max_desync_factor = min_t(__u32,
1329 idev->cnf.max_desync_factor,
1330 cnf_temp_preferred_lft - regen_advance);
1331
1332 if (unlikely(idev->desync_factor > max_desync_factor)) {
1333 if (max_desync_factor > 0) {
1334 get_random_bytes(&idev->desync_factor,
1335 sizeof(idev->desync_factor));
1336 idev->desync_factor %= max_desync_factor;
1337 } else {
1338 idev->desync_factor = 0;
1339 }
1340 }
1341
1342 tmp_valid_lft = min_t(__u32,
1343 ifp->valid_lft,
1344 idev->cnf.temp_valid_lft + age);
1345 tmp_prefered_lft = cnf_temp_preferred_lft + age -
1346 idev->desync_factor;
1347 tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
1348 tmp_plen = ifp->prefix_len;
1349 tmp_tstamp = ifp->tstamp;
1350 spin_unlock_bh(&ifp->lock);
1351
1352 write_unlock_bh(&idev->lock);
1353
1354 /* A temporary address is created only if this calculated Preferred
1355 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1356 * an implementation must not create a temporary address with a zero
1357 * Preferred Lifetime.
1358 * Use age calculation as in addrconf_verify to avoid unnecessary
1359 * temporary addresses being generated.
1360 */
1361 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1362 if (tmp_prefered_lft <= regen_advance + age) {
1363 in6_ifa_put(ifp);
1364 in6_dev_put(idev);
1365 ret = -1;
1366 goto out;
1367 }
1368
1369 addr_flags = IFA_F_TEMPORARY;
1370 /* set in addrconf_prefix_rcv() */
1371 if (ifp->flags & IFA_F_OPTIMISTIC)
1372 addr_flags |= IFA_F_OPTIMISTIC;
1373
1374 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1375 ipv6_addr_scope(&addr), addr_flags,
1376 tmp_valid_lft, tmp_prefered_lft, block, NULL);
1377 if (IS_ERR(ift)) {
1378 in6_ifa_put(ifp);
1379 in6_dev_put(idev);
1380 pr_info("%s: retry temporary address regeneration\n", __func__);
1381 tmpaddr = &addr;
1382 write_lock_bh(&idev->lock);
1383 goto retry;
1384 }
1385
1386 spin_lock_bh(&ift->lock);
1387 ift->ifpub = ifp;
1388 ift->cstamp = now;
1389 ift->tstamp = tmp_tstamp;
1390 spin_unlock_bh(&ift->lock);
1391
1392 addrconf_dad_start(ift);
1393 in6_ifa_put(ift);
1394 in6_dev_put(idev);
1395 out:
1396 return ret;
1397 }
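/*
 * With the default settings (temp_valid_lft = 7 days, temp_prefered_lft =
 * 1 day, regen_max_retry = 3, dad_transmits = 1 and a 1 s neighbour
 * discovery retransmit timer) regen_advance works out to about 3 seconds,
 * so a freshly created temporary address stays preferred for roughly one
 * day minus the random desync factor (and never longer than the public
 * address it is derived from) before a replacement is generated.
 */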
1398
1399 /*
1400 * Choose an appropriate source address (RFC3484)
1401 */
1402 enum {
1403 IPV6_SADDR_RULE_INIT = 0,
1404 IPV6_SADDR_RULE_LOCAL,
1405 IPV6_SADDR_RULE_SCOPE,
1406 IPV6_SADDR_RULE_PREFERRED,
1407 #ifdef CONFIG_IPV6_MIP6
1408 IPV6_SADDR_RULE_HOA,
1409 #endif
1410 IPV6_SADDR_RULE_OIF,
1411 IPV6_SADDR_RULE_LABEL,
1412 IPV6_SADDR_RULE_PRIVACY,
1413 IPV6_SADDR_RULE_ORCHID,
1414 IPV6_SADDR_RULE_PREFIX,
1415 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1416 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1417 #endif
1418 IPV6_SADDR_RULE_MAX
1419 };
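/*
 * The rules are evaluated in the order listed above.  __ipv6_dev_get_saddr()
 * compares two candidates rule by rule and stops at the first rule where
 * their scores differ, so a later rule can only break ties among addresses
 * that are equal on every earlier rule.
 */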
1420
1421 struct ipv6_saddr_score {
1422 int rule;
1423 int addr_type;
1424 struct inet6_ifaddr *ifa;
1425 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1426 int scopedist;
1427 int matchlen;
1428 };
1429
1430 struct ipv6_saddr_dst {
1431 const struct in6_addr *addr;
1432 int ifindex;
1433 int scope;
1434 int label;
1435 unsigned int prefs;
1436 };
1437
1438 static inline int ipv6_saddr_preferred(int type)
1439 {
1440 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1441 return 1;
1442 return 0;
1443 }
1444
1445 static bool ipv6_use_optimistic_addr(struct net *net,
1446 struct inet6_dev *idev)
1447 {
1448 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1449 if (!idev)
1450 return false;
1451 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1452 return false;
1453 if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1454 return false;
1455
1456 return true;
1457 #else
1458 return false;
1459 #endif
1460 }
1461
1462 static int ipv6_get_saddr_eval(struct net *net,
1463 struct ipv6_saddr_score *score,
1464 struct ipv6_saddr_dst *dst,
1465 int i)
1466 {
1467 int ret;
1468
1469 if (i <= score->rule) {
1470 switch (i) {
1471 case IPV6_SADDR_RULE_SCOPE:
1472 ret = score->scopedist;
1473 break;
1474 case IPV6_SADDR_RULE_PREFIX:
1475 ret = score->matchlen;
1476 break;
1477 default:
1478 ret = !!test_bit(i, score->scorebits);
1479 }
1480 goto out;
1481 }
1482
1483 switch (i) {
1484 case IPV6_SADDR_RULE_INIT:
1485 /* Rule 0: remember if hiscore is not ready yet */
1486 ret = !!score->ifa;
1487 break;
1488 case IPV6_SADDR_RULE_LOCAL:
1489 /* Rule 1: Prefer same address */
1490 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1491 break;
1492 case IPV6_SADDR_RULE_SCOPE:
1493 /* Rule 2: Prefer appropriate scope
1494 *
1495 * ret
1496 * ^
1497 * -1 | d 15
1498 * ---+--+-+---> scope
1499 * |
1500 * | d is scope of the destination.
1501 * B-d | \
1502 * | \ <- smaller scope is better if
1503 * B-15 | \ if scope is enough for destination.
1504 * | ret = B - scope (-1 <= d <= scope <= 15).
1505 * d-C-1 | /
1506 * |/ <- greater is better
1507 * -C / if scope is not enough for destination.
1508 * /| ret = scope - C (-1 <= d < scope <= 15).
1509 *
1510 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1511 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1512 * Assume B = 0 and we get C > 29.
1513 */
1514 ret = __ipv6_addr_src_scope(score->addr_type);
1515 if (ret >= dst->scope)
1516 ret = -ret;
1517 else
1518 ret -= 128; /* 30 is enough */
1519 score->scopedist = ret;
1520 break;
1521 case IPV6_SADDR_RULE_PREFERRED:
1522 {
1523 /* Rule 3: Avoid deprecated and optimistic addresses */
1524 u8 avoid = IFA_F_DEPRECATED;
1525
1526 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1527 avoid |= IFA_F_OPTIMISTIC;
1528 ret = ipv6_saddr_preferred(score->addr_type) ||
1529 !(score->ifa->flags & avoid);
1530 break;
1531 }
1532 #ifdef CONFIG_IPV6_MIP6
1533 case IPV6_SADDR_RULE_HOA:
1534 {
1535 /* Rule 4: Prefer home address */
1536 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1537 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1538 break;
1539 }
1540 #endif
1541 case IPV6_SADDR_RULE_OIF:
1542 /* Rule 5: Prefer outgoing interface */
1543 ret = (!dst->ifindex ||
1544 dst->ifindex == score->ifa->idev->dev->ifindex);
1545 break;
1546 case IPV6_SADDR_RULE_LABEL:
1547 /* Rule 6: Prefer matching label */
1548 ret = ipv6_addr_label(net,
1549 &score->ifa->addr, score->addr_type,
1550 score->ifa->idev->dev->ifindex) == dst->label;
1551 break;
1552 case IPV6_SADDR_RULE_PRIVACY:
1553 {
1554 /* Rule 7: Prefer public address
1555 * Note: prefer temporary address if use_tempaddr >= 2
1556 */
1557 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1558 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1559 score->ifa->idev->cnf.use_tempaddr >= 2;
1560 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1561 break;
1562 }
1563 case IPV6_SADDR_RULE_ORCHID:
1564 /* Rule 8-: Prefer ORCHID vs ORCHID or
1565 * non-ORCHID vs non-ORCHID
1566 */
1567 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1568 ipv6_addr_orchid(dst->addr));
1569 break;
1570 case IPV6_SADDR_RULE_PREFIX:
1571 /* Rule 8: Use longest matching prefix */
1572 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1573 if (ret > score->ifa->prefix_len)
1574 ret = score->ifa->prefix_len;
1575 score->matchlen = ret;
1576 break;
1577 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1578 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1579 /* Optimistic addresses still have lower precedence than other
1580 * preferred addresses.
1581 */
1582 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1583 break;
1584 #endif
1585 default:
1586 ret = 0;
1587 }
1588
1589 if (ret)
1590 __set_bit(i, score->scorebits);
1591 score->rule = i;
1592 out:
1593 return ret;
1594 }
1595
1596 static int __ipv6_dev_get_saddr(struct net *net,
1597 struct ipv6_saddr_dst *dst,
1598 struct inet6_dev *idev,
1599 struct ipv6_saddr_score *scores,
1600 int hiscore_idx)
1601 {
1602 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1603
1604 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1605 int i;
1606
1607 /*
1608 * - Tentative Address (RFC2462 section 5.4)
1609 * - A tentative address is not considered
1610 * "assigned to an interface" in the traditional
1611 * sense, unless it is also flagged as optimistic.
1612 * - Candidate Source Address (section 4)
1613 * - In any case, anycast addresses, multicast
1614 * addresses, and the unspecified address MUST
1615 * NOT be included in a candidate set.
1616 */
1617 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1618 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1619 continue;
1620
1621 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1622
1623 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1624 score->addr_type & IPV6_ADDR_MULTICAST)) {
1625 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1626 idev->dev->name);
1627 continue;
1628 }
1629
1630 score->rule = -1;
1631 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1632
1633 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1634 int minihiscore, miniscore;
1635
1636 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1637 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1638
1639 if (minihiscore > miniscore) {
1640 if (i == IPV6_SADDR_RULE_SCOPE &&
1641 score->scopedist > 0) {
1642 /*
1643 * special case:
1644 * each remaining entry
1645 * has too small (not enough)
1646 * scope, because ifa entries
1647 * are sorted by their scope
1648 * values.
1649 */
1650 goto out;
1651 }
1652 break;
1653 } else if (minihiscore < miniscore) {
1654 swap(hiscore, score);
1655 hiscore_idx = 1 - hiscore_idx;
1656
1657 /* restore our iterator */
1658 score->ifa = hiscore->ifa;
1659
1660 break;
1661 }
1662 }
1663 }
1664 out:
1665 return hiscore_idx;
1666 }
1667
1668 static int ipv6_get_saddr_master(struct net *net,
1669 const struct net_device *dst_dev,
1670 const struct net_device *master,
1671 struct ipv6_saddr_dst *dst,
1672 struct ipv6_saddr_score *scores,
1673 int hiscore_idx)
1674 {
1675 struct inet6_dev *idev;
1676
1677 idev = __in6_dev_get(dst_dev);
1678 if (idev)
1679 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1680 scores, hiscore_idx);
1681
1682 idev = __in6_dev_get(master);
1683 if (idev)
1684 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1685 scores, hiscore_idx);
1686
1687 return hiscore_idx;
1688 }
1689
1690 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1691 const struct in6_addr *daddr, unsigned int prefs,
1692 struct in6_addr *saddr)
1693 {
1694 struct ipv6_saddr_score scores[2], *hiscore;
1695 struct ipv6_saddr_dst dst;
1696 struct inet6_dev *idev;
1697 struct net_device *dev;
1698 int dst_type;
1699 bool use_oif_addr = false;
1700 int hiscore_idx = 0;
1701 int ret = 0;
1702
1703 dst_type = __ipv6_addr_type(daddr);
1704 dst.addr = daddr;
1705 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1706 dst.scope = __ipv6_addr_src_scope(dst_type);
1707 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1708 dst.prefs = prefs;
1709
1710 scores[hiscore_idx].rule = -1;
1711 scores[hiscore_idx].ifa = NULL;
1712
1713 rcu_read_lock();
1714
1715 /* Candidate Source Address (section 4)
1716 * - multicast and link-local destination address,
1717 * the set of candidate source address MUST only
1718 * include addresses assigned to interfaces
1719 * belonging to the same link as the outgoing
1720 * interface.
1721 * (- For site-local destination addresses, the
1722 * set of candidate source addresses MUST only
1723 * include addresses assigned to interfaces
1724 * belonging to the same site as the outgoing
1725 * interface.)
1726 * - "It is RECOMMENDED that the candidate source addresses
1727 * be the set of unicast addresses assigned to the
1728 * interface that will be used to send to the destination
1729 * (the 'outgoing' interface)." (RFC 6724)
1730 */
1731 if (dst_dev) {
1732 idev = __in6_dev_get(dst_dev);
1733 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1734 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1735 (idev && idev->cnf.use_oif_addrs_only)) {
1736 use_oif_addr = true;
1737 }
1738 }
1739
1740 if (use_oif_addr) {
1741 if (idev)
1742 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1743 } else {
1744 const struct net_device *master;
1745 int master_idx = 0;
1746
1747 /* if dst_dev exists and is enslaved to an L3 device, then
1748 * prefer addresses from dst_dev and then the master over
1749 * any other enslaved devices in the L3 domain.
1750 */
1751 master = l3mdev_master_dev_rcu(dst_dev);
1752 if (master) {
1753 master_idx = master->ifindex;
1754
1755 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1756 master, &dst,
1757 scores, hiscore_idx);
1758
1759 if (scores[hiscore_idx].ifa)
1760 goto out;
1761 }
1762
1763 for_each_netdev_rcu(net, dev) {
1764 /* only consider addresses on devices in the
1765 * same L3 domain
1766 */
1767 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1768 continue;
1769 idev = __in6_dev_get(dev);
1770 if (!idev)
1771 continue;
1772 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1773 }
1774 }
1775
1776 out:
1777 hiscore = &scores[hiscore_idx];
1778 if (!hiscore->ifa)
1779 ret = -EADDRNOTAVAIL;
1780 else
1781 *saddr = hiscore->ifa->addr;
1782
1783 rcu_read_unlock();
1784 return ret;
1785 }
1786 EXPORT_SYMBOL(ipv6_dev_get_saddr);
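/*
 * A minimal, illustrative call (real callers such as ip6_route_get_saddr()
 * pass the preference bits taken from the socket or the routing request):
 *
 *	struct in6_addr saddr;
 *
 *	if (!ipv6_dev_get_saddr(net, dst_dev, &daddr,
 *				IPV6_PREFER_SRC_PUBLIC, &saddr))
 *		... use saddr as the source of the outgoing packet ...
 *
 * On failure -EADDRNOTAVAIL is returned and *saddr is left untouched.
 */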
1787
1788 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1789 u32 banned_flags)
1790 {
1791 struct inet6_ifaddr *ifp;
1792 int err = -EADDRNOTAVAIL;
1793
1794 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1795 if (ifp->scope > IFA_LINK)
1796 break;
1797 if (ifp->scope == IFA_LINK &&
1798 !(ifp->flags & banned_flags)) {
1799 *addr = ifp->addr;
1800 err = 0;
1801 break;
1802 }
1803 }
1804 return err;
1805 }
1806
1807 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1808 u32 banned_flags)
1809 {
1810 struct inet6_dev *idev;
1811 int err = -EADDRNOTAVAIL;
1812
1813 rcu_read_lock();
1814 idev = __in6_dev_get(dev);
1815 if (idev) {
1816 read_lock_bh(&idev->lock);
1817 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1818 read_unlock_bh(&idev->lock);
1819 }
1820 rcu_read_unlock();
1821 return err;
1822 }
1823
1824 static int ipv6_count_addresses(const struct inet6_dev *idev)
1825 {
1826 const struct inet6_ifaddr *ifp;
1827 int cnt = 0;
1828
1829 rcu_read_lock();
1830 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1831 cnt++;
1832 rcu_read_unlock();
1833 return cnt;
1834 }
1835
1836 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1837 const struct net_device *dev, int strict)
1838 {
1839 return ipv6_chk_addr_and_flags(net, addr, dev, strict, IFA_F_TENTATIVE);
1840 }
1841 EXPORT_SYMBOL(ipv6_chk_addr);
1842
1843 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1844 const struct net_device *dev, int strict,
1845 u32 banned_flags)
1846 {
1847 unsigned int hash = inet6_addr_hash(net, addr);
1848 struct inet6_ifaddr *ifp;
1849 u32 ifp_flags;
1850
1851 rcu_read_lock();
1852 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1853 if (!net_eq(dev_net(ifp->idev->dev), net))
1854 continue;
1855 /* Decouple optimistic from tentative for evaluation here.
1856 * Ban optimistic addresses explicitly, when required.
1857 */
1858 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1859 ? (ifp->flags&~IFA_F_TENTATIVE)
1860 : ifp->flags;
1861 if (ipv6_addr_equal(&ifp->addr, addr) &&
1862 !(ifp_flags&banned_flags) &&
1863 (!dev || ifp->idev->dev == dev ||
1864 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1865 rcu_read_unlock();
1866 return 1;
1867 }
1868 }
1869
1870 rcu_read_unlock();
1871 return 0;
1872 }
1873 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
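/*
 * Note on @strict: when a device is given and either @strict is non-zero or
 * the matched address is link- or host-scoped, the address must be assigned
 * to @dev itself; otherwise an address configured on any device in @net
 * satisfies the check.
 */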
1874
1875
1876 /* Compares an address/prefix_len with addresses on device @dev.
1877 * If one is found it returns true.
1878 */
1879 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1880 const unsigned int prefix_len, struct net_device *dev)
1881 {
1882 const struct inet6_ifaddr *ifa;
1883 const struct inet6_dev *idev;
1884 bool ret = false;
1885
1886 rcu_read_lock();
1887 idev = __in6_dev_get(dev);
1888 if (idev) {
1889 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1890 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1891 if (ret)
1892 break;
1893 }
1894 }
1895 rcu_read_unlock();
1896
1897 return ret;
1898 }
1899 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1900
1901 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1902 {
1903 const struct inet6_ifaddr *ifa;
1904 const struct inet6_dev *idev;
1905 int onlink;
1906
1907 onlink = 0;
1908 rcu_read_lock();
1909 idev = __in6_dev_get(dev);
1910 if (idev) {
1911 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1912 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1913 ifa->prefix_len);
1914 if (onlink)
1915 break;
1916 }
1917 }
1918 rcu_read_unlock();
1919 return onlink;
1920 }
1921 EXPORT_SYMBOL(ipv6_chk_prefix);
1922
1923 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1924 struct net_device *dev, int strict)
1925 {
1926 unsigned int hash = inet6_addr_hash(net, addr);
1927 struct inet6_ifaddr *ifp, *result = NULL;
1928
1929 rcu_read_lock();
1930 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1931 if (!net_eq(dev_net(ifp->idev->dev), net))
1932 continue;
1933 if (ipv6_addr_equal(&ifp->addr, addr)) {
1934 if (!dev || ifp->idev->dev == dev ||
1935 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1936 result = ifp;
1937 in6_ifa_hold(ifp);
1938 break;
1939 }
1940 }
1941 }
1942 rcu_read_unlock();
1943
1944 return result;
1945 }
1946
1947 /* Gets referenced address, destroys ifaddr */
1948
1949 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1950 {
1951 if (dad_failed)
1952 ifp->flags |= IFA_F_DADFAILED;
1953
1954 if (ifp->flags&IFA_F_TEMPORARY) {
1955 struct inet6_ifaddr *ifpub;
1956 spin_lock_bh(&ifp->lock);
1957 ifpub = ifp->ifpub;
1958 if (ifpub) {
1959 in6_ifa_hold(ifpub);
1960 spin_unlock_bh(&ifp->lock);
1961 ipv6_create_tempaddr(ifpub, ifp, true);
1962 in6_ifa_put(ifpub);
1963 } else {
1964 spin_unlock_bh(&ifp->lock);
1965 }
1966 ipv6_del_addr(ifp);
1967 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
1968 spin_lock_bh(&ifp->lock);
1969 addrconf_del_dad_work(ifp);
1970 ifp->flags |= IFA_F_TENTATIVE;
1971 spin_unlock_bh(&ifp->lock);
1972 if (dad_failed)
1973 ipv6_ifa_notify(0, ifp);
1974 in6_ifa_put(ifp);
1975 } else {
1976 ipv6_del_addr(ifp);
1977 }
1978 }
1979
1980 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
1981 {
1982 int err = -ENOENT;
1983
1984 spin_lock_bh(&ifp->lock);
1985 if (ifp->state == INET6_IFADDR_STATE_DAD) {
1986 ifp->state = INET6_IFADDR_STATE_POSTDAD;
1987 err = 0;
1988 }
1989 spin_unlock_bh(&ifp->lock);
1990
1991 return err;
1992 }
1993
1994 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
1995 {
1996 struct inet6_dev *idev = ifp->idev;
1997 struct net *net = dev_net(ifp->idev->dev);
1998
1999 if (addrconf_dad_end(ifp)) {
2000 in6_ifa_put(ifp);
2001 return;
2002 }
2003
2004 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2005 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2006
2007 spin_lock_bh(&ifp->lock);
2008
2009 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2010 int scope = ifp->scope;
2011 u32 flags = ifp->flags;
2012 struct in6_addr new_addr;
2013 struct inet6_ifaddr *ifp2;
2014 u32 valid_lft, preferred_lft;
2015 int pfxlen = ifp->prefix_len;
2016 int retries = ifp->stable_privacy_retry + 1;
2017
2018 if (retries > net->ipv6.sysctl.idgen_retries) {
2019 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2020 ifp->idev->dev->name);
2021 goto errdad;
2022 }
2023
2024 new_addr = ifp->addr;
2025 if (ipv6_generate_stable_address(&new_addr, retries,
2026 idev))
2027 goto errdad;
2028
2029 valid_lft = ifp->valid_lft;
2030 preferred_lft = ifp->prefered_lft;
2031
2032 spin_unlock_bh(&ifp->lock);
2033
2034 if (idev->cnf.max_addresses &&
2035 ipv6_count_addresses(idev) >=
2036 idev->cnf.max_addresses)
2037 goto lock_errdad;
2038
2039 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2040 ifp->idev->dev->name);
2041
2042 ifp2 = ipv6_add_addr(idev, &new_addr, NULL, pfxlen,
2043 scope, flags, valid_lft,
2044 preferred_lft, false, NULL);
2045 if (IS_ERR(ifp2))
2046 goto lock_errdad;
2047
2048 spin_lock_bh(&ifp2->lock);
2049 ifp2->stable_privacy_retry = retries;
2050 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2051 spin_unlock_bh(&ifp2->lock);
2052
2053 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2054 in6_ifa_put(ifp2);
2055 lock_errdad:
2056 spin_lock_bh(&ifp->lock);
2057 }
2058
2059 errdad:
2060 /* transition from _POSTDAD to _ERRDAD */
2061 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2062 spin_unlock_bh(&ifp->lock);
2063
2064 addrconf_mod_dad_work(ifp, 0);
2065 in6_ifa_put(ifp);
2066 }
2067
2068 /* Join the solicited-node multicast group for the address.
2069 * Caller must hold RTNL. */
2070 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2071 {
2072 struct in6_addr maddr;
2073
2074 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2075 return;
2076
2077 addrconf_addr_solict_mult(addr, &maddr);
2078 ipv6_dev_mc_inc(dev, &maddr);
2079 }
2080
2081 /* caller must hold RTNL */
2082 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2083 {
2084 struct in6_addr maddr;
2085
2086 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2087 return;
2088
2089 addrconf_addr_solict_mult(addr, &maddr);
2090 __ipv6_dev_mc_dec(idev, &maddr);
2091 }
2092
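/*
 * For every assigned unicast address the kernel also joins the
 * subnet-router anycast address (RFC 4291 section 2.6.1): the prefix
 * with an all-zero interface identifier.  Prefixes of length 127 or
 * longer are skipped per RFC 6164.
 */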
2093 /* caller must hold RTNL */
2094 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2095 {
2096 struct in6_addr addr;
2097
2098 if (ifp->prefix_len >= 127) /* RFC 6164 */
2099 return;
2100 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2101 if (ipv6_addr_any(&addr))
2102 return;
2103 __ipv6_dev_ac_inc(ifp->idev, &addr);
2104 }
2105
2106 /* caller must hold RTNL */
2107 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2108 {
2109 struct in6_addr addr;
2110
2111 if (ifp->prefix_len >= 127) /* RFC 6164 */
2112 return;
2113 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2114 if (ipv6_addr_any(&addr))
2115 return;
2116 __ipv6_dev_ac_dec(ifp->idev, &addr);
2117 }
2118
2119 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2120 {
2121 switch (dev->addr_len) {
2122 case ETH_ALEN:
2123 memcpy(eui, dev->dev_addr, 3);
2124 eui[3] = 0xFF;
2125 eui[4] = 0xFE;
2126 memcpy(eui + 5, dev->dev_addr + 3, 3);
2127 break;
2128 case EUI64_ADDR_LEN:
2129 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2130 eui[0] ^= 2;
2131 break;
2132 default:
2133 return -1;
2134 }
2135
2136 return 0;
2137 }
2138
2139 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2140 {
2141 union fwnet_hwaddr *ha;
2142
2143 if (dev->addr_len != FWNET_ALEN)
2144 return -1;
2145
2146 ha = (union fwnet_hwaddr *)dev->dev_addr;
2147
2148 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2149 eui[0] ^= 2;
2150 return 0;
2151 }
2152
2153 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2154 {
2155 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2156 if (dev->addr_len != ARCNET_ALEN)
2157 return -1;
2158 memset(eui, 0, 7);
2159 eui[7] = *(u8 *)dev->dev_addr;
2160 return 0;
2161 }
2162
2163 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2164 {
2165 if (dev->addr_len != INFINIBAND_ALEN)
2166 return -1;
2167 memcpy(eui, dev->dev_addr + 12, 8);
2168 eui[0] |= 2;
2169 return 0;
2170 }
2171
2172 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2173 {
2174 if (addr == 0)
2175 return -1;
2176 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2177 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2178 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2179 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2180 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2181 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2182 eui[1] = 0;
2183 eui[2] = 0x5E;
2184 eui[3] = 0xFE;
2185 memcpy(eui + 4, &addr, 4);
2186 return 0;
2187 }
2188
2189 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2190 {
2191 if (dev->priv_flags & IFF_ISATAP)
2192 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2193 return -1;
2194 }
2195
2196 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2197 {
2198 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2199 }
2200
2201 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2202 {
2203 memcpy(eui, dev->perm_addr, 3);
2204 memcpy(eui + 5, dev->perm_addr + 3, 3);
2205 eui[3] = 0xFF;
2206 eui[4] = 0xFE;
2207 eui[0] ^= 2;
2208 return 0;
2209 }
2210
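/*
 * Derive a modified EUI-64 interface identifier from the link-layer
 * address, dispatching on dev->type.  For Ethernet-style 48-bit MACs
 * this means inserting 0xff 0xfe in the middle and flipping the
 * universal/local bit (RFC 4291, appendix A); e.g. the MAC
 * 52:54:00:12:34:56 becomes the identifier 5054:00ff:fe12:3456.
 */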
2211 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2212 {
2213 switch (dev->type) {
2214 case ARPHRD_ETHER:
2215 case ARPHRD_FDDI:
2216 return addrconf_ifid_eui48(eui, dev);
2217 case ARPHRD_ARCNET:
2218 return addrconf_ifid_arcnet(eui, dev);
2219 case ARPHRD_INFINIBAND:
2220 return addrconf_ifid_infiniband(eui, dev);
2221 case ARPHRD_SIT:
2222 return addrconf_ifid_sit(eui, dev);
2223 case ARPHRD_IPGRE:
2224 case ARPHRD_TUNNEL:
2225 return addrconf_ifid_gre(eui, dev);
2226 case ARPHRD_6LOWPAN:
2227 return addrconf_ifid_6lowpan(eui, dev);
2228 case ARPHRD_IEEE1394:
2229 return addrconf_ifid_ieee1394(eui, dev);
2230 case ARPHRD_TUNNEL6:
2231 case ARPHRD_IP6GRE:
2232 return addrconf_ifid_ip6tnl(eui, dev);
2233 }
2234 return -1;
2235 }
2236
2237 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2238 {
2239 int err = -1;
2240 struct inet6_ifaddr *ifp;
2241
2242 read_lock_bh(&idev->lock);
2243 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2244 if (ifp->scope > IFA_LINK)
2245 break;
2246 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2247 memcpy(eui, ifp->addr.s6_addr+8, 8);
2248 err = 0;
2249 break;
2250 }
2251 }
2252 read_unlock_bh(&idev->lock);
2253 return err;
2254 }
2255
2256 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2257 static void ipv6_regen_rndid(struct inet6_dev *idev)
2258 {
2259 regen:
2260 get_random_bytes(idev->rndid, sizeof(idev->rndid));
2261 idev->rndid[0] &= ~0x02;
2262
2263 /*
2264 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
2265 * check if generated address is not inappropriate
2266 *
2267 * - Reserved subnet anycast (RFC 2526)
2268 * 11111101 11....11 1xxxxxxx
2269 * - ISATAP (RFC4214) 6.1
2270 * 00-00-5E-FE-xx-xx-xx-xx
2271 * - value 0
2272 * - XXX: already assigned to an address on the device
2273 */
2274 if (idev->rndid[0] == 0xfd &&
2275 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
2276 (idev->rndid[7]&0x80))
2277 goto regen;
2278 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
2279 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
2280 goto regen;
2281 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
2282 goto regen;
2283 }
2284 }
2285
2286 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2287 {
2288 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2289 ipv6_regen_rndid(idev);
2290 }
2291
2292 /*
2293 * Add prefix route.
2294 */
2295
2296 static void
2297 addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
2298 unsigned long expires, u32 flags)
2299 {
2300 struct fib6_config cfg = {
2301 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2302 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2303 .fc_ifindex = dev->ifindex,
2304 .fc_expires = expires,
2305 .fc_dst_len = plen,
2306 .fc_flags = RTF_UP | flags,
2307 .fc_nlinfo.nl_net = dev_net(dev),
2308 .fc_protocol = RTPROT_KERNEL,
2309 };
2310
2311 cfg.fc_dst = *pfx;
2312
2313 /* Prevent useless cloning on PtP SIT.
2314 This is done here on the assumption that the whole
2315 class of non-broadcast devices does not need cloning.
2316 */
2317 #if IS_ENABLED(CONFIG_IPV6_SIT)
2318 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2319 cfg.fc_flags |= RTF_NONEXTHOP;
2320 #endif
2321
2322 ip6_route_add(&cfg, NULL);
2323 }
2324
2325
2326 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2327 int plen,
2328 const struct net_device *dev,
2329 u32 flags, u32 noflags)
2330 {
2331 struct fib6_node *fn;
2332 struct rt6_info *rt = NULL;
2333 struct fib6_table *table;
2334 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2335
2336 table = fib6_get_table(dev_net(dev), tb_id);
2337 if (!table)
2338 return NULL;
2339
2340 rcu_read_lock();
2341 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2342 if (!fn)
2343 goto out;
2344
2345 for_each_fib6_node_rt_rcu(fn) {
2346 if (rt->dst.dev->ifindex != dev->ifindex)
2347 continue;
2348 if ((rt->rt6i_flags & flags) != flags)
2349 continue;
2350 if ((rt->rt6i_flags & noflags) != 0)
2351 continue;
2352 if (!dst_hold_safe(&rt->dst))
2353 rt = NULL;
2354 break;
2355 }
2356 out:
2357 rcu_read_unlock();
2358 return rt;
2359 }
2360
2361
2362 /* Create "default" multicast route to the interface */
2363
2364 static void addrconf_add_mroute(struct net_device *dev)
2365 {
2366 struct fib6_config cfg = {
2367 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2368 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2369 .fc_ifindex = dev->ifindex,
2370 .fc_dst_len = 8,
2371 .fc_flags = RTF_UP,
2372 .fc_nlinfo.nl_net = dev_net(dev),
2373 };
2374
2375 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2376
2377 ip6_route_add(&cfg, NULL);
2378 }
2379
2380 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2381 {
2382 struct inet6_dev *idev;
2383
2384 ASSERT_RTNL();
2385
2386 idev = ipv6_find_idev(dev);
2387 if (!idev)
2388 return ERR_PTR(-ENOBUFS);
2389
2390 if (idev->cnf.disable_ipv6)
2391 return ERR_PTR(-EACCES);
2392
2393 /* Add default multicast route */
2394 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2395 addrconf_add_mroute(dev);
2396
2397 return idev;
2398 }
2399
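/*
 * Refresh the RFC 4941 temporary addresses derived from a public
 * address: their lifetimes are clamped to temp_valid_lft and to
 * temp_prefered_lft minus desync_factor, both reduced by the address
 * age, and a fresh temporary address is created whenever a new public
 * address appears (or none exists yet) and use_tempaddr is enabled.
 */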
2400 static void manage_tempaddrs(struct inet6_dev *idev,
2401 struct inet6_ifaddr *ifp,
2402 __u32 valid_lft, __u32 prefered_lft,
2403 bool create, unsigned long now)
2404 {
2405 u32 flags;
2406 struct inet6_ifaddr *ift;
2407
2408 read_lock_bh(&idev->lock);
2409 /* update all temporary addresses in the list */
2410 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2411 int age, max_valid, max_prefered;
2412
2413 if (ifp != ift->ifpub)
2414 continue;
2415
2416 /* RFC 4941 section 3.3:
2417 * If a received option will extend the lifetime of a public
2418 * address, the lifetimes of temporary addresses should
2419 * be extended, subject to the overall constraint that no
2420 * temporary addresses should ever remain "valid" or "preferred"
2421 * for a time longer than (TEMP_VALID_LIFETIME) or
2422 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2423 */
2424 age = (now - ift->cstamp) / HZ;
2425 max_valid = idev->cnf.temp_valid_lft - age;
2426 if (max_valid < 0)
2427 max_valid = 0;
2428
2429 max_prefered = idev->cnf.temp_prefered_lft -
2430 idev->desync_factor - age;
2431 if (max_prefered < 0)
2432 max_prefered = 0;
2433
2434 if (valid_lft > max_valid)
2435 valid_lft = max_valid;
2436
2437 if (prefered_lft > max_prefered)
2438 prefered_lft = max_prefered;
2439
2440 spin_lock(&ift->lock);
2441 flags = ift->flags;
2442 ift->valid_lft = valid_lft;
2443 ift->prefered_lft = prefered_lft;
2444 ift->tstamp = now;
2445 if (prefered_lft > 0)
2446 ift->flags &= ~IFA_F_DEPRECATED;
2447
2448 spin_unlock(&ift->lock);
2449 if (!(flags&IFA_F_TENTATIVE))
2450 ipv6_ifa_notify(0, ift);
2451 }
2452
2453 if ((create || list_empty(&idev->tempaddr_list)) &&
2454 idev->cnf.use_tempaddr > 0) {
2455 /* When a new public address is created as described
2456 * in [ADDRCONF], also create a new temporary address.
2457 * Also create a temporary address if it's enabled but
2458 * no temporary address currently exists.
2459 */
2460 read_unlock_bh(&idev->lock);
2461 ipv6_create_tempaddr(ifp, NULL, false);
2462 } else {
2463 read_unlock_bh(&idev->lock);
2464 }
2465 }
2466
2467 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2468 {
2469 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2470 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2471 }
2472
2473 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2474 const struct prefix_info *pinfo,
2475 struct inet6_dev *in6_dev,
2476 const struct in6_addr *addr, int addr_type,
2477 u32 addr_flags, bool sllao, bool tokenized,
2478 __u32 valid_lft, u32 prefered_lft)
2479 {
2480 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2481 int create = 0, update_lft = 0;
2482
2483 if (!ifp && valid_lft) {
2484 int max_addresses = in6_dev->cnf.max_addresses;
2485
2486 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2487 if ((net->ipv6.devconf_all->optimistic_dad ||
2488 in6_dev->cnf.optimistic_dad) &&
2489 !net->ipv6.devconf_all->forwarding && sllao)
2490 addr_flags |= IFA_F_OPTIMISTIC;
2491 #endif
2492
2493 /* Do not allow the creation of too many autoconfigured
2494 * addresses; that would be too easy a way to crash the kernel.
2495 */
2496 if (!max_addresses ||
2497 ipv6_count_addresses(in6_dev) < max_addresses)
2498 ifp = ipv6_add_addr(in6_dev, addr, NULL,
2499 pinfo->prefix_len,
2500 addr_type&IPV6_ADDR_SCOPE_MASK,
2501 addr_flags, valid_lft,
2502 prefered_lft, false, NULL);
2503
2504 if (IS_ERR_OR_NULL(ifp))
2505 return -1;
2506
2507 update_lft = 0;
2508 create = 1;
2509 spin_lock_bh(&ifp->lock);
2510 ifp->flags |= IFA_F_MANAGETEMPADDR;
2511 ifp->cstamp = jiffies;
2512 ifp->tokenized = tokenized;
2513 spin_unlock_bh(&ifp->lock);
2514 addrconf_dad_start(ifp);
2515 }
2516
2517 if (ifp) {
2518 u32 flags;
2519 unsigned long now;
2520 u32 stored_lft;
2521
2522 /* update lifetime (RFC2462 5.5.3 e) */
2523 spin_lock_bh(&ifp->lock);
2524 now = jiffies;
2525 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2526 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2527 else
2528 stored_lft = 0;
2529 if (!update_lft && !create && stored_lft) {
2530 const u32 minimum_lft = min_t(u32,
2531 stored_lft, MIN_VALID_LIFETIME);
2532 valid_lft = max(valid_lft, minimum_lft);
2533
2534 /* RFC4862 Section 5.5.3e:
2535 * "Note that the preferred lifetime of the
2536 * corresponding address is always reset to
2537 * the Preferred Lifetime in the received
2538 * Prefix Information option, regardless of
2539 * whether the valid lifetime is also reset or
2540 * ignored."
2541 *
2542 * So we should always update prefered_lft here.
2543 */
2544 update_lft = 1;
2545 }
2546
2547 if (update_lft) {
2548 ifp->valid_lft = valid_lft;
2549 ifp->prefered_lft = prefered_lft;
2550 ifp->tstamp = now;
2551 flags = ifp->flags;
2552 ifp->flags &= ~IFA_F_DEPRECATED;
2553 spin_unlock_bh(&ifp->lock);
2554
2555 if (!(flags&IFA_F_TENTATIVE))
2556 ipv6_ifa_notify(0, ifp);
2557 } else
2558 spin_unlock_bh(&ifp->lock);
2559
2560 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2561 create, now);
2562
2563 in6_ifa_put(ifp);
2564 addrconf_verify();
2565 }
2566
2567 return 0;
2568 }
2569 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2570
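/*
 * Process a Prefix Information option from a router advertisement
 * (RFC 4861/4862).  The on-link flag installs or refreshes a prefix
 * route with the advertised valid lifetime; the autonomous flag
 * drives SLAAC for /64 prefixes, combining the prefix with a
 * configured token, a stable-privacy identifier or an EUI-64
 * identifier and handing the result to addrconf_prefix_rcv_add_addr().
 */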
2571 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2572 {
2573 struct prefix_info *pinfo;
2574 __u32 valid_lft;
2575 __u32 prefered_lft;
2576 int addr_type, err;
2577 u32 addr_flags = 0;
2578 struct inet6_dev *in6_dev;
2579 struct net *net = dev_net(dev);
2580
2581 pinfo = (struct prefix_info *) opt;
2582
2583 if (len < sizeof(struct prefix_info)) {
2584 ADBG("addrconf: prefix option too short\n");
2585 return;
2586 }
2587
2588 /*
2589 * Validation checks ([ADDRCONF], page 19)
2590 */
2591
2592 addr_type = ipv6_addr_type(&pinfo->prefix);
2593
2594 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2595 return;
2596
2597 valid_lft = ntohl(pinfo->valid);
2598 prefered_lft = ntohl(pinfo->prefered);
2599
2600 if (prefered_lft > valid_lft) {
2601 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2602 return;
2603 }
2604
2605 in6_dev = in6_dev_get(dev);
2606
2607 if (!in6_dev) {
2608 net_dbg_ratelimited("addrconf: device %s not configured\n",
2609 dev->name);
2610 return;
2611 }
2612
2613 /*
2614 * Two things going on here:
2615 * 1) Add routes for on-link prefixes
2616 * 2) Configure prefixes with the auto flag set
2617 */
2618
2619 if (pinfo->onlink) {
2620 struct rt6_info *rt;
2621 unsigned long rt_expires;
2622
2623 /* Avoid arithmetic overflow. Really, we could
2624 * save rt_expires in seconds, likely valid_lft,
2625 * but it would require division in fib gc, and that
2626 * is not good.
2627 */
2628 if (HZ > USER_HZ)
2629 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2630 else
2631 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2632
2633 if (addrconf_finite_timeout(rt_expires))
2634 rt_expires *= HZ;
2635
2636 rt = addrconf_get_prefix_route(&pinfo->prefix,
2637 pinfo->prefix_len,
2638 dev,
2639 RTF_ADDRCONF | RTF_PREFIX_RT,
2640 RTF_GATEWAY | RTF_DEFAULT);
2641
2642 if (rt) {
2643 /* Autoconf prefix route */
2644 if (valid_lft == 0) {
2645 ip6_del_rt(rt);
2646 rt = NULL;
2647 } else if (addrconf_finite_timeout(rt_expires)) {
2648 /* not infinity */
2649 rt6_set_expires(rt, jiffies + rt_expires);
2650 } else {
2651 rt6_clean_expires(rt);
2652 }
2653 } else if (valid_lft) {
2654 clock_t expires = 0;
2655 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2656 if (addrconf_finite_timeout(rt_expires)) {
2657 /* not infinity */
2658 flags |= RTF_EXPIRES;
2659 expires = jiffies_to_clock_t(rt_expires);
2660 }
2661 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2662 dev, expires, flags);
2663 }
2664 ip6_rt_put(rt);
2665 }
2666
2667 /* Try to figure out our local address for this prefix */
2668
2669 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2670 struct in6_addr addr;
2671 bool tokenized = false, dev_addr_generated = false;
2672
2673 if (pinfo->prefix_len == 64) {
2674 memcpy(&addr, &pinfo->prefix, 8);
2675
2676 if (!ipv6_addr_any(&in6_dev->token)) {
2677 read_lock_bh(&in6_dev->lock);
2678 memcpy(addr.s6_addr + 8,
2679 in6_dev->token.s6_addr + 8, 8);
2680 read_unlock_bh(&in6_dev->lock);
2681 tokenized = true;
2682 } else if (is_addr_mode_generate_stable(in6_dev) &&
2683 !ipv6_generate_stable_address(&addr, 0,
2684 in6_dev)) {
2685 addr_flags |= IFA_F_STABLE_PRIVACY;
2686 goto ok;
2687 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2688 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2689 goto put;
2690 } else {
2691 dev_addr_generated = true;
2692 }
2693 goto ok;
2694 }
2695 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2696 pinfo->prefix_len);
2697 goto put;
2698
2699 ok:
2700 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2701 &addr, addr_type,
2702 addr_flags, sllao,
2703 tokenized, valid_lft,
2704 prefered_lft);
2705 if (err)
2706 goto put;
2707
2708 /* Ignore the error case here because the previous prefix address
2709 * add was successful, and that success will be notified.
2710 */
2711 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2712 addr_type, addr_flags, sllao,
2713 tokenized, valid_lft,
2714 prefered_lft,
2715 dev_addr_generated);
2716 }
2717 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2718 put:
2719 in6_dev_put(in6_dev);
2720 }
2721
2722 /*
2723 * Set destination address.
2724 * Special case for SIT interfaces where we create a new "virtual"
2725 * device.
2726 */
2727 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2728 {
2729 struct in6_ifreq ireq;
2730 struct net_device *dev;
2731 int err = -EINVAL;
2732
2733 rtnl_lock();
2734
2735 err = -EFAULT;
2736 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2737 goto err_exit;
2738
2739 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2740
2741 err = -ENODEV;
2742 if (!dev)
2743 goto err_exit;
2744
2745 #if IS_ENABLED(CONFIG_IPV6_SIT)
2746 if (dev->type == ARPHRD_SIT) {
2747 const struct net_device_ops *ops = dev->netdev_ops;
2748 struct ifreq ifr;
2749 struct ip_tunnel_parm p;
2750
2751 err = -EADDRNOTAVAIL;
2752 if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
2753 goto err_exit;
2754
2755 memset(&p, 0, sizeof(p));
2756 p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
2757 p.iph.saddr = 0;
2758 p.iph.version = 4;
2759 p.iph.ihl = 5;
2760 p.iph.protocol = IPPROTO_IPV6;
2761 p.iph.ttl = 64;
2762 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
2763
2764 if (ops->ndo_do_ioctl) {
2765 mm_segment_t oldfs = get_fs();
2766
2767 set_fs(KERNEL_DS);
2768 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
2769 set_fs(oldfs);
2770 } else
2771 err = -EOPNOTSUPP;
2772
2773 if (err == 0) {
2774 err = -ENOBUFS;
2775 dev = __dev_get_by_name(net, p.name);
2776 if (!dev)
2777 goto err_exit;
2778 err = dev_open(dev);
2779 }
2780 }
2781 #endif
2782
2783 err_exit:
2784 rtnl_unlock();
2785 return err;
2786 }
2787
2788 static int ipv6_mc_config(struct sock *sk, bool join,
2789 const struct in6_addr *addr, int ifindex)
2790 {
2791 int ret;
2792
2793 ASSERT_RTNL();
2794
2795 lock_sock(sk);
2796 if (join)
2797 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2798 else
2799 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2800 release_sock(sk);
2801
2802 return ret;
2803 }
2804
2805 /*
2806 * Manual configuration of address on an interface
2807 */
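/*
 * Lifetimes map onto flags here: an infinite valid lifetime marks the
 * address IFA_F_PERMANENT and leaves the prefix route without expiry,
 * a finite one sets RTF_EXPIRES on the route, and a preferred lifetime
 * of zero deprecates the address immediately.
 */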
2808 static int inet6_addr_add(struct net *net, int ifindex,
2809 const struct in6_addr *pfx,
2810 const struct in6_addr *peer_pfx,
2811 unsigned int plen, __u32 ifa_flags,
2812 __u32 prefered_lft, __u32 valid_lft,
2813 struct netlink_ext_ack *extack)
2814 {
2815 struct inet6_ifaddr *ifp;
2816 struct inet6_dev *idev;
2817 struct net_device *dev;
2818 unsigned long timeout;
2819 clock_t expires;
2820 int scope;
2821 u32 flags;
2822
2823 ASSERT_RTNL();
2824
2825 if (plen > 128)
2826 return -EINVAL;
2827
2828 /* check the lifetime */
2829 if (!valid_lft || prefered_lft > valid_lft)
2830 return -EINVAL;
2831
2832 if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
2833 return -EINVAL;
2834
2835 dev = __dev_get_by_index(net, ifindex);
2836 if (!dev)
2837 return -ENODEV;
2838
2839 idev = addrconf_add_dev(dev);
2840 if (IS_ERR(idev))
2841 return PTR_ERR(idev);
2842
2843 if (ifa_flags & IFA_F_MCAUTOJOIN) {
2844 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2845 true, pfx, ifindex);
2846
2847 if (ret < 0)
2848 return ret;
2849 }
2850
2851 scope = ipv6_addr_scope(pfx);
2852
2853 timeout = addrconf_timeout_fixup(valid_lft, HZ);
2854 if (addrconf_finite_timeout(timeout)) {
2855 expires = jiffies_to_clock_t(timeout * HZ);
2856 valid_lft = timeout;
2857 flags = RTF_EXPIRES;
2858 } else {
2859 expires = 0;
2860 flags = 0;
2861 ifa_flags |= IFA_F_PERMANENT;
2862 }
2863
2864 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
2865 if (addrconf_finite_timeout(timeout)) {
2866 if (timeout == 0)
2867 ifa_flags |= IFA_F_DEPRECATED;
2868 prefered_lft = timeout;
2869 }
2870
2871 ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
2872 valid_lft, prefered_lft, true, extack);
2873
2874 if (!IS_ERR(ifp)) {
2875 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
2876 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
2877 expires, flags);
2878 }
2879
2880 /*
2881 * Note that section 3.1 of RFC 4429 indicates
2882 * that the Optimistic flag should not be set for
2883 * manually configured addresses
2884 */
2885 addrconf_dad_start(ifp);
2886 if (ifa_flags & IFA_F_MANAGETEMPADDR)
2887 manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
2888 true, jiffies);
2889 in6_ifa_put(ifp);
2890 addrconf_verify_rtnl();
2891 return 0;
2892 } else if (ifa_flags & IFA_F_MCAUTOJOIN) {
2893 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2894 false, pfx, ifindex);
2895 }
2896
2897 return PTR_ERR(ifp);
2898 }
2899
2900 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2901 const struct in6_addr *pfx, unsigned int plen)
2902 {
2903 struct inet6_ifaddr *ifp;
2904 struct inet6_dev *idev;
2905 struct net_device *dev;
2906
2907 if (plen > 128)
2908 return -EINVAL;
2909
2910 dev = __dev_get_by_index(net, ifindex);
2911 if (!dev)
2912 return -ENODEV;
2913
2914 idev = __in6_dev_get(dev);
2915 if (!idev)
2916 return -ENXIO;
2917
2918 read_lock_bh(&idev->lock);
2919 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2920 if (ifp->prefix_len == plen &&
2921 ipv6_addr_equal(pfx, &ifp->addr)) {
2922 in6_ifa_hold(ifp);
2923 read_unlock_bh(&idev->lock);
2924
2925 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2926 (ifa_flags & IFA_F_MANAGETEMPADDR))
2927 manage_tempaddrs(idev, ifp, 0, 0, false,
2928 jiffies);
2929 ipv6_del_addr(ifp);
2930 addrconf_verify_rtnl();
2931 if (ipv6_addr_is_multicast(pfx)) {
2932 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2933 false, pfx, dev->ifindex);
2934 }
2935 return 0;
2936 }
2937 }
2938 read_unlock_bh(&idev->lock);
2939 return -EADDRNOTAVAIL;
2940 }
2941
2942
2943 int addrconf_add_ifaddr(struct net *net, void __user *arg)
2944 {
2945 struct in6_ifreq ireq;
2946 int err;
2947
2948 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2949 return -EPERM;
2950
2951 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2952 return -EFAULT;
2953
2954 rtnl_lock();
2955 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL,
2956 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2957 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME, NULL);
2958 rtnl_unlock();
2959 return err;
2960 }
2961
2962 int addrconf_del_ifaddr(struct net *net, void __user *arg)
2963 {
2964 struct in6_ifreq ireq;
2965 int err;
2966
2967 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2968 return -EPERM;
2969
2970 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2971 return -EFAULT;
2972
2973 rtnl_lock();
2974 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
2975 ireq.ifr6_prefixlen);
2976 rtnl_unlock();
2977 return err;
2978 }
2979
2980 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2981 int plen, int scope)
2982 {
2983 struct inet6_ifaddr *ifp;
2984
2985 ifp = ipv6_add_addr(idev, addr, NULL, plen,
2986 scope, IFA_F_PERMANENT,
2987 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME,
2988 true, NULL);
2989 if (!IS_ERR(ifp)) {
2990 spin_lock_bh(&ifp->lock);
2991 ifp->flags &= ~IFA_F_TENTATIVE;
2992 spin_unlock_bh(&ifp->lock);
2993 rt_genid_bump_ipv6(dev_net(idev->dev));
2994 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2995 in6_ifa_put(ifp);
2996 }
2997 }
2998
2999 #if IS_ENABLED(CONFIG_IPV6_SIT)
3000 static void sit_add_v4_addrs(struct inet6_dev *idev)
3001 {
3002 struct in6_addr addr;
3003 struct net_device *dev;
3004 struct net *net = dev_net(idev->dev);
3005 int scope, plen;
3006 u32 pflags = 0;
3007
3008 ASSERT_RTNL();
3009
3010 memset(&addr, 0, sizeof(struct in6_addr));
3011 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
3012
3013 if (idev->dev->flags&IFF_POINTOPOINT) {
3014 addr.s6_addr32[0] = htonl(0xfe800000);
3015 scope = IFA_LINK;
3016 plen = 64;
3017 } else {
3018 scope = IPV6_ADDR_COMPATv4;
3019 plen = 96;
3020 pflags |= RTF_NONEXTHOP;
3021 }
3022
3023 if (addr.s6_addr32[3]) {
3024 add_addr(idev, &addr, plen, scope);
3025 addrconf_prefix_route(&addr, plen, idev->dev, 0, pflags);
3026 return;
3027 }
3028
3029 for_each_netdev(net, dev) {
3030 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3031 if (in_dev && (dev->flags & IFF_UP)) {
3032 struct in_ifaddr *ifa;
3033
3034 int flag = scope;
3035
3036 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
3037
3038 addr.s6_addr32[3] = ifa->ifa_local;
3039
3040 if (ifa->ifa_scope == RT_SCOPE_LINK)
3041 continue;
3042 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3043 if (idev->dev->flags&IFF_POINTOPOINT)
3044 continue;
3045 flag |= IFA_HOST;
3046 }
3047
3048 add_addr(idev, &addr, plen, flag);
3049 addrconf_prefix_route(&addr, plen, idev->dev, 0,
3050 pflags);
3051 }
3052 }
3053 }
3054 }
3055 #endif
3056
3057 static void init_loopback(struct net_device *dev)
3058 {
3059 struct inet6_dev *idev;
3060
3061 /* ::1 */
3062
3063 ASSERT_RTNL();
3064
3065 idev = ipv6_find_idev(dev);
3066 if (!idev) {
3067 pr_debug("%s: add_dev failed\n", __func__);
3068 return;
3069 }
3070
3071 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3072 }
3073
3074 void addrconf_add_linklocal(struct inet6_dev *idev,
3075 const struct in6_addr *addr, u32 flags)
3076 {
3077 struct inet6_ifaddr *ifp;
3078 u32 addr_flags = flags | IFA_F_PERMANENT;
3079
3080 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3081 if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3082 idev->cnf.optimistic_dad) &&
3083 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3084 addr_flags |= IFA_F_OPTIMISTIC;
3085 #endif
3086
3087 ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
3088 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME, true, NULL);
3089 if (!IS_ERR(ifp)) {
3090 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
3091 addrconf_dad_start(ifp);
3092 in6_ifa_put(ifp);
3093 }
3094 }
3095 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3096
3097 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3098 {
3099 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3100 return true;
3101
3102 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3103 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3104 return true;
3105
3106 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3107 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3108 return true;
3109
3110 return false;
3111 }
3112
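/*
 * RFC 7217 style stable privacy interface identifiers: the low 64 bits
 * are taken from a SHA-1 digest over (secret, prefix, hardware address,
 * dad_count).  If the result collides with a reserved identifier, the
 * counter is bumped and the digest recomputed, up to idgen_retries.
 */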
3113 static int ipv6_generate_stable_address(struct in6_addr *address,
3114 u8 dad_count,
3115 const struct inet6_dev *idev)
3116 {
3117 static DEFINE_SPINLOCK(lock);
3118 static __u32 digest[SHA_DIGEST_WORDS];
3119 static __u32 workspace[SHA_WORKSPACE_WORDS];
3120
3121 static union {
3122 char __data[SHA_MESSAGE_BYTES];
3123 struct {
3124 struct in6_addr secret;
3125 __be32 prefix[2];
3126 unsigned char hwaddr[MAX_ADDR_LEN];
3127 u8 dad_count;
3128 } __packed;
3129 } data;
3130
3131 struct in6_addr secret;
3132 struct in6_addr temp;
3133 struct net *net = dev_net(idev->dev);
3134
3135 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3136
3137 if (idev->cnf.stable_secret.initialized)
3138 secret = idev->cnf.stable_secret.secret;
3139 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3140 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3141 else
3142 return -1;
3143
3144 retry:
3145 spin_lock_bh(&lock);
3146
3147 sha_init(digest);
3148 memset(&data, 0, sizeof(data));
3149 memset(workspace, 0, sizeof(workspace));
3150 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3151 data.prefix[0] = address->s6_addr32[0];
3152 data.prefix[1] = address->s6_addr32[1];
3153 data.secret = secret;
3154 data.dad_count = dad_count;
3155
3156 sha_transform(digest, data.__data, workspace);
3157
3158 temp = *address;
3159 temp.s6_addr32[2] = (__force __be32)digest[0];
3160 temp.s6_addr32[3] = (__force __be32)digest[1];
3161
3162 spin_unlock_bh(&lock);
3163
3164 if (ipv6_reserved_interfaceid(temp)) {
3165 dad_count++;
3166 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3167 return -1;
3168 goto retry;
3169 }
3170
3171 *address = temp;
3172 return 0;
3173 }
3174
3175 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3176 {
3177 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3178
3179 if (s->initialized)
3180 return;
3181 s = &idev->cnf.stable_secret;
3182 get_random_bytes(&s->secret, sizeof(s->secret));
3183 s->initialized = true;
3184 }
3185
3186 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3187 {
3188 struct in6_addr addr;
3189
3190 /* no link local addresses on L3 master devices */
3191 if (netif_is_l3_master(idev->dev))
3192 return;
3193
3194 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3195
3196 switch (idev->cnf.addr_gen_mode) {
3197 case IN6_ADDR_GEN_MODE_RANDOM:
3198 ipv6_gen_mode_random_init(idev);
3199 /* fallthrough */
3200 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3201 if (!ipv6_generate_stable_address(&addr, 0, idev))
3202 addrconf_add_linklocal(idev, &addr,
3203 IFA_F_STABLE_PRIVACY);
3204 else if (prefix_route)
3205 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3206 break;
3207 case IN6_ADDR_GEN_MODE_EUI64:
3208 /* addrconf_add_linklocal also adds a prefix_route and we
3209 * only need to care about prefix routes if ipv6_generate_eui64
3210 * couldn't generate one.
3211 */
3212 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3213 addrconf_add_linklocal(idev, &addr, 0);
3214 else if (prefix_route)
3215 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3216 break;
3217 case IN6_ADDR_GEN_MODE_NONE:
3218 default:
3219 /* will not add any link local address */
3220 break;
3221 }
3222 }
3223
3224 static void addrconf_dev_config(struct net_device *dev)
3225 {
3226 struct inet6_dev *idev;
3227
3228 ASSERT_RTNL();
3229
3230 if ((dev->type != ARPHRD_ETHER) &&
3231 (dev->type != ARPHRD_FDDI) &&
3232 (dev->type != ARPHRD_ARCNET) &&
3233 (dev->type != ARPHRD_INFINIBAND) &&
3234 (dev->type != ARPHRD_IEEE1394) &&
3235 (dev->type != ARPHRD_TUNNEL6) &&
3236 (dev->type != ARPHRD_6LOWPAN) &&
3237 (dev->type != ARPHRD_IP6GRE) &&
3238 (dev->type != ARPHRD_IPGRE) &&
3239 (dev->type != ARPHRD_TUNNEL) &&
3240 (dev->type != ARPHRD_NONE)) {
3241 /* Alas, we support autoconfiguration only for the link types listed above. */
3242 return;
3243 }
3244
3245 idev = addrconf_add_dev(dev);
3246 if (IS_ERR(idev))
3247 return;
3248
3249 /* this device type has no EUI support */
3250 if (dev->type == ARPHRD_NONE &&
3251 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3252 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3253
3254 addrconf_addr_gen(idev, false);
3255 }
3256
3257 #if IS_ENABLED(CONFIG_IPV6_SIT)
3258 static void addrconf_sit_config(struct net_device *dev)
3259 {
3260 struct inet6_dev *idev;
3261
3262 ASSERT_RTNL();
3263
3264 /*
3265 * Configure the tunnel with one of our IPv4
3266 * addresses... we should configure all of
3267 * our v4 addrs in the tunnel
3268 */
3269
3270 idev = ipv6_find_idev(dev);
3271 if (!idev) {
3272 pr_debug("%s: add_dev failed\n", __func__);
3273 return;
3274 }
3275
3276 if (dev->priv_flags & IFF_ISATAP) {
3277 addrconf_addr_gen(idev, false);
3278 return;
3279 }
3280
3281 sit_add_v4_addrs(idev);
3282
3283 if (dev->flags&IFF_POINTOPOINT)
3284 addrconf_add_mroute(dev);
3285 }
3286 #endif
3287
3288 #if IS_ENABLED(CONFIG_NET_IPGRE)
3289 static void addrconf_gre_config(struct net_device *dev)
3290 {
3291 struct inet6_dev *idev;
3292
3293 ASSERT_RTNL();
3294
3295 idev = ipv6_find_idev(dev);
3296 if (!idev) {
3297 pr_debug("%s: add_dev failed\n", __func__);
3298 return;
3299 }
3300
3301 addrconf_addr_gen(idev, true);
3302 if (dev->flags & IFF_POINTOPOINT)
3303 addrconf_add_mroute(dev);
3304 }
3305 #endif
3306
3307 static int fixup_permanent_addr(struct inet6_dev *idev,
3308 struct inet6_ifaddr *ifp)
3309 {
3310 /* !rt6i_node means the host route was removed from the
3311 * FIB, for example, if 'lo' device is taken down. In that
3312 * case regenerate the host route.
3313 */
3314 if (!ifp->rt || !ifp->rt->rt6i_node) {
3315 struct rt6_info *rt, *prev;
3316
3317 rt = addrconf_dst_alloc(idev, &ifp->addr, false);
3318 if (IS_ERR(rt))
3319 return PTR_ERR(rt);
3320
3321 /* ifp->rt can be accessed outside of rtnl */
3322 spin_lock(&ifp->lock);
3323 prev = ifp->rt;
3324 ifp->rt = rt;
3325 spin_unlock(&ifp->lock);
3326
3327 ip6_rt_put(prev);
3328 }
3329
3330 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3331 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3332 idev->dev, 0, 0);
3333 }
3334
3335 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3336 addrconf_dad_start(ifp);
3337
3338 return 0;
3339 }
3340
3341 static void addrconf_permanent_addr(struct net_device *dev)
3342 {
3343 struct inet6_ifaddr *ifp, *tmp;
3344 struct inet6_dev *idev;
3345
3346 idev = __in6_dev_get(dev);
3347 if (!idev)
3348 return;
3349
3350 write_lock_bh(&idev->lock);
3351
3352 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3353 if ((ifp->flags & IFA_F_PERMANENT) &&
3354 fixup_permanent_addr(idev, ifp) < 0) {
3355 write_unlock_bh(&idev->lock);
3356 in6_ifa_hold(ifp);
3357 ipv6_del_addr(ifp);
3358 write_lock_bh(&idev->lock);
3359
3360 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3361 idev->dev->name, &ifp->addr);
3362 }
3363 }
3364
3365 write_unlock_bh(&idev->lock);
3366 }
3367
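/*
 * Netdevice notifier: REGISTER and CHANGEMTU create the inet6_dev,
 * UP and CHANGE restore permanent addresses and run the per-link-type
 * configuration once the link is ready, DOWN and UNREGISTER tear
 * addresses down, and CHANGEUPPER flushes routes when the device is
 * enslaved to an L3 master such as a VRF.
 */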
3368 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3369 void *ptr)
3370 {
3371 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3372 struct netdev_notifier_changeupper_info *info;
3373 struct inet6_dev *idev = __in6_dev_get(dev);
3374 struct net *net = dev_net(dev);
3375 int run_pending = 0;
3376 int err;
3377
3378 switch (event) {
3379 case NETDEV_REGISTER:
3380 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3381 idev = ipv6_add_dev(dev);
3382 if (IS_ERR(idev))
3383 return notifier_from_errno(PTR_ERR(idev));
3384 }
3385 break;
3386
3387 case NETDEV_CHANGEMTU:
3388 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3389 if (dev->mtu < IPV6_MIN_MTU) {
3390 addrconf_ifdown(dev, dev != net->loopback_dev);
3391 break;
3392 }
3393
3394 if (idev) {
3395 rt6_mtu_change(dev, dev->mtu);
3396 idev->cnf.mtu6 = dev->mtu;
3397 break;
3398 }
3399
3400 /* allocate new idev */
3401 idev = ipv6_add_dev(dev);
3402 if (IS_ERR(idev))
3403 break;
3404
3405 /* device is still not ready */
3406 if (!(idev->if_flags & IF_READY))
3407 break;
3408
3409 run_pending = 1;
3410
3411 /* fall through */
3412
3413 case NETDEV_UP:
3414 case NETDEV_CHANGE:
3415 if (dev->flags & IFF_SLAVE)
3416 break;
3417
3418 if (idev && idev->cnf.disable_ipv6)
3419 break;
3420
3421 if (event == NETDEV_UP) {
3422 /* restore routes for permanent addresses */
3423 addrconf_permanent_addr(dev);
3424
3425 if (!addrconf_link_ready(dev)) {
3426 /* device is not ready yet. */
3427 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3428 dev->name);
3429 break;
3430 }
3431
3432 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3433 idev = ipv6_add_dev(dev);
3434
3435 if (!IS_ERR_OR_NULL(idev)) {
3436 idev->if_flags |= IF_READY;
3437 run_pending = 1;
3438 }
3439 } else if (event == NETDEV_CHANGE) {
3440 if (!addrconf_link_ready(dev)) {
3441 /* device is still not ready. */
3442 break;
3443 }
3444
3445 if (idev) {
3446 if (idev->if_flags & IF_READY) {
3447 /* device is already configured -
3448 * but resend MLD reports, we might
3449 * have roamed and need to update
3450 * multicast snooping switches
3451 */
3452 ipv6_mc_up(idev);
3453 break;
3454 }
3455 idev->if_flags |= IF_READY;
3456 }
3457
3458 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3459 dev->name);
3460
3461 run_pending = 1;
3462 }
3463
3464 switch (dev->type) {
3465 #if IS_ENABLED(CONFIG_IPV6_SIT)
3466 case ARPHRD_SIT:
3467 addrconf_sit_config(dev);
3468 break;
3469 #endif
3470 #if IS_ENABLED(CONFIG_NET_IPGRE)
3471 case ARPHRD_IPGRE:
3472 addrconf_gre_config(dev);
3473 break;
3474 #endif
3475 case ARPHRD_LOOPBACK:
3476 init_loopback(dev);
3477 break;
3478
3479 default:
3480 addrconf_dev_config(dev);
3481 break;
3482 }
3483
3484 if (!IS_ERR_OR_NULL(idev)) {
3485 if (run_pending)
3486 addrconf_dad_run(idev);
3487
3488 /*
3489 * If the MTU changed while the interface was down,
3490 * the new MTU must be reflected in the idev as well
3491 * as in the routes when the interface comes back up.
3492 */
3493 if (idev->cnf.mtu6 != dev->mtu &&
3494 dev->mtu >= IPV6_MIN_MTU) {
3495 rt6_mtu_change(dev, dev->mtu);
3496 idev->cnf.mtu6 = dev->mtu;
3497 }
3498 idev->tstamp = jiffies;
3499 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3500
3501 /*
3502 * If the MTU that changed while the interface was down
3503 * is lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3504 */
3505 if (dev->mtu < IPV6_MIN_MTU)
3506 addrconf_ifdown(dev, dev != net->loopback_dev);
3507 }
3508 break;
3509
3510 case NETDEV_DOWN:
3511 case NETDEV_UNREGISTER:
3512 /*
3513 * Remove all addresses from this interface.
3514 */
3515 addrconf_ifdown(dev, event != NETDEV_DOWN);
3516 break;
3517
3518 case NETDEV_CHANGENAME:
3519 if (idev) {
3520 snmp6_unregister_dev(idev);
3521 addrconf_sysctl_unregister(idev);
3522 err = addrconf_sysctl_register(idev);
3523 if (err)
3524 return notifier_from_errno(err);
3525 err = snmp6_register_dev(idev);
3526 if (err) {
3527 addrconf_sysctl_unregister(idev);
3528 return notifier_from_errno(err);
3529 }
3530 }
3531 break;
3532
3533 case NETDEV_PRE_TYPE_CHANGE:
3534 case NETDEV_POST_TYPE_CHANGE:
3535 if (idev)
3536 addrconf_type_change(dev, event);
3537 break;
3538
3539 case NETDEV_CHANGEUPPER:
3540 info = ptr;
3541
3542 /* flush all routes if dev is linked to or unlinked from
3543 * an L3 master device (e.g., VRF)
3544 */
3545 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3546 addrconf_ifdown(dev, 0);
3547 }
3548
3549 return NOTIFY_OK;
3550 }
3551
3552 /*
3553 * addrconf module should be notified of a device going up
3554 */
3555 static struct notifier_block ipv6_dev_notf = {
3556 .notifier_call = addrconf_notify,
3557 .priority = ADDRCONF_NOTIFY_PRIORITY,
3558 };
3559
3560 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3561 {
3562 struct inet6_dev *idev;
3563 ASSERT_RTNL();
3564
3565 idev = __in6_dev_get(dev);
3566
3567 if (event == NETDEV_POST_TYPE_CHANGE)
3568 ipv6_mc_remap(idev);
3569 else if (event == NETDEV_PRE_TYPE_CHANGE)
3570 ipv6_mc_unmap(idev);
3571 }
3572
3573 static bool addr_is_local(const struct in6_addr *addr)
3574 {
3575 return ipv6_addr_type(addr) &
3576 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3577 }
3578
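/*
 * how != 0 means the device is being unregistered and the inet6_dev is
 * destroyed; how == 0 is an administrative down.  In the latter case
 * permanent addresses may be kept, depending on keep_addr_on_down
 * (global or per interface), and are parked in PREDAD/tentative state
 * so that DAD runs again when the link comes back up.
 */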
3579 static int addrconf_ifdown(struct net_device *dev, int how)
3580 {
3581 struct net *net = dev_net(dev);
3582 struct inet6_dev *idev;
3583 struct inet6_ifaddr *ifa, *tmp;
3584 int _keep_addr;
3585 bool keep_addr;
3586 int state, i;
3587
3588 ASSERT_RTNL();
3589
3590 rt6_ifdown(net, dev);
3591 neigh_ifdown(&nd_tbl, dev);
3592
3593 idev = __in6_dev_get(dev);
3594 if (!idev)
3595 return -ENODEV;
3596
3597 /*
3598 * Step 1: remove reference to ipv6 device from parent device.
3599 * Do not dev_put!
3600 */
3601 if (how) {
3602 idev->dead = 1;
3603
3604 /* protected by rtnl_lock */
3605 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3606
3607 /* Step 1.5: remove snmp6 entry */
3608 snmp6_unregister_dev(idev);
3609
3610 }
3611
3612 /* aggregate the system setting and interface setting */
3613 _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3614 if (!_keep_addr)
3615 _keep_addr = idev->cnf.keep_addr_on_down;
3616
3617 /* combine the user config with event to determine if permanent
3618 * addresses are to be removed from address hash table
3619 */
3620 keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
3621
3622 /* Step 2: clear hash table */
3623 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3624 struct hlist_head *h = &inet6_addr_lst[i];
3625
3626 spin_lock_bh(&addrconf_hash_lock);
3627 restart:
3628 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3629 if (ifa->idev == idev) {
3630 addrconf_del_dad_work(ifa);
3631 /* combined flag + permanent flag decide if
3632 * address is retained on a down event
3633 */
3634 if (!keep_addr ||
3635 !(ifa->flags & IFA_F_PERMANENT) ||
3636 addr_is_local(&ifa->addr)) {
3637 hlist_del_init_rcu(&ifa->addr_lst);
3638 goto restart;
3639 }
3640 }
3641 }
3642 spin_unlock_bh(&addrconf_hash_lock);
3643 }
3644
3645 write_lock_bh(&idev->lock);
3646
3647 addrconf_del_rs_timer(idev);
3648
3649 /* Step 3: clear flags for stateless addrconf */
3650 if (!how)
3651 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3652
3653 /* Step 4: clear tempaddr list */
3654 while (!list_empty(&idev->tempaddr_list)) {
3655 ifa = list_first_entry(&idev->tempaddr_list,
3656 struct inet6_ifaddr, tmp_list);
3657 list_del(&ifa->tmp_list);
3658 write_unlock_bh(&idev->lock);
3659 spin_lock_bh(&ifa->lock);
3660
3661 if (ifa->ifpub) {
3662 in6_ifa_put(ifa->ifpub);
3663 ifa->ifpub = NULL;
3664 }
3665 spin_unlock_bh(&ifa->lock);
3666 in6_ifa_put(ifa);
3667 write_lock_bh(&idev->lock);
3668 }
3669
3670 /* re-combine the user config with event to determine if permanent
3671 * addresses are to be removed from the interface list
3672 */
3673 keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
3674
3675 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3676 struct rt6_info *rt = NULL;
3677 bool keep;
3678
3679 addrconf_del_dad_work(ifa);
3680
3681 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3682 !addr_is_local(&ifa->addr);
3683
3684 write_unlock_bh(&idev->lock);
3685 spin_lock_bh(&ifa->lock);
3686
3687 if (keep) {
3688 /* set state to skip the notifier below */
3689 state = INET6_IFADDR_STATE_DEAD;
3690 ifa->state = INET6_IFADDR_STATE_PREDAD;
3691 if (!(ifa->flags & IFA_F_NODAD))
3692 ifa->flags |= IFA_F_TENTATIVE;
3693
3694 rt = ifa->rt;
3695 ifa->rt = NULL;
3696 } else {
3697 state = ifa->state;
3698 ifa->state = INET6_IFADDR_STATE_DEAD;
3699 }
3700
3701 spin_unlock_bh(&ifa->lock);
3702
3703 if (rt)
3704 ip6_del_rt(rt);
3705
3706 if (state != INET6_IFADDR_STATE_DEAD) {
3707 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3708 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3709 } else {
3710 if (idev->cnf.forwarding)
3711 addrconf_leave_anycast(ifa);
3712 addrconf_leave_solict(ifa->idev, &ifa->addr);
3713 }
3714
3715 write_lock_bh(&idev->lock);
3716 if (!keep) {
3717 list_del_rcu(&ifa->if_list);
3718 in6_ifa_put(ifa);
3719 }
3720 }
3721
3722 write_unlock_bh(&idev->lock);
3723
3724 /* Step 5: Discard anycast and multicast list */
3725 if (how) {
3726 ipv6_ac_destroy_dev(idev);
3727 ipv6_mc_destroy_dev(idev);
3728 } else {
3729 ipv6_mc_down(idev);
3730 }
3731
3732 idev->tstamp = jiffies;
3733
3734 /* Last: Shoot the device (if unregistered) */
3735 if (how) {
3736 addrconf_sysctl_unregister(idev);
3737 neigh_parms_release(&nd_tbl, idev->nd_parms);
3738 neigh_ifdown(&nd_tbl, dev);
3739 in6_dev_put(idev);
3740 }
3741 return 0;
3742 }
3743
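/*
 * Router solicitation retransmit timer: up to rtr_solicits RS messages
 * are sent (unlimited if negative), with the interval between probes
 * backing off per RFC 3315 section 14 (roughly doubling with random
 * jitter, capped at rtr_solicit_max_interval).
 */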
3744 static void addrconf_rs_timer(struct timer_list *t)
3745 {
3746 struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3747 struct net_device *dev = idev->dev;
3748 struct in6_addr lladdr;
3749
3750 write_lock(&idev->lock);
3751 if (idev->dead || !(idev->if_flags & IF_READY))
3752 goto out;
3753
3754 if (!ipv6_accept_ra(idev))
3755 goto out;
3756
3757 /* Announcement received after solicitation was sent */
3758 if (idev->if_flags & IF_RA_RCVD)
3759 goto out;
3760
3761 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3762 write_unlock(&idev->lock);
3763 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3764 ndisc_send_rs(dev, &lladdr,
3765 &in6addr_linklocal_allrouters);
3766 else
3767 goto put;
3768
3769 write_lock(&idev->lock);
3770 idev->rs_interval = rfc3315_s14_backoff_update(
3771 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3772 /* The wait after the last probe can be shorter */
3773 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3774 idev->cnf.rtr_solicits) ?
3775 idev->cnf.rtr_solicit_delay :
3776 idev->rs_interval);
3777 } else {
3778 /*
3779 * Note: we no longer support the deprecated
3780 * "all on-link" assumption.
3781 */
3782 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3783 }
3784
3785 out:
3786 write_unlock(&idev->lock);
3787 put:
3788 in6_dev_put(idev);
3789 }
3790
3791 /*
3792 * Duplicate Address Detection
3793 */
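/*
 * Per RFC 4862, dad_transmits neighbour solicitations are sent to the
 * solicited-node multicast group while the address is tentative.  The
 * first probe is delayed by a random amount up to rtr_solicit_delay
 * unless the address is optimistic (RFC 4429).  With enhanced DAD
 * (RFC 7527) a random nonce is carried in the probes so a node can
 * tell its own looped-back solicitations from a genuine conflict.
 */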
3794 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3795 {
3796 unsigned long rand_num;
3797 struct inet6_dev *idev = ifp->idev;
3798 u64 nonce;
3799
3800 if (ifp->flags & IFA_F_OPTIMISTIC)
3801 rand_num = 0;
3802 else
3803 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3804
3805 nonce = 0;
3806 if (idev->cnf.enhanced_dad ||
3807 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3808 do
3809 get_random_bytes(&nonce, 6);
3810 while (nonce == 0);
3811 }
3812 ifp->dad_nonce = nonce;
3813 ifp->dad_probes = idev->cnf.dad_transmits;
3814 addrconf_mod_dad_work(ifp, rand_num);
3815 }
3816
3817 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3818 {
3819 struct inet6_dev *idev = ifp->idev;
3820 struct net_device *dev = idev->dev;
3821 bool bump_id, notify = false;
3822
3823 addrconf_join_solict(dev, &ifp->addr);
3824
3825 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3826
3827 read_lock_bh(&idev->lock);
3828 spin_lock(&ifp->lock);
3829 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3830 goto out;
3831
3832 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3833 (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
3834 idev->cnf.accept_dad < 1) ||
3835 !(ifp->flags&IFA_F_TENTATIVE) ||
3836 ifp->flags & IFA_F_NODAD) {
3837 bool send_na = false;
3838
3839 if (ifp->flags & IFA_F_TENTATIVE &&
3840 !(ifp->flags & IFA_F_OPTIMISTIC))
3841 send_na = true;
3842 bump_id = ifp->flags & IFA_F_TENTATIVE;
3843 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3844 spin_unlock(&ifp->lock);
3845 read_unlock_bh(&idev->lock);
3846
3847 addrconf_dad_completed(ifp, bump_id, send_na);
3848 return;
3849 }
3850
3851 if (!(idev->if_flags & IF_READY)) {
3852 spin_unlock(&ifp->lock);
3853 read_unlock_bh(&idev->lock);
3854 /*
3855 * If the device is not ready:
3856 * - keep it tentative if it is a permanent address.
3857 * - otherwise, kill it.
3858 */
3859 in6_ifa_hold(ifp);
3860 addrconf_dad_stop(ifp, 0);
3861 return;
3862 }
3863
3864 /*
3865 * Optimistic nodes can start receiving
3866 * frames right away.
3867 */
3868 if (ifp->flags & IFA_F_OPTIMISTIC) {
3869 ip6_ins_rt(ifp->rt);
3870 if (ipv6_use_optimistic_addr(dev_net(dev), idev)) {
3871 /* Because optimistic nodes can use this address,
3872 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3873 */
3874 notify = true;
3875 }
3876 }
3877
3878 addrconf_dad_kick(ifp);
3879 out:
3880 spin_unlock(&ifp->lock);
3881 read_unlock_bh(&idev->lock);
3882 if (notify)
3883 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3884 }
3885
3886 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
3887 {
3888 bool begin_dad = false;
3889
3890 spin_lock_bh(&ifp->lock);
3891 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
3892 ifp->state = INET6_IFADDR_STATE_PREDAD;
3893 begin_dad = true;
3894 }
3895 spin_unlock_bh(&ifp->lock);
3896
3897 if (begin_dad)
3898 addrconf_mod_dad_work(ifp, 0);
3899 }
3900
3901 static void addrconf_dad_work(struct work_struct *w)
3902 {
3903 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
3904 struct inet6_ifaddr,
3905 dad_work);
3906 struct inet6_dev *idev = ifp->idev;
3907 bool bump_id, disable_ipv6 = false;
3908 struct in6_addr mcaddr;
3909
3910 enum {
3911 DAD_PROCESS,
3912 DAD_BEGIN,
3913 DAD_ABORT,
3914 } action = DAD_PROCESS;
3915
3916 rtnl_lock();
3917
3918 spin_lock_bh(&ifp->lock);
3919 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
3920 action = DAD_BEGIN;
3921 ifp->state = INET6_IFADDR_STATE_DAD;
3922 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3923 action = DAD_ABORT;
3924 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3925
3926 if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
3927 idev->cnf.accept_dad > 1) &&
3928 !idev->cnf.disable_ipv6 &&
3929 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
3930 struct in6_addr addr;
3931
3932 addr.s6_addr32[0] = htonl(0xfe800000);
3933 addr.s6_addr32[1] = 0;
3934
3935 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
3936 ipv6_addr_equal(&ifp->addr, &addr)) {
3937 /* DAD failed for link-local based on MAC */
3938 idev->cnf.disable_ipv6 = 1;
3939
3940 pr_info("%s: IPv6 being disabled!\n",
3941 ifp->idev->dev->name);
3942 disable_ipv6 = true;
3943 }
3944 }
3945 }
3946 spin_unlock_bh(&ifp->lock);
3947
3948 if (action == DAD_BEGIN) {
3949 addrconf_dad_begin(ifp);
3950 goto out;
3951 } else if (action == DAD_ABORT) {
3952 in6_ifa_hold(ifp);
3953 addrconf_dad_stop(ifp, 1);
3954 if (disable_ipv6)
3955 addrconf_ifdown(idev->dev, 0);
3956 goto out;
3957 }
3958
3959 if (!ifp->dad_probes && addrconf_dad_end(ifp))
3960 goto out;
3961
3962 write_lock_bh(&idev->lock);
3963 if (idev->dead || !(idev->if_flags & IF_READY)) {
3964 write_unlock_bh(&idev->lock);
3965 goto out;
3966 }
3967
3968 spin_lock(&ifp->lock);
3969 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
3970 spin_unlock(&ifp->lock);
3971 write_unlock_bh(&idev->lock);
3972 goto out;
3973 }
3974
3975 if (ifp->dad_probes == 0) {
3976 bool send_na = false;
3977
3978 /*
3979 * DAD was successful
3980 */
3981
3982 if (ifp->flags & IFA_F_TENTATIVE &&
3983 !(ifp->flags & IFA_F_OPTIMISTIC))
3984 send_na = true;
3985 bump_id = ifp->flags & IFA_F_TENTATIVE;
3986 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3987 spin_unlock(&ifp->lock);
3988 write_unlock_bh(&idev->lock);
3989
3990 addrconf_dad_completed(ifp, bump_id, send_na);
3991
3992 goto out;
3993 }
3994
3995 ifp->dad_probes--;
3996 addrconf_mod_dad_work(ifp,
3997 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
3998 spin_unlock(&ifp->lock);
3999 write_unlock_bh(&idev->lock);
4000
4001 /* send a neighbour solicitation for our addr */
4002 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4003 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4004 ifp->dad_nonce);
4005 out:
4006 in6_ifa_put(ifp);
4007 rtnl_unlock();
4008 }
4009
4010 /* ifp->idev must be at least read locked */
4011 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4012 {
4013 struct inet6_ifaddr *ifpiter;
4014 struct inet6_dev *idev = ifp->idev;
4015
4016 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4017 if (ifpiter->scope > IFA_LINK)
4018 break;
4019 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4020 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4021 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4022 IFA_F_PERMANENT)
4023 return false;
4024 }
4025 return true;
4026 }
4027
4028 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4029 bool send_na)
4030 {
4031 struct net_device *dev = ifp->idev->dev;
4032 struct in6_addr lladdr;
4033 bool send_rs, send_mld;
4034
4035 addrconf_del_dad_work(ifp);
4036
4037 /*
4038 * Configure the address for reception. Now it is valid.
4039 */
4040
4041 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4042
4043 /* If the added address is link-local and we are prepared to process
4044 router advertisements, start sending router solicitations.
4045 */
4046
4047 read_lock_bh(&ifp->idev->lock);
4048 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4049 send_rs = send_mld &&
4050 ipv6_accept_ra(ifp->idev) &&
4051 ifp->idev->cnf.rtr_solicits != 0 &&
4052 (dev->flags&IFF_LOOPBACK) == 0;
4053 read_unlock_bh(&ifp->idev->lock);
4054
4055 /* While DAD is in progress, the MLD report's source address is the
4056 * unspecified address. Resend it with the proper link-local address now.
4057 */
4058 if (send_mld)
4059 ipv6_mc_dad_complete(ifp->idev);
4060
4061 /* send unsolicited NA if enabled */
4062 if (send_na &&
4063 (ifp->idev->cnf.ndisc_notify ||
4064 dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4065 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4066 /*router=*/ !!ifp->idev->cnf.forwarding,
4067 /*solicited=*/ false, /*override=*/ true,
4068 /*inc_opt=*/ true);
4069 }
4070
4071 if (send_rs) {
4072 /*
4073 * If a host has already performed a random delay
4074 * [...] as part of DAD [...] there is no need
4075 * to delay again before sending the first RS.
4076 */
4077 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4078 return;
4079 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4080
4081 write_lock_bh(&ifp->idev->lock);
4082 spin_lock(&ifp->lock);
4083 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4084 ifp->idev->cnf.rtr_solicit_interval);
4085 ifp->idev->rs_probes = 1;
4086 ifp->idev->if_flags |= IF_RS_SENT;
4087 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4088 spin_unlock(&ifp->lock);
4089 write_unlock_bh(&ifp->idev->lock);
4090 }
4091
4092 if (bump_id)
4093 rt_genid_bump_ipv6(dev_net(dev));
4094
4095 /* Make sure that a new temporary address will be created
4096 * before this temporary address becomes deprecated.
4097 */
4098 if (ifp->flags & IFA_F_TEMPORARY)
4099 addrconf_verify_rtnl();
4100 }
4101
4102 static void addrconf_dad_run(struct inet6_dev *idev)
4103 {
4104 struct inet6_ifaddr *ifp;
4105
4106 read_lock_bh(&idev->lock);
4107 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4108 spin_lock(&ifp->lock);
4109 if (ifp->flags & IFA_F_TENTATIVE &&
4110 ifp->state == INET6_IFADDR_STATE_DAD)
4111 addrconf_dad_kick(ifp);
4112 spin_unlock(&ifp->lock);
4113 }
4114 read_unlock_bh(&idev->lock);
4115 }
4116
4117 #ifdef CONFIG_PROC_FS
4118 struct if6_iter_state {
4119 struct seq_net_private p;
4120 int bucket;
4121 int offset;
4122 };
4123
4124 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4125 {
4126 struct if6_iter_state *state = seq->private;
4127 struct net *net = seq_file_net(seq);
4128 struct inet6_ifaddr *ifa = NULL;
4129 int p = 0;
4130
4131 /* initial bucket if pos is 0 */
4132 if (pos == 0) {
4133 state->bucket = 0;
4134 state->offset = 0;
4135 }
4136
4137 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4138 hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket],
4139 addr_lst) {
4140 if (!net_eq(dev_net(ifa->idev->dev), net))
4141 continue;
4142 /* sync with offset */
4143 if (p < state->offset) {
4144 p++;
4145 continue;
4146 }
4147 state->offset++;
4148 return ifa;
4149 }
4150
4151 /* prepare for next bucket */
4152 state->offset = 0;
4153 p = 0;
4154 }
4155 return NULL;
4156 }
4157
4158 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4159 struct inet6_ifaddr *ifa)
4160 {
4161 struct if6_iter_state *state = seq->private;
4162 struct net *net = seq_file_net(seq);
4163
4164 hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4165 if (!net_eq(dev_net(ifa->idev->dev), net))
4166 continue;
4167 state->offset++;
4168 return ifa;
4169 }
4170
4171 while (++state->bucket < IN6_ADDR_HSIZE) {
4172 state->offset = 0;
4173 hlist_for_each_entry_rcu(ifa,
4174 &inet6_addr_lst[state->bucket], addr_lst) {
4175 if (!net_eq(dev_net(ifa->idev->dev), net))
4176 continue;
4177 state->offset++;
4178 return ifa;
4179 }
4180 }
4181
4182 return NULL;
4183 }
4184
4185 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4186 __acquires(rcu)
4187 {
4188 rcu_read_lock();
4189 return if6_get_first(seq, *pos);
4190 }
4191
4192 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4193 {
4194 struct inet6_ifaddr *ifa;
4195
4196 ifa = if6_get_next(seq, v);
4197 ++*pos;
4198 return ifa;
4199 }
4200
4201 static void if6_seq_stop(struct seq_file *seq, void *v)
4202 __releases(rcu)
4203 {
4204 rcu_read_unlock();
4205 }
4206
4207 static int if6_seq_show(struct seq_file *seq, void *v)
4208 {
4209 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4210 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4211 &ifp->addr,
4212 ifp->idev->dev->ifindex,
4213 ifp->prefix_len,
4214 ifp->scope,
4215 (u8) ifp->flags,
4216 ifp->idev->dev->name);
4217 return 0;
4218 }
4219
4220 static const struct seq_operations if6_seq_ops = {
4221 .start = if6_seq_start,
4222 .next = if6_seq_next,
4223 .show = if6_seq_show,
4224 .stop = if6_seq_stop,
4225 };
4226
4227 static int if6_seq_open(struct inode *inode, struct file *file)
4228 {
4229 return seq_open_net(inode, file, &if6_seq_ops,
4230 sizeof(struct if6_iter_state));
4231 }
4232
4233 static const struct file_operations if6_fops = {
4234 .owner = THIS_MODULE,
4235 .open = if6_seq_open,
4236 .read = seq_read,
4237 .llseek = seq_lseek,
4238 .release = seq_release_net,
4239 };
4240
4241 static int __net_init if6_proc_net_init(struct net *net)
4242 {
4243 if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
4244 return -ENOMEM;
4245 return 0;
4246 }
4247
4248 static void __net_exit if6_proc_net_exit(struct net *net)
4249 {
4250 remove_proc_entry("if_inet6", net->proc_net);
4251 }
4252
4253 static struct pernet_operations if6_proc_net_ops = {
4254 .init = if6_proc_net_init,
4255 .exit = if6_proc_net_exit,
4256 };
4257
4258 int __init if6_proc_init(void)
4259 {
4260 return register_pernet_subsys(&if6_proc_net_ops);
4261 }
4262
4263 void if6_proc_exit(void)
4264 {
4265 unregister_pernet_subsys(&if6_proc_net_ops);
4266 }
4267 #endif /* CONFIG_PROC_FS */
4268
4269 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4270 /* Check if address is a home address configured on any interface. */
4271 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4272 {
4273 unsigned int hash = inet6_addr_hash(net, addr);
4274 struct inet6_ifaddr *ifp = NULL;
4275 int ret = 0;
4276
4277 rcu_read_lock();
4278 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
4279 if (!net_eq(dev_net(ifp->idev->dev), net))
4280 continue;
4281 if (ipv6_addr_equal(&ifp->addr, addr) &&
4282 (ifp->flags & IFA_F_HOMEADDRESS)) {
4283 ret = 1;
4284 break;
4285 }
4286 }
4287 rcu_read_unlock();
4288 return ret;
4289 }
4290 #endif
4291
4292 /*
4293 * Periodic address status verification
4294 */
4295
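/*
 * Walk all addresses under RTNL: delete those past valid_lft, mark
 * those past prefered_lft as deprecated, regenerate temporary addresses
 * shortly before they would be deprecated, and reschedule the worker
 * for the earliest upcoming event.
 */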
4296 static void addrconf_verify_rtnl(void)
4297 {
4298 unsigned long now, next, next_sec, next_sched;
4299 struct inet6_ifaddr *ifp;
4300 int i;
4301
4302 ASSERT_RTNL();
4303
4304 rcu_read_lock_bh();
4305 now = jiffies;
4306 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4307
4308 cancel_delayed_work(&addr_chk_work);
4309
4310 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4311 restart:
4312 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4313 unsigned long age;
4314
4315 /* Skip IFA_F_PERMANENT addresses whose preferred lifetime is
4316 * infinite. If preferred_lft was set to a finite, non-zero value
4317 * while valid_lft is infinite, the address still ages and is
4318 * handled below. */
4319 if ((ifp->flags & IFA_F_PERMANENT) &&
4320 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4321 continue;
4322
4323 spin_lock(&ifp->lock);
4324 /* We try to batch several events at once. */
4325 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4326
4327 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4328 age >= ifp->valid_lft) {
4329 spin_unlock(&ifp->lock);
4330 in6_ifa_hold(ifp);
4331 ipv6_del_addr(ifp);
4332 goto restart;
4333 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4334 spin_unlock(&ifp->lock);
4335 continue;
4336 } else if (age >= ifp->prefered_lft) {
4337 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4338 int deprecate = 0;
4339
4340 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4341 deprecate = 1;
4342 ifp->flags |= IFA_F_DEPRECATED;
4343 }
4344
4345 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4346 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4347 next = ifp->tstamp + ifp->valid_lft * HZ;
4348
4349 spin_unlock(&ifp->lock);
4350
4351 if (deprecate) {
4352 in6_ifa_hold(ifp);
4353
4354 ipv6_ifa_notify(0, ifp);
4355 in6_ifa_put(ifp);
4356 goto restart;
4357 }
4358 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4359 !(ifp->flags&IFA_F_TENTATIVE)) {
4360 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4361 ifp->idev->cnf.dad_transmits *
4362 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
4363
4364 if (age >= ifp->prefered_lft - regen_advance) {
4365 struct inet6_ifaddr *ifpub = ifp->ifpub;
4366 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4367 next = ifp->tstamp + ifp->prefered_lft * HZ;
4368 if (!ifp->regen_count && ifpub) {
4369 ifp->regen_count++;
4370 in6_ifa_hold(ifp);
4371 in6_ifa_hold(ifpub);
4372 spin_unlock(&ifp->lock);
4373
4374 spin_lock(&ifpub->lock);
4375 ifpub->regen_count = 0;
4376 spin_unlock(&ifpub->lock);
4377 rcu_read_unlock_bh();
4378 ipv6_create_tempaddr(ifpub, ifp, true);
4379 in6_ifa_put(ifpub);
4380 in6_ifa_put(ifp);
4381 rcu_read_lock_bh();
4382 goto restart;
4383 }
4384 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4385 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4386 spin_unlock(&ifp->lock);
4387 } else {
4388 /* ifp->prefered_lft <= ifp->valid_lft */
4389 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4390 next = ifp->tstamp + ifp->prefered_lft * HZ;
4391 spin_unlock(&ifp->lock);
4392 }
4393 }
4394 }
4395
4396 next_sec = round_jiffies_up(next);
4397 next_sched = next;
4398
4399 /* If rounded timeout is accurate enough, accept it. */
4400 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4401 next_sched = next_sec;
4402
4403 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4404 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4405 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4406
4407 ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4408 now, next, next_sec, next_sched);
4409 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4410 rcu_read_unlock_bh();
4411 }
4412
4413 static void addrconf_verify_work(struct work_struct *w)
4414 {
4415 rtnl_lock();
4416 addrconf_verify_rtnl();
4417 rtnl_unlock();
4418 }
4419
4420 static void addrconf_verify(void)
4421 {
4422 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4423 }
4424
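/*
 * Netlink address attribute semantics: when both IFA_LOCAL and
 * IFA_ADDRESS are present and differ, IFA_LOCAL is the local address
 * and IFA_ADDRESS the peer; otherwise whichever attribute is present
 * holds the local address and *peer_pfx stays NULL.
 */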
4425 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4426 struct in6_addr **peer_pfx)
4427 {
4428 struct in6_addr *pfx = NULL;
4429
4430 *peer_pfx = NULL;
4431
4432 if (addr)
4433 pfx = nla_data(addr);
4434
4435 if (local) {
4436 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4437 *peer_pfx = pfx;
4438 pfx = nla_data(local);
4439 }
4440
4441 return pfx;
4442 }
4443
4444 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4445 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4446 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4447 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4448 [IFA_FLAGS] = { .len = sizeof(u32) },
4449 };
4450
4451 static int
4452 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4453 struct netlink_ext_ack *extack)
4454 {
4455 struct net *net = sock_net(skb->sk);
4456 struct ifaddrmsg *ifm;
4457 struct nlattr *tb[IFA_MAX+1];
4458 struct in6_addr *pfx, *peer_pfx;
4459 u32 ifa_flags;
4460 int err;
4461
4462 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4463 extack);
4464 if (err < 0)
4465 return err;
4466
4467 ifm = nlmsg_data(nlh);
4468 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4469 if (!pfx)
4470 return -EINVAL;
4471
4472 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4473
4474 /* We ignore other flags so far. */
4475 ifa_flags &= IFA_F_MANAGETEMPADDR;
4476
4477 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4478 ifm->ifa_prefixlen);
4479 }
4480
4481 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
4482 u32 prefered_lft, u32 valid_lft)
4483 {
4484 u32 flags;
4485 clock_t expires;
4486 unsigned long timeout;
4487 bool was_managetempaddr;
4488 bool had_prefixroute;
4489
4490 ASSERT_RTNL();
4491
4492 if (!valid_lft || (prefered_lft > valid_lft))
4493 return -EINVAL;
4494
4495 if (ifa_flags & IFA_F_MANAGETEMPADDR &&
4496 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4497 return -EINVAL;
4498
4499 timeout = addrconf_timeout_fixup(valid_lft, HZ);
4500 if (addrconf_finite_timeout(timeout)) {
4501 expires = jiffies_to_clock_t(timeout * HZ);
4502 valid_lft = timeout;
4503 flags = RTF_EXPIRES;
4504 } else {
4505 expires = 0;
4506 flags = 0;
4507 ifa_flags |= IFA_F_PERMANENT;
4508 }
4509
4510 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
4511 if (addrconf_finite_timeout(timeout)) {
4512 if (timeout == 0)
4513 ifa_flags |= IFA_F_DEPRECATED;
4514 prefered_lft = timeout;
4515 }
4516
4517 spin_lock_bh(&ifp->lock);
4518 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4519 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4520 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4521 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4522 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4523 IFA_F_NOPREFIXROUTE);
4524 ifp->flags |= ifa_flags;
4525 ifp->tstamp = jiffies;
4526 ifp->valid_lft = valid_lft;
4527 ifp->prefered_lft = prefered_lft;
4528
4529 spin_unlock_bh(&ifp->lock);
4530 if (!(ifp->flags&IFA_F_TENTATIVE))
4531 ipv6_ifa_notify(0, ifp);
4532
4533 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
4534 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
4535 expires, flags);
4536 } else if (had_prefixroute) {
4537 enum cleanup_prefix_rt_t action;
4538 unsigned long rt_expires;
4539
4540 write_lock_bh(&ifp->idev->lock);
4541 action = check_cleanup_prefix_route(ifp, &rt_expires);
4542 write_unlock_bh(&ifp->idev->lock);
4543
4544 if (action != CLEANUP_PREFIX_RT_NOP) {
4545 cleanup_prefix_route(ifp, rt_expires,
4546 action == CLEANUP_PREFIX_RT_DEL);
4547 }
4548 }
4549
4550 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4551 if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4552 valid_lft = prefered_lft = 0;
4553 manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
4554 !was_managetempaddr, jiffies);
4555 }
4556
4557 addrconf_verify_rtnl();
4558
4559 return 0;
4560 }
4561
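/*
 * RTM_NEWADDR handler: create the address if it does not exist yet, or
 * modify it in place when NLM_F_REPLACE is set and NLM_F_EXCL is not.
 */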
4562 static int
4563 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4564 struct netlink_ext_ack *extack)
4565 {
4566 struct net *net = sock_net(skb->sk);
4567 struct ifaddrmsg *ifm;
4568 struct nlattr *tb[IFA_MAX+1];
4569 struct in6_addr *pfx, *peer_pfx;
4570 struct inet6_ifaddr *ifa;
4571 struct net_device *dev;
4572 u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
4573 u32 ifa_flags;
4574 int err;
4575
4576 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4577 extack);
4578 if (err < 0)
4579 return err;
4580
4581 ifm = nlmsg_data(nlh);
4582 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4583 if (!pfx)
4584 return -EINVAL;
4585
4586 if (tb[IFA_CACHEINFO]) {
4587 struct ifa_cacheinfo *ci;
4588
4589 ci = nla_data(tb[IFA_CACHEINFO]);
4590 valid_lft = ci->ifa_valid;
4591 preferred_lft = ci->ifa_prefered;
4592 } else {
4593 preferred_lft = INFINITY_LIFE_TIME;
4594 valid_lft = INFINITY_LIFE_TIME;
4595 }
4596
4597 dev = __dev_get_by_index(net, ifm->ifa_index);
4598 if (!dev)
4599 return -ENODEV;
4600
4601 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4602
4603 /* We ignore other flags so far. */
4604 ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4605 IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
4606
4607 ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
4608 if (!ifa) {
4609 /*
4610 * It would be best to check for !NLM_F_CREATE here but
4611 * userspace already relies on not having to provide this.
4612 */
4613 return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx,
4614 ifm->ifa_prefixlen, ifa_flags,
4615 preferred_lft, valid_lft, extack);
4616 }
4617
4618 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4619 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4620 err = -EEXIST;
4621 else
4622 err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);
4623
4624 in6_ifa_put(ifa);
4625
4626 return err;
4627 }
4628
4629 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4630 u8 scope, int ifindex)
4631 {
4632 struct ifaddrmsg *ifm;
4633
4634 ifm = nlmsg_data(nlh);
4635 ifm->ifa_family = AF_INET6;
4636 ifm->ifa_prefixlen = prefixlen;
4637 ifm->ifa_flags = flags;
4638 ifm->ifa_scope = scope;
4639 ifm->ifa_index = ifindex;
4640 }
4641
4642 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4643 unsigned long tstamp, u32 preferred, u32 valid)
4644 {
4645 struct ifa_cacheinfo ci;
4646
4647 ci.cstamp = cstamp_delta(cstamp);
4648 ci.tstamp = cstamp_delta(tstamp);
4649 ci.ifa_prefered = preferred;
4650 ci.ifa_valid = valid;
4651
4652 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4653 }
4654
4655 static inline int rt_scope(int ifa_scope)
4656 {
4657 if (ifa_scope & IFA_HOST)
4658 return RT_SCOPE_HOST;
4659 else if (ifa_scope & IFA_LINK)
4660 return RT_SCOPE_LINK;
4661 else if (ifa_scope & IFA_SITE)
4662 return RT_SCOPE_SITE;
4663 else
4664 return RT_SCOPE_UNIVERSE;
4665 }
4666
4667 static inline int inet6_ifaddr_msgsize(void)
4668 {
4669 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4670 + nla_total_size(16) /* IFA_LOCAL */
4671 + nla_total_size(16) /* IFA_ADDRESS */
4672 + nla_total_size(sizeof(struct ifa_cacheinfo))
4673 + nla_total_size(4) /* IFA_FLAGS */;
4674 }
4675
4676 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4677 u32 portid, u32 seq, int event, unsigned int flags)
4678 {
4679 struct nlmsghdr *nlh;
4680 u32 preferred, valid;
4681
4682 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4683 if (!nlh)
4684 return -EMSGSIZE;
4685
4686 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4687 ifa->idev->dev->ifindex);
4688
4689 if (!((ifa->flags&IFA_F_PERMANENT) &&
4690 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
4691 preferred = ifa->prefered_lft;
4692 valid = ifa->valid_lft;
4693 if (preferred != INFINITY_LIFE_TIME) {
4694 long tval = (jiffies - ifa->tstamp)/HZ;
4695 if (preferred > tval)
4696 preferred -= tval;
4697 else
4698 preferred = 0;
4699 if (valid != INFINITY_LIFE_TIME) {
4700 if (valid > tval)
4701 valid -= tval;
4702 else
4703 valid = 0;
4704 }
4705 }
4706 } else {
4707 preferred = INFINITY_LIFE_TIME;
4708 valid = INFINITY_LIFE_TIME;
4709 }
4710
4711 if (!ipv6_addr_any(&ifa->peer_addr)) {
4712 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
4713 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
4714 goto error;
4715 } else
4716 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
4717 goto error;
4718
4719 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
4720 goto error;
4721
4722 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
4723 goto error;
4724
4725 nlmsg_end(skb, nlh);
4726 return 0;
4727
4728 error:
4729 nlmsg_cancel(skb, nlh);
4730 return -EMSGSIZE;
4731 }
4732
4733 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
4734 u32 portid, u32 seq, int event, u16 flags)
4735 {
4736 struct nlmsghdr *nlh;
4737 u8 scope = RT_SCOPE_UNIVERSE;
4738 int ifindex = ifmca->idev->dev->ifindex;
4739
4740 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
4741 scope = RT_SCOPE_SITE;
4742
4743 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4744 if (!nlh)
4745 return -EMSGSIZE;
4746
4747 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4748 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
4749 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
4750 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4751 nlmsg_cancel(skb, nlh);
4752 return -EMSGSIZE;
4753 }
4754
4755 nlmsg_end(skb, nlh);
4756 return 0;
4757 }
4758
4759 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
4760 u32 portid, u32 seq, int event, unsigned int flags)
4761 {
4762 struct nlmsghdr *nlh;
4763 u8 scope = RT_SCOPE_UNIVERSE;
4764 int ifindex = ifaca->aca_idev->dev->ifindex;
4765
4766 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
4767 scope = RT_SCOPE_SITE;
4768
4769 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4770 if (!nlh)
4771 return -EMSGSIZE;
4772
4773 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4774 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
4775 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
4776 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4777 nlmsg_cancel(skb, nlh);
4778 return -EMSGSIZE;
4779 }
4780
4781 nlmsg_end(skb, nlh);
4782 return 0;
4783 }
4784
4785 enum addr_type_t {
4786 UNICAST_ADDR,
4787 MULTICAST_ADDR,
4788 ANYCAST_ADDR,
4789 };
4790
4791 /* called with rcu_read_lock() */
4792 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4793 struct netlink_callback *cb, enum addr_type_t type,
4794 int s_ip_idx, int *p_ip_idx)
4795 {
4796 struct ifmcaddr6 *ifmca;
4797 struct ifacaddr6 *ifaca;
4798 int err = 1;
4799 int ip_idx = *p_ip_idx;
4800
4801 read_lock_bh(&idev->lock);
4802 switch (type) {
4803 case UNICAST_ADDR: {
4804 struct inet6_ifaddr *ifa;
4805
4806 /* unicast address incl. temp addr */
4807 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4808 if (++ip_idx < s_ip_idx)
4809 continue;
4810 err = inet6_fill_ifaddr(skb, ifa,
4811 NETLINK_CB(cb->skb).portid,
4812 cb->nlh->nlmsg_seq,
4813 RTM_NEWADDR,
4814 NLM_F_MULTI);
4815 if (err < 0)
4816 break;
4817 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4818 }
4819 break;
4820 }
4821 case MULTICAST_ADDR:
4822 /* multicast address */
4823 for (ifmca = idev->mc_list; ifmca;
4824 ifmca = ifmca->next, ip_idx++) {
4825 if (ip_idx < s_ip_idx)
4826 continue;
4827 err = inet6_fill_ifmcaddr(skb, ifmca,
4828 NETLINK_CB(cb->skb).portid,
4829 cb->nlh->nlmsg_seq,
4830 RTM_GETMULTICAST,
4831 NLM_F_MULTI);
4832 if (err < 0)
4833 break;
4834 }
4835 break;
4836 case ANYCAST_ADDR:
4837 /* anycast address */
4838 for (ifaca = idev->ac_list; ifaca;
4839 ifaca = ifaca->aca_next, ip_idx++) {
4840 if (ip_idx < s_ip_idx)
4841 continue;
4842 err = inet6_fill_ifacaddr(skb, ifaca,
4843 NETLINK_CB(cb->skb).portid,
4844 cb->nlh->nlmsg_seq,
4845 RTM_GETANYCAST,
4846 NLM_F_MULTI);
4847 if (err < 0)
4848 break;
4849 }
4850 break;
4851 default:
4852 break;
4853 }
4854 read_unlock_bh(&idev->lock);
4855 *p_ip_idx = ip_idx;
4856 return err;
4857 }
4858
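/*
 * Multi-part dump: cb->args[0..2] hold the device hash bucket, the
 * device index within that bucket and the per-device address index,
 * so a continued dump resumes where the previous skb filled up.
 */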
4859 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
4860 enum addr_type_t type)
4861 {
4862 struct net *net = sock_net(skb->sk);
4863 int h, s_h;
4864 int idx, ip_idx;
4865 int s_idx, s_ip_idx;
4866 struct net_device *dev;
4867 struct inet6_dev *idev;
4868 struct hlist_head *head;
4869
4870 s_h = cb->args[0];
4871 s_idx = idx = cb->args[1];
4872 s_ip_idx = ip_idx = cb->args[2];
4873
4874 rcu_read_lock();
4875 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
4876 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4877 idx = 0;
4878 head = &net->dev_index_head[h];
4879 hlist_for_each_entry_rcu(dev, head, index_hlist) {
4880 if (idx < s_idx)
4881 goto cont;
4882 if (h > s_h || idx > s_idx)
4883 s_ip_idx = 0;
4884 ip_idx = 0;
4885 idev = __in6_dev_get(dev);
4886 if (!idev)
4887 goto cont;
4888
4889 if (in6_dump_addrs(idev, skb, cb, type,
4890 s_ip_idx, &ip_idx) < 0)
4891 goto done;
4892 cont:
4893 idx++;
4894 }
4895 }
4896 done:
4897 rcu_read_unlock();
4898 cb->args[0] = h;
4899 cb->args[1] = idx;
4900 cb->args[2] = ip_idx;
4901
4902 return skb->len;
4903 }
4904
4905 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
4906 {
4907 enum addr_type_t type = UNICAST_ADDR;
4908
4909 return inet6_dump_addr(skb, cb, type);
4910 }
4911
4912 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
4913 {
4914 enum addr_type_t type = MULTICAST_ADDR;
4915
4916 return inet6_dump_addr(skb, cb, type);
4917 }
4918
4919
4920 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
4921 {
4922 enum addr_type_t type = ANYCAST_ADDR;
4923
4924 return inet6_dump_addr(skb, cb, type);
4925 }
4926
4927 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4928 struct netlink_ext_ack *extack)
4929 {
4930 struct net *net = sock_net(in_skb->sk);
4931 struct ifaddrmsg *ifm;
4932 struct nlattr *tb[IFA_MAX+1];
4933 struct in6_addr *addr = NULL, *peer;
4934 struct net_device *dev = NULL;
4935 struct inet6_ifaddr *ifa;
4936 struct sk_buff *skb;
4937 int err;
4938
4939 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4940 extack);
4941 if (err < 0)
4942 return err;
4943
4944 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
4945 if (!addr)
4946 return -EINVAL;
4947
4948 ifm = nlmsg_data(nlh);
4949 if (ifm->ifa_index)
4950 dev = dev_get_by_index(net, ifm->ifa_index);
4951
4952 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
4953 if (!ifa) {
4954 err = -EADDRNOTAVAIL;
4955 goto errout;
4956 }
4957
4958 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
4959 if (!skb) {
4960 err = -ENOBUFS;
4961 goto errout_ifa;
4962 }
4963
4964 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
4965 nlh->nlmsg_seq, RTM_NEWADDR, 0);
4966 if (err < 0) {
4967 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4968 WARN_ON(err == -EMSGSIZE);
4969 kfree_skb(skb);
4970 goto errout_ifa;
4971 }
4972 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4973 errout_ifa:
4974 in6_ifa_put(ifa);
4975 errout:
4976 if (dev)
4977 dev_put(dev);
4978 return err;
4979 }
4980
4981 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
4982 {
4983 struct sk_buff *skb;
4984 struct net *net = dev_net(ifa->idev->dev);
4985 int err = -ENOBUFS;
4986
4987 /* Don't send a DELADDR notification for a TENTATIVE address,
4988 * since the NEWADDR notification is only sent after the
4989 * TENTATIVE flag has been cleared (i.e. when DAD has not failed).
4990 */
4991 if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) &&
4992 event == RTM_DELADDR)
4993 return;
4994
4995 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
4996 if (!skb)
4997 goto errout;
4998
4999 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
5000 if (err < 0) {
5001 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5002 WARN_ON(err == -EMSGSIZE);
5003 kfree_skb(skb);
5004 goto errout;
5005 }
5006 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5007 return;
5008 errout:
5009 if (err < 0)
5010 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5011 }
5012
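/*
 * Flatten the per-device configuration into the s32 array exported as
 * IFLA_INET6_CONF; slot positions must match the DEVCONF_* enum.
 */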
5013 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5014 __s32 *array, int bytes)
5015 {
5016 BUG_ON(bytes < (DEVCONF_MAX * 4));
5017
5018 memset(array, 0, bytes);
5019 array[DEVCONF_FORWARDING] = cnf->forwarding;
5020 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5021 array[DEVCONF_MTU6] = cnf->mtu6;
5022 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5023 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5024 array[DEVCONF_AUTOCONF] = cnf->autoconf;
5025 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5026 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5027 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5028 jiffies_to_msecs(cnf->rtr_solicit_interval);
5029 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5030 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5031 array[DEVCONF_RTR_SOLICIT_DELAY] =
5032 jiffies_to_msecs(cnf->rtr_solicit_delay);
5033 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5034 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5035 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5036 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5037 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5038 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5039 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5040 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5041 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5042 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5043 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5044 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5045 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5046 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5047 #ifdef CONFIG_IPV6_ROUTER_PREF
5048 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5049 array[DEVCONF_RTR_PROBE_INTERVAL] =
5050 jiffies_to_msecs(cnf->rtr_probe_interval);
5051 #ifdef CONFIG_IPV6_ROUTE_INFO
5052 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5053 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5054 #endif
5055 #endif
5056 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5057 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5058 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5059 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5060 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5061 #endif
5062 #ifdef CONFIG_IPV6_MROUTE
5063 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
5064 #endif
5065 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5066 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5067 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5068 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5069 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5070 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5071 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5072 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5073 /* we omit DEVCONF_STABLE_SECRET for now */
5074 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5075 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5076 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5077 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5078 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5079 #ifdef CONFIG_IPV6_SEG6_HMAC
5080 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5081 #endif
5082 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5083 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5084 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5085 array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5086 }
5087
5088 static inline size_t inet6_ifla6_size(void)
5089 {
5090 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5091 + nla_total_size(sizeof(struct ifla_cacheinfo))
5092 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5093 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5094 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5095 + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
5096 }
5097
5098 static inline size_t inet6_if_nlmsg_size(void)
5099 {
5100 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5101 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5102 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5103 + nla_total_size(4) /* IFLA_MTU */
5104 + nla_total_size(4) /* IFLA_LINK */
5105 + nla_total_size(1) /* IFLA_OPERSTATE */
5106 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5107 }
5108
5109 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5110 int bytes)
5111 {
5112 int i;
5113 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5114 BUG_ON(pad < 0);
5115
5116 /* Use put_unaligned() because stats may not be aligned for u64. */
5117 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5118 for (i = 1; i < ICMP6_MIB_MAX; i++)
5119 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5120
5121 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5122 }
5123
5124 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5125 int bytes, size_t syncpoff)
5126 {
5127 int i, c;
5128 u64 buff[IPSTATS_MIB_MAX];
5129 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5130
5131 BUG_ON(pad < 0);
5132
5133 memset(buff, 0, sizeof(buff));
5134 buff[0] = IPSTATS_MIB_MAX;
5135
5136 for_each_possible_cpu(c) {
5137 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5138 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5139 }
5140
5141 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5142 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5143 }
5144
5145 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5146 int bytes)
5147 {
5148 switch (attrtype) {
5149 case IFLA_INET6_STATS:
5150 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5151 offsetof(struct ipstats_mib, syncp));
5152 break;
5153 case IFLA_INET6_ICMP6STATS:
5154 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5155 break;
5156 }
5157 }
5158
5159 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5160 u32 ext_filter_mask)
5161 {
5162 struct nlattr *nla;
5163 struct ifla_cacheinfo ci;
5164
5165 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5166 goto nla_put_failure;
5167 ci.max_reasm_len = IPV6_MAXPLEN;
5168 ci.tstamp = cstamp_delta(idev->tstamp);
5169 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5170 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5171 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5172 goto nla_put_failure;
5173 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5174 if (!nla)
5175 goto nla_put_failure;
5176 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5177
5178 /* XXX - MC not implemented */
5179
5180 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5181 return 0;
5182
5183 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5184 if (!nla)
5185 goto nla_put_failure;
5186 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5187
5188 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5189 if (!nla)
5190 goto nla_put_failure;
5191 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5192
5193 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5194 if (!nla)
5195 goto nla_put_failure;
5196
5197 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5198 goto nla_put_failure;
5199
5200 read_lock_bh(&idev->lock);
5201 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5202 read_unlock_bh(&idev->lock);
5203
5204 return 0;
5205
5206 nla_put_failure:
5207 return -EMSGSIZE;
5208 }
5209
5210 static size_t inet6_get_link_af_size(const struct net_device *dev,
5211 u32 ext_filter_mask)
5212 {
5213 if (!__in6_dev_get(dev))
5214 return 0;
5215
5216 return inet6_ifla6_size();
5217 }
5218
5219 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5220 u32 ext_filter_mask)
5221 {
5222 struct inet6_dev *idev = __in6_dev_get(dev);
5223
5224 if (!idev)
5225 return -ENODATA;
5226
5227 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5228 return -EMSGSIZE;
5229
5230 return 0;
5231 }
5232
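/*
 * Install a new IFLA_INET6_TOKEN: the low 64 bits of @token replace the
 * interface identifier used for RA-derived addresses. Existing tokenized
 * addresses have their lifetimes zeroed so they are replaced on the next
 * router advertisement, which is solicited immediately when possible.
 */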
5233 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5234 {
5235 struct inet6_ifaddr *ifp;
5236 struct net_device *dev = idev->dev;
5237 bool clear_token, update_rs = false;
5238 struct in6_addr ll_addr;
5239
5240 ASSERT_RTNL();
5241
5242 if (!token)
5243 return -EINVAL;
5244 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5245 return -EINVAL;
5246 if (!ipv6_accept_ra(idev))
5247 return -EINVAL;
5248 if (idev->cnf.rtr_solicits == 0)
5249 return -EINVAL;
5250
5251 write_lock_bh(&idev->lock);
5252
5253 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5254 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5255
5256 write_unlock_bh(&idev->lock);
5257
5258 clear_token = ipv6_addr_any(token);
5259 if (clear_token)
5260 goto update_lft;
5261
5262 if (!idev->dead && (idev->if_flags & IF_READY) &&
5263 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5264 IFA_F_OPTIMISTIC)) {
5265 /* If the device is not ready yet, normal ifup will take
5266 * care of this. Otherwise, request our RS here.
5267 */
5268 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5269 update_rs = true;
5270 }
5271
5272 update_lft:
5273 write_lock_bh(&idev->lock);
5274
5275 if (update_rs) {
5276 idev->if_flags |= IF_RS_SENT;
5277 idev->rs_interval = rfc3315_s14_backoff_init(
5278 idev->cnf.rtr_solicit_interval);
5279 idev->rs_probes = 1;
5280 addrconf_mod_rs_timer(idev, idev->rs_interval);
5281 }
5282
5283 /* Well, that's kinda nasty ... */
5284 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5285 spin_lock(&ifp->lock);
5286 if (ifp->tokenized) {
5287 ifp->valid_lft = 0;
5288 ifp->prefered_lft = 0;
5289 }
5290 spin_unlock(&ifp->lock);
5291 }
5292
5293 write_unlock_bh(&idev->lock);
5294 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5295 addrconf_verify_rtnl();
5296 return 0;
5297 }
5298
5299 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5300 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5301 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5302 };
5303
5304 static int inet6_validate_link_af(const struct net_device *dev,
5305 const struct nlattr *nla)
5306 {
5307 struct nlattr *tb[IFLA_INET6_MAX + 1];
5308
5309 if (dev && !__in6_dev_get(dev))
5310 return -EAFNOSUPPORT;
5311
5312 return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy,
5313 NULL);
5314 }
5315
5316 static int check_addr_gen_mode(int mode)
5317 {
5318 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5319 mode != IN6_ADDR_GEN_MODE_NONE &&
5320 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5321 mode != IN6_ADDR_GEN_MODE_RANDOM)
5322 return -EINVAL;
5323 return 1;
5324 }
5325
5326 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5327 int mode)
5328 {
5329 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5330 !idev->cnf.stable_secret.initialized &&
5331 !net->ipv6.devconf_dflt->stable_secret.initialized)
5332 return -EINVAL;
5333 return 1;
5334 }
5335
5336 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5337 {
5338 int err = -EINVAL;
5339 struct inet6_dev *idev = __in6_dev_get(dev);
5340 struct nlattr *tb[IFLA_INET6_MAX + 1];
5341
5342 if (!idev)
5343 return -EAFNOSUPPORT;
5344
5345 if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5346 BUG();
5347
5348 if (tb[IFLA_INET6_TOKEN]) {
5349 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5350 if (err)
5351 return err;
5352 }
5353
5354 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5355 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5356
5357 if (check_addr_gen_mode(mode) < 0 ||
5358 check_stable_privacy(idev, dev_net(dev), mode) < 0)
5359 return -EINVAL;
5360
5361 idev->cnf.addr_gen_mode = mode;
5362 err = 0;
5363 }
5364
5365 return err;
5366 }
5367
5368 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5369 u32 portid, u32 seq, int event, unsigned int flags)
5370 {
5371 struct net_device *dev = idev->dev;
5372 struct ifinfomsg *hdr;
5373 struct nlmsghdr *nlh;
5374 void *protoinfo;
5375
5376 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5377 if (!nlh)
5378 return -EMSGSIZE;
5379
5380 hdr = nlmsg_data(nlh);
5381 hdr->ifi_family = AF_INET6;
5382 hdr->__ifi_pad = 0;
5383 hdr->ifi_type = dev->type;
5384 hdr->ifi_index = dev->ifindex;
5385 hdr->ifi_flags = dev_get_flags(dev);
5386 hdr->ifi_change = 0;
5387
5388 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5389 (dev->addr_len &&
5390 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5391 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5392 (dev->ifindex != dev_get_iflink(dev) &&
5393 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5394 nla_put_u8(skb, IFLA_OPERSTATE,
5395 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5396 goto nla_put_failure;
5397 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
5398 if (!protoinfo)
5399 goto nla_put_failure;
5400
5401 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5402 goto nla_put_failure;
5403
5404 nla_nest_end(skb, protoinfo);
5405 nlmsg_end(skb, nlh);
5406 return 0;
5407
5408 nla_put_failure:
5409 nlmsg_cancel(skb, nlh);
5410 return -EMSGSIZE;
5411 }
5412
5413 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5414 {
5415 struct net *net = sock_net(skb->sk);
5416 int h, s_h;
5417 int idx = 0, s_idx;
5418 struct net_device *dev;
5419 struct inet6_dev *idev;
5420 struct hlist_head *head;
5421
5422 s_h = cb->args[0];
5423 s_idx = cb->args[1];
5424
5425 rcu_read_lock();
5426 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5427 idx = 0;
5428 head = &net->dev_index_head[h];
5429 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5430 if (idx < s_idx)
5431 goto cont;
5432 idev = __in6_dev_get(dev);
5433 if (!idev)
5434 goto cont;
5435 if (inet6_fill_ifinfo(skb, idev,
5436 NETLINK_CB(cb->skb).portid,
5437 cb->nlh->nlmsg_seq,
5438 RTM_NEWLINK, NLM_F_MULTI) < 0)
5439 goto out;
5440 cont:
5441 idx++;
5442 }
5443 }
5444 out:
5445 rcu_read_unlock();
5446 cb->args[1] = idx;
5447 cb->args[0] = h;
5448
5449 return skb->len;
5450 }
5451
5452 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5453 {
5454 struct sk_buff *skb;
5455 struct net *net = dev_net(idev->dev);
5456 int err = -ENOBUFS;
5457
5458 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5459 if (!skb)
5460 goto errout;
5461
5462 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5463 if (err < 0) {
5464 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
5465 WARN_ON(err == -EMSGSIZE);
5466 kfree_skb(skb);
5467 goto errout;
5468 }
5469 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
5470 return;
5471 errout:
5472 if (err < 0)
5473 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
5474 }
5475
5476 static inline size_t inet6_prefix_nlmsg_size(void)
5477 {
5478 return NLMSG_ALIGN(sizeof(struct prefixmsg))
5479 + nla_total_size(sizeof(struct in6_addr))
5480 + nla_total_size(sizeof(struct prefix_cacheinfo));
5481 }
5482
5483 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
5484 struct prefix_info *pinfo, u32 portid, u32 seq,
5485 int event, unsigned int flags)
5486 {
5487 struct prefixmsg *pmsg;
5488 struct nlmsghdr *nlh;
5489 struct prefix_cacheinfo ci;
5490
5491 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
5492 if (!nlh)
5493 return -EMSGSIZE;
5494
5495 pmsg = nlmsg_data(nlh);
5496 pmsg->prefix_family = AF_INET6;
5497 pmsg->prefix_pad1 = 0;
5498 pmsg->prefix_pad2 = 0;
5499 pmsg->prefix_ifindex = idev->dev->ifindex;
5500 pmsg->prefix_len = pinfo->prefix_len;
5501 pmsg->prefix_type = pinfo->type;
5502 pmsg->prefix_pad3 = 0;
5503 pmsg->prefix_flags = 0;
5504 if (pinfo->onlink)
5505 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
5506 if (pinfo->autoconf)
5507 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
5508
5509 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
5510 goto nla_put_failure;
5511 ci.preferred_time = ntohl(pinfo->prefered);
5512 ci.valid_time = ntohl(pinfo->valid);
5513 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
5514 goto nla_put_failure;
5515 nlmsg_end(skb, nlh);
5516 return 0;
5517
5518 nla_put_failure:
5519 nlmsg_cancel(skb, nlh);
5520 return -EMSGSIZE;
5521 }
5522
5523 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
5524 struct prefix_info *pinfo)
5525 {
5526 struct sk_buff *skb;
5527 struct net *net = dev_net(idev->dev);
5528 int err = -ENOBUFS;
5529
5530 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
5531 if (!skb)
5532 goto errout;
5533
5534 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
5535 if (err < 0) {
5536 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
5537 WARN_ON(err == -EMSGSIZE);
5538 kfree_skb(skb);
5539 goto errout;
5540 }
5541 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
5542 return;
5543 errout:
5544 if (err < 0)
5545 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
5546 }
5547
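/*
 * Notify userspace about an address change and keep routing state in
 * sync: insert or delete the host route for the address, join or leave
 * the anycast group on forwarding interfaces, and add or remove the
 * /128 route towards a configured peer address.
 */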
5548 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5549 {
5550 struct net *net = dev_net(ifp->idev->dev);
5551
5552 if (event)
5553 ASSERT_RTNL();
5554
5555 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
5556
5557 switch (event) {
5558 case RTM_NEWADDR:
5559 /*
5560 * If the address was optimistic
5561 * we inserted the route at the start of
5562 * our DAD process, so we don't need
5563 * to do it again
5564 */
5565 if (!rcu_access_pointer(ifp->rt->rt6i_node))
5566 ip6_ins_rt(ifp->rt);
5567 if (ifp->idev->cnf.forwarding)
5568 addrconf_join_anycast(ifp);
5569 if (!ipv6_addr_any(&ifp->peer_addr))
5570 addrconf_prefix_route(&ifp->peer_addr, 128,
5571 ifp->idev->dev, 0, 0);
5572 break;
5573 case RTM_DELADDR:
5574 if (ifp->idev->cnf.forwarding)
5575 addrconf_leave_anycast(ifp);
5576 addrconf_leave_solict(ifp->idev, &ifp->addr);
5577 if (!ipv6_addr_any(&ifp->peer_addr)) {
5578 struct rt6_info *rt;
5579
5580 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5581 ifp->idev->dev, 0, 0);
5582 if (rt)
5583 ip6_del_rt(rt);
5584 }
5585 if (ifp->rt) {
5586 if (dst_hold_safe(&ifp->rt->dst))
5587 ip6_del_rt(ifp->rt);
5588 }
5589 rt_genid_bump_ipv6(net);
5590 break;
5591 }
5592 atomic_inc(&net->ipv6.dev_addr_genid);
5593 }
5594
5595 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5596 {
5597 rcu_read_lock_bh();
5598 if (likely(ifp->idev->dead == 0))
5599 __ipv6_ifa_notify(event, ifp);
5600 rcu_read_unlock_bh();
5601 }
5602
5603 #ifdef CONFIG_SYSCTL
5604
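/*
 * Common pattern for the sysctl handlers below: copy the ctl_table,
 * point ->data at a stack-local value, let proc_dointvec() work on the
 * copy, and only commit the change under RTNL in the fixup helper.
 */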
5605 static
5606 int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
5607 void __user *buffer, size_t *lenp, loff_t *ppos)
5608 {
5609 int *valp = ctl->data;
5610 int val = *valp;
5611 loff_t pos = *ppos;
5612 struct ctl_table lctl;
5613 int ret;
5614
5615 /*
5616 * ctl->data points to idev->cnf.forwarding; we should
5617 * not modify it until we hold the rtnl lock.
5618 */
5619 lctl = *ctl;
5620 lctl.data = &val;
5621
5622 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5623
5624 if (write)
5625 ret = addrconf_fixup_forwarding(ctl, valp, val);
5626 if (ret)
5627 *ppos = pos;
5628 return ret;
5629 }
5630
5631 static
5632 int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
5633 void __user *buffer, size_t *lenp, loff_t *ppos)
5634 {
5635 struct inet6_dev *idev = ctl->extra1;
5636 int min_mtu = IPV6_MIN_MTU;
5637 struct ctl_table lctl;
5638
5639 lctl = *ctl;
5640 lctl.extra1 = &min_mtu;
5641 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
5642
5643 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
5644 }
5645
5646 static void dev_disable_change(struct inet6_dev *idev)
5647 {
5648 struct netdev_notifier_info info;
5649
5650 if (!idev || !idev->dev)
5651 return;
5652
5653 netdev_notifier_info_init(&info, idev->dev);
5654 if (idev->cnf.disable_ipv6)
5655 addrconf_notify(NULL, NETDEV_DOWN, &info);
5656 else
5657 addrconf_notify(NULL, NETDEV_UP, &info);
5658 }
5659
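/*
 * Apply a new disable_ipv6 value to every device in the namespace,
 * replaying NETDEV_DOWN/NETDEV_UP for each device whose effective
 * state actually changes.
 */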
5660 static void addrconf_disable_change(struct net *net, __s32 newf)
5661 {
5662 struct net_device *dev;
5663 struct inet6_dev *idev;
5664
5665 for_each_netdev(net, dev) {
5666 idev = __in6_dev_get(dev);
5667 if (idev) {
5668 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
5669 idev->cnf.disable_ipv6 = newf;
5670 if (changed)
5671 dev_disable_change(idev);
5672 }
5673 }
5674 }
5675
5676 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
5677 {
5678 struct net *net;
5679 int old;
5680
5681 if (!rtnl_trylock())
5682 return restart_syscall();
5683
5684 net = (struct net *)table->extra2;
5685 old = *p;
5686 *p = newf;
5687
5688 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
5689 rtnl_unlock();
5690 return 0;
5691 }
5692
5693 if (p == &net->ipv6.devconf_all->disable_ipv6) {
5694 net->ipv6.devconf_dflt->disable_ipv6 = newf;
5695 addrconf_disable_change(net, newf);
5696 } else if ((!newf) ^ (!old))
5697 dev_disable_change((struct inet6_dev *)table->extra1);
5698
5699 rtnl_unlock();
5700 return 0;
5701 }
5702
5703 static
5704 int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
5705 void __user *buffer, size_t *lenp, loff_t *ppos)
5706 {
5707 int *valp = ctl->data;
5708 int val = *valp;
5709 loff_t pos = *ppos;
5710 struct ctl_table lctl;
5711 int ret;
5712
5713 /*
5714 * ctl->data points to idev->cnf.disable_ipv6; we should
5715 * not modify it until we hold the rtnl lock.
5716 */
5717 lctl = *ctl;
5718 lctl.data = &val;
5719
5720 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5721
5722 if (write)
5723 ret = addrconf_disable_ipv6(ctl, valp, val);
5724 if (ret)
5725 *ppos = pos;
5726 return ret;
5727 }
5728
5729 static
5730 int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
5731 void __user *buffer, size_t *lenp, loff_t *ppos)
5732 {
5733 int *valp = ctl->data;
5734 int ret;
5735 int old, new;
5736
5737 old = *valp;
5738 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5739 new = *valp;
5740
5741 if (write && old != new) {
5742 struct net *net = ctl->extra2;
5743
5744 if (!rtnl_trylock())
5745 return restart_syscall();
5746
5747 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
5748 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5749 NETCONFA_PROXY_NEIGH,
5750 NETCONFA_IFINDEX_DEFAULT,
5751 net->ipv6.devconf_dflt);
5752 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
5753 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5754 NETCONFA_PROXY_NEIGH,
5755 NETCONFA_IFINDEX_ALL,
5756 net->ipv6.devconf_all);
5757 else {
5758 struct inet6_dev *idev = ctl->extra1;
5759
5760 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5761 NETCONFA_PROXY_NEIGH,
5762 idev->dev->ifindex,
5763 &idev->cnf);
5764 }
5765 rtnl_unlock();
5766 }
5767
5768 return ret;
5769 }
5770
5771 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
5772 void __user *buffer, size_t *lenp,
5773 loff_t *ppos)
5774 {
5775 int ret = 0;
5776 int new_val;
5777 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
5778 struct net *net = (struct net *)ctl->extra2;
5779
5780 if (!rtnl_trylock())
5781 return restart_syscall();
5782
5783 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5784
5785 if (write) {
5786 new_val = *((int *)ctl->data);
5787
5788 if (check_addr_gen_mode(new_val) < 0) {
5789 ret = -EINVAL;
5790 goto out;
5791 }
5792
5793 /* request for default */
5794 if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
5795 ipv6_devconf_dflt.addr_gen_mode = new_val;
5796
5797 /* request for individual net device */
5798 } else {
5799 if (!idev)
5800 goto out;
5801
5802 if (check_stable_privacy(idev, net, new_val) < 0) {
5803 ret = -EINVAL;
5804 goto out;
5805 }
5806
5807 if (idev->cnf.addr_gen_mode != new_val) {
5808 idev->cnf.addr_gen_mode = new_val;
5809 addrconf_dev_config(idev->dev);
5810 }
5811 }
5812 }
5813
5814 out:
5815 rtnl_unlock();
5816
5817 return ret;
5818 }
5819
5820 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
5821 void __user *buffer, size_t *lenp,
5822 loff_t *ppos)
5823 {
5824 int err;
5825 struct in6_addr addr;
5826 char str[IPV6_MAX_STRLEN];
5827 struct ctl_table lctl = *ctl;
5828 struct net *net = ctl->extra2;
5829 struct ipv6_stable_secret *secret = ctl->data;
5830
5831 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
5832 return -EIO;
5833
5834 lctl.maxlen = IPV6_MAX_STRLEN;
5835 lctl.data = str;
5836
5837 if (!rtnl_trylock())
5838 return restart_syscall();
5839
5840 if (!write && !secret->initialized) {
5841 err = -EIO;
5842 goto out;
5843 }
5844
5845 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
5846 if (err >= sizeof(str)) {
5847 err = -EIO;
5848 goto out;
5849 }
5850
5851 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
5852 if (err || !write)
5853 goto out;
5854
5855 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
5856 err = -EIO;
5857 goto out;
5858 }
5859
5860 secret->initialized = true;
5861 secret->secret = addr;
5862
5863 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
5864 struct net_device *dev;
5865
5866 for_each_netdev(net, dev) {
5867 struct inet6_dev *idev = __in6_dev_get(dev);
5868
5869 if (idev) {
5870 idev->cnf.addr_gen_mode =
5871 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5872 }
5873 }
5874 } else {
5875 struct inet6_dev *idev = ctl->extra1;
5876
5877 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5878 }
5879
5880 out:
5881 rtnl_unlock();
5882
5883 return err;
5884 }
5885
5886 static
5887 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
5888 int write,
5889 void __user *buffer,
5890 size_t *lenp,
5891 loff_t *ppos)
5892 {
5893 int *valp = ctl->data;
5894 int val = *valp;
5895 loff_t pos = *ppos;
5896 struct ctl_table lctl;
5897 int ret;
5898
5899 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
5900 * we should not modify it until we hold the rtnl lock.
5901 */
5902 lctl = *ctl;
5903 lctl.data = &val;
5904
5905 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5906
5907 if (write)
5908 ret = addrconf_fixup_linkdown(ctl, valp, val);
5909 if (ret)
5910 *ppos = pos;
5911 return ret;
5912 }
5913
5914 static
5915 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
5916 {
5917 if (rt) {
5918 if (action)
5919 rt->dst.flags |= DST_NOPOLICY;
5920 else
5921 rt->dst.flags &= ~DST_NOPOLICY;
5922 }
5923 }
5924
5925 static
5926 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
5927 {
5928 struct inet6_ifaddr *ifa;
5929
5930 read_lock_bh(&idev->lock);
5931 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5932 spin_lock(&ifa->lock);
5933 if (ifa->rt) {
5934 struct rt6_info *rt = ifa->rt;
5935 int cpu;
5936
5937 rcu_read_lock();
5938 addrconf_set_nopolicy(ifa->rt, val);
5939 if (rt->rt6i_pcpu) {
5940 for_each_possible_cpu(cpu) {
5941 struct rt6_info **rtp;
5942
5943 rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
5944 addrconf_set_nopolicy(*rtp, val);
5945 }
5946 }
5947 rcu_read_unlock();
5948 }
5949 spin_unlock(&ifa->lock);
5950 }
5951 read_unlock_bh(&idev->lock);
5952 }
5953
5954 static
5955 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
5956 {
5957 struct inet6_dev *idev;
5958 struct net *net;
5959
5960 if (!rtnl_trylock())
5961 return restart_syscall();
5962
5963 *valp = val;
5964
5965 net = (struct net *)ctl->extra2;
5966 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
5967 rtnl_unlock();
5968 return 0;
5969 }
5970
5971 if (valp == &net->ipv6.devconf_all->disable_policy) {
5972 struct net_device *dev;
5973
5974 for_each_netdev(net, dev) {
5975 idev = __in6_dev_get(dev);
5976 if (idev)
5977 addrconf_disable_policy_idev(idev, val);
5978 }
5979 } else {
5980 idev = (struct inet6_dev *)ctl->extra1;
5981 addrconf_disable_policy_idev(idev, val);
5982 }
5983
5984 rtnl_unlock();
5985 return 0;
5986 }
5987
5988 static
5989 int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
5990 void __user *buffer, size_t *lenp,
5991 loff_t *ppos)
5992 {
5993 int *valp = ctl->data;
5994 int val = *valp;
5995 loff_t pos = *ppos;
5996 struct ctl_table lctl;
5997 int ret;
5998
5999 lctl = *ctl;
6000 lctl.data = &val;
6001 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6002
6003 if (write && (*valp != val))
6004 ret = addrconf_disable_policy(ctl, valp, val);
6005
6006 if (ret)
6007 *ppos = pos;
6008
6009 return ret;
6010 }
6011
6012 static int minus_one = -1;
6013 static const int zero = 0;
6014 static const int one = 1;
6015 static const int two_five_five = 255;
6016
6017 static const struct ctl_table addrconf_sysctl[] = {
6018 {
6019 .procname = "forwarding",
6020 .data = &ipv6_devconf.forwarding,
6021 .maxlen = sizeof(int),
6022 .mode = 0644,
6023 .proc_handler = addrconf_sysctl_forward,
6024 },
6025 {
6026 .procname = "hop_limit",
6027 .data = &ipv6_devconf.hop_limit,
6028 .maxlen = sizeof(int),
6029 .mode = 0644,
6030 .proc_handler = proc_dointvec_minmax,
6031 .extra1 = (void *)&one,
6032 .extra2 = (void *)&two_five_five,
6033 },
6034 {
6035 .procname = "mtu",
6036 .data = &ipv6_devconf.mtu6,
6037 .maxlen = sizeof(int),
6038 .mode = 0644,
6039 .proc_handler = addrconf_sysctl_mtu,
6040 },
6041 {
6042 .procname = "accept_ra",
6043 .data = &ipv6_devconf.accept_ra,
6044 .maxlen = sizeof(int),
6045 .mode = 0644,
6046 .proc_handler = proc_dointvec,
6047 },
6048 {
6049 .procname = "accept_redirects",
6050 .data = &ipv6_devconf.accept_redirects,
6051 .maxlen = sizeof(int),
6052 .mode = 0644,
6053 .proc_handler = proc_dointvec,
6054 },
6055 {
6056 .procname = "autoconf",
6057 .data = &ipv6_devconf.autoconf,
6058 .maxlen = sizeof(int),
6059 .mode = 0644,
6060 .proc_handler = proc_dointvec,
6061 },
6062 {
6063 .procname = "dad_transmits",
6064 .data = &ipv6_devconf.dad_transmits,
6065 .maxlen = sizeof(int),
6066 .mode = 0644,
6067 .proc_handler = proc_dointvec,
6068 },
6069 {
6070 .procname = "router_solicitations",
6071 .data = &ipv6_devconf.rtr_solicits,
6072 .maxlen = sizeof(int),
6073 .mode = 0644,
6074 .proc_handler = proc_dointvec_minmax,
6075 .extra1 = &minus_one,
6076 },
6077 {
6078 .procname = "router_solicitation_interval",
6079 .data = &ipv6_devconf.rtr_solicit_interval,
6080 .maxlen = sizeof(int),
6081 .mode = 0644,
6082 .proc_handler = proc_dointvec_jiffies,
6083 },
6084 {
6085 .procname = "router_solicitation_max_interval",
6086 .data = &ipv6_devconf.rtr_solicit_max_interval,
6087 .maxlen = sizeof(int),
6088 .mode = 0644,
6089 .proc_handler = proc_dointvec_jiffies,
6090 },
6091 {
6092 .procname = "router_solicitation_delay",
6093 .data = &ipv6_devconf.rtr_solicit_delay,
6094 .maxlen = sizeof(int),
6095 .mode = 0644,
6096 .proc_handler = proc_dointvec_jiffies,
6097 },
6098 {
6099 .procname = "force_mld_version",
6100 .data = &ipv6_devconf.force_mld_version,
6101 .maxlen = sizeof(int),
6102 .mode = 0644,
6103 .proc_handler = proc_dointvec,
6104 },
6105 {
6106 .procname = "mldv1_unsolicited_report_interval",
6107 .data =
6108 &ipv6_devconf.mldv1_unsolicited_report_interval,
6109 .maxlen = sizeof(int),
6110 .mode = 0644,
6111 .proc_handler = proc_dointvec_ms_jiffies,
6112 },
6113 {
6114 .procname = "mldv2_unsolicited_report_interval",
6115 .data =
6116 &ipv6_devconf.mldv2_unsolicited_report_interval,
6117 .maxlen = sizeof(int),
6118 .mode = 0644,
6119 .proc_handler = proc_dointvec_ms_jiffies,
6120 },
6121 {
6122 .procname = "use_tempaddr",
6123 .data = &ipv6_devconf.use_tempaddr,
6124 .maxlen = sizeof(int),
6125 .mode = 0644,
6126 .proc_handler = proc_dointvec,
6127 },
6128 {
6129 .procname = "temp_valid_lft",
6130 .data = &ipv6_devconf.temp_valid_lft,
6131 .maxlen = sizeof(int),
6132 .mode = 0644,
6133 .proc_handler = proc_dointvec,
6134 },
6135 {
6136 .procname = "temp_prefered_lft",
6137 .data = &ipv6_devconf.temp_prefered_lft,
6138 .maxlen = sizeof(int),
6139 .mode = 0644,
6140 .proc_handler = proc_dointvec,
6141 },
6142 {
6143 .procname = "regen_max_retry",
6144 .data = &ipv6_devconf.regen_max_retry,
6145 .maxlen = sizeof(int),
6146 .mode = 0644,
6147 .proc_handler = proc_dointvec,
6148 },
6149 {
6150 .procname = "max_desync_factor",
6151 .data = &ipv6_devconf.max_desync_factor,
6152 .maxlen = sizeof(int),
6153 .mode = 0644,
6154 .proc_handler = proc_dointvec,
6155 },
6156 {
6157 .procname = "max_addresses",
6158 .data = &ipv6_devconf.max_addresses,
6159 .maxlen = sizeof(int),
6160 .mode = 0644,
6161 .proc_handler = proc_dointvec,
6162 },
6163 {
6164 .procname = "accept_ra_defrtr",
6165 .data = &ipv6_devconf.accept_ra_defrtr,
6166 .maxlen = sizeof(int),
6167 .mode = 0644,
6168 .proc_handler = proc_dointvec,
6169 },
6170 {
6171 .procname = "accept_ra_min_hop_limit",
6172 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6173 .maxlen = sizeof(int),
6174 .mode = 0644,
6175 .proc_handler = proc_dointvec,
6176 },
6177 {
6178 .procname = "accept_ra_pinfo",
6179 .data = &ipv6_devconf.accept_ra_pinfo,
6180 .maxlen = sizeof(int),
6181 .mode = 0644,
6182 .proc_handler = proc_dointvec,
6183 },
6184 #ifdef CONFIG_IPV6_ROUTER_PREF
6185 {
6186 .procname = "accept_ra_rtr_pref",
6187 .data = &ipv6_devconf.accept_ra_rtr_pref,
6188 .maxlen = sizeof(int),
6189 .mode = 0644,
6190 .proc_handler = proc_dointvec,
6191 },
6192 {
6193 .procname = "router_probe_interval",
6194 .data = &ipv6_devconf.rtr_probe_interval,
6195 .maxlen = sizeof(int),
6196 .mode = 0644,
6197 .proc_handler = proc_dointvec_jiffies,
6198 },
6199 #ifdef CONFIG_IPV6_ROUTE_INFO
6200 {
6201 .procname = "accept_ra_rt_info_min_plen",
6202 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6203 .maxlen = sizeof(int),
6204 .mode = 0644,
6205 .proc_handler = proc_dointvec,
6206 },
6207 {
6208 .procname = "accept_ra_rt_info_max_plen",
6209 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6210 .maxlen = sizeof(int),
6211 .mode = 0644,
6212 .proc_handler = proc_dointvec,
6213 },
6214 #endif
6215 #endif
6216 {
6217 .procname = "proxy_ndp",
6218 .data = &ipv6_devconf.proxy_ndp,
6219 .maxlen = sizeof(int),
6220 .mode = 0644,
6221 .proc_handler = addrconf_sysctl_proxy_ndp,
6222 },
6223 {
6224 .procname = "accept_source_route",
6225 .data = &ipv6_devconf.accept_source_route,
6226 .maxlen = sizeof(int),
6227 .mode = 0644,
6228 .proc_handler = proc_dointvec,
6229 },
6230 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6231 {
6232 .procname = "optimistic_dad",
6233 .data = &ipv6_devconf.optimistic_dad,
6234 .maxlen = sizeof(int),
6235 .mode = 0644,
6236 .proc_handler = proc_dointvec,
6237 },
6238 {
6239 .procname = "use_optimistic",
6240 .data = &ipv6_devconf.use_optimistic,
6241 .maxlen = sizeof(int),
6242 .mode = 0644,
6243 .proc_handler = proc_dointvec,
6244 },
6245 #endif
6246 #ifdef CONFIG_IPV6_MROUTE
6247 {
6248 .procname = "mc_forwarding",
6249 .data = &ipv6_devconf.mc_forwarding,
6250 .maxlen = sizeof(int),
6251 .mode = 0444,
6252 .proc_handler = proc_dointvec,
6253 },
6254 #endif
6255 {
6256 .procname = "disable_ipv6",
6257 .data = &ipv6_devconf.disable_ipv6,
6258 .maxlen = sizeof(int),
6259 .mode = 0644,
6260 .proc_handler = addrconf_sysctl_disable,
6261 },
6262 {
6263 .procname = "accept_dad",
6264 .data = &ipv6_devconf.accept_dad,
6265 .maxlen = sizeof(int),
6266 .mode = 0644,
6267 .proc_handler = proc_dointvec,
6268 },
6269 {
6270 .procname = "force_tllao",
6271 .data = &ipv6_devconf.force_tllao,
6272 .maxlen = sizeof(int),
6273 .mode = 0644,
6274 .proc_handler = proc_dointvec
6275 },
6276 {
6277 .procname = "ndisc_notify",
6278 .data = &ipv6_devconf.ndisc_notify,
6279 .maxlen = sizeof(int),
6280 .mode = 0644,
6281 .proc_handler = proc_dointvec
6282 },
6283 {
6284 .procname = "suppress_frag_ndisc",
6285 .data = &ipv6_devconf.suppress_frag_ndisc,
6286 .maxlen = sizeof(int),
6287 .mode = 0644,
6288 .proc_handler = proc_dointvec
6289 },
6290 {
6291 .procname = "accept_ra_from_local",
6292 .data = &ipv6_devconf.accept_ra_from_local,
6293 .maxlen = sizeof(int),
6294 .mode = 0644,
6295 .proc_handler = proc_dointvec,
6296 },
6297 {
6298 .procname = "accept_ra_mtu",
6299 .data = &ipv6_devconf.accept_ra_mtu,
6300 .maxlen = sizeof(int),
6301 .mode = 0644,
6302 .proc_handler = proc_dointvec,
6303 },
6304 {
6305 .procname = "stable_secret",
6306 .data = &ipv6_devconf.stable_secret,
6307 .maxlen = IPV6_MAX_STRLEN,
6308 .mode = 0600,
6309 .proc_handler = addrconf_sysctl_stable_secret,
6310 },
6311 {
6312 .procname = "use_oif_addrs_only",
6313 .data = &ipv6_devconf.use_oif_addrs_only,
6314 .maxlen = sizeof(int),
6315 .mode = 0644,
6316 .proc_handler = proc_dointvec,
6317 },
6318 {
6319 .procname = "ignore_routes_with_linkdown",
6320 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6321 .maxlen = sizeof(int),
6322 .mode = 0644,
6323 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6324 },
6325 {
6326 .procname = "drop_unicast_in_l2_multicast",
6327 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6328 .maxlen = sizeof(int),
6329 .mode = 0644,
6330 .proc_handler = proc_dointvec,
6331 },
6332 {
6333 .procname = "drop_unsolicited_na",
6334 .data = &ipv6_devconf.drop_unsolicited_na,
6335 .maxlen = sizeof(int),
6336 .mode = 0644,
6337 .proc_handler = proc_dointvec,
6338 },
6339 {
6340 .procname = "keep_addr_on_down",
6341 .data = &ipv6_devconf.keep_addr_on_down,
6342 .maxlen = sizeof(int),
6343 .mode = 0644,
6344 .proc_handler = proc_dointvec,
6346 },
6347 {
6348 .procname = "seg6_enabled",
6349 .data = &ipv6_devconf.seg6_enabled,
6350 .maxlen = sizeof(int),
6351 .mode = 0644,
6352 .proc_handler = proc_dointvec,
6353 },
6354 #ifdef CONFIG_IPV6_SEG6_HMAC
6355 {
6356 .procname = "seg6_require_hmac",
6357 .data = &ipv6_devconf.seg6_require_hmac,
6358 .maxlen = sizeof(int),
6359 .mode = 0644,
6360 .proc_handler = proc_dointvec,
6361 },
6362 #endif
6363 {
6364 .procname = "enhanced_dad",
6365 .data = &ipv6_devconf.enhanced_dad,
6366 .maxlen = sizeof(int),
6367 .mode = 0644,
6368 .proc_handler = proc_dointvec,
6369 },
6370 {
6371 .procname = "addr_gen_mode",
6372 .data = &ipv6_devconf.addr_gen_mode,
6373 .maxlen = sizeof(int),
6374 .mode = 0644,
6375 .proc_handler = addrconf_sysctl_addr_gen_mode,
6376 },
6377 {
6378 .procname = "disable_policy",
6379 .data = &ipv6_devconf.disable_policy,
6380 .maxlen = sizeof(int),
6381 .mode = 0644,
6382 .proc_handler = addrconf_sysctl_disable_policy,
6383 },
6384 {
6385 .procname = "ndisc_tclass",
6386 .data = &ipv6_devconf.ndisc_tclass,
6387 .maxlen = sizeof(int),
6388 .mode = 0644,
6389 .proc_handler = proc_dointvec_minmax,
6390 .extra1 = (void *)&zero,
6391 .extra2 = (void *)&two_five_five,
6392 },
6393 {
6394 /* sentinel */
6395 }
6396 };
6397
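/* The table above is a template.  __addrconf_sysctl_register() below
 * duplicates it for each interface, rebases the .data pointers onto that
 * device's ipv6_devconf and registers the copy under net/ipv6/conf/<dev>,
 * so, for example, accept_ra for eth0 shows up as
 * /proc/sys/net/ipv6/conf/eth0/accept_ra.
 */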
6398 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6399 struct inet6_dev *idev, struct ipv6_devconf *p)
6400 {
6401 int i, ifindex;
6402 struct ctl_table *table;
6403 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6404
6405 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6406 if (!table)
6407 goto out;
6408
6409 for (i = 0; table[i].data; i++) {
6410 table[i].data += (char *)p - (char *)&ipv6_devconf;
6411 /* If either extra1 or extra2 is already set, it is not safe
6412 * to overwrite them: they hold the limits that keep
6413 * proc_dointvec_minmax usable.
6414 */
6415 if (!table[i].extra1 && !table[i].extra2) {
6416 table[i].extra1 = idev; /* embedded; no ref */
6417 table[i].extra2 = net;
6418 }
6419 }
6420
6421 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6422
6423 p->sysctl_header = register_net_sysctl(net, path, table);
6424 if (!p->sysctl_header)
6425 goto free;
6426
6427 if (!strcmp(dev_name, "all"))
6428 ifindex = NETCONFA_IFINDEX_ALL;
6429 else if (!strcmp(dev_name, "default"))
6430 ifindex = NETCONFA_IFINDEX_DEFAULT;
6431 else
6432 ifindex = idev->dev->ifindex;
6433 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
6434 ifindex, p);
6435 return 0;
6436
6437 free:
6438 kfree(table);
6439 out:
6440 return -ENOBUFS;
6441 }
6442
6443 static void __addrconf_sysctl_unregister(struct net *net,
6444 struct ipv6_devconf *p, int ifindex)
6445 {
6446 struct ctl_table *table;
6447
6448 if (!p->sysctl_header)
6449 return;
6450
6451 table = p->sysctl_header->ctl_table_arg;
6452 unregister_net_sysctl_table(p->sysctl_header);
6453 p->sysctl_header = NULL;
6454 kfree(table);
6455
6456 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
6457 }
6458
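/* Per-device registration: wire up the neighbour discovery sysctls first,
 * then the addrconf table for this interface, and drop the former again
 * if the latter fails.
 */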
6459 static int addrconf_sysctl_register(struct inet6_dev *idev)
6460 {
6461 int err;
6462
6463 if (!sysctl_dev_name_is_allowed(idev->dev->name))
6464 return -EINVAL;
6465
6466 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
6467 &ndisc_ifinfo_sysctl_change);
6468 if (err)
6469 return err;
6470 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
6471 idev, &idev->cnf);
6472 if (err)
6473 neigh_sysctl_unregister(idev->nd_parms);
6474
6475 return err;
6476 }
6477
6478 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
6479 {
6480 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
6481 idev->dev->ifindex);
6482 neigh_sysctl_unregister(idev->nd_parms);
6483 }
6484
6485
6486 #endif
6487
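/* Per-namespace init: every netns gets its own writable copies of the
 * "all" and "default" devconf templates; with CONFIG_SYSCTL they are
 * registered under net/ipv6/conf/all and net/ipv6/conf/default.
 */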
6488 static int __net_init addrconf_init_net(struct net *net)
6489 {
6490 int err = -ENOMEM;
6491 struct ipv6_devconf *all, *dflt;
6492
6493 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
6494 if (!all)
6495 goto err_alloc_all;
6496
6497 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
6498 if (!dflt)
6499 goto err_alloc_dflt;
6500
6501 /* these will be inherited by all namespaces */
6502 dflt->autoconf = ipv6_defaults.autoconf;
6503 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
6504
6505 dflt->stable_secret.initialized = false;
6506 all->stable_secret.initialized = false;
6507
6508 net->ipv6.devconf_all = all;
6509 net->ipv6.devconf_dflt = dflt;
6510
6511 #ifdef CONFIG_SYSCTL
6512 err = __addrconf_sysctl_register(net, "all", NULL, all);
6513 if (err < 0)
6514 goto err_reg_all;
6515
6516 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
6517 if (err < 0)
6518 goto err_reg_dflt;
6519 #endif
6520 return 0;
6521
6522 #ifdef CONFIG_SYSCTL
6523 err_reg_dflt:
6524 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
6525 err_reg_all:
6526 kfree(dflt);
6527 #endif
6528 err_alloc_dflt:
6529 kfree(all);
6530 err_alloc_all:
6531 return err;
6532 }
6533
6534 static void __net_exit addrconf_exit_net(struct net *net)
6535 {
6536 #ifdef CONFIG_SYSCTL
6537 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
6538 NETCONFA_IFINDEX_DEFAULT);
6539 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
6540 NETCONFA_IFINDEX_ALL);
6541 #endif
6542 kfree(net->ipv6.devconf_dflt);
6543 kfree(net->ipv6.devconf_all);
6544 }
6545
6546 static struct pernet_operations addrconf_ops = {
6547 .init = addrconf_init_net,
6548 .exit = addrconf_exit_net,
6549 };
6550
6551 static struct rtnl_af_ops inet6_ops __read_mostly = {
6552 .family = AF_INET6,
6553 .fill_link_af = inet6_fill_link_af,
6554 .get_link_af_size = inet6_get_link_af_size,
6555 .validate_link_af = inet6_validate_link_af,
6556 .set_link_af = inet6_set_link_af,
6557 };
6558
6559 /*
6560 * Init / cleanup code
6561 */
6562
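/* Module initialisation: set up the address-label table and per-netns
 * state, create the addrconf workqueue, give loopback_dev its inet6_dev
 * by hand (see the comment below), and only then register the netdevice
 * notifier and the rtnetlink handlers.
 */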
6563 int __init addrconf_init(void)
6564 {
6565 struct inet6_dev *idev;
6566 int i, err;
6567
6568 err = ipv6_addr_label_init();
6569 if (err < 0) {
6570 pr_crit("%s: cannot initialize default policy table: %d\n",
6571 __func__, err);
6572 goto out;
6573 }
6574
6575 err = register_pernet_subsys(&addrconf_ops);
6576 if (err < 0)
6577 goto out_addrlabel;
6578
6579 addrconf_wq = create_workqueue("ipv6_addrconf");
6580 if (!addrconf_wq) {
6581 err = -ENOMEM;
6582 goto out_nowq;
6583 }
6584
6585 /* The addrconf netdev notifier requires that loopback_dev
6586 * has its ipv6 private information allocated and set up
6587 * before it can bring up and give link-local addresses
6588 * to other devices which are up.
6589 *
6590 * Unfortunately, loopback_dev is not necessarily the first
6591 * entry in the global dev_base list of net devices. In fact,
6592 * it is likely to be the very last entry on that list.
6593 * So this causes the notifier registration below to try to
6594 * give link-local addresses to all devices besides loopback_dev
6595 * first, then loopback_dev, which causes all the non-loopback_dev
6596 * devices to fail to get a link-local address.
6597 *
6598 * So, as a temporary fix, allocate the ipv6 structure for
6599 * loopback_dev first by hand.
6600 * Longer term, all of the dependencies ipv6 has upon the loopback
6601 * device and it being up should be removed.
6602 */
6603 rtnl_lock();
6604 idev = ipv6_add_dev(init_net.loopback_dev);
6605 rtnl_unlock();
6606 if (IS_ERR(idev)) {
6607 err = PTR_ERR(idev);
6608 goto errlo;
6609 }
6610
6611 ip6_route_init_special_entries();
6612
6613 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6614 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
6615
6616 register_netdevice_notifier(&ipv6_dev_notf);
6617
6618 addrconf_verify();
6619
6620 rtnl_af_register(&inet6_ops);
6621
6622 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
6623 0);
6624 if (err < 0)
6625 goto errout;
6626
6627 /* Only the first call to __rtnl_register can fail */
6628 __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, 0);
6629 __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, 0);
6630 __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
6631 inet6_dump_ifaddr, RTNL_FLAG_DOIT_UNLOCKED);
6632 __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
6633 inet6_dump_ifmcaddr, 0);
6634 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
6635 inet6_dump_ifacaddr, 0);
6636 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
6637 inet6_netconf_dump_devconf, RTNL_FLAG_DOIT_UNLOCKED);
6638
6639 ipv6_addr_label_rtnl_register();
6640
6641 return 0;
6642 errout:
6643 rtnl_af_unregister(&inet6_ops);
6644 unregister_netdevice_notifier(&ipv6_dev_notf);
6645 errlo:
6646 destroy_workqueue(addrconf_wq);
6647 out_nowq:
6648 unregister_pernet_subsys(&addrconf_ops);
6649 out_addrlabel:
6650 ipv6_addr_label_cleanup();
6651 out:
6652 return err;
6653 }
6654
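/* Module cleanup: undo addrconf_init() roughly in reverse; the WARN_ONs
 * over the address hash table verify that the addrconf_ifdown() calls
 * above them left no addresses behind.
 */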
6655 void addrconf_cleanup(void)
6656 {
6657 struct net_device *dev;
6658 int i;
6659
6660 unregister_netdevice_notifier(&ipv6_dev_notf);
6661 unregister_pernet_subsys(&addrconf_ops);
6662 ipv6_addr_label_cleanup();
6663
6664 rtnl_af_unregister(&inet6_ops);
6665
6666 rtnl_lock();
6667
6668 /* clean dev list */
6669 for_each_netdev(&init_net, dev) {
6670 if (__in6_dev_get(dev) == NULL)
6671 continue;
6672 addrconf_ifdown(dev, 1);
6673 }
6674 addrconf_ifdown(init_net.loopback_dev, 2);
6675
6676 /*
6677 * Check hash table.
6678 */
6679 spin_lock_bh(&addrconf_hash_lock);
6680 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6681 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
6682 spin_unlock_bh(&addrconf_hash_lock);
6683 cancel_delayed_work(&addr_chk_work);
6684 rtnl_unlock();
6685
6686 destroy_workqueue(addrconf_wq);
6687 }