/*
 * IPv6 tunneling device
 * Linux INET6 implementation
 *
 * Authors:
 * Ville Nuorvala <vnuorval@tcs.hut.fi>
 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
 *
 * Based on:
 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 * RFC 2473
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT 5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}
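
/* Editor's note (added commentary, not in the original source):
 * HASH() folds the two tunnel endpoints into one of the
 * IP6_TUNNEL_HASH_SIZE (32) buckets: ipv6_addr_hash() XOR-folds each
 * 128-bit address down to 32 bits, and hash_32() keeps the top
 * IP6_TUNNEL_HASH_SIZE_SHIFT (5) bits of a multiplicative hash.
 * Illustrative use (variable names hypothetical):
 *
 *	u32 h = HASH(&remote, &local);		// 0 <= h < 32
 *	t = rcu_dereference(ip6n->tnls_r_l[h]);
 */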

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
			per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}
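
/* Editor's note (added commentary, not in the original source):
 * dev->tstats is a per-CPU struct pcpu_sw_netstats. The begin/retry
 * pair above is the u64_stats seqcount protocol: on 32-bit SMP kernels
 * a 64-bit counter cannot be read atomically, so the reader snapshots
 * the counters and retries whenever the writer incremented the
 * sequence in the meantime; on 64-bit kernels the helpers compile
 * down to plain loads.
 */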

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 * @net: network namespace in which to perform the lookup
 * @remote: the address of the tunnel exit-point
 * @local: the address of the tunnel entry-point
 *
 * Return:
 * tunnel matching given end-points if found,
 * else fallback tunnel if its device is up,
 * else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_any(&t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    ipv6_addr_any(&t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
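
/* Editor's note (added commentary, not in the original source):
 * the lookup above tries, in order: an exact {remote, local} match,
 * a local-only match with wildcard remote, a remote-only match with
 * wildcard local, the collect_md (external control) tunnel, and
 * finally the fallback device ip6tnl0. Each candidate must also be
 * administratively up (IFF_UP) to be returned.
 */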

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 * @ip6n: per-net tunnel state
 * @p: parameters containing tunnel end-points
 *
 * Description:
 * ip6_tnl_bucket() returns the head of the list matching the
 * &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}
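
/* Editor's note (added commentary, not in the original source):
 * prio selects one of the two tables: tnls[0] points at tnls_wc, the
 * single wildcard slot used by the fallback device, and tnls[1]
 * points at tnls_r_l, the 32-entry hash table (the wiring of
 * ip6n->tnls[] happens in the per-netns init, outside this excerpt).
 * Any tunnel with at least one concrete endpoint lands in the hashed
 * table.
 */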

/**
 * ip6_tnl_link - add tunnel to hash table
 * @ip6n: per-net tunnel state
 * @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 * @ip6n: per-net tunnel state
 * @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
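
/* Editor's note (added commentary, not in the original source):
 * ip6_tnl_link()/ip6_tnl_unlink() follow the usual RCU list
 * discipline: writers run under RTNL (hence rtnl_dereference()), new
 * nodes are published with rcu_assign_pointer() so readers never see
 * a half-initialized entry, and readers traverse with
 * for_each_ip6_tunnel_rcu() under rcu_read_lock(). Unlinking only
 * detaches the node; freeing waits for a grace period via the netdev
 * teardown path.
 */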

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 * @net: network namespace to create the tunnel in
 * @p: tunnel parameters
 *
 * Description:
 * Create tunnel matching given parameters.
 *
 * Return:
 * created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -ENOMEM;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 * @net: network namespace to look in
 * @p: tunnel parameters
 * @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 * ip6_tnl_locate() first tries to locate an existing tunnel
 * based on @p. If this is unsuccessful, but @create is set, a new
 * tunnel device is created and registered for use.
 *
 * Return:
 * matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 * @dev: the device to be destroyed
 *
 * Description:
 * ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 * @skb: received socket buffer
 * @raw: pointer to the IPv6 header within @skb
 *
 * Return:
 * 0 if none was found,
 * else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			/* Remember: hdr is no longer valid at this point. */
			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
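
/* Editor's note (added commentary, not in the original source):
 * the Tunnel Encapsulation Limit option searched for above is the
 * RFC 2473 (section 4.1.1) TLV carried in a destination options
 * header:
 *
 *	+--------+--------+--------+
 *	|  0x04  |  0x01  | limit  |   (type, length, Tun Encap Limit)
 *	+--------+--------+--------+
 *
 * The function returns the offset of that TLV relative to @raw, or 0
 * if no valid option is present.
 */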

/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 * ip6_tnl_err() should handle errors in the tunnel according
 * to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	struct ip6_tnl *t;
	int err = -ENOENT;
	int rel_msg = 0;
	u8 tproto;
	__u16 len;

	/* If the packet doesn't contain the original IPv6 header we are
	 * in trouble since we might need the source address for further
	 * processing of the error.
	 */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu, teli;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
				sock_net_uid(net, NULL));
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	const struct iphdr *eiph;
	struct sk_buff *skb2;
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;
	ip_rt_put(rt);

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr, 0, 0,
					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst_update_pmtu(skb2, rel_info);
	}

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);
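
/* Editor's note (added commentary, not in the original source):
 * the capability flags summarize what the configured endpoints
 * permit: a wildcard on either side yields IP6_TNL_F_CAP_PER_PACKET
 * (re-evaluated per packet); otherwise a unicast local address grants
 * IP6_TNL_F_CAP_XMIT and a unicast remote address grants
 * IP6_TNL_F_CAP_RCV, provided neither side is loopback and
 * link-local endpoints are bound to a link (p->link).
 */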

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
		     likely(!ipv6_chk_addr(net, raddr, NULL, 0))))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = READ_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
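
/* Editor's note (added commentary, not in the original source):
 * the 8 bytes built above form a complete destination options header
 * carrying the limit:
 *
 *	dst_opt[0]  next header  (filled in when the header is pushed)
 *	dst_opt[1]  hdr ext len  0, i.e. the header is 8 octets total
 *	dst_opt[2]  0x04         IPV6_TLV_TNL_ENCAP_LIMIT
 *	dst_opt[3]  1            option data length
 *	dst_opt[4]  encap_limit  remaining nestings allowed
 *	dst_opt[5]  0x01         IPV6_TLV_PADN
 *	dst_opt[6]  1            pad data length
 *	dst_opt[7]  0            pad byte
 */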

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 * @t: the outgoing tunnel device
 * @hdr: IPv6 header from the incoming packet
 *
 * Description:
 * Avoid trivial tunneling loop by checking that tunnel exit-point
 * doesn't match source of incoming packet.
 *
 * Return:
 * 1 if conflict,
 * 0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
			 !ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 * @skb: the outgoing socket buffer
 * @dev: the outgoing tunnel device
 * @dsfield: dscp code for outer header
 * @fl6: flow of tunneled packet
 * @encap_limit: encapsulation limit
 * @pmtu: Path MTU is stored if packet is too big
 * @proto: next header value
 *
 * Description:
 * Build new header and do some sanity checks on the packet before sending
 * it.
 *
 * Return:
 * 0 on success,
 * -1 on failure,
 * %-EMSGSIZE if the message is too big (*pmtu is set in this case)
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			struct neighbour *neigh;
			int addr_type;

			if (!skb_dst(skb))
				goto tx_err_link_failure;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_err_link_failure;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY)
				addr6 = &ipv6_hdr(skb)->daddr;

			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
			neigh_release(neigh);
		}
	} else if (t->parms.proto != 0 && !(t->parms.flags &
					    (IP6_TNL_F_USE_ORIG_TCLASS |
					     IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if neither the outer protocol nor the
		 * routing decision depends on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		/* add dsfield to flowlabel for route lookup */
		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);

		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	} else if (mtu < 576) {
		mtu = 576;
	}

	skb_dst_update_pmtu(skb, mtu);
	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_frag_opts(skb, &opt.ops, &proto);
	}
	hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, dsfield,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);

static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv4_get_dsfield(iph);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = READ_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
	} else {
		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
		ipv6h = ipv6_hdr(skb);
		if (offset > 0) {
			struct ipv6_tlv_tnl_enc_lim *tel;

			tel = (void *)&skb_network_header(skb)[offset];
			if (tel->encap_limit == 0) {
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_HDR_FIELD, offset + 2);
				return -1;
			}
			encap_limit = tel->encap_limit - 1;
		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
			encap_limit = t->parms.encap_limit;
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv6_get_dsfield(ipv6h);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
			fl6.flowlabel |= ip6_flowlabel(ipv6h);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 * @t: tunnel to be changed
 * @p: tunnel configuration parameters
 *
 * Description:
 * ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* For the default tnl0 device, allow changing only the proto. */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 * @dev: virtual device associated with tunnel
 * @ifr: parameters passed from userspace
 * @cmd: command to be performed
 *
 * Description:
 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
 * from userspace.
 *
 * The possible commands are the following:
 * %SIOCGETTUNNEL: get tunnel parameters for device
 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 * %SIOCCHGTUNNEL: change tunnel parameters to those given
 * %SIOCDELTUNNEL: delete tunnel
 *
 * The fallback device "ip6tnl0", created during module
 * initialization, can be used for creating other tunnel devices.
 *
 * Return:
 * 0 on success,
 * %-EFAULT if unable to copy data to or from userspace,
 * %-EPERM if current process hasn't %CAP_NET_ADMIN set,
 * %-EINVAL if passed tunnel parameters are invalid,
 * %-EEXIST if changing a tunnel's parameters would cause a conflict,
 * %-ENODEV if attempting to change or delete a nonexisting device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 * @dev: virtual device associated with tunnel
 * @new_mtu: the new mtu
 *
 * Return:
 * 0 on success,
 * %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPV6) {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	} else {
		if (new_mtu < ETH_MIN_MTU)
			return -EINVAL;
	}
	if (new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);
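
/* Editor's note (added commentary, not in the original source):
 * 0xFFF8 is the largest 8-byte-aligned value below 0xFFFF, the
 * maximum IPv6 payload length expressible in the 16-bit payload_len
 * field; subtracting dev->hard_header_len leaves room for the
 * encapsulating headers. From userspace this path is reached via,
 * e.g. (illustrative command):
 *
 *	ip link set dev ip6tnl1 mtu 1452
 */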

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
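
/* Editor's note (added commentary, not in the original source):
 * registration uses a bare cmpxchg() on the ip6tun_encaps[] slot, so
 * no lock is needed: the add succeeds only if the slot was NULL, the
 * delete only if the slot still holds @ops, and the
 * synchronize_net() in the delete path lets in-flight RCU readers
 * drain before the caller may free @ops. A caller sketch
 * (hypothetical ops structure):
 *
 *	static const struct ip6_tnl_encap_ops my_encap_ops = { ... };
 *	err = ip6_tnl_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 */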
1729
1730 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1731 struct ip_tunnel_encap *ipencap)
1732 {
1733 int hlen;
1734
1735 memset(&t->encap, 0, sizeof(t->encap));
1736
1737 hlen = ip6_encap_hlen(ipencap);
1738 if (hlen < 0)
1739 return hlen;
1740
1741 t->encap.type = ipencap->type;
1742 t->encap.sport = ipencap->sport;
1743 t->encap.dport = ipencap->dport;
1744 t->encap.flags = ipencap->flags;
1745
1746 t->encap_hlen = hlen;
1747 t->hlen = t->encap_hlen + t->tun_hlen;
1748
1749 return 0;
1750 }
1751 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1752
1753 static const struct net_device_ops ip6_tnl_netdev_ops = {
1754 .ndo_init = ip6_tnl_dev_init,
1755 .ndo_uninit = ip6_tnl_dev_uninit,
1756 .ndo_start_xmit = ip6_tnl_start_xmit,
1757 .ndo_do_ioctl = ip6_tnl_ioctl,
1758 .ndo_change_mtu = ip6_tnl_change_mtu,
1759 .ndo_get_stats = ip6_get_stats,
1760 .ndo_get_iflink = ip6_tnl_get_iflink,
1761 };
1762
1763 #define IPXIPX_FEATURES (NETIF_F_SG | \
1764 NETIF_F_FRAGLIST | \
1765 NETIF_F_HIGHDMA | \
1766 NETIF_F_GSO_SOFTWARE | \
1767 NETIF_F_HW_CSUM)
1768
1769 /**
1770 * ip6_tnl_dev_setup - setup virtual tunnel device
1771 * @dev: virtual device associated with tunnel
1772 *
1773 * Description:
1774 * Initialize function pointers and device parameters
1775 **/
1776
1777 static void ip6_tnl_dev_setup(struct net_device *dev)
1778 {
1779 dev->netdev_ops = &ip6_tnl_netdev_ops;
1780 dev->needs_free_netdev = true;
1781 dev->priv_destructor = ip6_dev_free;
1782
1783 dev->type = ARPHRD_TUNNEL6;
1784 dev->flags |= IFF_NOARP;
1785 dev->addr_len = sizeof(struct in6_addr);
1786 dev->features |= NETIF_F_LLTX;
1787 netif_keep_dst(dev);
1788
1789 dev->features |= IPXIPX_FEATURES;
1790 dev->hw_features |= IPXIPX_FEATURES;
1791
1792 /* This perm addr will be used as interface identifier by IPv6 */
1793 dev->addr_assign_type = NET_ADDR_RANDOM;
1794 eth_random_addr(dev->perm_addr);
1795 }
1796
1797
1798 /**
1799 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1800 * @dev: virtual device associated with tunnel
1801 **/
1802
1803 static inline int
1804 ip6_tnl_dev_init_gen(struct net_device *dev)
1805 {
1806 struct ip6_tnl *t = netdev_priv(dev);
1807 int ret;
1808 int t_hlen;
1809
1810 t->dev = dev;
1811 t->net = dev_net(dev);
1812 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1813 if (!dev->tstats)
1814 return -ENOMEM;
1815
1816 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1817 if (ret)
1818 goto free_stats;
1819
1820 ret = gro_cells_init(&t->gro_cells, dev);
1821 if (ret)
1822 goto destroy_dst;
1823
1824 t->tun_hlen = 0;
1825 t->hlen = t->encap_hlen + t->tun_hlen;
1826 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1827
1828 dev->type = ARPHRD_TUNNEL6;
1829 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1830 dev->mtu = ETH_DATA_LEN - t_hlen;
1831 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1832 dev->mtu -= 8;
1833 dev->min_mtu = ETH_MIN_MTU;
1834 dev->max_mtu = 0xFFF8 - dev->hard_header_len;
1835
1836 return 0;
1837
1838 destroy_dst:
1839 dst_cache_destroy(&t->dst_cache);
1840 free_stats:
1841 free_percpu(dev->tstats);
1842 dev->tstats = NULL;
1843
1844 return ret;
1845 }
1846
1847 /**
1848 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1849 * @dev: virtual device associated with tunnel
1850 **/
1851
1852 static int ip6_tnl_dev_init(struct net_device *dev)
1853 {
1854 struct ip6_tnl *t = netdev_priv(dev);
1855 int err = ip6_tnl_dev_init_gen(dev);
1856
1857 if (err)
1858 return err;
1859 ip6_tnl_link_config(t);
1860 if (t->parms.collect_md) {
1861 dev->features |= NETIF_F_NETNS_LOCAL;
1862 netif_keep_dst(dev);
1863 }
1864 return 0;
1865 }
1866
1867 /**
1868 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1869 * @dev: fallback device
1870 *
1871 * Return: 0
1872 **/
1873
1874 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1875 {
1876 struct ip6_tnl *t = netdev_priv(dev);
1877 struct net *net = dev_net(dev);
1878 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1879
1880 t->parms.proto = IPPROTO_IPV6;
1881 dev_hold(dev);
1882
1883 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1884 return 0;
1885 }
1886
1887 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1888 struct netlink_ext_ack *extack)
1889 {
1890 u8 proto;
1891
1892 if (!data || !data[IFLA_IPTUN_PROTO])
1893 return 0;
1894
1895 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1896 if (proto != IPPROTO_IPV6 &&
1897 proto != IPPROTO_IPIP &&
1898 proto != 0)
1899 return -EINVAL;
1900
1901 return 0;
1902 }
1903
1904 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1905 struct __ip6_tnl_parm *parms)
1906 {
1907 memset(parms, 0, sizeof(*parms));
1908
1909 if (!data)
1910 return;
1911
1912 if (data[IFLA_IPTUN_LINK])
1913 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1914
1915 if (data[IFLA_IPTUN_LOCAL])
1916 parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1917
1918 if (data[IFLA_IPTUN_REMOTE])
1919 parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1920
1921 if (data[IFLA_IPTUN_TTL])
1922 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1923
1924 if (data[IFLA_IPTUN_ENCAP_LIMIT])
1925 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1926
1927 if (data[IFLA_IPTUN_FLOWINFO])
1928 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1929
1930 if (data[IFLA_IPTUN_FLAGS])
1931 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1932
1933 if (data[IFLA_IPTUN_PROTO])
1934 parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1935
1936 if (data[IFLA_IPTUN_COLLECT_METADATA])
1937 parms->collect_md = true;
1938
1939 if (data[IFLA_IPTUN_FWMARK])
1940 parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1941 }
1942
1943 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1944 struct ip_tunnel_encap *ipencap)
1945 {
1946 bool ret = false;
1947
1948 memset(ipencap, 0, sizeof(*ipencap));
1949
1950 if (!data)
1951 return ret;
1952
1953 if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1954 ret = true;
1955 ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1956 }
1957
1958 if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1959 ret = true;
1960 ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1961 }
1962
1963 if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1964 ret = true;
1965 ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1966 }
1967
1968 if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1969 ret = true;
1970 ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1971 }
1972
1973 return ret;
1974 }
1975
1976 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1977 struct nlattr *tb[], struct nlattr *data[],
1978 struct netlink_ext_ack *extack)
1979 {
1980 struct net *net = dev_net(dev);
1981 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1982 struct ip6_tnl *nt, *t;
1983 struct ip_tunnel_encap ipencap;
1984
1985 nt = netdev_priv(dev);
1986
1987 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1988 int err = ip6_tnl_encap_setup(nt, &ipencap);
1989
1990 if (err < 0)
1991 return err;
1992 }
1993
1994 ip6_tnl_netlink_parms(data, &nt->parms);
1995
1996 if (nt->parms.collect_md) {
1997 if (rtnl_dereference(ip6n->collect_md_tun))
1998 return -EEXIST;
1999 } else {
2000 t = ip6_tnl_locate(net, &nt->parms, 0);
2001 if (!IS_ERR(t))
2002 return -EEXIST;
2003 }
2004
2005 return ip6_tnl_create2(dev);
2006 }
2007
2008 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2009 struct nlattr *data[],
2010 struct netlink_ext_ack *extack)
2011 {
2012 struct ip6_tnl *t = netdev_priv(dev);
2013 struct __ip6_tnl_parm p;
2014 struct net *net = t->net;
2015 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2016 struct ip_tunnel_encap ipencap;
2017
2018 if (dev == ip6n->fb_tnl_dev)
2019 return -EINVAL;
2020
2021 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2022 int err = ip6_tnl_encap_setup(t, &ipencap);
2023
2024 if (err < 0)
2025 return err;
2026 }
2027 ip6_tnl_netlink_parms(data, &p);
2028 if (p.collect_md)
2029 return -EINVAL;
2030
2031 t = ip6_tnl_locate(net, &p, 0);
2032 if (!IS_ERR(t)) {
2033 if (t->dev != dev)
2034 return -EEXIST;
2035 } else
2036 t = netdev_priv(dev);
2037
2038 return ip6_tnl_update(t, &p);
2039 }
2040
2041 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2042 {
2043 struct net *net = dev_net(dev);
2044 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2045
2046 if (dev != ip6n->fb_tnl_dev)
2047 unregister_netdevice_queue(dev, head);
2048 }
2049
2050 static size_t ip6_tnl_get_size(const struct net_device *dev)
2051 {
2052 return
2053 /* IFLA_IPTUN_LINK */
2054 nla_total_size(4) +
2055 /* IFLA_IPTUN_LOCAL */
2056 nla_total_size(sizeof(struct in6_addr)) +
2057 /* IFLA_IPTUN_REMOTE */
2058 nla_total_size(sizeof(struct in6_addr)) +
2059 /* IFLA_IPTUN_TTL */
2060 nla_total_size(1) +
2061 /* IFLA_IPTUN_ENCAP_LIMIT */
2062 nla_total_size(1) +
2063 /* IFLA_IPTUN_FLOWINFO */
2064 nla_total_size(4) +
2065 /* IFLA_IPTUN_FLAGS */
2066 nla_total_size(4) +
2067 /* IFLA_IPTUN_PROTO */
2068 nla_total_size(1) +
2069 /* IFLA_IPTUN_ENCAP_TYPE */
2070 nla_total_size(2) +
2071 /* IFLA_IPTUN_ENCAP_FLAGS */
2072 nla_total_size(2) +
2073 /* IFLA_IPTUN_ENCAP_SPORT */
2074 nla_total_size(2) +
2075 /* IFLA_IPTUN_ENCAP_DPORT */
2076 nla_total_size(2) +
2077 /* IFLA_IPTUN_COLLECT_METADATA */
2078 nla_total_size(0) +
2079 /* IFLA_IPTUN_FWMARK */
2080 nla_total_size(4) +
2081 0;
2082 }
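/* The size computed above is an upper bound used to reserve room in the
 * netlink dump skb: every attribute emitted by ip6_tnl_fill_info()
 * below must be accounted for here, otherwise the dump can fail with
 * -EMSGSIZE.  nla_total_size(0) covers the payload-less
 * IFLA_IPTUN_COLLECT_METADATA flag (attribute header only).
 */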
2083
2084 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2085 {
2086 struct ip6_tnl *tunnel = netdev_priv(dev);
2087 struct __ip6_tnl_parm *parm = &tunnel->parms;
2088
2089 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2090 nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2091 nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2092 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2093 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2094 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2095 nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2096 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2097 nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2098 goto nla_put_failure;
2099
2100 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2101 nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2102 nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2103 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2104 goto nla_put_failure;
2105
2106 if (parm->collect_md)
2107 if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2108 goto nla_put_failure;
2109
2110 return 0;
2111
2112 nla_put_failure:
2113 return -EMSGSIZE;
2114 }
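/* These attributes are what "ip -d link show" decodes when printing
 * tunnel details; the dump mirrors the accounting in
 * ip6_tnl_get_size() above attribute for attribute.
 */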
2115
2116 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2117 {
2118 struct ip6_tnl *tunnel = netdev_priv(dev);
2119
2120 return tunnel->net;
2121 }
2122 EXPORT_SYMBOL(ip6_tnl_get_link_net);
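/* Reporting the transport netns (tunnel->net) rather than the netns the
 * device currently sits in lets "ip link" print a correct link-netnsid
 * for tunnels whose device has been moved between namespaces.
 */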
2123
2124 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2125 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
2126 [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
2127 [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
2128 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
2129 [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
2130 [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
2131 [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
2132 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
2133 [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
2134 [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
2135 [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
2136 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
2137 [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
2138 [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
2139 };
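/* In this policy the two address attributes are validated only for a
 * minimum payload length (NLA_UNSPEC with .len), while NLA_FLAG entries
 * are valid with no payload at all; the generic netlink attribute
 * parser applies these checks before newlink/changelink ever run.
 */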
2140
2141 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2142 .kind = "ip6tnl",
2143 .maxtype = IFLA_IPTUN_MAX,
2144 .policy = ip6_tnl_policy,
2145 .priv_size = sizeof(struct ip6_tnl),
2146 .setup = ip6_tnl_dev_setup,
2147 .validate = ip6_tnl_validate,
2148 .newlink = ip6_tnl_newlink,
2149 .changelink = ip6_tnl_changelink,
2150 .dellink = ip6_tnl_dellink,
2151 .get_size = ip6_tnl_get_size,
2152 .fill_info = ip6_tnl_fill_info,
2153 .get_link_net = ip6_tnl_get_link_net,
2154 };
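/* This rtnl_link_ops instance binds the "ip6tnl" kind string used by
 * "ip link add ... type ip6tnl" to the handlers above; it becomes
 * visible once ip6_tunnel_init() calls rtnl_link_register().
 */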
2155
2156 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2157 .handler = ip4ip6_rcv,
2158 .err_handler = ip4ip6_err,
2159 .priority = 1,
2160 };
2161
2162 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2163 .handler = ip6ip6_rcv,
2164 .err_handler = ip6ip6_err,
2165 .priority = 1,
2166 };
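/* Registering these xfrm6_tunnel handlers is what feeds IPPROTO_IPIP
 * and IPPROTO_IPV6 packets arriving over IPv6 into this driver.  The
 * priority field orders competing handlers for the same protocol;
 * priority 1 is believed to place ip6_tunnel after vti6 but before the
 * xfrm6_tunnel SPI handler -- treat that exact ordering as an
 * assumption rather than something this file guarantees.
 */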
2167
2168 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
2169 {
2170 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2171 struct net_device *dev, *aux;
2172 int h;
2173 struct ip6_tnl *t;
2174
2175 for_each_netdev_safe(net, dev, aux)
2176 if (dev->rtnl_link_ops == &ip6_link_ops)
2177 unregister_netdevice_queue(dev, list);
2178
2179 for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2180 t = rtnl_dereference(ip6n->tnls_r_l[h]);
2181 while (t) {
2182 /* If dev is in the same netns, it has already
2183 * been added to the list by the previous loop.
2184 */
2185 if (!net_eq(dev_net(t->dev), net))
2186 unregister_netdevice_queue(t->dev, list);
2187 t = rtnl_dereference(t->next);
2188 }
2189 }
2190 }
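/* Two passes are needed here: the for_each_netdev_safe() walk catches
 * tunnel devices currently living in this netns, while the hash-table
 * walk catches tunnels still linked into this netns' tables whose
 * devices were moved into a different netns.
 */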
2191
2192 static int __net_init ip6_tnl_init_net(struct net *net)
2193 {
2194 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2195 struct ip6_tnl *t = NULL;
2196 int err;
2197
2198 ip6n->tnls[0] = ip6n->tnls_wc;
2199 ip6n->tnls[1] = ip6n->tnls_r_l;
2200
2201 err = -ENOMEM;
2202 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2203 NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2204
2205 if (!ip6n->fb_tnl_dev)
2206 goto err_alloc_dev;
2207 dev_net_set(ip6n->fb_tnl_dev, net);
2208 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2209 /* The FB netdevice is special: there is one, and only one, per netns.
2210 * Allowing it to be moved to another netns is clearly unsafe.
2211 */
2212 ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2213
2214 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2215 if (err < 0)
2216 goto err_register;
2217
2218 err = register_netdev(ip6n->fb_tnl_dev);
2219 if (err < 0)
2220 goto err_register;
2221
2222 t = netdev_priv(ip6n->fb_tnl_dev);
2223
2224 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2225 return 0;
2226
2227 err_register:
2228 free_netdev(ip6n->fb_tnl_dev);
2229 err_alloc_dev:
2230 return err;
2231 }
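/* The fallback "ip6tnl0" device doubles as the wildcard receiver for
 * tunnelled packets that match no configured tunnel, which is why one
 * is created unconditionally for every netns and pinned there via
 * NETIF_F_NETNS_LOCAL.
 */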
2232
2233 static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
2234 {
2235 struct net *net;
2236 LIST_HEAD(list);
2237
2238 rtnl_lock();
2239 list_for_each_entry(net, net_list, exit_list)
2240 ip6_tnl_destroy_tunnels(net, &list);
2241 unregister_netdevice_many(&list);
2242 rtnl_unlock();
2243 }
2244
2245 static struct pernet_operations ip6_tnl_net_ops = {
2246 .init = ip6_tnl_init_net,
2247 .exit_batch = ip6_tnl_exit_batch_net,
2248 .id = &ip6_tnl_net_id,
2249 .size = sizeof(struct ip6_tnl_net),
2250 };
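/* Using .exit_batch rather than a plain .exit lets an entire list of
 * dying namespaces be torn down under a single rtnl_lock() /
 * unregister_netdevice_many() cycle instead of one round trip per
 * netns.
 */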
2251
2252 /**
2253 * ip6_tunnel_init - register protocol handlers and reserve needed resources
2254 *
2255 * Return: 0 on success, a negative errno code on failure
2256 */
2257
2258 static int __init ip6_tunnel_init(void)
2259 {
2260 int err;
2261
2262 if (!ipv6_mod_enabled())
2263 return -EOPNOTSUPP;
2264
2265 err = register_pernet_device(&ip6_tnl_net_ops);
2266 if (err < 0)
2267 goto out_pernet;
2268
2269 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2270 if (err < 0) {
2271 pr_err("%s: can't register ip4ip6\n", __func__);
2272 goto out_ip4ip6;
2273 }
2274
2275 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2276 if (err < 0) {
2277 pr_err("%s: can't register ip6ip6\n", __func__);
2278 goto out_ip6ip6;
2279 }
2280 err = rtnl_link_register(&ip6_link_ops);
2281 if (err < 0)
2282 goto rtnl_link_failed;
2283
2284 return 0;
2285
2286 rtnl_link_failed:
2287 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2288 out_ip6ip6:
2289 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2290 out_ip4ip6:
2291 unregister_pernet_device(&ip6_tnl_net_ops);
2292 out_pernet:
2293 return err;
2294 }
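/* The error labels above unwind in strict reverse order of the
 * successful registrations, so a partial init never leaves a handler
 * or pernet subsystem behind.  Note that with IPv6 disabled at boot
 * (ipv6.disable=1) ipv6_mod_enabled() is false and the module refuses
 * to load with -EOPNOTSUPP.
 */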
2295
2296 /**
2297 * ip6_tunnel_cleanup - free resources and unregister protocol handlers
2298 */
2299
2300 static void __exit ip6_tunnel_cleanup(void)
2301 {
2302 rtnl_link_unregister(&ip6_link_ops);
2303 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2304 pr_info("%s: can't deregister ip4ip6\n", __func__);
2305
2306 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2307 pr_info("%s: can't deregister ip6ip6\n", __func__);
2308
2309 unregister_pernet_device(&ip6_tnl_net_ops);
2310 }
2311
2312 module_init(ip6_tunnel_init);
2313 module_exit(ip6_tunnel_cleanup);