net/ipv6/ip6_tunnel.c
1 /*
2 * IPv6 tunneling device
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
8 *
9 * Based on:
10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
11 *
12 * RFC 2473
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
29 #include <linux/if.h>
30 #include <linux/in.h>
31 #include <linux/ip.h>
32 #include <linux/net.h>
33 #include <linux/in6.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/init.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/netfilter_ipv6.h>
41 #include <linux/slab.h>
42 #include <linux/hash.h>
43 #include <linux/etherdevice.h>
44
45 #include <linux/uaccess.h>
46 #include <linux/atomic.h>
47
48 #include <net/icmp.h>
49 #include <net/ip.h>
50 #include <net/ip_tunnels.h>
51 #include <net/ipv6.h>
52 #include <net/ip6_route.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_tunnel.h>
55 #include <net/xfrm.h>
56 #include <net/dsfield.h>
57 #include <net/inet_ecn.h>
58 #include <net/net_namespace.h>
59 #include <net/netns/generic.h>
60 #include <net/dst_metadata.h>
61
62 MODULE_AUTHOR("Ville Nuorvala");
63 MODULE_DESCRIPTION("IPv6 tunneling device");
64 MODULE_LICENSE("GPL");
65 MODULE_ALIAS_RTNL_LINK("ip6tnl");
66 MODULE_ALIAS_NETDEV("ip6tnl0");
67
68 #define IP6_TUNNEL_HASH_SIZE_SHIFT 5
69 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
70
71 static bool log_ecn_error = true;
72 module_param(log_ecn_error, bool, 0644);
73 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
74
75 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
76 {
77 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
78
79 return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
80 }
81
82 static int ip6_tnl_dev_init(struct net_device *dev);
83 static void ip6_tnl_dev_setup(struct net_device *dev);
84 static struct rtnl_link_ops ip6_link_ops __read_mostly;
85
86 static unsigned int ip6_tnl_net_id __read_mostly;
87 struct ip6_tnl_net {
88 /* the IPv6 tunnel fallback device */
89 struct net_device *fb_tnl_dev;
90 /* lists for storing tunnels in use */
91 struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
92 struct ip6_tnl __rcu *tnls_wc[1];
93 struct ip6_tnl __rcu **tnls[2];
94 struct ip6_tnl __rcu *collect_md_tun;
95 };
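/*
 * A note on the tables above: tnls_r_l[] holds the keyed tunnels, bucketed
 * by HASH(remote, local) (a 5-bit fold, i.e. 32 buckets), while tnls_wc[0]
 * is the single wildcard slot used by the fallback device ip6tnl0.  The
 * tnls[] aliases (presumably tnls[0] -> tnls_wc and tnls[1] -> tnls_r_l,
 * wired up in the per-netns init code outside this excerpt) are what
 * ip6_tnl_bucket() indexes through its prio value.
 */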
96
97 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
98 {
99 struct pcpu_sw_netstats tmp, sum = { 0 };
100 int i;
101
102 for_each_possible_cpu(i) {
103 unsigned int start;
104 const struct pcpu_sw_netstats *tstats =
105 per_cpu_ptr(dev->tstats, i);
106
107 do {
108 start = u64_stats_fetch_begin_irq(&tstats->syncp);
109 tmp.rx_packets = tstats->rx_packets;
110 tmp.rx_bytes = tstats->rx_bytes;
111 tmp.tx_packets = tstats->tx_packets;
112 tmp.tx_bytes = tstats->tx_bytes;
113 } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
114
115 sum.rx_packets += tmp.rx_packets;
116 sum.rx_bytes += tmp.rx_bytes;
117 sum.tx_packets += tmp.tx_packets;
118 sum.tx_bytes += tmp.tx_bytes;
119 }
120 dev->stats.rx_packets = sum.rx_packets;
121 dev->stats.rx_bytes = sum.rx_bytes;
122 dev->stats.tx_packets = sum.tx_packets;
123 dev->stats.tx_bytes = sum.tx_bytes;
124 return &dev->stats;
125 }
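/*
 * The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair above
 * takes a consistent snapshot of each CPU's counters: if a writer updates
 * them while we read, the sequence count changes and the read is retried.
 * On 64-bit builds the syncp is essentially free and the loop runs once;
 * the retry only matters on 32-bit SMP, where 64-bit counters cannot be
 * read atomically.
 */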
126
127 /**
128 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
129 * @remote: the address of the tunnel exit-point
130 * @local: the address of the tunnel entry-point
131 *
132 * Return:
133 * tunnel matching given end-points if found,
134 * else fallback tunnel if its device is up,
135 * else %NULL
136 **/
137
138 #define for_each_ip6_tunnel_rcu(start) \
139 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
140
141 static struct ip6_tnl *
142 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
143 {
144 unsigned int hash = HASH(remote, local);
145 struct ip6_tnl *t;
146 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
147 struct in6_addr any;
148
149 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
150 if (ipv6_addr_equal(local, &t->parms.laddr) &&
151 ipv6_addr_equal(remote, &t->parms.raddr) &&
152 (t->dev->flags & IFF_UP))
153 return t;
154 }
155
156 memset(&any, 0, sizeof(any));
157 hash = HASH(&any, local);
158 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 ipv6_addr_any(&t->parms.raddr) &&
161 (t->dev->flags & IFF_UP))
162 return t;
163 }
164
165 hash = HASH(remote, &any);
166 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
167 if (ipv6_addr_equal(remote, &t->parms.raddr) &&
168 ipv6_addr_any(&t->parms.laddr) &&
169 (t->dev->flags & IFF_UP))
170 return t;
171 }
172
173 t = rcu_dereference(ip6n->collect_md_tun);
174 if (t && t->dev->flags & IFF_UP)
175 return t;
176
177 t = rcu_dereference(ip6n->tnls_wc[0]);
178 if (t && (t->dev->flags & IFF_UP))
179 return t;
180
181 return NULL;
182 }
183
184 /**
185 * ip6_tnl_bucket - get head of list matching given tunnel parameters
186 * @p: parameters containing tunnel end-points
187 *
188 * Description:
189 * ip6_tnl_bucket() returns the head of the list matching the
190 * &struct in6_addr entries laddr and raddr in @p.
191 *
192 * Return: head of IPv6 tunnel list
193 **/
194
195 static struct ip6_tnl __rcu **
196 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
197 {
198 const struct in6_addr *remote = &p->raddr;
199 const struct in6_addr *local = &p->laddr;
200 unsigned int h = 0;
201 int prio = 0;
202
203 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
204 prio = 1;
205 h = HASH(remote, local);
206 }
207 return &ip6n->tnls[prio][h];
208 }
209
210 /**
211 * ip6_tnl_link - add tunnel to hash table
212 * @t: tunnel to be added
213 **/
214
215 static void
216 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
217 {
218 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
219
220 if (t->parms.collect_md)
221 rcu_assign_pointer(ip6n->collect_md_tun, t);
222 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
223 rcu_assign_pointer(*tp, t);
224 }
225
226 /**
227 * ip6_tnl_unlink - remove tunnel from hash table
228 * @t: tunnel to be removed
229 **/
230
231 static void
232 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
233 {
234 struct ip6_tnl __rcu **tp;
235 struct ip6_tnl *iter;
236
237 if (t->parms.collect_md)
238 rcu_assign_pointer(ip6n->collect_md_tun, NULL);
239
240 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
241 (iter = rtnl_dereference(*tp)) != NULL;
242 tp = &iter->next) {
243 if (t == iter) {
244 rcu_assign_pointer(*tp, t->next);
245 break;
246 }
247 }
248 }
249
250 static void ip6_dev_free(struct net_device *dev)
251 {
252 struct ip6_tnl *t = netdev_priv(dev);
253
254 gro_cells_destroy(&t->gro_cells);
255 dst_cache_destroy(&t->dst_cache);
256 free_percpu(dev->tstats);
257 }
258
259 static int ip6_tnl_create2(struct net_device *dev)
260 {
261 struct ip6_tnl *t = netdev_priv(dev);
262 struct net *net = dev_net(dev);
263 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
264 int err;
265
266 t = netdev_priv(dev);
267
268 dev->rtnl_link_ops = &ip6_link_ops;
269 err = register_netdevice(dev);
270 if (err < 0)
271 goto out;
272
273 strcpy(t->parms.name, dev->name);
274
275 dev_hold(dev);
276 ip6_tnl_link(ip6n, t);
277 return 0;
278
279 out:
280 return err;
281 }
282
283 /**
284 * ip6_tnl_create - create a new tunnel
285 * @p: tunnel parameters
286 * @net: network namespace in which to create the tunnel
287 *
288 * Description:
289 * Create tunnel matching given parameters.
290 *
291 * Return:
292 * created tunnel or error pointer
293 **/
294
295 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
296 {
297 struct net_device *dev;
298 struct ip6_tnl *t;
299 char name[IFNAMSIZ];
300 int err = -ENOMEM;
301
302 if (p->name[0])
303 strlcpy(name, p->name, IFNAMSIZ);
304 else
305 sprintf(name, "ip6tnl%%d");
306
307 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
308 ip6_tnl_dev_setup);
309 if (!dev)
310 goto failed;
311
312 dev_net_set(dev, net);
313
314 t = netdev_priv(dev);
315 t->parms = *p;
316 t->net = dev_net(dev);
317 err = ip6_tnl_create2(dev);
318 if (err < 0)
319 goto failed_free;
320
321 return t;
322
323 failed_free:
324 free_netdev(dev);
325 failed:
326 return ERR_PTR(err);
327 }
328
329 /**
330 * ip6_tnl_locate - find or create tunnel matching given parameters
331 * @p: tunnel parameters
332 * @create: != 0 if a new tunnel may be created when no match is found
333 *
334 * Description:
335 * ip6_tnl_locate() first tries to locate an existing tunnel
336 * based on @p. If this is unsuccessful, but @create is set, a new
337 * tunnel device is created and registered for use.
338 *
339 * Return:
340 * matching tunnel or error pointer
341 **/
342
343 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
344 struct __ip6_tnl_parm *p, int create)
345 {
346 const struct in6_addr *remote = &p->raddr;
347 const struct in6_addr *local = &p->laddr;
348 struct ip6_tnl __rcu **tp;
349 struct ip6_tnl *t;
350 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
351
352 for (tp = ip6_tnl_bucket(ip6n, p);
353 (t = rtnl_dereference(*tp)) != NULL;
354 tp = &t->next) {
355 if (ipv6_addr_equal(local, &t->parms.laddr) &&
356 ipv6_addr_equal(remote, &t->parms.raddr)) {
357 if (create)
358 return ERR_PTR(-EEXIST);
359
360 return t;
361 }
362 }
363 if (!create)
364 return ERR_PTR(-ENODEV);
365 return ip6_tnl_create(net, p);
366 }
367
368 /**
369 * ip6_tnl_dev_uninit - tunnel device uninitializer
370 * @dev: the device to be destroyed
371 *
372 * Description:
373 * ip6_tnl_dev_uninit() removes tunnel from its list
374 **/
375
376 static void
377 ip6_tnl_dev_uninit(struct net_device *dev)
378 {
379 struct ip6_tnl *t = netdev_priv(dev);
380 struct net *net = t->net;
381 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
382
383 if (dev == ip6n->fb_tnl_dev)
384 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
385 else
386 ip6_tnl_unlink(ip6n, t);
387 dst_cache_reset(&t->dst_cache);
388 dev_put(dev);
389 }
390
391 /**
392 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
393 * @skb: received socket buffer
394 *
395 * Return:
396 * 0 if no encapsulation limit option was found,
397 * else offset of the option relative to @raw
398 **/
399
400 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
401 {
402 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
403 unsigned int nhoff = raw - skb->data;
404 unsigned int off = nhoff + sizeof(*ipv6h);
405 u8 next, nexthdr = ipv6h->nexthdr;
406
407 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
408 struct ipv6_opt_hdr *hdr;
409 u16 optlen;
410
411 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
412 break;
413
414 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
415 if (nexthdr == NEXTHDR_FRAGMENT) {
416 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
417 if (frag_hdr->frag_off)
418 break;
419 optlen = 8;
420 } else if (nexthdr == NEXTHDR_AUTH) {
421 optlen = (hdr->hdrlen + 2) << 2;
422 } else {
423 optlen = ipv6_optlen(hdr);
424 }
425 /* cache hdr->nexthdr, since pskb_may_pull() might
426 * invalidate hdr
427 */
428 next = hdr->nexthdr;
429 if (nexthdr == NEXTHDR_DEST) {
430 u16 i = 2;
431
432 /* Remember: hdr is no longer valid at this point. */
433 if (!pskb_may_pull(skb, off + optlen))
434 break;
435
436 while (1) {
437 struct ipv6_tlv_tnl_enc_lim *tel;
438
439 /* No more room for encapsulation limit */
440 if (i + sizeof(*tel) > optlen)
441 break;
442
443 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
444 /* return index of option if found and valid */
445 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
446 tel->length == 1)
447 return i + off - nhoff;
448 /* else jump to next option */
449 if (tel->type)
450 i += tel->length + 2;
451 else
452 i++;
453 }
454 }
455 nexthdr = next;
456 off += optlen;
457 }
458 return 0;
459 }
460 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
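/*
 * The value returned above is an offset relative to @raw (normally the
 * network header), so callers can both read the limit, e.g.
 *
 *     tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
 *
 * and, when the limit is exhausted, point an ICMPv6 parameter problem at
 * the value byte with "offset + 2" (skipping the TLV type and length
 * bytes).  Both uses appear in ip6_tnl_err() and ip6ip6_tnl_xmit() below.
 */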
461
462 /**
463 * ip6_tnl_err - tunnel error handler
464 *
465 * Description:
466 * ip6_tnl_err() should handle errors in the tunnel according
467 * to the specifications in RFC 2473.
468 **/
469
470 static int
471 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
472 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
473 {
474 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
475 struct net *net = dev_net(skb->dev);
476 u8 rel_type = ICMPV6_DEST_UNREACH;
477 u8 rel_code = ICMPV6_ADDR_UNREACH;
478 __u32 rel_info = 0;
479 struct ip6_tnl *t;
480 int err = -ENOENT;
481 int rel_msg = 0;
482 u8 tproto;
483 __u16 len;
484
485 /* If the packet doesn't contain the original IPv6 header we are
486 in trouble since we might need the source address for further
487 processing of the error. */
488
489 rcu_read_lock();
490 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
491 if (!t)
492 goto out;
493
494 tproto = READ_ONCE(t->parms.proto);
495 if (tproto != ipproto && tproto != 0)
496 goto out;
497
498 err = 0;
499
500 switch (*type) {
501 struct ipv6_tlv_tnl_enc_lim *tel;
502 __u32 mtu, teli;
503 case ICMPV6_DEST_UNREACH:
504 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
505 t->parms.name);
506 rel_msg = 1;
507 break;
508 case ICMPV6_TIME_EXCEED:
509 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
510 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
511 t->parms.name);
512 rel_msg = 1;
513 }
514 break;
515 case ICMPV6_PARAMPROB:
516 teli = 0;
517 if ((*code) == ICMPV6_HDR_FIELD)
518 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
519
520 if (teli && teli == *info - 2) {
521 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
522 if (tel->encap_limit == 0) {
523 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
524 t->parms.name);
525 rel_msg = 1;
526 }
527 } else {
528 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
529 t->parms.name);
530 }
531 break;
532 case ICMPV6_PKT_TOOBIG:
533 ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
534 sock_net_uid(net, NULL));
535 mtu = *info - offset;
536 if (mtu < IPV6_MIN_MTU)
537 mtu = IPV6_MIN_MTU;
538 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
539 if (len > mtu) {
540 rel_type = ICMPV6_PKT_TOOBIG;
541 rel_code = 0;
542 rel_info = mtu;
543 rel_msg = 1;
544 }
545 break;
546 case NDISC_REDIRECT:
547 ip6_redirect(skb, net, skb->dev->ifindex, 0,
548 sock_net_uid(net, NULL));
549 break;
550 }
551
552 *type = rel_type;
553 *code = rel_code;
554 *info = rel_info;
555 *msg = rel_msg;
556
557 out:
558 rcu_read_unlock();
559 return err;
560 }
561
562 static int
563 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
564 u8 type, u8 code, int offset, __be32 info)
565 {
566 __u32 rel_info = ntohl(info);
567 const struct iphdr *eiph;
568 struct sk_buff *skb2;
569 int err, rel_msg = 0;
570 u8 rel_type = type;
571 u8 rel_code = code;
572 struct rtable *rt;
573 struct flowi4 fl4;
574
575 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
576 &rel_msg, &rel_info, offset);
577 if (err < 0)
578 return err;
579
580 if (rel_msg == 0)
581 return 0;
582
583 switch (rel_type) {
584 case ICMPV6_DEST_UNREACH:
585 if (rel_code != ICMPV6_ADDR_UNREACH)
586 return 0;
587 rel_type = ICMP_DEST_UNREACH;
588 rel_code = ICMP_HOST_UNREACH;
589 break;
590 case ICMPV6_PKT_TOOBIG:
591 if (rel_code != 0)
592 return 0;
593 rel_type = ICMP_DEST_UNREACH;
594 rel_code = ICMP_FRAG_NEEDED;
595 break;
596 default:
597 return 0;
598 }
599
600 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
601 return 0;
602
603 skb2 = skb_clone(skb, GFP_ATOMIC);
604 if (!skb2)
605 return 0;
606
607 skb_dst_drop(skb2);
608
609 skb_pull(skb2, offset);
610 skb_reset_network_header(skb2);
611 eiph = ip_hdr(skb2);
612
613 /* Try to guess incoming interface */
614 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
615 0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
616 if (IS_ERR(rt))
617 goto out;
618
619 skb2->dev = rt->dst.dev;
620 ip_rt_put(rt);
621
622 /* route "incoming" packet */
623 if (rt->rt_flags & RTCF_LOCAL) {
624 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
625 eiph->daddr, eiph->saddr, 0, 0,
626 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
627 if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
628 if (!IS_ERR(rt))
629 ip_rt_put(rt);
630 goto out;
631 }
632 skb_dst_set(skb2, &rt->dst);
633 } else {
634 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
635 skb2->dev) ||
636 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
637 goto out;
638 }
639
640 /* change mtu on this route */
641 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
642 if (rel_info > dst_mtu(skb_dst(skb2)))
643 goto out;
644
645 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2,
646 rel_info);
647 }
648
649 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
650
651 out:
652 kfree_skb(skb2);
653 return 0;
654 }
655
656 static int
657 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
658 u8 type, u8 code, int offset, __be32 info)
659 {
660 __u32 rel_info = ntohl(info);
661 int err, rel_msg = 0;
662 u8 rel_type = type;
663 u8 rel_code = code;
664
665 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
666 &rel_msg, &rel_info, offset);
667 if (err < 0)
668 return err;
669
670 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
671 struct rt6_info *rt;
672 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
673
674 if (!skb2)
675 return 0;
676
677 skb_dst_drop(skb2);
678 skb_pull(skb2, offset);
679 skb_reset_network_header(skb2);
680
681 /* Try to guess incoming interface */
682 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
683 NULL, 0, 0);
684
685 if (rt && rt->dst.dev)
686 skb2->dev = rt->dst.dev;
687
688 icmpv6_send(skb2, rel_type, rel_code, rel_info);
689
690 ip6_rt_put(rt);
691
692 kfree_skb(skb2);
693 }
694
695 return 0;
696 }
697
698 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
699 const struct ipv6hdr *ipv6h,
700 struct sk_buff *skb)
701 {
702 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
703
704 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
705 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
706
707 return IP6_ECN_decapsulate(ipv6h, skb);
708 }
709
710 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
711 const struct ipv6hdr *ipv6h,
712 struct sk_buff *skb)
713 {
714 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
715 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
716
717 return IP6_ECN_decapsulate(ipv6h, skb);
718 }
719
720 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
721 const struct in6_addr *laddr,
722 const struct in6_addr *raddr)
723 {
724 struct __ip6_tnl_parm *p = &t->parms;
725 int ltype = ipv6_addr_type(laddr);
726 int rtype = ipv6_addr_type(raddr);
727 __u32 flags = 0;
728
729 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
730 flags = IP6_TNL_F_CAP_PER_PACKET;
731 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
732 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
733 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
734 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
735 if (ltype&IPV6_ADDR_UNICAST)
736 flags |= IP6_TNL_F_CAP_XMIT;
737 if (rtype&IPV6_ADDR_UNICAST)
738 flags |= IP6_TNL_F_CAP_RCV;
739 }
740 return flags;
741 }
742 EXPORT_SYMBOL(ip6_tnl_get_cap);
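/*
 * The capability flags computed above drive the policy helpers
 * ip6_tnl_rcv_ctl() (just below) and ip6_tnl_xmit_ctl() (further down):
 * IP6_TNL_F_CAP_XMIT / IP6_TNL_F_CAP_RCV mean the configured endpoints are
 * usable for sending / receiving, while IP6_TNL_F_CAP_PER_PACKET (set when
 * an endpoint is a wildcard) defers the decision so that the helpers
 * re-evaluate the capability against each packet's addresses.
 */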
743
744 /* called with rcu_read_lock() */
745 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
746 const struct in6_addr *laddr,
747 const struct in6_addr *raddr)
748 {
749 struct __ip6_tnl_parm *p = &t->parms;
750 int ret = 0;
751 struct net *net = t->net;
752
753 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
754 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
755 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
756 struct net_device *ldev = NULL;
757
758 if (p->link)
759 ldev = dev_get_by_index_rcu(net, p->link);
760
761 if ((ipv6_addr_is_multicast(laddr) ||
762 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
763 ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
764 likely(!ipv6_chk_addr(net, raddr, NULL, 0))))
765 ret = 1;
766 }
767 return ret;
768 }
769 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
770
771 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
772 const struct tnl_ptk_info *tpi,
773 struct metadata_dst *tun_dst,
774 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
775 const struct ipv6hdr *ipv6h,
776 struct sk_buff *skb),
777 bool log_ecn_err)
778 {
779 struct pcpu_sw_netstats *tstats;
780 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
781 int err;
782
783 if ((!(tpi->flags & TUNNEL_CSUM) &&
784 (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
785 ((tpi->flags & TUNNEL_CSUM) &&
786 !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
787 tunnel->dev->stats.rx_crc_errors++;
788 tunnel->dev->stats.rx_errors++;
789 goto drop;
790 }
791
792 if (tunnel->parms.i_flags & TUNNEL_SEQ) {
793 if (!(tpi->flags & TUNNEL_SEQ) ||
794 (tunnel->i_seqno &&
795 (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
796 tunnel->dev->stats.rx_fifo_errors++;
797 tunnel->dev->stats.rx_errors++;
798 goto drop;
799 }
800 tunnel->i_seqno = ntohl(tpi->seq) + 1;
801 }
802
803 skb->protocol = tpi->proto;
804
805 /* Warning: All skb pointers will be invalidated! */
806 if (tunnel->dev->type == ARPHRD_ETHER) {
807 if (!pskb_may_pull(skb, ETH_HLEN)) {
808 tunnel->dev->stats.rx_length_errors++;
809 tunnel->dev->stats.rx_errors++;
810 goto drop;
811 }
812
813 ipv6h = ipv6_hdr(skb);
814 skb->protocol = eth_type_trans(skb, tunnel->dev);
815 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
816 } else {
817 skb->dev = tunnel->dev;
818 }
819
820 skb_reset_network_header(skb);
821 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
822
823 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
824
825 err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
826 if (unlikely(err)) {
827 if (log_ecn_err)
828 net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
829 &ipv6h->saddr,
830 ipv6_get_dsfield(ipv6h));
831 if (err > 1) {
832 ++tunnel->dev->stats.rx_frame_errors;
833 ++tunnel->dev->stats.rx_errors;
834 goto drop;
835 }
836 }
837
838 tstats = this_cpu_ptr(tunnel->dev->tstats);
839 u64_stats_update_begin(&tstats->syncp);
840 tstats->rx_packets++;
841 tstats->rx_bytes += skb->len;
842 u64_stats_update_end(&tstats->syncp);
843
844 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
845
846 if (tun_dst)
847 skb_dst_set(skb, (struct dst_entry *)tun_dst);
848
849 gro_cells_receive(&tunnel->gro_cells, skb);
850 return 0;
851
852 drop:
853 if (tun_dst)
854 dst_release((struct dst_entry *)tun_dst);
855 kfree_skb(skb);
856 return 0;
857 }
858
859 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
860 const struct tnl_ptk_info *tpi,
861 struct metadata_dst *tun_dst,
862 bool log_ecn_err)
863 {
864 return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
865 log_ecn_err);
866 }
867 EXPORT_SYMBOL(ip6_tnl_rcv);
868
869 static const struct tnl_ptk_info tpi_v6 = {
870 /* no tunnel info required for ipxip6. */
871 .proto = htons(ETH_P_IPV6),
872 };
873
874 static const struct tnl_ptk_info tpi_v4 = {
875 /* no tunnel info required for ipxip6. */
876 .proto = htons(ETH_P_IP),
877 };
878
879 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
880 const struct tnl_ptk_info *tpi,
881 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
882 const struct ipv6hdr *ipv6h,
883 struct sk_buff *skb))
884 {
885 struct ip6_tnl *t;
886 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
887 struct metadata_dst *tun_dst = NULL;
888 int ret = -1;
889
890 rcu_read_lock();
891 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
892
893 if (t) {
894 u8 tproto = READ_ONCE(t->parms.proto);
895
896 if (tproto != ipproto && tproto != 0)
897 goto drop;
898 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
899 goto drop;
900 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
901 goto drop;
902 if (iptunnel_pull_header(skb, 0, tpi->proto, false))
903 goto drop;
904 if (t->parms.collect_md) {
905 tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
906 if (!tun_dst)
907 goto drop;
908 }
909 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
910 log_ecn_error);
911 }
912
913 rcu_read_unlock();
914
915 return ret;
916
917 drop:
918 rcu_read_unlock();
919 kfree_skb(skb);
920 return 0;
921 }
922
923 static int ip4ip6_rcv(struct sk_buff *skb)
924 {
925 return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
926 ip4ip6_dscp_ecn_decapsulate);
927 }
928
929 static int ip6ip6_rcv(struct sk_buff *skb)
930 {
931 return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
932 ip6ip6_dscp_ecn_decapsulate);
933 }
934
935 struct ipv6_tel_txoption {
936 struct ipv6_txoptions ops;
937 __u8 dst_opt[8];
938 };
939
940 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
941 {
942 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
943
944 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
945 opt->dst_opt[3] = 1;
946 opt->dst_opt[4] = encap_limit;
947 opt->dst_opt[5] = IPV6_TLV_PADN;
948 opt->dst_opt[6] = 1;
949
950 opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
951 opt->ops.opt_nflen = 8;
952 }
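/*
 * For reference, the 8-byte destination options header built above lays
 * out as follows (RFC 2473 tunnel encapsulation limit option plus PadN;
 * the zero bytes come from the initial memset):
 *
 *     dst_opt[0]  next header   left for ipv6_push_frag_opts() to fill in
 *     dst_opt[1]  hdr ext len   0, i.e. 8 bytes total
 *     dst_opt[2]  option type   IPV6_TLV_TNL_ENCAP_LIMIT (4)
 *     dst_opt[3]  option len    1
 *     dst_opt[4]  encap limit   value supplied by the caller
 *     dst_opt[5]  option type   IPV6_TLV_PADN (1)
 *     dst_opt[6]  option len    1
 *     dst_opt[7]  pad byte      0
 */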
953
954 /**
955 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
956 * @t: the outgoing tunnel device
957 * @hdr: IPv6 header from the incoming packet
958 *
959 * Description:
960 * Avoid trivial tunneling loop by checking that tunnel exit-point
961 * doesn't match source of incoming packet.
962 *
963 * Return:
964 * 1 if conflict,
965 * 0 otherwise
966 **/
967
968 static inline bool
969 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
970 {
971 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
972 }
973
974 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
975 const struct in6_addr *laddr,
976 const struct in6_addr *raddr)
977 {
978 struct __ip6_tnl_parm *p = &t->parms;
979 int ret = 0;
980 struct net *net = t->net;
981
982 if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
983 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
984 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
985 struct net_device *ldev = NULL;
986
987 rcu_read_lock();
988 if (p->link)
989 ldev = dev_get_by_index_rcu(net, p->link);
990
991 if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
992 pr_warn("%s xmit: Local address not yet configured!\n",
993 p->name);
994 else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
995 !ipv6_addr_is_multicast(raddr) &&
996 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
997 pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
998 p->name);
999 else
1000 ret = 1;
1001 rcu_read_unlock();
1002 }
1003 return ret;
1004 }
1005 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1006
1007 /**
1008 * ip6_tnl_xmit - encapsulate packet and send
1009 * @skb: the outgoing socket buffer
1010 * @dev: the outgoing tunnel device
1011 * @dsfield: dscp code for outer header
1012 * @fl6: flow of tunneled packet
1013 * @encap_limit: encapsulation limit
1014 * @pmtu: Path MTU is stored if packet is too big
1015 * @proto: next header value
1016 *
1017 * Description:
1018 * Build new header and do some sanity checks on the packet before sending
1019 * it.
1020 *
1021 * Return:
1022 * 0 on success
1023 * -1 on failure
1024 * %-EMSGSIZE if the packet is too big (the link MTU is returned via @pmtu)
1025 **/
1026
1027 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1028 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1029 __u8 proto)
1030 {
1031 struct ip6_tnl *t = netdev_priv(dev);
1032 struct net *net = t->net;
1033 struct net_device_stats *stats = &t->dev->stats;
1034 struct ipv6hdr *ipv6h;
1035 struct ipv6_tel_txoption opt;
1036 struct dst_entry *dst = NULL, *ndst = NULL;
1037 struct net_device *tdev;
1038 int mtu;
1039 unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1040 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1041 unsigned int max_headroom = psh_hlen;
1042 bool use_cache = false;
1043 u8 hop_limit;
1044 int err = -1;
1045
1046 if (t->parms.collect_md) {
1047 hop_limit = skb_tunnel_info(skb)->key.ttl;
1048 goto route_lookup;
1049 } else {
1050 hop_limit = t->parms.hop_limit;
1051 }
1052
1053 /* NBMA tunnel */
1054 if (ipv6_addr_any(&t->parms.raddr)) {
1055 if (skb->protocol == htons(ETH_P_IPV6)) {
1056 struct in6_addr *addr6;
1057 struct neighbour *neigh;
1058 int addr_type;
1059
1060 if (!skb_dst(skb))
1061 goto tx_err_link_failure;
1062
1063 neigh = dst_neigh_lookup(skb_dst(skb),
1064 &ipv6_hdr(skb)->daddr);
1065 if (!neigh)
1066 goto tx_err_link_failure;
1067
1068 addr6 = (struct in6_addr *)&neigh->primary_key;
1069 addr_type = ipv6_addr_type(addr6);
1070
1071 if (addr_type == IPV6_ADDR_ANY)
1072 addr6 = &ipv6_hdr(skb)->daddr;
1073
1074 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1075 neigh_release(neigh);
1076 }
1077 } else if (!(t->parms.flags &
1078 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1079 /* enable the cache only if the routing decision does
1080 * not depend on the current inner header value
1081 */
1082 use_cache = true;
1083 }
1084
1085 if (use_cache)
1086 dst = dst_cache_get(&t->dst_cache);
1087
1088 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1089 goto tx_err_link_failure;
1090
1091 if (!dst) {
1092 route_lookup:
1093 /* add dsfield to flowlabel for route lookup */
1094 fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1095
1096 dst = ip6_route_output(net, NULL, fl6);
1097
1098 if (dst->error)
1099 goto tx_err_link_failure;
1100 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1101 if (IS_ERR(dst)) {
1102 err = PTR_ERR(dst);
1103 dst = NULL;
1104 goto tx_err_link_failure;
1105 }
1106 if (t->parms.collect_md &&
1107 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1108 &fl6->daddr, 0, &fl6->saddr))
1109 goto tx_err_link_failure;
1110 ndst = dst;
1111 }
1112
1113 tdev = dst->dev;
1114
1115 if (tdev == dev) {
1116 stats->collisions++;
1117 net_warn_ratelimited("%s: Local routing loop detected!\n",
1118 t->parms.name);
1119 goto tx_err_dst_release;
1120 }
1121 mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
1122 if (encap_limit >= 0) {
1123 max_headroom += 8;
1124 mtu -= 8;
1125 }
1126 if (skb->protocol == htons(ETH_P_IPV6)) {
1127 if (mtu < IPV6_MIN_MTU)
1128 mtu = IPV6_MIN_MTU;
1129 } else if (mtu < 576) {
1130 mtu = 576;
1131 }
1132
1133 if (skb_dst(skb) && !t->parms.collect_md)
1134 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1135 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1136 *pmtu = mtu;
1137 err = -EMSGSIZE;
1138 goto tx_err_dst_release;
1139 }
1140
1141 if (t->err_count > 0) {
1142 if (time_before(jiffies,
1143 t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1144 t->err_count--;
1145
1146 dst_link_failure(skb);
1147 } else {
1148 t->err_count = 0;
1149 }
1150 }
1151
1152 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1153
1154 /*
1155 * Okay, now see if we can stuff it in the buffer as-is.
1156 */
1157 max_headroom += LL_RESERVED_SPACE(tdev);
1158
1159 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1160 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1161 struct sk_buff *new_skb;
1162
1163 new_skb = skb_realloc_headroom(skb, max_headroom);
1164 if (!new_skb)
1165 goto tx_err_dst_release;
1166
1167 if (skb->sk)
1168 skb_set_owner_w(new_skb, skb->sk);
1169 consume_skb(skb);
1170 skb = new_skb;
1171 }
1172
1173 if (t->parms.collect_md) {
1174 if (t->encap.type != TUNNEL_ENCAP_NONE)
1175 goto tx_err_dst_release;
1176 } else {
1177 if (use_cache && ndst)
1178 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1179 }
1180 skb_dst_set(skb, dst);
1181
1182 if (encap_limit >= 0) {
1183 init_tel_txopt(&opt, encap_limit);
1184 ipv6_push_frag_opts(skb, &opt.ops, &proto);
1185 }
1186 hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
1187
1188 /* Calculate max headroom for all the headers and adjust
1189 * needed_headroom if necessary.
1190 */
1191 max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1192 + dst->header_len + t->hlen;
1193 if (max_headroom > dev->needed_headroom)
1194 dev->needed_headroom = max_headroom;
1195
1196 err = ip6_tnl_encap(skb, t, &proto, fl6);
1197 if (err)
1198 return err;
1199
1200 skb_push(skb, sizeof(struct ipv6hdr));
1201 skb_reset_network_header(skb);
1202 ipv6h = ipv6_hdr(skb);
1203 ip6_flow_hdr(ipv6h, dsfield,
1204 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1205 ipv6h->hop_limit = hop_limit;
1206 ipv6h->nexthdr = proto;
1207 ipv6h->saddr = fl6->saddr;
1208 ipv6h->daddr = fl6->daddr;
1209 ip6tunnel_xmit(NULL, skb, dev);
1210 return 0;
1211 tx_err_link_failure:
1212 stats->tx_carrier_errors++;
1213 dst_link_failure(skb);
1214 tx_err_dst_release:
1215 dst_release(dst);
1216 return err;
1217 }
1218 EXPORT_SYMBOL(ip6_tnl_xmit);
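/*
 * ip6_tnl_xmit() is exported because it is also the transmit building
 * block for other IPv6 tunnel drivers (ip6_gre, for instance).  Callers
 * are expected to have set up the flow (fl6), dsfield and encap_limit
 * themselves and, on -EMSGSIZE, to send the appropriate "packet too big"
 * ICMP using the MTU returned through @pmtu; this is exactly what
 * ip4ip6_tnl_xmit() and ip6ip6_tnl_xmit() below do.
 */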
1219
1220 static inline int
1221 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1222 {
1223 struct ip6_tnl *t = netdev_priv(dev);
1224 const struct iphdr *iph = ip_hdr(skb);
1225 int encap_limit = -1;
1226 struct flowi6 fl6;
1227 __u8 dsfield;
1228 __u32 mtu;
1229 u8 tproto;
1230 int err;
1231
1232 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1233
1234 tproto = READ_ONCE(t->parms.proto);
1235 if (tproto != IPPROTO_IPIP && tproto != 0)
1236 return -1;
1237
1238 if (t->parms.collect_md) {
1239 struct ip_tunnel_info *tun_info;
1240 const struct ip_tunnel_key *key;
1241
1242 tun_info = skb_tunnel_info(skb);
1243 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1244 ip_tunnel_info_af(tun_info) != AF_INET6))
1245 return -1;
1246 key = &tun_info->key;
1247 memset(&fl6, 0, sizeof(fl6));
1248 fl6.flowi6_proto = IPPROTO_IPIP;
1249 fl6.daddr = key->u.ipv6.dst;
1250 fl6.flowlabel = key->label;
1251 dsfield = key->tos;
1252 } else {
1253 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1254 encap_limit = t->parms.encap_limit;
1255
1256 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1257 fl6.flowi6_proto = IPPROTO_IPIP;
1258
1259 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1260 dsfield = ipv4_get_dsfield(iph);
1261 else
1262 dsfield = ip6_tclass(t->parms.flowinfo);
1263 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1264 fl6.flowi6_mark = skb->mark;
1265 else
1266 fl6.flowi6_mark = t->parms.fwmark;
1267 }
1268
1269 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1270
1271 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1272 return -1;
1273
1274 dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1275
1276 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1277
1278 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1279 IPPROTO_IPIP);
1280 if (err != 0) {
1281 /* XXX: send ICMP error even if DF is not set. */
1282 if (err == -EMSGSIZE)
1283 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1284 htonl(mtu));
1285 return -1;
1286 }
1287
1288 return 0;
1289 }
1290
1291 static inline int
1292 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1293 {
1294 struct ip6_tnl *t = netdev_priv(dev);
1295 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1296 int encap_limit = -1;
1297 __u16 offset;
1298 struct flowi6 fl6;
1299 __u8 dsfield;
1300 __u32 mtu;
1301 u8 tproto;
1302 int err;
1303
1304 tproto = READ_ONCE(t->parms.proto);
1305 if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1306 ip6_tnl_addr_conflict(t, ipv6h))
1307 return -1;
1308
1309 if (t->parms.collect_md) {
1310 struct ip_tunnel_info *tun_info;
1311 const struct ip_tunnel_key *key;
1312
1313 tun_info = skb_tunnel_info(skb);
1314 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1315 ip_tunnel_info_af(tun_info) != AF_INET6))
1316 return -1;
1317 key = &tun_info->key;
1318 memset(&fl6, 0, sizeof(fl6));
1319 fl6.flowi6_proto = IPPROTO_IPV6;
1320 fl6.daddr = key->u.ipv6.dst;
1321 fl6.flowlabel = key->label;
1322 dsfield = key->tos;
1323 } else {
1324 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1325 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1326 ipv6h = ipv6_hdr(skb);
1327 if (offset > 0) {
1328 struct ipv6_tlv_tnl_enc_lim *tel;
1329
1330 tel = (void *)&skb_network_header(skb)[offset];
1331 if (tel->encap_limit == 0) {
1332 icmpv6_send(skb, ICMPV6_PARAMPROB,
1333 ICMPV6_HDR_FIELD, offset + 2);
1334 return -1;
1335 }
1336 encap_limit = tel->encap_limit - 1;
1337 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1338 encap_limit = t->parms.encap_limit;
1339 }
1340
1341 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1342 fl6.flowi6_proto = IPPROTO_IPV6;
1343
1344 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1345 dsfield = ipv6_get_dsfield(ipv6h);
1346 else
1347 dsfield = ip6_tclass(t->parms.flowinfo);
1348 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1349 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1350 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1351 fl6.flowi6_mark = skb->mark;
1352 else
1353 fl6.flowi6_mark = t->parms.fwmark;
1354 }
1355
1356 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1357
1358 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1359 return -1;
1360
1361 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1362
1363 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1364
1365 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1366 IPPROTO_IPV6);
1367 if (err != 0) {
1368 if (err == -EMSGSIZE)
1369 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1370 return -1;
1371 }
1372
1373 return 0;
1374 }
1375
1376 static netdev_tx_t
1377 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1378 {
1379 struct ip6_tnl *t = netdev_priv(dev);
1380 struct net_device_stats *stats = &t->dev->stats;
1381 int ret;
1382
1383 switch (skb->protocol) {
1384 case htons(ETH_P_IP):
1385 ret = ip4ip6_tnl_xmit(skb, dev);
1386 break;
1387 case htons(ETH_P_IPV6):
1388 ret = ip6ip6_tnl_xmit(skb, dev);
1389 break;
1390 default:
1391 goto tx_err;
1392 }
1393
1394 if (ret < 0)
1395 goto tx_err;
1396
1397 return NETDEV_TX_OK;
1398
1399 tx_err:
1400 stats->tx_errors++;
1401 stats->tx_dropped++;
1402 kfree_skb(skb);
1403 return NETDEV_TX_OK;
1404 }
1405
1406 static void ip6_tnl_link_config(struct ip6_tnl *t)
1407 {
1408 struct net_device *dev = t->dev;
1409 struct __ip6_tnl_parm *p = &t->parms;
1410 struct flowi6 *fl6 = &t->fl.u.ip6;
1411 int t_hlen;
1412
1413 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1414 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1415
1416 /* Set up flowi template */
1417 fl6->saddr = p->laddr;
1418 fl6->daddr = p->raddr;
1419 fl6->flowi6_oif = p->link;
1420 fl6->flowlabel = 0;
1421
1422 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1423 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1424 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1425 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1426
1427 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1428 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1429
1430 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1431 dev->flags |= IFF_POINTOPOINT;
1432 else
1433 dev->flags &= ~IFF_POINTOPOINT;
1434
1435 t->tun_hlen = 0;
1436 t->hlen = t->encap_hlen + t->tun_hlen;
1437 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1438
1439 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1440 int strict = (ipv6_addr_type(&p->raddr) &
1441 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1442
1443 struct rt6_info *rt = rt6_lookup(t->net,
1444 &p->raddr, &p->laddr,
1445 p->link, strict);
1446
1447 if (!rt)
1448 return;
1449
1450 if (rt->dst.dev) {
1451 dev->hard_header_len = rt->dst.dev->hard_header_len +
1452 t_hlen;
1453
1454 dev->mtu = rt->dst.dev->mtu - t_hlen;
1455 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1456 dev->mtu -= 8;
1457
1458 if (dev->mtu < IPV6_MIN_MTU)
1459 dev->mtu = IPV6_MIN_MTU;
1460 }
1461 ip6_rt_put(rt);
1462 }
1463 }
1464
1465 /**
1466 * ip6_tnl_change - update the tunnel parameters
1467 * @t: tunnel to be changed
1468 * @p: tunnel configuration parameters
1469 *
1470 * Description:
1471 * ip6_tnl_change() updates the tunnel parameters
1472 **/
1473
1474 static int
1475 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1476 {
1477 t->parms.laddr = p->laddr;
1478 t->parms.raddr = p->raddr;
1479 t->parms.flags = p->flags;
1480 t->parms.hop_limit = p->hop_limit;
1481 t->parms.encap_limit = p->encap_limit;
1482 t->parms.flowinfo = p->flowinfo;
1483 t->parms.link = p->link;
1484 t->parms.proto = p->proto;
1485 t->parms.fwmark = p->fwmark;
1486 dst_cache_reset(&t->dst_cache);
1487 ip6_tnl_link_config(t);
1488 return 0;
1489 }
1490
1491 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1492 {
1493 struct net *net = t->net;
1494 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1495 int err;
1496
1497 ip6_tnl_unlink(ip6n, t);
1498 synchronize_net();
1499 err = ip6_tnl_change(t, p);
1500 ip6_tnl_link(ip6n, t);
1501 netdev_state_change(t->dev);
1502 return err;
1503 }
1504
1505 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1506 {
1507 /* for the default tnl0 device, only the proto may be changed */
1508 t->parms.proto = p->proto;
1509 netdev_state_change(t->dev);
1510 return 0;
1511 }
1512
1513 static void
1514 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1515 {
1516 p->laddr = u->laddr;
1517 p->raddr = u->raddr;
1518 p->flags = u->flags;
1519 p->hop_limit = u->hop_limit;
1520 p->encap_limit = u->encap_limit;
1521 p->flowinfo = u->flowinfo;
1522 p->link = u->link;
1523 p->proto = u->proto;
1524 memcpy(p->name, u->name, sizeof(u->name));
1525 }
1526
1527 static void
1528 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1529 {
1530 u->laddr = p->laddr;
1531 u->raddr = p->raddr;
1532 u->flags = p->flags;
1533 u->hop_limit = p->hop_limit;
1534 u->encap_limit = p->encap_limit;
1535 u->flowinfo = p->flowinfo;
1536 u->link = p->link;
1537 u->proto = p->proto;
1538 memcpy(u->name, p->name, sizeof(u->name));
1539 }
1540
1541 /**
1542 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1543 * @dev: virtual device associated with tunnel
1544 * @ifr: parameters passed from userspace
1545 * @cmd: command to be performed
1546 *
1547 * Description:
1548 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1549 * from userspace.
1550 *
1551 * The possible commands are the following:
1552 * %SIOCGETTUNNEL: get tunnel parameters for device
1553 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1554 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1555 * %SIOCDELTUNNEL: delete tunnel
1556 *
1557 * The fallback device "ip6tnl0", created during module
1558 * initialization, can be used for creating other tunnel devices.
1559 *
1560 * Return:
1561 * 0 on success,
1562 * %-EFAULT if unable to copy data to or from userspace,
1563 * %-EPERM if current process does not have %CAP_NET_ADMIN set,
1564 * %-EINVAL if passed tunnel parameters are invalid,
1565 * %-EEXIST if changing a tunnel's parameters would cause a conflict
1566 * %-ENODEV if attempting to change or delete a nonexistent device
1567 **/
1568
1569 static int
1570 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1571 {
1572 int err = 0;
1573 struct ip6_tnl_parm p;
1574 struct __ip6_tnl_parm p1;
1575 struct ip6_tnl *t = netdev_priv(dev);
1576 struct net *net = t->net;
1577 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1578
1579 memset(&p1, 0, sizeof(p1));
1580
1581 switch (cmd) {
1582 case SIOCGETTUNNEL:
1583 if (dev == ip6n->fb_tnl_dev) {
1584 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1585 err = -EFAULT;
1586 break;
1587 }
1588 ip6_tnl_parm_from_user(&p1, &p);
1589 t = ip6_tnl_locate(net, &p1, 0);
1590 if (IS_ERR(t))
1591 t = netdev_priv(dev);
1592 } else {
1593 memset(&p, 0, sizeof(p));
1594 }
1595 ip6_tnl_parm_to_user(&p, &t->parms);
1596 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1597 err = -EFAULT;
1598 }
1599 break;
1600 case SIOCADDTUNNEL:
1601 case SIOCCHGTUNNEL:
1602 err = -EPERM;
1603 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1604 break;
1605 err = -EFAULT;
1606 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1607 break;
1608 err = -EINVAL;
1609 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1610 p.proto != 0)
1611 break;
1612 ip6_tnl_parm_from_user(&p1, &p);
1613 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1614 if (cmd == SIOCCHGTUNNEL) {
1615 if (!IS_ERR(t)) {
1616 if (t->dev != dev) {
1617 err = -EEXIST;
1618 break;
1619 }
1620 } else
1621 t = netdev_priv(dev);
1622 if (dev == ip6n->fb_tnl_dev)
1623 err = ip6_tnl0_update(t, &p1);
1624 else
1625 err = ip6_tnl_update(t, &p1);
1626 }
1627 if (!IS_ERR(t)) {
1628 err = 0;
1629 ip6_tnl_parm_to_user(&p, &t->parms);
1630 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1631 err = -EFAULT;
1632
1633 } else {
1634 err = PTR_ERR(t);
1635 }
1636 break;
1637 case SIOCDELTUNNEL:
1638 err = -EPERM;
1639 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1640 break;
1641
1642 if (dev == ip6n->fb_tnl_dev) {
1643 err = -EFAULT;
1644 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1645 break;
1646 err = -ENOENT;
1647 ip6_tnl_parm_from_user(&p1, &p);
1648 t = ip6_tnl_locate(net, &p1, 0);
1649 if (IS_ERR(t))
1650 break;
1651 err = -EPERM;
1652 if (t->dev == ip6n->fb_tnl_dev)
1653 break;
1654 dev = t->dev;
1655 }
1656 err = 0;
1657 unregister_netdevice(dev);
1658 break;
1659 default:
1660 err = -EINVAL;
1661 }
1662 return err;
1663 }
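/*
 * A minimal userspace sketch of the ioctl interface documented above
 * (hypothetical addresses and tunnel name; iproute2's "ip -6 tunnel"
 * follows the same pattern).  Requests for new tunnels go through the
 * fallback device ip6tnl0; struct ip6_tnl_parm comes from
 * <linux/ip6_tunnel.h> and SIOCADDTUNNEL from <linux/if_tunnel.h>:
 *
 *     struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6, .hop_limit = 64 };
 *     struct ifreq ifr = { 0 };
 *     int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *     strcpy(p.name, "ip6tnl1");
 *     inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *     inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *     strcpy(ifr.ifr_name, "ip6tnl0");
 *     ifr.ifr_ifru.ifru_data = (void *)&p;
 *     if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
 *         perror("SIOCADDTUNNEL");
 */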
1664
1665 /**
1666 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1667 * @dev: virtual device associated with tunnel
1668 * @new_mtu: the new mtu
1669 *
1670 * Return:
1671 * 0 on success,
1672 * %-EINVAL if mtu is out of range
1673 **/
1674
1675 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1676 {
1677 struct ip6_tnl *tnl = netdev_priv(dev);
1678
1679 if (tnl->parms.proto == IPPROTO_IPIP) {
1680 if (new_mtu < ETH_MIN_MTU)
1681 return -EINVAL;
1682 } else {
1683 if (new_mtu < IPV6_MIN_MTU)
1684 return -EINVAL;
1685 }
1686 if (new_mtu > 0xFFF8 - dev->hard_header_len)
1687 return -EINVAL;
1688 dev->mtu = new_mtu;
1689 return 0;
1690 }
1691 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1692
1693 int ip6_tnl_get_iflink(const struct net_device *dev)
1694 {
1695 struct ip6_tnl *t = netdev_priv(dev);
1696
1697 return t->parms.link;
1698 }
1699 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1700
1701 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1702 unsigned int num)
1703 {
1704 if (num >= MAX_IPTUN_ENCAP_OPS)
1705 return -ERANGE;
1706
1707 return !cmpxchg((const struct ip6_tnl_encap_ops **)
1708 &ip6tun_encaps[num],
1709 NULL, ops) ? 0 : -1;
1710 }
1711 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1712
1713 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1714 unsigned int num)
1715 {
1716 int ret;
1717
1718 if (num >= MAX_IPTUN_ENCAP_OPS)
1719 return -ERANGE;
1720
1721 ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1722 &ip6tun_encaps[num],
1723 ops, NULL) == ops) ? 0 : -1;
1724
1725 synchronize_net();
1726
1727 return ret;
1728 }
1729 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1730
1731 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1732 struct ip_tunnel_encap *ipencap)
1733 {
1734 int hlen;
1735
1736 memset(&t->encap, 0, sizeof(t->encap));
1737
1738 hlen = ip6_encap_hlen(ipencap);
1739 if (hlen < 0)
1740 return hlen;
1741
1742 t->encap.type = ipencap->type;
1743 t->encap.sport = ipencap->sport;
1744 t->encap.dport = ipencap->dport;
1745 t->encap.flags = ipencap->flags;
1746
1747 t->encap_hlen = hlen;
1748 t->hlen = t->encap_hlen + t->tun_hlen;
1749
1750 return 0;
1751 }
1752 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1753
1754 static const struct net_device_ops ip6_tnl_netdev_ops = {
1755 .ndo_init = ip6_tnl_dev_init,
1756 .ndo_uninit = ip6_tnl_dev_uninit,
1757 .ndo_start_xmit = ip6_tnl_start_xmit,
1758 .ndo_do_ioctl = ip6_tnl_ioctl,
1759 .ndo_change_mtu = ip6_tnl_change_mtu,
1760 .ndo_get_stats = ip6_get_stats,
1761 .ndo_get_iflink = ip6_tnl_get_iflink,
1762 };
1763
1764 #define IPXIPX_FEATURES (NETIF_F_SG | \
1765 NETIF_F_FRAGLIST | \
1766 NETIF_F_HIGHDMA | \
1767 NETIF_F_GSO_SOFTWARE | \
1768 NETIF_F_HW_CSUM)
1769
1770 /**
1771 * ip6_tnl_dev_setup - setup virtual tunnel device
1772 * @dev: virtual device associated with tunnel
1773 *
1774 * Description:
1775 * Initialize function pointers and device parameters
1776 **/
1777
1778 static void ip6_tnl_dev_setup(struct net_device *dev)
1779 {
1780 dev->netdev_ops = &ip6_tnl_netdev_ops;
1781 dev->needs_free_netdev = true;
1782 dev->priv_destructor = ip6_dev_free;
1783
1784 dev->type = ARPHRD_TUNNEL6;
1785 dev->flags |= IFF_NOARP;
1786 dev->addr_len = sizeof(struct in6_addr);
1787 dev->features |= NETIF_F_LLTX;
1788 netif_keep_dst(dev);
1789
1790 dev->features |= IPXIPX_FEATURES;
1791 dev->hw_features |= IPXIPX_FEATURES;
1792
1793 /* This perm addr will be used as interface identifier by IPv6 */
1794 dev->addr_assign_type = NET_ADDR_RANDOM;
1795 eth_random_addr(dev->perm_addr);
1796 }
1797
1798
1799 /**
1800 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1801 * @dev: virtual device associated with tunnel
1802 **/
1803
1804 static inline int
1805 ip6_tnl_dev_init_gen(struct net_device *dev)
1806 {
1807 struct ip6_tnl *t = netdev_priv(dev);
1808 int ret;
1809 int t_hlen;
1810
1811 t->dev = dev;
1812 t->net = dev_net(dev);
1813 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1814 if (!dev->tstats)
1815 return -ENOMEM;
1816
1817 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1818 if (ret)
1819 goto free_stats;
1820
1821 ret = gro_cells_init(&t->gro_cells, dev);
1822 if (ret)
1823 goto destroy_dst;
1824
1825 t->tun_hlen = 0;
1826 t->hlen = t->encap_hlen + t->tun_hlen;
1827 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1828
1829 dev->type = ARPHRD_TUNNEL6;
1830 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1831 dev->mtu = ETH_DATA_LEN - t_hlen;
1832 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1833 dev->mtu -= 8;
1834 dev->min_mtu = ETH_MIN_MTU;
1835 dev->max_mtu = 0xFFF8 - dev->hard_header_len;
1836
1837 return 0;
1838
1839 destroy_dst:
1840 dst_cache_destroy(&t->dst_cache);
1841 free_stats:
1842 free_percpu(dev->tstats);
1843 dev->tstats = NULL;
1844
1845 return ret;
1846 }
1847
1848 /**
1849 * ip6_tnl_dev_init - initializer for all non-fallback tunnel devices
1850 * @dev: virtual device associated with tunnel
1851 **/
1852
1853 static int ip6_tnl_dev_init(struct net_device *dev)
1854 {
1855 struct ip6_tnl *t = netdev_priv(dev);
1856 int err = ip6_tnl_dev_init_gen(dev);
1857
1858 if (err)
1859 return err;
1860 ip6_tnl_link_config(t);
1861 if (t->parms.collect_md) {
1862 dev->features |= NETIF_F_NETNS_LOCAL;
1863 netif_keep_dst(dev);
1864 }
1865 return 0;
1866 }
1867
1868 /**
1869 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1870 * @dev: fallback device
1871 *
1872 * Return: 0
1873 **/
1874
1875 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1876 {
1877 struct ip6_tnl *t = netdev_priv(dev);
1878 struct net *net = dev_net(dev);
1879 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1880
1881 t->parms.proto = IPPROTO_IPV6;
1882 dev_hold(dev);
1883
1884 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1885 return 0;
1886 }
1887
1888 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1889 struct netlink_ext_ack *extack)
1890 {
1891 u8 proto;
1892
1893 if (!data || !data[IFLA_IPTUN_PROTO])
1894 return 0;
1895
1896 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1897 if (proto != IPPROTO_IPV6 &&
1898 proto != IPPROTO_IPIP &&
1899 proto != 0)
1900 return -EINVAL;
1901
1902 return 0;
1903 }
1904
1905 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1906 struct __ip6_tnl_parm *parms)
1907 {
1908 memset(parms, 0, sizeof(*parms));
1909
1910 if (!data)
1911 return;
1912
1913 if (data[IFLA_IPTUN_LINK])
1914 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1915
1916 if (data[IFLA_IPTUN_LOCAL])
1917 parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1918
1919 if (data[IFLA_IPTUN_REMOTE])
1920 parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1921
1922 if (data[IFLA_IPTUN_TTL])
1923 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1924
1925 if (data[IFLA_IPTUN_ENCAP_LIMIT])
1926 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1927
1928 if (data[IFLA_IPTUN_FLOWINFO])
1929 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1930
1931 if (data[IFLA_IPTUN_FLAGS])
1932 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1933
1934 if (data[IFLA_IPTUN_PROTO])
1935 parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1936
1937 if (data[IFLA_IPTUN_COLLECT_METADATA])
1938 parms->collect_md = true;
1939
1940 if (data[IFLA_IPTUN_FWMARK])
1941 parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1942 }
1943
1944 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1945 struct ip_tunnel_encap *ipencap)
1946 {
1947 bool ret = false;
1948
1949 memset(ipencap, 0, sizeof(*ipencap));
1950
1951 if (!data)
1952 return ret;
1953
1954 if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1955 ret = true;
1956 ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1957 }
1958
1959 if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1960 ret = true;
1961 ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1962 }
1963
1964 if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1965 ret = true;
1966 ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1967 }
1968
1969 if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1970 ret = true;
1971 ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1972 }
1973
1974 return ret;
1975 }
1976
1977 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1978 struct nlattr *tb[], struct nlattr *data[],
1979 struct netlink_ext_ack *extack)
1980 {
1981 struct net *net = dev_net(dev);
1982 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1983 struct ip6_tnl *nt, *t;
1984 struct ip_tunnel_encap ipencap;
1985
1986 nt = netdev_priv(dev);
1987
1988 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1989 int err = ip6_tnl_encap_setup(nt, &ipencap);
1990
1991 if (err < 0)
1992 return err;
1993 }
1994
1995 ip6_tnl_netlink_parms(data, &nt->parms);
1996
1997 if (nt->parms.collect_md) {
1998 if (rtnl_dereference(ip6n->collect_md_tun))
1999 return -EEXIST;
2000 } else {
2001 t = ip6_tnl_locate(net, &nt->parms, 0);
2002 if (!IS_ERR(t))
2003 return -EEXIST;
2004 }
2005
2006 return ip6_tnl_create2(dev);
2007 }
2008
2009 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2010 struct nlattr *data[],
2011 struct netlink_ext_ack *extack)
2012 {
2013 struct ip6_tnl *t = netdev_priv(dev);
2014 struct __ip6_tnl_parm p;
2015 struct net *net = t->net;
2016 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2017 struct ip_tunnel_encap ipencap;
2018
2019 if (dev == ip6n->fb_tnl_dev)
2020 return -EINVAL;
2021
2022 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2023 int err = ip6_tnl_encap_setup(t, &ipencap);
2024
2025 if (err < 0)
2026 return err;
2027 }
2028 ip6_tnl_netlink_parms(data, &p);
2029 if (p.collect_md)
2030 return -EINVAL;
2031
2032 t = ip6_tnl_locate(net, &p, 0);
2033 if (!IS_ERR(t)) {
2034 if (t->dev != dev)
2035 return -EEXIST;
2036 } else
2037 t = netdev_priv(dev);
2038
2039 return ip6_tnl_update(t, &p);
2040 }
2041
2042 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2043 {
2044 struct net *net = dev_net(dev);
2045 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2046
2047 if (dev != ip6n->fb_tnl_dev)
2048 unregister_netdevice_queue(dev, head);
2049 }
2050
2051 static size_t ip6_tnl_get_size(const struct net_device *dev)
2052 {
2053 return
2054 /* IFLA_IPTUN_LINK */
2055 nla_total_size(4) +
2056 /* IFLA_IPTUN_LOCAL */
2057 nla_total_size(sizeof(struct in6_addr)) +
2058 /* IFLA_IPTUN_REMOTE */
2059 nla_total_size(sizeof(struct in6_addr)) +
2060 /* IFLA_IPTUN_TTL */
2061 nla_total_size(1) +
2062 /* IFLA_IPTUN_ENCAP_LIMIT */
2063 nla_total_size(1) +
2064 /* IFLA_IPTUN_FLOWINFO */
2065 nla_total_size(4) +
2066 /* IFLA_IPTUN_FLAGS */
2067 nla_total_size(4) +
2068 /* IFLA_IPTUN_PROTO */
2069 nla_total_size(1) +
2070 /* IFLA_IPTUN_ENCAP_TYPE */
2071 nla_total_size(2) +
2072 /* IFLA_IPTUN_ENCAP_FLAGS */
2073 nla_total_size(2) +
2074 /* IFLA_IPTUN_ENCAP_SPORT */
2075 nla_total_size(2) +
2076 /* IFLA_IPTUN_ENCAP_DPORT */
2077 nla_total_size(2) +
2078 /* IFLA_IPTUN_COLLECT_METADATA */
2079 nla_total_size(0) +
2080 /* IFLA_IPTUN_FWMARK */
2081 nla_total_size(4) +
2082 0;
2083 }
2084
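/**
 * ip6_tnl_fill_info - dump tunnel parameters as IFLA_IPTUN_* attributes
 *
 * Editorial note (not in the original source): emits the attributes
 * accounted for in ip6_tnl_get_size(); the two must be kept in sync or
 * the dump may run out of message space (-EMSGSIZE).
 */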
2085 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2086 {
2087 struct ip6_tnl *tunnel = netdev_priv(dev);
2088 struct __ip6_tnl_parm *parm = &tunnel->parms;
2089
2090 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2091 nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2092 nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2093 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2094 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2095 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2096 nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2097 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2098 nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2099 goto nla_put_failure;
2100
2101 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2102 nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2103 nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2104 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2105 goto nla_put_failure;
2106
2107 if (parm->collect_md)
2108 if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2109 goto nla_put_failure;
2110
2111 return 0;
2112
2113 nla_put_failure:
2114 return -EMSGSIZE;
2115 }
2116
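/* Editorial note: reports the netns the tunnel transmits in (its link
 * netns), used by rtnetlink when describing cross-netns devices.
 */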
2117 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2118 {
2119 struct ip6_tnl *tunnel = netdev_priv(dev);
2120
2121 return tunnel->net;
2122 }
2123 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2124
2125 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2126 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
2127 [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
2128 [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
2129 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
2130 [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
2131 [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
2132 [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
2133 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
2134 [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
2135 [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
2136 [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
2137 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
2138 [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
2139 [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
2140 };
2141
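/* Editorial note: rtnetlink glue; "ip link add/set/del ... type ip6tnl"
 * resolves the "ip6tnl" kind to these operations.
 */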
2142 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2143 .kind = "ip6tnl",
2144 .maxtype = IFLA_IPTUN_MAX,
2145 .policy = ip6_tnl_policy,
2146 .priv_size = sizeof(struct ip6_tnl),
2147 .setup = ip6_tnl_dev_setup,
2148 .validate = ip6_tnl_validate,
2149 .newlink = ip6_tnl_newlink,
2150 .changelink = ip6_tnl_changelink,
2151 .dellink = ip6_tnl_dellink,
2152 .get_size = ip6_tnl_get_size,
2153 .fill_info = ip6_tnl_fill_info,
2154 .get_link_net = ip6_tnl_get_link_net,
2155 };
2156
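/* Editorial note: handlers registered with the xfrm6 tunnel demux so
 * that IPv4-in-IPv6 and IPv6-in-IPv6 packets reach this driver. The
 * priority orders them relative to other users of the same protocols.
 */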
2157 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2158 .handler = ip4ip6_rcv,
2159 .err_handler = ip4ip6_err,
2160 .priority = 1,
2161 };
2162
2163 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2164 .handler = ip6ip6_rcv,
2165 .err_handler = ip6ip6_err,
2166 .priority = 1,
2167 };
2168
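/**
 * ip6_tnl_destroy_tunnels - queue every tunnel of a dying netns for removal
 *
 * Editorial note (not in the original source): collects all ip6tnl
 * devices registered in @net, plus any tunnel still linked in this
 * netns' hash table whose device lives in another netns.
 */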
2169 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
2170 {
2171 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2172 struct net_device *dev, *aux;
2173 int h;
2174 struct ip6_tnl *t;
2175
2176 for_each_netdev_safe(net, dev, aux)
2177 if (dev->rtnl_link_ops == &ip6_link_ops)
2178 unregister_netdevice_queue(dev, list);
2179
2180 for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2181 t = rtnl_dereference(ip6n->tnls_r_l[h]);
2182 while (t) {
2183 /* If dev is in the same netns, it has already
2184 * been added to the list by the previous loop.
2185 */
2186 if (!net_eq(dev_net(t->dev), net))
2187 unregister_netdevice_queue(t->dev, list);
2188 t = rtnl_dereference(t->next);
2189 }
2190 }
2191 }
2192
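/**
 * ip6_tnl_init_net - per-netns setup
 *
 * Editorial note (not in the original source): initializes the tunnel
 * hash lists and creates the netns-local fallback device "ip6tnl0",
 * which acts as the catch-all tunnel for packets matching no
 * configured tunnel.
 */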
2193 static int __net_init ip6_tnl_init_net(struct net *net)
2194 {
2195 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2196 struct ip6_tnl *t = NULL;
2197 int err;
2198
2199 ip6n->tnls[0] = ip6n->tnls_wc;
2200 ip6n->tnls[1] = ip6n->tnls_r_l;
2201
2202 err = -ENOMEM;
2203 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2204 NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2205
2206 if (!ip6n->fb_tnl_dev)
2207 goto err_alloc_dev;
2208 dev_net_set(ip6n->fb_tnl_dev, net);
2209 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2210 /* FB netdevice is special: we have one, and only one per netns.
2211 * Allowing it to be moved to another netns is clearly unsafe.
2212 */
2213 ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2214
2215 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2216 if (err < 0)
2217 goto err_register;
2218
2219 err = register_netdev(ip6n->fb_tnl_dev);
2220 if (err < 0)
2221 goto err_register;
2222
2223 t = netdev_priv(ip6n->fb_tnl_dev);
2224
2225 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2226 return 0;
2227
2228 err_register:
2229 free_netdev(ip6n->fb_tnl_dev);
2230 err_alloc_dev:
2231 return err;
2232 }
2233
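/* Editorial note: batched netns teardown; collect every tunnel device
 * from all dying netns under a single rtnl_lock and unregister them in
 * one pass.
 */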
2234 static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
2235 {
2236 struct net *net;
2237 LIST_HEAD(list);
2238
2239 rtnl_lock();
2240 list_for_each_entry(net, net_list, exit_list)
2241 ip6_tnl_destroy_tunnels(net, &list);
2242 unregister_netdevice_many(&list);
2243 rtnl_unlock();
2244 }
2245
2246 static struct pernet_operations ip6_tnl_net_ops = {
2247 .init = ip6_tnl_init_net,
2248 .exit_batch = ip6_tnl_exit_batch_net,
2249 .id = &ip6_tnl_net_id,
2250 .size = sizeof(struct ip6_tnl_net),
2251 };
2252
2253 /**
2254 * ip6_tunnel_init - register protocol and reserve needed resources
2255 *
2256 * Return: 0 on success, a negative errno on failure
2257 **/
2258
2259 static int __init ip6_tunnel_init(void)
2260 {
2261 int err;
2262
2263 if (!ipv6_mod_enabled())
2264 return -EOPNOTSUPP;
2265
2266 err = register_pernet_device(&ip6_tnl_net_ops);
2267 if (err < 0)
2268 goto out_pernet;
2269
2270 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2271 if (err < 0) {
2272 pr_err("%s: can't register ip4ip6\n", __func__);
2273 goto out_ip4ip6;
2274 }
2275
2276 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2277 if (err < 0) {
2278 pr_err("%s: can't register ip6ip6\n", __func__);
2279 goto out_ip6ip6;
2280 }
2281 err = rtnl_link_register(&ip6_link_ops);
2282 if (err < 0)
2283 goto rtnl_link_failed;
2284
2285 return 0;
2286
2287 rtnl_link_failed:
2288 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2289 out_ip6ip6:
2290 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2291 out_ip4ip6:
2292 unregister_pernet_device(&ip6_tnl_net_ops);
2293 out_pernet:
2294 return err;
2295 }
2296
2297 /**
2298 * ip6_tunnel_cleanup - free resources and unregister protocol
2299 **/
2300
2301 static void __exit ip6_tunnel_cleanup(void)
2302 {
2303 rtnl_link_unregister(&ip6_link_ops);
2304 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2305 pr_info("%s: can't deregister ip4ip6\n", __func__);
2306
2307 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2308 pr_info("%s: can't deregister ip6ip6\n", __func__);
2309
2310 unregister_pernet_device(&ip6_tnl_net_ops);
2311 }
2312
2313 module_init(ip6_tunnel_init);
2314 module_exit(ip6_tunnel_cleanup);