net/ipv6/ip6_tunnel.c (mirror_ubuntu-bionic-kernel.git, at commit "ip6_tunnel: Match to ARPHRD_TUNNEL6 for dev type")
1 /*
2 * IPv6 tunneling device
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
8 *
9 * Based on:
10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
11 *
12 * RFC 2473
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
29 #include <linux/if.h>
30 #include <linux/in.h>
31 #include <linux/ip.h>
32 #include <linux/net.h>
33 #include <linux/in6.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/init.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/netfilter_ipv6.h>
41 #include <linux/slab.h>
42 #include <linux/hash.h>
43 #include <linux/etherdevice.h>
44
45 #include <linux/uaccess.h>
46 #include <linux/atomic.h>
47
48 #include <net/icmp.h>
49 #include <net/ip.h>
50 #include <net/ip_tunnels.h>
51 #include <net/ipv6.h>
52 #include <net/ip6_route.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_tunnel.h>
55 #include <net/xfrm.h>
56 #include <net/dsfield.h>
57 #include <net/inet_ecn.h>
58 #include <net/net_namespace.h>
59 #include <net/netns/generic.h>
60 #include <net/dst_metadata.h>
61
62 MODULE_AUTHOR("Ville Nuorvala");
63 MODULE_DESCRIPTION("IPv6 tunneling device");
64 MODULE_LICENSE("GPL");
65 MODULE_ALIAS_RTNL_LINK("ip6tnl");
66 MODULE_ALIAS_NETDEV("ip6tnl0");
67
68 #define IP6_TUNNEL_HASH_SIZE_SHIFT 5
69 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
70
71 static bool log_ecn_error = true;
72 module_param(log_ecn_error, bool, 0644);
73 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
74
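/* Fold both tunnel end-point addresses into one of the 32 hash buckets. */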
75 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
76 {
77 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
78
79 return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
80 }
81
82 static int ip6_tnl_dev_init(struct net_device *dev);
83 static void ip6_tnl_dev_setup(struct net_device *dev);
84 static struct rtnl_link_ops ip6_link_ops __read_mostly;
85
86 static unsigned int ip6_tnl_net_id __read_mostly;
87 struct ip6_tnl_net {
88 /* the IPv6 tunnel fallback device */
89 struct net_device *fb_tnl_dev;
90 /* lists for storing tunnels in use */
91 struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
92 struct ip6_tnl __rcu *tnls_wc[1];
93 struct ip6_tnl __rcu **tnls[2];
94 struct ip6_tnl __rcu *collect_md_tun;
95 };
96
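/*
 * Fold the per-CPU software counters into dev->stats.  The
 * u64_stats_fetch_begin_irq()/retry() pair makes each per-CPU snapshot
 * consistent even where the 64-bit counters are not updated atomically.
 */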
97 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
98 {
99 struct pcpu_sw_netstats tmp, sum = { 0 };
100 int i;
101
102 for_each_possible_cpu(i) {
103 unsigned int start;
104 const struct pcpu_sw_netstats *tstats =
105 per_cpu_ptr(dev->tstats, i);
106
107 do {
108 start = u64_stats_fetch_begin_irq(&tstats->syncp);
109 tmp.rx_packets = tstats->rx_packets;
110 tmp.rx_bytes = tstats->rx_bytes;
111 tmp.tx_packets = tstats->tx_packets;
112 tmp.tx_bytes = tstats->tx_bytes;
113 } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
114
115 sum.rx_packets += tmp.rx_packets;
116 sum.rx_bytes += tmp.rx_bytes;
117 sum.tx_packets += tmp.tx_packets;
118 sum.tx_bytes += tmp.tx_bytes;
119 }
120 dev->stats.rx_packets = sum.rx_packets;
121 dev->stats.rx_bytes = sum.rx_bytes;
122 dev->stats.tx_packets = sum.tx_packets;
123 dev->stats.tx_bytes = sum.tx_bytes;
124 return &dev->stats;
125 }
126
127 /**
128 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
129 * @remote: the address of the tunnel exit-point
130 * @local: the address of the tunnel entry-point
131 *
132 * Return:
133 * tunnel matching given end-points if found,
134 * else fallback tunnel if its device is up,
135 * else %NULL
136 **/
137
138 #define for_each_ip6_tunnel_rcu(start) \
139 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
140
141 static struct ip6_tnl *
142 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
143 {
144 unsigned int hash = HASH(remote, local);
145 struct ip6_tnl *t;
146 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
147 struct in6_addr any;
148
149 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
150 if (ipv6_addr_equal(local, &t->parms.laddr) &&
151 ipv6_addr_equal(remote, &t->parms.raddr) &&
152 (t->dev->flags & IFF_UP))
153 return t;
154 }
155
156 memset(&any, 0, sizeof(any));
157 hash = HASH(&any, local);
158 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 ipv6_addr_any(&t->parms.raddr) &&
161 (t->dev->flags & IFF_UP))
162 return t;
163 }
164
165 hash = HASH(remote, &any);
166 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
167 if (ipv6_addr_equal(remote, &t->parms.raddr) &&
168 ipv6_addr_any(&t->parms.laddr) &&
169 (t->dev->flags & IFF_UP))
170 return t;
171 }
172
173 t = rcu_dereference(ip6n->collect_md_tun);
174 if (t && t->dev->flags & IFF_UP)
175 return t;
176
177 t = rcu_dereference(ip6n->tnls_wc[0]);
178 if (t && (t->dev->flags & IFF_UP))
179 return t;
180
181 return NULL;
182 }
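/*
 * Lookup order used above: exact (remote, local) match first, then a
 * tunnel bound only to the local address, then one bound only to the
 * remote address, then the collect_md (metadata) tunnel, and finally
 * the fallback device, in each case only if the device is up.
 */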
183
184 /**
185 * ip6_tnl_bucket - get head of list matching given tunnel parameters
186 * @p: parameters containing tunnel end-points
187 *
188 * Description:
189 * ip6_tnl_bucket() returns the head of the list matching the
190 * &struct in6_addr entries laddr and raddr in @p.
191 *
192 * Return: head of IPv6 tunnel list
193 **/
194
195 static struct ip6_tnl __rcu **
196 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
197 {
198 const struct in6_addr *remote = &p->raddr;
199 const struct in6_addr *local = &p->laddr;
200 unsigned int h = 0;
201 int prio = 0;
202
203 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
204 prio = 1;
205 h = HASH(remote, local);
206 }
207 return &ip6n->tnls[prio][h];
208 }
209
210 /**
211 * ip6_tnl_link - add tunnel to hash table
212 * @t: tunnel to be added
213 **/
214
215 static void
216 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
217 {
218 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
219
220 if (t->parms.collect_md)
221 rcu_assign_pointer(ip6n->collect_md_tun, t);
222         rcu_assign_pointer(t->next, rtnl_dereference(*tp));
223 rcu_assign_pointer(*tp, t);
224 }
225
226 /**
227 * ip6_tnl_unlink - remove tunnel from hash table
228 * @t: tunnel to be removed
229 **/
230
231 static void
232 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
233 {
234 struct ip6_tnl __rcu **tp;
235 struct ip6_tnl *iter;
236
237 if (t->parms.collect_md)
238 rcu_assign_pointer(ip6n->collect_md_tun, NULL);
239
240 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
241 (iter = rtnl_dereference(*tp)) != NULL;
242 tp = &iter->next) {
243 if (t == iter) {
244 rcu_assign_pointer(*tp, t->next);
245 break;
246 }
247 }
248 }
249
250 static void ip6_dev_free(struct net_device *dev)
251 {
252 struct ip6_tnl *t = netdev_priv(dev);
253
254 gro_cells_destroy(&t->gro_cells);
255 dst_cache_destroy(&t->dst_cache);
256 free_percpu(dev->tstats);
257 }
258
259 static int ip6_tnl_create2(struct net_device *dev)
260 {
261 struct ip6_tnl *t = netdev_priv(dev);
262 struct net *net = dev_net(dev);
263 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
264 int err;
265
266 t = netdev_priv(dev);
267
268 dev->rtnl_link_ops = &ip6_link_ops;
269 err = register_netdevice(dev);
270 if (err < 0)
271 goto out;
272
273 strcpy(t->parms.name, dev->name);
274
275 dev_hold(dev);
276 ip6_tnl_link(ip6n, t);
277 return 0;
278
279 out:
280 return err;
281 }
282
283 /**
284 * ip6_tnl_create - create a new tunnel
285 * @p: tunnel parameters
286 * @pt: pointer to new tunnel
287 *
288 * Description:
289 * Create tunnel matching given parameters.
290 *
291 * Return:
292 * created tunnel or error pointer
293 **/
294
295 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
296 {
297 struct net_device *dev;
298 struct ip6_tnl *t;
299 char name[IFNAMSIZ];
300 int err = -E2BIG;
301
302 if (p->name[0]) {
303 if (!dev_valid_name(p->name))
304 goto failed;
305 strlcpy(name, p->name, IFNAMSIZ);
306 } else {
307 sprintf(name, "ip6tnl%%d");
308 }
309 err = -ENOMEM;
310 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
311 ip6_tnl_dev_setup);
312 if (!dev)
313 goto failed;
314
315 dev_net_set(dev, net);
316
317 t = netdev_priv(dev);
318 t->parms = *p;
319 t->net = dev_net(dev);
320 err = ip6_tnl_create2(dev);
321 if (err < 0)
322 goto failed_free;
323
324 return t;
325
326 failed_free:
327 free_netdev(dev);
328 failed:
329 return ERR_PTR(err);
330 }
331
332 /**
333 * ip6_tnl_locate - find or create tunnel matching given parameters
334 * @p: tunnel parameters
335  * @create: != 0 if allowed to create a new tunnel when no match is found
336 *
337 * Description:
338  *   ip6_tnl_locate() first tries to locate an existing tunnel
339  *   based on @p. If this is unsuccessful, but @create is set, a new
340  *   tunnel device is created and registered for use.
341 *
342 * Return:
343 * matching tunnel or error pointer
344 **/
345
346 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
347 struct __ip6_tnl_parm *p, int create)
348 {
349 const struct in6_addr *remote = &p->raddr;
350 const struct in6_addr *local = &p->laddr;
351 struct ip6_tnl __rcu **tp;
352 struct ip6_tnl *t;
353 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
354
355 for (tp = ip6_tnl_bucket(ip6n, p);
356 (t = rtnl_dereference(*tp)) != NULL;
357 tp = &t->next) {
358 if (ipv6_addr_equal(local, &t->parms.laddr) &&
359 ipv6_addr_equal(remote, &t->parms.raddr)) {
360 if (create)
361 return ERR_PTR(-EEXIST);
362
363 return t;
364 }
365 }
366 if (!create)
367 return ERR_PTR(-ENODEV);
368 return ip6_tnl_create(net, p);
369 }
370
371 /**
372 * ip6_tnl_dev_uninit - tunnel device uninitializer
373 * @dev: the device to be destroyed
374 *
375 * Description:
376 * ip6_tnl_dev_uninit() removes tunnel from its list
377 **/
378
379 static void
380 ip6_tnl_dev_uninit(struct net_device *dev)
381 {
382 struct ip6_tnl *t = netdev_priv(dev);
383 struct net *net = t->net;
384 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
385
386 if (dev == ip6n->fb_tnl_dev)
387 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
388 else
389 ip6_tnl_unlink(ip6n, t);
390 dst_cache_reset(&t->dst_cache);
391 dev_put(dev);
392 }
393
394 /**
395  * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
396 * @skb: received socket buffer
397 *
398 * Return:
399  *   0 if no encapsulation limit option was found,
400  *   else offset of the option within the packet
401 **/
402
403 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
404 {
405 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
406 unsigned int nhoff = raw - skb->data;
407 unsigned int off = nhoff + sizeof(*ipv6h);
408 u8 next, nexthdr = ipv6h->nexthdr;
409
410 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
411 struct ipv6_opt_hdr *hdr;
412 u16 optlen;
413
414 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
415 break;
416
417 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
418 if (nexthdr == NEXTHDR_FRAGMENT) {
419 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
420 if (frag_hdr->frag_off)
421 break;
422 optlen = 8;
423 } else if (nexthdr == NEXTHDR_AUTH) {
424 optlen = (hdr->hdrlen + 2) << 2;
425 } else {
426 optlen = ipv6_optlen(hdr);
427 }
428 /* cache hdr->nexthdr, since pskb_may_pull() might
429 * invalidate hdr
430 */
431 next = hdr->nexthdr;
432 if (nexthdr == NEXTHDR_DEST) {
433 u16 i = 2;
434
435 /* Remember : hdr is no longer valid at this point. */
436 if (!pskb_may_pull(skb, off + optlen))
437 break;
438
439 while (1) {
440 struct ipv6_tlv_tnl_enc_lim *tel;
441
442 /* No more room for encapsulation limit */
443 if (i + sizeof(*tel) > optlen)
444 break;
445
446 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
447 /* return index of option if found and valid */
448 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
449 tel->length == 1)
450 return i + off - nhoff;
451 /* else jump to next option */
452 if (tel->type)
453 i += tel->length + 2;
454 else
455 i++;
456 }
457 }
458 nexthdr = next;
459 off += optlen;
460 }
461 return 0;
462 }
463 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
464
465 /**
466 * ip6_tnl_err - tunnel error handler
467 *
468 * Description:
469 * ip6_tnl_err() should handle errors in the tunnel according
470 * to the specifications in RFC 2473.
471 **/
472
473 static int
474 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
475 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
476 {
477 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
478 struct net *net = dev_net(skb->dev);
479 u8 rel_type = ICMPV6_DEST_UNREACH;
480 u8 rel_code = ICMPV6_ADDR_UNREACH;
481 __u32 rel_info = 0;
482 struct ip6_tnl *t;
483 int err = -ENOENT;
484 int rel_msg = 0;
485 u8 tproto;
486 __u16 len;
487
488 /* If the packet doesn't contain the original IPv6 header we are
489 in trouble since we might need the source address for further
490 processing of the error. */
491
492 rcu_read_lock();
493 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
494 if (!t)
495 goto out;
496
497 tproto = READ_ONCE(t->parms.proto);
498 if (tproto != ipproto && tproto != 0)
499 goto out;
500
501 err = 0;
502
503 switch (*type) {
504 struct ipv6_tlv_tnl_enc_lim *tel;
505 __u32 mtu, teli;
506 case ICMPV6_DEST_UNREACH:
507 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
508 t->parms.name);
509 rel_msg = 1;
510 break;
511 case ICMPV6_TIME_EXCEED:
512 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
513 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
514 t->parms.name);
515 rel_msg = 1;
516 }
517 break;
518 case ICMPV6_PARAMPROB:
519 teli = 0;
520 if ((*code) == ICMPV6_HDR_FIELD)
521 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
522
523 if (teli && teli == *info - 2) {
524 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
525 if (tel->encap_limit == 0) {
526 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
527 t->parms.name);
528 rel_msg = 1;
529 }
530 } else {
531 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
532 t->parms.name);
533 }
534 break;
535 case ICMPV6_PKT_TOOBIG:
536 ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
537 sock_net_uid(net, NULL));
538 mtu = *info - offset;
539 if (mtu < IPV6_MIN_MTU)
540 mtu = IPV6_MIN_MTU;
541 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
542 if (len > mtu) {
543 rel_type = ICMPV6_PKT_TOOBIG;
544 rel_code = 0;
545 rel_info = mtu;
546 rel_msg = 1;
547 }
548 break;
549 case NDISC_REDIRECT:
550 ip6_redirect(skb, net, skb->dev->ifindex, 0,
551 sock_net_uid(net, NULL));
552 break;
553 }
554
555 *type = rel_type;
556 *code = rel_code;
557 *info = rel_info;
558 *msg = rel_msg;
559
560 out:
561 rcu_read_unlock();
562 return err;
563 }
564
565 static int
566 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
567 u8 type, u8 code, int offset, __be32 info)
568 {
569 __u32 rel_info = ntohl(info);
570 const struct iphdr *eiph;
571 struct sk_buff *skb2;
572 int err, rel_msg = 0;
573 u8 rel_type = type;
574 u8 rel_code = code;
575 struct rtable *rt;
576 struct flowi4 fl4;
577
578 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
579 &rel_msg, &rel_info, offset);
580 if (err < 0)
581 return err;
582
583 if (rel_msg == 0)
584 return 0;
585
586 switch (rel_type) {
587 case ICMPV6_DEST_UNREACH:
588 if (rel_code != ICMPV6_ADDR_UNREACH)
589 return 0;
590 rel_type = ICMP_DEST_UNREACH;
591 rel_code = ICMP_HOST_UNREACH;
592 break;
593 case ICMPV6_PKT_TOOBIG:
594 if (rel_code != 0)
595 return 0;
596 rel_type = ICMP_DEST_UNREACH;
597 rel_code = ICMP_FRAG_NEEDED;
598 break;
599 default:
600 return 0;
601 }
602
603 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
604 return 0;
605
606 skb2 = skb_clone(skb, GFP_ATOMIC);
607 if (!skb2)
608 return 0;
609
610 skb_dst_drop(skb2);
611
612 skb_pull(skb2, offset);
613 skb_reset_network_header(skb2);
614 eiph = ip_hdr(skb2);
615
616 /* Try to guess incoming interface */
617 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
618 0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
619 if (IS_ERR(rt))
620 goto out;
621
622 skb2->dev = rt->dst.dev;
623 ip_rt_put(rt);
624
625 /* route "incoming" packet */
626 if (rt->rt_flags & RTCF_LOCAL) {
627 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
628 eiph->daddr, eiph->saddr, 0, 0,
629 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
630 if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
631 if (!IS_ERR(rt))
632 ip_rt_put(rt);
633 goto out;
634 }
635 skb_dst_set(skb2, &rt->dst);
636 } else {
637 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
638 skb2->dev) ||
639 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
640 goto out;
641 }
642
643 /* change mtu on this route */
644 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
645 if (rel_info > dst_mtu(skb_dst(skb2)))
646 goto out;
647
648 skb_dst_update_pmtu(skb2, rel_info);
649 }
650
651 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
652
653 out:
654 kfree_skb(skb2);
655 return 0;
656 }
657
658 static int
659 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
660 u8 type, u8 code, int offset, __be32 info)
661 {
662 __u32 rel_info = ntohl(info);
663 int err, rel_msg = 0;
664 u8 rel_type = type;
665 u8 rel_code = code;
666
667 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
668 &rel_msg, &rel_info, offset);
669 if (err < 0)
670 return err;
671
672 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
673 struct rt6_info *rt;
674 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
675
676 if (!skb2)
677 return 0;
678
679 skb_dst_drop(skb2);
680 skb_pull(skb2, offset);
681 skb_reset_network_header(skb2);
682
683 /* Try to guess incoming interface */
684 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
685 NULL, 0, 0);
686
687 if (rt && rt->dst.dev)
688 skb2->dev = rt->dst.dev;
689
690 icmpv6_send(skb2, rel_type, rel_code, rel_info);
691
692 ip6_rt_put(rt);
693
694 kfree_skb(skb2);
695 }
696
697 return 0;
698 }
699
700 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
701 const struct ipv6hdr *ipv6h,
702 struct sk_buff *skb)
703 {
704 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
705
706 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
707 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
708
709 return IP6_ECN_decapsulate(ipv6h, skb);
710 }
711
712 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
713 const struct ipv6hdr *ipv6h,
714 struct sk_buff *skb)
715 {
716 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
717 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
718
719 return IP6_ECN_decapsulate(ipv6h, skb);
720 }
721
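/*
 * Compute a tunnel's capability flags from its end-point addresses:
 * a wildcard end-point means the decision is deferred to each packet
 * (IP6_TNL_F_CAP_PER_PACKET); otherwise a unicast local address grants
 * IP6_TNL_F_CAP_XMIT and a unicast remote address grants
 * IP6_TNL_F_CAP_RCV, provided neither address is loopback and
 * link-local addresses are only used with an explicit link.
 */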
722 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
723 const struct in6_addr *laddr,
724 const struct in6_addr *raddr)
725 {
726 struct __ip6_tnl_parm *p = &t->parms;
727 int ltype = ipv6_addr_type(laddr);
728 int rtype = ipv6_addr_type(raddr);
729 __u32 flags = 0;
730
731 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
732 flags = IP6_TNL_F_CAP_PER_PACKET;
733 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
734 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
735 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
736 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
737 if (ltype&IPV6_ADDR_UNICAST)
738 flags |= IP6_TNL_F_CAP_XMIT;
739 if (rtype&IPV6_ADDR_UNICAST)
740 flags |= IP6_TNL_F_CAP_RCV;
741 }
742 return flags;
743 }
744 EXPORT_SYMBOL(ip6_tnl_get_cap);
745
746 /* called with rcu_read_lock() */
747 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
748 const struct in6_addr *laddr,
749 const struct in6_addr *raddr)
750 {
751 struct __ip6_tnl_parm *p = &t->parms;
752 int ret = 0;
753 struct net *net = t->net;
754
755 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
756 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
757 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
758 struct net_device *ldev = NULL;
759
760 if (p->link)
761 ldev = dev_get_by_index_rcu(net, p->link);
762
763 if ((ipv6_addr_is_multicast(laddr) ||
764 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
765 ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
766 likely(!ipv6_chk_addr(net, raddr, NULL, 0))))
767 ret = 1;
768 }
769 return ret;
770 }
771 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
772
773 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
774 const struct tnl_ptk_info *tpi,
775 struct metadata_dst *tun_dst,
776 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
777 const struct ipv6hdr *ipv6h,
778 struct sk_buff *skb),
779 bool log_ecn_err)
780 {
781 struct pcpu_sw_netstats *tstats;
782 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
783 int err;
784
785 if ((!(tpi->flags & TUNNEL_CSUM) &&
786 (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
787 ((tpi->flags & TUNNEL_CSUM) &&
788 !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
789 tunnel->dev->stats.rx_crc_errors++;
790 tunnel->dev->stats.rx_errors++;
791 goto drop;
792 }
793
794 if (tunnel->parms.i_flags & TUNNEL_SEQ) {
795 if (!(tpi->flags & TUNNEL_SEQ) ||
796 (tunnel->i_seqno &&
797 (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
798 tunnel->dev->stats.rx_fifo_errors++;
799 tunnel->dev->stats.rx_errors++;
800 goto drop;
801 }
802 tunnel->i_seqno = ntohl(tpi->seq) + 1;
803 }
804
805 skb->protocol = tpi->proto;
806
807 /* Warning: All skb pointers will be invalidated! */
808 if (tunnel->dev->type == ARPHRD_ETHER) {
809 if (!pskb_may_pull(skb, ETH_HLEN)) {
810 tunnel->dev->stats.rx_length_errors++;
811 tunnel->dev->stats.rx_errors++;
812 goto drop;
813 }
814
815 ipv6h = ipv6_hdr(skb);
816 skb->protocol = eth_type_trans(skb, tunnel->dev);
817 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
818 } else {
819 skb->dev = tunnel->dev;
820 }
821
822 skb_reset_network_header(skb);
823 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
824
825 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
826
827 err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
828 if (unlikely(err)) {
829 if (log_ecn_err)
830 net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
831 &ipv6h->saddr,
832 ipv6_get_dsfield(ipv6h));
833 if (err > 1) {
834 ++tunnel->dev->stats.rx_frame_errors;
835 ++tunnel->dev->stats.rx_errors;
836 goto drop;
837 }
838 }
839
840 tstats = this_cpu_ptr(tunnel->dev->tstats);
841 u64_stats_update_begin(&tstats->syncp);
842 tstats->rx_packets++;
843 tstats->rx_bytes += skb->len;
844 u64_stats_update_end(&tstats->syncp);
845
846 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
847
848 if (tun_dst)
849 skb_dst_set(skb, (struct dst_entry *)tun_dst);
850
851 gro_cells_receive(&tunnel->gro_cells, skb);
852 return 0;
853
854 drop:
855 if (tun_dst)
856 dst_release((struct dst_entry *)tun_dst);
857 kfree_skb(skb);
858 return 0;
859 }
860
861 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
862 const struct tnl_ptk_info *tpi,
863 struct metadata_dst *tun_dst,
864 bool log_ecn_err)
865 {
866 return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
867 log_ecn_err);
868 }
869 EXPORT_SYMBOL(ip6_tnl_rcv);
870
871 static const struct tnl_ptk_info tpi_v6 = {
872 /* no tunnel info required for ipxip6. */
873 .proto = htons(ETH_P_IPV6),
874 };
875
876 static const struct tnl_ptk_info tpi_v4 = {
877 /* no tunnel info required for ipxip6. */
878 .proto = htons(ETH_P_IP),
879 };
880
881 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
882 const struct tnl_ptk_info *tpi,
883 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
884 const struct ipv6hdr *ipv6h,
885 struct sk_buff *skb))
886 {
887 struct ip6_tnl *t;
888 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
889 struct metadata_dst *tun_dst = NULL;
890 int ret = -1;
891
892 rcu_read_lock();
893 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
894
895 if (t) {
896 u8 tproto = READ_ONCE(t->parms.proto);
897
898 if (tproto != ipproto && tproto != 0)
899 goto drop;
900 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
901 goto drop;
902 ipv6h = ipv6_hdr(skb);
903 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
904 goto drop;
905 if (iptunnel_pull_header(skb, 0, tpi->proto, false))
906 goto drop;
907 if (t->parms.collect_md) {
908 tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
909 if (!tun_dst)
910 goto drop;
911 }
912 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
913 log_ecn_error);
914 }
915
916 rcu_read_unlock();
917
918 return ret;
919
920 drop:
921 rcu_read_unlock();
922 kfree_skb(skb);
923 return 0;
924 }
925
926 static int ip4ip6_rcv(struct sk_buff *skb)
927 {
928 return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
929 ip4ip6_dscp_ecn_decapsulate);
930 }
931
932 static int ip6ip6_rcv(struct sk_buff *skb)
933 {
934 return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
935 ip6ip6_dscp_ecn_decapsulate);
936 }
937
938 struct ipv6_tel_txoption {
939 struct ipv6_txoptions ops;
940 __u8 dst_opt[8];
941 };
942
943 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
944 {
945 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
946
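	/*
	 * Resulting 8-byte destination options header (RFC 2473, sec. 5.1):
	 *   dst_opt[0..1]: ipv6_opt_hdr - next header is filled in when the
	 *                  option is pushed, hdr ext len 0 (8 octets total)
	 *   dst_opt[2..4]: Tunnel Encapsulation Limit option
	 *                  (type 4, length 1, limit value)
	 *   dst_opt[5..7]: PadN option (type 1, length 1, one zero octet)
	 */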
947 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
948 opt->dst_opt[3] = 1;
949 opt->dst_opt[4] = encap_limit;
950 opt->dst_opt[5] = IPV6_TLV_PADN;
951 opt->dst_opt[6] = 1;
952
953 opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
954 opt->ops.opt_nflen = 8;
955 }
956
957 /**
958 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
959 * @t: the outgoing tunnel device
960 * @hdr: IPv6 header from the incoming packet
961 *
962 * Description:
963  *   Avoid a trivial tunneling loop by checking that the tunnel exit-point
964  *   doesn't match the source of the incoming packet.
965 *
966 * Return:
967 * 1 if conflict,
968 * 0 else
969 **/
970
971 static inline bool
972 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
973 {
974 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
975 }
976
977 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
978 const struct in6_addr *laddr,
979 const struct in6_addr *raddr)
980 {
981 struct __ip6_tnl_parm *p = &t->parms;
982 int ret = 0;
983 struct net *net = t->net;
984
985 if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
986 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
987 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
988 struct net_device *ldev = NULL;
989
990 rcu_read_lock();
991 if (p->link)
992 ldev = dev_get_by_index_rcu(net, p->link);
993
994 if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
995 pr_warn("%s xmit: Local address not yet configured!\n",
996 p->name);
997 else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
998 !ipv6_addr_is_multicast(raddr) &&
999 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
1000 pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
1001 p->name);
1002 else
1003 ret = 1;
1004 rcu_read_unlock();
1005 }
1006 return ret;
1007 }
1008 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1009
1010 /**
1011 * ip6_tnl_xmit - encapsulate packet and send
1012 * @skb: the outgoing socket buffer
1013 * @dev: the outgoing tunnel device
1014 * @dsfield: dscp code for outer header
1015 * @fl6: flow of tunneled packet
1016 * @encap_limit: encapsulation limit
1017 * @pmtu: Path MTU is stored if packet is too big
1018 * @proto: next header value
1019 *
1020 * Description:
1021 * Build new header and do some sanity checks on the packet before sending
1022 * it.
1023 *
1024 * Return:
1025  *   0 on success,
1026  *   -1 on failure,
1027  *   %-EMSGSIZE if the packet is too big (the MTU is returned via @pmtu).
1028 **/
1029
1030 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1031 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1032 __u8 proto)
1033 {
1034 struct ip6_tnl *t = netdev_priv(dev);
1035 struct net *net = t->net;
1036 struct net_device_stats *stats = &t->dev->stats;
1037 struct ipv6hdr *ipv6h;
1038 struct ipv6_tel_txoption opt;
1039 struct dst_entry *dst = NULL, *ndst = NULL;
1040 struct net_device *tdev;
1041 int mtu;
1042 unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1043 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1044 unsigned int max_headroom = psh_hlen;
1045 bool use_cache = false;
1046 u8 hop_limit;
1047 int err = -1;
1048
1049 if (t->parms.collect_md) {
1050 hop_limit = skb_tunnel_info(skb)->key.ttl;
1051 goto route_lookup;
1052 } else {
1053 hop_limit = t->parms.hop_limit;
1054 }
1055
1056 /* NBMA tunnel */
1057 if (ipv6_addr_any(&t->parms.raddr)) {
1058 if (skb->protocol == htons(ETH_P_IPV6)) {
1059 struct in6_addr *addr6;
1060 struct neighbour *neigh;
1061 int addr_type;
1062
1063 if (!skb_dst(skb))
1064 goto tx_err_link_failure;
1065
1066 neigh = dst_neigh_lookup(skb_dst(skb),
1067 &ipv6_hdr(skb)->daddr);
1068 if (!neigh)
1069 goto tx_err_link_failure;
1070
1071 addr6 = (struct in6_addr *)&neigh->primary_key;
1072 addr_type = ipv6_addr_type(addr6);
1073
1074 if (addr_type == IPV6_ADDR_ANY)
1075 addr6 = &ipv6_hdr(skb)->daddr;
1076
1077 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1078 neigh_release(neigh);
1079 }
1080 } else if (t->parms.proto != 0 && !(t->parms.flags &
1081 (IP6_TNL_F_USE_ORIG_TCLASS |
1082 IP6_TNL_F_USE_ORIG_FWMARK))) {
1083 /* enable the cache only if neither the outer protocol nor the
1084 * routing decision depends on the current inner header value
1085 */
1086 use_cache = true;
1087 }
1088
1089 if (use_cache)
1090 dst = dst_cache_get(&t->dst_cache);
1091
1092 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1093 goto tx_err_link_failure;
1094
1095 if (!dst) {
1096 route_lookup:
1097 /* add dsfield to flowlabel for route lookup */
1098 fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1099
1100 dst = ip6_route_output(net, NULL, fl6);
1101
1102 if (dst->error)
1103 goto tx_err_link_failure;
1104 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1105 if (IS_ERR(dst)) {
1106 err = PTR_ERR(dst);
1107 dst = NULL;
1108 goto tx_err_link_failure;
1109 }
1110 if (t->parms.collect_md &&
1111 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1112 &fl6->daddr, 0, &fl6->saddr))
1113 goto tx_err_link_failure;
1114 ndst = dst;
1115 }
1116
1117 tdev = dst->dev;
1118
1119 if (tdev == dev) {
1120 stats->collisions++;
1121 net_warn_ratelimited("%s: Local routing loop detected!\n",
1122 t->parms.name);
1123 goto tx_err_dst_release;
1124 }
1125 mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
1126 if (encap_limit >= 0) {
1127 max_headroom += 8;
1128 mtu -= 8;
1129 }
1130 mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
1131 IPV6_MIN_MTU : IPV4_MIN_MTU);
1132
1133 skb_dst_update_pmtu(skb, mtu);
1134 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1135 *pmtu = mtu;
1136 err = -EMSGSIZE;
1137 goto tx_err_dst_release;
1138 }
1139
1140 if (t->err_count > 0) {
1141 if (time_before(jiffies,
1142 t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1143 t->err_count--;
1144
1145 dst_link_failure(skb);
1146 } else {
1147 t->err_count = 0;
1148 }
1149 }
1150
1151 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1152
1153 /*
1154 * Okay, now see if we can stuff it in the buffer as-is.
1155 */
1156 max_headroom += LL_RESERVED_SPACE(tdev);
1157
1158 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1159 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1160 struct sk_buff *new_skb;
1161
1162 new_skb = skb_realloc_headroom(skb, max_headroom);
1163 if (!new_skb)
1164 goto tx_err_dst_release;
1165
1166 if (skb->sk)
1167 skb_set_owner_w(new_skb, skb->sk);
1168 consume_skb(skb);
1169 skb = new_skb;
1170 }
1171
1172 if (t->parms.collect_md) {
1173 if (t->encap.type != TUNNEL_ENCAP_NONE)
1174 goto tx_err_dst_release;
1175 } else {
1176 if (use_cache && ndst)
1177 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1178 }
1179 skb_dst_set(skb, dst);
1180
1181 hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
1182
1183 /* Calculate max headroom for all the headers and adjust
1184 * needed_headroom if necessary.
1185 */
1186 max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1187 + dst->header_len + t->hlen;
1188 if (max_headroom > dev->needed_headroom)
1189 dev->needed_headroom = max_headroom;
1190
1191 err = ip6_tnl_encap(skb, t, &proto, fl6);
1192 if (err)
1193 return err;
1194
1195 if (encap_limit >= 0) {
1196 init_tel_txopt(&opt, encap_limit);
1197 ipv6_push_frag_opts(skb, &opt.ops, &proto);
1198 }
1199
1200 skb_push(skb, sizeof(struct ipv6hdr));
1201 skb_reset_network_header(skb);
1202 ipv6h = ipv6_hdr(skb);
1203 ip6_flow_hdr(ipv6h, dsfield,
1204 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1205 ipv6h->hop_limit = hop_limit;
1206 ipv6h->nexthdr = proto;
1207 ipv6h->saddr = fl6->saddr;
1208 ipv6h->daddr = fl6->daddr;
1209 ip6tunnel_xmit(NULL, skb, dev);
1210 return 0;
1211 tx_err_link_failure:
1212 stats->tx_carrier_errors++;
1213 dst_link_failure(skb);
1214 tx_err_dst_release:
1215 dst_release(dst);
1216 return err;
1217 }
1218 EXPORT_SYMBOL(ip6_tnl_xmit);
1219
1220 static inline int
1221 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1222 {
1223 struct ip6_tnl *t = netdev_priv(dev);
1224 const struct iphdr *iph;
1225 int encap_limit = -1;
1226 struct flowi6 fl6;
1227 __u8 dsfield;
1228 __u32 mtu;
1229 u8 tproto;
1230 int err;
1231
1232 iph = ip_hdr(skb);
1233 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1234
1235 tproto = READ_ONCE(t->parms.proto);
1236 if (tproto != IPPROTO_IPIP && tproto != 0)
1237 return -1;
1238
1239 if (t->parms.collect_md) {
1240 struct ip_tunnel_info *tun_info;
1241 const struct ip_tunnel_key *key;
1242
1243 tun_info = skb_tunnel_info(skb);
1244 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1245 ip_tunnel_info_af(tun_info) != AF_INET6))
1246 return -1;
1247 key = &tun_info->key;
1248 memset(&fl6, 0, sizeof(fl6));
1249 fl6.flowi6_proto = IPPROTO_IPIP;
1250 fl6.daddr = key->u.ipv6.dst;
1251 fl6.flowlabel = key->label;
1252 dsfield = key->tos;
1253 } else {
1254 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1255 encap_limit = t->parms.encap_limit;
1256
1257 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1258 fl6.flowi6_proto = IPPROTO_IPIP;
1259
1260 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1261 dsfield = ipv4_get_dsfield(iph);
1262 else
1263 dsfield = ip6_tclass(t->parms.flowinfo);
1264 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1265 fl6.flowi6_mark = skb->mark;
1266 else
1267 fl6.flowi6_mark = t->parms.fwmark;
1268 }
1269
1270 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1271
1272 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1273 return -1;
1274
1275 dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1276
1277 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1278
1279 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1280 IPPROTO_IPIP);
1281 if (err != 0) {
1282 /* XXX: send ICMP error even if DF is not set. */
1283 if (err == -EMSGSIZE)
1284 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1285 htonl(mtu));
1286 return -1;
1287 }
1288
1289 return 0;
1290 }
1291
1292 static inline int
1293 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1294 {
1295 struct ip6_tnl *t = netdev_priv(dev);
1296 struct ipv6hdr *ipv6h;
1297 int encap_limit = -1;
1298 __u16 offset;
1299 struct flowi6 fl6;
1300 __u8 dsfield;
1301 __u32 mtu;
1302 u8 tproto;
1303 int err;
1304
1305 ipv6h = ipv6_hdr(skb);
1306 tproto = READ_ONCE(t->parms.proto);
1307 if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1308 ip6_tnl_addr_conflict(t, ipv6h))
1309 return -1;
1310
1311 if (t->parms.collect_md) {
1312 struct ip_tunnel_info *tun_info;
1313 const struct ip_tunnel_key *key;
1314
1315 tun_info = skb_tunnel_info(skb);
1316 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1317 ip_tunnel_info_af(tun_info) != AF_INET6))
1318 return -1;
1319 key = &tun_info->key;
1320 memset(&fl6, 0, sizeof(fl6));
1321 fl6.flowi6_proto = IPPROTO_IPV6;
1322 fl6.daddr = key->u.ipv6.dst;
1323 fl6.flowlabel = key->label;
1324 dsfield = key->tos;
1325 } else {
1326 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1327 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1328 ipv6h = ipv6_hdr(skb);
1329 if (offset > 0) {
1330 struct ipv6_tlv_tnl_enc_lim *tel;
1331
1332 tel = (void *)&skb_network_header(skb)[offset];
1333 if (tel->encap_limit == 0) {
1334 icmpv6_send(skb, ICMPV6_PARAMPROB,
1335 ICMPV6_HDR_FIELD, offset + 2);
1336 return -1;
1337 }
1338 encap_limit = tel->encap_limit - 1;
1339 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1340 encap_limit = t->parms.encap_limit;
1341 }
1342
1343 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1344 fl6.flowi6_proto = IPPROTO_IPV6;
1345
1346 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1347 dsfield = ipv6_get_dsfield(ipv6h);
1348 else
1349 dsfield = ip6_tclass(t->parms.flowinfo);
1350 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1351 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1352 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1353 fl6.flowi6_mark = skb->mark;
1354 else
1355 fl6.flowi6_mark = t->parms.fwmark;
1356 }
1357
1358 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1359
1360 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1361 return -1;
1362
1363 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1364
1365 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1366
1367 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1368 IPPROTO_IPV6);
1369 if (err != 0) {
1370 if (err == -EMSGSIZE)
1371 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1372 return -1;
1373 }
1374
1375 return 0;
1376 }
1377
1378 static netdev_tx_t
1379 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1380 {
1381 struct ip6_tnl *t = netdev_priv(dev);
1382 struct net_device_stats *stats = &t->dev->stats;
1383 int ret;
1384
1385 if (!pskb_inet_may_pull(skb))
1386 goto tx_err;
1387
1388 switch (skb->protocol) {
1389 case htons(ETH_P_IP):
1390 ret = ip4ip6_tnl_xmit(skb, dev);
1391 break;
1392 case htons(ETH_P_IPV6):
1393 ret = ip6ip6_tnl_xmit(skb, dev);
1394 break;
1395 default:
1396 goto tx_err;
1397 }
1398
1399 if (ret < 0)
1400 goto tx_err;
1401
1402 return NETDEV_TX_OK;
1403
1404 tx_err:
1405 stats->tx_errors++;
1406 stats->tx_dropped++;
1407 kfree_skb(skb);
1408 return NETDEV_TX_OK;
1409 }
1410
1411 static void ip6_tnl_link_config(struct ip6_tnl *t)
1412 {
1413 struct net_device *dev = t->dev;
1414 struct __ip6_tnl_parm *p = &t->parms;
1415 struct flowi6 *fl6 = &t->fl.u.ip6;
1416 int t_hlen;
1417
1418 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1419 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1420
1421 /* Set up flowi template */
1422 fl6->saddr = p->laddr;
1423 fl6->daddr = p->raddr;
1424 fl6->flowi6_oif = p->link;
1425 fl6->flowlabel = 0;
1426
1427 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1428 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1429 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1430 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1431
1432 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1433 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1434
1435 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1436 dev->flags |= IFF_POINTOPOINT;
1437 else
1438 dev->flags &= ~IFF_POINTOPOINT;
1439
1440 t->tun_hlen = 0;
1441 t->hlen = t->encap_hlen + t->tun_hlen;
1442 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1443
1444 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1445 int strict = (ipv6_addr_type(&p->raddr) &
1446 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1447
1448 struct rt6_info *rt = rt6_lookup(t->net,
1449 &p->raddr, &p->laddr,
1450 p->link, strict);
1451
1452 if (!rt)
1453 return;
1454
1455 if (rt->dst.dev) {
1456 dev->hard_header_len = rt->dst.dev->hard_header_len +
1457 t_hlen;
1458
1459 dev->mtu = rt->dst.dev->mtu - t_hlen;
1460 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1461 dev->mtu -= 8;
1462
1463 if (dev->mtu < IPV6_MIN_MTU)
1464 dev->mtu = IPV6_MIN_MTU;
1465 }
1466 ip6_rt_put(rt);
1467 }
1468 }
1469
1470 /**
1471 * ip6_tnl_change - update the tunnel parameters
1472 * @t: tunnel to be changed
1473 * @p: tunnel configuration parameters
1474 *
1475 * Description:
1476 * ip6_tnl_change() updates the tunnel parameters
1477 **/
1478
1479 static int
1480 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1481 {
1482 t->parms.laddr = p->laddr;
1483 t->parms.raddr = p->raddr;
1484 t->parms.flags = p->flags;
1485 t->parms.hop_limit = p->hop_limit;
1486 t->parms.encap_limit = p->encap_limit;
1487 t->parms.flowinfo = p->flowinfo;
1488 t->parms.link = p->link;
1489 t->parms.proto = p->proto;
1490 t->parms.fwmark = p->fwmark;
1491 dst_cache_reset(&t->dst_cache);
1492 ip6_tnl_link_config(t);
1493 return 0;
1494 }
1495
1496 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1497 {
1498 struct net *net = t->net;
1499 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1500 int err;
1501
1502 ip6_tnl_unlink(ip6n, t);
1503 synchronize_net();
1504 err = ip6_tnl_change(t, p);
1505 ip6_tnl_link(ip6n, t);
1506 netdev_state_change(t->dev);
1507 return err;
1508 }
1509
1510 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1511 {
1512         /* for the default tnl0 device, only the proto may be changed */
1513 t->parms.proto = p->proto;
1514 netdev_state_change(t->dev);
1515 return 0;
1516 }
1517
1518 static void
1519 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1520 {
1521 p->laddr = u->laddr;
1522 p->raddr = u->raddr;
1523 p->flags = u->flags;
1524 p->hop_limit = u->hop_limit;
1525 p->encap_limit = u->encap_limit;
1526 p->flowinfo = u->flowinfo;
1527 p->link = u->link;
1528 p->proto = u->proto;
1529 memcpy(p->name, u->name, sizeof(u->name));
1530 }
1531
1532 static void
1533 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1534 {
1535 u->laddr = p->laddr;
1536 u->raddr = p->raddr;
1537 u->flags = p->flags;
1538 u->hop_limit = p->hop_limit;
1539 u->encap_limit = p->encap_limit;
1540 u->flowinfo = p->flowinfo;
1541 u->link = p->link;
1542 u->proto = p->proto;
1543 memcpy(u->name, p->name, sizeof(u->name));
1544 }
1545
1546 /**
1547 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1548 * @dev: virtual device associated with tunnel
1549 * @ifr: parameters passed from userspace
1550 * @cmd: command to be performed
1551 *
1552 * Description:
1553 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1554 * from userspace.
1555 *
1556 * The possible commands are the following:
1557 * %SIOCGETTUNNEL: get tunnel parameters for device
1558 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1559 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1560 * %SIOCDELTUNNEL: delete tunnel
1561 *
1562 * The fallback device "ip6tnl0", created during module
1563 * initialization, can be used for creating other tunnel devices.
1564 *
1565 * Return:
1566 * 0 on success,
1567 * %-EFAULT if unable to copy data to or from userspace,
1568  *   %-EPERM if current process doesn't have %CAP_NET_ADMIN set,
1569  *   %-EINVAL if passed tunnel parameters are invalid,
1570  *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
1571  *   %-ENODEV if attempting to change or delete a nonexistent device
1572 **/
1573
1574 static int
1575 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1576 {
1577 int err = 0;
1578 struct ip6_tnl_parm p;
1579 struct __ip6_tnl_parm p1;
1580 struct ip6_tnl *t = netdev_priv(dev);
1581 struct net *net = t->net;
1582 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1583
1584 memset(&p1, 0, sizeof(p1));
1585
1586 switch (cmd) {
1587 case SIOCGETTUNNEL:
1588 if (dev == ip6n->fb_tnl_dev) {
1589 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1590 err = -EFAULT;
1591 break;
1592 }
1593 ip6_tnl_parm_from_user(&p1, &p);
1594 t = ip6_tnl_locate(net, &p1, 0);
1595 if (IS_ERR(t))
1596 t = netdev_priv(dev);
1597 } else {
1598 memset(&p, 0, sizeof(p));
1599 }
1600 ip6_tnl_parm_to_user(&p, &t->parms);
1601 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1602 err = -EFAULT;
1603 }
1604 break;
1605 case SIOCADDTUNNEL:
1606 case SIOCCHGTUNNEL:
1607 err = -EPERM;
1608 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1609 break;
1610 err = -EFAULT;
1611 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1612 break;
1613 err = -EINVAL;
1614 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1615 p.proto != 0)
1616 break;
1617 ip6_tnl_parm_from_user(&p1, &p);
1618 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1619 if (cmd == SIOCCHGTUNNEL) {
1620 if (!IS_ERR(t)) {
1621 if (t->dev != dev) {
1622 err = -EEXIST;
1623 break;
1624 }
1625 } else
1626 t = netdev_priv(dev);
1627 if (dev == ip6n->fb_tnl_dev)
1628 err = ip6_tnl0_update(t, &p1);
1629 else
1630 err = ip6_tnl_update(t, &p1);
1631 }
1632 if (!IS_ERR(t)) {
1633 err = 0;
1634 ip6_tnl_parm_to_user(&p, &t->parms);
1635 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1636 err = -EFAULT;
1637
1638 } else {
1639 err = PTR_ERR(t);
1640 }
1641 break;
1642 case SIOCDELTUNNEL:
1643 err = -EPERM;
1644 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1645 break;
1646
1647 if (dev == ip6n->fb_tnl_dev) {
1648 err = -EFAULT;
1649 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1650 break;
1651 err = -ENOENT;
1652 ip6_tnl_parm_from_user(&p1, &p);
1653 t = ip6_tnl_locate(net, &p1, 0);
1654 if (IS_ERR(t))
1655 break;
1656 err = -EPERM;
1657 if (t->dev == ip6n->fb_tnl_dev)
1658 break;
1659 dev = t->dev;
1660 }
1661 err = 0;
1662 unregister_netdevice(dev);
1663 break;
1664 default:
1665 err = -EINVAL;
1666 }
1667 return err;
1668 }
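/*
 * A minimal userspace sketch of the ioctl interface described above
 * (assuming struct ip6_tnl_parm from <linux/ip6_tunnel.h> and
 * SIOCGETTUNNEL from <linux/if_tunnel.h>; error handling omitted):
 *
 *	struct ip6_tnl_parm p = { };
 *	struct ifreq ifr = { };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&p;
 *	if (ioctl(fd, SIOCGETTUNNEL, &ifr) == 0)
 *		printf("proto %u hop_limit %u\n", p.proto, p.hop_limit);
 *	close(fd);
 */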
1669
1670 /**
1671 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1672 * @dev: virtual device associated with tunnel
1673 * @new_mtu: the new mtu
1674 *
1675 * Return:
1676 * 0 on success,
1677  *   %-EINVAL if mtu is out of the allowed range
1678 **/
1679
1680 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1681 {
1682 struct ip6_tnl *tnl = netdev_priv(dev);
1683
1684 if (tnl->parms.proto == IPPROTO_IPV6) {
1685 if (new_mtu < IPV6_MIN_MTU)
1686 return -EINVAL;
1687 } else {
1688 if (new_mtu < ETH_MIN_MTU)
1689 return -EINVAL;
1690 }
1691 if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1692 if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
1693 return -EINVAL;
1694 } else {
1695 if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
1696 return -EINVAL;
1697 }
1698 dev->mtu = new_mtu;
1699 return 0;
1700 }
1701 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1702
1703 int ip6_tnl_get_iflink(const struct net_device *dev)
1704 {
1705 struct ip6_tnl *t = netdev_priv(dev);
1706
1707 return t->parms.link;
1708 }
1709 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1710
1711 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1712 unsigned int num)
1713 {
1714 if (num >= MAX_IPTUN_ENCAP_OPS)
1715 return -ERANGE;
1716
1717 return !cmpxchg((const struct ip6_tnl_encap_ops **)
1718 &ip6tun_encaps[num],
1719 NULL, ops) ? 0 : -1;
1720 }
1721 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1722
1723 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1724 unsigned int num)
1725 {
1726 int ret;
1727
1728 if (num >= MAX_IPTUN_ENCAP_OPS)
1729 return -ERANGE;
1730
1731 ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1732 &ip6tun_encaps[num],
1733 ops, NULL) == ops) ? 0 : -1;
1734
1735 synchronize_net();
1736
1737 return ret;
1738 }
1739 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1740
1741 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1742 struct ip_tunnel_encap *ipencap)
1743 {
1744 int hlen;
1745
1746 memset(&t->encap, 0, sizeof(t->encap));
1747
1748 hlen = ip6_encap_hlen(ipencap);
1749 if (hlen < 0)
1750 return hlen;
1751
1752 t->encap.type = ipencap->type;
1753 t->encap.sport = ipencap->sport;
1754 t->encap.dport = ipencap->dport;
1755 t->encap.flags = ipencap->flags;
1756
1757 t->encap_hlen = hlen;
1758 t->hlen = t->encap_hlen + t->tun_hlen;
1759
1760 return 0;
1761 }
1762 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1763
1764 static const struct net_device_ops ip6_tnl_netdev_ops = {
1765 .ndo_init = ip6_tnl_dev_init,
1766 .ndo_uninit = ip6_tnl_dev_uninit,
1767 .ndo_start_xmit = ip6_tnl_start_xmit,
1768 .ndo_do_ioctl = ip6_tnl_ioctl,
1769 .ndo_change_mtu = ip6_tnl_change_mtu,
1770 .ndo_get_stats = ip6_get_stats,
1771 .ndo_get_iflink = ip6_tnl_get_iflink,
1772 };
1773
1774 #define IPXIPX_FEATURES (NETIF_F_SG | \
1775 NETIF_F_FRAGLIST | \
1776 NETIF_F_HIGHDMA | \
1777 NETIF_F_GSO_SOFTWARE | \
1778 NETIF_F_HW_CSUM)
1779
1780 /**
1781 * ip6_tnl_dev_setup - setup virtual tunnel device
1782 * @dev: virtual device associated with tunnel
1783 *
1784 * Description:
1785 * Initialize function pointers and device parameters
1786 **/
1787
1788 static void ip6_tnl_dev_setup(struct net_device *dev)
1789 {
1790 dev->netdev_ops = &ip6_tnl_netdev_ops;
1791 dev->needs_free_netdev = true;
1792 dev->priv_destructor = ip6_dev_free;
1793
1794 dev->type = ARPHRD_TUNNEL6;
1795 dev->flags |= IFF_NOARP;
1796 dev->addr_len = sizeof(struct in6_addr);
1797 dev->features |= NETIF_F_LLTX;
1798 netif_keep_dst(dev);
1799
1800 dev->features |= IPXIPX_FEATURES;
1801 dev->hw_features |= IPXIPX_FEATURES;
1802
1803 /* This perm addr will be used as interface identifier by IPv6 */
1804 dev->addr_assign_type = NET_ADDR_RANDOM;
1805 eth_random_addr(dev->perm_addr);
1806 }
1807
1808
1809 /**
1810 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1811 * @dev: virtual device associated with tunnel
1812 **/
1813
1814 static inline int
1815 ip6_tnl_dev_init_gen(struct net_device *dev)
1816 {
1817 struct ip6_tnl *t = netdev_priv(dev);
1818 int ret;
1819 int t_hlen;
1820
1821 t->dev = dev;
1822 t->net = dev_net(dev);
1823 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1824 if (!dev->tstats)
1825 return -ENOMEM;
1826
1827 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1828 if (ret)
1829 goto free_stats;
1830
1831 ret = gro_cells_init(&t->gro_cells, dev);
1832 if (ret)
1833 goto destroy_dst;
1834
1835 t->tun_hlen = 0;
1836 t->hlen = t->encap_hlen + t->tun_hlen;
1837 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1838
1839 dev->type = ARPHRD_TUNNEL6;
1840 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1841 dev->mtu = ETH_DATA_LEN - t_hlen;
1842 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1843 dev->mtu -= 8;
1844 dev->min_mtu = ETH_MIN_MTU;
1845 dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
1846
1847 return 0;
1848
1849 destroy_dst:
1850 dst_cache_destroy(&t->dst_cache);
1851 free_stats:
1852 free_percpu(dev->tstats);
1853 dev->tstats = NULL;
1854
1855 return ret;
1856 }
1857
1858 /**
1859 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1860 * @dev: virtual device associated with tunnel
1861 **/
1862
1863 static int ip6_tnl_dev_init(struct net_device *dev)
1864 {
1865 struct ip6_tnl *t = netdev_priv(dev);
1866 int err = ip6_tnl_dev_init_gen(dev);
1867
1868 if (err)
1869 return err;
1870 ip6_tnl_link_config(t);
1871 if (t->parms.collect_md) {
1872 dev->features |= NETIF_F_NETNS_LOCAL;
1873 netif_keep_dst(dev);
1874 }
1875 return 0;
1876 }
1877
1878 /**
1879 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1880 * @dev: fallback device
1881 *
1882 * Return: 0
1883 **/
1884
1885 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1886 {
1887 struct ip6_tnl *t = netdev_priv(dev);
1888 struct net *net = dev_net(dev);
1889 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1890
1891 t->parms.proto = IPPROTO_IPV6;
1892 dev_hold(dev);
1893
1894 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1895 return 0;
1896 }
1897
1898 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1899 struct netlink_ext_ack *extack)
1900 {
1901 u8 proto;
1902
1903 if (!data || !data[IFLA_IPTUN_PROTO])
1904 return 0;
1905
1906 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1907 if (proto != IPPROTO_IPV6 &&
1908 proto != IPPROTO_IPIP &&
1909 proto != 0)
1910 return -EINVAL;
1911
1912 return 0;
1913 }
1914
1915 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1916 struct __ip6_tnl_parm *parms)
1917 {
1918 memset(parms, 0, sizeof(*parms));
1919
1920 if (!data)
1921 return;
1922
1923 if (data[IFLA_IPTUN_LINK])
1924 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1925
1926 if (data[IFLA_IPTUN_LOCAL])
1927 parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1928
1929 if (data[IFLA_IPTUN_REMOTE])
1930 parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1931
1932 if (data[IFLA_IPTUN_TTL])
1933 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1934
1935 if (data[IFLA_IPTUN_ENCAP_LIMIT])
1936 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1937
1938 if (data[IFLA_IPTUN_FLOWINFO])
1939 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1940
1941 if (data[IFLA_IPTUN_FLAGS])
1942 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1943
1944 if (data[IFLA_IPTUN_PROTO])
1945 parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1946
1947 if (data[IFLA_IPTUN_COLLECT_METADATA])
1948 parms->collect_md = true;
1949
1950 if (data[IFLA_IPTUN_FWMARK])
1951 parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1952 }
1953
1954 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1955 struct ip_tunnel_encap *ipencap)
1956 {
1957 bool ret = false;
1958
1959 memset(ipencap, 0, sizeof(*ipencap));
1960
1961 if (!data)
1962 return ret;
1963
1964 if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1965 ret = true;
1966 ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1967 }
1968
1969 if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1970 ret = true;
1971 ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1972 }
1973
1974 if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1975 ret = true;
1976 ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1977 }
1978
1979 if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1980 ret = true;
1981 ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1982 }
1983
1984 return ret;
1985 }
1986
1987 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1988 struct nlattr *tb[], struct nlattr *data[],
1989 struct netlink_ext_ack *extack)
1990 {
1991 struct net *net = dev_net(dev);
1992 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1993 struct ip_tunnel_encap ipencap;
1994 struct ip6_tnl *nt, *t;
1995 int err;
1996
1997 nt = netdev_priv(dev);
1998
1999 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2000 err = ip6_tnl_encap_setup(nt, &ipencap);
2001 if (err < 0)
2002 return err;
2003 }
2004
2005 ip6_tnl_netlink_parms(data, &nt->parms);
2006
2007 if (nt->parms.collect_md) {
2008 if (rtnl_dereference(ip6n->collect_md_tun))
2009 return -EEXIST;
2010 } else {
2011 t = ip6_tnl_locate(net, &nt->parms, 0);
2012 if (!IS_ERR(t))
2013 return -EEXIST;
2014 }
2015
2016 err = ip6_tnl_create2(dev);
2017 if (!err && tb[IFLA_MTU])
2018 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2019
2020 return err;
2021 }
2022
2023 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2024 struct nlattr *data[],
2025 struct netlink_ext_ack *extack)
2026 {
2027 struct ip6_tnl *t = netdev_priv(dev);
2028 struct __ip6_tnl_parm p;
2029 struct net *net = t->net;
2030 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2031 struct ip_tunnel_encap ipencap;
2032
2033 if (dev == ip6n->fb_tnl_dev)
2034 return -EINVAL;
2035
2036 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2037 int err = ip6_tnl_encap_setup(t, &ipencap);
2038
2039 if (err < 0)
2040 return err;
2041 }
2042 ip6_tnl_netlink_parms(data, &p);
2043 if (p.collect_md)
2044 return -EINVAL;
2045
2046 t = ip6_tnl_locate(net, &p, 0);
2047 if (!IS_ERR(t)) {
2048 if (t->dev != dev)
2049 return -EEXIST;
2050 } else
2051 t = netdev_priv(dev);
2052
2053 return ip6_tnl_update(t, &p);
2054 }
2055
2056 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2057 {
2058 struct net *net = dev_net(dev);
2059 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2060
2061 if (dev != ip6n->fb_tnl_dev)
2062 unregister_netdevice_queue(dev, head);
2063 }
2064
2065 static size_t ip6_tnl_get_size(const struct net_device *dev)
2066 {
2067 return
2068 /* IFLA_IPTUN_LINK */
2069 nla_total_size(4) +
2070 /* IFLA_IPTUN_LOCAL */
2071 nla_total_size(sizeof(struct in6_addr)) +
2072 /* IFLA_IPTUN_REMOTE */
2073 nla_total_size(sizeof(struct in6_addr)) +
2074 /* IFLA_IPTUN_TTL */
2075 nla_total_size(1) +
2076 /* IFLA_IPTUN_ENCAP_LIMIT */
2077 nla_total_size(1) +
2078 /* IFLA_IPTUN_FLOWINFO */
2079 nla_total_size(4) +
2080 /* IFLA_IPTUN_FLAGS */
2081 nla_total_size(4) +
2082 /* IFLA_IPTUN_PROTO */
2083 nla_total_size(1) +
2084 /* IFLA_IPTUN_ENCAP_TYPE */
2085 nla_total_size(2) +
2086 /* IFLA_IPTUN_ENCAP_FLAGS */
2087 nla_total_size(2) +
2088 /* IFLA_IPTUN_ENCAP_SPORT */
2089 nla_total_size(2) +
2090 /* IFLA_IPTUN_ENCAP_DPORT */
2091 nla_total_size(2) +
2092 /* IFLA_IPTUN_COLLECT_METADATA */
2093 nla_total_size(0) +
2094 /* IFLA_IPTUN_FWMARK */
2095 nla_total_size(4) +
2096 0;
2097 }
2098
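/* Editor's note: dump the current tunnel and encap parameters as
 * IFLA_IPTUN_* attributes; returns -EMSGSIZE if the message runs out of
 * room.
 */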
2099 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2100 {
2101 struct ip6_tnl *tunnel = netdev_priv(dev);
2102 struct __ip6_tnl_parm *parm = &tunnel->parms;
2103
2104 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2105 nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2106 nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2107 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2108 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2109 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2110 nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2111 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2112 nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2113 goto nla_put_failure;
2114
2115 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2116 nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2117 nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2118 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2119 goto nla_put_failure;
2120
2121 if (parm->collect_md)
2122 if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2123 goto nla_put_failure;
2124
2125 return 0;
2126
2127 nla_put_failure:
2128 return -EMSGSIZE;
2129 }
2130
2131 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2132 {
2133 struct ip6_tnl *tunnel = netdev_priv(dev);
2134
2135 return tunnel->net;
2136 }
2137 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2138
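/* Editor's note: attribute validation policy enforced by the rtnetlink
 * core before the newlink/changelink handlers above are invoked.
 */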
2139 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2140 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
2141 [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
2142 [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
2143 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
2144 [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
2145 [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
2146 [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
2147 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
2148 [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
2149 [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
2150 [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
2151 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
2152 [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
2153 [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
2154 };
2155
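/* Editor's note: rtnl_link_ops glue that exposes the "ip6tnl" link kind to
 * rtnetlink. Illustrative userspace usage (iproute2 syntax, shown only as
 * an example and not part of this file):
 *
 *   ip link add ip6tnl1 type ip6tnl mode ip6ip6 \
 *           local 2001:db8::1 remote 2001:db8::2
 *   ip link add ip6tnl2 type ip6tnl external      # collect_md tunnel
 */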
2156 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2157 .kind = "ip6tnl",
2158 .maxtype = IFLA_IPTUN_MAX,
2159 .policy = ip6_tnl_policy,
2160 .priv_size = sizeof(struct ip6_tnl),
2161 .setup = ip6_tnl_dev_setup,
2162 .validate = ip6_tnl_validate,
2163 .newlink = ip6_tnl_newlink,
2164 .changelink = ip6_tnl_changelink,
2165 .dellink = ip6_tnl_dellink,
2166 .get_size = ip6_tnl_get_size,
2167 .fill_info = ip6_tnl_fill_info,
2168 .get_link_net = ip6_tnl_get_link_net,
2169 };
2170
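/* Editor's note: receive and error handlers hooked into the xfrm6 tunnel
 * demultiplexer for IPv4-in-IPv6 (AF_INET) and IPv6-in-IPv6 (AF_INET6);
 * the priority value orders them relative to other handlers registered for
 * the same protocols.
 */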
2171 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2172 .handler = ip4ip6_rcv,
2173 .err_handler = ip4ip6_err,
2174 .priority = 1,
2175 };
2176
2177 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2178 .handler = ip6ip6_rcv,
2179 .err_handler = ip6ip6_err,
2180 .priority = 1,
2181 };
2182
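/* Editor's note: queue every ip6tnl device belonging to the exiting netns
 * for unregistration, then walk the hash lists to also catch tunnels whose
 * device lives in a different netns.
 */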
2183 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
2184 {
2185 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2186 struct net_device *dev, *aux;
2187 int h;
2188 struct ip6_tnl *t;
2189
2190 for_each_netdev_safe(net, dev, aux)
2191 if (dev->rtnl_link_ops == &ip6_link_ops)
2192 unregister_netdevice_queue(dev, list);
2193
2194 for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2195 t = rtnl_dereference(ip6n->tnls_r_l[h]);
2196 while (t) {
2197 /* If dev is in the same netns, it has already
2198 * been added to the list by the previous loop.
2199 */
2200 if (!net_eq(dev_net(t->dev), net))
2201 unregister_netdevice_queue(t->dev, list);
2202 t = rtnl_dereference(t->next);
2203 }
2204 }
2205 }
2206
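/* Editor's note: per-netns setup. Wire up the hash list pointers and
 * create the fallback device "ip6tnl0" (the wildcard tunnel used when no
 * specific tunnel matches), pinned to its netns via NETIF_F_NETNS_LOCAL.
 */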
2207 static int __net_init ip6_tnl_init_net(struct net *net)
2208 {
2209 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2210 struct ip6_tnl *t = NULL;
2211 int err;
2212
2213 ip6n->tnls[0] = ip6n->tnls_wc;
2214 ip6n->tnls[1] = ip6n->tnls_r_l;
2215
2216 err = -ENOMEM;
2217 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2218 NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2219
2220 if (!ip6n->fb_tnl_dev)
2221 goto err_alloc_dev;
2222 dev_net_set(ip6n->fb_tnl_dev, net);
2223 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2224 /* The FB netdevice is special: there is one, and only one, per netns.
2225 * Allowing it to be moved to another netns is clearly unsafe.
2226 */
2227 ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2228
2229 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2230 if (err < 0)
2231 goto err_register;
2232
2233 err = register_netdev(ip6n->fb_tnl_dev);
2234 if (err < 0)
2235 goto err_register;
2236
2237 t = netdev_priv(ip6n->fb_tnl_dev);
2238
2239 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2240 return 0;
2241
2242 err_register:
2243 free_netdev(ip6n->fb_tnl_dev);
2244 err_alloc_dev:
2245 return err;
2246 }
2247
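/* Editor's note: batched netns teardown. Collect the devices of all
 * exiting namespaces under a single rtnl_lock() and unregister them in one
 * pass.
 */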
2248 static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
2249 {
2250 struct net *net;
2251 LIST_HEAD(list);
2252
2253 rtnl_lock();
2254 list_for_each_entry(net, net_list, exit_list)
2255 ip6_tnl_destroy_tunnels(net, &list);
2256 unregister_netdevice_many(&list);
2257 rtnl_unlock();
2258 }
2259
2260 static struct pernet_operations ip6_tnl_net_ops = {
2261 .init = ip6_tnl_init_net,
2262 .exit_batch = ip6_tnl_exit_batch_net,
2263 .id = &ip6_tnl_net_id,
2264 .size = sizeof(struct ip6_tnl_net),
2265 };
2266
2267 /**
2268 * ip6_tunnel_init - register protocol and reserve needed resources
2269 *
2270 * Return: 0 on success
2271 **/
2272
2273 static int __init ip6_tunnel_init(void)
2274 {
2275 int err;
2276
2277 if (!ipv6_mod_enabled())
2278 return -EOPNOTSUPP;
2279
2280 err = register_pernet_device(&ip6_tnl_net_ops);
2281 if (err < 0)
2282 goto out_pernet;
2283
2284 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2285 if (err < 0) {
2286 pr_err("%s: can't register ip4ip6\n", __func__);
2287 goto out_ip4ip6;
2288 }
2289
2290 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2291 if (err < 0) {
2292 pr_err("%s: can't register ip6ip6\n", __func__);
2293 goto out_ip6ip6;
2294 }
2295 err = rtnl_link_register(&ip6_link_ops);
2296 if (err < 0)
2297 goto rtnl_link_failed;
2298
2299 return 0;
2300
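/* Editor's note: each error label below is named after the step that
 * failed; it undoes everything that was successfully registered before it,
 * in reverse order.
 */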
2301 rtnl_link_failed:
2302 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2303 out_ip6ip6:
2304 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2305 out_ip4ip6:
2306 unregister_pernet_device(&ip6_tnl_net_ops);
2307 out_pernet:
2308 return err;
2309 }
2310
2311 /**
2312 * ip6_tunnel_cleanup - free resources and unregister protocol
2313 **/
2314
2315 static void __exit ip6_tunnel_cleanup(void)
2316 {
2317 rtnl_link_unregister(&ip6_link_ops);
2318 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2319 pr_info("%s: can't deregister ip4ip6\n", __func__);
2320
2321 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2322 pr_info("%s: can't deregister ip6ip6\n", __func__);
2323
2324 unregister_pernet_device(&ip6_tnl_net_ops);
2325 }
2326
2327 module_init(ip6_tunnel_init);
2328 module_exit(ip6_tunnel_cleanup);