net/ipv6/ip6_tunnel.c
1 /*
2 * IPv6 tunneling device
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
8 *
9 * Based on:
10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
11 *
12 * RFC 2473
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
29 #include <linux/if.h>
30 #include <linux/in.h>
31 #include <linux/ip.h>
32 #include <linux/net.h>
33 #include <linux/in6.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/init.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/netfilter_ipv6.h>
41 #include <linux/slab.h>
42 #include <linux/hash.h>
43 #include <linux/etherdevice.h>
44
45 #include <asm/uaccess.h>
46 #include <linux/atomic.h>
47
48 #include <net/icmp.h>
49 #include <net/ip.h>
50 #include <net/ip_tunnels.h>
51 #include <net/ipv6.h>
52 #include <net/ip6_route.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_tunnel.h>
55 #include <net/xfrm.h>
56 #include <net/dsfield.h>
57 #include <net/inet_ecn.h>
58 #include <net/net_namespace.h>
59 #include <net/netns/generic.h>
60 #include <net/dst_metadata.h>
61
62 MODULE_AUTHOR("Ville Nuorvala");
63 MODULE_DESCRIPTION("IPv6 tunneling device");
64 MODULE_LICENSE("GPL");
65 MODULE_ALIAS_RTNL_LINK("ip6tnl");
66 MODULE_ALIAS_NETDEV("ip6tnl0");
67
68 #define IP6_TUNNEL_HASH_SIZE_SHIFT 5
69 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
70
71 static bool log_ecn_error = true;
72 module_param(log_ecn_error, bool, 0644);
73 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
74
75 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
76 {
77 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
78
79 return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
80 }
81
82 static int ip6_tnl_dev_init(struct net_device *dev);
83 static void ip6_tnl_dev_setup(struct net_device *dev);
84 static struct rtnl_link_ops ip6_link_ops __read_mostly;
85
86 static int ip6_tnl_net_id __read_mostly;
87 struct ip6_tnl_net {
88 /* the IPv6 tunnel fallback device */
89 struct net_device *fb_tnl_dev;
90 /* lists for storing tunnels in use */
91 struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
92 struct ip6_tnl __rcu *tnls_wc[1];
93 struct ip6_tnl __rcu **tnls[2];
94 struct ip6_tnl __rcu *collect_md_tun;
95 };
96
97 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
98 {
99 struct pcpu_sw_netstats tmp, sum = { 0 };
100 int i;
101
102 for_each_possible_cpu(i) {
103 unsigned int start;
104 const struct pcpu_sw_netstats *tstats =
105 per_cpu_ptr(dev->tstats, i);
106
107 do {
108 start = u64_stats_fetch_begin_irq(&tstats->syncp);
109 tmp.rx_packets = tstats->rx_packets;
110 tmp.rx_bytes = tstats->rx_bytes;
111 tmp.tx_packets = tstats->tx_packets;
112 tmp.tx_bytes = tstats->tx_bytes;
113 } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
114
115 sum.rx_packets += tmp.rx_packets;
116 sum.rx_bytes += tmp.rx_bytes;
117 sum.tx_packets += tmp.tx_packets;
118 sum.tx_bytes += tmp.tx_bytes;
119 }
120 dev->stats.rx_packets = sum.rx_packets;
121 dev->stats.rx_bytes = sum.rx_bytes;
122 dev->stats.tx_packets = sum.tx_packets;
123 dev->stats.tx_bytes = sum.tx_bytes;
124 return &dev->stats;
125 }
126
127 /**
128 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
129 * @remote: the address of the tunnel exit-point
130 * @local: the address of the tunnel entry-point
131 *
132 * Return:
133 * tunnel matching given end-points if found,
134 * else fallback tunnel if its device is up,
135 * else %NULL
136 **/
137
138 #define for_each_ip6_tunnel_rcu(start) \
139 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
140
141 static struct ip6_tnl *
142 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
143 {
144 unsigned int hash = HASH(remote, local);
145 struct ip6_tnl *t;
146 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
147 struct in6_addr any;
148
149 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
150 if (ipv6_addr_equal(local, &t->parms.laddr) &&
151 ipv6_addr_equal(remote, &t->parms.raddr) &&
152 (t->dev->flags & IFF_UP))
153 return t;
154 }
155
156 memset(&any, 0, sizeof(any));
157 hash = HASH(&any, local);
158 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 ipv6_addr_any(&t->parms.raddr) &&
161 (t->dev->flags & IFF_UP))
162 return t;
163 }
164
165 hash = HASH(remote, &any);
166 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
167 if (ipv6_addr_equal(remote, &t->parms.raddr) &&
168 ipv6_addr_any(&t->parms.laddr) &&
169 (t->dev->flags & IFF_UP))
170 return t;
171 }
172
173 t = rcu_dereference(ip6n->collect_md_tun);
174 if (t)
175 return t;
176
177 t = rcu_dereference(ip6n->tnls_wc[0]);
178 if (t && (t->dev->flags & IFF_UP))
179 return t;
180
181 return NULL;
182 }
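/* Editorial note (not in the original source): the lookup above tries, in
 * order, an exact (remote, local) match, a (wildcard remote, local) match,
 * a (remote, wildcard local) match, then any collect_md tunnel, and finally
 * the fallback device ip6tnl0 if it is up. The kernel-doc above predates the
 * wildcard and collect_md steps.
 */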
183
184 /**
185 * ip6_tnl_bucket - get head of list matching given tunnel parameters
186 * @p: parameters containing tunnel end-points
187 *
188 * Description:
189 * ip6_tnl_bucket() returns the head of the list matching the
190 * &struct in6_addr entries laddr and raddr in @p.
191 *
192 * Return: head of IPv6 tunnel list
193 **/
194
195 static struct ip6_tnl __rcu **
196 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
197 {
198 const struct in6_addr *remote = &p->raddr;
199 const struct in6_addr *local = &p->laddr;
200 unsigned int h = 0;
201 int prio = 0;
202
203 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
204 prio = 1;
205 h = HASH(remote, local);
206 }
207 return &ip6n->tnls[prio][h];
208 }
209
210 /**
211 * ip6_tnl_link - add tunnel to hash table
212 * @t: tunnel to be added
213 **/
214
215 static void
216 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
217 {
218 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
219
220 if (t->parms.collect_md)
221 rcu_assign_pointer(ip6n->collect_md_tun, t);
222 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
223 rcu_assign_pointer(*tp, t);
224 }
225
226 /**
227 * ip6_tnl_unlink - remove tunnel from hash table
228 * @t: tunnel to be removed
229 **/
230
231 static void
232 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
233 {
234 struct ip6_tnl __rcu **tp;
235 struct ip6_tnl *iter;
236
237 if (t->parms.collect_md)
238 rcu_assign_pointer(ip6n->collect_md_tun, NULL);
239
240 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
241 (iter = rtnl_dereference(*tp)) != NULL;
242 tp = &iter->next) {
243 if (t == iter) {
244 rcu_assign_pointer(*tp, t->next);
245 break;
246 }
247 }
248 }
249
250 static void ip6_dev_free(struct net_device *dev)
251 {
252 struct ip6_tnl *t = netdev_priv(dev);
253
254 gro_cells_destroy(&t->gro_cells);
255 dst_cache_destroy(&t->dst_cache);
256 free_percpu(dev->tstats);
257 free_netdev(dev);
258 }
259
260 static int ip6_tnl_create2(struct net_device *dev)
261 {
262 struct ip6_tnl *t = netdev_priv(dev);
263 struct net *net = dev_net(dev);
264 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
265 int err;
266
267 t = netdev_priv(dev);
268
269 dev->rtnl_link_ops = &ip6_link_ops;
270 err = register_netdevice(dev);
271 if (err < 0)
272 goto out;
273
274 strcpy(t->parms.name, dev->name);
275
276 dev_hold(dev);
277 ip6_tnl_link(ip6n, t);
278 return 0;
279
280 out:
281 return err;
282 }
283
284 /**
285 * ip6_tnl_create - create a new tunnel
286 * @net: network namespace in which to create the tunnel
287 * @p: tunnel parameters
288 *
289 * Description:
290 * Create tunnel matching given parameters.
291 *
292 * Return:
293 * created tunnel or error pointer
294 **/
295
296 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
297 {
298 struct net_device *dev;
299 struct ip6_tnl *t;
300 char name[IFNAMSIZ];
301 int err = -ENOMEM;
302
303 if (p->name[0])
304 strlcpy(name, p->name, IFNAMSIZ);
305 else
306 sprintf(name, "ip6tnl%%d");
307
308 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
309 ip6_tnl_dev_setup);
310 if (!dev)
311 goto failed;
312
313 dev_net_set(dev, net);
314
315 t = netdev_priv(dev);
316 t->parms = *p;
317 t->net = dev_net(dev);
318 err = ip6_tnl_create2(dev);
319 if (err < 0)
320 goto failed_free;
321
322 return t;
323
324 failed_free:
325 ip6_dev_free(dev);
326 failed:
327 return ERR_PTR(err);
328 }
329
330 /**
331 * ip6_tnl_locate - find or create tunnel matching given parameters
332 * @p: tunnel parameters
333 * @create: != 0 if allowed to create new tunnel if no match found
334 *
335 * Description:
336 * ip6_tnl_locate() first tries to locate an existing tunnel
337 * based on @p. If this is unsuccessful but @create is set, a new
338 * tunnel device is created and registered for use.
339 *
340 * Return:
341 * matching tunnel or error pointer
342 **/
343
344 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
345 struct __ip6_tnl_parm *p, int create)
346 {
347 const struct in6_addr *remote = &p->raddr;
348 const struct in6_addr *local = &p->laddr;
349 struct ip6_tnl __rcu **tp;
350 struct ip6_tnl *t;
351 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
352
353 for (tp = ip6_tnl_bucket(ip6n, p);
354 (t = rtnl_dereference(*tp)) != NULL;
355 tp = &t->next) {
356 if (ipv6_addr_equal(local, &t->parms.laddr) &&
357 ipv6_addr_equal(remote, &t->parms.raddr)) {
358 if (create)
359 return ERR_PTR(-EEXIST);
360
361 return t;
362 }
363 }
364 if (!create)
365 return ERR_PTR(-ENODEV);
366 return ip6_tnl_create(net, p);
367 }
368
369 /**
370 * ip6_tnl_dev_uninit - tunnel device uninitializer
371 * @dev: the device to be destroyed
372 *
373 * Description:
374 * ip6_tnl_dev_uninit() removes tunnel from its list
375 **/
376
377 static void
378 ip6_tnl_dev_uninit(struct net_device *dev)
379 {
380 struct ip6_tnl *t = netdev_priv(dev);
381 struct net *net = t->net;
382 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
383
384 if (dev == ip6n->fb_tnl_dev)
385 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
386 else
387 ip6_tnl_unlink(ip6n, t);
388 dst_cache_reset(&t->dst_cache);
389 dev_put(dev);
390 }
391
392 /**
393 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
394 * @skb: received socket buffer
395 *
396 * Return:
397 * 0 if none was found,
398 * else index to encapsulation limit
399 **/
400
401 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
402 {
403 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
404 __u8 nexthdr = ipv6h->nexthdr;
405 __u16 off = sizeof(*ipv6h);
406
407 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
408 __u16 optlen = 0;
409 struct ipv6_opt_hdr *hdr;
410 if (raw + off + sizeof(*hdr) > skb->data &&
411 !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
412 break;
413
414 hdr = (struct ipv6_opt_hdr *) (raw + off);
415 if (nexthdr == NEXTHDR_FRAGMENT) {
416 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
417 if (frag_hdr->frag_off)
418 break;
419 optlen = 8;
420 } else if (nexthdr == NEXTHDR_AUTH) {
421 optlen = (hdr->hdrlen + 2) << 2;
422 } else {
423 optlen = ipv6_optlen(hdr);
424 }
425 if (nexthdr == NEXTHDR_DEST) {
426 __u16 i = off + 2;
427 while (1) {
428 struct ipv6_tlv_tnl_enc_lim *tel;
429
430 /* No more room for encapsulation limit */
431 if (i + sizeof (*tel) > off + optlen)
432 break;
433
434 tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
435 /* return index of option if found and valid */
436 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
437 tel->length == 1)
438 return i;
439 /* else jump to next option */
440 if (tel->type)
441 i += tel->length + 2;
442 else
443 i++;
444 }
445 }
446 nexthdr = hdr->nexthdr;
447 off += optlen;
448 }
449 return 0;
450 }
451 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
452
453 /**
454 * ip6_tnl_err - tunnel error handler
455 *
456 * Description:
457 * ip6_tnl_err() should handle errors in the tunnel according
458 * to the specifications in RFC 2473.
459 **/
460
461 static int
462 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
463 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
464 {
465 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
466 struct ip6_tnl *t;
467 int rel_msg = 0;
468 u8 rel_type = ICMPV6_DEST_UNREACH;
469 u8 rel_code = ICMPV6_ADDR_UNREACH;
470 u8 tproto;
471 __u32 rel_info = 0;
472 __u16 len;
473 int err = -ENOENT;
474
475 /* If the packet doesn't contain the original IPv6 header we are
476 in trouble since we might need the source address for further
477 processing of the error. */
478
479 rcu_read_lock();
480 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
481 if (!t)
482 goto out;
483
484 tproto = ACCESS_ONCE(t->parms.proto);
485 if (tproto != ipproto && tproto != 0)
486 goto out;
487
488 err = 0;
489
490 switch (*type) {
491 __u32 teli;
492 struct ipv6_tlv_tnl_enc_lim *tel;
493 __u32 mtu;
494 case ICMPV6_DEST_UNREACH:
495 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
496 t->parms.name);
497 rel_msg = 1;
498 break;
499 case ICMPV6_TIME_EXCEED:
500 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
501 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
502 t->parms.name);
503 rel_msg = 1;
504 }
505 break;
506 case ICMPV6_PARAMPROB:
507 teli = 0;
508 if ((*code) == ICMPV6_HDR_FIELD)
509 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
510
511 if (teli && teli == *info - 2) {
512 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
513 if (tel->encap_limit == 0) {
514 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
515 t->parms.name);
516 rel_msg = 1;
517 }
518 } else {
519 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
520 t->parms.name);
521 }
522 break;
523 case ICMPV6_PKT_TOOBIG:
524 mtu = *info - offset;
525 if (mtu < IPV6_MIN_MTU)
526 mtu = IPV6_MIN_MTU;
527 t->dev->mtu = mtu;
528
529 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
530 if (len > mtu) {
531 rel_type = ICMPV6_PKT_TOOBIG;
532 rel_code = 0;
533 rel_info = mtu;
534 rel_msg = 1;
535 }
536 break;
537 }
538
539 *type = rel_type;
540 *code = rel_code;
541 *info = rel_info;
542 *msg = rel_msg;
543
544 out:
545 rcu_read_unlock();
546 return err;
547 }
548
549 static int
550 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
551 u8 type, u8 code, int offset, __be32 info)
552 {
553 int rel_msg = 0;
554 u8 rel_type = type;
555 u8 rel_code = code;
556 __u32 rel_info = ntohl(info);
557 int err;
558 struct sk_buff *skb2;
559 const struct iphdr *eiph;
560 struct rtable *rt;
561 struct flowi4 fl4;
562
563 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
564 &rel_msg, &rel_info, offset);
565 if (err < 0)
566 return err;
567
568 if (rel_msg == 0)
569 return 0;
570
571 switch (rel_type) {
572 case ICMPV6_DEST_UNREACH:
573 if (rel_code != ICMPV6_ADDR_UNREACH)
574 return 0;
575 rel_type = ICMP_DEST_UNREACH;
576 rel_code = ICMP_HOST_UNREACH;
577 break;
578 case ICMPV6_PKT_TOOBIG:
579 if (rel_code != 0)
580 return 0;
581 rel_type = ICMP_DEST_UNREACH;
582 rel_code = ICMP_FRAG_NEEDED;
583 break;
584 case NDISC_REDIRECT:
585 rel_type = ICMP_REDIRECT;
586 rel_code = ICMP_REDIR_HOST;
587 default:
588 return 0;
589 }
590
591 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
592 return 0;
593
594 skb2 = skb_clone(skb, GFP_ATOMIC);
595 if (!skb2)
596 return 0;
597
598 skb_dst_drop(skb2);
599
600 skb_pull(skb2, offset);
601 skb_reset_network_header(skb2);
602 eiph = ip_hdr(skb2);
603
604 /* Try to guess incoming interface */
605 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
606 eiph->saddr, 0,
607 0, 0,
608 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
609 if (IS_ERR(rt))
610 goto out;
611
612 skb2->dev = rt->dst.dev;
613
614 /* route "incoming" packet */
615 if (rt->rt_flags & RTCF_LOCAL) {
616 ip_rt_put(rt);
617 rt = NULL;
618 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
619 eiph->daddr, eiph->saddr,
620 0, 0,
621 IPPROTO_IPIP,
622 RT_TOS(eiph->tos), 0);
623 if (IS_ERR(rt) ||
624 rt->dst.dev->type != ARPHRD_TUNNEL) {
625 if (!IS_ERR(rt))
626 ip_rt_put(rt);
627 goto out;
628 }
629 skb_dst_set(skb2, &rt->dst);
630 } else {
631 ip_rt_put(rt);
632 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
633 skb2->dev) ||
634 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
635 goto out;
636 }
637
638 /* change mtu on this route */
639 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
640 if (rel_info > dst_mtu(skb_dst(skb2)))
641 goto out;
642
643 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
644 }
645 if (rel_type == ICMP_REDIRECT)
646 skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
647
648 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
649
650 out:
651 kfree_skb(skb2);
652 return 0;
653 }
654
655 static int
656 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
657 u8 type, u8 code, int offset, __be32 info)
658 {
659 int rel_msg = 0;
660 u8 rel_type = type;
661 u8 rel_code = code;
662 __u32 rel_info = ntohl(info);
663 int err;
664
665 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
666 &rel_msg, &rel_info, offset);
667 if (err < 0)
668 return err;
669
670 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
671 struct rt6_info *rt;
672 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
673
674 if (!skb2)
675 return 0;
676
677 skb_dst_drop(skb2);
678 skb_pull(skb2, offset);
679 skb_reset_network_header(skb2);
680
681 /* Try to guess incoming interface */
682 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
683 NULL, 0, 0);
684
685 if (rt && rt->dst.dev)
686 skb2->dev = rt->dst.dev;
687
688 icmpv6_send(skb2, rel_type, rel_code, rel_info);
689
690 ip6_rt_put(rt);
691
692 kfree_skb(skb2);
693 }
694
695 return 0;
696 }
697
698 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
699 const struct ipv6hdr *ipv6h,
700 struct sk_buff *skb)
701 {
702 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
703
704 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
705 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
706
707 return IP6_ECN_decapsulate(ipv6h, skb);
708 }
709
710 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
711 const struct ipv6hdr *ipv6h,
712 struct sk_buff *skb)
713 {
714 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
715 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
716
717 return IP6_ECN_decapsulate(ipv6h, skb);
718 }
719
720 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
721 const struct in6_addr *laddr,
722 const struct in6_addr *raddr)
723 {
724 struct __ip6_tnl_parm *p = &t->parms;
725 int ltype = ipv6_addr_type(laddr);
726 int rtype = ipv6_addr_type(raddr);
727 __u32 flags = 0;
728
729 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
730 flags = IP6_TNL_F_CAP_PER_PACKET;
731 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
732 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
733 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
734 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
735 if (ltype&IPV6_ADDR_UNICAST)
736 flags |= IP6_TNL_F_CAP_XMIT;
737 if (rtype&IPV6_ADDR_UNICAST)
738 flags |= IP6_TNL_F_CAP_RCV;
739 }
740 return flags;
741 }
742 EXPORT_SYMBOL(ip6_tnl_get_cap);
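/* Editorial illustration: with a global unicast local and remote address the
 * function above returns IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV; if either
 * end-point is the unspecified address (::) it returns only
 * IP6_TNL_F_CAP_PER_PACKET, so the capability is re-evaluated per packet;
 * link-local end-points are accepted only when p->link binds the tunnel to a
 * specific interface, and loopback addresses yield no capability at all.
 */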
743
744 /* called with rcu_read_lock() */
745 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
746 const struct in6_addr *laddr,
747 const struct in6_addr *raddr)
748 {
749 struct __ip6_tnl_parm *p = &t->parms;
750 int ret = 0;
751 struct net *net = t->net;
752
753 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
754 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
755 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
756 struct net_device *ldev = NULL;
757
758 if (p->link)
759 ldev = dev_get_by_index_rcu(net, p->link);
760
761 if ((ipv6_addr_is_multicast(laddr) ||
762 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
763 likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
764 ret = 1;
765 }
766 return ret;
767 }
768 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
769
770 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
771 const struct tnl_ptk_info *tpi,
772 struct metadata_dst *tun_dst,
773 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
774 const struct ipv6hdr *ipv6h,
775 struct sk_buff *skb),
776 bool log_ecn_err)
777 {
778 struct pcpu_sw_netstats *tstats;
779 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
780 int err;
781
782 if ((!(tpi->flags & TUNNEL_CSUM) &&
783 (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
784 ((tpi->flags & TUNNEL_CSUM) &&
785 !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
786 tunnel->dev->stats.rx_crc_errors++;
787 tunnel->dev->stats.rx_errors++;
788 goto drop;
789 }
790
791 if (tunnel->parms.i_flags & TUNNEL_SEQ) {
792 if (!(tpi->flags & TUNNEL_SEQ) ||
793 (tunnel->i_seqno &&
794 (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
795 tunnel->dev->stats.rx_fifo_errors++;
796 tunnel->dev->stats.rx_errors++;
797 goto drop;
798 }
799 tunnel->i_seqno = ntohl(tpi->seq) + 1;
800 }
801
802 skb->protocol = tpi->proto;
803
804 /* Warning: All skb pointers will be invalidated! */
805 if (tunnel->dev->type == ARPHRD_ETHER) {
806 if (!pskb_may_pull(skb, ETH_HLEN)) {
807 tunnel->dev->stats.rx_length_errors++;
808 tunnel->dev->stats.rx_errors++;
809 goto drop;
810 }
811
812 ipv6h = ipv6_hdr(skb);
813 skb->protocol = eth_type_trans(skb, tunnel->dev);
814 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
815 } else {
816 skb->dev = tunnel->dev;
817 }
818
819 skb_reset_network_header(skb);
820 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
821
822 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
823
824 err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
825 if (unlikely(err)) {
826 if (log_ecn_err)
827 net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
828 &ipv6h->saddr,
829 ipv6_get_dsfield(ipv6h));
830 if (err > 1) {
831 ++tunnel->dev->stats.rx_frame_errors;
832 ++tunnel->dev->stats.rx_errors;
833 goto drop;
834 }
835 }
836
837 tstats = this_cpu_ptr(tunnel->dev->tstats);
838 u64_stats_update_begin(&tstats->syncp);
839 tstats->rx_packets++;
840 tstats->rx_bytes += skb->len;
841 u64_stats_update_end(&tstats->syncp);
842
843 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
844
845 if (tun_dst)
846 skb_dst_set(skb, (struct dst_entry *)tun_dst);
847
848 gro_cells_receive(&tunnel->gro_cells, skb);
849 return 0;
850
851 drop:
852 kfree_skb(skb);
853 return 0;
854 }
855
856 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
857 const struct tnl_ptk_info *tpi,
858 struct metadata_dst *tun_dst,
859 bool log_ecn_err)
860 {
861 return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
862 log_ecn_err);
863 }
864 EXPORT_SYMBOL(ip6_tnl_rcv);
865
866 static const struct tnl_ptk_info tpi_v6 = {
867 /* no tunnel info required for ipxip6. */
868 .proto = htons(ETH_P_IPV6),
869 };
870
871 static const struct tnl_ptk_info tpi_v4 = {
872 /* no tunnel info required for ipxip6. */
873 .proto = htons(ETH_P_IP),
874 };
875
876 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
877 const struct tnl_ptk_info *tpi,
878 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
879 const struct ipv6hdr *ipv6h,
880 struct sk_buff *skb))
881 {
882 struct ip6_tnl *t;
883 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
884 struct metadata_dst *tun_dst = NULL;
885 int ret = -1;
886
887 rcu_read_lock();
888 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
889
890 if (t) {
891 u8 tproto = ACCESS_ONCE(t->parms.proto);
892
893 if (tproto != ipproto && tproto != 0)
894 goto drop;
895 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
896 goto drop;
897 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
898 goto drop;
899 if (iptunnel_pull_header(skb, 0, tpi->proto, false))
900 goto drop;
901 if (t->parms.collect_md) {
902 tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
903 if (!tun_dst)
904 return 0;
905 }
906 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
907 log_ecn_error);
908 }
909
910 rcu_read_unlock();
911
912 return ret;
913
914 drop:
915 rcu_read_unlock();
916 kfree_skb(skb);
917 return 0;
918 }
919
920 static int ip4ip6_rcv(struct sk_buff *skb)
921 {
922 return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
923 ip4ip6_dscp_ecn_decapsulate);
924 }
925
926 static int ip6ip6_rcv(struct sk_buff *skb)
927 {
928 return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
929 ip6ip6_dscp_ecn_decapsulate);
930 }
931
932 struct ipv6_tel_txoption {
933 struct ipv6_txoptions ops;
934 __u8 dst_opt[8];
935 };
936
937 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
938 {
939 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
940
941 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
942 opt->dst_opt[3] = 1;
943 opt->dst_opt[4] = encap_limit;
944 opt->dst_opt[5] = IPV6_TLV_PADN;
945 opt->dst_opt[6] = 1;
946
947 opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
948 opt->ops.opt_nflen = 8;
949 }
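/* Editorial illustration: the 8-byte Destination Options header assembled
 * above has this layout on the wire (RFC 2473, section 5.1):
 *
 *   dst_opt[0]  Next Header      (filled in when the header is pushed)
 *   dst_opt[1]  Hdr Ext Len = 0  (header is 8 octets)
 *   dst_opt[2]  Option Type 0x04 (IPV6_TLV_TNL_ENCAP_LIMIT)
 *   dst_opt[3]  Opt Data Len = 1
 *   dst_opt[4]  Tunnel Encapsulation Limit value
 *   dst_opt[5]  Option Type 0x01 (IPV6_TLV_PADN)
 *   dst_opt[6]  Opt Data Len = 1
 *   dst_opt[7]  one padding octet
 */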
950
951 /**
952 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
953 * @t: the outgoing tunnel device
954 * @hdr: IPv6 header from the incoming packet
955 *
956 * Description:
957 * Avoid trivial tunneling loop by checking that tunnel exit-point
958 * doesn't match source of incoming packet.
959 *
960 * Return:
961 * 1 if conflict,
962 * 0 else
963 **/
964
965 static inline bool
966 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
967 {
968 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
969 }
970
971 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
972 const struct in6_addr *laddr,
973 const struct in6_addr *raddr)
974 {
975 struct __ip6_tnl_parm *p = &t->parms;
976 int ret = 0;
977 struct net *net = t->net;
978
979 if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
980 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
981 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
982 struct net_device *ldev = NULL;
983
984 rcu_read_lock();
985 if (p->link)
986 ldev = dev_get_by_index_rcu(net, p->link);
987
988 if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
989 pr_warn("%s xmit: Local address not yet configured!\n",
990 p->name);
991 else if (!ipv6_addr_is_multicast(raddr) &&
992 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
993 pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
994 p->name);
995 else
996 ret = 1;
997 rcu_read_unlock();
998 }
999 return ret;
1000 }
1001 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1002
1003 /**
1004 * ip6_tnl_xmit - encapsulate packet and send
1005 * @skb: the outgoing socket buffer
1006 * @dev: the outgoing tunnel device
1007 * @dsfield: dscp code for outer header
1008 * @fl6: flow of tunneled packet
1009 * @encap_limit: encapsulation limit
1010 * @pmtu: Path MTU is stored if packet is too big
1011 * @proto: next header value
1012 *
1013 * Description:
1014 * Build new header and do some sanity checks on the packet before sending
1015 * it.
1016 *
1017 * Return:
1018 * 0 on success
1019 * -1 on failure,
1020 * %-EMSGSIZE if the packet is too big (the link MTU is stored in @pmtu in this case)
1021 **/
1022
1023 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1024 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1025 __u8 proto)
1026 {
1027 struct ip6_tnl *t = netdev_priv(dev);
1028 struct net *net = t->net;
1029 struct net_device_stats *stats = &t->dev->stats;
1030 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1031 struct ipv6_tel_txoption opt;
1032 struct dst_entry *dst = NULL, *ndst = NULL;
1033 struct net_device *tdev;
1034 int mtu;
1035 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1036 unsigned int max_headroom = psh_hlen;
1037 u8 hop_limit;
1038 int err = -1;
1039
1040 if (t->parms.collect_md) {
1041 hop_limit = skb_tunnel_info(skb)->key.ttl;
1042 goto route_lookup;
1043 } else {
1044 hop_limit = t->parms.hop_limit;
1045 }
1046
1047 /* NBMA tunnel */
1048 if (ipv6_addr_any(&t->parms.raddr)) {
1049 struct in6_addr *addr6;
1050 struct neighbour *neigh;
1051 int addr_type;
1052
1053 if (!skb_dst(skb))
1054 goto tx_err_link_failure;
1055
1056 neigh = dst_neigh_lookup(skb_dst(skb),
1057 &ipv6_hdr(skb)->daddr);
1058 if (!neigh)
1059 goto tx_err_link_failure;
1060
1061 addr6 = (struct in6_addr *)&neigh->primary_key;
1062 addr_type = ipv6_addr_type(addr6);
1063
1064 if (addr_type == IPV6_ADDR_ANY)
1065 addr6 = &ipv6_hdr(skb)->daddr;
1066
1067 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1068 neigh_release(neigh);
1069 } else if (!fl6->flowi6_mark)
1070 dst = dst_cache_get(&t->dst_cache);
1071
1072 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1073 goto tx_err_link_failure;
1074
1075 if (!dst) {
1076 route_lookup:
1077 dst = ip6_route_output(net, NULL, fl6);
1078
1079 if (dst->error)
1080 goto tx_err_link_failure;
1081 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1082 if (IS_ERR(dst)) {
1083 err = PTR_ERR(dst);
1084 dst = NULL;
1085 goto tx_err_link_failure;
1086 }
1087 if (t->parms.collect_md &&
1088 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1089 &fl6->daddr, 0, &fl6->saddr))
1090 goto tx_err_link_failure;
1091 ndst = dst;
1092 }
1093
1094 tdev = dst->dev;
1095
1096 if (tdev == dev) {
1097 stats->collisions++;
1098 net_warn_ratelimited("%s: Local routing loop detected!\n",
1099 t->parms.name);
1100 goto tx_err_dst_release;
1101 }
1102 mtu = dst_mtu(dst) - psh_hlen;
1103 if (encap_limit >= 0) {
1104 max_headroom += 8;
1105 mtu -= 8;
1106 }
1107 if (mtu < IPV6_MIN_MTU)
1108 mtu = IPV6_MIN_MTU;
1109 if (skb_dst(skb) && !t->parms.collect_md)
1110 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1111 if (skb->len > mtu && !skb_is_gso(skb)) {
1112 *pmtu = mtu;
1113 err = -EMSGSIZE;
1114 goto tx_err_dst_release;
1115 }
1116
1117 if (t->err_count > 0) {
1118 if (time_before(jiffies,
1119 t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1120 t->err_count--;
1121
1122 dst_link_failure(skb);
1123 } else {
1124 t->err_count = 0;
1125 }
1126 }
1127
1128 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1129
1130 /*
1131 * Okay, now see if we can stuff it in the buffer as-is.
1132 */
1133 max_headroom += LL_RESERVED_SPACE(tdev);
1134
1135 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1136 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1137 struct sk_buff *new_skb;
1138
1139 new_skb = skb_realloc_headroom(skb, max_headroom);
1140 if (!new_skb)
1141 goto tx_err_dst_release;
1142
1143 if (skb->sk)
1144 skb_set_owner_w(new_skb, skb->sk);
1145 consume_skb(skb);
1146 skb = new_skb;
1147 }
1148
1149 if (t->parms.collect_md) {
1150 if (t->encap.type != TUNNEL_ENCAP_NONE)
1151 goto tx_err_dst_release;
1152 } else {
1153 if (!fl6->flowi6_mark && ndst)
1154 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1155 }
1156 skb_dst_set(skb, dst);
1157
1158 if (encap_limit >= 0) {
1159 init_tel_txopt(&opt, encap_limit);
1160 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
1161 }
1162
1163 /* Calculate max headroom for all the headers and adjust
1164 * needed_headroom if necessary.
1165 */
1166 max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1167 + dst->header_len + t->hlen;
1168 if (max_headroom > dev->needed_headroom)
1169 dev->needed_headroom = max_headroom;
1170
1171 err = ip6_tnl_encap(skb, t, &proto, fl6);
1172 if (err)
1173 return err;
1174
1175 skb->protocol = htons(ETH_P_IPV6);
1176 skb_push(skb, sizeof(struct ipv6hdr));
1177 skb_reset_network_header(skb);
1178 ipv6h = ipv6_hdr(skb);
1179 ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
1180 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1181 ipv6h->hop_limit = hop_limit;
1182 ipv6h->nexthdr = proto;
1183 ipv6h->saddr = fl6->saddr;
1184 ipv6h->daddr = fl6->daddr;
1185 ip6tunnel_xmit(NULL, skb, dev);
1186 return 0;
1187 tx_err_link_failure:
1188 stats->tx_carrier_errors++;
1189 dst_link_failure(skb);
1190 tx_err_dst_release:
1191 dst_release(dst);
1192 return err;
1193 }
1194 EXPORT_SYMBOL(ip6_tnl_xmit);
1195
1196 static inline int
1197 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1198 {
1199 struct ip6_tnl *t = netdev_priv(dev);
1200 const struct iphdr *iph = ip_hdr(skb);
1201 int encap_limit = -1;
1202 struct flowi6 fl6;
1203 __u8 dsfield;
1204 __u32 mtu;
1205 u8 tproto;
1206 int err;
1207
1208 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1209
1210 tproto = ACCESS_ONCE(t->parms.proto);
1211 if (tproto != IPPROTO_IPIP && tproto != 0)
1212 return -1;
1213
1214 dsfield = ipv4_get_dsfield(iph);
1215
1216 if (t->parms.collect_md) {
1217 struct ip_tunnel_info *tun_info;
1218 const struct ip_tunnel_key *key;
1219
1220 tun_info = skb_tunnel_info(skb);
1221 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1222 ip_tunnel_info_af(tun_info) != AF_INET6))
1223 return -1;
1224 key = &tun_info->key;
1225 memset(&fl6, 0, sizeof(fl6));
1226 fl6.flowi6_proto = IPPROTO_IPIP;
1227 fl6.daddr = key->u.ipv6.dst;
1228 fl6.flowlabel = key->label;
1229 } else {
1230 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1231 encap_limit = t->parms.encap_limit;
1232
1233 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1234 fl6.flowi6_proto = IPPROTO_IPIP;
1235
1236 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1237 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
1238 & IPV6_TCLASS_MASK;
1239 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1240 fl6.flowi6_mark = skb->mark;
1241 }
1242
1243 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1244 return -1;
1245
1246 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1247
1248 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1249 IPPROTO_IPIP);
1250 if (err != 0) {
1251 /* XXX: send ICMP error even if DF is not set. */
1252 if (err == -EMSGSIZE)
1253 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1254 htonl(mtu));
1255 return -1;
1256 }
1257
1258 return 0;
1259 }
1260
1261 static inline int
1262 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1263 {
1264 struct ip6_tnl *t = netdev_priv(dev);
1265 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1266 int encap_limit = -1;
1267 __u16 offset;
1268 struct flowi6 fl6;
1269 __u8 dsfield;
1270 __u32 mtu;
1271 u8 tproto;
1272 int err;
1273
1274 tproto = ACCESS_ONCE(t->parms.proto);
1275 if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1276 ip6_tnl_addr_conflict(t, ipv6h))
1277 return -1;
1278
1279 dsfield = ipv6_get_dsfield(ipv6h);
1280
1281 if (t->parms.collect_md) {
1282 struct ip_tunnel_info *tun_info;
1283 const struct ip_tunnel_key *key;
1284
1285 tun_info = skb_tunnel_info(skb);
1286 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1287 ip_tunnel_info_af(tun_info) != AF_INET6))
1288 return -1;
1289 key = &tun_info->key;
1290 memset(&fl6, 0, sizeof(fl6));
1291 fl6.flowi6_proto = IPPROTO_IPV6;
1292 fl6.daddr = key->u.ipv6.dst;
1293 fl6.flowlabel = key->label;
1294 } else {
1295 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1296 if (offset > 0) {
1297 struct ipv6_tlv_tnl_enc_lim *tel;
1298
1299 tel = (void *)&skb_network_header(skb)[offset];
1300 if (tel->encap_limit == 0) {
1301 icmpv6_send(skb, ICMPV6_PARAMPROB,
1302 ICMPV6_HDR_FIELD, offset + 2);
1303 return -1;
1304 }
1305 encap_limit = tel->encap_limit - 1;
1306 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1307 encap_limit = t->parms.encap_limit;
1308 }
1309
1310 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1311 fl6.flowi6_proto = IPPROTO_IPV6;
1312
1313 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1314 fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
1315 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1316 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1317 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1318 fl6.flowi6_mark = skb->mark;
1319 }
1320
1321 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1322 return -1;
1323
1324 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1325
1326 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1327 IPPROTO_IPV6);
1328 if (err != 0) {
1329 if (err == -EMSGSIZE)
1330 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1331 return -1;
1332 }
1333
1334 return 0;
1335 }
1336
1337 static netdev_tx_t
1338 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1339 {
1340 struct ip6_tnl *t = netdev_priv(dev);
1341 struct net_device_stats *stats = &t->dev->stats;
1342 int ret;
1343
1344 switch (skb->protocol) {
1345 case htons(ETH_P_IP):
1346 ret = ip4ip6_tnl_xmit(skb, dev);
1347 break;
1348 case htons(ETH_P_IPV6):
1349 ret = ip6ip6_tnl_xmit(skb, dev);
1350 break;
1351 default:
1352 goto tx_err;
1353 }
1354
1355 if (ret < 0)
1356 goto tx_err;
1357
1358 return NETDEV_TX_OK;
1359
1360 tx_err:
1361 stats->tx_errors++;
1362 stats->tx_dropped++;
1363 kfree_skb(skb);
1364 return NETDEV_TX_OK;
1365 }
1366
1367 static void ip6_tnl_link_config(struct ip6_tnl *t)
1368 {
1369 struct net_device *dev = t->dev;
1370 struct __ip6_tnl_parm *p = &t->parms;
1371 struct flowi6 *fl6 = &t->fl.u.ip6;
1372 int t_hlen;
1373
1374 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1375 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1376
1377 /* Set up flowi template */
1378 fl6->saddr = p->laddr;
1379 fl6->daddr = p->raddr;
1380 fl6->flowi6_oif = p->link;
1381 fl6->flowlabel = 0;
1382
1383 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1384 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1385 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1386 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1387
1388 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1389 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1390
1391 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1392 dev->flags |= IFF_POINTOPOINT;
1393 else
1394 dev->flags &= ~IFF_POINTOPOINT;
1395
1396 t->tun_hlen = 0;
1397 t->hlen = t->encap_hlen + t->tun_hlen;
1398 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1399
1400 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1401 int strict = (ipv6_addr_type(&p->raddr) &
1402 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1403
1404 struct rt6_info *rt = rt6_lookup(t->net,
1405 &p->raddr, &p->laddr,
1406 p->link, strict);
1407
1408 if (!rt)
1409 return;
1410
1411 if (rt->dst.dev) {
1412 dev->hard_header_len = rt->dst.dev->hard_header_len +
1413 t_hlen;
1414
1415 dev->mtu = rt->dst.dev->mtu - t_hlen;
1416 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1417 dev->mtu -= 8;
1418
1419 if (dev->mtu < IPV6_MIN_MTU)
1420 dev->mtu = IPV6_MIN_MTU;
1421 }
1422 ip6_rt_put(rt);
1423 }
1424 }
1425
1426 /**
1427 * ip6_tnl_change - update the tunnel parameters
1428 * @t: tunnel to be changed
1429 * @p: tunnel configuration parameters
1430 *
1431 * Description:
1432 * ip6_tnl_change() updates the tunnel parameters
1433 **/
1434
1435 static int
1436 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1437 {
1438 t->parms.laddr = p->laddr;
1439 t->parms.raddr = p->raddr;
1440 t->parms.flags = p->flags;
1441 t->parms.hop_limit = p->hop_limit;
1442 t->parms.encap_limit = p->encap_limit;
1443 t->parms.flowinfo = p->flowinfo;
1444 t->parms.link = p->link;
1445 t->parms.proto = p->proto;
1446 dst_cache_reset(&t->dst_cache);
1447 ip6_tnl_link_config(t);
1448 return 0;
1449 }
1450
1451 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1452 {
1453 struct net *net = t->net;
1454 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1455 int err;
1456
1457 ip6_tnl_unlink(ip6n, t);
1458 synchronize_net();
1459 err = ip6_tnl_change(t, p);
1460 ip6_tnl_link(ip6n, t);
1461 netdev_state_change(t->dev);
1462 return err;
1463 }
1464
1465 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1466 {
1467 /* for the default tnl0 device, only the proto may be changed */
1468 t->parms.proto = p->proto;
1469 netdev_state_change(t->dev);
1470 return 0;
1471 }
1472
1473 static void
1474 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1475 {
1476 p->laddr = u->laddr;
1477 p->raddr = u->raddr;
1478 p->flags = u->flags;
1479 p->hop_limit = u->hop_limit;
1480 p->encap_limit = u->encap_limit;
1481 p->flowinfo = u->flowinfo;
1482 p->link = u->link;
1483 p->proto = u->proto;
1484 memcpy(p->name, u->name, sizeof(u->name));
1485 }
1486
1487 static void
1488 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1489 {
1490 u->laddr = p->laddr;
1491 u->raddr = p->raddr;
1492 u->flags = p->flags;
1493 u->hop_limit = p->hop_limit;
1494 u->encap_limit = p->encap_limit;
1495 u->flowinfo = p->flowinfo;
1496 u->link = p->link;
1497 u->proto = p->proto;
1498 memcpy(u->name, p->name, sizeof(u->name));
1499 }
1500
1501 /**
1502 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1503 * @dev: virtual device associated with tunnel
1504 * @ifr: parameters passed from userspace
1505 * @cmd: command to be performed
1506 *
1507 * Description:
1508 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1509 * from userspace.
1510 *
1511 * The possible commands are the following:
1512 * %SIOCGETTUNNEL: get tunnel parameters for device
1513 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1514 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1515 * %SIOCDELTUNNEL: delete tunnel
1516 *
1517 * The fallback device "ip6tnl0", created during module
1518 * initialization, can be used for creating other tunnel devices.
1519 *
1520 * Return:
1521 * 0 on success,
1522 * %-EFAULT if unable to copy data to or from userspace,
1523 * %-EPERM if the current process lacks %CAP_NET_ADMIN,
1524 * %-EINVAL if passed tunnel parameters are invalid,
1525 * %-EEXIST if changing a tunnel's parameters would cause a conflict,
1526 * %-ENODEV if attempting to change or delete a nonexistent device
1527 **/
1528
1529 static int
1530 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1531 {
1532 int err = 0;
1533 struct ip6_tnl_parm p;
1534 struct __ip6_tnl_parm p1;
1535 struct ip6_tnl *t = netdev_priv(dev);
1536 struct net *net = t->net;
1537 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1538
1539 memset(&p1, 0, sizeof(p1));
1540
1541 switch (cmd) {
1542 case SIOCGETTUNNEL:
1543 if (dev == ip6n->fb_tnl_dev) {
1544 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1545 err = -EFAULT;
1546 break;
1547 }
1548 ip6_tnl_parm_from_user(&p1, &p);
1549 t = ip6_tnl_locate(net, &p1, 0);
1550 if (IS_ERR(t))
1551 t = netdev_priv(dev);
1552 } else {
1553 memset(&p, 0, sizeof(p));
1554 }
1555 ip6_tnl_parm_to_user(&p, &t->parms);
1556 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1557 err = -EFAULT;
1558 }
1559 break;
1560 case SIOCADDTUNNEL:
1561 case SIOCCHGTUNNEL:
1562 err = -EPERM;
1563 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1564 break;
1565 err = -EFAULT;
1566 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1567 break;
1568 err = -EINVAL;
1569 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1570 p.proto != 0)
1571 break;
1572 ip6_tnl_parm_from_user(&p1, &p);
1573 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1574 if (cmd == SIOCCHGTUNNEL) {
1575 if (!IS_ERR(t)) {
1576 if (t->dev != dev) {
1577 err = -EEXIST;
1578 break;
1579 }
1580 } else
1581 t = netdev_priv(dev);
1582 if (dev == ip6n->fb_tnl_dev)
1583 err = ip6_tnl0_update(t, &p1);
1584 else
1585 err = ip6_tnl_update(t, &p1);
1586 }
1587 if (!IS_ERR(t)) {
1588 err = 0;
1589 ip6_tnl_parm_to_user(&p, &t->parms);
1590 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1591 err = -EFAULT;
1592
1593 } else {
1594 err = PTR_ERR(t);
1595 }
1596 break;
1597 case SIOCDELTUNNEL:
1598 err = -EPERM;
1599 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1600 break;
1601
1602 if (dev == ip6n->fb_tnl_dev) {
1603 err = -EFAULT;
1604 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1605 break;
1606 err = -ENOENT;
1607 ip6_tnl_parm_from_user(&p1, &p);
1608 t = ip6_tnl_locate(net, &p1, 0);
1609 if (IS_ERR(t))
1610 break;
1611 err = -EPERM;
1612 if (t->dev == ip6n->fb_tnl_dev)
1613 break;
1614 dev = t->dev;
1615 }
1616 err = 0;
1617 unregister_netdevice(dev);
1618 break;
1619 default:
1620 err = -EINVAL;
1621 }
1622 return err;
1623 }
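/* Editorial sketch (not part of this file): a minimal userspace caller of the
 * ioctl interface documented above. It assumes an AF_INET6 datagram socket
 * and CAP_NET_ADMIN; the helper name, device name and error handling are
 * illustrative only.
 *
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *   #include <net/if.h>
 *   #include <netinet/in.h>
 *   #include <linux/if_tunnel.h>     // SIOCADDTUNNEL, SIOCDELTUNNEL, ...
 *   #include <linux/ip6_tunnel.h>    // struct ip6_tnl_parm
 *
 *   static int add_ip6_tunnel(int fd, const struct in6_addr *local,
 *                             const struct in6_addr *remote)
 *   {
 *           struct ip6_tnl_parm p = {
 *                   .proto = IPPROTO_IPV6,   // ip6ip6; IPPROTO_IPIP for ipip6
 *                   .hop_limit = 64,
 *                   .laddr = *local,
 *                   .raddr = *remote,
 *           };
 *           struct ifreq ifr;
 *
 *           strcpy(p.name, "tun6test");
 *           memset(&ifr, 0, sizeof(ifr));
 *           strcpy(ifr.ifr_name, "ip6tnl0");        // talk to the fallback dev
 *           ifr.ifr_ifru.ifru_data = (void *)&p;
 *           return ioctl(fd, SIOCADDTUNNEL, &ifr);  // 0 on success, -1 + errno
 *   }
 */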
1624
1625 /**
1626 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1627 * @dev: virtual device associated with tunnel
1628 * @new_mtu: the new mtu
1629 *
1630 * Return:
1631 * 0 on success,
1632 * %-EINVAL if the new mtu is out of range
1633 **/
1634
1635 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1636 {
1637 struct ip6_tnl *tnl = netdev_priv(dev);
1638
1639 if (tnl->parms.proto == IPPROTO_IPIP) {
1640 if (new_mtu < 68)
1641 return -EINVAL;
1642 } else {
1643 if (new_mtu < IPV6_MIN_MTU)
1644 return -EINVAL;
1645 }
1646 if (new_mtu > 0xFFF8 - dev->hard_header_len)
1647 return -EINVAL;
1648 dev->mtu = new_mtu;
1649 return 0;
1650 }
1651 EXPORT_SYMBOL(ip6_tnl_change_mtu);
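/* Editorial note: given the checks above, the accepted mtu range is
 * [IPV6_MIN_MTU, 0xFFF8 - dev->hard_header_len] for IPv6 payloads and
 * [68, 0xFFF8 - dev->hard_header_len] when the tunnel carries IPv4
 * (parms.proto == IPPROTO_IPIP).
 */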
1652
1653 int ip6_tnl_get_iflink(const struct net_device *dev)
1654 {
1655 struct ip6_tnl *t = netdev_priv(dev);
1656
1657 return t->parms.link;
1658 }
1659 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1660
1661 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1662 unsigned int num)
1663 {
1664 if (num >= MAX_IPTUN_ENCAP_OPS)
1665 return -ERANGE;
1666
1667 return !cmpxchg((const struct ip6_tnl_encap_ops **)
1668 &ip6tun_encaps[num],
1669 NULL, ops) ? 0 : -1;
1670 }
1671 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1672
1673 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1674 unsigned int num)
1675 {
1676 int ret;
1677
1678 if (num >= MAX_IPTUN_ENCAP_OPS)
1679 return -ERANGE;
1680
1681 ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1682 &ip6tun_encaps[num],
1683 ops, NULL) == ops) ? 0 : -1;
1684
1685 synchronize_net();
1686
1687 return ret;
1688 }
1689 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1690
1691 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1692 struct ip_tunnel_encap *ipencap)
1693 {
1694 int hlen;
1695
1696 memset(&t->encap, 0, sizeof(t->encap));
1697
1698 hlen = ip6_encap_hlen(ipencap);
1699 if (hlen < 0)
1700 return hlen;
1701
1702 t->encap.type = ipencap->type;
1703 t->encap.sport = ipencap->sport;
1704 t->encap.dport = ipencap->dport;
1705 t->encap.flags = ipencap->flags;
1706
1707 t->encap_hlen = hlen;
1708 t->hlen = t->encap_hlen + t->tun_hlen;
1709
1710 return 0;
1711 }
1712 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1713
1714 static const struct net_device_ops ip6_tnl_netdev_ops = {
1715 .ndo_init = ip6_tnl_dev_init,
1716 .ndo_uninit = ip6_tnl_dev_uninit,
1717 .ndo_start_xmit = ip6_tnl_start_xmit,
1718 .ndo_do_ioctl = ip6_tnl_ioctl,
1719 .ndo_change_mtu = ip6_tnl_change_mtu,
1720 .ndo_get_stats = ip6_get_stats,
1721 .ndo_get_iflink = ip6_tnl_get_iflink,
1722 };
1723
1724 #define IPXIPX_FEATURES (NETIF_F_SG | \
1725 NETIF_F_FRAGLIST | \
1726 NETIF_F_HIGHDMA | \
1727 NETIF_F_GSO_SOFTWARE | \
1728 NETIF_F_HW_CSUM)
1729
1730 /**
1731 * ip6_tnl_dev_setup - setup virtual tunnel device
1732 * @dev: virtual device associated with tunnel
1733 *
1734 * Description:
1735 * Initialize function pointers and device parameters
1736 **/
1737
1738 static void ip6_tnl_dev_setup(struct net_device *dev)
1739 {
1740 dev->netdev_ops = &ip6_tnl_netdev_ops;
1741 dev->destructor = ip6_dev_free;
1742
1743 dev->type = ARPHRD_TUNNEL6;
1744 dev->flags |= IFF_NOARP;
1745 dev->addr_len = sizeof(struct in6_addr);
1746 dev->features |= NETIF_F_LLTX;
1747 netif_keep_dst(dev);
1748
1749 dev->features |= IPXIPX_FEATURES;
1750 dev->hw_features |= IPXIPX_FEATURES;
1751
1752 /* This perm addr will be used as interface identifier by IPv6 */
1753 dev->addr_assign_type = NET_ADDR_RANDOM;
1754 eth_random_addr(dev->perm_addr);
1755 }
1756
1757
1758 /**
1759 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1760 * @dev: virtual device associated with tunnel
1761 **/
1762
1763 static inline int
1764 ip6_tnl_dev_init_gen(struct net_device *dev)
1765 {
1766 struct ip6_tnl *t = netdev_priv(dev);
1767 int ret;
1768 int t_hlen;
1769
1770 t->dev = dev;
1771 t->net = dev_net(dev);
1772 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1773 if (!dev->tstats)
1774 return -ENOMEM;
1775
1776 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1777 if (ret)
1778 goto free_stats;
1779
1780 ret = gro_cells_init(&t->gro_cells, dev);
1781 if (ret)
1782 goto destroy_dst;
1783
1784 t->tun_hlen = 0;
1785 t->hlen = t->encap_hlen + t->tun_hlen;
1786 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1787
1788 dev->type = ARPHRD_TUNNEL6;
1789 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1790 dev->mtu = ETH_DATA_LEN - t_hlen;
1791 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1792 dev->mtu -= 8;
1793
1794 return 0;
1795
1796 destroy_dst:
1797 dst_cache_destroy(&t->dst_cache);
1798 free_stats:
1799 free_percpu(dev->tstats);
1800 dev->tstats = NULL;
1801
1802 return ret;
1803 }
1804
1805 /**
1806 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1807 * @dev: virtual device associated with tunnel
1808 **/
1809
1810 static int ip6_tnl_dev_init(struct net_device *dev)
1811 {
1812 struct ip6_tnl *t = netdev_priv(dev);
1813 int err = ip6_tnl_dev_init_gen(dev);
1814
1815 if (err)
1816 return err;
1817 ip6_tnl_link_config(t);
1818 if (t->parms.collect_md) {
1819 dev->features |= NETIF_F_NETNS_LOCAL;
1820 netif_keep_dst(dev);
1821 }
1822 return 0;
1823 }
1824
1825 /**
1826 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1827 * @dev: fallback device
1828 *
1829 * Return: 0
1830 **/
1831
1832 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1833 {
1834 struct ip6_tnl *t = netdev_priv(dev);
1835 struct net *net = dev_net(dev);
1836 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1837
1838 t->parms.proto = IPPROTO_IPV6;
1839 dev_hold(dev);
1840
1841 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1842 return 0;
1843 }
1844
1845 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
1846 {
1847 u8 proto;
1848
1849 if (!data || !data[IFLA_IPTUN_PROTO])
1850 return 0;
1851
1852 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1853 if (proto != IPPROTO_IPV6 &&
1854 proto != IPPROTO_IPIP &&
1855 proto != 0)
1856 return -EINVAL;
1857
1858 return 0;
1859 }
1860
1861 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1862 struct __ip6_tnl_parm *parms)
1863 {
1864 memset(parms, 0, sizeof(*parms));
1865
1866 if (!data)
1867 return;
1868
1869 if (data[IFLA_IPTUN_LINK])
1870 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1871
1872 if (data[IFLA_IPTUN_LOCAL])
1873 parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1874
1875 if (data[IFLA_IPTUN_REMOTE])
1876 parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1877
1878 if (data[IFLA_IPTUN_TTL])
1879 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1880
1881 if (data[IFLA_IPTUN_ENCAP_LIMIT])
1882 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1883
1884 if (data[IFLA_IPTUN_FLOWINFO])
1885 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1886
1887 if (data[IFLA_IPTUN_FLAGS])
1888 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1889
1890 if (data[IFLA_IPTUN_PROTO])
1891 parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1892
1893 if (data[IFLA_IPTUN_COLLECT_METADATA])
1894 parms->collect_md = true;
1895 }
1896
1897 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1898 struct ip_tunnel_encap *ipencap)
1899 {
1900 bool ret = false;
1901
1902 memset(ipencap, 0, sizeof(*ipencap));
1903
1904 if (!data)
1905 return ret;
1906
1907 if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1908 ret = true;
1909 ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1910 }
1911
1912 if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1913 ret = true;
1914 ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1915 }
1916
1917 if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1918 ret = true;
1919 ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1920 }
1921
1922 if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1923 ret = true;
1924 ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1925 }
1926
1927 return ret;
1928 }
1929
1930 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1931 struct nlattr *tb[], struct nlattr *data[])
1932 {
1933 struct net *net = dev_net(dev);
1934 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1935 struct ip6_tnl *nt, *t;
1936 struct ip_tunnel_encap ipencap;
1937
1938 nt = netdev_priv(dev);
1939
1940 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1941 int err = ip6_tnl_encap_setup(nt, &ipencap);
1942
1943 if (err < 0)
1944 return err;
1945 }
1946
1947 ip6_tnl_netlink_parms(data, &nt->parms);
1948
1949 if (nt->parms.collect_md) {
1950 if (rtnl_dereference(ip6n->collect_md_tun))
1951 return -EEXIST;
1952 } else {
1953 t = ip6_tnl_locate(net, &nt->parms, 0);
1954 if (!IS_ERR(t))
1955 return -EEXIST;
1956 }
1957
1958 return ip6_tnl_create2(dev);
1959 }
1960
1961 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
1962 struct nlattr *data[])
1963 {
1964 struct ip6_tnl *t = netdev_priv(dev);
1965 struct __ip6_tnl_parm p;
1966 struct net *net = t->net;
1967 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1968 struct ip_tunnel_encap ipencap;
1969
1970 if (dev == ip6n->fb_tnl_dev)
1971 return -EINVAL;
1972
1973 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1974 int err = ip6_tnl_encap_setup(t, &ipencap);
1975
1976 if (err < 0)
1977 return err;
1978 }
1979 ip6_tnl_netlink_parms(data, &p);
1980 if (p.collect_md)
1981 return -EINVAL;
1982
1983 t = ip6_tnl_locate(net, &p, 0);
1984 if (!IS_ERR(t)) {
1985 if (t->dev != dev)
1986 return -EEXIST;
1987 } else
1988 t = netdev_priv(dev);
1989
1990 return ip6_tnl_update(t, &p);
1991 }
1992
1993 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
1994 {
1995 struct net *net = dev_net(dev);
1996 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1997
1998 if (dev != ip6n->fb_tnl_dev)
1999 unregister_netdevice_queue(dev, head);
2000 }
2001
2002 static size_t ip6_tnl_get_size(const struct net_device *dev)
2003 {
2004 return
2005 /* IFLA_IPTUN_LINK */
2006 nla_total_size(4) +
2007 /* IFLA_IPTUN_LOCAL */
2008 nla_total_size(sizeof(struct in6_addr)) +
2009 /* IFLA_IPTUN_REMOTE */
2010 nla_total_size(sizeof(struct in6_addr)) +
2011 /* IFLA_IPTUN_TTL */
2012 nla_total_size(1) +
2013 /* IFLA_IPTUN_ENCAP_LIMIT */
2014 nla_total_size(1) +
2015 /* IFLA_IPTUN_FLOWINFO */
2016 nla_total_size(4) +
2017 /* IFLA_IPTUN_FLAGS */
2018 nla_total_size(4) +
2019 /* IFLA_IPTUN_PROTO */
2020 nla_total_size(1) +
2021 /* IFLA_IPTUN_ENCAP_TYPE */
2022 nla_total_size(2) +
2023 /* IFLA_IPTUN_ENCAP_FLAGS */
2024 nla_total_size(2) +
2025 /* IFLA_IPTUN_ENCAP_SPORT */
2026 nla_total_size(2) +
2027 /* IFLA_IPTUN_ENCAP_DPORT */
2028 nla_total_size(2) +
2029 /* IFLA_IPTUN_COLLECT_METADATA */
2030 nla_total_size(0) +
2031 0;
2032 }
2033
2034 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2035 {
2036 struct ip6_tnl *tunnel = netdev_priv(dev);
2037 struct __ip6_tnl_parm *parm = &tunnel->parms;
2038
2039 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2040 nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2041 nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2042 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2043 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2044 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2045 nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2046 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
2047 goto nla_put_failure;
2048
2049 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2050 nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2051 nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2052 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2053 goto nla_put_failure;
2054
2055 if (parm->collect_md)
2056 if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2057 goto nla_put_failure;
2058 return 0;
2059
2060 nla_put_failure:
2061 return -EMSGSIZE;
2062 }
2063
2064 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2065 {
2066 struct ip6_tnl *tunnel = netdev_priv(dev);
2067
2068 return tunnel->net;
2069 }
2070 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2071
2072 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2073 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
2074 [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
2075 [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
2076 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
2077 [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
2078 [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
2079 [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
2080 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
2081 [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
2082 [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
2083 [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
2084 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
2085 [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
2086 };
2087
2088 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2089 .kind = "ip6tnl",
2090 .maxtype = IFLA_IPTUN_MAX,
2091 .policy = ip6_tnl_policy,
2092 .priv_size = sizeof(struct ip6_tnl),
2093 .setup = ip6_tnl_dev_setup,
2094 .validate = ip6_tnl_validate,
2095 .newlink = ip6_tnl_newlink,
2096 .changelink = ip6_tnl_changelink,
2097 .dellink = ip6_tnl_dellink,
2098 .get_size = ip6_tnl_get_size,
2099 .fill_info = ip6_tnl_fill_info,
2100 .get_link_net = ip6_tnl_get_link_net,
2101 };
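/* Editorial note: ip6_link_ops above backs the "ip6tnl" link type used by
 * iproute2. A typical (illustrative) configuration from userspace:
 *
 *   ip link add name tun6 type ip6tnl mode ip6ip6 \
 *           local 2001:db8::1 remote 2001:db8::2 encaplimit 4 hoplimit 64
 *   ip link set tun6 up
 *
 * With a recent enough iproute2, "external" requests collect_md
 * (IFLA_IPTUN_COLLECT_METADATA) mode instead of fixed end-points.
 */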
2102
2103 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2104 .handler = ip4ip6_rcv,
2105 .err_handler = ip4ip6_err,
2106 .priority = 1,
2107 };
2108
2109 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2110 .handler = ip6ip6_rcv,
2111 .err_handler = ip6ip6_err,
2112 .priority = 1,
2113 };
2114
2115 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
2116 {
2117 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2118 struct net_device *dev, *aux;
2119 int h;
2120 struct ip6_tnl *t;
2121 LIST_HEAD(list);
2122
2123 for_each_netdev_safe(net, dev, aux)
2124 if (dev->rtnl_link_ops == &ip6_link_ops)
2125 unregister_netdevice_queue(dev, &list);
2126
2127 for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2128 t = rtnl_dereference(ip6n->tnls_r_l[h]);
2129 while (t) {
2130 /* If dev is in the same netns, it has already
2131 * been added to the list by the previous loop.
2132 */
2133 if (!net_eq(dev_net(t->dev), net))
2134 unregister_netdevice_queue(t->dev, &list);
2135 t = rtnl_dereference(t->next);
2136 }
2137 }
2138
2139 unregister_netdevice_many(&list);
2140 }
2141
2142 static int __net_init ip6_tnl_init_net(struct net *net)
2143 {
2144 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2145 struct ip6_tnl *t = NULL;
2146 int err;
2147
2148 ip6n->tnls[0] = ip6n->tnls_wc;
2149 ip6n->tnls[1] = ip6n->tnls_r_l;
2150
2151 err = -ENOMEM;
2152 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2153 NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2154
2155 if (!ip6n->fb_tnl_dev)
2156 goto err_alloc_dev;
2157 dev_net_set(ip6n->fb_tnl_dev, net);
2158 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2159 /* FB netdevice is special: we have one, and only one per netns.
2160 * Allowing it to be moved to another netns is clearly unsafe.
2161 */
2162 ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2163
2164 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2165 if (err < 0)
2166 goto err_register;
2167
2168 err = register_netdev(ip6n->fb_tnl_dev);
2169 if (err < 0)
2170 goto err_register;
2171
2172 t = netdev_priv(ip6n->fb_tnl_dev);
2173
2174 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2175 return 0;
2176
2177 err_register:
2178 ip6_dev_free(ip6n->fb_tnl_dev);
2179 err_alloc_dev:
2180 return err;
2181 }
2182
2183 static void __net_exit ip6_tnl_exit_net(struct net *net)
2184 {
2185 rtnl_lock();
2186 ip6_tnl_destroy_tunnels(net);
2187 rtnl_unlock();
2188 }
2189
2190 static struct pernet_operations ip6_tnl_net_ops = {
2191 .init = ip6_tnl_init_net,
2192 .exit = ip6_tnl_exit_net,
2193 .id = &ip6_tnl_net_id,
2194 .size = sizeof(struct ip6_tnl_net),
2195 };
2196
2197 /**
2198 * ip6_tunnel_init - register protocol and reserve needed resources
2199 *
2200 * Return: 0 on success
2201 **/
2202
2203 static int __init ip6_tunnel_init(void)
2204 {
2205 int err;
2206
2207 err = register_pernet_device(&ip6_tnl_net_ops);
2208 if (err < 0)
2209 goto out_pernet;
2210
2211 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2212 if (err < 0) {
2213 pr_err("%s: can't register ip4ip6\n", __func__);
2214 goto out_ip4ip6;
2215 }
2216
2217 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2218 if (err < 0) {
2219 pr_err("%s: can't register ip6ip6\n", __func__);
2220 goto out_ip6ip6;
2221 }
2222 err = rtnl_link_register(&ip6_link_ops);
2223 if (err < 0)
2224 goto rtnl_link_failed;
2225
2226 return 0;
2227
2228 rtnl_link_failed:
2229 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2230 out_ip6ip6:
2231 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2232 out_ip4ip6:
2233 unregister_pernet_device(&ip6_tnl_net_ops);
2234 out_pernet:
2235 return err;
2236 }
2237
2238 /**
2239 * ip6_tunnel_cleanup - free resources and unregister protocol
2240 **/
2241
2242 static void __exit ip6_tunnel_cleanup(void)
2243 {
2244 rtnl_link_unregister(&ip6_link_ops);
2245 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2246 pr_info("%s: can't deregister ip4ip6\n", __func__);
2247
2248 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2249 pr_info("%s: can't deregister ip6ip6\n", __func__);
2250
2251 unregister_pernet_device(&ip6_tnl_net_ops);
2252 }
2253
2254 module_init(ip6_tunnel_init);
2255 module_exit(ip6_tunnel_cleanup);