/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

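/* The totals below are folded from per-CPU counters. The
 * u64_stats_fetch begin/retry pair re-reads a CPU's counters if a
 * writer updated them mid-read, which matters on 32-bit hosts where
 * 64-bit loads are not atomic.
 */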
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
			per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_any(&t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    ipv6_addr_any(&t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
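/* The lookup above tries, in order: an exact (remote, local) match, a
 * local-only wildcard, a remote-only wildcard, any collect_md tunnel,
 * and finally the fallback device ip6tnl0 if it is up.
 */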

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}
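/* Fully wildcarded tunnels (both end-points unspecified) share the
 * single-entry tnls_wc list (prio 0); everything else lands in one of
 * the IP6_TUNNEL_HASH_SIZE hashed buckets (prio 1).
 */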

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 *   @p: tunnel parameters
 *   @pt: pointer to new tunnel
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -ENOMEM;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	ip6_dev_free(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *   @raw: pointer to the start of the inner IPv6 header
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			/* Remember: hdr is no longer valid at this point. */
			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
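/* Example walk for the common case: the inner packet is
 * IPv6 | Dest-opts | payload, where the destination options header is
 * 8 bytes: next-hdr, hdr-ext-len = 0, then the TLV area starting at
 * offset 2 with { type = IPV6_TLV_TNL_ENCAP_LIMIT, length = 1, limit }.
 * The function then returns the offset of that TLV relative to @raw,
 * here 40 (inner IPv6 header) + 2 = 42.
 */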

/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	u8 tproto;
	__u32 rel_info = 0;
	__u16 len;
	int err = -ENOENT;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
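		/* *info is the reported link MTU for the outer packet;
		 * subtracting the inner header's offset gives the MTU
		 * usable inside the tunnel, e.g. 1500 - 40 = 1460 for a
		 * plain IPv6-in-IPv6 encapsulation.
		 */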
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;

		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	struct sk_buff *skb2;
	const struct iphdr *eiph;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	case NDISC_REDIRECT:
		rel_type = ICMP_REDIRECT;
		rel_code = ICMP_REDIR_HOST;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0,
				   0, 0,
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) ||
		    rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
	}
	if (rel_type == ICMP_REDIRECT)
		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}
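/* IP6_ECN_decapsulate() returns 0 when the outer/inner ECN combination
 * is valid, 1 when it is suspect (logged in __ip6_tnl_rcv() if
 * log_ecn_error is set), and >1 when the packet must be dropped (CE set
 * on the outer header while the inner packet is not ECN-capable).
 */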

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype & IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype & IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = ACCESS_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
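/* The buffer built above is a complete 8-byte destination options
 * header: byte 0 (next header) is filled in by the stack, byte 1 is the
 * extension length (0, i.e. 8 bytes total), bytes 2-4 carry the tunnel
 * encapsulation limit TLV, and bytes 5-7 pad the header out with a
 * 1-byte PadN option.
 */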

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *   @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		struct in6_addr *addr6;
		struct neighbour *neigh;
		int addr_type;

		if (!skb_dst(skb))
			goto tx_err_link_failure;

		neigh = dst_neigh_lookup(skb_dst(skb),
					 &ipv6_hdr(skb)->daddr);
		if (!neigh)
			goto tx_err_link_failure;

		addr6 = (struct in6_addr *)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY)
			addr6 = &ipv6_hdr(skb)->daddr;

		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
		neigh_release(neigh);
	} else if (!(t->parms.flags &
		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if the routing decision does
		 * not depend on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
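	/* An outgoing encapsulation limit is carried in an 8-byte
	 * destination options header (see init_tel_txopt() above), so
	 * reserve room for it and shrink the usable MTU accordingly.
	 */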
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb) && !t->parms.collect_md)
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
	if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL, NULL);
	}

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);

static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	dsfield = ipv4_get_dsfield(iph);

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
					 & IPV6_TCLASS_MASK;
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = ACCESS_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	dsfield = ipv6_get_dsfield(ipv6h);

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
	} else {
		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
		ipv6h = ipv6_hdr(skb);
		if (offset > 0) {
			struct ipv6_tlv_tnl_enc_lim *tel;

			tel = (void *)&skb_network_header(skb)[offset];
			if (tel->encap_limit == 0) {
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_HDR_FIELD, offset + 2);
				return -1;
			}
			encap_limit = tel->encap_limit - 1;
		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
			encap_limit = t->parms.encap_limit;
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
			fl6.flowlabel |= ip6_flowlabel(ipv6h);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags & IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* For the default tnl0 device, only the protocol may be changed. */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/
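/* Hypothetical userspace sketch (not part of this file's build); the
 * request setup below is an assumption based on the ABI documented
 * above, with made-up names and addresses:
 *
 *	struct ip6_tnl_parm p = { 0 };
 *	struct ifreq ifr;
 *
 *	p.proto = IPPROTO_IPV6;
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(p.name, "mytun");
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);     // fd: any AF_INET6 socket
 */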

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPIP) {
		if (new_mtu < ETH_MIN_MTU)
			return -EINVAL;
	} else {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	}
	if (new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
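/* Registration claims slot @num atomically: the cmpxchg() succeeds only
 * if the slot is still NULL, so two encapsulation providers cannot race
 * for the same slot. Removal below is the mirror image and waits for
 * in-flight readers via synchronize_net() before returning.
 */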

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features |= IPXIPX_FEATURES;
	dev->hw_features |= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 0xFFF8 - dev->hard_header_len;

	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}

static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;
}
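/* These are the attributes the iproute2 front end fills in, e.g. for an
 * illustrative command line such as (device name and addresses made up):
 *
 *	ip link add name tun1 type ip6tnl \
 *		local 2001:db8::1 remote 2001:db8::2 mode ip6ip6
 */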

static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *nt, *t;
	struct ip_tunnel_encap ipencap;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	return ip6_tnl_create2(dev);
}

static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[])
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}

static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, &list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	unregister_netdevice_many(&list);
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_net(struct net *net)
{
	rtnl_lock();
	ip6_tnl_destroy_tunnels(net);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);