/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include <net/ip_tunnels.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");
#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes   += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes   = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes   = sum.tx_bytes;
	return &dev->stats;
}
/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
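/*
 * Illustrative usage sketch (not part of the original source): the iterator
 * macro above expands in place, so it assumes a local "struct ip6_tnl *t"
 * and an RCU read-side critical section in the caller, e.g.:
 *
 *	struct ip6_tnl *t;
 *
 *	rcu_read_lock();
 *	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 *		if (t->dev->flags & IFF_UP)
 *			break;
 *	}
 *	rcu_read_unlock();
 */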
static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_any(&t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    ipv6_addr_any(&t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	t = netdev_priv(dev);

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}
/**
 * ip6_tnl_create - create a new tunnel
 *   @net: network namespace
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -ENOMEM;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	ip6_dev_free(dev);
failed:
	return ERR_PTR(err);
}
/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @net: network namespace
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}
/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}
/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	__u8 nexthdr = ipv6h->nexthdr;
	__u16 off = sizeof(*ipv6h);

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		__u16 optlen = 0;
		struct ipv6_opt_hdr *hdr;

		if (raw + off + sizeof(*hdr) > skb->data &&
		    !pskb_may_pull(skb, raw - skb->data + off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(raw + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		if (nexthdr == NEXTHDR_DEST) {
			__u16 i = off + 2;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > off + optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)&raw[i];
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		off += optlen;
		nexthdr = hdr->nexthdr;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
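/*
 * Illustrative layout sketch (not part of the original source): the option
 * scanned for above is the RFC 2473 Tunnel Encapsulation Limit TLV, carried
 * inside a destination options header.  init_tel_txopt() further down emits
 * the same 8-byte layout on transmit:
 *
 *	byte 0:   next header (filled in when the option block is pushed)
 *	byte 1:   hdr ext len = 0 (the header is a single 8-octet unit)
 *	byte 2:   IPV6_TLV_TNL_ENCAP_LIMIT (option type 4)
 *	byte 3:   option data length = 1
 *	byte 4:   encap_limit value (remaining tunnel nestings allowed)
 *	bytes 5-7: PadN option padding the header to 8 octets
 */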
/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	__u16 len;
	int err = -ENOENT;
	u8 tproto;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;

		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*msg = rel_msg;
	*info = rel_info;

out:
	return err;
}
static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	struct sk_buff *skb2;
	const struct iphdr *eiph;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	case NDISC_REDIRECT:
		rel_type = ICMP_REDIRECT;
		rel_code = ICMP_REDIR_HOST;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0, 0, 0,
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		/* Not our own packet */
		ip_rt_put(rt);
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr, 0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) ||
		    rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
	}
	if (rel_type == ICMP_REDIRECT)
		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}
static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		if (rt)
			ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}
static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}
__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);
/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);
static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = ACCESS_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
/**
 * ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *   @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		struct in6_addr *addr6;
		struct neighbour *neigh;
		int addr_type;

		if (!skb_dst(skb))
			goto tx_err_link_failure;

		neigh = dst_neigh_lookup(skb_dst(skb),
					 &ipv6_hdr(skb)->daddr);
		if (!neigh)
			goto tx_err_link_failure;

		addr6 = (struct in6_addr *)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY)
			addr6 = &ipv6_hdr(skb)->daddr;

		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
		neigh_release(neigh);
	} else if (!fl6->flowi6_mark)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - psh_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb) && !t->parms.collect_md)
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
	if (skb->len > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (!fl6->flowi6_mark && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb->protocol = htons(ETH_P_IPV6);
	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);
static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	dsfield = ipv4_get_dsfield(iph);

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
					 & IPV6_TCLASS_MASK;
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
	}

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}
static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = ACCESS_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	dsfield = ipv6_get_dsfield(ipv6h);

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
	} else {
		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
		if (offset > 0) {
			struct ipv6_tlv_tnl_enc_lim *tel;

			tel = (void *)&skb_network_header(skb)[offset];
			if (tel->encap_limit == 0) {
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_HDR_FIELD, offset + 2);
				return -1;
			}
			encap_limit = tel->encap_limit - 1;
		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
			encap_limit = t->parms.encap_limit;
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
			fl6.flowlabel |= ip6_flowlabel(ipv6h);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
	}

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}
static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}
/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}
static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* for default tnl0 device allow to change only the proto */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}
static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}
/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict
 *   %-ENODEV if attempting to change or delete a nonexisting device
 **/
static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
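/*
 * Illustrative userspace sketch (not part of the original source): the ioctl
 * interface above is normally driven through the "ip6tnl0" fallback device,
 * roughly as follows (error handling omitted, all values are examples):
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "ip6tnl1");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */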
/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPIP) {
		if (new_mtu < 68)
			return -EINVAL;
	} else {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	}
	if (new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);
int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}
/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}
/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}
/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}
static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}
static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;
}
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}
static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *nt, *t;
	struct ip_tunnel_encap ipencap;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	return ip6_tnl_create2(dev);
}
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[])
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}
static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}
static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);
static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
};
static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};
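/*
 * Illustrative usage sketch (not part of the original source): with the
 * rtnl_link_ops above registered under the "ip6tnl" kind, tunnels are
 * typically managed over netlink, e.g. via iproute2 (addresses are examples):
 *
 *	ip link add name tun1 type ip6tnl \
 *		local 2001:db8::1 remote 2001:db8::2 mode ip6ip6
 *	ip link set tun1 up
 */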
static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, &list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	unregister_netdevice_many(&list);
}
static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}
static void __net_exit ip6_tnl_exit_net(struct net *net)
{
	rtnl_lock();
	ip6_tnl_destroy_tunnels(net);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};
/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int  err;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);