/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");
#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}
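/* Editor's note, an illustrative sketch (not part of the original source):
 * with IP6_TUNNEL_HASH_SIZE_SHIFT == 5, hash_32() folds the xor of the two
 * per-address hashes down to one of 32 buckets, e.g.:
 *
 *	u32 h = HASH(&t->parms.raddr, &t->parms.laddr);
 *	// h is in [0, IP6_TUNNEL_HASH_SIZE) and indexes tnls_r_l[]
 */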
static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes   += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes   = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes   = sum.tx_bytes;
	return &dev->stats;
}
/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_any(&t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    ipv6_addr_any(&t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	t = netdev_priv(dev);

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}
/**
 * ip6_tnl_create - create a new tunnel
 *   @p: tunnel parameters
 *   @pt: pointer to new tunnel
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -E2BIG;

	if (p->name[0]) {
		if (!dev_valid_name(p->name))
			goto failed;
		strlcpy(name, p->name, IFNAMSIZ);
	} else {
		sprintf(name, "ip6tnl%%d");
	}
	err = -ENOMEM;
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @parms. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}
/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			/* Remember : hdr is no longer valid at this point. */
			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
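/* Editor's sketch (not from the original source): per RFC 2473, the
 * destination options block this parser looks for carries the tunnel
 * encapsulation limit TLV (type IPV6_TLV_TNL_ENCAP_LIMIT == 4, data
 * length 1) starting at index 2, after the nexthdr/hdrlen pair:
 *
 *	+---------+--------+------+------+-------+------+
 *	| nexthdr | hdrlen | 0x04 | 0x01 | limit | PadN |
 *	+---------+--------+------+------+-------+------+
 *
 * The function returns the offset of that TLV relative to @raw, or 0
 * when no valid encapsulation limit option is present.
 */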
/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	struct ip6_tnl *t;
	int err = -ENOENT;
	int rel_msg = 0;
	u8 tproto;
	__u16 len;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu, teli;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
				sock_net_uid(net, NULL));
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	const struct iphdr *eiph;
	struct sk_buff *skb2;
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;
	ip_rt_put(rt);

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr, 0, 0,
					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst_update_pmtu(skb2, rel_info);
	}

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, skb2, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}
static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
		     likely(!ipv6_chk_addr(net, raddr, NULL, 0))))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						const struct ipv6hdr *ipv6h,
						struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = READ_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
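/* Editor's note: a sketch of the resulting 8-byte destination options
 * block, derived from RFC 2473 and the assignments above (the two bytes
 * left at zero by memset() are marked):
 *
 *	dst_opt[0] = 0      nexthdr, filled in by ipv6_push_frag_opts()
 *	dst_opt[1] = 0      hdrlen (8-octet units beyond the first 8)
 *	dst_opt[2] = 4      IPV6_TLV_TNL_ENCAP_LIMIT
 *	dst_opt[3] = 1      option data length
 *	dst_opt[4] = limit  remaining encapsulation limit
 *	dst_opt[5] = 1      IPV6_TLV_PADN
 *	dst_opt[6] = 1      one byte of padding follows
 *	dst_opt[7] = 0      padding
 */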
/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
			 !ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
/**
 * ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *   @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			struct neighbour *neigh;
			int addr_type;

			if (!skb_dst(skb))
				goto tx_err_link_failure;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_err_link_failure;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY)
				addr6 = &ipv6_hdr(skb)->daddr;

			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
			neigh_release(neigh);
		}
	} else if (t->parms.proto != 0 && !(t->parms.flags &
					    (IP6_TNL_F_USE_ORIG_TCLASS |
					     IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if neither the outer protocol nor the
		 * routing decision depends on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		/* add dsfield to flowlabel for route lookup */
		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);

		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	} else if (mtu < 576) {
		mtu = 576;
	}

	skb_dst_update_pmtu(skb, mtu);
	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_frag_opts(skb, &opt.ops, &proto);
	}

	hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, dsfield,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);
static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr  *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv4_get_dsfield(iph);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = READ_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
	} else {
		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
		ipv6h = ipv6_hdr(skb);
		if (offset > 0) {
			struct ipv6_tlv_tnl_enc_lim *tel;

			tel = (void *)&skb_network_header(skb)[offset];
			if (tel->encap_limit == 0) {
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_HDR_FIELD, offset + 2);
				return -1;
			}
			encap_limit = tel->encap_limit - 1;
		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
			encap_limit = t->parms.encap_limit;
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv6_get_dsfield(ipv6h);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
			fl6.flowlabel |= ip6_flowlabel(ipv6h);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* for default tnl0 device allow to change only the proto */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}
static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}
/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict
 *   %-ENODEV if attempting to change or delete a nonexisting device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
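/* Editor's sketch of how userspace typically drives this ioctl interface.
 * Illustrative only, not part of the kernel build; the socket fd, device
 * name and the elided address setup are assumptions:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <netinet/in.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tunnel.h>
 *	#include <linux/ip6_tunnel.h>
 *
 *	struct ip6_tnl_parm p;
 *	struct ifreq ifr;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.proto = IPPROTO_IPV6;			// ip6ip6 mode
 *	// fill p.laddr / p.raddr with the tunnel end-points ...
 *	strcpy(p.name, "ip6tnl1");
 *	strcpy(ifr.ifr_name, "ip6tnl0");	// requests go via the fallback device
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);		// fd: an open AF_INET6 socket
 */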
/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPV6) {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	} else {
		if (new_mtu < ETH_MIN_MTU)
			return -EINVAL;
	}
	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
			return -EINVAL;
	} else {
		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
			return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);
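/* Editor's note, a worked example of the bounds enforced above: for an
 * ip6ip6 tunnel (proto == IPPROTO_IPV6) the admissible range is
 * [IPV6_MIN_MTU, IP6_MAX_MTU - hard_header_len], so with IPV6_MIN_MTU
 * being 1280 an illustrative request such as
 *
 *	ip link set dev ip6tnl1 mtu 1279
 *
 * is rejected with -EINVAL.
 */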
int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)
/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;

	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}
static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}

static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;
	struct ip6_tnl *nt, *t;
	int err;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		err = ip6_tnl_encap_setup(nt, &ipencap);
		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	err = ip6_tnl_create2(dev);
	if (!err && tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return err;
}
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}
static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};
static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
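/* Editor's note: these rtnl_link_ops back the "ip6tnl" link kind, so the
 * usual way to exercise ip6_tnl_newlink()/ip6_tnl_changelink() from
 * userspace is via iproute2. An illustrative invocation (the addresses
 * and device name are placeholders):
 *
 *	ip -6 tunnel add ip6tnl1 mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2
 *	ip link set ip6tnl1 up
 */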
static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	=	1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	=	1,
};

static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, list);
			t = rtnl_dereference(t->next);
		}
	}
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6_tnl_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit_batch = ip6_tnl_exit_batch_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int  err;

	if (!ipv6_mod_enabled())
		return -EOPNOTSUPP;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);