/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it requires maintaining a new field in EVERY
   skb, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), the count is reliable. We force an exit if this counter
   reaches RECURSION_LIMIT.  (See the illustrative sketch below.)

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   outer header. It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea turned out to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my neighbourhood)
     return only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should look for another solution.

   One option is to parse the packet, trying to detect an inner
   encapsulation made by our own node. That is difficult or even
   impossible, especially taking fragmentation into account. In short,
   ttl is not a solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   and that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value < 68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them
   together, but it is not obvious how to make them modular.
   sit is an integral part of IPv6, while ipip and gre are naturally
   modular. We could extract the common parts (hash table, ioctl, etc.)
   into a separate module (ip_tunnel.c).
 */
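/*
 * Illustrative sketch only (compiled out, not part of this driver): the
 * dead-loop guard referred to above lives in the core transmit path, not
 * here.  The names example_xmit_recursion, EXAMPLE_RECURSION_LIMIT and
 * example_dev_xmit_guarded are hypothetical, but the shape of the check
 * is the percpu-counter pattern the comment describes: bump a per-cpu
 * depth counter around the nested transmit and drop once it gets too deep.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, example_xmit_recursion);
#define EXAMPLE_RECURSION_LIMIT	10

static netdev_tx_t example_dev_xmit_guarded(struct sk_buff *skb,
					     struct net_device *dev)
{
	netdev_tx_t rc;

	if (__this_cpu_read(example_xmit_recursion) > EXAMPLE_RECURSION_LIMIT) {
		/* Local dead loop detected: drop instead of recursing. */
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	__this_cpu_inc(example_xmit_recursion);
	rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
	__this_cpu_dec(example_xmit_recursion);

	return rc;
}
#endif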
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);
/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE	16

static int ipgre_net_id __read_mostly;
struct ipgre_net {
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};
/* Tunnel hash table */

/*
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
 */

#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
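/*
 * Illustrative sketch only (compiled out): how a (remote, key) pair maps
 * to a bucket in the keyed remote+local table above.  example_rl_bucket
 * is a hypothetical helper; ipgre_tunnel_lookup() below computes the same
 * index inline as HASH(remote) ^ HASH(key).
 */
#if 0
static unsigned int example_rl_bucket(__be32 remote, __be32 key)
{
	/* Both halves land in [0, 16), so the XOR stays in range too. */
	return HASH(remote) ^ HASH(key);
}
#endif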
static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;
	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
	tot->rx_errors = dev->stats.rx_errors;

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	return tot;
}
/* Does the key in the tunnel parameters match the packet? */
static bool ipgre_key_match(const struct ip_tunnel_parm *p,
			    __be16 flags, __be32 key)
{
	if (p->i_flags & GRE_KEY) {
		if (flags & GRE_KEY)
			return key == p->i_key;
		return false;	/* key expected, none present */
	}
	return !(flags & GRE_KEY);
}
/* Given src, dst and key, find the appropriate tunnel for input. */

static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
					     __be32 remote, __be32 local,
					     __be16 flags, __be32 key,
					     __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
					       struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned int h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
						    struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t;
	struct ip_tunnel __rcu **tp;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}
static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
					     struct ip_tunnel_parm *parms, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "gre%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put the GRE key into the third word
   of the GRE header. That makes it impossible to maintain even soft state
   for keyed GRE tunnels with checksums enabled. Tell them "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee, so why the hell
   do these idiots break the standards they established themselves?
 */

	const struct iphdr *iph = (const struct iphdr *)skb->data;
	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;
	__be32 key = 0;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	if (flags & GRE_KEY)
		key = *(((__be32 *)p) + (grehlen / 4) - 1);

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags, key, p[1]);
	if (t == NULL)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      IPPROTO_GRE, 0);
		return;
	}
	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
static inline u8
ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;

	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}
static int ipgre_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;
	__be16 gre_proto;
	int    err;

	if (!pskb_may_pull(skb, 16))
		goto drop;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	tunnel = ipgre_tunnel_lookup(skb->dev,
				     iph->saddr, iph->daddr, flags, key,
				     gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		err = IP_ECN_decapsulate(iph, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &iph->saddr, iph->tos);
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				goto drop;
			}
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		gro_cells_receive(&tunnel->gro_cells, skb);

		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
{
	int err;

	if (skb_is_gso(skb)) {
		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			goto error;
		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
		return skb;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		   tunnel->parms.o_flags&GRE_CSUM) {
		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return skb;

error:
	kfree_skb(skb);
	return ERR_PTR(err);
}
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *old_iph;
	const struct iphdr  *tiph;
	struct flowi4 fl4;
	u8     tos;
	__be16 df;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;
	int    err;

	skb = handle_offloads(tunnel, skb);
	if (IS_ERR(skb)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	old_iph = ip_hdr(skb);

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		tiph = (const struct iphdr *)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, old_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	}

	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
				 tunnel->parms.o_key, RT_TOS(tos),
				 tunnel->parms.link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if (!skb_is_gso(skb) &&
		    (old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) &&
		    mtu >= IPV6_MIN_MTU &&
		    mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
		/* Warning : tiph value might point to freed memory */
	}

	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, sizeof(*iph));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */

	iph			= ip_hdr(skb);
	iph->version		= 4;
	iph->ihl		= sizeof(struct iphdr) >> 2;
	iph->frag_off		= df;
	iph->protocol		= IPPROTO_GRE;
	iph->tos		= ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr		= fl4.daddr;
	iph->saddr		= fl4.saddr;
	iph->ttl		= tiph->ttl;

	tunnel_ip_select_ident(skb, old_iph, &rt->dst);

	if (iph->ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = ip4_dst_hoplimit(&rt->dst);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		/* Skip GRE checksum if skb is getting offloaded. */
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
		    (tunnel->parms.o_flags&GRE_CSUM)) {
			int offset = skb_transport_offset(skb);

			if (skb_has_shared_frag(skb)) {
				err = __skb_linearize(skb);
				if (err)
					goto free_skb;
			}

			*ptr = 0;
			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
								 skb->len - offset,
								 0));
		}
	}

	iptunnel_xmit(skb, dev);
	return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
free_skb:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 iph->daddr, iph->saddr,
					 tunnel->parms.o_key,
					 RT_TOS(iph->tos),
					 tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;
	/* TCP offload with GRE SEQ is not supported. */
	if (!(tunnel->parms.o_flags & GRE_SEQ)) {
		dev->features		|= NETIF_F_GSO_SOFTWARE;
		dev->hw_features	|= NETIF_F_GSO_SOFTWARE;
	}

	return mtu;
}
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could do something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp fec0:6666:6666::193.233.7.65
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0]		= t->parms.o_flags;
	p[1]		= htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen;

	return -(t->hlen + sizeof(*iph));
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};
static void ipgre_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->type		= ARPHRD_IPGRE;
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int err;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}
static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
}
static const struct gre_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};
static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}
static int __net_init ipgre_init_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;
	LIST_HEAD(list);

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}
static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	ipgre_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops		= &ipgre_tap_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		nla_total_size(4) +	/* IFLA_GRE_LINK */
		nla_total_size(2) +	/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +	/* IFLA_GRE_OFLAGS */
		nla_total_size(4) +	/* IFLA_GRE_IKEY */
		nla_total_size(4) +	/* IFLA_GRE_OKEY */
		nla_total_size(4) +	/* IFLA_GRE_LOCAL */
		nla_total_size(4) +	/* IFLA_GRE_REMOTE */
		nla_total_size(1) +	/* IFLA_GRE_TTL */
		nla_total_size(1) +	/* IFLA_GRE_TOS */
		nla_total_size(1) +	/* IFLA_GRE_PMTUDISC */
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
/*
 *	And now the modules code and kernel interface.
 */
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	return 0;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	unregister_pernet_device(&ipgre_net_ops);
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");