/*
 * Linux NET3: GRE over IP protocol decoder.
 *
 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #ifndef USE_UPSTREAM_TUNNEL
16 #include <linux/capability.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/kconfig.h>
21 #include <linux/slab.h>
22 #include <asm/uaccess.h>
23 #include <linux/skbuff.h>
24 #include <linux/netdevice.h>
25 #include <linux/netdev_features.h>
27 #include <linux/tcp.h>
28 #include <linux/udp.h>
29 #include <linux/if_arp.h>
30 #include <linux/mroute.h>
31 #include <linux/if_vlan.h>
32 #include <linux/init.h>
33 #include <linux/in6.h>
34 #include <linux/inetdevice.h>
35 #include <linux/igmp.h>
36 #include <linux/netfilter_ipv4.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_ether.h>
43 #include <net/protocol.h>
44 #include <net/ip_tunnels.h>
46 #include <net/checksum.h>
47 #include <net/dsfield.h>
48 #include <net/inet_ecn.h>
50 #include <net/net_namespace.h>
51 #include <net/netns/generic.h>
52 #include <net/rtnetlink.h>
54 #include <net/dst_metadata.h>
55 #include <net/erspan.h>
57 #if IS_ENABLED(CONFIG_IPV6)
59 #include <net/ip6_fib.h>
60 #include <net/ip6_route.h>
64 #include "vport-netdev.h"
66 static int gre_tap_net_id __read_mostly
;
67 static int ipgre_net_id __read_mostly
;
68 static unsigned int erspan_net_id __read_mostly
;
69 static void erspan_build_header(struct sk_buff
*skb
,
71 bool truncate
, bool is_ipv4
);
73 static struct rtnl_link_ops ipgre_link_ops __read_mostly
;
75 #define ip_gre_calc_hlen rpl_ip_gre_calc_hlen
76 static int ip_gre_calc_hlen(__be16 o_flags
)
80 if (o_flags
& TUNNEL_CSUM
)
82 if (o_flags
& TUNNEL_KEY
)
84 if (o_flags
& TUNNEL_SEQ
)
89 /* Returns the least-significant 32 bits of a __be64. */
90 static __be32
tunnel_id_to_key(__be64 x
)
93 return (__force __be32
)x
;
95 return (__force __be32
)((__force u64
)x
>> 32);
99 #ifdef HAVE_DEMUX_PARSE_GRE_HEADER
100 /* Called with rcu_read_lock and BH disabled. */
101 static int gre_err(struct sk_buff
*skb
, u32 info
,
102 const struct tnl_ptk_info
*tpi
)
104 return PACKET_REJECT
;
107 static struct dst_ops md_dst_ops
= {
112 #define DST_METADATA 0x0080
115 static void rpl__metadata_dst_init(struct metadata_dst
*md_dst
,
116 enum metadata_type type
, u8 optslen
)
119 struct dst_entry
*dst
;
122 dst_init(dst
, &md_dst_ops
, NULL
, 1, DST_OBSOLETE_NONE
,
123 DST_METADATA
| DST_NOCOUNT
);
127 dst
->input
= dst_md_discard
;
128 dst
->output
= dst_md_discard_out
;
130 memset(dst
+ 1, 0, sizeof(*md_dst
) + optslen
- sizeof(*dst
));
134 static struct metadata_dst
*erspan_rpl_metadata_dst_alloc(u8 optslen
, enum metadata_type type
,
137 struct metadata_dst
*md_dst
;
139 md_dst
= kmalloc(sizeof(*md_dst
) + optslen
, flags
);
143 rpl__metadata_dst_init(md_dst
, type
, optslen
);
147 static inline struct metadata_dst
*rpl_tun_rx_dst(int md_size
)
149 struct metadata_dst
*tun_dst
;
151 tun_dst
= erspan_rpl_metadata_dst_alloc(md_size
, METADATA_IP_TUNNEL
, GFP_ATOMIC
);
155 tun_dst
->u
.tun_info
.options_len
= 0;
156 tun_dst
->u
.tun_info
.mode
= 0;
159 static inline struct metadata_dst
*rpl__ip_tun_set_dst(__be32 saddr
,
167 struct metadata_dst
*tun_dst
;
169 tun_dst
= rpl_tun_rx_dst(md_size
);
173 ip_tunnel_key_init(&tun_dst
->u
.tun_info
.key
,
174 saddr
, daddr
, tos
, ttl
,
175 0, 0, tp_dst
, tunnel_id
, flags
);
179 static inline struct metadata_dst
*rpl_ip_tun_rx_dst(struct sk_buff
*skb
,
184 const struct iphdr
*iph
= ip_hdr(skb
);
186 return rpl__ip_tun_set_dst(iph
->saddr
, iph
->daddr
, iph
->tos
, iph
->ttl
,
187 0, flags
, tunnel_id
, md_size
);
190 static int erspan_rcv(struct sk_buff
*skb
, struct tnl_ptk_info
*tpi
,
193 struct net
*net
= dev_net(skb
->dev
);
194 struct metadata_dst
*tun_dst
= NULL
;
195 struct erspan_base_hdr
*ershdr
;
196 struct erspan_metadata
*pkt_md
;
197 struct ip_tunnel_net
*itn
;
198 struct ip_tunnel
*tunnel
;
199 const struct iphdr
*iph
;
200 struct erspan_md2
*md2
;
204 itn
= net_generic(net
, erspan_net_id
);
205 len
= gre_hdr_len
+ sizeof(*ershdr
);
207 /* Check based hdr len */
208 if (unlikely(!pskb_may_pull(skb
, len
)))
209 return PACKET_REJECT
;
212 ershdr
= (struct erspan_base_hdr
*)(skb
->data
+ gre_hdr_len
);
215 /* The original GRE header does not have key field,
216 * Use ERSPAN 10-bit session ID as key.
218 tpi
->key
= cpu_to_be32(get_session_id(ershdr
));
219 /* OVS doesn't set tunnel key - so don't bother with it */
220 tunnel
= ip_tunnel_lookup(itn
, skb
->dev
->ifindex
,
222 iph
->saddr
, iph
->daddr
, 0);
225 len
= gre_hdr_len
+ erspan_hdr_len(ver
);
226 if (unlikely(!pskb_may_pull(skb
, len
)))
227 return PACKET_REJECT
;
229 ershdr
= (struct erspan_base_hdr
*)skb
->data
;
230 pkt_md
= (struct erspan_metadata
*)(ershdr
+ 1);
232 if (__iptunnel_pull_header(skb
,
238 if (tunnel
->collect_md
) {
239 struct ip_tunnel_info
*info
;
240 struct erspan_metadata
*md
;
244 tpi
->flags
|= TUNNEL_KEY
;
246 tun_id
= key32_to_tunnel_id(tpi
->key
);
248 tun_dst
= rpl_ip_tun_rx_dst(skb
, flags
, tun_id
, sizeof(*md
));
250 return PACKET_REJECT
;
252 md
= ip_tunnel_info_opts(&tun_dst
->u
.tun_info
);
255 memcpy(md2
, pkt_md
, ver
== 1 ? ERSPAN_V1_MDSIZE
:
258 info
= &tun_dst
->u
.tun_info
;
259 info
->key
.tun_flags
|= TUNNEL_ERSPAN_OPT
;
260 info
->options_len
= sizeof(*md
);
263 skb_reset_mac_header(skb
);
264 ovs_ip_tunnel_rcv(tunnel
->dev
, skb
, tun_dst
);
274 static int __ipgre_rcv(struct sk_buff
*skb
, const struct tnl_ptk_info
*tpi
,
275 struct ip_tunnel_net
*itn
, int hdr_len
, bool raw_proto
)
277 struct metadata_dst tun_dst
;
278 const struct iphdr
*iph
;
279 struct ip_tunnel
*tunnel
;
282 tunnel
= ip_tunnel_lookup(itn
, skb
->dev
->ifindex
, tpi
->flags
,
283 iph
->saddr
, iph
->daddr
, tpi
->key
);
286 if (__iptunnel_pull_header(skb
, hdr_len
, tpi
->proto
,
287 raw_proto
, false) < 0)
290 if (tunnel
->dev
->type
!= ARPHRD_NONE
)
291 skb_pop_mac_header(skb
);
293 skb_reset_mac_header(skb
);
294 if (tunnel
->collect_md
) {
298 flags
= tpi
->flags
& (TUNNEL_CSUM
| TUNNEL_KEY
);
299 tun_id
= key32_to_tunnel_id(tpi
->key
);
300 ovs_ip_tun_rx_dst(&tun_dst
, skb
, flags
, tun_id
, 0);
303 ovs_ip_tunnel_rcv(tunnel
->dev
, skb
, &tun_dst
);
314 static int ipgre_rcv(struct sk_buff
*skb
, const struct tnl_ptk_info
*tpi
,
317 struct net
*net
= dev_net(skb
->dev
);
318 struct ip_tunnel_net
*itn
;
321 if (tpi
->proto
== htons(ETH_P_TEB
))
322 itn
= net_generic(net
, gre_tap_net_id
);
324 itn
= net_generic(net
, ipgre_net_id
);
326 res
= __ipgre_rcv(skb
, tpi
, itn
, hdr_len
, false);
327 if (res
== PACKET_NEXT
&& tpi
->proto
== htons(ETH_P_TEB
)) {
328 /* ipgre tunnels in collect metadata mode should receive
329 * also ETH_P_TEB traffic.
331 itn
= net_generic(net
, ipgre_net_id
);
332 res
= __ipgre_rcv(skb
, tpi
, itn
, hdr_len
, true);
337 static void __gre_xmit(struct sk_buff
*skb
, struct net_device
*dev
,
338 const struct iphdr
*tnl_params
,
341 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
342 struct tnl_ptk_info tpi
;
344 tpi
.flags
= tunnel
->parms
.o_flags
;
346 tpi
.key
= tunnel
->parms
.o_key
;
347 if (tunnel
->parms
.o_flags
& TUNNEL_SEQ
)
349 tpi
.seq
= htonl(tunnel
->o_seqno
);
351 /* Push GRE header. */
352 gre_build_header(skb
, &tpi
, tunnel
->hlen
);
354 ip_tunnel_xmit(skb
, dev
, tnl_params
, tnl_params
->protocol
);
357 #ifndef HAVE_DEMUX_PARSE_GRE_HEADER
358 static int gre_rcv(struct sk_buff
*skb
, const struct tnl_ptk_info
*unused_tpi
)
360 struct tnl_ptk_info tpi
;
361 bool csum_err
= false;
364 hdr_len
= gre_parse_header(skb
, &tpi
, &csum_err
, htons(ETH_P_IP
), 0);
368 if (unlikely(tpi
.proto
== htons(ETH_P_ERSPAN
) ||
369 tpi
.proto
== htons(ETH_P_ERSPAN2
))) {
370 if (erspan_rcv(skb
, &tpi
, hdr_len
) == PACKET_RCVD
)
375 if (ipgre_rcv(skb
, &tpi
, hdr_len
) == PACKET_RCVD
)
383 static int gre_rcv(struct sk_buff
*skb
, const struct tnl_ptk_info
*__tpi
)
385 struct tnl_ptk_info tpi
= *__tpi
;
387 if (unlikely(tpi
.proto
== htons(ETH_P_ERSPAN
) ||
388 tpi
.proto
== htons(ETH_P_ERSPAN2
))) {
389 if (erspan_rcv(skb
, &tpi
, 0) == PACKET_RCVD
)
394 if (ipgre_rcv(skb
, &tpi
, 0) == PACKET_RCVD
)
403 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
404 /* gre_handle_offloads() has different return type on older kernsl. */
405 static void gre_nop_fix(struct sk_buff
*skb
) { }
407 static void gre_csum_fix(struct sk_buff
*skb
)
409 struct gre_base_hdr
*greh
;
411 int gre_offset
= skb_transport_offset(skb
);
413 greh
= (struct gre_base_hdr
*)skb_transport_header(skb
);
414 options
= ((__be32
*)greh
+ 1);
417 *(__sum16
*)options
= csum_fold(skb_checksum(skb
, gre_offset
,
418 skb
->len
- gre_offset
, 0));
421 static bool is_gre_gso(struct sk_buff
*skb
)
423 return skb_is_gso(skb
);
426 #define gre_handle_offloads rpl_gre_handle_offloads
427 static int rpl_gre_handle_offloads(struct sk_buff
*skb
, bool gre_csum
)
429 int type
= gre_csum
? SKB_GSO_GRE_CSUM
: SKB_GSO_GRE
;
430 gso_fix_segment_t fix_segment
;
433 fix_segment
= gre_csum_fix
;
435 fix_segment
= gre_nop_fix
;
437 return ovs_iptunnel_handle_offloads(skb
, type
, fix_segment
);
441 static bool is_gre_gso(struct sk_buff
*skb
)
443 return skb_shinfo(skb
)->gso_type
&
444 (SKB_GSO_GRE
| SKB_GSO_GRE_CSUM
);
447 static int rpl_gre_handle_offloads(struct sk_buff
*skb
, bool gre_csum
)
449 if (skb_is_gso(skb
) && skb_is_encapsulated(skb
))
452 #undef gre_handle_offloads
453 return gre_handle_offloads(skb
, gre_csum
);
457 static void build_header(struct sk_buff
*skb
, int hdr_len
, __be16 flags
,
458 __be16 proto
, __be32 key
, __be32 seq
)
460 struct gre_base_hdr
*greh
;
462 skb_push(skb
, hdr_len
);
464 skb_reset_transport_header(skb
);
465 greh
= (struct gre_base_hdr
*)skb
->data
;
466 greh
->flags
= tnl_flags_to_gre_flags(flags
);
467 greh
->protocol
= proto
;
469 if (flags
& (TUNNEL_KEY
| TUNNEL_CSUM
| TUNNEL_SEQ
)) {
470 __be32
*ptr
= (__be32
*)(((u8
*)greh
) + hdr_len
- 4);
472 if (flags
& TUNNEL_SEQ
) {
476 if (flags
& TUNNEL_KEY
) {
480 if (flags
& TUNNEL_CSUM
&& !is_gre_gso(skb
)) {
482 *(__sum16
*)ptr
= csum_fold(skb_checksum(skb
, 0,
486 ovs_skb_set_inner_protocol(skb
, proto
);
489 static struct rtable
*gre_get_rt(struct sk_buff
*skb
,
490 struct net_device
*dev
,
492 const struct ip_tunnel_key
*key
)
494 struct net
*net
= dev_net(dev
);
496 memset(fl
, 0, sizeof(*fl
));
497 fl
->daddr
= key
->u
.ipv4
.dst
;
498 fl
->saddr
= key
->u
.ipv4
.src
;
499 fl
->flowi4_tos
= RT_TOS(key
->tos
);
500 fl
->flowi4_mark
= skb
->mark
;
501 fl
->flowi4_proto
= IPPROTO_GRE
;
503 return ip_route_output_key(net
, fl
);
506 static struct rtable
*prepare_fb_xmit(struct sk_buff
*skb
,
507 struct net_device
*dev
,
511 struct ip_tunnel_info
*tun_info
;
512 const struct ip_tunnel_key
*key
;
513 struct rtable
*rt
= NULL
;
518 tun_info
= skb_tunnel_info(skb
);
519 key
= &tun_info
->key
;
520 use_cache
= ip_tunnel_dst_cache_usable(skb
, tun_info
);
523 rt
= dst_cache_get_ip4(&tun_info
->dst_cache
, &fl
->saddr
);
525 rt
= gre_get_rt(skb
, dev
, fl
, key
);
529 dst_cache_set_ip4(&tun_info
->dst_cache
, &rt
->dst
,
533 min_headroom
= LL_RESERVED_SPACE(rt
->dst
.dev
) + rt
->dst
.header_len
534 + tunnel_hlen
+ sizeof(struct iphdr
);
535 if (skb_headroom(skb
) < min_headroom
|| skb_header_cloned(skb
)) {
536 int head_delta
= SKB_DATA_ALIGN(min_headroom
-
539 err
= pskb_expand_head(skb
, max_t(int, head_delta
, 0),
550 dev
->stats
.tx_dropped
++;
554 netdev_tx_t
rpl_gre_fb_xmit(struct sk_buff
*skb
)
556 struct net_device
*dev
= skb
->dev
;
557 struct ip_tunnel_info
*tun_info
;
558 const struct ip_tunnel_key
*key
;
566 tun_info
= skb_tunnel_info(skb
);
567 if (unlikely(!tun_info
|| !(tun_info
->mode
& IP_TUNNEL_INFO_TX
) ||
568 ip_tunnel_info_af(tun_info
) != AF_INET
))
571 key
= &tun_info
->key
;
573 rt
= gre_get_rt(skb
, dev
, &fl
, key
);
577 tunnel_hlen
= ip_gre_calc_hlen(key
->tun_flags
);
579 min_headroom
= LL_RESERVED_SPACE(rt
->dst
.dev
) + rt
->dst
.header_len
580 + tunnel_hlen
+ sizeof(struct iphdr
)
581 + (skb_vlan_tag_present(skb
) ? VLAN_HLEN
: 0);
582 if (skb_headroom(skb
) < min_headroom
|| skb_header_cloned(skb
)) {
583 int head_delta
= SKB_DATA_ALIGN(min_headroom
-
586 err
= pskb_expand_head(skb
, max_t(int, head_delta
, 0),
592 skb
= vlan_hwaccel_push_inside(skb
);
593 if (unlikely(!skb
)) {
598 /* Push Tunnel header. */
599 err
= rpl_gre_handle_offloads(skb
, !!(tun_info
->key
.tun_flags
& TUNNEL_CSUM
));
603 flags
= tun_info
->key
.tun_flags
& (TUNNEL_CSUM
| TUNNEL_KEY
);
604 build_header(skb
, tunnel_hlen
, flags
, htons(ETH_P_TEB
),
605 tunnel_id_to_key(tun_info
->key
.tun_id
), 0);
607 df
= key
->tun_flags
& TUNNEL_DONT_FRAGMENT
? htons(IP_DF
) : 0;
608 iptunnel_xmit(skb
->sk
, rt
, skb
, fl
.saddr
, key
->u
.ipv4
.dst
, IPPROTO_GRE
,
609 key
->tos
, key
->ttl
, df
, false);
616 dev
->stats
.tx_dropped
++;
619 EXPORT_SYMBOL(rpl_gre_fb_xmit
);
621 static void erspan_fb_xmit(struct sk_buff
*skb
, struct net_device
*dev
,
624 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
625 struct ip_tunnel_info
*tun_info
;
626 const struct ip_tunnel_key
*key
;
627 struct erspan_metadata
*md
;
628 struct rtable
*rt
= NULL
;
629 struct tnl_ptk_info tpi
;
630 bool truncate
= false;
638 tun_info
= skb_tunnel_info(skb
);
639 if (unlikely(!tun_info
|| !(tun_info
->mode
& IP_TUNNEL_INFO_TX
) ||
640 ip_tunnel_info_af(tun_info
) != AF_INET
))
643 key
= &tun_info
->key
;
644 md
= ip_tunnel_info_opts(tun_info
);
648 /* ERSPAN has fixed 8 byte GRE header */
649 version
= md
->version
;
650 tunnel_hlen
= 8 + erspan_hdr_len(version
);
652 rt
= prepare_fb_xmit(skb
, dev
, &fl
, tunnel_hlen
);
656 if (gre_handle_offloads(skb
, false))
659 if (skb
->len
> dev
->mtu
+ dev
->hard_header_len
) {
660 pskb_trim(skb
, dev
->mtu
+ dev
->hard_header_len
);
664 nhoff
= skb_network_header(skb
) - skb_mac_header(skb
);
665 if (skb
->protocol
== htons(ETH_P_IP
) &&
666 (ntohs(ip_hdr(skb
)->tot_len
) > skb
->len
- nhoff
))
669 thoff
= skb_transport_header(skb
) - skb_mac_header(skb
);
670 if (skb
->protocol
== htons(ETH_P_IPV6
) &&
671 (ntohs(ipv6_hdr(skb
)->payload_len
) > skb
->len
- thoff
))
675 erspan_build_header(skb
, ntohl(tunnel_id_to_key32(key
->tun_id
)),
676 ntohl(md
->u
.index
), truncate
, true);
677 tpi
.hdr_len
= ERSPAN_V1_MDSIZE
;
678 tpi
.proto
= htons(ETH_P_ERSPAN
);
679 } else if (version
== 2) {
680 erspan_build_header_v2(skb
,
681 ntohl(tunnel_id_to_key32(key
->tun_id
)),
683 get_hwid(&md
->u
.md2
),
685 tpi
.hdr_len
= ERSPAN_V2_MDSIZE
;
686 tpi
.proto
= htons(ETH_P_ERSPAN2
);
691 tpi
.flags
= TUNNEL_SEQ
;
692 tpi
.key
= tunnel_id_to_key32(key
->tun_id
);
693 tpi
.seq
= htonl(tunnel
->o_seqno
++);
695 gre_build_header(skb
, &tpi
, 8);
697 df
= key
->tun_flags
& TUNNEL_DONT_FRAGMENT
? htons(IP_DF
) : 0;
699 iptunnel_xmit(skb
->sk
, rt
, skb
, fl
.saddr
, key
->u
.ipv4
.dst
, IPPROTO_GRE
,
700 key
->tos
, key
->ttl
, df
, false);
707 dev
->stats
.tx_dropped
++;
710 #define GRE_FEATURES (NETIF_F_SG | \
716 static void __gre_tunnel_init(struct net_device
*dev
)
718 struct ip_tunnel
*tunnel
;
721 tunnel
= netdev_priv(dev
);
722 tunnel
->tun_hlen
= ip_gre_calc_hlen(tunnel
->parms
.o_flags
);
723 tunnel
->parms
.iph
.protocol
= IPPROTO_GRE
;
725 tunnel
->hlen
= tunnel
->tun_hlen
+ tunnel
->encap_hlen
;
727 t_hlen
= tunnel
->hlen
+ sizeof(struct iphdr
);
729 dev
->features
|= GRE_FEATURES
;
730 dev
->hw_features
|= GRE_FEATURES
;
732 if (!(tunnel
->parms
.o_flags
& TUNNEL_SEQ
)) {
733 /* TCP offload with GRE SEQ is not supported, nor
734 * can we support 2 levels of outer headers requiring
737 if (!(tunnel
->parms
.o_flags
& TUNNEL_CSUM
) ||
738 (tunnel
->encap
.type
== TUNNEL_ENCAP_NONE
)) {
739 dev
->features
|= NETIF_F_GSO_SOFTWARE
;
740 dev
->hw_features
|= NETIF_F_GSO_SOFTWARE
;
743 /* Can use a lockless transmit, unless we generate
746 dev
->features
|= NETIF_F_LLTX
;
750 #ifdef HAVE_DEMUX_PARSE_GRE_HEADER
751 static struct gre_cisco_protocol ipgre_cisco_protocol
= {
753 .err_handler
= gre_err
,
758 static int __gre_rcv(struct sk_buff
*skb
)
760 return gre_rcv(skb
, NULL
);
763 void __gre_err(struct sk_buff
*skb
, u32 info
)
765 pr_warn("%s: GRE receive error\n", __func__
);
768 static const struct gre_protocol ipgre_protocol
= {
769 .handler
= __gre_rcv
,
770 .err_handler
= __gre_err
,
773 static int __net_init
ipgre_init_net(struct net
*net
)
775 return ip_tunnel_init_net(net
, ipgre_net_id
, &ipgre_link_ops
, NULL
);
778 static void __net_exit
ipgre_exit_net(struct net
*net
)
780 struct ip_tunnel_net
*itn
= net_generic(net
, ipgre_net_id
);
782 ip_tunnel_delete_net(itn
, &ipgre_link_ops
);
785 static struct pernet_operations ipgre_net_ops
= {
786 .init
= ipgre_init_net
,
787 .exit
= ipgre_exit_net
,
789 .size
= sizeof(struct ip_tunnel_net
),
792 static int ipgre_tunnel_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
800 if (data
[IFLA_GRE_IFLAGS
])
801 flags
|= nla_get_be16(data
[IFLA_GRE_IFLAGS
]);
802 if (data
[IFLA_GRE_OFLAGS
])
803 flags
|= nla_get_be16(data
[IFLA_GRE_OFLAGS
]);
804 if (flags
& (GRE_VERSION
|GRE_ROUTING
))
810 static int ipgre_tap_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
814 if (tb
[IFLA_ADDRESS
]) {
815 if (nla_len(tb
[IFLA_ADDRESS
]) != ETH_ALEN
)
817 if (!is_valid_ether_addr(nla_data(tb
[IFLA_ADDRESS
])))
818 return -EADDRNOTAVAIL
;
824 if (data
[IFLA_GRE_REMOTE
]) {
825 memcpy(&daddr
, nla_data(data
[IFLA_GRE_REMOTE
]), 4);
831 return ipgre_tunnel_validate(tb
, data
);
835 #ifndef HAVE_IFLA_GRE_ENCAP_DPORT
836 IFLA_GRE_ENCAP_TYPE
= IFLA_GRE_FLAGS
+ 1,
837 IFLA_GRE_ENCAP_FLAGS
,
838 IFLA_GRE_ENCAP_SPORT
,
839 IFLA_GRE_ENCAP_DPORT
,
841 #ifndef HAVE_IFLA_GRE_COLLECT_METADATA
842 IFLA_GRE_COLLECT_METADATA
= IFLA_GRE_ENCAP_DPORT
+ 1,
844 #ifndef HAVE_IFLA_GRE_IGNORE_DF
845 IFLA_GRE_IGNORE_DF
= IFLA_GRE_COLLECT_METADATA
+ 1,
847 #ifndef HAVE_IFLA_GRE_FWMARK
848 IFLA_GRE_FWMARK
= IFLA_GRE_IGNORE_DF
+ 1,
850 #ifndef HAVE_IFLA_GRE_ERSPAN_INDEX
851 IFLA_GRE_ERSPAN_INDEX
= IFLA_GRE_FWMARK
+ 1,
853 #ifndef HAVE_IFLA_GRE_ERSPAN_HWID
854 IFLA_GRE_ERSPAN_VER
= IFLA_GRE_ERSPAN_INDEX
+ 1,
856 IFLA_GRE_ERSPAN_HWID
,
860 #define RPL_IFLA_GRE_MAX (IFLA_GRE_ERSPAN_HWID + 1)
862 static int erspan_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
870 ret
= ipgre_tap_validate(tb
, data
);
874 /* ERSPAN should only have GRE sequence and key flag */
875 if (data
[IFLA_GRE_OFLAGS
])
876 flags
|= nla_get_be16(data
[IFLA_GRE_OFLAGS
]);
877 if (data
[IFLA_GRE_IFLAGS
])
878 flags
|= nla_get_be16(data
[IFLA_GRE_IFLAGS
]);
879 if (!data
[IFLA_GRE_COLLECT_METADATA
] &&
880 flags
!= (GRE_SEQ
| GRE_KEY
))
883 /* ERSPAN Session ID only has 10-bit. Since we reuse
884 * 32-bit key field as ID, check it's range.
886 if (data
[IFLA_GRE_OKEY
] &&
887 (ntohl(nla_get_be32(data
[IFLA_GRE_OKEY
])) & ~ID_MASK
))
893 static int ipgre_netlink_parms(struct net_device
*dev
,
894 struct nlattr
*data
[],
896 struct ip_tunnel_parm
*parms
)
898 struct ip_tunnel
*t
= netdev_priv(dev
);
900 memset(parms
, 0, sizeof(*parms
));
902 parms
->iph
.protocol
= IPPROTO_GRE
;
907 if (data
[IFLA_GRE_LINK
])
908 parms
->link
= nla_get_u32(data
[IFLA_GRE_LINK
]);
910 if (data
[IFLA_GRE_IFLAGS
])
911 parms
->i_flags
= gre_flags_to_tnl_flags(nla_get_be16(data
[IFLA_GRE_IFLAGS
]));
913 if (data
[IFLA_GRE_OFLAGS
])
914 parms
->o_flags
= gre_flags_to_tnl_flags(nla_get_be16(data
[IFLA_GRE_OFLAGS
]));
916 if (data
[IFLA_GRE_IKEY
])
917 parms
->i_key
= nla_get_be32(data
[IFLA_GRE_IKEY
]);
919 if (data
[IFLA_GRE_OKEY
])
920 parms
->o_key
= nla_get_be32(data
[IFLA_GRE_OKEY
]);
922 if (data
[IFLA_GRE_LOCAL
])
923 parms
->iph
.saddr
= nla_get_in_addr(data
[IFLA_GRE_LOCAL
]);
925 if (data
[IFLA_GRE_REMOTE
])
926 parms
->iph
.daddr
= nla_get_in_addr(data
[IFLA_GRE_REMOTE
]);
928 if (data
[IFLA_GRE_TTL
])
929 parms
->iph
.ttl
= nla_get_u8(data
[IFLA_GRE_TTL
]);
931 if (data
[IFLA_GRE_TOS
])
932 parms
->iph
.tos
= nla_get_u8(data
[IFLA_GRE_TOS
]);
934 if (!data
[IFLA_GRE_PMTUDISC
] || nla_get_u8(data
[IFLA_GRE_PMTUDISC
])) {
937 parms
->iph
.frag_off
= htons(IP_DF
);
940 if (data
[IFLA_GRE_COLLECT_METADATA
]) {
941 t
->collect_md
= true;
942 if (dev
->type
== ARPHRD_IPGRE
)
943 dev
->type
= ARPHRD_NONE
;
946 if (data
[IFLA_GRE_IGNORE_DF
]) {
947 if (nla_get_u8(data
[IFLA_GRE_IGNORE_DF
])
948 && (parms
->iph
.frag_off
& htons(IP_DF
)))
950 t
->ignore_df
= !!nla_get_u8(data
[IFLA_GRE_IGNORE_DF
]);
953 if (data
[IFLA_GRE_ERSPAN_INDEX
]) {
954 t
->index
= nla_get_u32(data
[IFLA_GRE_ERSPAN_INDEX
]);
956 if (t
->index
& ~INDEX_MASK
)
963 static int gre_tap_init(struct net_device
*dev
)
965 __gre_tunnel_init(dev
);
966 dev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
968 return ip_tunnel_init(dev
);
971 static netdev_tx_t
gre_dev_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
973 /* Drop All packets coming from networking stack. OVS-CB is
974 * not initialized for these packets.
978 dev
->stats
.tx_dropped
++;
982 static netdev_tx_t
erspan_xmit(struct sk_buff
*skb
,
983 struct net_device
*dev
)
985 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
986 bool truncate
= false;
988 if (tunnel
->collect_md
) {
989 erspan_fb_xmit(skb
, dev
, skb
->protocol
);
993 if (gre_handle_offloads(skb
, false))
996 if (skb_cow_head(skb
, dev
->needed_headroom
))
999 if (skb
->len
> dev
->mtu
+ dev
->hard_header_len
) {
1000 pskb_trim(skb
, dev
->mtu
+ dev
->hard_header_len
);
1004 /* Push ERSPAN header */
1005 if (tunnel
->erspan_ver
== 1)
1006 erspan_build_header(skb
, ntohl(tunnel
->parms
.o_key
),
1009 else if (tunnel
->erspan_ver
== 2)
1010 erspan_build_header_v2(skb
, ntohl(tunnel
->parms
.o_key
),
1011 tunnel
->dir
, tunnel
->hwid
,
1016 tunnel
->parms
.o_flags
&= ~TUNNEL_KEY
;
1017 __gre_xmit(skb
, dev
, &tunnel
->parms
.iph
, htons(ETH_P_ERSPAN
));
1018 return NETDEV_TX_OK
;
1022 dev
->stats
.tx_dropped
++;
1023 return NETDEV_TX_OK
;
1026 static netdev_tx_t
__erspan_fb_xmit(struct sk_buff
*skb
)
1028 erspan_fb_xmit(skb
, skb
->dev
, skb
->protocol
);
1029 return NETDEV_TX_OK
;
1032 int ovs_gre_fill_metadata_dst(struct net_device
*dev
, struct sk_buff
*skb
)
1034 struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
1038 if (ip_tunnel_info_af(info
) != AF_INET
)
1041 rt
= gre_get_rt(skb
, dev
, &fl4
, &info
->key
);
1046 info
->key
.u
.ipv4
.src
= fl4
.saddr
;
1049 EXPORT_SYMBOL_GPL(ovs_gre_fill_metadata_dst
);
1051 static int erspan_tunnel_init(struct net_device
*dev
)
1053 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
1056 tunnel
->tun_hlen
= 8;
1057 tunnel
->parms
.iph
.protocol
= IPPROTO_GRE
;
1058 tunnel
->hlen
= tunnel
->tun_hlen
+ tunnel
->encap_hlen
+
1059 erspan_hdr_len(tunnel
->erspan_ver
);
1060 t_hlen
= tunnel
->hlen
+ sizeof(struct iphdr
);
1062 dev
->needed_headroom
= LL_MAX_HEADER
+ t_hlen
+ 4;
1063 dev
->mtu
= ETH_DATA_LEN
- t_hlen
- 4;
1064 dev
->features
|= GRE_FEATURES
;
1065 dev
->hw_features
|= GRE_FEATURES
;
1066 dev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
1067 netif_keep_dst(dev
);
1069 return ip_tunnel_init(dev
);
1072 static int ipgre_header(struct sk_buff
*skb
, struct net_device
*dev
,
1073 unsigned short type
,
1074 const void *daddr
, const void *saddr
, unsigned int len
)
1076 struct ip_tunnel
*t
= netdev_priv(dev
);
1078 struct gre_base_hdr
*greh
;
1080 iph
= (struct iphdr
*)__skb_push(skb
, t
->hlen
+ sizeof(*iph
));
1081 greh
= (struct gre_base_hdr
*)(iph
+1);
1082 greh
->flags
= gre_tnl_flags_to_gre_flags(t
->parms
.o_flags
);
1083 greh
->protocol
= htons(type
);
1085 memcpy(iph
, &t
->parms
.iph
, sizeof(struct iphdr
));
1087 /* Set the source hardware address. */
1089 memcpy(&iph
->saddr
, saddr
, 4);
1091 memcpy(&iph
->daddr
, daddr
, 4);
1093 return t
->hlen
+ sizeof(*iph
);
1095 return -(t
->hlen
+ sizeof(*iph
));
1098 static int ipgre_header_parse(const struct sk_buff
*skb
, unsigned char *haddr
)
1100 const struct iphdr
*iph
= (const struct iphdr
*) skb_mac_header(skb
);
1101 memcpy(haddr
, &iph
->saddr
, 4);
1105 static const struct header_ops ipgre_header_ops
= {
1106 .create
= ipgre_header
,
1107 .parse
= ipgre_header_parse
,
1110 static int ipgre_tunnel_init(struct net_device
*dev
)
1112 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
1113 struct iphdr
*iph
= &tunnel
->parms
.iph
;
1115 __gre_tunnel_init(dev
);
1117 memcpy(dev
->dev_addr
, &iph
->saddr
, 4);
1118 memcpy(dev
->broadcast
, &iph
->daddr
, 4);
1120 dev
->flags
= IFF_NOARP
;
1121 netif_keep_dst(dev
);
1124 if (!tunnel
->collect_md
) {
1125 dev
->header_ops
= &ipgre_header_ops
;
1128 return ip_tunnel_init(dev
);
1131 static netdev_tx_t
ipgre_xmit(struct sk_buff
*skb
,
1132 struct net_device
*dev
)
1134 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
1135 const struct iphdr
*tnl_params
;
1137 if (tunnel
->collect_md
) {
1139 return NETDEV_TX_OK
;
1142 if (dev
->header_ops
) {
1143 /* Need space for new headers */
1144 if (skb_cow_head(skb
, dev
->needed_headroom
-
1145 (tunnel
->hlen
+ sizeof(struct iphdr
))))
1148 tnl_params
= (const struct iphdr
*)skb
->data
;
1150 /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
1153 skb_pull(skb
, tunnel
->hlen
+ sizeof(struct iphdr
));
1154 skb_reset_mac_header(skb
);
1156 if (skb_cow_head(skb
, dev
->needed_headroom
))
1159 tnl_params
= &tunnel
->parms
.iph
;
1162 if (gre_handle_offloads(skb
, !!(tunnel
->parms
.o_flags
& TUNNEL_CSUM
)))
1165 __gre_xmit(skb
, dev
, tnl_params
, skb
->protocol
);
1166 return NETDEV_TX_OK
;
1170 dev
->stats
.tx_dropped
++;
1171 return NETDEV_TX_OK
;
1174 static const struct net_device_ops ipgre_netdev_ops
= {
1175 .ndo_init
= ipgre_tunnel_init
,
1176 .ndo_uninit
= rpl_ip_tunnel_uninit
,
1177 .ndo_start_xmit
= ipgre_xmit
,
1178 .ndo_change_mtu
= ip_tunnel_change_mtu
,
1179 .ndo_get_stats64
= ip_tunnel_get_stats64
,
1180 #ifdef HAVE_GET_LINK_NET
1181 .ndo_get_iflink
= ip_tunnel_get_iflink
,
1185 static const struct net_device_ops gre_tap_netdev_ops
= {
1186 .ndo_init
= gre_tap_init
,
1187 .ndo_uninit
= rpl_ip_tunnel_uninit
,
1188 .ndo_start_xmit
= gre_dev_xmit
,
1189 .ndo_set_mac_address
= eth_mac_addr
,
1190 .ndo_validate_addr
= eth_validate_addr
,
1191 #ifdef HAVE_RHEL7_MAX_MTU
1192 .ndo_size
= sizeof(struct net_device_ops
),
1193 .extended
.ndo_change_mtu
= ip_tunnel_change_mtu
,
1195 .ndo_change_mtu
= ip_tunnel_change_mtu
,
1197 .ndo_get_stats64
= rpl_ip_tunnel_get_stats64
,
1198 #ifdef HAVE_NDO_GET_IFLINK
1199 .ndo_get_iflink
= rpl_ip_tunnel_get_iflink
,
1201 #ifdef HAVE_NDO_FILL_METADATA_DST
1202 .ndo_fill_metadata_dst
= gre_fill_metadata_dst
,
1206 static const struct net_device_ops erspan_netdev_ops
= {
1207 .ndo_init
= erspan_tunnel_init
,
1208 .ndo_uninit
= rpl_ip_tunnel_uninit
,
1209 .ndo_start_xmit
= erspan_xmit
,
1210 .ndo_set_mac_address
= eth_mac_addr
,
1211 .ndo_validate_addr
= eth_validate_addr
,
1212 .ndo_change_mtu
= ip_tunnel_change_mtu
,
1213 .ndo_get_stats64
= rpl_ip_tunnel_get_stats64
,
1214 #ifdef HAVE_NDO_GET_IFLINK
1215 .ndo_get_iflink
= rpl_ip_tunnel_get_iflink
,
1217 #ifdef HAVE_NDO_FILL_METADATA_DST
1218 .ndo_fill_metadata_dst
= gre_fill_metadata_dst
,
1222 static void ipgre_tunnel_setup(struct net_device
*dev
)
1224 dev
->netdev_ops
= &ipgre_netdev_ops
;
1225 dev
->type
= ARPHRD_IPGRE
;
1226 ip_tunnel_setup(dev
, ipgre_net_id
);
1229 static void ipgre_tap_setup(struct net_device
*dev
)
1232 #ifdef HAVE_NET_DEVICE_MAX_MTU
1235 dev
->netdev_ops
= &gre_tap_netdev_ops
;
1236 dev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
1237 ip_tunnel_setup(dev
, gre_tap_net_id
);
1240 static void erspan_setup(struct net_device
*dev
)
1242 eth_hw_addr_random(dev
);
1244 dev
->netdev_ops
= &erspan_netdev_ops
;
1245 dev
->priv_flags
&= ~IFF_TX_SKB_SHARING
;
1246 dev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
1247 ip_tunnel_setup(dev
, erspan_net_id
);
1250 static int ipgre_newlink(struct net
*src_net
, struct net_device
*dev
,
1251 struct nlattr
*tb
[], struct nlattr
*data
[])
1253 struct ip_tunnel_parm p
;
1256 ipgre_netlink_parms(dev
, data
, tb
, &p
);
1257 err
= ip_tunnel_newlink(dev
, tb
, &p
);
1262 static size_t ipgre_get_size(const struct net_device
*dev
)
1267 /* IFLA_GRE_IFLAGS */
1269 /* IFLA_GRE_OFLAGS */
1275 /* IFLA_GRE_LOCAL */
1277 /* IFLA_GRE_REMOTE */
1283 /* IFLA_GRE_PMTUDISC */
1285 /* IFLA_GRE_ENCAP_TYPE */
1287 /* IFLA_GRE_ENCAP_FLAGS */
1289 /* IFLA_GRE_ENCAP_SPORT */
1291 /* IFLA_GRE_ENCAP_DPORT */
1293 /* IFLA_GRE_COLLECT_METADATA */
1295 /* IFLA_GRE_ERSPAN_INDEX */
1297 /* IFLA_GRE_ERSPAN_VER */
1299 /* IFLA_GRE_ERSPAN_DIR */
1301 /* IFLA_GRE_ERSPAN_HWID */
1306 static int ipgre_fill_info(struct sk_buff
*skb
, const struct net_device
*dev
)
1308 struct ip_tunnel
*t
= netdev_priv(dev
);
1309 struct ip_tunnel_parm
*p
= &t
->parms
;
1311 if (nla_put_u32(skb
, IFLA_GRE_LINK
, p
->link
) ||
1312 nla_put_be16(skb
, IFLA_GRE_IFLAGS
, tnl_flags_to_gre_flags(p
->i_flags
)) ||
1313 nla_put_be16(skb
, IFLA_GRE_OFLAGS
, tnl_flags_to_gre_flags(p
->o_flags
)) ||
1314 nla_put_be32(skb
, IFLA_GRE_IKEY
, p
->i_key
) ||
1315 nla_put_be32(skb
, IFLA_GRE_OKEY
, p
->o_key
) ||
1316 nla_put_in_addr(skb
, IFLA_GRE_LOCAL
, p
->iph
.saddr
) ||
1317 nla_put_in_addr(skb
, IFLA_GRE_REMOTE
, p
->iph
.daddr
) ||
1318 nla_put_u8(skb
, IFLA_GRE_TTL
, p
->iph
.ttl
) ||
1319 nla_put_u8(skb
, IFLA_GRE_TOS
, p
->iph
.tos
) ||
1320 nla_put_u8(skb
, IFLA_GRE_PMTUDISC
,
1321 !!(p
->iph
.frag_off
& htons(IP_DF
))))
1322 goto nla_put_failure
;
1324 if (nla_put_u8(skb
, IFLA_GRE_ERSPAN_VER
, t
->erspan_ver
))
1325 goto nla_put_failure
;
1327 if (t
->erspan_ver
== 1) {
1328 if (nla_put_u32(skb
, IFLA_GRE_ERSPAN_INDEX
, t
->index
))
1329 goto nla_put_failure
;
1330 } else if (t
->erspan_ver
== 2) {
1331 if (nla_put_u8(skb
, IFLA_GRE_ERSPAN_DIR
, t
->dir
))
1332 goto nla_put_failure
;
1333 if (nla_put_u16(skb
, IFLA_GRE_ERSPAN_HWID
, t
->hwid
))
1334 goto nla_put_failure
;
1343 static const struct nla_policy ipgre_policy
[RPL_IFLA_GRE_MAX
+ 1] = {
1344 [IFLA_GRE_LINK
] = { .type
= NLA_U32
},
1345 [IFLA_GRE_IFLAGS
] = { .type
= NLA_U16
},
1346 [IFLA_GRE_OFLAGS
] = { .type
= NLA_U16
},
1347 [IFLA_GRE_IKEY
] = { .type
= NLA_U32
},
1348 [IFLA_GRE_OKEY
] = { .type
= NLA_U32
},
1349 [IFLA_GRE_LOCAL
] = { .len
= FIELD_SIZEOF(struct iphdr
, saddr
) },
1350 [IFLA_GRE_REMOTE
] = { .len
= FIELD_SIZEOF(struct iphdr
, daddr
) },
1351 [IFLA_GRE_TTL
] = { .type
= NLA_U8
},
1352 [IFLA_GRE_TOS
] = { .type
= NLA_U8
},
1353 [IFLA_GRE_PMTUDISC
] = { .type
= NLA_U8
},
1354 [IFLA_GRE_ERSPAN_INDEX
] = { .type
= NLA_U32
},
1355 [IFLA_GRE_ERSPAN_VER
] = { .type
= NLA_U8
},
1356 [IFLA_GRE_ERSPAN_DIR
] = { .type
= NLA_U8
},
1357 [IFLA_GRE_ERSPAN_HWID
] = { .type
= NLA_U16
},
1360 static struct rtnl_link_ops ipgre_link_ops __read_mostly
= {
1362 .maxtype
= RPL_IFLA_GRE_MAX
,
1363 .policy
= ipgre_policy
,
1364 .priv_size
= sizeof(struct ip_tunnel
),
1365 .setup
= ipgre_tunnel_setup
,
1366 .validate
= ipgre_tunnel_validate
,
1367 .newlink
= ipgre_newlink
,
1368 .dellink
= ip_tunnel_dellink
,
1369 .get_size
= ipgre_get_size
,
1370 .fill_info
= ipgre_fill_info
,
1371 #ifdef HAVE_GET_LINK_NET
1372 .get_link_net
= ip_tunnel_get_link_net
,
1376 static struct rtnl_link_ops ipgre_tap_ops __read_mostly
= {
1377 .kind
= "ovs_gretap",
1378 .maxtype
= RPL_IFLA_GRE_MAX
,
1379 .policy
= ipgre_policy
,
1380 .priv_size
= sizeof(struct ip_tunnel
),
1381 .setup
= ipgre_tap_setup
,
1382 .validate
= ipgre_tap_validate
,
1383 .newlink
= ipgre_newlink
,
1384 .dellink
= ip_tunnel_dellink
,
1385 .get_size
= ipgre_get_size
,
1386 .fill_info
= ipgre_fill_info
,
1387 #ifdef HAVE_GET_LINK_NET
1388 .get_link_net
= ip_tunnel_get_link_net
,
1392 static struct rtnl_link_ops erspan_link_ops __read_mostly
= {
1394 .maxtype
= RPL_IFLA_GRE_MAX
,
1395 .policy
= ipgre_policy
,
1396 .priv_size
= sizeof(struct ip_tunnel
),
1397 .setup
= erspan_setup
,
1398 .validate
= erspan_validate
,
1399 .newlink
= ipgre_newlink
,
1400 .dellink
= ip_tunnel_dellink
,
1401 .get_size
= ipgre_get_size
,
1402 .fill_info
= ipgre_fill_info
,
1403 #ifdef HAVE_GET_LINK_NET
1404 .get_link_net
= ip_tunnel_get_link_net
,
1408 struct net_device
*rpl_gretap_fb_dev_create(struct net
*net
, const char *name
,
1409 u8 name_assign_type
)
1411 struct nlattr
*tb
[IFLA_MAX
+ 1];
1412 struct net_device
*dev
;
1413 LIST_HEAD(list_kill
);
1414 struct ip_tunnel
*t
;
1417 memset(&tb
, 0, sizeof(tb
));
1419 dev
= rtnl_create_link(net
, (char *)name
, name_assign_type
,
1420 &ipgre_tap_ops
, tb
);
1424 t
= netdev_priv(dev
);
1425 t
->collect_md
= true;
1426 /* Configure flow based GRE device. */
1427 err
= ipgre_newlink(net
, dev
, tb
, NULL
);
1430 return ERR_PTR(err
);
1433 /* openvswitch users expect packet sizes to be unrestricted,
1434 * so set the largest MTU we can.
1436 err
= __ip_tunnel_change_mtu(dev
, IP_MAX_MTU
, false);
1442 ip_tunnel_dellink(dev
, &list_kill
);
1443 unregister_netdevice_many(&list_kill
);
1444 return ERR_PTR(err
);
1446 EXPORT_SYMBOL_GPL(rpl_gretap_fb_dev_create
);
1448 static int __net_init
erspan_init_net(struct net
*net
)
1450 return ip_tunnel_init_net(net
, erspan_net_id
,
1451 &erspan_link_ops
, NULL
);
1454 static void __net_exit
erspan_exit_net(struct net
*net
)
1456 struct ip_tunnel_net
*itn
= net_generic(net
, erspan_net_id
);
1458 ip_tunnel_delete_net(itn
, &erspan_link_ops
);
1461 static struct pernet_operations erspan_net_ops
= {
1462 .init
= erspan_init_net
,
1463 .exit
= erspan_exit_net
,
1464 .id
= &erspan_net_id
,
1465 .size
= sizeof(struct ip_tunnel_net
),
1468 static int __net_init
ipgre_tap_init_net(struct net
*net
)
1470 return ip_tunnel_init_net(net
, gre_tap_net_id
, &ipgre_tap_ops
, "gretap0");
1473 static void __net_exit
ipgre_tap_exit_net(struct net
*net
)
1475 struct ip_tunnel_net
*itn
= net_generic(net
, gre_tap_net_id
);
1477 ip_tunnel_delete_net(itn
, &ipgre_tap_ops
);
1480 static struct pernet_operations ipgre_tap_net_ops
= {
1481 .init
= ipgre_tap_init_net
,
1482 .exit
= ipgre_tap_exit_net
,
1483 .id
= &gre_tap_net_id
,
1484 .size
= sizeof(struct ip_tunnel_net
),
1487 static struct net_device
*erspan_fb_dev_create(struct net
*net
,
1489 u8 name_assign_type
)
1491 struct nlattr
*tb
[IFLA_MAX
+ 1];
1492 struct net_device
*dev
;
1493 LIST_HEAD(list_kill
);
1494 struct ip_tunnel
*t
;
1497 memset(&tb
, 0, sizeof(tb
));
1499 dev
= rtnl_create_link(net
, (char *)name
, name_assign_type
,
1500 &erspan_link_ops
, tb
);
1504 t
= netdev_priv(dev
);
1505 t
->collect_md
= true;
1506 /* Configure flow based GRE device. */
1507 err
= ipgre_newlink(net
, dev
, tb
, NULL
);
1510 return ERR_PTR(err
);
1513 /* openvswitch users expect packet sizes to be unrestricted,
1514 * so set the largest MTU we can.
1516 err
= __ip_tunnel_change_mtu(dev
, IP_MAX_MTU
, false);
1522 ip_tunnel_dellink(dev
, &list_kill
);
1523 unregister_netdevice_many(&list_kill
);
1524 return ERR_PTR(err
);
1527 static struct vport_ops ovs_erspan_vport_ops
;
1529 static struct vport
*erspan_tnl_create(const struct vport_parms
*parms
)
1531 struct net
*net
= ovs_dp_get_net(parms
->dp
);
1532 struct net_device
*dev
;
1533 struct vport
*vport
;
1536 vport
= ovs_vport_alloc(0, &ovs_erspan_vport_ops
, parms
);
1541 dev
= erspan_fb_dev_create(net
, parms
->name
, NET_NAME_USER
);
1544 ovs_vport_free(vport
);
1545 return ERR_CAST(dev
);
1548 err
= dev_change_flags(dev
, dev
->flags
| IFF_UP
);
1550 rtnl_delete_link(dev
);
1552 ovs_vport_free(vport
);
1553 return ERR_PTR(err
);
1560 static struct vport
*erspan_create(const struct vport_parms
*parms
)
1562 struct vport
*vport
;
1564 vport
= erspan_tnl_create(parms
);
1568 return ovs_netdev_link(vport
, parms
->name
);
1571 static struct vport_ops ovs_erspan_vport_ops
= {
1572 .type
= OVS_VPORT_TYPE_ERSPAN
,
1573 .create
= erspan_create
,
1574 .send
= __erspan_fb_xmit
,
1575 #ifndef USE_UPSTREAM_TUNNEL
1576 .fill_metadata_dst
= gre_fill_metadata_dst
,
1578 .destroy
= ovs_netdev_tunnel_destroy
,
1581 static struct vport_ops ovs_ipgre_vport_ops
;
1583 static struct vport
*ipgre_tnl_create(const struct vport_parms
*parms
)
1585 struct net
*net
= ovs_dp_get_net(parms
->dp
);
1586 struct net_device
*dev
;
1587 struct vport
*vport
;
1590 vport
= ovs_vport_alloc(0, &ovs_ipgre_vport_ops
, parms
);
1595 dev
= gretap_fb_dev_create(net
, parms
->name
, NET_NAME_USER
);
1598 ovs_vport_free(vport
);
1599 return ERR_CAST(dev
);
1602 err
= dev_change_flags(dev
, dev
->flags
| IFF_UP
);
1604 rtnl_delete_link(dev
);
1606 ovs_vport_free(vport
);
1607 return ERR_PTR(err
);
1614 static struct vport
*ipgre_create(const struct vport_parms
*parms
)
1616 struct vport
*vport
;
1618 vport
= ipgre_tnl_create(parms
);
1622 return ovs_netdev_link(vport
, parms
->name
);
1625 static struct vport_ops ovs_ipgre_vport_ops
= {
1626 .type
= OVS_VPORT_TYPE_GRE
,
1627 .create
= ipgre_create
,
1628 .send
= gre_fb_xmit
,
1629 #ifndef USE_UPSTREAM_TUNNEL
1630 .fill_metadata_dst
= gre_fill_metadata_dst
,
1632 .destroy
= ovs_netdev_tunnel_destroy
,
1635 int rpl_ipgre_init(void)
1639 err
= register_pernet_device(&ipgre_tap_net_ops
);
1641 goto pnet_tap_failed
;
1643 err
= register_pernet_device(&erspan_net_ops
);
1645 goto pnet_erspan_failed
;
1647 err
= register_pernet_device(&ipgre_net_ops
);
1649 goto pnet_ipgre_failed
;
1651 #ifdef HAVE_DEMUX_PARSE_GRE_HEADER
1652 err
= gre_cisco_register(&ipgre_cisco_protocol
);
1654 pr_info("%s: can't add protocol\n", __func__
);
1655 goto add_proto_failed
;
1658 err
= gre_add_protocol(&ipgre_protocol
, GREPROTO_CISCO
);
1660 pr_info("%s: can't add protocol\n", __func__
);
1661 goto add_proto_failed
;
1665 pr_info("GRE over IPv4 tunneling driver\n");
1667 ovs_vport_ops_register(&ovs_ipgre_vport_ops
);
1668 ovs_vport_ops_register(&ovs_erspan_vport_ops
);
1672 unregister_pernet_device(&ipgre_net_ops
);
1674 unregister_pernet_device(&erspan_net_ops
);
1676 unregister_pernet_device(&ipgre_tap_net_ops
);
1678 pr_err("Error while initializing GRE %d\n", err
);
1682 void rpl_ipgre_fini(void)
1684 ovs_vport_ops_unregister(&ovs_erspan_vport_ops
);
1685 ovs_vport_ops_unregister(&ovs_ipgre_vport_ops
);
1686 #ifdef HAVE_DEMUX_PARSE_GRE_HEADER
1687 gre_cisco_unregister(&ipgre_cisco_protocol
);
1689 gre_del_protocol(&ipgre_protocol
, GREPROTO_CISCO
);
1691 unregister_pernet_device(&ipgre_net_ops
);
1692 unregister_pernet_device(&erspan_net_ops
);
1693 unregister_pernet_device(&ipgre_tap_net_ops
);