#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include "vport-netdev.h"
static int gre_tap_net_id __read_mostly;
-static int ipgre_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
static void erspan_build_header(struct sk_buff *skb,
__be32 id, u32 index,
bool truncate, bool is_ipv4);
-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
+static bool ip_gre_loaded = false;
-#define ip_gre_calc_hlen rpl_ip_gre_calc_hlen
-static int ip_gre_calc_hlen(__be16 o_flags)
-{
- int addend = 4;
-
- if (o_flags & TUNNEL_CSUM)
- addend += 4;
- if (o_flags & TUNNEL_KEY)
- addend += 4;
- if (o_flags & TUNNEL_SEQ)
- addend += 4;
- return addend;
-}
-
-/* Returns the least-significant 32 bits of a __be64. */
-static __be32 tunnel_id_to_key(__be64 x)
-{
-#ifdef __BIG_ENDIAN
- return (__force __be32)x;
-#else
- return (__force __be32)((__force u64)x >> 32);
-#endif
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_err(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi)
-{
- return PACKET_REJECT;
-}
-
-static struct dst_ops md_dst_ops = {
+/* Normally in net/core/dst.c but moved here */
+struct dst_ops md_dst_ops = {
.family = AF_UNSPEC,
};
-#ifndef DST_METADATA
-#define DST_METADATA 0x0080
-#endif
-
-static void rpl__metadata_dst_init(struct metadata_dst *md_dst,
- enum metadata_type type, u8 optslen)
-
-{
- struct dst_entry *dst;
-
- dst = &md_dst->dst;
- dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
- DST_METADATA | DST_NOCOUNT);
-
-#if 0
- /* unused in OVS */
- dst->input = dst_md_discard;
- dst->output = dst_md_discard_out;
+#ifndef ip_gre_calc_hlen
+#define ip_gre_calc_hlen gre_calc_hlen
#endif
- memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
- md_dst->type = type;
-}
-
-static struct metadata_dst *erspan_rpl_metadata_dst_alloc(u8 optslen, enum metadata_type type,
- gfp_t flags)
-{
- struct metadata_dst *md_dst;
-
- md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
- if (!md_dst)
- return NULL;
-
- rpl__metadata_dst_init(md_dst, type, optslen);
-
- return md_dst;
-}
-static inline struct metadata_dst *rpl_tun_rx_dst(int md_size)
-{
- struct metadata_dst *tun_dst;
-
- tun_dst = erspan_rpl_metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
- if (!tun_dst)
- return NULL;
-
- tun_dst->u.tun_info.options_len = 0;
- tun_dst->u.tun_info.mode = 0;
- return tun_dst;
-}
-static inline struct metadata_dst *rpl__ip_tun_set_dst(__be32 saddr,
- __be32 daddr,
- __u8 tos, __u8 ttl,
- __be16 tp_dst,
- __be16 flags,
- __be64 tunnel_id,
- int md_size)
-{
- struct metadata_dst *tun_dst;
-
- tun_dst = rpl_tun_rx_dst(md_size);
- if (!tun_dst)
- return NULL;
-
- ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- saddr, daddr, tos, ttl,
- 0, 0, tp_dst, tunnel_id, flags);
- return tun_dst;
-}
-
-static inline struct metadata_dst *rpl_ip_tun_rx_dst(struct sk_buff *skb,
- __be16 flags,
- __be64 tunnel_id,
- int md_size)
-{
- const struct iphdr *iph = ip_hdr(skb);
-
- return rpl__ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
- 0, flags, tunnel_id, md_size);
-}
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
int gre_hdr_len)
* Use ERSPAN 10-bit session ID as key.
*/
tpi->key = cpu_to_be32(get_session_id(ershdr));
- /* OVS doesn't set tunnel key - so don't bother with it */
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags,
- iph->saddr, iph->daddr, 0);
+ iph->saddr, iph->daddr, tpi->key);
if (tunnel) {
len = gre_hdr_len + erspan_hdr_len(ver);
if (tpi->proto == htons(ETH_P_TEB))
itn = net_generic(net, gre_tap_net_id);
+ else if (tpi->proto == htons(ETH_P_ERSPAN) ||
+ tpi->proto == htons(ETH_P_ERSPAN2))
+ itn = net_generic(net, erspan_net_id);
else
- itn = net_generic(net, ipgre_net_id);
+ return PACKET_RCVD;
res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
- if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
- /* ipgre tunnels in collect metadata mode should receive
- * also ETH_P_TEB traffic.
- */
- itn = net_generic(net, ipgre_net_id);
- res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
- }
+
return res;
}
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
-#ifndef HAVE_DEMUX_PARSE_GRE_HEADER
static int gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *unused_tpi)
{
struct tnl_ptk_info tpi;
kfree_skb(skb);
return 0;
}
-#else
-static int gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *__tpi)
-{
- struct tnl_ptk_info tpi = *__tpi;
-
- if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
- tpi.proto == htons(ETH_P_ERSPAN2))) {
- if (erspan_rcv(skb, &tpi, 0) == PACKET_RCVD)
- return 0;
- goto drop;
- }
-
- if (ipgre_rcv(skb, &tpi, 0) == PACKET_RCVD)
- return 0;
-drop:
-
- kfree_skb(skb);
- return 0;
-}
-#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+#include "gso.h"
/* gre_handle_offloads() has different return type on older kernsl. */
static void gre_nop_fix(struct sk_buff *skb) { }
skb->len - gre_offset, 0));
}
-static bool is_gre_gso(struct sk_buff *skb)
-{
- return skb_is_gso(skb);
-}
-
#define gre_handle_offloads rpl_gre_handle_offloads
static int rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
return ovs_iptunnel_handle_offloads(skb, type, fix_segment);
}
#else
+static int gre_handle_offloads(struct sk_buff *skb, bool csum)
+{
+ return iptunnel_handle_offloads(skb, csum,
+ csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+}
+#endif
static bool is_gre_gso(struct sk_buff *skb)
{
(SKB_GSO_GRE | SKB_GSO_GRE_CSUM);
}
-static int rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
-{
- if (skb_is_gso(skb) && skb_is_encapsulated(skb))
- return -ENOSYS;
-
-#undef gre_handle_offloads
- return gre_handle_offloads(skb, gre_csum);
-}
-#endif
-
static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
__be16 proto, __be32 key, __be32 seq)
{
goto err_free_rt;
}
- skb = vlan_hwaccel_push_inside(skb);
- if (unlikely(!skb)) {
- err = -ENOMEM;
- goto err_free_rt;
+ if (skb_vlan_tag_present(skb)) {
+ skb = __vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb)) {
+ err = -ENOMEM;
+ goto err_free_rt;
+ }
}
/* Push Tunnel header. */
- err = rpl_gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
+ err = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
if (err)
goto err_free_rt;
flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
- tunnel_id_to_key(tun_info->key.tun_id), 0);
+ tunnel_id_to_key32(tun_info->key.tun_id), 0);
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
goto err_free_skb;
key = &tun_info->key;
+ if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+ goto err_free_rt;
md = ip_tunnel_info_opts(tun_info);
if (!md)
goto err_free_rt;
static void __gre_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel;
- int t_hlen;
tunnel = netdev_priv(dev);
tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
- t_hlen = tunnel->hlen + sizeof(struct iphdr);
-
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
}
}
-static struct gre_cisco_protocol ipgre_protocol = {
- .handler = gre_rcv,
- .err_handler = gre_err,
- .priority = 1,
-};
-
-static int __net_init ipgre_init_net(struct net *net)
+static int __gre_rcv(struct sk_buff *skb)
{
- return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
+ return gre_rcv(skb, NULL);
}
-static void __net_exit ipgre_exit_net(struct net *net)
+void __gre_err(struct sk_buff *skb, u32 info)
{
- struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
-
- ip_tunnel_delete_net(itn, &ipgre_link_ops);
+ pr_warn("%s: GRE receive error\n", __func__);
}
-static struct pernet_operations ipgre_net_ops = {
- .init = ipgre_init_net,
- .exit = ipgre_exit_net,
- .id = &ipgre_net_id,
- .size = sizeof(struct ip_tunnel_net),
+static const struct gre_protocol ipgre_protocol = {
+ .handler = __gre_rcv,
+ .err_handler = __gre_err,
};
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+#else
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
+#endif
{
__be16 flags;
return 0;
}
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+#else
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
+#endif
{
__be32 daddr;
}
out:
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+ return ipgre_tunnel_validate(tb, data, NULL);
+#else
return ipgre_tunnel_validate(tb, data);
+#endif
}
enum {
#define RPL_IFLA_GRE_MAX (IFLA_GRE_ERSPAN_HWID + 1)
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+#else
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[])
+#endif
{
__be16 flags = 0;
int ret;
if (!data)
return 0;
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+ ret = ipgre_tap_validate(tb, data, NULL);
+#else
ret = ipgre_tap_validate(tb, data);
+#endif
if (ret)
return ret;
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
tunnel->index,
truncate, true);
- else
+ else if (tunnel->erspan_ver == 2)
erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
tunnel->dir, tunnel->hwid,
truncate, true);
+ else
+ goto free_skb;
tunnel->parms.o_flags &= ~TUNNEL_KEY;
__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
static int erspan_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- int t_hlen;
tunnel->tun_hlen = 8;
tunnel->parms.iph.protocol = IPPROTO_GRE;
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
erspan_hdr_len(tunnel->erspan_ver);
- t_hlen = tunnel->hlen + sizeof(struct iphdr);
- dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
- dev->mtu = ETH_DATA_LEN - t_hlen - 4;
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
return ip_tunnel_init(dev);
}
-static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned int len)
-{
- struct ip_tunnel *t = netdev_priv(dev);
- struct iphdr *iph;
- struct gre_base_hdr *greh;
-
- iph = (struct iphdr *)__skb_push(skb, t->hlen + sizeof(*iph));
- greh = (struct gre_base_hdr *)(iph+1);
- greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
- greh->protocol = htons(type);
-
- memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
-
- /* Set the source hardware address. */
- if (saddr)
- memcpy(&iph->saddr, saddr, 4);
- if (daddr)
- memcpy(&iph->daddr, daddr, 4);
- if (iph->daddr)
- return t->hlen + sizeof(*iph);
-
- return -(t->hlen + sizeof(*iph));
-}
-
-static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
-{
- const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
- memcpy(haddr, &iph->saddr, 4);
- return 4;
-}
-
-static const struct header_ops ipgre_header_ops = {
- .create = ipgre_header,
- .parse = ipgre_header_parse,
-};
-
-static int ipgre_tunnel_init(struct net_device *dev)
-{
- struct ip_tunnel *tunnel = netdev_priv(dev);
- struct iphdr *iph = &tunnel->parms.iph;
-
- __gre_tunnel_init(dev);
-
- memcpy(dev->dev_addr, &iph->saddr, 4);
- memcpy(dev->broadcast, &iph->daddr, 4);
-
- dev->flags = IFF_NOARP;
- netif_keep_dst(dev);
- dev->addr_len = 4;
-
- if (!tunnel->collect_md) {
- dev->header_ops = &ipgre_header_ops;
- }
-
- return ip_tunnel_init(dev);
-}
-
-static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct ip_tunnel *tunnel = netdev_priv(dev);
- const struct iphdr *tnl_params;
-
- if (tunnel->collect_md) {
- gre_fb_xmit(skb);
- return NETDEV_TX_OK;
- }
-
- if (dev->header_ops) {
- /* Need space for new headers */
- if (skb_cow_head(skb, dev->needed_headroom -
- (tunnel->hlen + sizeof(struct iphdr))))
- goto free_skb;
-
- tnl_params = (const struct iphdr *)skb->data;
-
- /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
- * to gre header.
- */
- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
- skb_reset_mac_header(skb);
- } else {
- if (skb_cow_head(skb, dev->needed_headroom))
- goto free_skb;
-
- tnl_params = &tunnel->parms.iph;
- }
-
- if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
- goto free_skb;
-
- __gre_xmit(skb, dev, tnl_params, skb->protocol);
- return NETDEV_TX_OK;
-
-free_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops ipgre_netdev_ops = {
- .ndo_init = ipgre_tunnel_init,
- .ndo_uninit = rpl_ip_tunnel_uninit,
- .ndo_start_xmit = ipgre_xmit,
- .ndo_change_mtu = ip_tunnel_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
-#ifdef HAVE_GET_LINK_NET
- .ndo_get_iflink = ip_tunnel_get_iflink,
-#endif
-};
-
static const struct net_device_ops gre_tap_netdev_ops = {
.ndo_init = gre_tap_init,
.ndo_uninit = rpl_ip_tunnel_uninit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef HAVE_RHEL7_MAX_MTU
+ .ndo_size = sizeof(struct net_device_ops),
.extended.ndo_change_mtu = ip_tunnel_change_mtu,
#else
.ndo_change_mtu = ip_tunnel_change_mtu,
#endif
- .ndo_get_stats64 = rpl_ip_tunnel_get_stats64,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
#ifdef HAVE_NDO_GET_IFLINK
.ndo_get_iflink = rpl_ip_tunnel_get_iflink,
#endif
.ndo_start_xmit = erspan_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef HAVE_RHEL7_MAX_MTU
+ .ndo_size = sizeof(struct net_device_ops),
+ .extended.ndo_change_mtu = ip_tunnel_change_mtu,
+#else
.ndo_change_mtu = ip_tunnel_change_mtu,
- .ndo_get_stats64 = rpl_ip_tunnel_get_stats64,
+#endif
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
#ifdef HAVE_NDO_GET_IFLINK
.ndo_get_iflink = rpl_ip_tunnel_get_iflink,
#endif
#endif
};
-static void ipgre_tunnel_setup(struct net_device *dev)
-{
- dev->netdev_ops = &ipgre_netdev_ops;
- dev->type = ARPHRD_IPGRE;
- ip_tunnel_setup(dev, ipgre_net_id);
-}
-
static void ipgre_tap_setup(struct net_device *dev)
{
ether_setup(dev);
static void erspan_setup(struct net_device *dev)
{
+ struct ip_tunnel *t = netdev_priv(dev);
+
eth_hw_addr_random(dev);
ether_setup(dev);
+#ifdef HAVE_NET_DEVICE_MAX_MTU
+ dev->max_mtu = 0;
+#endif
dev->netdev_ops = &erspan_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip_tunnel_setup(dev, erspan_net_id);
+ t->erspan_ver = 1;
}
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+static int ipgre_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+#else
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
+#endif
{
struct ip_tunnel_parm p;
int err;
[IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};
-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
- .kind = "gre",
- .maxtype = RPL_IFLA_GRE_MAX,
- .policy = ipgre_policy,
- .priv_size = sizeof(struct ip_tunnel),
- .setup = ipgre_tunnel_setup,
- .validate = ipgre_tunnel_validate,
- .newlink = ipgre_newlink,
- .dellink = ip_tunnel_dellink,
- .get_size = ipgre_get_size,
- .fill_info = ipgre_fill_info,
-#ifdef HAVE_GET_LINK_NET
- .get_link_net = ip_tunnel_get_link_net,
-#endif
-};
-
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
.kind = "ovs_gretap",
.maxtype = RPL_IFLA_GRE_MAX,
t = netdev_priv(dev);
t->collect_md = true;
/* Configure flow based GRE device. */
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+ err = ipgre_newlink(net, dev, tb, NULL, NULL);
+#else
err = ipgre_newlink(net, dev, tb, NULL);
+#endif
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
static int __net_init ipgre_tap_init_net(struct net *net)
{
- return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
+ return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "ovs-gretap0");
}
static void __net_exit ipgre_tap_exit_net(struct net *net)
t = netdev_priv(dev);
t->collect_md = true;
/* Configure flow based GRE device. */
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+ err = ipgre_newlink(net, dev, tb, NULL, NULL);
+#else
err = ipgre_newlink(net, dev, tb, NULL);
+#endif
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
return ERR_CAST(dev);
}
- err = dev_change_flags(dev, dev->flags | IFF_UP);
+ err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
return ovs_netdev_link(vport, parms->name);
}
-#ifndef OVS_VPORT_TYPE_ERSPAN
-/* Until integration is done... */
-#define OVS_VPORT_TYPE_ERSPAN 107 /* ERSPAN tunnel. */
-#endif
static struct vport_ops ovs_erspan_vport_ops = {
.type = OVS_VPORT_TYPE_ERSPAN,
.create = erspan_create,
return ERR_CAST(dev);
}
- err = dev_change_flags(dev, dev->flags | IFF_UP);
+ err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
int err;
err = register_pernet_device(&ipgre_tap_net_ops);
- if (err < 0)
- goto pnet_tap_failed;
+ if (err < 0) {
+ if (err == -EEXIST)
+ goto ip_gre_loaded;
+ else
+ goto pnet_tap_failed;
+ }
err = register_pernet_device(&erspan_net_ops);
- if (err < 0)
- goto pnet_erspan_failed;
-
- err = register_pernet_device(&ipgre_net_ops);
- if (err < 0)
- goto pnet_ipgre_failed;
+ if (err < 0) {
+ if (err == -EEXIST)
+ goto ip_gre_loaded;
+ else
+ goto pnet_erspan_failed;
+ }
- err = gre_cisco_register(&ipgre_protocol);
+ err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
if (err < 0) {
pr_info("%s: can't add protocol\n", __func__);
- goto add_proto_failed;
+ if (err == -EBUSY) {
+ goto ip_gre_loaded;
+ } else {
+ goto add_proto_failed;
+ }
}
pr_info("GRE over IPv4 tunneling driver\n");
-
+ ovs_vport_ops_register(&ovs_ipgre_vport_ops);
+ ovs_vport_ops_register(&ovs_erspan_vport_ops);
+ return 0;
+
+ip_gre_loaded:
+	/* Since GRE only allows a single receiver to be registered,
+ * we skip here so only gre transmit works, see:
+ *
+ * commit 9f57c67c379d88a10e8ad676426fee5ae7341b14
+ * Author: Pravin B Shelar <pshelar@nicira.com>
+ * Date: Fri Aug 7 23:51:52 2015 -0700
+ * gre: Remove support for sharing GRE protocol hook
+ *
+ * OVS GRE receive part is disabled.
+ */
+ pr_info("GRE TX only over IPv4 tunneling driver\n");
+ ip_gre_loaded = true;
ovs_vport_ops_register(&ovs_ipgre_vport_ops);
ovs_vport_ops_register(&ovs_erspan_vport_ops);
return 0;
add_proto_failed:
- unregister_pernet_device(&ipgre_net_ops);
-pnet_ipgre_failed:
unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
unregister_pernet_device(&ipgre_tap_net_ops);
{
ovs_vport_ops_unregister(&ovs_erspan_vport_ops);
ovs_vport_ops_unregister(&ovs_ipgre_vport_ops);
- gre_cisco_unregister(&ipgre_protocol);
- unregister_pernet_device(&ipgre_net_ops);
- unregister_pernet_device(&erspan_net_ops);
- unregister_pernet_device(&ipgre_tap_net_ops);
+
+ if (!ip_gre_loaded) {
+ gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
+ unregister_pernet_device(&erspan_net_ops);
+ unregister_pernet_device(&ipgre_tap_net_ops);
+ }
}
#endif