#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
static unsigned int ip6gre_net_id __read_mostly;
+static bool ip6_gre_loaded = false;
struct ip6gre_net {
struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
struct ip6_tnl __rcu *collect_md_tun;
+ struct ip6_tnl __rcu *collect_md_tun_erspan;
struct net_device *fb_tunnel_dev;
};
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
+static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
#define gre_calc_hlen rpl_ip_gre_calc_hlen
static int rpl_ip_gre_calc_hlen(__be16 o_flags)
if (cand)
return cand;
- t = rcu_dereference(ign->collect_md_tun);
+ if (gre_proto == htons(ETH_P_ERSPAN) ||
+ gre_proto == htons(ETH_P_ERSPAN2))
+ t = rcu_dereference(ign->collect_md_tun_erspan);
+ else
+ t = rcu_dereference(ign->collect_md_tun);
+
if (t && t->dev->flags & IFF_UP)
return t;
return &ign->tunnels[prio][h];
}
+/* Publish @t as the per-netns flow-based (collect_md) GRE tunnel.
+ * No-op unless the tunnel was created with collect_md; uniqueness is
+ * enforced by the newlink path before this is called.
+ */
+static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun, t);
+}
+
+/* Publish @t as the per-netns flow-based (collect_md) ERSPAN tunnel,
+ * kept separate from the plain-GRE collect_md slot so both device types
+ * can coexist in one netns.
+ */
+static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
+}
+
+/* Clear the per-netns collect_md GRE shortcut if @t owns it (i.e. @t is
+ * a collect_md tunnel). Counterpart of ip6gre_tunnel_link_md().
+ */
+static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun, NULL);
+}
+
+/* Clear the per-netns collect_md ERSPAN shortcut if @t owns it.
+ * Counterpart of ip6erspan_tunnel_link_md().
+ */
+static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
+				       struct ip6_tnl *t)
+{
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
+}
+
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
const struct ip6_tnl *t)
{
{
struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
- if (t->parms.collect_md)
- rcu_assign_pointer(ign->collect_md_tun, t);
-
rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
}
struct ip6_tnl __rcu **tp;
struct ip6_tnl *iter;
- if (t->parms.collect_md)
- rcu_assign_pointer(ign->collect_md_tun, NULL);
-
for (tp = ip6gre_bucket(ign, t);
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
return NULL;
}
+/* ndo_uninit for ip6erspan devices: drop the ERSPAN collect_md shortcut
+ * and the hash-table entry, flush the cached dst, and release the
+ * device reference taken at init.
+ */
+static void ip6erspan_tunnel_uninit(struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
+
+	ip6erspan_tunnel_unlink_md(ign, t);
+	ip6gre_tunnel_unlink(ign, t);
+	dst_cache_reset(&t->dst_cache);
+	dev_put(dev);
+}
+
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
+ ip6gre_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
#else
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
+	/* Compat branch: this kernel's iptunnel_handle_offloads() takes the
+	 * checksum flag as an explicit argument in addition to the GSO type.
+	 */
-	return iptunnel_handle_offloads(skb,
+	return iptunnel_handle_offloads(skb, csum,
	csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
-
+}
#endif
static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
.ndo_uninit = ip6gre_tunnel_uninit,
.ndo_start_xmit = ip6gre_tunnel_xmit,
.ndo_do_ioctl = ip6gre_tunnel_ioctl,
+#ifdef HAVE_RHEL7_MAX_MTU
+ .ndo_size = sizeof(struct net_device_ops),
+ .extended.ndo_change_mtu = ip6_tnl_change_mtu,
+#else
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats64 = rpl_ip_tunnel_get_stats64,
+#endif
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
#ifdef HAVE_NDO_GET_IFLINK
.ndo_get_iflink = ip6_tnl_get_iflink,
#endif
.id = &ip6gre_net_id,
.size = sizeof(struct ip6gre_net),
};
-#ifdef HAVE_IP6GRE_EXTACK
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
static int rpl_ip6gre_tunnel_validate(struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
}
#define ip6gre_tunnel_validate rpl_ip6gre_tunnel_validate
-#ifdef HAVE_IP6GRE_EXTACK
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
static int rpl_ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
#else
}
out:
-#ifdef HAVE_IP6GRE_EXTACK
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
return ip6gre_tunnel_validate(tb, data, extack);
#else
return ip6gre_tunnel_validate(tb, data);
}
#define ip6gre_tap_validate rpl_ip6gre_tap_validate
-#ifdef HAVE_IP6GRE_EXTACK
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
static int rpl_ip6erspan_tap_validate(struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
if (!data)
return 0;
-#ifdef HAVE_IP6GRE_EXTACK
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
ret = ip6gre_tap_validate(tb, data, extack);
#else
ret = ip6gre_tap_validate(tb, data);
.ndo_start_xmit = ip6gre_tunnel_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef HAVE_RHEL7_MAX_MTU
+ .ndo_size = sizeof(struct net_device_ops),
+ .extended.ndo_change_mtu = ip6_tnl_change_mtu,
+#else
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats64 = rpl_ip_tunnel_get_stats64,
+#endif
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
#ifdef HAVE_NDO_GET_IFLINK
.ndo_get_iflink = ip6_tnl_get_iflink,
#endif
};
+/* Recompute the ERSPAN tunnel header sizes: fixed 8-byte GRE base header
+ * plus any encap overhead plus the version-dependent ERSPAN header
+ * (erspan_ver). Updates the device's hard_header_len as a side effect.
+ * Returns the total header length including the outer IPv6 header.
+ */
+static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
+{
+	int t_hlen;
+
+	tunnel->tun_hlen = 8;
+	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+		       erspan_hdr_len(tunnel->parms.erspan_ver);
+
+	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+	tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	return t_hlen;
+}
+
static int ip6erspan_tap_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
return ret;
}
- tunnel->tun_hlen = 8;
- tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
- erspan_hdr_len(tunnel->parms.erspan_ver);
- t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
-
- dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ t_hlen = ip6erspan_calc_hlen(tunnel);
dev->mtu = ETH_DATA_LEN - t_hlen;
if (dev->type == ARPHRD_ETHER)
dev->mtu -= ETH_HLEN;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
tunnel = netdev_priv(dev);
- ip6gre_tnl_link_config(tunnel, 1);
+ ip6erspan_tnl_link_config(tunnel, 1);
return 0;
}
static const struct net_device_ops ip6erspan_netdev_ops = {
.ndo_init = ip6erspan_tap_init,
- .ndo_uninit = ip6gre_tunnel_uninit,
+ .ndo_uninit = ip6erspan_tunnel_uninit,
.ndo_start_xmit = ip6erspan_tunnel_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef HAVE_RHEL7_MAX_MTU
+ .ndo_size = sizeof(struct net_device_ops),
+ .extended.ndo_change_mtu = ip6_tnl_change_mtu,
+#else
.ndo_change_mtu = ip6_tnl_change_mtu,
+#endif
.ndo_get_stats64 = ip_tunnel_get_stats64,
#ifdef HAVE_NDO_GET_IFLINK
.ndo_get_iflink = ip6_tnl_get_iflink,
return ret;
}
-#ifdef HAVE_IP6GRE_EXTACK
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
static int rpl_ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
#endif
{
struct ip6_tnl *nt;
- struct net *net = dev_net(dev);
- struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
struct ip_tunnel_encap ipencap;
int err;
return err;
}
- ip6gre_netlink_parms(data, &nt->parms);
-
- if (nt->parms.collect_md) {
- if (rtnl_dereference(ign->collect_md_tun))
- return -EEXIST;
- } else {
- if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
- return -EEXIST;
- }
-
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
}
#define ip6gre_newlink_common rpl_ip6gre_newlink_common
-#ifdef HAVE_IP6GRE_EXTACK
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
static int rpl_ip6gre_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
struct nlattr *tb[], struct nlattr *data[])
#endif
{
-
-#ifdef HAVE_IP6GRE_EXTACK
- int err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
-#else
- int err = ip6gre_newlink_common(src_net, dev, tb, data);
-#endif
struct ip6_tnl *nt = netdev_priv(dev);
struct net *net = dev_net(dev);
+ struct ip6gre_net *ign;
+ int err;
+
+ ip6gre_netlink_parms(data, &nt->parms);
+ ign = net_generic(net, ip6gre_net_id);
+
+ if (nt->parms.collect_md) {
+ if (rtnl_dereference(ign->collect_md_tun))
+ return -EEXIST;
+ } else {
+ if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+ return -EEXIST;
+ }
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+ err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
+#else
+ err = ip6gre_newlink_common(src_net, dev, tb, data);
+#endif
if (!err) {
ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+ ip6gre_tunnel_link_md(ign, nt);
ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
}
return err;
#define ip6gre_newlink rpl_ip6gre_newlink
-#ifdef HAVE_IP6GRE_EXTACK
-static int rpl_ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
-				 struct nlattr *data[],
-				 struct netlink_ext_ack *extack)
+/* Shared changelink step for ip6gre and ip6erspan: set up encap options,
+ * parse the netlink attributes into *p_p and locate the tunnel to
+ * reconfigure. Returns that tunnel or an ERR_PTR() on failure; the
+ * callers handle the collect_md and hash-table (un)linking.
+ */
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+static struct ip6_tnl *
+rpl_ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
+			     struct nlattr *data[], struct __ip6_tnl_parm *p_p,
+			     struct netlink_ext_ack *extack)
#else
-static int rpl_ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
-				 struct nlattr *data[])
+static struct ip6_tnl *
+rpl_ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
+			     struct nlattr *data[], struct __ip6_tnl_parm *p_p)
#endif
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
-	struct __ip6_tnl_parm p;
	struct ip_tunnel_encap ipencap;
	/* The fallback device cannot be reconfigured. */
	if (dev == ign->fb_tunnel_dev)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);
		if (err < 0)
-			return err;
+			return ERR_PTR(err);
	}
-	ip6gre_netlink_parms(data, &p);
+	ip6gre_netlink_parms(data, p_p);
-	t = ip6gre_tunnel_locate(net, &p, 0);
+	t = ip6gre_tunnel_locate(net, p_p, 0);
	if (t) {
		/* Parms match an existing tunnel on a different device. */
		if (t->dev != dev)
-			return -EEXIST;
+			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}
+	return t;
+}
+#define ip6gre_changelink_common rpl_ip6gre_changelink_common
+
+/* GRE changelink: resolve the target tunnel via the common helper, then
+ * unlink (collect_md shortcut and hash table), apply the new parms, and
+ * relink under the possibly-changed keys.
+ */
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+static int rpl_ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+				 struct nlattr *data[],
+				 struct netlink_ext_ack *extack)
+#else
+static int rpl_ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+				 struct nlattr *data[])
+#endif
+{
+	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+	struct __ip6_tnl_parm p;
+	struct ip6_tnl *t;
+
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
+#else
+	t = ip6gre_changelink_common(dev, tb, data, &p);
+#endif
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+
+	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}
netif_keep_dst(dev);
}
+/* ERSPAN newlink: parse parms up front so duplicate checks can run before
+ * the common creation path. At most one collect_md ERSPAN tunnel may
+ * exist per netns; non-collect_md tunnels must not collide with an
+ * existing tunnel of the same parms/type. On success, configure the
+ * ERSPAN-specific header sizes and publish the tunnel.
+ */
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+static int rpl_ip6erspan_newlink(struct net *src_net, struct net_device *dev,
+				 struct nlattr *tb[], struct nlattr *data[],
+				 struct netlink_ext_ack *extack)
+#else
+static int rpl_ip6erspan_newlink(struct net *src_net, struct net_device *dev,
+				 struct nlattr *tb[], struct nlattr *data[])
+#endif
+{
+	struct ip6_tnl *nt = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct ip6gre_net *ign;
+	int err;
+
+	ip6gre_netlink_parms(data, &nt->parms);
+	ign = net_generic(net, ip6gre_net_id);
+
+	if (nt->parms.collect_md) {
+		if (rtnl_dereference(ign->collect_md_tun_erspan))
+			return -EEXIST;
+	} else {
+		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+			return -EEXIST;
+	}
+
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
+#else
+	err = ip6gre_newlink_common(src_net, dev, tb, data);
+#endif
+	if (!err) {
+		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
+		ip6erspan_tunnel_link_md(ign, nt);
+		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
+	}
+	return err;
+}
+#define ip6erspan_newlink rpl_ip6erspan_newlink
+
+/* ERSPAN counterpart of ip6gre_tnl_link_config(): common link setup plus
+ * route/MTU configuration using the ERSPAN header length calculation.
+ */
+static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+{
+	ip6gre_tnl_link_config_common(t);
+	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
+}
+
+/* Apply new parms @p to ERSPAN tunnel @t and redo its link config with
+ * ERSPAN header sizing. Always returns 0.
+ */
+static int ip6erspan_tnl_change(struct ip6_tnl *t,
+				const struct __ip6_tnl_parm *p, int set_mtu)
+{
+	ip6gre_tnl_copy_tnl_parm(t, p);
+	ip6erspan_tnl_link_config(t, set_mtu);
+	return 0;
+}
+
+/* ERSPAN changelink: same unlink/change/relink sequence as the GRE
+ * variant, but using the ERSPAN collect_md shortcut and the ERSPAN
+ * header-size-aware parameter update.
+ */
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+static int rpl_ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
+				    struct nlattr *data[],
+				    struct netlink_ext_ack *extack)
+#else
+static int rpl_ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
+				    struct nlattr *data[])
+#endif
+{
+	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+	struct __ip6_tnl_parm p;
+	struct ip6_tnl *t;
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
+#else
+	t = ip6gre_changelink_common(dev, tb, data, &p);
+#endif
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+
+	ip6gre_tunnel_unlink_md(ign, t);
+	ip6gre_tunnel_unlink(ign, t);
+	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
+	ip6erspan_tunnel_link_md(ign, t);
+	ip6gre_tunnel_link(ign, t);
+	return 0;
+}
+#define ip6erspan_changelink rpl_ip6erspan_changelink
+
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
.kind = "ip6gre",
.maxtype = RPL_IFLA_GRE_MAX,
.priv_size = sizeof(struct ip6_tnl),
.setup = ip6erspan_tap_setup,
.validate = ip6erspan_tap_validate,
- .newlink = ip6gre_newlink,
- .changelink = ip6gre_changelink,
+ .newlink = ip6erspan_newlink,
+ .changelink = ip6erspan_changelink,
.dellink = ip6gre_dellink,
.get_size = ip6gre_get_size,
.fill_info = ip6gre_fill_info,
t = netdev_priv(dev);
t->parms.collect_md = true;
- err = ip6gre_newlink(net, dev, tb, NULL);
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+ err = ip6erspan_newlink(net, dev, tb, NULL, NULL);
+#else
+ err = ip6erspan_newlink(net, dev, tb, NULL);
+#endif
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
t = netdev_priv(dev);
t->parms.collect_md = true;
+#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
+ err = ip6gre_newlink(net, dev, tb, NULL, NULL);
+#else
err = ip6gre_newlink(net, dev, tb, NULL);
+#endif
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
int err;
err = register_pernet_device(&ip6gre_net_ops);
- if (err < 0)
- return err;
+ if (err < 0) {
+ if (err == -EEXIST)
+ goto ip6_gre_loaded;
+ else
+ goto out;
+ }
err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
if (err < 0) {
pr_info("%s: can't add protocol\n", __func__);
- goto add_proto_failed;
+ unregister_pernet_device(&ip6gre_net_ops);
+ goto ip6_gre_loaded;
}
pr_info("GRE over IPv6 tunneling driver\n");
ovs_vport_ops_register(&ovs_ip6gre_vport_ops);
ovs_vport_ops_register(&ovs_erspan6_vport_ops);
- return 0;
-out:
return err;
-add_proto_failed:
- unregister_pernet_device(&ip6gre_net_ops);
- goto out;
+ip6_gre_loaded:
+	/* Since IPv6 GRE only allows a single receiver to be registered,
+ * we skip here so only transmit works, see:
+ *
+ * commit f9242b6b28d61295f2bf7e8adfb1060b382e5381
+ * Author: David S. Miller <davem@davemloft.net>
+ * Date: Tue Jun 19 18:56:21 2012 -0700
+ *
+ * inet: Sanitize inet{,6} protocol demux.
+ *
+ * OVS GRE receive part is disabled.
+ */
+ pr_info("GRE TX only over IPv6 tunneling driver\n");
+ ip6_gre_loaded = true;
+ ovs_vport_ops_register(&ovs_ip6gre_vport_ops);
+ ovs_vport_ops_register(&ovs_erspan6_vport_ops);
+out:
+ return err;
}
void rpl_ip6gre_fini(void)
{
	ovs_vport_ops_unregister(&ovs_erspan6_vport_ops);
	ovs_vport_ops_unregister(&ovs_ip6gre_vport_ops);
-	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
-	unregister_pernet_device(&ip6gre_net_ops);
+	/* If init detected that the kernel's own ip6_gre module is loaded
+	 * (ip6_gre_loaded), the protocol handler and pernet device were
+	 * never registered by us, so they must not be torn down here.
+	 */
+	if (!ip6_gre_loaded) {
+		inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+		unregister_pernet_device(&ip6gre_net_ops);
+	}
}
#endif /* USE_UPSTREAM_TUNNEL */