#ifndef USE_UPSTREAM_TUNNEL
#if IS_ENABLED(CONFIG_NET_IPGRE_DEMUX)
+/* Table of registered GRE payload handlers, indexed by GRE version
+ * number from the header (e.g. GREPROTO_CISCO for version 0).
+ * RCU-protected: readers use rcu_dereference(), writers cmpxchg().
+ */
+static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
+
+/* Register @proto as the handler for GRE @version.
+ * Returns 0 on success, -EINVAL for an out-of-range version, or
+ * -EBUSY if another handler already occupies the slot.
+ */
+int rpl_gre_add_protocol(const struct gre_protocol *proto, u8 version)
+{
+ if (version >= GREPROTO_MAX)
+ return -EINVAL;
+
+ /* Atomically install only if the slot is currently empty (NULL). */
+ return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ?
+ 0 : -EBUSY;
+}
+EXPORT_SYMBOL_GPL(rpl_gre_add_protocol);
+
+/* Unregister @proto as the handler for GRE @version.
+ * Returns 0 on success, -EINVAL for an out-of-range version, or
+ * -EBUSY if @proto is not the currently registered handler.
+ * On success waits for an RCU grace period so in-flight readers in
+ * gre_rcv()/gre_err() finish before the caller may free @proto.
+ */
+int rpl_gre_del_protocol(const struct gre_protocol *proto, u8 version)
+{
+ int ret;
+
+ if (version >= GREPROTO_MAX)
+ return -EINVAL;
+
+ /* Atomically clear the slot only if it still holds @proto. */
+ ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?
+ 0 : -EBUSY;
+
+ if (ret)
+ return ret;
+
+ synchronize_rcu();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rpl_gre_del_protocol);
+
+/* IPPROTO_GRE receive hook: demultiplex the packet to the handler
+ * registered for its GRE version, or drop it.
+ * Returns the handler's verdict, or NET_RX_DROP on any failure
+ * (skb is consumed in the drop paths via kfree_skb()).
+ */
+static int gre_rcv(struct sk_buff *skb)
+{
+ const struct gre_protocol *proto;
+ u8 ver;
+ int ret;
+
+ /* Ensure 12 linear bytes — presumably the base GRE header plus the
+ * optional checksum/key words; TODO confirm against the handlers'
+ * own header parsing. */
+ if (!pskb_may_pull(skb, 12))
+ goto drop;
+
+ /* Version is carried in the second GRE header byte; 0x7f masks off
+ * the top flag bit of that byte. */
+ ver = skb->data[1]&0x7f;
+ if (ver >= GREPROTO_MAX)
+ goto drop;
+
+ rcu_read_lock();
+ proto = rcu_dereference(gre_proto[ver]);
+ if (!proto || !proto->handler)
+ goto drop_unlock;
+ ret = proto->handler(skb);
+ rcu_read_unlock();
+ return ret;
+
+drop_unlock:
+ rcu_read_unlock();
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+/* ICMP error callback for IPPROTO_GRE: forward the error to the
+ * err_handler registered for the GRE version found just past the
+ * inner IP header.
+ * NOTE(review): reads skb->data beyond the IP header without an
+ * explicit pskb_may_pull() here — presumably the ICMP error path has
+ * already linearized enough of the payload; confirm for all callers.
+ */
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+ const struct gre_protocol *proto;
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
+ /* Version byte sits at offset (IP header length) + 1; 0x7f masks
+ * off the top flag bit, mirroring gre_rcv(). */
+ u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;
+
+ if (ver >= GREPROTO_MAX)
+ return;
+
+ rcu_read_lock();
+ proto = rcu_dereference(gre_proto[ver]);
+ if (proto && proto->err_handler)
+ proto->err_handler(skb, info);
+ rcu_read_unlock();
+}
+
+/* Hook into the IPv4 stack for protocol number IPPROTO_GRE; all GRE
+ * traffic and related ICMP errors funnel through the demux above. */
+static const struct net_protocol net_gre_protocol = {
+ .handler = gre_rcv,
+ .err_handler = gre_err,
+ .netns_ok = 1,
+};
+
+/* Register the backported GRE demultiplexer with the IPv4 stack.
+ * Returns 0 on success, or -EAGAIN if IPPROTO_GRE is already claimed
+ * (e.g. by the in-kernel GRE demux).
+ */
+int rpl_gre_init(void)
+{
+ pr_info("GRE over IPv4 demultiplexor driver\n");
+
+ if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
+ pr_err("can't add protocol\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rpl_gre_init);
+
+/* Unhook the GRE demultiplexer from the IPv4 stack (undo rpl_gre_init). */
+void rpl_gre_exit(void)
+{
+ inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
+}
+EXPORT_SYMBOL_GPL(rpl_gre_exit);
+
#define ip_gre_calc_hlen rpl_ip_gre_calc_hlen
#define gre_calc_hlen rpl_ip_gre_calc_hlen
static int rpl_ip_gre_calc_hlen(__be16 o_flags)
return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
}
-#endif /* HAVE_DEMUX_PARSE_GRE_HEADER */
-
static struct gre_cisco_protocol __rcu *gre_cisco_proto;
static int gre_cisco_rcv(struct sk_buff *skb)
{
- struct tnl_ptk_info tpi;
struct gre_cisco_protocol *proto;
+ struct tnl_ptk_info tpi;
+ bool csum_err = false;
rcu_read_lock();
proto = rcu_dereference(gre_cisco_proto);
if (!proto)
goto drop;
-#ifdef HAVE_DEMUX_PARSE_GRE_HEADER
- {
- bool csum_err = false;
- if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+
+ if (parse_gre_header(skb, &tpi, &csum_err) < 0)
goto drop;
- }
-#endif
proto->handler(skb, &tpi);
rcu_read_unlock();
return 0;
int rpl_gre_cisco_unregister(struct gre_cisco_protocol *proto)
{
int ret;
-
ret = (cmpxchg((struct gre_cisco_protocol **)&gre_cisco_proto, proto, NULL) == proto) ?
0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(rpl_gre_cisco_unregister);
+#endif /* HAVE_DEMUX_PARSE_GRE_HEADER */
#endif /* !HAVE_GRE_CISCO_REGISTER */
#endif
{
}
+/* No-op stubs used when the backported GRE demux is not compiled in,
+ * so module init/exit can call gre_init()/gre_exit() unconditionally. */
+static inline int rpl_gre_init(void)
+{
+ return 0;
+}
+
+static inline void rpl_gre_exit(void)
+{
+}
+
#define gre_fb_xmit dev_queue_xmit
#ifdef CONFIG_INET
void rpl_ip6gre_fini(void);
int rpl_ip6_tunnel_init(void);
void rpl_ip6_tunnel_cleanup(void);
+int rpl_gre_init(void);
+void rpl_gre_exit(void);
#define gretap_fb_dev_create rpl_gretap_fb_dev_create
struct net_device *rpl_gretap_fb_dev_create(struct net *net, const char *name,
#define gre_fb_xmit rpl_gre_fb_xmit
netdev_tx_t rpl_gre_fb_xmit(struct sk_buff *skb);
+
+#define gre_add_protocol rpl_gre_add_protocol
+int rpl_gre_add_protocol(const struct gre_protocol *proto, u8 version);
+#define gre_del_protocol rpl_gre_del_protocol
+int rpl_gre_del_protocol(const struct gre_protocol *proto, u8 version);
#endif /* USE_UPSTREAM_TUNNEL */
#define ipgre_init rpl_ipgre_init
#define ip6gre_fini rpl_ip6gre_fini
#define ip6_tunnel_init rpl_ip6_tunnel_init
#define ip6_tunnel_cleanup rpl_ip6_tunnel_cleanup
+#define gre_init rpl_gre_init
+#define gre_exit rpl_gre_exit
#define gre_fill_metadata_dst ovs_gre_fill_metadata_dst
int ovs_gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
#endif
}
+#ifdef HAVE_DEMUX_PARSE_GRE_HEADER
/* Called with rcu_read_lock and BH disabled. */
static int gre_err(struct sk_buff *skb, u32 info,
const struct tnl_ptk_info *tpi)
{
return PACKET_REJECT;
}
-
+#endif
static struct dst_ops md_dst_ops = {
.family = AF_UNSPEC,
};
}
}
-static struct gre_cisco_protocol ipgre_protocol = {
+#ifdef HAVE_DEMUX_PARSE_GRE_HEADER
+static struct gre_cisco_protocol ipgre_cisco_protocol = {
.handler = gre_rcv,
.err_handler = gre_err,
.priority = 1,
};
+#endif
+
+/* Adapter: gre_protocol's .handler callback takes only the skb, while
+ * this file's gre_rcv() also takes a tnl_ptk_info. Passes NULL —
+ * presumably gre_rcv() parses the GRE header itself in that case;
+ * TODO confirm against gre_rcv()'s definition. */
+static int __gre_rcv(struct sk_buff *skb)
+{
+ return gre_rcv(skb, NULL);
+}
+
+void __gre_err(struct sk_buff *skb, u32 info)
+{
+ pr_warn("%s: GRE receive error\n", __func__);
+}
+
+/* Registration record handed to gre_add_protocol() for the classic
+ * (version-0 / GREPROTO_CISCO) GRE demux slot. */
+static const struct gre_protocol ipgre_protocol = {
+ .handler = __gre_rcv,
+ .err_handler = __gre_err,
+};
static int __net_init ipgre_init_net(struct net *net)
{
if (err < 0)
goto pnet_ipgre_failed;
- err = gre_cisco_register(&ipgre_protocol);
+#ifdef HAVE_DEMUX_PARSE_GRE_HEADER
+ err = gre_cisco_register(&ipgre_cisco_protocol);
if (err < 0) {
pr_info("%s: can't add protocol\n", __func__);
goto add_proto_failed;
}
+#else
+ err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
+ if (err < 0) {
+ pr_info("%s: can't add protocol\n", __func__);
+ goto add_proto_failed;
+ }
+#endif
pr_info("GRE over IPv4 tunneling driver\n");
{
ovs_vport_ops_unregister(&ovs_erspan_vport_ops);
ovs_vport_ops_unregister(&ovs_ipgre_vport_ops);
- gre_cisco_unregister(&ipgre_protocol);
+#ifdef HAVE_DEMUX_PARSE_GRE_HEADER
+ gre_cisco_unregister(&ipgre_cisco_protocol);
+#else
+ gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
+#endif
unregister_pernet_device(&ipgre_net_ops);
unregister_pernet_device(&erspan_net_ops);
unregister_pernet_device(&ipgre_tap_net_ops);
goto err_lisp;
err = ipgre_init();
if (err)
- goto err_gre;
+ goto err_ipgre;
err = ip6gre_init();
if (err)
goto err_ip6gre;
err = geneve_init_module();
if (err)
goto err_geneve;
-
err = vxlan_init_module();
if (err)
goto err_vxlan;
err = ovs_stt_init_module();
if (err)
goto err_stt;
- return 0;
+ err = gre_init();
+ if (err)
+ goto err_gre;
+ return 0;
+err_gre:
+ ovs_stt_cleanup_module();
err_stt:
vxlan_cleanup_module();
err_vxlan:
ip6gre_fini();
err_ip6gre:
ipgre_fini();
-err_gre:
+err_ipgre:
lisp_cleanup_module();
err_lisp:
kfree(dev_table);
*/
void ovs_vport_exit(void)
{
+ gre_exit();
ovs_stt_cleanup_module();
vxlan_cleanup_module();
geneve_cleanup_module();