/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
19 #include <linux/kconfig.h>
20 #if IS_ENABLED(CONFIG_NET_IPGRE_DEMUX)
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 #include <linux/skbuff.h>
26 #include <linux/if_tunnel.h>
27 #include <linux/if_vlan.h>
29 #include <linux/in_route.h>
30 #include <linux/inetdevice.h>
31 #include <linux/jhash.h>
32 #include <linux/list.h>
33 #include <linux/kernel.h>
34 #include <linux/workqueue.h>
35 #include <linux/rculist.h>
36 #include <net/net_namespace.h>
37 #include <net/netns/generic.h>
38 #include <net/route.h>
43 #include <net/ip_tunnels.h>
45 #include <net/protocol.h>
50 /* Returns the least-significant 32 bits of a __be64. */
51 static __be32
be64_get_low32(__be64 x
)
54 return (__force __be32
)x
;
56 return (__force __be32
)((__force u64
)x
>> 32);
60 static __be16
filter_tnl_flags(__be16 flags
)
62 return flags
& (TUNNEL_CSUM
| TUNNEL_KEY
);
65 static struct sk_buff
*__build_header(struct sk_buff
*skb
,
67 __be32 seq
, __be16 gre64_flag
)
69 const struct ovs_key_ipv4_tunnel
*tun_key
;
70 struct tnl_ptk_info tpi
;
72 tun_key
= &OVS_CB(skb
)->egress_tun_info
->tunnel
;
73 skb
= gre_handle_offloads(skb
, !!(tun_key
->tun_flags
& TUNNEL_CSUM
));
77 tpi
.flags
= filter_tnl_flags(tun_key
->tun_flags
) | gre64_flag
;
79 tpi
.proto
= htons(ETH_P_TEB
);
80 tpi
.key
= be64_get_low32(tun_key
->tun_id
);
82 gre_build_header(skb
, &tpi
, tunnel_hlen
);
87 static __be64
key_to_tunnel_id(__be32 key
, __be32 seq
)
90 return (__force __be64
)((__force u64
)seq
<< 32 | (__force u32
)key
);
92 return (__force __be64
)((__force u64
)key
<< 32 | (__force u32
)seq
);
96 /* Called with rcu_read_lock and BH disabled. */
97 static int gre_rcv(struct sk_buff
*skb
,
98 const struct tnl_ptk_info
*tpi
)
100 struct ovs_tunnel_info tun_info
;
101 struct ovs_net
*ovs_net
;
105 ovs_net
= net_generic(dev_net(skb
->dev
), ovs_net_id
);
106 if ((tpi
->flags
& TUNNEL_KEY
) && (tpi
->flags
& TUNNEL_SEQ
))
107 vport
= rcu_dereference(ovs_net
->vport_net
.gre64_vport
);
109 vport
= rcu_dereference(ovs_net
->vport_net
.gre_vport
);
110 if (unlikely(!vport
))
111 return PACKET_REJECT
;
113 key
= key_to_tunnel_id(tpi
->key
, tpi
->seq
);
114 ovs_flow_tun_info_init(&tun_info
, ip_hdr(skb
), key
,
115 filter_tnl_flags(tpi
->flags
), NULL
, 0);
117 ovs_vport_receive(vport
, skb
, &tun_info
);
121 /* Called with rcu_read_lock and BH disabled. */
122 static int gre_err(struct sk_buff
*skb
, u32 info
,
123 const struct tnl_ptk_info
*tpi
)
125 struct ovs_net
*ovs_net
;
128 ovs_net
= net_generic(dev_net(skb
->dev
), ovs_net_id
);
129 if ((tpi
->flags
& TUNNEL_KEY
) && (tpi
->flags
& TUNNEL_SEQ
))
130 vport
= rcu_dereference(ovs_net
->vport_net
.gre64_vport
);
132 vport
= rcu_dereference(ovs_net
->vport_net
.gre_vport
);
134 if (unlikely(!vport
))
135 return PACKET_REJECT
;
140 static int __send(struct vport
*vport
, struct sk_buff
*skb
,
142 __be32 seq
, __be16 gre64_flag
)
144 struct ovs_key_ipv4_tunnel
*tun_key
;
152 tun_key
= &OVS_CB(skb
)->egress_tun_info
->tunnel
;
153 saddr
= tun_key
->ipv4_src
;
154 rt
= find_route(ovs_dp_get_net(vport
->dp
),
155 &saddr
, tun_key
->ipv4_dst
,
156 IPPROTO_GRE
, tun_key
->ipv4_tos
,
163 min_headroom
= LL_RESERVED_SPACE(rt_dst(rt
).dev
) + rt_dst(rt
).header_len
164 + tunnel_hlen
+ sizeof(struct iphdr
)
165 + (vlan_tx_tag_present(skb
) ? VLAN_HLEN
: 0);
167 if (skb_headroom(skb
) < min_headroom
|| skb_header_cloned(skb
)) {
168 int head_delta
= SKB_DATA_ALIGN(min_headroom
-
171 err
= pskb_expand_head(skb
, max_t(int, head_delta
, 0),
177 if (vlan_tx_tag_present(skb
)) {
178 if (unlikely(!__vlan_put_tag(skb
,
180 vlan_tx_tag_get(skb
)))) {
184 vlan_set_tci(skb
, 0);
187 /* Push Tunnel header. */
188 skb
= __build_header(skb
, tunnel_hlen
, seq
, gre64_flag
);
189 if (unlikely(!skb
)) {
194 df
= tun_key
->tun_flags
& TUNNEL_DONT_FRAGMENT
? htons(IP_DF
) : 0;
197 return iptunnel_xmit(rt
, skb
, saddr
,
198 tun_key
->ipv4_dst
, IPPROTO_GRE
,
200 tun_key
->ipv4_ttl
, df
, false);
207 static struct gre_cisco_protocol gre_protocol
= {
209 .err_handler
= gre_err
,
213 static int gre_ports
;
214 static int gre_init(void)
222 err
= gre_cisco_register(&gre_protocol
);
224 pr_warn("cannot register gre protocol handler\n");
229 static void gre_exit(void)
235 gre_cisco_unregister(&gre_protocol
);
/* Returns the vport's name; the private area allocated by gre_create()/
 * gre64_create() holds the NUL-terminated interface name.
 */
static const char *gre_get_name(const struct vport *vport)
{
	return vport_priv(vport);
}
243 static struct vport
*gre_create(const struct vport_parms
*parms
)
245 struct net
*net
= ovs_dp_get_net(parms
->dp
);
246 struct ovs_net
*ovs_net
;
254 ovs_net
= net_generic(net
, ovs_net_id
);
255 if (ovsl_dereference(ovs_net
->vport_net
.gre_vport
)) {
256 vport
= ERR_PTR(-EEXIST
);
260 vport
= ovs_vport_alloc(IFNAMSIZ
, &ovs_gre_vport_ops
, parms
);
264 strncpy(vport_priv(vport
), parms
->name
, IFNAMSIZ
);
265 rcu_assign_pointer(ovs_net
->vport_net
.gre_vport
, vport
);
273 static void gre_tnl_destroy(struct vport
*vport
)
275 struct net
*net
= ovs_dp_get_net(vport
->dp
);
276 struct ovs_net
*ovs_net
;
278 ovs_net
= net_generic(net
, ovs_net_id
);
280 RCU_INIT_POINTER(ovs_net
->vport_net
.gre_vport
, NULL
);
281 ovs_vport_deferred_free(vport
);
285 static int gre_send(struct vport
*vport
, struct sk_buff
*skb
)
289 if (unlikely(!OVS_CB(skb
)->egress_tun_info
))
292 hlen
= ip_gre_calc_hlen(OVS_CB(skb
)->egress_tun_info
->tunnel
.tun_flags
);
294 return __send(vport
, skb
, hlen
, 0, 0);
297 const struct vport_ops ovs_gre_vport_ops
= {
298 .type
= OVS_VPORT_TYPE_GRE
,
299 .create
= gre_create
,
300 .destroy
= gre_tnl_destroy
,
301 .get_name
= gre_get_name
,
306 static struct vport
*gre64_create(const struct vport_parms
*parms
)
308 struct net
*net
= ovs_dp_get_net(parms
->dp
);
309 struct ovs_net
*ovs_net
;
317 ovs_net
= net_generic(net
, ovs_net_id
);
318 if (ovsl_dereference(ovs_net
->vport_net
.gre64_vport
)) {
319 vport
= ERR_PTR(-EEXIST
);
323 vport
= ovs_vport_alloc(IFNAMSIZ
, &ovs_gre64_vport_ops
, parms
);
327 strncpy(vport_priv(vport
), parms
->name
, IFNAMSIZ
);
328 rcu_assign_pointer(ovs_net
->vport_net
.gre64_vport
, vport
);
335 static void gre64_tnl_destroy(struct vport
*vport
)
337 struct net
*net
= ovs_dp_get_net(vport
->dp
);
338 struct ovs_net
*ovs_net
;
340 ovs_net
= net_generic(net
, ovs_net_id
);
342 rcu_assign_pointer(ovs_net
->vport_net
.gre64_vport
, NULL
);
343 ovs_vport_deferred_free(vport
);
347 static __be32
be64_get_high32(__be64 x
)
350 return (__force __be32
)((__force u64
)x
>> 32);
352 return (__force __be32
)x
;
356 static int gre64_send(struct vport
*vport
, struct sk_buff
*skb
)
358 int hlen
= GRE_HEADER_SECTION
+ /* GRE Hdr */
359 GRE_HEADER_SECTION
+ /* GRE Key */
360 GRE_HEADER_SECTION
; /* GRE SEQ */
363 if (unlikely(!OVS_CB(skb
)->egress_tun_info
))
366 if (OVS_CB(skb
)->egress_tun_info
->tunnel
.tun_flags
& TUNNEL_CSUM
)
367 hlen
+= GRE_HEADER_SECTION
;
369 seq
= be64_get_high32(OVS_CB(skb
)->egress_tun_info
->tunnel
.tun_id
);
370 return __send(vport
, skb
, hlen
, seq
, (TUNNEL_KEY
|TUNNEL_SEQ
));
373 const struct vport_ops ovs_gre64_vport_ops
= {
374 .type
= OVS_VPORT_TYPE_GRE64
,
375 .create
= gre64_create
,
376 .destroy
= gre64_tnl_destroy
,
377 .get_name
= gre_get_name
,