/* datapath/vport-gre.c (mirror_ovs.git)
 * Commit context: "datapath: Wrap struct ovs_key_ipv4_tunnel in a new
 * structure."
 */
1 /*
2 * Copyright (c) 2007-2012 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #include <linux/kconfig.h>
20 #if IS_ENABLED(CONFIG_NET_IPGRE_DEMUX)
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/if.h>
24 #include <linux/skbuff.h>
25 #include <linux/ip.h>
26 #include <linux/if_tunnel.h>
27 #include <linux/if_vlan.h>
28 #include <linux/in.h>
29 #include <linux/in_route.h>
30 #include <linux/inetdevice.h>
31 #include <linux/jhash.h>
32 #include <linux/list.h>
33 #include <linux/kernel.h>
34 #include <linux/workqueue.h>
35 #include <linux/rculist.h>
36 #include <net/net_namespace.h>
37 #include <net/netns/generic.h>
38 #include <net/route.h>
39 #include <net/xfrm.h>
40
41 #include <net/icmp.h>
42 #include <net/ip.h>
43 #include <net/ip_tunnels.h>
44 #include <net/gre.h>
45 #include <net/protocol.h>
46
47 #include "datapath.h"
48 #include "vport.h"
49
50 /* Returns the least-significant 32 bits of a __be64. */
51 static __be32 be64_get_low32(__be64 x)
52 {
53 #ifdef __BIG_ENDIAN
54 return (__force __be32)x;
55 #else
56 return (__force __be32)((__force u64)x >> 32);
57 #endif
58 }
59
60 static __be16 filter_tnl_flags(__be16 flags)
61 {
62 return flags & (TUNNEL_CSUM | TUNNEL_KEY);
63 }
64
65 static struct sk_buff *__build_header(struct sk_buff *skb,
66 int tunnel_hlen,
67 __be32 seq, __be16 gre64_flag)
68 {
69 const struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
70 struct tnl_ptk_info tpi;
71
72 skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
73 if (IS_ERR(skb))
74 return NULL;
75
76 tpi.flags = filter_tnl_flags(tun_key->tun_flags) | gre64_flag;
77
78 tpi.proto = htons(ETH_P_TEB);
79 tpi.key = be64_get_low32(tun_key->tun_id);
80 tpi.seq = seq;
81 gre_build_header(skb, &tpi, tunnel_hlen);
82
83 return skb;
84 }
85
86 static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
87 {
88 #ifdef __BIG_ENDIAN
89 return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
90 #else
91 return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
92 #endif
93 }
94
95 /* Called with rcu_read_lock and BH disabled. */
96 static int gre_rcv(struct sk_buff *skb,
97 const struct tnl_ptk_info *tpi)
98 {
99 struct ovs_tunnel_info tun_info;
100 struct ovs_net *ovs_net;
101 struct vport *vport;
102 __be64 key;
103
104 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
105 if ((tpi->flags & TUNNEL_KEY) && (tpi->flags & TUNNEL_SEQ))
106 vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
107 else
108 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
109 if (unlikely(!vport))
110 return PACKET_REJECT;
111
112 key = key_to_tunnel_id(tpi->key, tpi->seq);
113 ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
114 filter_tnl_flags(tpi->flags));
115
116 ovs_vport_receive(vport, skb, &tun_info);
117 return PACKET_RCVD;
118 }
119
120 /* Called with rcu_read_lock and BH disabled. */
121 static int gre_err(struct sk_buff *skb, u32 info,
122 const struct tnl_ptk_info *tpi)
123 {
124 struct ovs_net *ovs_net;
125 struct vport *vport;
126
127 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
128 if ((tpi->flags & TUNNEL_KEY) && (tpi->flags & TUNNEL_SEQ))
129 vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
130 else
131 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
132
133 if (unlikely(!vport))
134 return PACKET_REJECT;
135 else
136 return PACKET_RCVD;
137 }
138
139 static int __send(struct vport *vport, struct sk_buff *skb,
140 int tunnel_hlen,
141 __be32 seq, __be16 gre64_flag)
142 {
143 struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
144 struct rtable *rt;
145 int min_headroom;
146 __be16 df;
147 __be32 saddr;
148 int err;
149
150 /* Route lookup */
151 saddr = tun_key->ipv4_src;
152 rt = find_route(ovs_dp_get_net(vport->dp),
153 &saddr, tun_key->ipv4_dst,
154 IPPROTO_GRE, tun_key->ipv4_tos,
155 skb->mark);
156 if (IS_ERR(rt)) {
157 err = PTR_ERR(rt);
158 goto error;
159 }
160
161 min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
162 + tunnel_hlen + sizeof(struct iphdr)
163 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
164
165 if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
166 int head_delta = SKB_DATA_ALIGN(min_headroom -
167 skb_headroom(skb) +
168 16);
169 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
170 0, GFP_ATOMIC);
171 if (unlikely(err))
172 goto err_free_rt;
173 }
174
175 if (vlan_tx_tag_present(skb)) {
176 if (unlikely(!__vlan_put_tag(skb,
177 skb->vlan_proto,
178 vlan_tx_tag_get(skb)))) {
179 err = -ENOMEM;
180 goto err_free_rt;
181 }
182 vlan_set_tci(skb, 0);
183 }
184
185 /* Push Tunnel header. */
186 skb = __build_header(skb, tunnel_hlen, seq, gre64_flag);
187 if (unlikely(!skb)) {
188 err = 0;
189 goto err_free_rt;
190 }
191
192 df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
193 skb->local_df = 1;
194
195 return iptunnel_xmit(rt, skb, saddr,
196 tun_key->ipv4_dst, IPPROTO_GRE,
197 tun_key->ipv4_tos,
198 tun_key->ipv4_ttl, df, false);
199 err_free_rt:
200 ip_rt_put(rt);
201 error:
202 return err;
203 }
204
/* Registration record for the kernel's shared GRE demultiplexer; both
 * the gre and gre64 vports funnel through this one handler pair.
 * NOTE(review): the meaning of .priority relative to other GRE demux
 * users is defined by gre_cisco_register() — confirm there.
 */
static struct gre_cisco_protocol gre_protocol = {
	.handler = gre_rcv,
	.err_handler = gre_err,
	.priority = 1,
};
210
211 static int gre_ports;
212 static int gre_init(void)
213 {
214 int err;
215
216 gre_ports++;
217 if (gre_ports > 1)
218 return 0;
219
220 err = gre_cisco_register(&gre_protocol);
221 if (err)
222 pr_warn("cannot register gre protocol handler\n");
223
224 return err;
225 }
226
227 static void gre_exit(void)
228 {
229 gre_ports--;
230 if (gre_ports > 0)
231 return;
232
233 gre_cisco_unregister(&gre_protocol);
234 }
235
/* The vport's name string is stored in its private area at creation. */
static const char *gre_get_name(const struct vport *vport)
{
	const char *name = vport_priv(vport);

	return name;
}
240
241 static struct vport *gre_create(const struct vport_parms *parms)
242 {
243 struct net *net = ovs_dp_get_net(parms->dp);
244 struct ovs_net *ovs_net;
245 struct vport *vport;
246 int err;
247
248 err = gre_init();
249 if (err)
250 return ERR_PTR(err);
251
252 ovs_net = net_generic(net, ovs_net_id);
253 if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
254 vport = ERR_PTR(-EEXIST);
255 goto error;
256 }
257
258 vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
259 if (IS_ERR(vport))
260 goto error;
261
262 strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
263 rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
264 return vport;
265
266 error:
267 gre_exit();
268 return vport;
269 }
270
271 static void gre_tnl_destroy(struct vport *vport)
272 {
273 struct net *net = ovs_dp_get_net(vport->dp);
274 struct ovs_net *ovs_net;
275
276 ovs_net = net_generic(net, ovs_net_id);
277
278 RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
279 ovs_vport_deferred_free(vport);
280 gre_exit();
281 }
282
283 static int gre_send(struct vport *vport, struct sk_buff *skb)
284 {
285 int hlen;
286
287 if (unlikely(!OVS_CB(skb)->tun_info))
288 return -EINVAL;
289
290 hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_info->tunnel.tun_flags);
291
292 return __send(vport, skb, hlen, 0, 0);
293 }
294
/* Entry points for the classic gre vport type. */
const struct vport_ops ovs_gre_vport_ops = {
	.type = OVS_VPORT_TYPE_GRE,
	.create = gre_create,
	.destroy = gre_tnl_destroy,
	.get_name = gre_get_name,
	.send = gre_send,
};
302
303 /* GRE64 vport. */
304 static struct vport *gre64_create(const struct vport_parms *parms)
305 {
306 struct net *net = ovs_dp_get_net(parms->dp);
307 struct ovs_net *ovs_net;
308 struct vport *vport;
309 int err;
310
311 err = gre_init();
312 if (err)
313 return ERR_PTR(err);
314
315 ovs_net = net_generic(net, ovs_net_id);
316 if (ovsl_dereference(ovs_net->vport_net.gre64_vport)) {
317 vport = ERR_PTR(-EEXIST);
318 goto error;
319 }
320
321 vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre64_vport_ops, parms);
322 if (IS_ERR(vport))
323 goto error;
324
325 strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
326 rcu_assign_pointer(ovs_net->vport_net.gre64_vport, vport);
327 return vport;
328 error:
329 gre_exit();
330 return vport;
331 }
332
333 static void gre64_tnl_destroy(struct vport *vport)
334 {
335 struct net *net = ovs_dp_get_net(vport->dp);
336 struct ovs_net *ovs_net;
337
338 ovs_net = net_generic(net, ovs_net_id);
339
340 rcu_assign_pointer(ovs_net->vport_net.gre64_vport, NULL);
341 ovs_vport_deferred_free(vport);
342 gre_exit();
343 }
344
345 static __be32 be64_get_high32(__be64 x)
346 {
347 #ifdef __BIG_ENDIAN
348 return (__force __be32)((__force u64)x >> 32);
349 #else
350 return (__force __be32)x;
351 #endif
352 }
353
354 static int gre64_send(struct vport *vport, struct sk_buff *skb)
355 {
356 int hlen = GRE_HEADER_SECTION + /* GRE Hdr */
357 GRE_HEADER_SECTION + /* GRE Key */
358 GRE_HEADER_SECTION; /* GRE SEQ */
359 __be32 seq;
360
361 if (unlikely(!OVS_CB(skb)->tun_info))
362 return -EINVAL;
363
364 if (OVS_CB(skb)->tun_info->tunnel.tun_flags & TUNNEL_CSUM)
365 hlen += GRE_HEADER_SECTION;
366
367 seq = be64_get_high32(OVS_CB(skb)->tun_info->tunnel.tun_id);
368 return __send(vport, skb, hlen, seq, (TUNNEL_KEY|TUNNEL_SEQ));
369 }
370
/* Entry points for the gre64 vport type (64-bit tunnel IDs). */
const struct vport_ops ovs_gre64_vport_ops = {
	.type = OVS_VPORT_TYPE_GRE64,
	.create = gre64_create,
	.destroy = gre64_tnl_destroy,
	.get_name = gre_get_name,
	.send = gre64_send,
};
378 #endif