/*
 * Source: git.proxmox.com mirror of Open vSwitch (mirror_ovs.git),
 * file: datapath/vport-gre.c
 * commit: "datapath: Reformat copyright messages."
 */
1 /*
2 * Copyright (c) 2007-2011 Nicira Networks.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/if.h>
22 #include <linux/skbuff.h>
23 #include <linux/ip.h>
24 #include <linux/if_tunnel.h>
25 #include <linux/if_vlan.h>
26 #include <linux/in.h>
27
28 #include <net/icmp.h>
29 #include <net/ip.h>
30 #include <net/protocol.h>
31
32 #include "tunnel.h"
33 #include "vport.h"
34 #include "vport-generic.h"
35
36 /*
37 * The GRE header is composed of a series of sections: a base and then a variable
38 * number of options.
39 */
40 #define GRE_HEADER_SECTION 4
41
/* Fixed 4-byte portion at the start of every GRE header (RFC 2784). */
struct gre_base_hdr {
	__be16 flags;		/* GRE_CSUM / GRE_KEY / GRE_SEQ / version bits */
	__be16 protocol;	/* EtherType of the payload, e.g. ETH_P_TEB */
};
46
47 static int gre_hdr_len(const struct tnl_mutable_config *mutable)
48 {
49 int len;
50
51 len = GRE_HEADER_SECTION;
52
53 if (mutable->flags & TNL_F_CSUM)
54 len += GRE_HEADER_SECTION;
55
56 if (mutable->out_key || mutable->flags & TNL_F_OUT_KEY_ACTION)
57 len += GRE_HEADER_SECTION;
58
59 return len;
60 }
61
62 /* Returns the least-significant 32 bits of a __be64. */
63 static __be32 be64_get_low32(__be64 x)
64 {
65 #ifdef __BIG_ENDIAN
66 return (__force __be32)x;
67 #else
68 return (__force __be32)((__force u64)x >> 32);
69 #endif
70 }
71
72 static void gre_build_header(const struct vport *vport,
73 const struct tnl_mutable_config *mutable,
74 void *header)
75 {
76 struct gre_base_hdr *greh = header;
77 __be32 *options = (__be32 *)(greh + 1);
78
79 greh->protocol = htons(ETH_P_TEB);
80 greh->flags = 0;
81
82 if (mutable->flags & TNL_F_CSUM) {
83 greh->flags |= GRE_CSUM;
84 *options = 0;
85 options++;
86 }
87
88 if (mutable->out_key || mutable->flags & TNL_F_OUT_KEY_ACTION)
89 greh->flags |= GRE_KEY;
90
91 if (mutable->out_key)
92 *options = be64_get_low32(mutable->out_key);
93 }
94
/*
 * Per-packet fixup of the GRE header built by gre_build_header():
 * writes the flow-derived key (if TNL_F_OUT_KEY_ACTION) and computes
 * the GRE checksum (if TNL_F_CSUM) over the GRE header and payload.
 * Returns the (possibly unmodified) skb.  The vport argument is unused.
 */
static struct sk_buff *gre_update_header(const struct vport *vport,
					 const struct tnl_mutable_config *mutable,
					 struct dst_entry *dst,
					 struct sk_buff *skb)
{
	/* Points at the last 4-byte option section of the GRE header. */
	__be32 *options = (__be32 *)(skb_network_header(skb) + mutable->tunnel_hlen
			- GRE_HEADER_SECTION);

	/* Work backwards over the options so the checksum is last. */
	if (mutable->flags & TNL_F_OUT_KEY_ACTION) {
		*options = be64_get_low32(OVS_CB(skb)->tun_id);
		options--;
	}

	if (mutable->flags & TNL_F_CSUM)
		/* GRE checksum covers everything from the GRE header on. */
		*(__sum16 *)options = csum_fold(skb_checksum(skb,
						skb_transport_offset(skb),
						skb->len - skb_transport_offset(skb),
						0));
	/*
	 * Allow our local IP stack to fragment the outer packet even if the
	 * DF bit is set as a last resort. We also need to force selection of
	 * an IP ID here because Linux will otherwise leave it at 0 if the
	 * packet originally had DF set.
	 */
	skb->local_df = 1;
	__ip_select_ident(ip_hdr(skb), dst, 0);

	return skb;
}
125
126 /* Zero-extends a __be32 into the least-significant 32 bits of a __be64. */
127 static __be64 be32_extend_to_be64(__be32 x)
128 {
129 #ifdef __BIG_ENDIAN
130 return (__force __be64)x;
131 #else
132 return (__force __be64)((__force u64)x << 32);
133 #endif
134 }
135
/*
 * Parses the GRE header that immediately follows the outer IP header.
 *
 * On success, returns the GRE header length in bytes, stores the raw GRE
 * flags in *flags, and stores the tunnel key (zero-extended to 64 bits,
 * or 0 if no key option is present) in *key.  Returns -EINVAL for GRE
 * versions/routing options we do not handle or for non-Ethernet payloads.
 * The caller must have made the header linear (pskb_may_pull).
 */
static int parse_header(struct iphdr *iph, __be16 *flags, __be64 *key)
{
	/* IP and ICMP protocol handlers check that the IHL is valid. */
	struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
	__be32 *options = (__be32 *)(greh + 1);
	int hdr_len;

	*flags = greh->flags;

	/* Only GRE version 0 without routing information is supported. */
	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	/* We only tunnel Ethernet frames (Transparent Ethernet Bridging). */
	if (unlikely(greh->protocol != htons(ETH_P_TEB)))
		return -EINVAL;

	hdr_len = GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM) {
		hdr_len += GRE_HEADER_SECTION;
		options++;	/* skip checksum + reserved word */
	}

	if (greh->flags & GRE_KEY) {
		hdr_len += GRE_HEADER_SECTION;

		*key = be32_extend_to_be64(*options);
		options++;
	} else
		*key = 0;

	/* Sequence numbers are ignored but still occupy header space. */
	if (unlikely(greh->flags & GRE_SEQ))
		hdr_len += GRE_HEADER_SECTION;

	return hdr_len;
}
171
/* Called with rcu_read_lock and BH disabled.
 *
 * ICMP error handler for GRE: on a "fragmentation needed" error for a
 * packet we previously tunneled, locates the owning vport and forwards
 * an equivalent "frag needed" indication for the inner flow via
 * tnl_frag_needed().  skb->data points at the returned copy of our
 * outer IP header inside the ICMP payload.
 */
static void gre_err(struct sk_buff *skb, u32 info)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

	struct iphdr *iph;
	__be16 flags;
	__be64 key;
	int tunnel_hdr_len, tot_hdr_len;
	unsigned int orig_mac_header;
	unsigned int orig_nw_header;

	/* Only path-MTU discovery errors are of interest here. */
	if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
		return;

	/*
	 * The minimum size packet that we would actually be able to process:
	 * encapsulating IP header, minimum GRE header, Ethernet header,
	 * inner IPv4 header.
	 */
	if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
				ETH_HLEN + sizeof(struct iphdr)))
		return;

	iph = (struct iphdr *)skb->data;
	if (ipv4_is_multicast(iph->daddr))
		return;

	tunnel_hdr_len = parse_header(iph, &flags, &key);
	if (tunnel_hdr_len < 0)
		return;

	/* The quoted packet was sent by us, so saddr/daddr are reversed
	 * relative to the usual receive-side lookup. */
	vport = tnl_find_port(iph->saddr, iph->daddr, key, TNL_T_PROTO_GRE,
			      &mutable);
	if (!vport)
		return;

	/*
	 * Packets received by this function were previously sent by us, so
	 * any comparisons should be to the output values, not the input.
	 * However, it's not really worth it to have a hash table based on
	 * output keys (especially since ICMP error handling of tunneled packets
	 * isn't that reliable anyways). Therefore, we do a lookup based on the
	 * out key as if it were the in key and then check to see if the input
	 * and output keys are the same.
	 */
	if (mutable->key.in_key != mutable->out_key)
		return;

	if (!!(mutable->flags & TNL_F_IN_KEY_MATCH) !=
	    !!(mutable->flags & TNL_F_OUT_KEY_ACTION))
		return;

	if ((mutable->flags & TNL_F_CSUM) && !(flags & GRE_CSUM))
		return;

	tunnel_hdr_len += iph->ihl << 2;

	/* Save the current header offsets so they can be restored at out:. */
	orig_mac_header = skb_mac_header(skb) - skb->data;
	orig_nw_header = skb_network_header(skb) - skb->data;
	skb_set_mac_header(skb, tunnel_hdr_len);

	tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

	skb->protocol = eth_hdr(skb)->h_proto;
	if (skb->protocol == htons(ETH_P_8021Q)) {
		tot_hdr_len += VLAN_HLEN;
		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	}

	skb_set_network_header(skb, tot_hdr_len);
	mtu -= tot_hdr_len;	/* MTU as seen by the inner packet */

	if (skb->protocol == htons(ETH_P_IP))
		tot_hdr_len += sizeof(struct iphdr);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		tot_hdr_len += sizeof(struct ipv6hdr);
#endif
	else
		goto out;

	if (!pskb_may_pull(skb, tot_hdr_len))
		goto out;

	if (skb->protocol == htons(ETH_P_IP)) {
		/* Clamp implausibly small advertised MTUs, but only when
		 * the inner packet was big enough that a legal MTU could
		 * actually have triggered this error. */
		if (mtu < IP_MIN_MTU) {
			if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
				mtu = IP_MIN_MTU;
			else
				goto out;
		}

	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU) {
			unsigned int packet_length = sizeof(struct ipv6hdr) +
				ntohs(ipv6_hdr(skb)->payload_len);

			/* payload_len == 0 is accepted as well (presumably
			 * to allow for jumbograms — verify). */
			if (packet_length >= IPV6_MIN_MTU
			    || ntohs(ipv6_hdr(skb)->payload_len) == 0)
				mtu = IPV6_MIN_MTU;
			else
				goto out;
		}
	}
#endif

	/* Temporarily strip the tunnel header so tnl_frag_needed() sees
	 * the inner frame, then put it back. */
	__skb_pull(skb, tunnel_hdr_len);
	tnl_frag_needed(vport, mutable, skb, mtu, key);
	__skb_push(skb, tunnel_hdr_len);

out:
	/* Restore the skb to the state we found it in. */
	skb_set_mac_header(skb, orig_mac_header);
	skb_set_network_header(skb, orig_nw_header);
	skb->protocol = htons(ETH_P_IP);
}
294
/*
 * Verifies the optional GRE checksum, if present.
 *
 * Returns true if the packet carries no GRE checksum or if the checksum
 * verifies.  May compute the full packet checksum and cache it in
 * skb->csum / skb->ip_summed as a side effect.
 */
static bool check_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
	__sum16 csum = 0;

	if (greh->flags & GRE_CSUM) {
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			csum = csum_fold(skb->csum);

			if (!csum)
				break;
			/* Fall through. */
			/* Hardware-provided checksum did not verify;
			 * recompute it in software before giving up. */

		case CHECKSUM_NONE:
			skb->csum = 0;
			csum = __skb_checksum_complete(skb);
			skb->ip_summed = CHECKSUM_COMPLETE;
			break;
		}
	}

	return (csum == 0);
}
320
/* Called with rcu_read_lock and BH disabled.
 *
 * Receive handler for IPPROTO_GRE: validates the GRE header and checksum,
 * looks up the owning vport, records the tunnel key for flow matching,
 * strips the GRE header, and hands the inner Ethernet frame to tnl_rcv().
 * Always consumes the skb; returns 0 in all cases.
 */
static int gre_rcv(struct sk_buff *skb)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	int hdr_len;
	struct iphdr *iph;
	__be16 flags;
	__be64 key;

	/* Enough for the base GRE header plus an inner Ethernet header;
	 * this also linearizes any option words that fit within ETH_HLEN. */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
		goto error;

	if (unlikely(!check_checksum(skb)))
		goto error;

	hdr_len = parse_header(ip_hdr(skb), &flags, &key);
	if (unlikely(hdr_len < 0))
		goto error;

	/* Now that the true GRE header length is known, make sure it and
	 * the inner Ethernet header are linear. */
	if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
		goto error;

	/* Re-fetch: pskb_may_pull may have reallocated the header. */
	iph = ip_hdr(skb);
	vport = tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_GRE,
			      &mutable);
	if (unlikely(!vport)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
		goto error;
	}

	/* Expose the key to flow lookup only for key-matching tunnels. */
	if (mutable->flags & TNL_F_IN_KEY_MATCH)
		OVS_CB(skb)->tun_id = key;
	else
		OVS_CB(skb)->tun_id = 0;

	__skb_pull(skb, hdr_len);
	skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

	tnl_rcv(vport, skb, iph->tos);
	return 0;

error:
	kfree_skb(skb);
	return 0;
}
367
/* Generic tunnel-layer hooks implemented by this GRE protocol module. */
static const struct tnl_ops gre_tnl_ops = {
	.tunnel_type	= TNL_T_PROTO_GRE,
	.ipproto	= IPPROTO_GRE,
	.hdr_len	= gre_hdr_len,
	.build_header	= gre_build_header,
	.update_header	= gre_update_header,
};
375
/* vport_ops.create hook: delegates to the generic tunnel vport code,
 * binding it to our GRE-specific tnl_ops. */
static struct vport *gre_create(const struct vport_parms *parms)
{
	return tnl_create(parms, &gre_vport_ops, &gre_tnl_ops);
}
380
/* Inet protocol hooks registered for IPPROTO_GRE.
 * NOTE(review): newer kernels' struct net_protocol also has fields such
 * as .netns_ok — confirm against the target kernel version. */
static const struct net_protocol gre_protocol_handlers = {
	.handler	=	gre_rcv,
	.err_handler	=	gre_err,
};
385
386 static int gre_init(void)
387 {
388 int err;
389
390 err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
391 if (err)
392 pr_warn("cannot register gre protocol handler\n");
393
394 return err;
395 }
396
/* vport_ops.exit hook: unregisters the GRE protocol handlers. */
static void gre_exit(void)
{
	inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}
401
/* Public vport operations for GRE tunnel ports; mostly delegated to the
 * generic tunnel (tnl_*) and generic vport (vport_gen_*) helpers. */
const struct vport_ops gre_vport_ops = {
	.type		= OVS_VPORT_TYPE_GRE,
	.flags		= VPORT_F_TUN_ID,	/* flows match on tunnel ID */
	.init		= gre_init,
	.exit		= gre_exit,
	.create		= gre_create,
	.destroy	= tnl_destroy,
	.set_addr	= tnl_set_addr,
	.get_name	= tnl_get_name,
	.get_addr	= tnl_get_addr,
	.get_options	= tnl_get_options,
	.set_options	= tnl_set_options,
	.get_dev_flags	= vport_gen_get_dev_flags,
	.is_running	= vport_gen_is_running,
	.get_operstate	= vport_gen_get_operstate,
	.send		= tnl_send,
};