/*
 * datapath/vport-gre.c — GRE vport implementation for the Open vSwitch
 * datapath (source: mirror_ovs.git; commit "datapath: Add support for
 * tunnel fragmentation").
 */
1 /*
2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
4 *
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
7 */
8
9 #include <linux/if.h>
10 #include <linux/skbuff.h>
11 #include <linux/ip.h>
12 #include <linux/if_tunnel.h>
13 #include <linux/if_vlan.h>
14 #include <linux/in.h>
15
16 #include <net/icmp.h>
17 #include <net/ip.h>
18 #include <net/protocol.h>
19
20 #include "tunnel.h"
21 #include "vport.h"
22 #include "vport-generic.h"
23
/*
 * The GRE header is composed of a series of sections: a base and then a variable
 * number of options.  Each section (base, checksum, key, sequence number)
 * is 4 bytes long.
 */
#define GRE_HEADER_SECTION 4

/* Mandatory first section of every GRE header (RFC 2784). */
struct gre_base_hdr {
	__be16 flags;		/* GRE_CSUM / GRE_KEY / GRE_SEQ / version bits. */
	__be16 protocol;	/* EtherType of the encapsulated payload. */
};
34
35 static int gre_hdr_len(const struct tnl_port_config *port_config)
36 {
37 int len;
38
39 len = GRE_HEADER_SECTION;
40
41 if (port_config->flags & TNL_F_CSUM)
42 len += GRE_HEADER_SECTION;
43
44 if (port_config->out_key ||
45 port_config->flags & TNL_F_OUT_KEY_ACTION)
46 len += GRE_HEADER_SECTION;
47
48 return len;
49 }
50
/*
 * Fills in the GRE header on a packet being transmitted.  The generic
 * tunnel code has already reserved mutable->tunnel_hlen bytes of header
 * space; skb's transport header points at the GRE base header and its
 * network header at the outer IP header.
 *
 * Options are written back-to-front so that the checksum, which must cover
 * the completed GRE header and payload, is computed last.
 *
 * Returns @skb (the hook signature permits returning a replacement skb).
 */
static struct sk_buff *gre_build_header(struct sk_buff *skb,
					const struct vport *vport,
					const struct tnl_mutable_config *mutable,
					struct dst_entry *dst)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)skb_transport_header(skb);
	/* Point at the last 4-byte option section so we can fill backwards. */
	__be32 *options = (__be32 *)(skb_network_header(skb) + mutable->tunnel_hlen
				     - GRE_HEADER_SECTION);

	greh->protocol = htons(ETH_P_TEB);	/* Payload is an Ethernet frame. */
	greh->flags = 0;

	/* Work backwards over the options so the checksum is last. */
	if (mutable->port_config.out_key ||
	    mutable->port_config.flags & TNL_F_OUT_KEY_ACTION) {
		greh->flags |= GRE_KEY;

		/* TNL_F_OUT_KEY_ACTION: the key is per-packet, taken from the
		 * flow action via OVS_CB; otherwise it is the fixed port key. */
		if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
			*options = OVS_CB(skb)->tun_id;
		else
			*options = mutable->port_config.out_key;

		options--;
	}

	if (mutable->port_config.flags & TNL_F_CSUM) {
		greh->flags |= GRE_CSUM;

		/* Checksum everything after the outer IP header, with the
		 * checksum field itself zeroed first. */
		*options = 0;
		*(__sum16 *)options = csum_fold(skb_checksum(skb,
						sizeof(struct iphdr),
						skb->len - sizeof(struct iphdr),
						0));
	}

	/*
	 * Allow our local IP stack to fragment the outer packet even if the
	 * DF bit is set as a last resort.
	 */
	skb->local_df = 1;

	return skb;
}
94
95 static int parse_header(struct iphdr *iph, __be16 *flags, __be32 *key)
96 {
97 /* IP and ICMP protocol handlers check that the IHL is valid. */
98 struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
99 __be32 *options = (__be32 *)(greh + 1);
100 int hdr_len;
101
102 *flags = greh->flags;
103
104 if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
105 return -EINVAL;
106
107 if (unlikely(greh->protocol != htons(ETH_P_TEB)))
108 return -EINVAL;
109
110 hdr_len = GRE_HEADER_SECTION;
111
112 if (greh->flags & GRE_CSUM) {
113 hdr_len += GRE_HEADER_SECTION;
114 options++;
115 }
116
117 if (greh->flags & GRE_KEY) {
118 hdr_len += GRE_HEADER_SECTION;
119
120 *key = *options;
121 options++;
122 } else
123 *key = 0;
124
125 if (unlikely(greh->flags & GRE_SEQ))
126 hdr_len += GRE_HEADER_SECTION;
127
128 return hdr_len;
129 }
130
/*
 * ICMP error handler for IPPROTO_GRE.  Called with rcu_read_lock and BH
 * disabled.
 *
 * @skb carries an ICMP error whose payload is the start of a packet we
 * previously transmitted (outer IP header + GRE + inner Ethernet frame).
 * For "fragmentation needed" errors, parses down to the inner packet and
 * relays a reduced MTU to its original sender via tnl_frag_needed().
 */
static void gre_err(struct sk_buff *skb, u32 info)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

	struct iphdr *iph;
	__be16 flags;
	__be32 key;
	int tunnel_hdr_len, tot_hdr_len;
	unsigned int orig_mac_header;
	unsigned int orig_nw_header;

	/* Only MTU-related errors are handled here. */
	if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
		return;

	/*
	 * The minimum size packet that we would actually be able to process:
	 * encapsulating IP header, minimum GRE header, Ethernet header,
	 * inner IPv4 header.
	 */
	if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
			   ETH_HLEN + sizeof(struct iphdr)))
		return;

	/* skb->data points at the embedded copy of the packet we sent. */
	iph = (struct iphdr *)skb->data;

	tunnel_hdr_len = parse_header(iph, &flags, &key);
	if (tunnel_hdr_len < 0)
		return;

	/* The embedded packet was sent by us, so its source is our local
	 * endpoint and its destination the remote one. */
	vport = tnl_find_port(iph->saddr, iph->daddr, key,
			      TNL_T_PROTO_GRE | TNL_T_KEY_EITHER, &mutable);
	if (!vport)
		return;

	/*
	 * Packets received by this function were previously sent by us, so
	 * any comparisons should be to the output values, not the input.
	 * However, it's not really worth it to have a hash table based on
	 * output keys (especially since ICMP error handling of tunneled packets
	 * isn't that reliable anyways). Therefore, we do a lookup based on the
	 * out key as if it were the in key and then check to see if the input
	 * and output keys are the same.
	 */
	if (mutable->port_config.in_key != mutable->port_config.out_key)
		return;

	if (!!(mutable->port_config.flags & TNL_F_IN_KEY_MATCH) !=
	    !!(mutable->port_config.flags & TNL_F_OUT_KEY_ACTION))
		return;

	/* A packet we sent must carry a checksum iff the port requires it. */
	if ((mutable->port_config.flags & TNL_F_CSUM) && !(flags & GRE_CSUM))
		return;

	tunnel_hdr_len += iph->ihl << 2;

	/*
	 * Temporarily re-point the mac/network header offsets into the
	 * embedded packet so eth_hdr()/ip_hdr()/ipv6_hdr() below resolve to
	 * the inner frame; the originals are restored at "out".
	 */
	orig_mac_header = skb_mac_header(skb) - skb->data;
	orig_nw_header = skb_network_header(skb) - skb->data;
	skb_set_mac_header(skb, tunnel_hdr_len);

	tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

	skb->protocol = eth_hdr(skb)->h_proto;
	if (skb->protocol == htons(ETH_P_8021Q)) {
		tot_hdr_len += VLAN_HLEN;
		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	}

	skb_set_network_header(skb, tot_hdr_len);
	/* The MTU reported to the inner sender excludes all encapsulation
	 * overhead: outer IP + GRE + Ethernet (+ VLAN tag if present). */
	mtu -= tot_hdr_len;

	if (skb->protocol == htons(ETH_P_IP))
		tot_hdr_len += sizeof(struct iphdr);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		tot_hdr_len += sizeof(struct ipv6hdr);
#endif
	else
		goto out;

	if (!pskb_may_pull(skb, tot_hdr_len))
		goto out;

	if (skb->protocol == htons(ETH_P_IP)) {
		/* Don't report an MTU below the IPv4 minimum unless clamping
		 * to IP_MIN_MTU would still let the inner packet shrink. */
		if (mtu < IP_MIN_MTU) {
			if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
				mtu = IP_MIN_MTU;
			else
				goto out;
		}

	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* Same clamp for IPv6; payload_len == 0 presumably indicates
		 * a jumbogram, which is also clamped to the minimum. */
		if (mtu < IPV6_MIN_MTU) {
			unsigned int packet_length = sizeof(struct ipv6hdr) +
				ntohs(ipv6_hdr(skb)->payload_len);

			if (packet_length >= IPV6_MIN_MTU
			    || ntohs(ipv6_hdr(skb)->payload_len) == 0)
				mtu = IPV6_MIN_MTU;
			else
				goto out;
		}
	}
#endif

	/* Expose the inner frame to tnl_frag_needed(), then put the tunnel
	 * header back so the ICMP layer sees the skb unchanged. */
	__skb_pull(skb, tunnel_hdr_len);
	tnl_frag_needed(vport, mutable, skb, mtu, key);
	__skb_push(skb, tunnel_hdr_len);

out:
	/* Restore the original header offsets and protocol. */
	skb_set_mac_header(skb, orig_mac_header);
	skb_set_network_header(skb, orig_nw_header);
	skb->protocol = htons(ETH_P_IP);
}
251
/*
 * Verifies the optional GRE checksum on a received packet.  The GRE
 * checksum, when present, covers the GRE header and payload, so summing
 * the packet and folding yields zero for a valid packet.
 *
 * NOTE(review): locates the GRE header at (iph + 1), i.e. assumes the
 * outer IPv4 header carries no options — confirm option-bearing outer
 * headers cannot reach this point.
 *
 * Returns true if no checksum is present or the checksum is valid.
 * May recompute skb->csum and upgrade skb->ip_summed as a side effect.
 */
static bool check_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
	__sum16 csum = 0;

	if (greh->flags & GRE_CSUM) {
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			/* Hardware already summed the packet; a valid packet
			 * folds to zero. */
			csum = csum_fold(skb->csum);

			if (!csum)
				break;
			/* Fall through. */

		case CHECKSUM_NONE:
			/* No usable hardware checksum: compute in software
			 * over the whole packet. */
			skb->csum = 0;
			csum = __skb_checksum_complete(skb);
			skb->ip_summed = CHECKSUM_COMPLETE;
			break;
		}
	}

	/* csum stays 0 (success) for other ip_summed states, e.g. when the
	 * checksum was already verified. */
	return (csum == 0);
}
277
/*
 * Receive handler for IPPROTO_GRE packets.  Called with rcu_read_lock and
 * BH disabled.
 *
 * Validates the GRE header and optional checksum, looks up the receiving
 * tunnel vport, strips the GRE encapsulation, and hands the inner Ethernet
 * frame to the generic tunnel receive path.  Consumes @skb in all cases
 * (passed to tnl_rcv() on success, freed on error).  Always returns 0.
 */
static int gre_rcv(struct sk_buff *skb)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	int hdr_len;
	struct iphdr *iph;
	__be16 flags;
	__be32 key;

	/* Need at least the base GRE header plus an inner Ethernet header
	 * in the linear area before parsing. */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
		goto error;

	if (unlikely(!check_checksum(skb)))
		goto error;

	hdr_len = parse_header(ip_hdr(skb), &flags, &key);
	if (unlikely(hdr_len < 0))
		goto error;

	/* Now that the actual header length (with options) is known, make
	 * sure all of it plus the inner Ethernet header is linear. */
	if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
		goto error;

	/* Reload: pskb_may_pull() may have reallocated the header. */
	iph = ip_hdr(skb);
	/* On receive, the destination is our local endpoint and the source
	 * the remote one — the reverse of gre_err(). */
	vport = tnl_find_port(iph->daddr, iph->saddr, key,
			      TNL_T_PROTO_GRE | TNL_T_KEY_EITHER, &mutable);
	if (unlikely(!vport)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
		goto error;
	}

	/* With TNL_F_IN_KEY_MATCH the received key is exposed to the flow as
	 * the tunnel ID; otherwise the key only selected the port. */
	if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH)
		OVS_CB(skb)->tun_id = key;
	else
		OVS_CB(skb)->tun_id = 0;

	/* Strip the GRE header and adjust any hardware checksum for the
	 * removed bytes. */
	__skb_pull(skb, hdr_len);
	skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

	tnl_rcv(vport, skb);
	return 0;

error:
	kfree_skb(skb);
	return 0;
}
324
/*
 * GRE-specific hooks used by the generic tunnel layer: the tunnel type
 * tag, the IP protocol to register under, and how to size and build the
 * per-packet GRE header.
 */
struct tnl_ops gre_tnl_ops = {
	.tunnel_type = TNL_T_PROTO_GRE,
	.ipproto = IPPROTO_GRE,
	.hdr_len = gre_hdr_len,
	.build_header = gre_build_header,
};
331
/*
 * vport_ops->create callback: creates a GRE tunnel vport named @name.
 * @config is a userspace pointer (presumably to a struct tnl_port_config,
 * as consumed by gre_hdr_len() — confirm against tnl_create()).
 */
static struct vport *gre_create(const char *name, const void __user *config)
{
	return tnl_create(name, config, &gre_vport_ops, &gre_tnl_ops);
}
336
/*
 * IP-stack registration record: gre_rcv() receives IPPROTO_GRE packets,
 * gre_err() handles ICMP errors for GRE packets we sent.
 */
static struct net_protocol gre_protocol_handlers = {
	.handler = gre_rcv,
	.err_handler = gre_err,
};
341
342 static int gre_init(void)
343 {
344 int err;
345
346 err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
347 if (err) {
348 printk(KERN_WARNING "openvswitch: cannot register gre protocol handler\n");
349 goto out;
350 }
351
352 err = tnl_init();
353
354 out:
355 return err;
356 }
357
/* Tears down in reverse order of gre_init(). */
static void gre_exit(void)
{
	tnl_exit();
	inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}
363
/*
 * Vport operations for "gre" ports.  Everything except init/exit/create is
 * delegated to the generic tunnel code (tnl_*) or generic vport helpers
 * (vport_gen_*).  VPORT_F_TUN_ID — presumably marks that this port type
 * supplies OVS_CB(skb)->tun_id (set in gre_rcv()); confirm in vport.h.
 */
struct vport_ops gre_vport_ops = {
	.type = "gre",
	.flags = VPORT_F_GEN_STATS | VPORT_F_TUN_ID,
	.init = gre_init,
	.exit = gre_exit,
	.create = gre_create,
	.modify = tnl_modify,
	.destroy = tnl_destroy,
	.set_mtu = tnl_set_mtu,
	.set_addr = tnl_set_addr,
	.get_name = tnl_get_name,
	.get_addr = tnl_get_addr,
	.get_dev_flags = vport_gen_get_dev_flags,
	.is_running = vport_gen_is_running,
	.get_operstate = vport_gen_get_operstate,
	.get_mtu = tnl_get_mtu,
	.send = tnl_send,
};