/*
 * Source: datapath/vport-gre.c (Open vSwitch kernel datapath,
 * mirror_ovs.git on git.proxmox.com), as of commit
 * "tunneling: Rely on protocol handles to parse ToS."
 */
/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/if.h>
12 #include <linux/skbuff.h>
13 #include <linux/ip.h>
14 #include <linux/if_tunnel.h>
15 #include <linux/if_vlan.h>
16 #include <linux/in.h>
17
18 #include <net/icmp.h>
19 #include <net/ip.h>
20 #include <net/protocol.h>
21
22 #include "tunnel.h"
23 #include "vport.h"
24 #include "vport-generic.h"
25
26 /*
27 * The GRE header is composed of a series of sections: a base and then a variable
28 * number of options.
29 */
30 #define GRE_HEADER_SECTION 4
31
/* The fixed, mandatory portion of a GRE header (one GRE_HEADER_SECTION);
 * any options (checksum, key, sequence number) follow immediately after. */
struct gre_base_hdr {
	__be16 flags;		/* GRE_CSUM / GRE_KEY / GRE_SEQ / version bits. */
	__be16 protocol;	/* Ethertype of the encapsulated payload. */
};
36
37 static int gre_hdr_len(const struct tnl_mutable_config *mutable)
38 {
39 int len;
40
41 len = GRE_HEADER_SECTION;
42
43 if (mutable->flags & TNL_F_CSUM)
44 len += GRE_HEADER_SECTION;
45
46 if (mutable->out_key || mutable->flags & TNL_F_OUT_KEY_ACTION)
47 len += GRE_HEADER_SECTION;
48
49 return len;
50 }
51
/* Returns the least-significant 32 bits of a __be64. */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
	/* Big endian: the low 32 bits are already the last 4 bytes. */
	return (__force __be32)x;
#else
	/* Little endian: byte-order of a __be64 puts the low 32 bits in
	 * the upper half of the underlying u64, so shift them down. */
	return (__force __be32)((__force u64)x >> 32);
#endif
}
61
62 static void gre_build_header(const struct vport *vport,
63 const struct tnl_mutable_config *mutable,
64 void *header)
65 {
66 struct gre_base_hdr *greh = header;
67 __be32 *options = (__be32 *)(greh + 1);
68
69 greh->protocol = htons(ETH_P_TEB);
70 greh->flags = 0;
71
72 if (mutable->flags & TNL_F_CSUM) {
73 greh->flags |= GRE_CSUM;
74 *options = 0;
75 options++;
76 }
77
78 if (mutable->out_key || mutable->flags & TNL_F_OUT_KEY_ACTION)
79 greh->flags |= GRE_KEY;
80
81 if (mutable->out_key)
82 *options = be64_get_low32(mutable->out_key);
83 }
84
85 static struct sk_buff *gre_update_header(const struct vport *vport,
86 const struct tnl_mutable_config *mutable,
87 struct dst_entry *dst,
88 struct sk_buff *skb)
89 {
90 __be32 *options = (__be32 *)(skb_network_header(skb) + mutable->tunnel_hlen
91 - GRE_HEADER_SECTION);
92
93 /* Work backwards over the options so the checksum is last. */
94 if (mutable->flags & TNL_F_OUT_KEY_ACTION) {
95 *options = be64_get_low32(OVS_CB(skb)->tun_id);
96 options--;
97 }
98
99 if (mutable->flags & TNL_F_CSUM)
100 *(__sum16 *)options = csum_fold(skb_checksum(skb,
101 skb_transport_offset(skb),
102 skb->len - skb_transport_offset(skb),
103 0));
104 /*
105 * Allow our local IP stack to fragment the outer packet even if the
106 * DF bit is set as a last resort.
107 */
108 skb->local_df = 1;
109
110 return skb;
111 }
112
/* Zero-extends a __be32 into the least-significant 32 bits of a __be64. */
static __be64 be32_extend_to_be64(__be32 x)
{
#ifdef __BIG_ENDIAN
	/* Big endian: the value already occupies the low-order bytes. */
	return (__force __be64)x;
#else
	/* Little endian: shift into the upper half of the u64 so the bytes
	 * land where the low 32 bits of a __be64 live (inverse of
	 * be64_get_low32()). */
	return (__force __be64)((__force u64)x << 32);
#endif
}
122
123 static int parse_header(struct iphdr *iph, __be16 *flags, __be64 *key)
124 {
125 /* IP and ICMP protocol handlers check that the IHL is valid. */
126 struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
127 __be32 *options = (__be32 *)(greh + 1);
128 int hdr_len;
129
130 *flags = greh->flags;
131
132 if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
133 return -EINVAL;
134
135 if (unlikely(greh->protocol != htons(ETH_P_TEB)))
136 return -EINVAL;
137
138 hdr_len = GRE_HEADER_SECTION;
139
140 if (greh->flags & GRE_CSUM) {
141 hdr_len += GRE_HEADER_SECTION;
142 options++;
143 }
144
145 if (greh->flags & GRE_KEY) {
146 hdr_len += GRE_HEADER_SECTION;
147
148 *key = be32_extend_to_be64(*options);
149 options++;
150 } else
151 *key = 0;
152
153 if (unlikely(greh->flags & GRE_SEQ))
154 hdr_len += GRE_HEADER_SECTION;
155
156 return hdr_len;
157 }
158
/*
 * ICMP error handler for our GRE tunnels.  Called with rcu_read_lock and
 * BH disabled.  Translates an ICMP fragmentation-needed error received on
 * the outer (encapsulating) packet into a frag-needed notification for the
 * inner flow via tnl_frag_needed().
 */
static void gre_err(struct sk_buff *skb, u32 info)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

	struct iphdr *iph;
	__be16 flags;
	__be64 key;
	int tunnel_hdr_len, tot_hdr_len;
	unsigned int orig_mac_header;
	unsigned int orig_nw_header;

	/* Only path-MTU discovery errors are of interest. */
	if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
		return;

	/*
	 * The minimum size packet that we would actually be able to process:
	 * encapsulating IP header, minimum GRE header, Ethernet header,
	 * inner IPv4 header.
	 */
	if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
			   ETH_HLEN + sizeof(struct iphdr)))
		return;

	/* The ICMP payload starts with the outer IP header we sent. */
	iph = (struct iphdr *)skb->data;

	tunnel_hdr_len = parse_header(iph, &flags, &key);
	if (tunnel_hdr_len < 0)
		return;

	/* The returned copy of our packet has saddr/daddr from our point of
	 * view as sender, so look up with them in that order. */
	vport = tnl_find_port(iph->saddr, iph->daddr, key,
			      TNL_T_PROTO_GRE | TNL_T_KEY_EITHER, &mutable);
	if (!vport)
		return;

	/*
	 * Packets received by this function were previously sent by us, so
	 * any comparisons should be to the output values, not the input.
	 * However, it's not really worth it to have a hash table based on
	 * output keys (especially since ICMP error handling of tunneled packets
	 * isn't that reliable anyways). Therefore, we do a lookup based on the
	 * out key as if it were the in key and then check to see if the input
	 * and output keys are the same.
	 */
	if (mutable->in_key != mutable->out_key)
		return;

	/* In-key match and out-key action must be configured together for
	 * the lookup above to have been meaningful. */
	if (!!(mutable->flags & TNL_F_IN_KEY_MATCH) !=
	    !!(mutable->flags & TNL_F_OUT_KEY_ACTION))
		return;

	/* If we send checksums, a genuine reflection of our packet must
	 * carry one too. */
	if ((mutable->flags & TNL_F_CSUM) && !(flags & GRE_CSUM))
		return;

	tunnel_hdr_len += iph->ihl << 2;

	/* Save header offsets so they can be restored before returning the
	 * skb to the ICMP layer. */
	orig_mac_header = skb_mac_header(skb) - skb->data;
	orig_nw_header = skb_network_header(skb) - skb->data;
	skb_set_mac_header(skb, tunnel_hdr_len);

	tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

	skb->protocol = eth_hdr(skb)->h_proto;
	if (skb->protocol == htons(ETH_P_8021Q)) {
		/* Skip the VLAN tag to reach the encapsulated protocol. */
		tot_hdr_len += VLAN_HLEN;
		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	}

	skb_set_network_header(skb, tot_hdr_len);
	/* Convert the reported outer-path MTU into an inner-packet MTU. */
	mtu -= tot_hdr_len;

	if (skb->protocol == htons(ETH_P_IP))
		tot_hdr_len += sizeof(struct iphdr);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		tot_hdr_len += sizeof(struct ipv6hdr);
#endif
	else
		goto out;

	if (!pskb_may_pull(skb, tot_hdr_len))
		goto out;

	if (skb->protocol == htons(ETH_P_IP)) {
		/* Never advertise an MTU below the protocol minimum unless
		 * the inner packet really was that small. */
		if (mtu < IP_MIN_MTU) {
			if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
				mtu = IP_MIN_MTU;
			else
				goto out;
		}

	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU) {
			unsigned int packet_length = sizeof(struct ipv6hdr) +
						ntohs(ipv6_hdr(skb)->payload_len);

			if (packet_length >= IPV6_MIN_MTU
			    || ntohs(ipv6_hdr(skb)->payload_len) == 0)
				mtu = IPV6_MIN_MTU;
			else
				goto out;
		}
	}
#endif

	/* Temporarily expose the inner packet for tnl_frag_needed(), then
	 * restore the original layout. */
	__skb_pull(skb, tunnel_hdr_len);
	tnl_frag_needed(vport, mutable, skb, mtu, key);
	__skb_push(skb, tunnel_hdr_len);

out:
	skb_set_mac_header(skb, orig_mac_header);
	skb_set_network_header(skb, orig_nw_header);
	skb->protocol = htons(ETH_P_IP);
}
279
/*
 * Verifies the GRE checksum if the packet carries one.  Returns true when
 * the checksum is valid or absent, false on a checksum mismatch.
 */
static bool check_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
	__sum16 csum = 0;

	if (greh->flags & GRE_CSUM) {
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			/* Hardware already summed the packet; just fold. */
			csum = csum_fold(skb->csum);

			if (!csum)
				break;
			/* Fall through. */

		case CHECKSUM_NONE:
			/* No (trustworthy) hardware checksum: recompute in
			 * software and cache the result on the skb. */
			skb->csum = 0;
			csum = __skb_checksum_complete(skb);
			skb->ip_summed = CHECKSUM_COMPLETE;
			break;
		}
	}

	/* A valid ones-complement checksum folds to zero. */
	return (csum == 0);
}
305
/*
 * Receive handler for IPPROTO_GRE packets.  Called with rcu_read_lock and
 * BH disabled.  Validates and strips the GRE header, then hands the inner
 * Ethernet frame to the generic tunnel receive path.  Always consumes the
 * skb and returns 0.
 */
static int gre_rcv(struct sk_buff *skb)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	int hdr_len;
	struct iphdr *iph;
	__be16 flags;
	__be64 key;

	/* Need at least the base GRE header plus an inner Ethernet header
	 * contiguous before parsing. */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
		goto error;

	if (unlikely(!check_checksum(skb)))
		goto error;

	hdr_len = parse_header(ip_hdr(skb), &flags, &key);
	if (unlikely(hdr_len < 0))
		goto error;

	/* Pull in the full (option-bearing) GRE header now that we know its
	 * actual length. */
	if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
		goto error;

	/* pskb_may_pull() may have reallocated the header; re-fetch. */
	iph = ip_hdr(skb);
	vport = tnl_find_port(iph->daddr, iph->saddr, key,
			      TNL_T_PROTO_GRE | TNL_T_KEY_EITHER, &mutable);
	if (unlikely(!vport)) {
		/* No tunnel configured for this flow: reject like an
		 * unbound UDP/TCP port would. */
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
		goto error;
	}

	/* Only expose the key to the flow if this port matches on it. */
	if (mutable->flags & TNL_F_IN_KEY_MATCH)
		OVS_CB(skb)->tun_id = key;
	else
		OVS_CB(skb)->tun_id = 0;

	/* Strip the GRE header and back its bytes out of any hardware
	 * checksum. */
	__skb_pull(skb, hdr_len);
	skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

	tnl_rcv(vport, skb, iph->tos);
	return 0;

error:
	kfree_skb(skb);
	return 0;
}
352
/* Protocol hooks used by the generic tunnel code to build/update GRE
 * headers on transmit. */
static const struct tnl_ops gre_tnl_ops = {
	.tunnel_type	= TNL_T_PROTO_GRE,
	.ipproto	= IPPROTO_GRE,
	.hdr_len	= gre_hdr_len,
	.build_header	= gre_build_header,
	.update_header	= gre_update_header,
};
360
/* Creates a GRE vport by delegating to the generic tunnel layer with our
 * GRE-specific protocol ops. */
static struct vport *gre_create(const struct vport_parms *parms)
{
	return tnl_create(parms, &gre_vport_ops, &gre_tnl_ops);
}
365
/* inet protocol handlers registered for IPPROTO_GRE.
 * NOTE(review): no .netns_ok here — presumably fine for the kernel versions
 * this was written against, but verify on kernels with netns-aware
 * net_protocol. */
static const struct net_protocol gre_protocol_handlers = {
	.handler	=	gre_rcv,
	.err_handler	=	gre_err,
};
370
371 static int gre_init(void)
372 {
373 int err;
374
375 err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
376 if (err)
377 pr_warn("cannot register gre protocol handler\n");
378
379 return err;
380 }
381
/* Unregisters the GRE protocol handlers registered by gre_init(). */
static void gre_exit(void)
{
	inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}
386
/* vport operations for GRE tunnel ports; mostly delegates to the shared
 * tunnel (tnl_*) and generic vport implementations. */
const struct vport_ops gre_vport_ops = {
	.type		= ODP_VPORT_TYPE_GRE,
	.flags		= VPORT_F_GEN_STATS | VPORT_F_TUN_ID,
	.init		= gre_init,
	.exit		= gre_exit,
	.create		= gre_create,
	.destroy	= tnl_destroy,
	.set_addr	= tnl_set_addr,
	.get_name	= tnl_get_name,
	.get_addr	= tnl_get_addr,
	.get_options	= tnl_get_options,
	.set_options	= tnl_set_options,
	.get_dev_flags	= vport_gen_get_dev_flags,
	.is_running	= vport_gen_is_running,
	.get_operstate	= vport_gen_get_operstate,
	.send		= tnl_send,
};