]> git.proxmox.com Git - mirror_ovs.git/blob - datapath/vport-gre.c
datapath: Add 'patch' vport.
[mirror_ovs.git] / datapath / vport-gre.c
1 /*
2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
4 *
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
7 */
8
9 #include <linux/if_arp.h>
10 #include <linux/if_ether.h>
11 #include <linux/ip.h>
12 #include <linux/if_tunnel.h>
13 #include <linux/if_vlan.h>
14 #include <linux/in.h>
15 #include <linux/in_route.h>
16 #include <linux/jhash.h>
17 #include <linux/kernel.h>
18 #include <linux/version.h>
19
20 #include <net/dsfield.h>
21 #include <net/dst.h>
22 #include <net/icmp.h>
23 #include <net/inet_ecn.h>
24 #include <net/ip.h>
25 #include <net/ipv6.h>
26 #include <net/protocol.h>
27 #include <net/route.h>
28 #include <net/xfrm.h>
29
30 #include "actions.h"
31 #include "datapath.h"
32 #include "openvswitch/gre.h"
33 #include "table.h"
34 #include "vport.h"
35 #include "vport-generic.h"
36
37 /* The absolute minimum fragment size. Note that there are many other
38 * definitions of the minimum MTU. */
39 #define IP_MIN_MTU 68
40
41 /* The GRE header is composed of a series of sections: a base and then a variable
42 * number of options. */
43 #define GRE_HEADER_SECTION 4
44
/* Per-port configuration that may change at runtime.  A new copy is
 * allocated on every reconfiguration and swapped in under RCU (see
 * assign_config_rcu()), so readers never observe a half-updated config. */
struct mutable_config {
	struct rcu_head rcu;		/* Defers freeing until readers are done. */

	unsigned char eth_addr[ETH_ALEN];	/* Source MAC for synthesized PMTUD frames. */
	unsigned int mtu;			/* MTU of this tunnel vport. */
	struct gre_port_config port_config;	/* User-supplied tunnel parameters. */

	int tunnel_hlen; /* Tunnel header length. */
};
54
/* Private data of a GRE vport; lives inside the generic struct vport
 * (see gre_vport_priv()/gre_vport_to_vport()). */
struct gre_vport {
	struct tbl_node tbl_node;	/* Node in the global port_table hash table. */

	char name[IFNAMSIZ];		/* Device name of this vport. */

	/* Protected by RCU. */
	struct mutable_config *mutable;
};
63
64 struct vport_ops gre_vport_ops;
65
66 /* Protected by RCU. */
67 static struct tbl *port_table;
68
69 /* These are just used as an optimization: they don't require any kind of
70 * synchronization because we could have just as easily read the value before
71 * the port change happened. */
72 static unsigned int key_local_remote_ports;
73 static unsigned int key_remote_ports;
74 static unsigned int local_remote_ports;
75 static unsigned int remote_ports;
76
/* Maps a generic vport to the GRE private data stored inside it. */
static inline struct gre_vport *
gre_vport_priv(const struct vport *vport)
{
	struct gre_vport *gre_vport = vport_priv(vport);

	return gre_vport;
}
82
/* Maps GRE private data back to the generic vport that contains it. */
static inline struct vport *
gre_vport_to_vport(const struct gre_vport *gre_vport)
{
	struct vport *vport = vport_from_priv(gre_vport);

	return vport;
}
88
89 static inline struct gre_vport *
90 gre_vport_table_cast(const struct tbl_node *node)
91 {
92 return container_of(node, struct gre_vport, tbl_node);
93 }
94
95 /* RCU callback. */
96 static void
97 free_config(struct rcu_head *rcu)
98 {
99 struct mutable_config *c = container_of(rcu, struct mutable_config, rcu);
100 kfree(c);
101 }
102
103 static void
104 assign_config_rcu(struct vport *vport, struct mutable_config *new_config)
105 {
106 struct gre_vport *gre_vport = gre_vport_priv(vport);
107 struct mutable_config *old_config;
108
109 old_config = rcu_dereference(gre_vport->mutable);
110 rcu_assign_pointer(gre_vport->mutable, new_config);
111 call_rcu(&old_config->rcu, free_config);
112 }
113
114 static unsigned int *
115 find_port_pool(const struct mutable_config *mutable)
116 {
117 if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH) {
118 if (mutable->port_config.saddr)
119 return &local_remote_ports;
120 else
121 return &remote_ports;
122 } else {
123 if (mutable->port_config.saddr)
124 return &key_local_remote_ports;
125 else
126 return &key_remote_ports;
127 }
128 }
129
/* Indexes into port_lookup_key.vals[].  The order defines the layout of
 * the hashed key, so hashing and comparison must stay in sync with it. */
enum lookup_key {
	LOOKUP_SADDR = 0,	/* Local (source) IP address, 0 = wildcard. */
	LOOKUP_DADDR = 1,	/* Remote (destination) IP address. */
	LOOKUP_KEY = 2,		/* GRE key; 0 for flow-matched ports. */
	LOOKUP_KEY_MATCH = 3	/* GRE_F_IN_KEY_MATCH flag bit, or 0. */
};
136
/* Search target passed through the hash table lookup. */
struct port_lookup_key {
	u32 vals[4]; /* Contains enum lookup_key keys. */
	const struct mutable_config *mutable;	/* Set by port_cmp() on each probe. */
};
141
142 /* Modifies 'target' to store the rcu_dereferenced pointer that was used to do
143 * the comparision. */
144 static int
145 port_cmp(const struct tbl_node *node, void *target)
146 {
147 const struct gre_vport *gre_vport = gre_vport_table_cast(node);
148 struct port_lookup_key *lookup = target;
149
150 lookup->mutable = rcu_dereference(gre_vport->mutable);
151
152 return ((lookup->mutable->port_config.flags & GRE_F_IN_KEY_MATCH) ==
153 lookup->vals[LOOKUP_KEY_MATCH]) &&
154 lookup->mutable->port_config.daddr == lookup->vals[LOOKUP_DADDR] &&
155 lookup->mutable->port_config.in_key == lookup->vals[LOOKUP_KEY] &&
156 lookup->mutable->port_config.saddr == lookup->vals[LOOKUP_SADDR];
157 }
158
/* Hashes all four lookup values; used for both insertion and lookup. */
static u32
port_hash(struct port_lookup_key *lookup)
{
	return jhash2(lookup->vals, ARRAY_SIZE(lookup->vals), 0);
}
164
/* Inserts 'vport' into the global port table, creating the table on first
 * use and expanding it when the load factor exceeds one bucket per entry.
 *
 * Returns 0 on success or a negative errno value. */
static int
add_port(struct vport *vport)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	struct port_lookup_key lookup;
	int err;

	if (!port_table) {
		struct tbl *new_table;

		new_table = tbl_create(0);
		if (!new_table)
			return -ENOMEM;

		rcu_assign_pointer(port_table, new_table);

	} else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
		/* Grow the table; the old one is destroyed only after an RCU
		 * grace period so concurrent readers can finish with it. */
		struct tbl *old_table = port_table;
		struct tbl *new_table;

		new_table = tbl_expand(old_table);
		if (IS_ERR(new_table))
			return PTR_ERR(new_table);

		rcu_assign_pointer(port_table, new_table);
		tbl_deferred_destroy(old_table, NULL);
	}

	/* Build the hash key from the current configuration; the layout must
	 * match what find_port() uses for lookups. */
	lookup.vals[LOOKUP_SADDR] = gre_vport->mutable->port_config.saddr;
	lookup.vals[LOOKUP_DADDR] = gre_vport->mutable->port_config.daddr;
	lookup.vals[LOOKUP_KEY] = gre_vport->mutable->port_config.in_key;
	lookup.vals[LOOKUP_KEY_MATCH] = gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH;

	err = tbl_insert(port_table, &gre_vport->tbl_node, port_hash(&lookup));
	if (err)
		return err;

	/* Count this port in its addressing class so lookups can skip
	 * classes with no members. */
	(*find_port_pool(gre_vport->mutable))++;

	return 0;
}
206
207 static int
208 del_port(struct vport *vport)
209 {
210 struct gre_vport *gre_vport = gre_vport_priv(vport);
211 int err;
212
213 err = tbl_remove(port_table, &gre_vport->tbl_node);
214 if (err)
215 return err;
216
217 (*find_port_pool(gre_vport->mutable))--;
218
219 return 0;
220 }
221
/* Which classes of port find_port() should consider. */
#define FIND_PORT_KEY (1 << 0)		/* Ports matched on a specific GRE key. */
#define FIND_PORT_MATCH (1 << 1)	/* Flow-matched ports (accept any key). */
#define FIND_PORT_ANY (FIND_PORT_KEY | FIND_PORT_MATCH)

/* Looks up the vport for a tunnel with the given addresses and key.
 * Keyed ports are preferred over flow-matched ports, and ports bound to a
 * specific local address over wildcard ones.  The per-class counters are
 * checked first so that empty classes cost no hash probes.
 *
 * On success, stores in '*mutable' the rcu_dereferenced configuration the
 * match was made against and returns the vport; otherwise returns NULL.
 * Must be called with rcu_read_lock. */
static struct vport *
find_port(__be32 saddr, __be32 daddr, __be32 key, int port_type,
	  const struct mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference(port_table);
	struct tbl_node *tbl_node;

	if (!table)
		return NULL;

	lookup.vals[LOOKUP_SADDR] = saddr;
	lookup.vals[LOOKUP_DADDR] = daddr;

	if (port_type & FIND_PORT_KEY) {
		lookup.vals[LOOKUP_KEY] = key;
		lookup.vals[LOOKUP_KEY_MATCH] = 0;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (key_remote_ports) {
			/* Retry with a wildcard local address. */
			lookup.vals[LOOKUP_SADDR] = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;

			/* Restore for the flow-match pass below. */
			lookup.vals[LOOKUP_SADDR] = saddr;
		}
	}

	if (port_type & FIND_PORT_MATCH) {
		lookup.vals[LOOKUP_KEY] = 0;
		lookup.vals[LOOKUP_KEY_MATCH] = GRE_F_IN_KEY_MATCH;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (remote_ports) {
			lookup.vals[LOOKUP_SADDR] = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}
	}

	return NULL;

found:
	*mutable = lookup.mutable;
	return gre_vport_to_vport(gre_vport_table_cast(tbl_node));
}
286
287 static bool
288 check_ipv4_address(__be32 addr)
289 {
290 if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
291 || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
292 return false;
293
294 return true;
295 }
296
/* Decides whether it is appropriate to answer the IPv4 packet in 'skb'
 * with an ICMP "fragmentation needed" message: never for L2/L3 broadcast
 * or invalid addresses, only for the first fragment, and never in reply to
 * another ICMP error message. */
static bool
ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		/* Offset of the ICMP type field relative to skb->data;
		 * skb_header_pointer() copes with nonlinear skbs. */
		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);

		if (!icmp_typep)
			return false;

		/* Types above NR_ICMP_TYPES are unknown; types up to
		 * ICMP_PARAMETERPROB are errors except for echo/echo reply. */
		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}
337
/* Fills in the IPv4 and ICMP headers plus payload of a "fragmentation
 * needed" message in 'nskb', advertising 'mtu'.  The first
 * 'payload_length' bytes of the original packet (from its IP header on)
 * are quoted in the ICMP payload, and addresses are swapped so the message
 * flows back toward the sender of 'skb'. */
static void
ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
		unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);	/* Next-hop MTU field. */
	icmph->checksum = 0;

	/* Checksum the ICMP header, then copy the quoted payload from the
	 * original packet while folding it into the same checksum. */
	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}
379
/* Decides whether to answer the IPv6 packet in 'skb' with an ICMPv6
 * "packet too big" message: requires valid unicast addresses and never
 * replies to another ICMPv6 error message. */
static bool
ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages.  Skip extension headers to
	 * find the real upper-layer protocol first. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							icmp6_type),
						sizeof(icmp_type), &icmp_type);

		/* Only informational messages (ICMPV6_INFOMSG_MASK set) are
		 * safe to answer; everything else is an error message. */
		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}
416
/* Fills in the IPv6 and ICMPv6 headers plus payload of a "packet too big"
 * message in 'nskb', advertising 'mtu'.  The first 'payload_length' bytes
 * of the original packet are quoted, and addresses are swapped so the
 * message flows back toward the sender of 'skb'. */
static void
ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb, unsigned int mtu,
		unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	/* Checksum the ICMPv6 header, fold in the quoted payload while
	 * copying it, then finish with the IPv6 pseudo-header. */
	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      + payload_length,
					      ipv6h->nexthdr, nskb->csum);
}
455
/* Synthesizes an ICMP "fragmentation needed" / ICMPv6 "packet too big"
 * message for the packet in 'skb' (which exceeded 'mtu') and injects it
 * into the datapath as if it had been received on 'vport'.  'flow_key' is
 * used as the tunnel ID of the synthesized packet for flow-keyed ports.
 *
 * Returns true if the packet was handled (a message was sent, or policy
 * says none is needed), false if the caller should fall back to
 * fragmenting. */
static bool
send_frag_needed(struct vport *vport, const struct mutable_config *mutable,
		 struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length, header_length, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	} else {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/* In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from so just fragment. */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		/* 576 is the minimum datagram size every IPv4 host must
		 * accept. */
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	} else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						  payload_length, IPV6_MIN_MTU);
	}
	total_length = min(total_length, mutable->mtu);
	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	}
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);

	/* Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet. If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets. */
	if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH &&
	    mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
		OVS_CB(nskb)->tun_id = flow_key;

	compute_ip_summed(nskb, false);
	vport_receive(vport, nskb);

	return true;
}
543
544 static struct sk_buff *
545 check_headroom(struct sk_buff *skb, int headroom)
546 {
547 if (skb_headroom(skb) < headroom ||
548 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
549 struct sk_buff *nskb = skb_realloc_headroom(skb, headroom);
550 if (!nskb) {
551 kfree_skb(skb);
552 return ERR_PTR(-ENOMEM);
553 }
554
555 set_skb_csum_bits(skb, nskb);
556
557 if (skb->sk)
558 skb_set_owner_w(nskb, skb->sk);
559
560 dev_kfree_skb(skb);
561 return nskb;
562 }
563
564 return skb;
565 }
566
/* Writes the GRE header (flags, protocol, and any key/checksum options)
 * immediately after the outer IP header that the caller has already placed
 * at the start of 'skb'.  mutable->tunnel_hlen must account for every
 * option written here (see set_config()). */
static void
create_gre_header(struct sk_buff *skb, const struct mutable_config *mutable)
{
	struct iphdr *iph = ip_hdr(skb);
	__be16 *flags = (__be16 *)(iph + 1);
	__be16 *protocol = flags + 1;
	/* Points at the last 32-bit option slot of the tunnel header. */
	__be32 *options = (__be32 *)((u8 *)iph + mutable->tunnel_hlen
					       - GRE_HEADER_SECTION);

	*protocol = htons(ETH_P_TEB);
	*flags = 0;

	/* Work backwards over the options so the checksum is last. */
	if (mutable->port_config.out_key ||
	    mutable->port_config.flags & GRE_F_OUT_KEY_ACTION) {
		*flags |= GRE_KEY;

		if (mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
			*options = OVS_CB(skb)->tun_id;
		else
			*options = mutable->port_config.out_key;

		options--;
	}

	if (mutable->port_config.flags & GRE_F_OUT_CSUM) {
		*flags |= GRE_CSUM;

		/* The checksum covers the GRE header and payload; the option
		 * itself must be zero while it is computed. */
		*options = 0;
		*(__sum16 *)options = csum_fold(skb_checksum(skb,
						sizeof(struct iphdr),
						skb->len - sizeof(struct iphdr),
						0));
	}
}
602
/* Verifies the GRE checksum of an incoming packet if the GRE_CSUM flag is
 * present.  Returns nonzero if the checksum is valid or absent. */
static int
check_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	__be16 flags = *(__be16 *)(iph + 1);
	__sum16 csum = 0;

	if (flags & GRE_CSUM) {
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			/* Hardware already summed the data; just fold it. */
			csum = csum_fold(skb->csum);

			if (!csum)
				break;
			/* Fall through. */
			/* Hardware sum didn't verify: recompute in software. */

		case CHECKSUM_NONE:
			skb->csum = 0;
			csum = __skb_checksum_complete(skb);
			skb->ip_summed = CHECKSUM_COMPLETE;
			break;
		}
	}

	return (csum == 0);
}
629
/* Parses the GRE header that follows 'iph', storing the GRE flags in
 * '*flags' and the key (or 0 if absent) in '*key'.
 *
 * Returns the GRE header length in bytes, or -EINVAL if the header uses an
 * unsupported version, routing, or encapsulated protocol.  The caller must
 * ensure the base header and any options consulted here are in the linear
 * data area. */
static int
parse_gre_header(struct iphdr *iph, __be16 *flags, __be32 *key)
{
	/* IP and ICMP protocol handlers check that the IHL is valid. */
	__be16 *flagsp = (__be16 *)((u8 *)iph + (iph->ihl << 2));
	__be16 *protocol = flagsp + 1;
	__be32 *options = (__be32 *)(protocol + 1);
	int hdr_len;

	*flags = *flagsp;

	/* Only GRE version 0 without routing is supported. */
	if (*flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	/* Only Transparent Ethernet Bridging (Ethernet-in-GRE). */
	if (*protocol != htons(ETH_P_TEB))
		return -EINVAL;

	hdr_len = GRE_HEADER_SECTION;

	/* When present, the checksum option precedes the key option. */
	if (*flags & GRE_CSUM) {
		hdr_len += GRE_HEADER_SECTION;
		options++;
	}

	if (*flags & GRE_KEY) {
		hdr_len += GRE_HEADER_SECTION;

		*key = *options;
		options++;
	} else
		*key = 0;

	if (*flags & GRE_SEQ)
		hdr_len += GRE_HEADER_SECTION;

	return hdr_len;
}
667
668 static inline u8
669 ecn_encapsulate(u8 tos, struct sk_buff *skb)
670 {
671 u8 inner;
672
673 if (skb->protocol == htons(ETH_P_IP))
674 inner = ((struct iphdr *)skb_network_header(skb))->tos;
675 else if (skb->protocol == htons(ETH_P_IPV6))
676 inner = ipv6_get_dsfield((struct ipv6hdr *)skb_network_header(skb));
677 else
678 inner = 0;
679
680 return INET_ECN_encapsulate(tos, inner);
681 }
682
/* Propagates a Congestion Experienced mark from the outer IP header (ToS
 * 'tos') to the inner IPv4/IPv6 packet in 'skb'.  Silently does nothing if
 * the inner headers cannot be pulled into the linear data area. */
static inline void
ecn_decapsulate(u8 tos, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(tos)) {
		__be16 protocol = skb->protocol;
		/* Keep the network header as an offset: pskb_may_pull() may
		 * reallocate skb->data, invalidating raw pointers. */
		unsigned int nw_header = skb_network_header(skb) - skb->data;

		if (skb->protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			nw_header += VLAN_HLEN;
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, nw_header
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce((struct iphdr *)(nw_header + skb->data));
		} else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, nw_header
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce((struct ipv6hdr *)(nw_header
							  + skb->data));
		}
	}
}
714
715 static struct sk_buff *
716 handle_gso(struct sk_buff *skb)
717 {
718 if (skb_is_gso(skb)) {
719 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG);
720
721 dev_kfree_skb(skb);
722 return nskb;
723 }
724
725 return skb;
726 }
727
728 static int
729 handle_csum_offload(struct sk_buff *skb)
730 {
731 if (skb->ip_summed == CHECKSUM_PARTIAL)
732 return skb_checksum_help(skb);
733 else {
734 skb->ip_summed = CHECKSUM_NONE;
735 return 0;
736 }
737 }
738
/* ICMP error handler for IPPROTO_GRE: translates a "fragmentation needed"
 * error on an encapsulated packet we previously sent into a PMTUD message
 * for the inner flow, injected through the originating vport.
 *
 * Called with rcu_read_lock.  'skb' points at the outer IP header of the
 * quoted packet; it is restored to its original state before returning so
 * the stack can continue processing it. */
static void
gre_err(struct sk_buff *skb, u32 info)
{
	struct vport *vport;
	const struct mutable_config *mutable;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

	struct iphdr *iph;
	__be16 flags;
	__be32 key;
	int tunnel_hdr_len, tot_hdr_len;
	unsigned int orig_mac_header;
	unsigned int orig_nw_header;

	if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
		return;

	/* The mimimum size packet that we would actually be able to process:
	 * encapsulating IP header, minimum GRE header, Ethernet header,
	 * inner IPv4 header. */
	if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
			   ETH_HLEN + sizeof(struct iphdr)))
		return;

	iph = (struct iphdr *)skb->data;

	tunnel_hdr_len = parse_gre_header(iph, &flags, &key);
	if (tunnel_hdr_len < 0)
		return;

	vport = find_port(iph->saddr, iph->daddr, key, FIND_PORT_ANY, &mutable);
	if (!vport)
		return;

	/* Packets received by this function were previously sent by us, so
	 * any comparisons should be to the output values, not the input.
	 * However, it's not really worth it to have a hash table based on
	 * output keys (especially since ICMP error handling of tunneled packets
	 * isn't that reliable anyways). Therefore, we do a lookup based on the
	 * out key as if it were the in key and then check to see if the input
	 * and output keys are the same. */
	if (mutable->port_config.in_key != mutable->port_config.out_key)
		return;

	if (!!(mutable->port_config.flags & GRE_F_IN_KEY_MATCH) !=
	    !!(mutable->port_config.flags & GRE_F_OUT_KEY_ACTION))
		return;

	if ((mutable->port_config.flags & GRE_F_OUT_CSUM) && !(flags & GRE_CSUM))
		return;

	tunnel_hdr_len += iph->ihl << 2;

	/* Save the original offsets so the headers can be restored on exit. */
	orig_mac_header = skb_mac_header(skb) - skb->data;
	orig_nw_header = skb_network_header(skb) - skb->data;
	skb_set_mac_header(skb, tunnel_hdr_len);

	tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

	skb->protocol = eth_hdr(skb)->h_proto;
	if (skb->protocol == htons(ETH_P_8021Q)) {
		tot_hdr_len += VLAN_HLEN;
		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	}

	skb_set_network_header(skb, tot_hdr_len);
	/* Convert the reported outer-path MTU into an inner-packet MTU. */
	mtu -= tot_hdr_len;

	if (skb->protocol == htons(ETH_P_IP))
		tot_hdr_len += sizeof(struct iphdr);
	else if (skb->protocol == htons(ETH_P_IPV6))
		tot_hdr_len += sizeof(struct ipv6hdr);
	else
		goto out;

	if (!pskb_may_pull(skb, tot_hdr_len))
		goto out;

	/* Some routers report a zero or bogus MTU; clamp to the protocol
	 * minimum when the quoted packet shows that is plausible. */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU) {
			if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
				mtu = IP_MIN_MTU;
			else
				goto out;
		}

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU) {
			unsigned int packet_length = sizeof(struct ipv6hdr) +
						     ntohs(ipv6_hdr(skb)->payload_len);

			if (packet_length >= IPV6_MIN_MTU
			    || ntohs(ipv6_hdr(skb)->payload_len) == 0)
				mtu = IPV6_MIN_MTU;
			else
				goto out;
		}
	}

	/* Temporarily expose the inner frame, synthesize the PMTUD message,
	 * then put the tunnel header back. */
	__pskb_pull(skb, tunnel_hdr_len);
	send_frag_needed(vport, mutable, skb, mtu, key);
	skb_push(skb, tunnel_hdr_len);

out:
	skb_set_mac_header(skb, orig_mac_header);
	skb_set_network_header(skb, orig_nw_header);
	skb->protocol = htons(ETH_P_IP);
}
850
851 /* Called with rcu_read_lock. */
852 static int
853 gre_rcv(struct sk_buff *skb)
854 {
855 struct vport *vport;
856 const struct mutable_config *mutable;
857 int hdr_len;
858 struct iphdr *iph;
859 __be16 flags;
860 __be32 key;
861
862 if (!pskb_may_pull(skb, GRE_HEADER_SECTION + ETH_HLEN))
863 goto error;
864
865 if (!check_checksum(skb))
866 goto error;
867
868 iph = ip_hdr(skb);
869
870 hdr_len = parse_gre_header(iph, &flags, &key);
871 if (hdr_len < 0)
872 goto error;
873
874 vport = find_port(iph->daddr, iph->saddr, key, FIND_PORT_ANY, &mutable);
875 if (!vport) {
876 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
877 goto error;
878 }
879
880 if ((mutable->port_config.flags & GRE_F_IN_CSUM) && !(flags & GRE_CSUM)) {
881 vport_record_error(vport, VPORT_E_RX_CRC);
882 goto error;
883 }
884
885 if (!pskb_pull(skb, hdr_len) || !pskb_may_pull(skb, ETH_HLEN)) {
886 vport_record_error(vport, VPORT_E_RX_ERROR);
887 goto error;
888 }
889
890 skb->pkt_type = PACKET_HOST;
891 skb->protocol = eth_type_trans(skb, skb->dev);
892 skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);
893
894 skb_dst_drop(skb);
895 nf_reset(skb);
896 secpath_reset(skb);
897 skb_reset_network_header(skb);
898
899 ecn_decapsulate(iph->tos, skb);
900
901 if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH)
902 OVS_CB(skb)->tun_id = key;
903 else
904 OVS_CB(skb)->tun_id = 0;
905
906 skb_push(skb, ETH_HLEN);
907 compute_ip_summed(skb, false);
908
909 vport_receive(vport, skb);
910
911 return 0;
912
913 error:
914 kfree_skb(skb);
915 return 0;
916 }
917
/* Encapsulates one (possibly GSO-segmented) skb: performs PMTUD checks,
 * prepends the outer IP header from the 'iph' template plus the GRE
 * header, and transmits through the local IP stack.
 *
 * Returns the number of pre-encapsulation payload bytes sent, or 0 on
 * error; consumes 'skb' in all cases. */
static int
build_packet(struct vport *vport, const struct mutable_config *mutable,
	     struct iphdr *iph, struct rtable *rt, int max_headroom, int mtu,
	     struct sk_buff *skb)
{
	int err;
	struct iphdr *new_iph;
	int orig_len = skb->len;
	__be16 frag_off = iph->frag_off;

	skb = check_headroom(skb, max_headroom);
	if (unlikely(IS_ERR(skb)))
		goto error;

	err = handle_csum_offload(skb);
	if (err)
		goto error_free;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *old_iph = ip_hdr(skb);

		/* Honor the inner DF bit: report instead of fragmenting. */
		if ((old_iph->frag_off & htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			if (send_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
				goto error_free;
		}

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		unsigned int packet_length = skb->len - ETH_HLEN
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

		/* IPv6 requires PMTUD if the packet is above the minimum MTU. */
		if (packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (mtu < packet_length) {
			if (send_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
				goto error_free;
		}
	}

	/* Prepend the outer IP header from the caller's template. */
	skb_reset_transport_header(skb);
	new_iph = (struct iphdr *)skb_push(skb, mutable->tunnel_hlen);
	skb_reset_network_header(skb);

	memcpy(new_iph, iph, sizeof(struct iphdr));
	new_iph->frag_off = frag_off;
	ip_select_ident(new_iph, &rt->u.dst, NULL);

	create_gre_header(skb, mutable);

	/* Allow our local IP stack to fragment the outer packet even if the
	 * DF bit is set as a last resort. */
	skb->local_df = 1;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags = 0;

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0))
		return orig_len;
	else {
		vport_record_error(vport, VPORT_E_TX_ERROR);
		return 0;
	}

error_free:
	kfree_skb(skb);
error:
	vport_record_error(vport, VPORT_E_TX_DROPPED);

	return 0;
}
991
/* Transmit path for a GRE vport: validates the inner headers, builds the
 * outer IP header template (ToS, TTL, DF, route), then encapsulates and
 * sends each GSO segment via build_packet().
 *
 * Returns the number of bytes sent or 0 on error; consumes 'skb'. */
static int
gre_send(struct vport *vport, struct sk_buff *skb)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	const struct mutable_config *mutable = rcu_dereference(gre_vport->mutable);

	struct iphdr *old_iph;
	struct ipv6hdr *old_ipv6h;
	int orig_len;
	struct iphdr iph;	/* Template for the outer IP header. */
	struct rtable *rt;
	int max_headroom;
	int mtu;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	/* If the network header cannot be pulled, treat the packet as
	 * non-IP (protocol 0) so the inherit logic below ignores it. */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
		    + sizeof(struct iphdr) - skb->data)))
			skb->protocol = 0;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
		    + sizeof(struct ipv6hdr) - skb->data)))
			skb->protocol = 0;
	}

	old_iph = ip_hdr(skb);
	old_ipv6h = ipv6_hdr(skb);

	/* ToS: fixed, or inherited from the inner packet; then fold in ECN. */
	iph.tos = mutable->port_config.tos;
	if (mutable->port_config.flags & GRE_F_TOS_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			iph.tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph.tos = ipv6_get_dsfield(ipv6_hdr(skb));
	}
	iph.tos = ecn_encapsulate(iph.tos, skb);

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = mutable->port_config.daddr,
						.saddr = mutable->port_config.saddr,
						.tos = RT_TOS(iph.tos) } },
				    .proto = IPPROTO_GRE };

		if (ip_route_output_key(&init_net, &rt, &fl))
			goto error_free;
	}

	/* TTL: fixed, inherited, or the route's default as a fallback. */
	iph.ttl = mutable->port_config.ttl;
	if (mutable->port_config.flags & GRE_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			iph.ttl = old_iph->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph.ttl = old_ipv6h->hop_limit;
	}
	if (!iph.ttl)
		iph.ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);

	/* With PMTUD enabled, the effective MTU is the path MTU minus all
	 * encapsulation overhead; otherwise the configured MTU applies. */
	iph.frag_off = (mutable->port_config.flags & GRE_F_PMTUD) ? htons(IP_DF) : 0;
	if (iph.frag_off)
		mtu = dst_mtu(&rt->u.dst)
		      - ETH_HLEN
		      - mutable->tunnel_hlen
		      - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
	else
		mtu = mutable->mtu;

	if (skb->protocol == htons(ETH_P_IP)) {
		iph.frag_off |= old_iph->frag_off & htons(IP_DF);
		mtu = max(mtu, IP_MIN_MTU);

	} else if (skb->protocol == htons(ETH_P_IPV6))
		mtu = max(mtu, IPV6_MIN_MTU);

	iph.version = 4;
	iph.ihl = sizeof(struct iphdr) >> 2;
	iph.protocol = IPPROTO_GRE;
	iph.daddr = rt->rt_dst;
	iph.saddr = rt->rt_src;

	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->u.dst);

	/* If we are doing GSO on a pskb it is better to make sure that the
	 * headroom is correct now. We will only have to copy the portion in
	 * the linear data area and GSO will preserve headroom when it creates
	 * the segments. This is particularly beneficial on Xen where we get
	 * lots of GSO pskbs. Conversely, we delay copying if it is just to
	 * get our own writable clone because GSO may do the copy for us. */
	max_headroom = LL_RESERVED_SPACE(rt->u.dst.dev) + rt->u.dst.header_len
		       + mutable->tunnel_hlen;

	if (skb_headroom(skb) < max_headroom) {
		skb = check_headroom(skb, max_headroom);
		if (unlikely(IS_ERR(skb))) {
			vport_record_error(vport, VPORT_E_TX_DROPPED);
			goto error;
		}
	}

	forward_ip_summed(skb);
	vswitch_skb_checksum_setup(skb);

	skb = handle_gso(skb);
	if (unlikely(IS_ERR(skb))) {
		vport_record_error(vport, VPORT_E_TX_DROPPED);
		goto error;
	}

	/* Process GSO segments. Try to do any work for the entire packet that
	 * doesn't involve actually writing to it before this point. */
	orig_len = 0;
	do {
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		orig_len += build_packet(vport, mutable, &iph, rt, max_headroom, mtu, skb);

		skb = next_skb;
	} while (skb);

	return orig_len;

error_free:
	kfree_skb(skb);
	vport_record_error(vport, VPORT_E_TX_ERROR);
error:
	return 0;
}
1131
/* Registration record hooking gre_rcv()/gre_err() into the IPv4 stack for
 * IPPROTO_GRE; see gre_init()/gre_exit(). */
static struct net_protocol gre_protocol_handlers = {
	.handler = gre_rcv,
	.err_handler = gre_err,
};
1136
1137 static int
1138 gre_init(void)
1139 {
1140 int err;
1141
1142 err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
1143 if (err)
1144 printk(KERN_WARNING "openvswitch: cannot register gre protocol handler\n");
1145
1146 return err;
1147 }
1148
1149 static void
1150 gre_exit(void)
1151 {
1152 tbl_destroy(port_table, NULL);
1153 inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
1154 }
1155
/* Copy and validate a userspace GRE port configuration into 'mutable'
 * and precompute the tunnel header length.
 *
 * 'cur_vport' is the vport being reconfigured (NULL when creating a new
 * port); a port is allowed to collide with its own lookup tuple.
 *
 * Returns 0 on success or a negative errno:
 *   -EFAULT  config could not be copied from userspace
 *   -EINVAL  no destination IP address supplied
 *   -EEXIST  a different port already owns the same (saddr, daddr, key)
 *            lookup tuple
 *
 * tunnel_hlen is the total encapsulation overhead in bytes: the outer IP
 * header plus one 4-byte GRE section each for the base header, the
 * optional checksum, and the optional key. */
static int
set_config(const struct vport *cur_vport, struct mutable_config *mutable,
	   const void __user *uconfig)
{
	const struct vport *old_vport;
	const struct mutable_config *old_mutable;
	int port_type;

	if (copy_from_user(&mutable->port_config, uconfig, sizeof(struct gre_port_config)))
		return -EFAULT;

	if (mutable->port_config.daddr == 0)
		return -EINVAL;

	if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH) {
		port_type = FIND_PORT_MATCH;
		/* In match mode the key comes from each packet, so it is
		 * meaningless in the lookup tuple; canonicalize to zero. */
		mutable->port_config.in_key = 0;
	} else
		port_type = FIND_PORT_KEY;

	/* Make sure no other port already claims this lookup tuple. */
	old_vport = find_port(mutable->port_config.saddr,
			      mutable->port_config.daddr,
			      mutable->port_config.in_key, port_type,
			      &old_mutable);

	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	/* The output key will be supplied per-flow by actions. */
	if (mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
		mutable->port_config.out_key = 0;

	/* Base encapsulation: outer IP header + 4-byte GRE base header. */
	mutable->tunnel_hlen = sizeof(struct iphdr) + GRE_HEADER_SECTION;

	if (mutable->port_config.flags & GRE_F_OUT_CSUM)
		mutable->tunnel_hlen += GRE_HEADER_SECTION;

	if (mutable->port_config.out_key ||
	    mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
		mutable->tunnel_hlen += GRE_HEADER_SECTION;

	return 0;
}
1198
1199 static struct vport *
1200 gre_create(const char *name, const void __user *config)
1201 {
1202 struct vport *vport;
1203 struct gre_vport *gre_vport;
1204 int err;
1205
1206 vport = vport_alloc(sizeof(struct gre_vport), &gre_vport_ops);
1207 if (IS_ERR(vport)) {
1208 err = PTR_ERR(vport);
1209 goto error;
1210 }
1211
1212 gre_vport = gre_vport_priv(vport);
1213
1214 strcpy(gre_vport->name, name);
1215
1216 gre_vport->mutable = kmalloc(sizeof(struct mutable_config), GFP_KERNEL);
1217 if (!gre_vport->mutable) {
1218 err = -ENOMEM;
1219 goto error_free_vport;
1220 }
1221
1222 vport_gen_rand_ether_addr(gre_vport->mutable->eth_addr);
1223 gre_vport->mutable->mtu = ETH_DATA_LEN;
1224
1225 err = set_config(NULL, gre_vport->mutable, config);
1226 if (err)
1227 goto error_free_mutable;
1228
1229 err = add_port(vport);
1230 if (err)
1231 goto error_free_mutable;
1232
1233 return vport;
1234
1235 error_free_mutable:
1236 kfree(gre_vport->mutable);
1237 error_free_vport:
1238 vport_free(vport);
1239 error:
1240 return ERR_PTR(err);
1241 }
1242
1243 static int
1244 gre_modify(struct vport *vport, const void __user *config)
1245 {
1246 struct gre_vport *gre_vport = gre_vport_priv(vport);
1247 struct mutable_config *mutable;
1248 int err;
1249 int update_hash = 0;
1250
1251 mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
1252 if (!mutable) {
1253 err = -ENOMEM;
1254 goto error;
1255 }
1256
1257 err = set_config(vport, mutable, config);
1258 if (err)
1259 goto error_free;
1260
1261 /* Only remove the port from the hash table if something that would
1262 * affect the lookup has changed. */
1263 if (gre_vport->mutable->port_config.saddr != mutable->port_config.saddr ||
1264 gre_vport->mutable->port_config.daddr != mutable->port_config.daddr ||
1265 gre_vport->mutable->port_config.in_key != mutable->port_config.in_key ||
1266 (gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH) !=
1267 (mutable->port_config.flags & GRE_F_IN_KEY_MATCH))
1268 update_hash = 1;
1269
1270
1271 /* This update is not atomic but the lookup uses the config, which
1272 * serves as an inherent double check. */
1273 if (update_hash) {
1274 err = del_port(vport);
1275 if (err)
1276 goto error_free;
1277 }
1278
1279 assign_config_rcu(vport, mutable);
1280
1281 if (update_hash) {
1282 err = add_port(vport);
1283 if (err)
1284 goto error_free;
1285 }
1286
1287 return 0;
1288
1289 error_free:
1290 kfree(mutable);
1291 error:
1292 return err;
1293 }
1294
1295 static int
1296 gre_destroy(struct vport *vport)
1297 {
1298 struct gre_vport *gre_vport = gre_vport_priv(vport);
1299 int port_type;
1300 const struct mutable_config *old_mutable;
1301
1302 /* Do a hash table lookup to make sure that the port exists. It should
1303 * exist but might not if a modify failed earlier. */
1304 if (gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH)
1305 port_type = FIND_PORT_MATCH;
1306 else
1307 port_type = FIND_PORT_KEY;
1308
1309 if (vport == find_port(gre_vport->mutable->port_config.saddr,
1310 gre_vport->mutable->port_config.daddr,
1311 gre_vport->mutable->port_config.in_key, port_type, &old_mutable))
1312 del_port(vport);
1313
1314 kfree(gre_vport->mutable);
1315 vport_free(vport);
1316
1317 return 0;
1318 }
1319
1320 static int
1321 gre_set_mtu(struct vport *vport, int mtu)
1322 {
1323 struct gre_vport *gre_vport = gre_vport_priv(vport);
1324 struct mutable_config *mutable;
1325
1326 mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
1327 if (!mutable)
1328 return -ENOMEM;
1329
1330 mutable->mtu = mtu;
1331 assign_config_rcu(vport, mutable);
1332
1333 return 0;
1334 }
1335
1336 static int
1337 gre_set_addr(struct vport *vport, const unsigned char *addr)
1338 {
1339 struct gre_vport *gre_vport = gre_vport_priv(vport);
1340 struct mutable_config *mutable;
1341
1342 mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
1343 if (!mutable)
1344 return -ENOMEM;
1345
1346 memcpy(mutable->eth_addr, addr, ETH_ALEN);
1347 assign_config_rcu(vport, mutable);
1348
1349 return 0;
1350 }
1351
1352
1353 static const char *
1354 gre_get_name(const struct vport *vport)
1355 {
1356 const struct gre_vport *gre_vport = gre_vport_priv(vport);
1357 return gre_vport->name;
1358 }
1359
1360 static const unsigned char *
1361 gre_get_addr(const struct vport *vport)
1362 {
1363 const struct gre_vport *gre_vport = gre_vport_priv(vport);
1364 return rcu_dereference(gre_vport->mutable)->eth_addr;
1365 }
1366
1367 static int
1368 gre_get_mtu(const struct vport *vport)
1369 {
1370 const struct gre_vport *gre_vport = gre_vport_priv(vport);
1371 return rcu_dereference(gre_vport->mutable)->mtu;
1372 }
1373
/* Vport operations vtable for the "gre" tunnel type.  VPORT_F_GEN_STATS
 * requests generic statistics handling; VPORT_F_TUN_ID marks this as a
 * tunnel port carrying a tunnel ID.  Operations with no GRE-specific
 * behavior fall back to the vport-generic implementations. */
struct vport_ops gre_vport_ops = {
	.type		= "gre",
	.flags		= VPORT_F_GEN_STATS | VPORT_F_TUN_ID,
	.init		= gre_init,
	.exit		= gre_exit,
	.create		= gre_create,
	.modify		= gre_modify,
	.destroy	= gre_destroy,
	.set_mtu	= gre_set_mtu,
	.set_addr	= gre_set_addr,
	.get_name	= gre_get_name,
	.get_addr	= gre_get_addr,
	.get_dev_flags	= vport_gen_get_dev_flags,
	.is_running	= vport_gen_is_running,
	.get_operstate	= vport_gen_get_operstate,
	.get_mtu	= gre_get_mtu,
	.send		= gre_send,
};