/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "datapath.h"
#include "openvswitch/gre.h"
#include "table.h"
#include "vport.h"

/* The absolute minimum fragment size.  Note that there are many other
 * definitions of the minimum MTU. */
#define IP_MIN_MTU 68

/* The GRE header is composed of a series of sections: a base and then a
 * variable number of options. */
#define GRE_HEADER_SECTION 4

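/*
 * For reference, a sketch of the GRE header as this module uses it (RFC
 * 2784/2890 framing, with every optional section shown):
 *
 *      0                   15 16                  31
 *     +----------------------+----------------------+
 *     | flags (CSUM/KEY/SEQ) | protocol (ETH_P_TEB) |  base, 4 bytes
 *     +----------------------+----------------------+
 *     | checksum             | reserved             |  if GRE_CSUM
 *     +---------------------------------------------+
 *     | key                                         |  if GRE_KEY
 *     +---------------------------------------------+
 *     | sequence number                             |  if GRE_SEQ
 *     +---------------------------------------------+
 *
 * Each section is GRE_HEADER_SECTION (4) bytes, which is why the header
 * lengths below are computed by adding GRE_HEADER_SECTION per option.
 */
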
struct mutable_config {
        struct rcu_head rcu;

        unsigned char eth_addr[ETH_ALEN];
        unsigned int mtu;
        struct gre_port_config port_config;

        int tunnel_hlen; /* Tunnel header length. */
};

struct gre_vport {
        struct tbl_node tbl_node;

        char name[IFNAMSIZ];

        /* Protected by RCU. */
        struct mutable_config *mutable;
};

struct vport_ops gre_vport_ops;

/* Protected by RCU. */
static struct tbl *port_table;

/* These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened. */
static unsigned int key_local_remote_ports;
static unsigned int key_remote_ports;
static unsigned int local_remote_ports;
static unsigned int remote_ports;

static inline struct gre_vport *
gre_vport_priv(const struct vport *vport)
{
        return vport_priv(vport);
}

static inline struct vport *
gre_vport_to_vport(const struct gre_vport *gre_vport)
{
        return vport_from_priv(gre_vport);
}

static inline struct gre_vport *
gre_vport_table_cast(const struct tbl_node *node)
{
        return container_of(node, struct gre_vport, tbl_node);
}

/* RCU callback. */
static void
free_config(struct rcu_head *rcu)
{
        struct mutable_config *c = container_of(rcu, struct mutable_config, rcu);
        kfree(c);
}

static void
assign_config_rcu(struct vport *vport, struct mutable_config *new_config)
{
        struct gre_vport *gre_vport = gre_vport_priv(vport);
        struct mutable_config *old_config;

        old_config = rcu_dereference(gre_vport->mutable);
        rcu_assign_pointer(gre_vport->mutable, new_config);
        call_rcu(&old_config->rcu, free_config);
}

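/*
 * Note on the update pattern above: readers dereference gre_vport->mutable
 * under rcu_read_lock() and never block, so writers publish a complete
 * replacement config with rcu_assign_pointer() and defer freeing the old
 * one with call_rcu().  A typical writer therefore looks like (sketch):
 *
 *      new = kmemdup(gre_vport->mutable, sizeof *new, GFP_KERNEL);
 *      new->mtu = mtu;                 // modify only the private copy
 *      assign_config_rcu(vport, new);  // publish; old freed after grace period
 *
 * which is exactly the shape of gre_set_mtu() and gre_set_addr() below.
 */
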
static unsigned int *
find_port_pool(const struct mutable_config *mutable)
{
        if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH) {
                if (mutable->port_config.saddr)
                        return &local_remote_ports;
                else
                        return &remote_ports;
        } else {
                if (mutable->port_config.saddr)
                        return &key_local_remote_ports;
                else
                        return &key_remote_ports;
        }
}

enum lookup_key {
        LOOKUP_SADDR = 0,
        LOOKUP_DADDR = 1,
        LOOKUP_KEY = 2,
        LOOKUP_KEY_MATCH = 3
};

struct port_lookup_key {
        u32 vals[4]; /* Contains enum lookup_key keys. */
        const struct mutable_config *mutable;
};

/* Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison. */
static int
port_cmp(const struct tbl_node *node, void *target)
{
        const struct gre_vport *gre_vport = gre_vport_table_cast(node);
        struct port_lookup_key *lookup = target;

        lookup->mutable = rcu_dereference(gre_vport->mutable);

        return ((lookup->mutable->port_config.flags & GRE_F_IN_KEY_MATCH) ==
                lookup->vals[LOOKUP_KEY_MATCH]) &&
               lookup->mutable->port_config.daddr == lookup->vals[LOOKUP_DADDR] &&
               lookup->mutable->port_config.in_key == lookup->vals[LOOKUP_KEY] &&
               lookup->mutable->port_config.saddr == lookup->vals[LOOKUP_SADDR];
}

static u32
port_hash(struct port_lookup_key *lookup)
{
        return jhash2(lookup->vals, ARRAY_SIZE(lookup->vals), 0);
}

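/*
 * port_hash() covers all four words of the lookup key, so a single hash
 * probe cannot match "any source address" entries directly.  Wildcard
 * lookups instead zero LOOKUP_SADDR (and set LOOKUP_KEY/LOOKUP_KEY_MATCH
 * as appropriate) and probe the table again, which is what find_port()
 * below does, using the per-pool counters to skip probes that cannot
 * possibly match.
 */
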
static int
add_port(struct vport *vport)
{
        struct gre_vport *gre_vport = gre_vport_priv(vport);
        struct port_lookup_key lookup;
        int err;

        if (!port_table) {
                struct tbl *new_table;

                new_table = tbl_create(0);
                if (!new_table)
                        return -ENOMEM;

                rcu_assign_pointer(port_table, new_table);

        } else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
                struct tbl *old_table = port_table;
                struct tbl *new_table;

                new_table = tbl_expand(old_table);
                if (IS_ERR(new_table))
                        return PTR_ERR(new_table);

                rcu_assign_pointer(port_table, new_table);
                tbl_deferred_destroy(old_table, NULL);
        }

        lookup.vals[LOOKUP_SADDR] = gre_vport->mutable->port_config.saddr;
        lookup.vals[LOOKUP_DADDR] = gre_vport->mutable->port_config.daddr;
        lookup.vals[LOOKUP_KEY] = gre_vport->mutable->port_config.in_key;
        lookup.vals[LOOKUP_KEY_MATCH] = gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH;

        err = tbl_insert(port_table, &gre_vport->tbl_node, port_hash(&lookup));
        if (err)
                return err;

        (*find_port_pool(gre_vport->mutable))++;

        return 0;
}

static int
del_port(struct vport *vport)
{
        struct gre_vport *gre_vport = gre_vport_priv(vport);
        int err;

        err = tbl_remove(port_table, &gre_vport->tbl_node);
        if (err)
                return err;

        (*find_port_pool(gre_vport->mutable))--;

        return 0;
}

#define FIND_PORT_KEY (1 << 0)
#define FIND_PORT_MATCH (1 << 1)
#define FIND_PORT_ANY (FIND_PORT_KEY | FIND_PORT_MATCH)

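/*
 * find_port() probes the table in decreasing order of specificity.  For a
 * received packet with local address S, remote address D, and key K, the
 * probe order is, conceptually:
 *
 *      1. (S, D, K) exact-key port          -- key_local_remote_ports
 *      2. (*, D, K) exact-key port          -- key_remote_ports
 *      3. (S, D, flow-based match)          -- local_remote_ports
 *      4. (*, D, flow-based match)          -- remote_ports
 *
 * The first hit wins, so a fully specified port always shadows a wildcard
 * one, and the global counters let probes for empty pools be skipped.
 */
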
static struct vport *
find_port(__be32 saddr, __be32 daddr, __be32 key, int port_type,
          const struct mutable_config **mutable)
{
        struct port_lookup_key lookup;
        struct tbl *table = rcu_dereference(port_table);
        struct tbl_node *tbl_node;

        if (!table)
                return NULL;

        lookup.vals[LOOKUP_SADDR] = saddr;
        lookup.vals[LOOKUP_DADDR] = daddr;

        if (port_type & FIND_PORT_KEY) {
                lookup.vals[LOOKUP_KEY] = key;
                lookup.vals[LOOKUP_KEY_MATCH] = 0;

                if (key_local_remote_ports) {
                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }

                if (key_remote_ports) {
                        lookup.vals[LOOKUP_SADDR] = 0;

                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;

                        lookup.vals[LOOKUP_SADDR] = saddr;
                }
        }

        if (port_type & FIND_PORT_MATCH) {
                lookup.vals[LOOKUP_KEY] = 0;
                lookup.vals[LOOKUP_KEY_MATCH] = GRE_F_IN_KEY_MATCH;

                if (local_remote_ports) {
                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }

                if (remote_ports) {
                        lookup.vals[LOOKUP_SADDR] = 0;

                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }
        }

        return NULL;

found:
        *mutable = lookup.mutable;
        return gre_vport_to_vport(gre_vport_table_cast(tbl_node));
}

static bool
check_ipv4_address(__be32 addr)
{
        if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
            || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
                return false;

        return true;
}

static bool
ipv4_should_icmp(struct sk_buff *skb)
{
        struct iphdr *old_iph = ip_hdr(skb);

        /* Don't respond to L2 broadcast. */
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        /* Don't respond to L3 broadcast or invalid addresses. */
        if (!check_ipv4_address(old_iph->daddr) ||
            !check_ipv4_address(old_iph->saddr))
                return false;

        /* Only respond to the first fragment. */
        if (old_iph->frag_off & htons(IP_OFFSET))
                return false;

        /* Don't respond to ICMP error messages. */
        if (old_iph->protocol == IPPROTO_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
                                                (old_iph->ihl << 2) +
                                                offsetof(struct icmphdr, type) -
                                                skb->data, sizeof(icmp_type),
                                                &icmp_type);

                if (!icmp_typep)
                        return false;

                if (*icmp_typep > NR_ICMP_TYPES
                    || (*icmp_typep <= ICMP_PARAMETERPROB
                        && *icmp_typep != ICMP_ECHOREPLY
                        && *icmp_typep != ICMP_ECHO))
                        return false;
        }

        return true;
}

static void
ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                unsigned int mtu, unsigned int payload_length)
{
        struct iphdr *iph, *old_iph = ip_hdr(skb);
        struct icmphdr *icmph;
        u8 *payload;

        iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
        payload = skb_put(nskb, payload_length);

        /* IP */
        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
                   IPTOS_PREC_INTERNETCONTROL;
        iph->tot_len = htons(sizeof(struct iphdr)
                             + sizeof(struct icmphdr)
                             + payload_length);
        get_random_bytes(&iph->id, sizeof(iph->id));
        iph->frag_off = 0;
        iph->ttl = IPDEFTTL;
        iph->protocol = IPPROTO_ICMP;
        iph->daddr = old_iph->saddr;
        iph->saddr = old_iph->daddr;

        ip_send_check(iph);

        /* ICMP */
        icmph->type = ICMP_DEST_UNREACH;
        icmph->code = ICMP_FRAG_NEEDED;
        icmph->un.gateway = htonl(mtu);
        icmph->checksum = 0;

        nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmph->checksum = csum_fold(nskb->csum);
}

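/*
 * The ICMP checksum above is built incrementally: csum_partial() sums the
 * ICMP header (with its checksum field zeroed), skb_copy_and_csum_bits()
 * then copies the quoted payload out of the original skb while folding it
 * into the running sum, and csum_fold() collapses the 32-bit accumulator
 * into the final 16-bit ones'-complement value.  ipv6_build_icmp() below
 * follows the same pattern, finishing with csum_ipv6_magic() because the
 * ICMPv6 checksum also covers an IPv6 pseudo-header.
 */
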
static bool
ipv6_should_icmp(struct sk_buff *skb)
{
        struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
        int addr_type;
        int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
        u8 nexthdr = ipv6_hdr(skb)->nexthdr;

        /* Check source address is valid. */
        addr_type = ipv6_addr_type(&old_ipv6h->saddr);
        if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
                return false;

        /* Don't reply to unspecified addresses. */
        if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
                return false;

        /* Don't respond to ICMP error messages. */
        payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
        if (payload_off < 0)
                return false;

        if (nexthdr == NEXTHDR_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, payload_off +
                                                offsetof(struct icmp6hdr,
                                                         icmp6_type),
                                                sizeof(icmp_type), &icmp_type);

                if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
                        return false;
        }

        return true;
}

static void
ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb, unsigned int mtu,
                unsigned int payload_length)
{
        struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
        struct icmp6hdr *icmp6h;
        u8 *payload;

        ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
        payload = skb_put(nskb, payload_length);

        /* IPv6 */
        ipv6h->version = 6;
        ipv6h->priority = 0;
        memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
        ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
                                   + payload_length);
        ipv6h->nexthdr = NEXTHDR_ICMP;
        ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
        ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
        ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

        /* ICMPv6 */
        icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
        icmp6h->icmp6_code = 0;
        icmp6h->icmp6_cksum = 0;
        icmp6h->icmp6_mtu = htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                              sizeof(struct icmp6hdr)
                                              + payload_length,
                                              ipv6h->nexthdr, nskb->csum);
}

static bool
send_frag_needed(struct vport *vport, const struct mutable_config *mutable,
                 struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
{
        unsigned int eth_hdr_len = ETH_HLEN;
        unsigned int total_length, header_length, payload_length;
        struct ethhdr *eh, *old_eh = eth_hdr(skb);
        struct sk_buff *nskb;

        /* Sanity check */
        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU)
                        return false;

                if (!ipv4_should_icmp(skb))
                        return true;
        } else {
                if (mtu < IPV6_MIN_MTU)
                        return false;

                /* In theory we should do PMTUD on IPv6 multicast messages but
                 * we don't have an address to send from so just fragment. */
                if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
                        return false;

                if (!ipv6_should_icmp(skb))
                        return true;
        }

        /* Allocate */
        if (old_eh->h_proto == htons(ETH_P_8021Q))
                eth_hdr_len = VLAN_ETH_HLEN;

        payload_length = skb->len - eth_hdr_len;
        if (skb->protocol == htons(ETH_P_IP)) {
                header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
                total_length = min_t(unsigned int, header_length +
                                     payload_length, 576);
        } else {
                header_length = sizeof(struct ipv6hdr) +
                                sizeof(struct icmp6hdr);
                total_length = min_t(unsigned int, header_length +
                                     payload_length, IPV6_MIN_MTU);
        }
        total_length = min(total_length, mutable->mtu);
        payload_length = total_length - header_length;

        nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
                             payload_length);
        if (!nskb)
                return false;

        skb_reserve(nskb, NET_IP_ALIGN);

        /* Ethernet / VLAN */
        eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
        memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
        memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
        nskb->protocol = eh->h_proto = old_eh->h_proto;
        if (old_eh->h_proto == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

                vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
                vh->h_vlan_encapsulated_proto = skb->protocol;
        }
        skb_reset_mac_header(nskb);

        /* Protocol */
        if (skb->protocol == htons(ETH_P_IP))
                ipv4_build_icmp(skb, nskb, mtu, payload_length);
        else
                ipv6_build_icmp(skb, nskb, mtu, payload_length);

        /* Assume that flow based keys are symmetric with respect to input
         * and output and use the key that we were going to put on the
         * outgoing packet for the fake received packet.  If the keys are
         * not symmetric then PMTUD needs to be disabled since we won't have
         * any way of synthesizing packets. */
        if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH &&
            mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
                OVS_CB(nskb)->tun_id = flow_key;

        compute_ip_summed(nskb, false);
        vport_receive(vport, nskb);

        return true;
}

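/*
 * send_frag_needed() is the PMTU feedback path: rather than transmitting
 * the ICMP error on the wire, it hands the synthesized frame to
 * vport_receive(), so the "fragmentation needed" (IPv4) or "packet too
 * big" (IPv6) message flows back through the datapath as if the far end
 * of the tunnel had sent it.  The 576-byte cap on the IPv4 error keeps it
 * within the datagram size every IPv4 host must accept; IPV6_MIN_MTU
 * (1280) plays the same role for IPv6.
 */
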
static struct sk_buff *
check_headroom(struct sk_buff *skb, int headroom)
{
        if (skb_headroom(skb) < headroom ||
            (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
                struct sk_buff *nskb = skb_realloc_headroom(skb, headroom);
                if (!nskb) {
                        kfree_skb(skb);
                        return ERR_PTR(-ENOMEM);
                }

                set_skb_csum_bits(skb, nskb);

                if (skb->sk)
                        skb_set_owner_w(nskb, skb->sk);

                dev_kfree_skb(skb);
                return nskb;
        }

        return skb;
}

static void
create_gre_header(struct sk_buff *skb, const struct mutable_config *mutable)
{
        struct iphdr *iph = ip_hdr(skb);
        __be16 *flags = (__be16 *)(iph + 1);
        __be16 *protocol = flags + 1;
        __be32 *options = (__be32 *)((u8 *)iph + mutable->tunnel_hlen
                                     - GRE_HEADER_SECTION);

        *protocol = htons(ETH_P_TEB);
        *flags = 0;

        /* Work backwards over the options so the checksum is last. */
        if (mutable->port_config.out_key ||
            mutable->port_config.flags & GRE_F_OUT_KEY_ACTION) {
                *flags |= GRE_KEY;

                if (mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
                        *options = OVS_CB(skb)->tun_id;
                else
                        *options = mutable->port_config.out_key;

                options--;
        }

        if (mutable->port_config.flags & GRE_F_OUT_CSUM) {
                *flags |= GRE_CSUM;

                *options = 0;
                *(__sum16 *)options = csum_fold(skb_checksum(skb,
                                                sizeof(struct iphdr),
                                                skb->len - sizeof(struct iphdr),
                                                0));
        }
}

static int
check_checksum(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);
        __be16 flags = *(__be16 *)(iph + 1);
        __sum16 csum = 0;

        if (flags & GRE_CSUM) {
                switch (skb->ip_summed) {
                case CHECKSUM_COMPLETE:
                        csum = csum_fold(skb->csum);

                        if (!csum)
                                break;
                        /* Fall through. */

                case CHECKSUM_NONE:
                        skb->csum = 0;
                        csum = __skb_checksum_complete(skb);
                        skb->ip_summed = CHECKSUM_COMPLETE;
                        break;
                }
        }

        return (csum == 0);
}

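/*
 * check_checksum() trusts hardware where it can: with CHECKSUM_COMPLETE
 * the device already summed the packet, so a zero csum_fold() proves the
 * GRE checksum without touching the payload.  A nonzero fold is not
 * trusted outright; it falls through and is re-verified the slow way with
 * __skb_checksum_complete(), the same path taken when the device provided
 * no checksum at all (CHECKSUM_NONE).
 */
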
static int
parse_gre_header(struct iphdr *iph, __be16 *flags, __be32 *key)
{
        /* IP and ICMP protocol handlers check that the IHL is valid. */
        __be16 *flagsp = (__be16 *)((u8 *)iph + (iph->ihl << 2));
        __be16 *protocol = flagsp + 1;
        __be32 *options = (__be32 *)(protocol + 1);
        int hdr_len;

        *flags = *flagsp;

        if (*flags & (GRE_VERSION | GRE_ROUTING))
                return -EINVAL;

        if (*protocol != htons(ETH_P_TEB))
                return -EINVAL;

        hdr_len = GRE_HEADER_SECTION;

        if (*flags & GRE_CSUM) {
                hdr_len += GRE_HEADER_SECTION;
                options++;
        }

        if (*flags & GRE_KEY) {
                hdr_len += GRE_HEADER_SECTION;

                *key = *options;
                options++;
        } else
                *key = 0;

        if (*flags & GRE_SEQ)
                hdr_len += GRE_HEADER_SECTION;

        return hdr_len;
}

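/*
 * Worked example: for a header with GRE_CSUM and GRE_KEY set (but not
 * GRE_SEQ), parse_gre_header() returns 4 (base) + 4 (checksum section) +
 * 4 (key section) = 12 bytes, with 'options' advanced past the checksum
 * so that *key reads the correct word.  This mirrors create_gre_header(),
 * which fills the same sections in reverse, and set_config(), which
 * computes tunnel_hlen with the same arithmetic.
 */
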
static inline u8
ecn_encapsulate(u8 tos, struct sk_buff *skb)
{
        u8 inner;

        if (skb->protocol == htons(ETH_P_IP))
                inner = ((struct iphdr *)skb_network_header(skb))->tos;
        else if (skb->protocol == htons(ETH_P_IPV6))
                inner = ipv6_get_dsfield((struct ipv6hdr *)skb_network_header(skb));
        else
                inner = 0;

        return INET_ECN_encapsulate(tos, inner);
}

static inline void
ecn_decapsulate(u8 tos, struct sk_buff *skb)
{
        if (INET_ECN_is_ce(tos)) {
                __be16 protocol = skb->protocol;
                unsigned int nw_header = skb_network_header(skb) - skb->data;

                if (skb->protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;

                        protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                        nw_header += VLAN_HLEN;
                }

                if (protocol == htons(ETH_P_IP)) {
                        if (unlikely(!pskb_may_pull(skb, nw_header
                                                    + sizeof(struct iphdr))))
                                return;

                        IP_ECN_set_ce((struct iphdr *)(nw_header + skb->data));
                } else if (protocol == htons(ETH_P_IPV6)) {
                        if (unlikely(!pskb_may_pull(skb, nw_header
                                                    + sizeof(struct ipv6hdr))))
                                return;

                        IP6_ECN_set_ce((struct ipv6hdr *)(nw_header
                                                          + skb->data));
                }
        }
}

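/*
 * Together these implement the usual ECN tunnelling behavior: on
 * encapsulation the outer TOS inherits the inner ECN bits (via
 * INET_ECN_encapsulate()), and on decapsulation a Congestion Experienced
 * mark on the outer header is copied onto the inner IPv4 or IPv6 header
 * so the congestion signal survives the tunnel.
 */
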
static struct sk_buff *
handle_gso(struct sk_buff *skb)
{
        if (skb_is_gso(skb)) {
                struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG);

                dev_kfree_skb(skb);
                return nskb;
        }

        return skb;
}

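/*
 * skb_gso_segment() returns the segments as a singly linked list chained
 * through skb->next (or an ERR_PTR on failure); the original oversized
 * skb is freed here.  gre_send() below walks that list, detaching and
 * encapsulating one segment at a time via build_packet().
 */
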
static int
handle_csum_offload(struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                return skb_checksum_help(skb);
        else {
                skb->ip_summed = CHECKSUM_NONE;
                return 0;
        }
}

/* Called with rcu_read_lock. */
static void
gre_err(struct sk_buff *skb, u32 info)
{
        struct vport *vport;
        const struct mutable_config *mutable;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

        struct iphdr *iph;
        __be16 flags;
        __be32 key;
        int tunnel_hdr_len, tot_hdr_len;
        unsigned int orig_mac_header;
        unsigned int orig_nw_header;

        if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
                return;

        /* The minimum size packet that we would actually be able to process:
         * encapsulating IP header, minimum GRE header, Ethernet header,
         * inner IPv4 header. */
        if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
                                ETH_HLEN + sizeof(struct iphdr)))
                return;

        iph = (struct iphdr *)skb->data;

        tunnel_hdr_len = parse_gre_header(iph, &flags, &key);
        if (tunnel_hdr_len < 0)
                return;

        vport = find_port(iph->saddr, iph->daddr, key, FIND_PORT_ANY, &mutable);
        if (!vport)
                return;

        /* Packets received by this function were previously sent by us, so
         * any comparisons should be to the output values, not the input.
         * However, it's not really worth it to have a hash table based on
         * output keys (especially since ICMP error handling of tunneled packets
         * isn't that reliable anyway).  Therefore, we do a lookup based on the
         * out key as if it were the in key and then check to see if the input
         * and output keys are the same. */
        if (mutable->port_config.in_key != mutable->port_config.out_key)
                return;

        if (!!(mutable->port_config.flags & GRE_F_IN_KEY_MATCH) !=
            !!(mutable->port_config.flags & GRE_F_OUT_KEY_ACTION))
                return;

        if ((mutable->port_config.flags & GRE_F_OUT_CSUM) && !(flags & GRE_CSUM))
                return;

        tunnel_hdr_len += iph->ihl << 2;

        orig_mac_header = skb_mac_header(skb) - skb->data;
        orig_nw_header = skb_network_header(skb) - skb->data;
        skb_set_mac_header(skb, tunnel_hdr_len);

        tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

        skb->protocol = eth_hdr(skb)->h_proto;
        if (skb->protocol == htons(ETH_P_8021Q)) {
                tot_hdr_len += VLAN_HLEN;
                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
        }

        skb_set_network_header(skb, tot_hdr_len);
        mtu -= tot_hdr_len;

        if (skb->protocol == htons(ETH_P_IP))
                tot_hdr_len += sizeof(struct iphdr);
        else if (skb->protocol == htons(ETH_P_IPV6))
                tot_hdr_len += sizeof(struct ipv6hdr);
        else
                goto out;

        if (!pskb_may_pull(skb, tot_hdr_len))
                goto out;

        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU) {
                        if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
                                mtu = IP_MIN_MTU;
                        else
                                goto out;
                }

        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU) {
                        unsigned int packet_length = sizeof(struct ipv6hdr) +
                                                     ntohs(ipv6_hdr(skb)->payload_len);

                        if (packet_length >= IPV6_MIN_MTU
                            || ntohs(ipv6_hdr(skb)->payload_len) == 0)
                                mtu = IPV6_MIN_MTU;
                        else
                                goto out;
                }
        }

        __pskb_pull(skb, tunnel_hdr_len);
        send_frag_needed(vport, mutable, skb, mtu, key);
        skb_push(skb, tunnel_hdr_len);

out:
        skb_set_mac_header(skb, orig_mac_header);
        skb_set_network_header(skb, orig_nw_header);
        skb->protocol = htons(ETH_P_IP);
}

/* Called with rcu_read_lock. */
static int
gre_rcv(struct sk_buff *skb)
{
        struct vport *vport;
        const struct mutable_config *mutable;
        int hdr_len;
        struct iphdr *iph;
        __be16 flags;
        __be32 key;

        if (!pskb_may_pull(skb, GRE_HEADER_SECTION + ETH_HLEN))
                goto error;

        if (!check_checksum(skb))
                goto error;

        iph = ip_hdr(skb);

        hdr_len = parse_gre_header(iph, &flags, &key);
        if (hdr_len < 0)
                goto error;

        vport = find_port(iph->daddr, iph->saddr, key, FIND_PORT_ANY, &mutable);
        if (!vport) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
                goto error;
        }

        if ((mutable->port_config.flags & GRE_F_IN_CSUM) && !(flags & GRE_CSUM)) {
                vport_record_error(vport, VPORT_E_RX_CRC);
                goto error;
        }

        if (!pskb_pull(skb, hdr_len) || !pskb_may_pull(skb, ETH_HLEN)) {
                vport_record_error(vport, VPORT_E_RX_ERROR);
                goto error;
        }

        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, skb->dev);
        skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

        skb_dst_drop(skb);
        nf_reset(skb);
        secpath_reset(skb);
        skb_reset_network_header(skb);

        ecn_decapsulate(iph->tos, skb);

        if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH)
                OVS_CB(skb)->tun_id = key;
        else
                OVS_CB(skb)->tun_id = 0;

        skb_push(skb, ETH_HLEN);
        compute_ip_summed(skb, false);

        vport_receive(vport, skb);

        return 0;

error:
        kfree_skb(skb);
        return 0;
}

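/*
 * Note the argument order in the two lookups above: gre_rcv() receives a
 * packet from the peer, so the outer daddr is our local (configured
 * source) address and saddr is the remote, hence
 * find_port(iph->daddr, iph->saddr, ...).  gre_err() instead examines a
 * quoted copy of a packet that we sent, whose header is already in our
 * orientation, so its addresses are passed through unswapped.
 */
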
static int
build_packet(struct vport *vport, const struct mutable_config *mutable,
             struct iphdr *iph, struct rtable *rt, int max_headroom, int mtu,
             struct sk_buff *skb)
{
        int err;
        struct iphdr *new_iph;
        int orig_len = skb->len;
        __be16 frag_off = iph->frag_off;

        skb = check_headroom(skb, max_headroom);
        if (unlikely(IS_ERR(skb)))
                goto error;

        err = handle_csum_offload(skb);
        if (err)
                goto error_free;

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *old_iph = ip_hdr(skb);

                if ((old_iph->frag_off & htons(IP_DF)) &&
                    mtu < ntohs(old_iph->tot_len)) {
                        if (send_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
                                goto error_free;
                }

        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                unsigned int packet_length = skb->len - ETH_HLEN
                        - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

                /* IPv6 requires PMTUD if the packet is above the minimum MTU. */
                if (packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);

                if (mtu < packet_length) {
                        if (send_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
                                goto error_free;
                }
        }

        skb_reset_transport_header(skb);
        new_iph = (struct iphdr *)skb_push(skb, mutable->tunnel_hlen);
        skb_reset_network_header(skb);

        memcpy(new_iph, iph, sizeof(struct iphdr));
        new_iph->frag_off = frag_off;
        ip_select_ident(new_iph, &rt->u.dst, NULL);

        create_gre_header(skb, mutable);

        /* Allow our local IP stack to fragment the outer packet even if the
         * DF bit is set as a last resort. */
        skb->local_df = 1;

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags = 0;

        err = ip_local_out(skb);
        if (likely(net_xmit_eval(err) == 0))
                return orig_len;
        else {
                vport_record_error(vport, VPORT_E_TX_ERROR);
                return 0;
        }

error_free:
        kfree_skb(skb);
error:
        vport_record_error(vport, VPORT_E_TX_DROPPED);

        return 0;
}

static int
gre_send(struct vport *vport, struct sk_buff *skb)
{
        struct gre_vport *gre_vport = gre_vport_priv(vport);
        const struct mutable_config *mutable = rcu_dereference(gre_vport->mutable);

        struct iphdr *old_iph;
        struct ipv6hdr *old_ipv6h;
        int orig_len;
        struct iphdr iph;
        struct rtable *rt;
        int max_headroom;
        int mtu;

        /* Validate the protocol headers before we try to use them. */
        if (skb->protocol == htons(ETH_P_8021Q)) {
                if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                        goto error_free;

                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, VLAN_ETH_HLEN);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
                                            + sizeof(struct iphdr) - skb->data)))
                        skb->protocol = 0;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
                                            + sizeof(struct ipv6hdr) - skb->data)))
                        skb->protocol = 0;
        }

        old_iph = ip_hdr(skb);
        old_ipv6h = ipv6_hdr(skb);

        iph.tos = mutable->port_config.tos;
        if (mutable->port_config.flags & GRE_F_TOS_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph.tos = old_iph->tos;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph.tos = ipv6_get_dsfield(ipv6_hdr(skb));
        }
        iph.tos = ecn_encapsulate(iph.tos, skb);

        {
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = mutable->port_config.daddr,
                                                .saddr = mutable->port_config.saddr,
                                                .tos = RT_TOS(iph.tos) } },
                                    .proto = IPPROTO_GRE };

                if (ip_route_output_key(&init_net, &rt, &fl))
                        goto error_free;
        }

        iph.ttl = mutable->port_config.ttl;
        if (mutable->port_config.flags & GRE_F_TTL_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph.ttl = old_iph->ttl;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph.ttl = old_ipv6h->hop_limit;
        }
        if (!iph.ttl)
                iph.ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);

        iph.frag_off = (mutable->port_config.flags & GRE_F_PMTUD) ? htons(IP_DF) : 0;
        if (iph.frag_off)
                mtu = dst_mtu(&rt->u.dst)
                      - ETH_HLEN
                      - mutable->tunnel_hlen
                      - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
        else
                mtu = mutable->mtu;

        if (skb->protocol == htons(ETH_P_IP)) {
                iph.frag_off |= old_iph->frag_off & htons(IP_DF);
                mtu = max(mtu, IP_MIN_MTU);

        } else if (skb->protocol == htons(ETH_P_IPV6))
                mtu = max(mtu, IPV6_MIN_MTU);

        iph.version = 4;
        iph.ihl = sizeof(struct iphdr) >> 2;
        iph.protocol = IPPROTO_GRE;
        iph.daddr = rt->rt_dst;
        iph.saddr = rt->rt_src;

        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->u.dst);

        /* If we are doing GSO on a pskb it is better to make sure that the
         * headroom is correct now.  We will only have to copy the portion in
         * the linear data area and GSO will preserve headroom when it creates
         * the segments.  This is particularly beneficial on Xen where we get
         * lots of GSO pskbs.  Conversely, we delay copying if it is just to
         * get our own writable clone because GSO may do the copy for us. */
        max_headroom = LL_RESERVED_SPACE(rt->u.dst.dev) + rt->u.dst.header_len
                       + mutable->tunnel_hlen;

        if (skb_headroom(skb) < max_headroom) {
                skb = check_headroom(skb, max_headroom);
                if (unlikely(IS_ERR(skb))) {
                        vport_record_error(vport, VPORT_E_TX_DROPPED);
                        goto error;
                }
        }

        forward_ip_summed(skb);
        vswitch_skb_checksum_setup(skb);

        skb = handle_gso(skb);
        if (unlikely(IS_ERR(skb))) {
                vport_record_error(vport, VPORT_E_TX_DROPPED);
                goto error;
        }

        /* Process GSO segments.  Try to do any work for the entire packet that
         * doesn't involve actually writing to it before this point. */
        orig_len = 0;
        do {
                struct sk_buff *next_skb = skb->next;
                skb->next = NULL;

                orig_len += build_packet(vport, mutable, &iph, rt, max_headroom, mtu, skb);

                skb = next_skb;
        } while (skb);

        return orig_len;

error_free:
        kfree_skb(skb);
        vport_record_error(vport, VPORT_E_TX_ERROR);
error:
        return 0;
}

static struct net_protocol gre_protocol_handlers = {
        .handler = gre_rcv,
        .err_handler = gre_err,
};

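/*
 * Registering gre_protocol_handlers with inet_add_protocol() claims IP
 * protocol 47 (IPPROTO_GRE) for the whole host.  Only one handler can own
 * a protocol number, so gre_init() below fails (and logs a warning) if
 * another GRE implementation, such as the kernel's ip_gre module, is
 * already loaded.
 */
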
static int
gre_init(void)
{
        int err;

        err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
        if (err)
                printk(KERN_WARNING "openvswitch: cannot register gre protocol handler\n");

        return err;
}

static void
gre_exit(void)
{
        tbl_destroy(port_table, NULL);
        inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}

static int
set_config(const struct vport *cur_vport, struct mutable_config *mutable,
           const void __user *uconfig)
{
        const struct vport *old_vport;
        const struct mutable_config *old_mutable;
        int port_type;

        if (copy_from_user(&mutable->port_config, uconfig, sizeof(struct gre_port_config)))
                return -EFAULT;

        if (mutable->port_config.daddr == 0)
                return -EINVAL;

        if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH) {
                port_type = FIND_PORT_MATCH;
                mutable->port_config.in_key = 0;
        } else
                port_type = FIND_PORT_KEY;

        old_vport = find_port(mutable->port_config.saddr,
                              mutable->port_config.daddr,
                              mutable->port_config.in_key, port_type,
                              &old_mutable);

        if (old_vport && old_vport != cur_vport)
                return -EEXIST;

        if (mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
                mutable->port_config.out_key = 0;

        mutable->tunnel_hlen = sizeof(struct iphdr) + GRE_HEADER_SECTION;

        if (mutable->port_config.flags & GRE_F_OUT_CSUM)
                mutable->tunnel_hlen += GRE_HEADER_SECTION;

        if (mutable->port_config.out_key ||
            mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
                mutable->tunnel_hlen += GRE_HEADER_SECTION;

        return 0;
}

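/*
 * tunnel_hlen example: a port configured with both GRE_F_OUT_CSUM and an
 * output key ends up with sizeof(struct iphdr) (20) + 4 (base GRE) + 4
 * (checksum) + 4 (key) = 32 bytes of encapsulation overhead, the amount
 * gre_send() subtracts (along with the Ethernet and any VLAN header)
 * from the route MTU when PMTUD is enabled.
 */
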
static struct vport *
gre_create(const char *name, const void __user *config)
{
        struct vport *vport;
        struct gre_vport *gre_vport;
        int err;

        vport = vport_alloc(sizeof(struct gre_vport), &gre_vport_ops);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        gre_vport = gre_vport_priv(vport);

        strcpy(gre_vport->name, name);

        gre_vport->mutable = kmalloc(sizeof(struct mutable_config), GFP_KERNEL);
        if (!gre_vport->mutable) {
                err = -ENOMEM;
                goto error_free_vport;
        }

        vport_gen_ether_addr(gre_vport->mutable->eth_addr);
        gre_vport->mutable->mtu = ETH_DATA_LEN;

        err = set_config(NULL, gre_vport->mutable, config);
        if (err)
                goto error_free_mutable;

        err = add_port(vport);
        if (err)
                goto error_free_mutable;

        return vport;

error_free_mutable:
        kfree(gre_vport->mutable);
error_free_vport:
        vport_free(vport);
error:
        return ERR_PTR(err);
}

static int
gre_modify(struct vport *vport, const void __user *config)
{
        struct gre_vport *gre_vport = gre_vport_priv(vport);
        struct mutable_config *mutable;
        int err;
        int update_hash = 0;

        mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error;
        }

        err = set_config(vport, mutable, config);
        if (err)
                goto error_free;

        /* Only remove the port from the hash table if something that would
         * affect the lookup has changed. */
        if (gre_vport->mutable->port_config.saddr != mutable->port_config.saddr ||
            gre_vport->mutable->port_config.daddr != mutable->port_config.daddr ||
            gre_vport->mutable->port_config.in_key != mutable->port_config.in_key ||
            (gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH) !=
            (mutable->port_config.flags & GRE_F_IN_KEY_MATCH))
                update_hash = 1;

        /* This update is not atomic but the lookup uses the config, which
         * serves as an inherent double check. */
        if (update_hash) {
                err = del_port(vport);
                if (err)
                        goto error_free;
        }

        assign_config_rcu(vport, mutable);

        if (update_hash) {
                err = add_port(vport);
                if (err)
                        goto error_free;
        }

        return 0;

error_free:
        kfree(mutable);
error:
        return err;
}

static int
gre_destroy(struct vport *vport)
{
        struct gre_vport *gre_vport = gre_vport_priv(vport);
        int port_type;
        const struct mutable_config *old_mutable;

        /* Do a hash table lookup to make sure that the port exists.  It should
         * exist but might not if a modify failed earlier. */
        if (gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH)
                port_type = FIND_PORT_MATCH;
        else
                port_type = FIND_PORT_KEY;

        if (vport == find_port(gre_vport->mutable->port_config.saddr,
                               gre_vport->mutable->port_config.daddr,
                               gre_vport->mutable->port_config.in_key, port_type, &old_mutable))
                del_port(vport);

        kfree(gre_vport->mutable);
        vport_free(vport);

        return 0;
}

static int
gre_set_mtu(struct vport *vport, int mtu)
{
        struct gre_vport *gre_vport = gre_vport_priv(vport);
        struct mutable_config *mutable;
        struct dp_port *dp_port;

        mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        mutable->mtu = mtu;
        assign_config_rcu(vport, mutable);

        dp_port = vport_get_dp_port(vport);
        if (dp_port)
                set_internal_devs_mtu(dp_port->dp);

        return 0;
}

static int
gre_set_addr(struct vport *vport, const unsigned char *addr)
{
        struct gre_vport *gre_vport = gre_vport_priv(vport);
        struct mutable_config *mutable;

        mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        memcpy(mutable->eth_addr, addr, ETH_ALEN);
        assign_config_rcu(vport, mutable);

        return 0;
}

static const char *
gre_get_name(const struct vport *vport)
{
        const struct gre_vport *gre_vport = gre_vport_priv(vport);
        return gre_vport->name;
}

static const unsigned char *
gre_get_addr(const struct vport *vport)
{
        const struct gre_vport *gre_vport = gre_vport_priv(vport);
        return rcu_dereference(gre_vport->mutable)->eth_addr;
}

static unsigned
gre_get_dev_flags(const struct vport *vport)
{
        return IFF_UP | IFF_RUNNING | IFF_LOWER_UP;
}

static int
gre_is_running(const struct vport *vport)
{
        return 1;
}

static unsigned char
gre_get_operstate(const struct vport *vport)
{
        return IF_OPER_UP;
}

static int
gre_get_mtu(const struct vport *vport)
{
        const struct gre_vport *gre_vport = gre_vport_priv(vport);
        return rcu_dereference(gre_vport->mutable)->mtu;
}

struct vport_ops gre_vport_ops = {
        .type = "gre",
        .flags = VPORT_F_GEN_STATS | VPORT_F_TUN_ID,
        .init = gre_init,
        .exit = gre_exit,
        .create = gre_create,
        .modify = gre_modify,
        .destroy = gre_destroy,
        .set_mtu = gre_set_mtu,
        .set_addr = gre_set_addr,
        .get_name = gre_get_name,
        .get_addr = gre_get_addr,
        .get_dev_flags = gre_get_dev_flags,
        .is_running = gre_is_running,
        .get_operstate = gre_get_operstate,
        .get_mtu = gre_get_mtu,
        .send = gre_send,
};