/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "datapath.h"
#include "table.h"
#include "tunnel.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache. A shorter expiration
 * reduces the length of time that we may potentially blackhole packets while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt. A variety of factors may cause the cache to be
 * invalidated before the expiration time but this is the maximum. The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif

/*
 * Interval to check for and remove caches that are no longer valid. Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time. However, if no packets are sent through
 * the tunnel then the cache will never be destroyed. Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed. The cache
 * cleaner is periodically run to free invalid caches. It does not
 * significantly affect system performance. A lower interval will release
 * resources faster but will itself consume resources by requiring more frequent
 * checks. A longer interval may result in messages being printed to the kernel
 * message buffer about unreleased resources. The interval is expressed in
 * jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16

/* Protected by RCU. */
static struct tbl *port_table;

static void cache_cleaner(struct work_struct *work);
DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports;
static unsigned int key_remote_ports;
static unsigned int local_remote_ports;
static unsigned int remote_ports;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
{
	return container_of(node, struct tnl_vport, tbl_node);
}

static inline void schedule_cache_cleaner(void)
{
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
	if (!cache)
		return;

	flow_put(cache->flow);
	ip_rt_put(cache->rt);
	kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
	kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
	free_cache(c);
}

static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = tnl_vport->mutable;
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = tnl_vport->cache;
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
}

static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->port_config.saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->port_config.saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
	}
}

enum lookup_key {
	LOOKUP_TUNNEL_TYPE = 0,
	LOOKUP_SADDR = 1,
	LOOKUP_DADDR = 2,
	LOOKUP_KEY = 3,
};

struct port_lookup_key {
	u32 vals[4]; /* Contains enum lookup_key keys. */
	const struct tnl_mutable_config *mutable;
};

/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target)
{
	const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
	struct port_lookup_key *lookup = target;

	lookup->mutable = rcu_dereference(tnl_vport->mutable);

	return (lookup->mutable->tunnel_type == lookup->vals[LOOKUP_TUNNEL_TYPE]) &&
	       lookup->mutable->port_config.daddr == lookup->vals[LOOKUP_DADDR] &&
	       lookup->mutable->port_config.in_key == lookup->vals[LOOKUP_KEY] &&
	       lookup->mutable->port_config.saddr == lookup->vals[LOOKUP_SADDR];
}

static u32 port_hash(struct port_lookup_key *lookup)
{
	return jhash2(lookup->vals, ARRAY_SIZE(lookup->vals), 0);
}

static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
	struct port_lookup_key lookup;

	lookup.vals[LOOKUP_SADDR] = mutable->port_config.saddr;
	lookup.vals[LOOKUP_DADDR] = mutable->port_config.daddr;
	lookup.vals[LOOKUP_KEY] = mutable->port_config.in_key;
	lookup.vals[LOOKUP_TUNNEL_TYPE] = mutable->tunnel_type;

	return port_hash(&lookup);
}

static void check_table_empty(void)
{
	if (tbl_count(port_table) == 0) {
		struct tbl *old_table = port_table;

		cancel_delayed_work_sync(&cache_cleaner_wq);
		rcu_assign_pointer(port_table, NULL);
		tbl_deferred_destroy(old_table, NULL);
	}
}

static int add_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	if (!port_table) {
		struct tbl *new_table;

		new_table = tbl_create(0);
		if (!new_table)
			return -ENOMEM;

		rcu_assign_pointer(port_table, new_table);
		schedule_cache_cleaner();

	} else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
		struct tbl *old_table = port_table;
		struct tbl *new_table;

		new_table = tbl_expand(old_table);
		if (IS_ERR(new_table))
			return PTR_ERR(new_table);

		rcu_assign_pointer(port_table, new_table);
		tbl_deferred_destroy(old_table, NULL);
	}

	err = tbl_insert(port_table, &tnl_vport->tbl_node, mutable_hash(tnl_vport->mutable));
	if (err) {
		check_table_empty();
		return err;
	}

	(*find_port_pool(tnl_vport->mutable))++;

	return 0;
}

static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
	int err;
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = mutable_hash(new_mutable);
	if (hash == tnl_vport->tbl_node.hash)
		goto table_updated;

	/*
	 * Ideally we should make this move atomic to avoid having gaps in
	 * finding tunnels or the possibility of failure. However, if we do
	 * find a tunnel it will always be consistent.
	 */
	err = tbl_remove(port_table, &tnl_vport->tbl_node);
	if (err)
		return err;

	err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
	if (err) {
		check_table_empty();
		return err;
	}

table_updated:
	assign_config_rcu(vport, new_mutable);

	return 0;
}

static int del_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	err = tbl_remove(port_table, &tnl_vport->tbl_node);
	if (err)
		return err;

	check_table_empty();
	(*find_port_pool(tnl_vport->mutable))--;

	return 0;
}

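/*
 * Finds the tunnel vport that matches the given addresses, key, and tunnel
 * type, along with its current configuration.  Exact-key tunnels are checked
 * before flow-based (key match) tunnels, and in each case a lookup with the
 * supplied source address is tried before one with a wildcarded (zero)
 * source address.  The port count variables above let us skip hash lookups
 * that cannot succeed.
 */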
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
			    int tunnel_type,
			    const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference(port_table);
	struct tbl_node *tbl_node;

	if (unlikely(!table))
		return NULL;

	lookup.vals[LOOKUP_SADDR] = saddr;
	lookup.vals[LOOKUP_DADDR] = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.vals[LOOKUP_KEY] = key;
		lookup.vals[LOOKUP_TUNNEL_TYPE] = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (key_remote_ports) {
			lookup.vals[LOOKUP_SADDR] = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;

			lookup.vals[LOOKUP_SADDR] = saddr;
		}
	}

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.vals[LOOKUP_KEY] = 0;
		lookup.vals[LOOKUP_TUNNEL_TYPE] = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (remote_ports) {
			lookup.vals[LOOKUP_SADDR] = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}
	}

	return NULL;

found:
	*mutable = lookup.mutable;
	return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}

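/*
 * If the outer IP header indicates congestion (ECN CE), propagate the CE
 * mark into the encapsulated IPv4 or IPv6 header so that the end hosts still
 * see the congestion signal after decapsulation.
 */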
static inline void ecn_decapsulate(struct sk_buff *skb)
{
	u8 tos = ip_hdr(skb)->tos;

	if (INET_ECN_is_ce(tos)) {
		__be16 protocol = skb->protocol;
		unsigned int nw_header = skb_network_offset(skb);

		if (skb->protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			nw_header += VLAN_HLEN;
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, nw_header
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce((struct iphdr *)(skb->data + nw_header));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, nw_header
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce((struct ipv6hdr *)(skb->data + nw_header));
		}
#endif
	}
}

/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);
	skb_reset_network_header(skb);

	ecn_decapsulate(skb);

	skb_push(skb, ETH_HLEN);
	compute_ip_summed(skb, false);

	vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);

		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}

static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);
	icmph->checksum = 0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							 icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      + payload_length,
					      ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

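/*
 * Synthesizes an ICMP "fragmentation needed" (IPv4) or "packet too big"
 * (IPv6) message in response to an oversized packet and feeds it back into
 * the datapath as if it had been received on this tunnel vport, so that the
 * original sender can lower its path MTU.
 */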
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, IPV6_MIN_MTU);
	}
#endif

	total_length = min(total_length, mutable->mtu);
	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	}
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	/*
	 * Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet. If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	 */
	if ((mutable->port_config.flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	compute_ip_summed(nskb, false);
	vport_receive(vport, nskb);

	return true;
}

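/*
 * Checks whether the packet fits within the tunnel MTU and, when PMTUD is
 * enabled, whether an ICMP error needs to be generated.  Returns false if
 * the packet should be dropped (an ICMP error may already have been sent)
 * and otherwise fills in the DF bit to use in the outer header.
 */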
static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
{
	int mtu;
	__be16 frag_off;

	frag_off = (mutable->port_config.flags & TNL_F_PMTUD) ? htons(IP_DF) : 0;
	if (frag_off)
		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- mutable->tunnel_hlen
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
	else
		mtu = mutable->mtu;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *old_iph = ip_hdr(skb);

		frag_off |= old_iph->frag_off & htons(IP_DF);
		mtu = max(mtu, IP_MIN_MTU);

		if ((old_iph->frag_off & htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
				goto drop;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		unsigned int packet_length = skb->len - ETH_HLEN
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

		mtu = max(mtu, IPV6_MIN_MTU);

		/* IPv6 requires PMTUD if the packet is above the minimum MTU. */
		if (packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (mtu < packet_length) {
			if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
				goto drop;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;

drop:
	*frag_offp = 0;
	return false;
}

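/*
 * Builds the outer IPv4 header for this tunnel into 'header' based on the
 * route and port configuration, then lets the protocol implementation append
 * its own header (GRE, for example) after it.
 */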
static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = htons(IP_DF);
	iph->protocol = tnl_vport->tnl_ops->ipproto;
	iph->tos = mutable->port_config.tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = mutable->port_config.ttl;
	if (!iph->ttl)
		iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

static inline void *get_cached_header(const struct tnl_cache *cache)
{
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

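/*
 * A cached header is only usable if nothing it depends on has changed: the
 * cache hasn't expired (where a timeout is needed), the routing generation
 * and hardware header sequence still match, the port configuration sequence
 * number is unchanged, and any flow captured for an internal device is still
 * alive.
 */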
static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
{
	return cache &&
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		(cache->flow && !cache->flow->dead));
}

static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
{
	struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);
	}

	return 0;
}

static void cache_cleaner(struct work_struct *work)
{
	schedule_cache_cleaner();

	rcu_read_lock();
	tbl_foreach(port_table, cache_cleaner_cb, NULL);
	rcu_read_unlock();
}

static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
{
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
	unsigned hh_seq;

	do {
		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;
#else
	read_lock_bh(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}

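/*
 * Attempts to build a cached copy of the complete Ethernet plus tunnel
 * header for this destination so that later packets can be encapsulated
 * with a single memcpy().  If the destination is one of our own internal
 * devices, the matching flow is also looked up and stored so that the
 * looped-back packet can skip a second flow table lookup.  Returns NULL
 * (and the caller falls back to building headers per packet) if caching is
 * disabled, there is no hardware header, or the cache lock is contended.
 */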
static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
				     struct rtable *rt)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;
	void *cache_data;
	int cache_len;

	if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))
		return NULL;

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching just fall back to the IP stack.
	 */
	if (!rt_dst(rt).hh)
		return NULL;

	/*
	 * If lock is contended fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	 */
	if (!spin_trylock_bh(&tnl_vport->cache_lock))
		return NULL;

	cache = tnl_vport->cache;
	if (check_cache_valid(cache, mutable))
		goto unlock;
	else
		cache = NULL;

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);
	if (!cache)
		goto unlock;

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;
	cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

	if (is_internal_dev(rt_dst(rt).dev)) {
		int err;
		struct vport *vport;
		struct dp_port *dp_port;
		struct sk_buff *skb;
		bool is_frag;
		struct odp_flow_key flow_key;
		struct tbl_node *flow_node;

		vport = internal_dev_get_vport(rt_dst(rt).dev);
		if (!vport)
			goto done;

		dp_port = vport_get_dp_port(vport);
		if (!dp_port)
			goto done;

		skb = alloc_skb(cache->len, GFP_ATOMIC);
		if (!skb)
			goto done;

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, dp_port->port_no, &flow_key, &is_frag);

		kfree_skb(skb);
		if (err || is_frag)
			goto done;

		flow_node = tbl_lookup(rcu_dereference(dp_port->dp->table),
				       &flow_key, flow_hash(&flow_key),
				       flow_cmp);
		if (flow_node) {
			struct sw_flow *flow = flow_cast(flow_node);

			cache->flow = flow;
			flow_hold(flow);
		}
	}

done:
	assign_cache_rcu(vport, cache);

unlock:
	spin_unlock_bh(&tnl_vport->cache_lock);

	return cache;
}

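/*
 * Returns the route to use for this tunnel, preferring the route held in a
 * valid header cache.  If the cached route can't be used, a fresh route
 * lookup is performed and, when the ToS matches the configured value, a new
 * cache is built for it.
 */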
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	*cache = NULL;
	tos = RT_TOS(tos);

	if (likely(tos == mutable->port_config.tos &&
		   check_cache_valid(cur_cache, mutable))) {
		*cache = cur_cache;
		return cur_cache->rt;
	} else {
		struct rtable *rt;
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = mutable->port_config.daddr,
						.saddr = mutable->port_config.saddr,
						.tos = tos } },
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
			return NULL;

		if (likely(tos == mutable->port_config.tos))
			*cache = build_cache(vport, mutable, rt);

		return rt;
	}
}

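/*
 * Makes sure the skb has enough headroom for the headers we are about to
 * push and that its header is safe to write; otherwise the data is copied
 * into a new skb and the original is freed.
 */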
static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
{
	if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
		if (unlikely(!nskb)) {
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}

		set_skb_csum_bits(skb, nskb);

		if (skb->sk)
			skb_set_owner_w(nskb, skb->sk);

		kfree_skb(skb);
		return nskb;
	}

	return skb;
}

static inline bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
			return true;

	return false;
}

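/*
 * Prepares an skb for encapsulation: sets up checksum offload state, makes
 * sure there is room for the tunnel headers, performs GSO segmentation in
 * software where needed, and computes any outstanding checksums that can't
 * be left to hardware.  May return a list of segments linked through
 * skb->next, or an ERR_PTR() on failure.
 */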
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
{
	int min_headroom;
	int err;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (unlikely(err))
		goto error_free;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen;

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		/*
		 * If we are doing GSO on a pskb it is better to make sure that
		 * the headroom is correct now. We will only have to copy the
		 * portion in the linear data area and GSO will preserve
		 * headroom when it creates the segments. This is particularly
		 * beneficial on Xen where we get a lot of GSO pskbs.
		 * Conversely, we avoid copying if it is just to get our own
		 * writable clone because GSO will do the copy for us.
		 */
		if (skb_headroom(skb) < min_headroom) {
			skb = check_headroom(skb, min_headroom);
			if (unlikely(IS_ERR(skb))) {
				err = PTR_ERR(skb);
				goto error;
			}
		}

		nskb = skb_gso_segment(skb, 0);
		kfree_skb(skb);
		if (unlikely(IS_ERR(nskb))) {
			err = PTR_ERR(nskb);
			goto error;
		}

		skb = nskb;
	} else {
		skb = check_headroom(skb, min_headroom);
		if (unlikely(IS_ERR(skb))) {
			err = PTR_ERR(skb);
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			/*
			 * Pages aren't locked and could change at any time.
			 * If this happens after we compute the checksum, the
			 * checksum will be wrong. We linearize now to avoid
			 * this problem.
			 */
			if (unlikely(need_linearize(skb))) {
				err = __skb_linearize(skb);
				if (unlikely(err))
					goto error_free;
			}

			err = skb_checksum_help(skb);
			if (unlikely(err))
				goto error_free;
		} else if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
	}

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}

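/*
 * Transmits a list of packets (linked through skb->next) that have already
 * been fragmented and encapsulated, returning the number of payload bytes
 * successfully handed to the IP stack.  If any fragment is dropped, the
 * remaining fragments are freed rather than sent.
 */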
static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
{
	int sent_len;
	int err;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;

		skb->next = NULL;

		memset(&IPCB(skb)->opt, 0, sizeof(IPCB(skb)->opt));
		IPCB(skb)->flags = 0;

		err = ip_local_out(skb);
		if (likely(net_xmit_eval(err) == 0))
			sent_len += frag_len;
		else {
			skb = next;
			goto free_frags;
		}

		skb = next;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped so just free the rest. This may help improve the congestion
	 * that caused the first packet to be dropped.
	 */
	tnl_free_linked_skbs(skb);
	return sent_len;
}

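/*
 * Transmit path for tunnel vports.  Validates the inner headers, chooses ToS
 * and TTL values, finds a route (and header cache) to the remote endpoint,
 * handles offloads and MTU/PMTUD, prepends the cached or freshly built
 * tunnel header, and then either hands the packet to a local internal
 * device, queues it directly on the output device, or pushes it through the
 * IP stack when fragmentation is required.  Returns the number of bytes
 * sent.
 */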
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	int sent_len = 0;
	__be16 frag_off;
	u8 ttl;
	u8 inner_tos;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* ToS */
	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	else
		inner_tos = 0;

	if (mutable->port_config.flags & TNL_F_TOS_INHERIT)
		tos = inner_tos;
	else
		tos = mutable->port_config.tos;

	tos = INET_ECN_encapsulate(tos, inner_tos);

	/* Route lookup */
	rt = find_route(vport, mutable, tos, &cache);
	if (unlikely(!rt))
		goto error_free;
	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt);
	if (unlikely(IS_ERR(skb)))
		goto error;

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	 */
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
	    cache)) {
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
		cache = NULL;
	}

	/* TTL */
	ttl = mutable->port_config.ttl;
	if (!ttl)
		ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	if (mutable->port_config.flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;
#endif
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

		} else {
			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

			if (next_skb)
				skb_dst_set(skb, dst_clone(unattached_dst));
			else {
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;
			}
		}
		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph = ip_hdr(skb);
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
		if (unlikely(!skb))
			goto next;

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));
			ip_send_check(iph);

			if (cache_vport) {
				OVS_CB(skb)->flow = cache->flow;
				compute_ip_summed(skb, true);
				vport_receive(cache_vport, skb);
				sent_len += orig_len;
			} else {
				int err;

				skb->dev = rt_dst(rt).dev;
				err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(err) == 0))
					sent_len += orig_len;
			}
		} else
			sent_len += send_frags(skb, mutable);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	goto out;

error_free:
	tnl_free_linked_skbs(skb);
error:
	dst_release(unattached_dst);
	vport_record_error(vport, err);
out:
	return sent_len;
}

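/*
 * Copies and validates a tunnel configuration from userspace, computing the
 * tunnel header length and derived tunnel type, and rejecting configurations
 * that would collide with an existing tunnel vport.
 */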
static int set_config(const void __user *uconfig, const struct tnl_ops *tnl_ops,
		      const struct vport *cur_vport,
		      struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;

	if (copy_from_user(&mutable->port_config, uconfig, sizeof(struct tnl_port_config)))
		return -EFAULT;

	if (mutable->port_config.daddr == 0)
		return -EINVAL;

	if (mutable->port_config.tos != RT_TOS(mutable->port_config.tos))
		return -EINVAL;

	mutable->tunnel_hlen = tnl_ops->hdr_len(&mutable->port_config);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	mutable->tunnel_type = tnl_ops->tunnel_type;
	if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
		mutable->tunnel_type |= TNL_T_KEY_MATCH;
		mutable->port_config.in_key = 0;
	} else
		mutable->tunnel_type |= TNL_T_KEY_EXACT;

	old_vport = tnl_find_port(mutable->port_config.saddr,
				  mutable->port_config.daddr,
				  mutable->port_config.in_key,
				  mutable->tunnel_type,
				  &old_mutable);

	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
		mutable->port_config.out_key = 0;

	return 0;
}

struct vport *tnl_create(const char *name, const void __user *config,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	int initial_frag_id;
	int err;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, name);
	tnl_vport->tnl_ops = tnl_ops;

	tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!tnl_vport->mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
	tnl_vport->mutable->mtu = ETH_DATA_LEN;

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = set_config(config, tnl_ops, NULL, tnl_vport->mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
				       (net_random() % (MAX_CACHE_EXP / 2));
#endif

	err = add_port(vport);
	if (err)
		goto error_free_mutable;

	return vport;

error_free_mutable:
	kfree(tnl_vport->mutable);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}

int tnl_modify(struct vport *vport, const void __user *config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;
	int err;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	err = set_config(config, tnl_vport->tnl_ops, vport, mutable);
	if (err)
		goto error_free;

	mutable->seq++;

	err = move_port(vport, mutable);
	if (err)
		goto error_free;

	return 0;

error_free:
	kfree(mutable);
error:
	return err;
}

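/*
 * RCU callback that releases a tunnel vport once no readers can still be
 * using it: the header cache, the mutable configuration, and finally the
 * vport itself.
 */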
static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);

	spin_lock_bh(&tnl_vport->cache_lock);
	free_cache(tnl_vport->cache);
	spin_unlock_bh(&tnl_vport->cache_lock);

	kfree(tnl_vport->mutable);
	vport_free(tnl_vport_to_vport(tnl_vport));
}

int tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;

	if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
				   tnl_vport->mutable->port_config.daddr,
				   tnl_vport->mutable->port_config.in_key,
				   tnl_vport->mutable->tunnel_type,
				   &old_mutable))
		del_port(vport);

	call_rcu(&tnl_vport->rcu, free_port_rcu);

	return 0;
}

int tnl_set_mtu(struct vport *vport, int mtu)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	mutable->mtu = mtu;
	assign_config_rcu(vport, mutable);

	return 0;
}

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}

const char *tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference(tnl_vport->mutable)->eth_addr;
}

int tnl_get_mtu(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference(tnl_vport->mutable)->mtu;
}

void tnl_free_linked_skbs(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;

	while (skb) {
		struct sk_buff *next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}