/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "table.h"
#include "tunnel.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system,
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time increases performance by reducing the frequency with which
 * the cache needs to be rebuilt.  A variety of factors may cause the cache to
 * be invalidated before the expiration time, but this is the maximum.  The
 * time is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif

/*
 * Interval at which to check for and remove caches that are no longer valid.
 * Caches are checked for validity before they are used for packet
 * encapsulation and old caches are removed at that time.  However, if no
 * packets are sent through the tunnel then the cache will never be destroyed.
 * Since it holds references to a number of system objects, the cache will
 * continue to tie up system resources by preventing those objects from being
 * destroyed.  The cache cleaner is periodically run to free invalid caches.
 * It does not significantly affect system performance.  A shorter interval
 * releases resources faster but itself consumes resources by requiring more
 * frequent checks.  A longer interval may result in messages being printed to
 * the kernel message buffer about unreleased resources.  The interval is
 * expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16

/* Protected by RCU. */
static struct tbl *port_table __read_mostly;

static void cache_cleaner(struct work_struct *work);
DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
{
	return container_of(node, struct tnl_vport, tbl_node);
}

static inline void schedule_cache_cleaner(void)
{
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

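/*
 * Releases a header cache entry along with the references it holds on the
 * cached route and, if set, the cached flow.  NULL is tolerated.
 */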
static void free_cache(struct tnl_cache *cache)
{
	if (!cache)
		return;

	flow_put(cache->flow);
	ip_rt_put(cache->rt);
	kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
	kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
	free_cache(c);
}

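/*
 * Publishes a new mutable config (or cache) for 'vport' and defers freeing
 * the old one until readers inside RCU read-side critical sections are done
 * with it.
 */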
static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = tnl_vport->mutable;
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = tnl_vport->cache;
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
}

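/*
 * Maps a port's config onto the counter for its lookup class: exact-key vs.
 * flow-based key matching, crossed with whether a local (source) address was
 * configured.  tnl_find_port() uses these counters to skip hash lookups for
 * classes that have no ports.
 */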
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->port_config.saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->port_config.saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
	}
}

struct port_lookup_key {
	u32 tunnel_type;
	__be32 saddr;
	__be32 daddr;
	__be32 key;
	const struct tnl_mutable_config *mutable;
};

/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target)
{
	const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
	struct port_lookup_key *lookup = target;

	lookup->mutable = rcu_dereference(tnl_vport->mutable);

	return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
		lookup->mutable->port_config.daddr == lookup->daddr &&
		lookup->mutable->port_config.in_key == lookup->key &&
		lookup->mutable->port_config.saddr == lookup->saddr);
}

static u32 port_hash(struct port_lookup_key *k)
{
	return jhash_3words(k->key, k->saddr, k->daddr, k->tunnel_type);
}

static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
	struct port_lookup_key lookup;

	lookup.saddr = mutable->port_config.saddr;
	lookup.daddr = mutable->port_config.daddr;
	lookup.key = mutable->port_config.in_key;
	lookup.tunnel_type = mutable->tunnel_type;

	return port_hash(&lookup);
}

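/*
 * Tears down the port table once the last tunnel port has been removed.  The
 * cache cleaner is stopped first so that it cannot run against a table that
 * is going away; the table itself is freed after an RCU grace period.
 */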
static void check_table_empty(void)
{
	if (tbl_count(port_table) == 0) {
		struct tbl *old_table = port_table;

		cancel_delayed_work_sync(&cache_cleaner_wq);
		rcu_assign_pointer(port_table, NULL);
		tbl_deferred_destroy(old_table, NULL);
	}
}

static int add_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	if (!port_table) {
		struct tbl *new_table;

		new_table = tbl_create(0);
		if (!new_table)
			return -ENOMEM;

		rcu_assign_pointer(port_table, new_table);
		schedule_cache_cleaner();

	} else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
		struct tbl *old_table = port_table;
		struct tbl *new_table;

		new_table = tbl_expand(old_table);
		if (IS_ERR(new_table))
			return PTR_ERR(new_table);

		rcu_assign_pointer(port_table, new_table);
		tbl_deferred_destroy(old_table, NULL);
	}

	err = tbl_insert(port_table, &tnl_vport->tbl_node, mutable_hash(tnl_vport->mutable));
	if (err) {
		check_table_empty();
		return err;
	}

	(*find_port_pool(tnl_vport->mutable))++;

	return 0;
}

static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
	int err;
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = mutable_hash(new_mutable);
	if (hash == tnl_vport->tbl_node.hash)
		goto table_updated;

	/*
	 * Ideally we should make this move atomic to avoid having gaps in
	 * finding tunnels or the possibility of failure.  However, if we do
	 * find a tunnel it will always be consistent.
	 */
	err = tbl_remove(port_table, &tnl_vport->tbl_node);
	if (err)
		return err;

	err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
	if (err) {
		(*find_port_pool(tnl_vport->mutable))--;
		check_table_empty();
		return err;
	}

table_updated:
	(*find_port_pool(tnl_vport->mutable))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(tnl_vport->mutable))++;

	return 0;
}

static int del_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	err = tbl_remove(port_table, &tnl_vport->tbl_node);
	if (err)
		return err;

	check_table_empty();
	(*find_port_pool(tnl_vport->mutable))--;

	return 0;
}

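/*
 * Looks up the tunnel port that should receive a packet with the given
 * addresses and key.  Exact-key matches are tried before flow-based
 * (TNL_T_KEY_MATCH) matches, and within each class a port bound to a local
 * address is preferred over one that wildcards the source address.  The
 * per-class port counters let us skip lookups for empty classes.
 */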
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
			    int tunnel_type,
			    const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference(port_table);
	struct tbl_node *tbl_node;

	if (unlikely(!table))
		return NULL;

	lookup.saddr = saddr;
	lookup.daddr = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.key = key;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (key_remote_ports) {
			lookup.saddr = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;

			lookup.saddr = saddr;
		}
	}

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.key = 0;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (remote_ports) {
			lookup.saddr = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}
	}

	return NULL;

found:
	*mutable = lookup.mutable;
	return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}

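/*
 * Propagates a congestion marking from the outer IP header to the inner
 * IPv4/IPv6 header, as required when decapsulating an ECN-capable tunnel.
 */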
static inline void ecn_decapsulate(struct sk_buff *skb)
{
	/* This is accessing the outer IP header of the tunnel, which we've
	 * already validated to be OK.  skb->data is currently set to the start
	 * of the inner Ethernet header, and we've validated ETH_HLEN.
	 */
	if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (skb->protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}

/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
	/* Packets received by this function are in the following state:
	 * - skb->data points to the inner Ethernet header.
	 * - The inner Ethernet header is in the linear data area.
	 * - skb->csum does not include the inner Ethernet header.
	 * - The layer pointers point at the outer headers.
	 */

	struct ethhdr *eh = (struct ethhdr *)skb->data;

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	ecn_decapsulate(skb);
	compute_ip_summed(skb, false);

	vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);

		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}

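/*
 * Fills in 'nskb' with IP and ICMP "fragmentation needed" headers plus the
 * leading 'payload_length' bytes of the offending packet, with the original
 * addresses swapped so the reply goes back to the sender.
 */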
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);
	icmph->checksum = 0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
						sizeof(struct icmp6hdr)
						+ payload_length,
						ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

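/*
 * Synthesizes an ICMP "fragmentation needed" (IPv4) or "packet too big"
 * (IPv6) message in response to a packet that exceeds the tunnel MTU and
 * injects it back into the datapath as a received packet.  Returns true if
 * the oversized packet should be dropped.
 */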
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages,
		 * but we don't have an address to send from, so just
		 * fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, IPV6_MIN_MTU);
	}
#endif

	total_length = min(total_length, mutable->mtu);
	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	}
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	/*
	 * Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	 */
	if ((mutable->port_config.flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	compute_ip_summed(nskb, false);
	vport_receive(vport, nskb);

	return true;
}

static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
{
	int mtu;
	__be16 frag_off;

	frag_off = (mutable->port_config.flags & TNL_F_PMTUD) ? htons(IP_DF) : 0;
	if (frag_off)
		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- mutable->tunnel_hlen
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
	else
		mtu = mutable->mtu;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *old_iph = ip_hdr(skb);

		frag_off |= old_iph->frag_off & htons(IP_DF);
		mtu = max(mtu, IP_MIN_MTU);

		if ((old_iph->frag_off & htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
				goto drop;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		unsigned int packet_length = skb->len - ETH_HLEN
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

		mtu = max(mtu, IPV6_MIN_MTU);

		/* IPv6 requires PMTUD if the packet is above the minimum MTU. */
		if (packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (mtu < packet_length) {
			if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
				goto drop;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;

drop:
	*frag_offp = 0;
	return false;
}

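/*
 * Writes the outer IP header for this tunnel into 'header' and then lets
 * the protocol-specific build_header() implementation append its own header
 * after it.  tot_len, the IP checksum, and the IP ID are filled in later,
 * once the final packet length is known.
 */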
static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = htons(IP_DF);
	iph->protocol = tnl_vport->tnl_ops->ipproto;
	iph->tos = mutable->port_config.tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = mutable->port_config.ttl;
	if (!iph->ttl)
		iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

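/*
 * Cached header data is laid out immediately after the tnl_cache struct,
 * padded to CACHE_DATA_ALIGN:
 *
 *	struct tnl_cache | pad | link-layer header | IP + tunnel header
 *
 * so a cached transmit is a single memcpy of cache->len bytes.
 */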
static inline void *get_cached_header(const struct tnl_cache *cache)
{
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

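/*
 * A cache entry can go stale for several reasons: the cached route may have
 * been invalidated (tracked by the routing generation ID where available),
 * the destination's link-layer header may have changed (tracked by the
 * hh_lock sequence number), the port's config may have been replaced
 * (tracked by mutable->seq), or, for tunnels terminating in an internal
 * device, the cached flow may have died.  On kernels without these signals
 * we additionally expire entries after a timeout.
 */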
static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
{
	return cache &&
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		(cache->flow && !cache->flow->dead));
}

static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
{
	struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);
	}

	return 0;
}

static void cache_cleaner(struct work_struct *work)
{
	schedule_cache_cleaner();

	rcu_read_lock();
	tbl_foreach(port_table, cache_cleaner_cb, NULL);
	rcu_read_unlock();
}

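/*
 * Snapshots the destination's link-layer header into the cache, using the
 * hh_lock seqlock (when available) to get a consistent copy without
 * blocking writers.
 */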
static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
{
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
	unsigned hh_seq;

	do {
		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;
#else
	read_lock_bh(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}

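/*
 * Attempts to build a new header cache entry for this route.  If the tunnel
 * terminates in a local internal device, the matching flow is also cached so
 * that the receive path can be short-circuited.  Returns NULL, with no harm
 * done, whenever caching is disabled, unavailable, or would require waiting
 * on a contended lock.
 */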
static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
				     struct rtable *rt)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;
	void *cache_data;
	int cache_len;

	if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))
		return NULL;

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching, just fall back to the IP stack.
	 */
	if (!rt_dst(rt).hh)
		return NULL;

	/*
	 * If the lock is contended, fall back to directly building the
	 * header.  We're not going to help performance by sitting here
	 * spinning.
	 */
	if (!spin_trylock_bh(&tnl_vport->cache_lock))
		return NULL;

	cache = tnl_vport->cache;
	if (check_cache_valid(cache, mutable))
		goto unlock;
	else
		cache = NULL;

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);
	if (!cache)
		goto unlock;

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;
	cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

	if (is_internal_dev(rt_dst(rt).dev)) {
		struct odp_flow_key flow_key;
		struct tbl_node *flow_node;
		struct vport *vport;
		struct sk_buff *skb;
		bool is_frag;
		int err;

		vport = internal_dev_get_vport(rt_dst(rt).dev);
		if (!vport)
			goto done;

		skb = alloc_skb(cache->len, GFP_ATOMIC);
		if (!skb)
			goto done;

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, vport->port_no, &flow_key, &is_frag);

		kfree_skb(skb);
		if (err || is_frag)
			goto done;

		flow_node = tbl_lookup(rcu_dereference(vport->dp->table),
				       &flow_key, flow_hash(&flow_key),
				       flow_cmp);
		if (flow_node) {
			struct sw_flow *flow = flow_cast(flow_node);

			cache->flow = flow;
			flow_hold(flow);
		}
	}

done:
	assign_cache_rcu(vport, cache);

unlock:
	spin_unlock_bh(&tnl_vport->cache_lock);

	return cache;
}

static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	*cache = NULL;
	tos = RT_TOS(tos);

	if (likely(tos == mutable->port_config.tos &&
		   check_cache_valid(cur_cache, mutable))) {
		*cache = cur_cache;
		return cur_cache->rt;
	} else {
		struct rtable *rt;
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = mutable->port_config.daddr,
						.saddr = mutable->port_config.saddr,
						.tos = tos } },
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
			return NULL;

		if (likely(tos == mutable->port_config.tos))
			*cache = build_cache(vport, mutable, rt);

		return rt;
	}
}

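/*
 * Ensures the skb has at least 'headroom' bytes of headroom and a writable
 * header, reallocating it if necessary.  Consumes the original skb on
 * failure and returns an ERR_PTR.
 */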
static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
{
	if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
		if (unlikely(!nskb)) {
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}

		set_skb_csum_bits(skb, nskb);

		if (skb->sk)
			skb_set_owner_w(nskb, skb->sk);

		kfree_skb(skb);
		return nskb;
	}

	return skb;
}

static inline bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
			return true;

	return false;
}

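/*
 * Prepares a packet for encapsulation: performs any checksum setup the
 * packet still needs, segments GSO packets in software, and guarantees
 * enough headroom for the link-layer and tunnel headers.  May return a
 * chain of skbs linked through ->next (the GSO segments) or an ERR_PTR.
 */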
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
{
	int min_headroom;
	int err;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (unlikely(err))
		goto error_free;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen;

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		/*
		 * If we are doing GSO on a pskb it is better to make sure that
		 * the headroom is correct now.  We will only have to copy the
		 * portion in the linear data area and GSO will preserve
		 * headroom when it creates the segments.  This is particularly
		 * beneficial on Xen where we get a lot of GSO pskbs.
		 * Conversely, we avoid copying if it is just to get our own
		 * writable clone because GSO will do the copy for us.
		 */
		if (skb_headroom(skb) < min_headroom) {
			skb = check_headroom(skb, min_headroom);
			if (unlikely(IS_ERR(skb))) {
				err = PTR_ERR(skb);
				goto error;
			}
		}

		nskb = skb_gso_segment(skb, 0);
		kfree_skb(skb);
		if (unlikely(IS_ERR(nskb))) {
			err = PTR_ERR(nskb);
			goto error;
		}

		skb = nskb;
	} else {
		skb = check_headroom(skb, min_headroom);
		if (unlikely(IS_ERR(skb))) {
			err = PTR_ERR(skb);
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			/*
			 * Pages aren't locked and could change at any time.
			 * If this happens after we compute the checksum, the
			 * checksum will be wrong.  We linearize now to avoid
			 * this problem.
			 */
			if (unlikely(need_linearize(skb))) {
				err = __skb_linearize(skb);
				if (unlikely(err))
					goto error_free;
			}

			err = skb_checksum_help(skb);
			if (unlikely(err))
				goto error_free;
		} else if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
	}

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}

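/*
 * Hands each skb in a ->next-linked chain to the IP stack, returning the
 * number of payload (pre-encapsulation) bytes actually sent.
 */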
static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
{
	int sent_len;
	int err;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		if (likely(net_xmit_eval(err) == 0))
			sent_len += frag_len;
		else {
			skb = next;
			goto free_frags;
		}

		skb = next;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped, so just free the rest.  This may help relieve the
	 * congestion that caused the first packet to be dropped.
	 */
	tnl_free_linked_skbs(skb);
	return sent_len;
}

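/*
 * Transmit path for all tunnel vports.  Validates the inner headers,
 * resolves a route (via the header cache when possible), applies PMTUD,
 * encapsulates, and sends.  Returns the number of bytes sent; errors are
 * recorded against the vport rather than reported to the caller.
 */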
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	int sent_len = 0;
	__be16 frag_off;
	u8 ttl;
	u8 inner_tos;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* ToS */
	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	else
		inner_tos = 0;

	if (mutable->port_config.flags & TNL_F_TOS_INHERIT)
		tos = inner_tos;
	else
		tos = mutable->port_config.tos;

	tos = INET_ECN_encapsulate(tos, inner_tos);

	/* Route lookup */
	rt = find_route(vport, mutable, tos, &cache);
	if (unlikely(!rt))
		goto error_free;
	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt);
	if (unlikely(IS_ERR(skb)))
		goto error;

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	 */
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		     cache)) {
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
		cache = NULL;
	}

	/* TTL */
	ttl = mutable->port_config.ttl;
	if (!ttl)
		ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	if (mutable->port_config.flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;
#endif
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

		} else {
			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

			if (next_skb)
				skb_dst_set(skb, dst_clone(unattached_dst));
			else {
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;
			}
		}
		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph = ip_hdr(skb);
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
		if (unlikely(!skb))
			goto next;

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));
			ip_send_check(iph);

			if (cache_vport) {
				OVS_CB(skb)->flow = cache->flow;
				compute_ip_summed(skb, true);
				vport_receive(cache_vport, skb);
				sent_len += orig_len;
			} else {
				int err;

				skb->dev = rt_dst(rt).dev;
				err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(err) == 0))
					sent_len += orig_len;
			}
		} else
			sent_len += send_frags(skb, mutable);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	goto out;

error_free:
	tnl_free_linked_skbs(skb);
error:
	dst_release(unattached_dst);
	vport_record_error(vport, err);
out:
	return sent_len;
}

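/*
 * Parses and validates a userspace port configuration into 'mutable',
 * computing the tunnel header length and lookup class, and rejecting
 * configurations that would collide with an existing port.
 */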
static int set_config(const void *config, const struct tnl_ops *tnl_ops,
		      const struct vport *cur_vport,
		      struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;

	mutable->port_config = *(struct tnl_port_config *)config;

	if (mutable->port_config.daddr == 0)
		return -EINVAL;

	if (mutable->port_config.tos != RT_TOS(mutable->port_config.tos))
		return -EINVAL;

	mutable->tunnel_hlen = tnl_ops->hdr_len(&mutable->port_config);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	mutable->tunnel_type = tnl_ops->tunnel_type;
	if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
		mutable->tunnel_type |= TNL_T_KEY_MATCH;
		mutable->port_config.in_key = 0;
	} else
		mutable->tunnel_type |= TNL_T_KEY_EXACT;

	old_vport = tnl_find_port(mutable->port_config.saddr,
				  mutable->port_config.daddr,
				  mutable->port_config.in_key,
				  mutable->tunnel_type,
				  &old_mutable);

	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
		mutable->port_config.out_key = 0;

	return 0;
}

struct vport *tnl_create(const struct vport_parms *parms,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	int initial_frag_id;
	int err;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!tnl_vport->mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
	tnl_vport->mutable->mtu = ETH_DATA_LEN;

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
					(net_random() % (MAX_CACHE_EXP / 2));
#endif

	err = add_port(vport);
	if (err)
		goto error_free_mutable;

	return vport;

error_free_mutable:
	kfree(tnl_vport->mutable);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}

int tnl_modify(struct vport *vport, struct odp_port *port)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;
	int err;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
	if (err)
		goto error_free;

	mutable->seq++;

	err = move_port(vport, mutable);
	if (err)
		goto error_free;

	return 0;

error_free:
	kfree(mutable);
error:
	return err;
}

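/*
 * RCU callback that finishes destroying a port once all readers are done
 * with it: frees the header cache, the mutable config, and the vport
 * itself.
 */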
static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);

	spin_lock_bh(&tnl_vport->cache_lock);
	free_cache(tnl_vport->cache);
	spin_unlock_bh(&tnl_vport->cache_lock);

	kfree(tnl_vport->mutable);
	vport_free(tnl_vport_to_vport(tnl_vport));
}

int tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;

	if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
	    tnl_vport->mutable->port_config.daddr,
	    tnl_vport->mutable->port_config.in_key,
	    tnl_vport->mutable->tunnel_type,
	    &old_mutable))
		del_port(vport);

	call_rcu(&tnl_vport->rcu, free_port_rcu);

	return 0;
}

int tnl_set_mtu(struct vport *vport, int mtu)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	mutable->mtu = mtu;
	assign_config_rcu(vport, mutable);

	return 0;
}

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}

const char *tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference(tnl_vport->mutable)->eth_addr;
}

int tnl_get_mtu(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference(tnl_vport->mutable)->mtu;
}

void tnl_free_linked_skbs(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;

	while (skb) {
		struct sk_buff *next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}