1/*
2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
4 *
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
7 */
8
9#include <linux/if_arp.h>
10#include <linux/if_ether.h>
11#include <linux/ip.h>
12#include <linux/if_vlan.h>
13#include <linux/in.h>
14#include <linux/in_route.h>
15#include <linux/jhash.h>
16#include <linux/kernel.h>
17#include <linux/version.h>
18#include <linux/workqueue.h>
19
20#include <net/dsfield.h>
21#include <net/dst.h>
22#include <net/icmp.h>
23#include <net/inet_ecn.h>
24#include <net/ip.h>
25#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
26#include <net/ipv6.h>
27#endif
28#include <net/route.h>
29#include <net/xfrm.h>
30
31#include "actions.h"
32#include "checksum.h"
33#include "datapath.h"
34#include "table.h"
35#include "tunnel.h"
36#include "vport.h"
37#include "vport-generic.h"
38#include "vport-internal_dev.h"
39
40#ifdef NEED_CACHE_TIMEOUT
41/*
42 * On kernels where we can't quickly detect changes in the rest of the system
43 * we use an expiration time to invalidate the cache. A shorter expiration
44 * reduces the length of time that we may potentially blackhole packets while
45 * a longer time increases performance by reducing the frequency that the
46 * cache needs to be rebuilt. A variety of factors may cause the cache to be
47 * invalidated before the expiration time but this is the maximum. The time
48 * is expressed in jiffies.
49 */
50#define MAX_CACHE_EXP HZ
51#endif
52
53/*
54 * Interval to check for and remove caches that are no longer valid. Caches
55 * are checked for validity before they are used for packet encapsulation and
56 * old caches are removed at that time. However, if no packets are sent through
57 * the tunnel then the cache will never be destroyed. Since it holds
58 * references to a number of system objects, the cache will continue to use
59 * system resources by not allowing those objects to be destroyed. The cache
60 * cleaner is periodically run to free invalid caches. It does not
61 * significantly affect system performance. A lower interval will release
62 * resources faster but will itself consume resources by requiring more frequent
63 * checks. A longer interval may result in messages being printed to the kernel
64 * message buffer about unreleased resources. The interval is expressed in
65 * jiffies.
66 */
67#define CACHE_CLEANER_INTERVAL (5 * HZ)
68
69#define CACHE_DATA_ALIGN 16
70
71/* Protected by RCU. */
72static struct tbl *port_table __read_mostly;
73
74static void cache_cleaner(struct work_struct *work);
75static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
76
77/*
78 * These are just used as an optimization: they don't require any kind of
79 * synchronization because we could have just as easily read the value before
80 * the port change happened.
81 */
82static unsigned int key_local_remote_ports __read_mostly;
83static unsigned int key_remote_ports __read_mostly;
84static unsigned int local_remote_ports __read_mostly;
85static unsigned int remote_ports __read_mostly;
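/*
 * Each counter above tracks how many tunnel ports currently fall into the
 * corresponding lookup class; tnl_find_port() uses them to skip hash table
 * lookups for classes that have no ports configured.
 */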
86
87#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
88#define rt_dst(rt) (rt->dst)
89#else
90#define rt_dst(rt) (rt->u.dst)
91#endif
92
93static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
94{
95 return vport_from_priv(tnl_vport);
96}
97
98static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
99{
100 return container_of(node, struct tnl_vport, tbl_node);
101}
102
103static inline void schedule_cache_cleaner(void)
104{
105 schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
106}
107
108static void free_cache(struct tnl_cache *cache)
109{
110 if (!cache)
111 return;
112
113 flow_put(cache->flow);
114 ip_rt_put(cache->rt);
115 kfree(cache);
116}
117
118static void free_config_rcu(struct rcu_head *rcu)
119{
120 struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
121 kfree(c);
122}
123
124static void free_cache_rcu(struct rcu_head *rcu)
125{
126 struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
127 free_cache(c);
128}
129
130static void assign_config_rcu(struct vport *vport,
131 struct tnl_mutable_config *new_config)
132{
133 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
134 struct tnl_mutable_config *old_config;
135
136 old_config = tnl_vport->mutable;
137 rcu_assign_pointer(tnl_vport->mutable, new_config);
138 call_rcu(&old_config->rcu, free_config_rcu);
139}
140
141static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
142{
143 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
144 struct tnl_cache *old_cache;
145
146 old_cache = tnl_vport->cache;
147 rcu_assign_pointer(tnl_vport->cache, new_cache);
148
149 if (old_cache)
150 call_rcu(&old_cache->rcu, free_cache_rcu);
151}
152
153static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
154{
155 if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
156 if (mutable->port_config.saddr)
157 return &local_remote_ports;
158 else
159 return &remote_ports;
160 } else {
161 if (mutable->port_config.saddr)
162 return &key_local_remote_ports;
163 else
164 return &key_remote_ports;
165 }
166}
167
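/*
 * Hash table key for tunnel port lookup: ports are hashed and compared on
 * (saddr, daddr, in_key, tunnel_type).  port_cmp() also records the
 * rcu_dereference()d mutable config it matched against in 'mutable'.
 */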
168struct port_lookup_key {
169 const struct tnl_mutable_config *mutable;
170 __be64 key;
171 u32 tunnel_type;
172 __be32 saddr;
173 __be32 daddr;
174};
175
176/*
177 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
178 * the comparison.
179 */
180static int port_cmp(const struct tbl_node *node, void *target)
181{
182 const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
183 struct port_lookup_key *lookup = target;
184
185 lookup->mutable = rcu_dereference(tnl_vport->mutable);
186
187 return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
188 lookup->mutable->port_config.daddr == lookup->daddr &&
189 lookup->mutable->port_config.in_key == lookup->key &&
190 lookup->mutable->port_config.saddr == lookup->saddr);
191}
192
193static u32 port_hash(struct port_lookup_key *k)
194{
195 u32 x = jhash_3words(k->saddr, k->daddr, k->tunnel_type, 0);
196 return jhash_2words(k->key >> 32, k->key, x);
197}
198
199static u32 mutable_hash(const struct tnl_mutable_config *mutable)
200{
201 struct port_lookup_key lookup;
202
203 lookup.saddr = mutable->port_config.saddr;
204 lookup.daddr = mutable->port_config.daddr;
205 lookup.key = mutable->port_config.in_key;
206 lookup.tunnel_type = mutable->tunnel_type;
207
208 return port_hash(&lookup);
209}
210
211static void check_table_empty(void)
212{
213 if (tbl_count(port_table) == 0) {
214 struct tbl *old_table = port_table;
215
216 cancel_delayed_work_sync(&cache_cleaner_wq);
217 rcu_assign_pointer(port_table, NULL);
218 tbl_deferred_destroy(old_table, NULL);
219 }
220}
221
222static int add_port(struct vport *vport)
223{
224 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
225 int err;
226
227 if (!port_table) {
228 struct tbl *new_table;
229
230 new_table = tbl_create(0);
231 if (!new_table)
232 return -ENOMEM;
233
234 rcu_assign_pointer(port_table, new_table);
235 schedule_cache_cleaner();
236
237 } else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
238 struct tbl *old_table = port_table;
239 struct tbl *new_table;
240
241 new_table = tbl_expand(old_table);
242 if (IS_ERR(new_table))
243 return PTR_ERR(new_table);
244
245 rcu_assign_pointer(port_table, new_table);
246 tbl_deferred_destroy(old_table, NULL);
247 }
248
249 err = tbl_insert(port_table, &tnl_vport->tbl_node, mutable_hash(tnl_vport->mutable));
250 if (err) {
251 check_table_empty();
252 return err;
253 }
254
255 (*find_port_pool(tnl_vport->mutable))++;
256
257 return 0;
258}
259
260static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
261{
262 int err;
263 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
264 u32 hash;
265
266 hash = mutable_hash(new_mutable);
267 if (hash == tnl_vport->tbl_node.hash)
268 goto table_updated;
269
270 /*
271 * Ideally we should make this move atomic to avoid having gaps in
272 * finding tunnels or the possibility of failure. However, if we do
273 * find a tunnel it will always be consistent.
274 */
275 err = tbl_remove(port_table, &tnl_vport->tbl_node);
276 if (err)
277 return err;
278
279 err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
280 if (err) {
281 (*find_port_pool(tnl_vport->mutable))--;
282 check_table_empty();
283 return err;
284 }
285
286table_updated:
287 (*find_port_pool(tnl_vport->mutable))--;
288 assign_config_rcu(vport, new_mutable);
289 (*find_port_pool(tnl_vport->mutable))++;
290
291 return 0;
292}
293
294static int del_port(struct vport *vport)
295{
296 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
297 int err;
298
299 err = tbl_remove(port_table, &tnl_vport->tbl_node);
300 if (err)
301 return err;
302
303 check_table_empty();
304 (*find_port_pool(tnl_vport->mutable))--;
305
306 return 0;
307}
308
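/*
 * Looks up the tunnel port for a received packet, trying the most specific
 * class first: exact key with a configured local address, exact key with a
 * wildcard local address, then the same two classes again for flow-based
 * (TNL_T_KEY_MATCH) ports.
 */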
309struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
310 int tunnel_type,
311 const struct tnl_mutable_config **mutable)
312{
313 struct port_lookup_key lookup;
314 struct tbl *table = rcu_dereference(port_table);
315 struct tbl_node *tbl_node;
316
317 if (unlikely(!table))
318 return NULL;
319
320 lookup.saddr = saddr;
321 lookup.daddr = daddr;
322
323 if (tunnel_type & TNL_T_KEY_EXACT) {
324 lookup.key = key;
325 lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;
326
327 if (key_local_remote_ports) {
328 tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
329 if (tbl_node)
330 goto found;
331 }
332
333 if (key_remote_ports) {
334 lookup.saddr = 0;
335
336 tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
337 if (tbl_node)
338 goto found;
339
340 lookup.saddr = saddr;
341 }
342 }
343
344 if (tunnel_type & TNL_T_KEY_MATCH) {
345 lookup.key = 0;
346 lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;
347
348 if (local_remote_ports) {
349 tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
350 if (tbl_node)
351 goto found;
352 }
353
354 if (remote_ports) {
355 lookup.saddr = 0;
356
357 tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
358 if (tbl_node)
359 goto found;
360 }
361 }
362
363 return NULL;
364
365found:
366 *mutable = lookup.mutable;
367 return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
368}
369
370static inline void ecn_decapsulate(struct sk_buff *skb)
371{
372 /* This is accessing the outer IP header of the tunnel, which we've
373 * already validated to be OK. skb->data is currently set to the start
374 * of the inner Ethernet header, and we've validated ETH_HLEN.
375 */
376 if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
377 __be16 protocol = skb->protocol;
378
379 skb_set_network_header(skb, ETH_HLEN);
380
381 if (skb->protocol == htons(ETH_P_8021Q)) {
382 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
383 return;
384
385 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
386 skb_set_network_header(skb, VLAN_ETH_HLEN);
387 }
388
389 if (protocol == htons(ETH_P_IP)) {
390 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
391 + sizeof(struct iphdr))))
392 return;
393
394 IP_ECN_set_ce(ip_hdr(skb));
395 }
396#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
397 else if (protocol == htons(ETH_P_IPV6)) {
398 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
399 + sizeof(struct ipv6hdr))))
400 return;
401
402 IP6_ECN_set_ce(ipv6_hdr(skb));
403 }
404#endif
405 }
406}
407
408/* Called with rcu_read_lock. */
409void tnl_rcv(struct vport *vport, struct sk_buff *skb)
410{
411 /* Packets received by this function are in the following state:
412 * - skb->data points to the inner Ethernet header.
413 * - The inner Ethernet header is in the linear data area.
414 * - skb->csum does not include the inner Ethernet header.
415 * - The layer pointers point at the outer headers.
416 */
417
418 struct ethhdr *eh = (struct ethhdr *)skb->data;
419
420 if (likely(ntohs(eh->h_proto) >= 1536))
421 skb->protocol = eh->h_proto;
422 else
423 skb->protocol = htons(ETH_P_802_2);
424
425 skb_dst_drop(skb);
426 nf_reset(skb);
427 secpath_reset(skb);
428
429 ecn_decapsulate(skb);
430 compute_ip_summed(skb, false);
431
432 vport_receive(vport, skb);
433}
434
435static bool check_ipv4_address(__be32 addr)
436{
437 if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
438 || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
439 return false;
440
441 return true;
442}
443
444static bool ipv4_should_icmp(struct sk_buff *skb)
445{
446 struct iphdr *old_iph = ip_hdr(skb);
447
448 /* Don't respond to L2 broadcast. */
449 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
450 return false;
451
452 /* Don't respond to L3 broadcast or invalid addresses. */
453 if (!check_ipv4_address(old_iph->daddr) ||
454 !check_ipv4_address(old_iph->saddr))
455 return false;
456
457 /* Only respond to the first fragment. */
458 if (old_iph->frag_off & htons(IP_OFFSET))
459 return false;
460
461 /* Don't respond to ICMP error messages. */
462 if (old_iph->protocol == IPPROTO_ICMP) {
463 u8 icmp_type, *icmp_typep;
464
465 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
466 (old_iph->ihl << 2) +
467 offsetof(struct icmphdr, type) -
468 skb->data, sizeof(icmp_type),
469 &icmp_type);
470
471 if (!icmp_typep)
472 return false;
473
474 if (*icmp_typep > NR_ICMP_TYPES
475 || (*icmp_typep <= ICMP_PARAMETERPROB
476 && *icmp_typep != ICMP_ECHOREPLY
477 && *icmp_typep != ICMP_ECHO))
478 return false;
479 }
480
481 return true;
482}
483
484static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
485 unsigned int mtu, unsigned int payload_length)
486{
487 struct iphdr *iph, *old_iph = ip_hdr(skb);
488 struct icmphdr *icmph;
489 u8 *payload;
490
491 iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
492 icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
493 payload = skb_put(nskb, payload_length);
494
495 /* IP */
496 iph->version = 4;
497 iph->ihl = sizeof(struct iphdr) >> 2;
498 iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
499 IPTOS_PREC_INTERNETCONTROL;
500 iph->tot_len = htons(sizeof(struct iphdr)
501 + sizeof(struct icmphdr)
502 + payload_length);
503 get_random_bytes(&iph->id, sizeof(iph->id));
504 iph->frag_off = 0;
505 iph->ttl = IPDEFTTL;
506 iph->protocol = IPPROTO_ICMP;
507 iph->daddr = old_iph->saddr;
508 iph->saddr = old_iph->daddr;
509
510 ip_send_check(iph);
511
512 /* ICMP */
513 icmph->type = ICMP_DEST_UNREACH;
514 icmph->code = ICMP_FRAG_NEEDED;
515 icmph->un.gateway = htonl(mtu);
516 icmph->checksum = 0;
517
518 nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
519 nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
520 payload, payload_length,
521 nskb->csum);
522 icmph->checksum = csum_fold(nskb->csum);
523}
524
525#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
526static bool ipv6_should_icmp(struct sk_buff *skb)
527{
528 struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
529 int addr_type;
530 int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
531 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
532
533 /* Check source address is valid. */
534 addr_type = ipv6_addr_type(&old_ipv6h->saddr);
535 if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
536 return false;
537
538 /* Don't reply to unspecified addresses. */
539 if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
540 return false;
541
542 /* Don't respond to ICMP error messages. */
543 payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
544 if (payload_off < 0)
545 return false;
546
547 if (nexthdr == NEXTHDR_ICMP) {
548 u8 icmp_type, *icmp_typep;
549
550 icmp_typep = skb_header_pointer(skb, payload_off +
551 offsetof(struct icmp6hdr,
552 icmp6_type),
553 sizeof(icmp_type), &icmp_type);
554
555 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
556 return false;
557 }
558
559 return true;
560}
561
562static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
563 unsigned int mtu, unsigned int payload_length)
564{
565 struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
566 struct icmp6hdr *icmp6h;
567 u8 *payload;
568
569 ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
570 icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
571 payload = skb_put(nskb, payload_length);
572
573 /* IPv6 */
574 ipv6h->version = 6;
575 ipv6h->priority = 0;
576 memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
577 ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
578 + payload_length);
579 ipv6h->nexthdr = NEXTHDR_ICMP;
580 ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
581 ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
582 ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
583
584 /* ICMPv6 */
585 icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
586 icmp6h->icmp6_code = 0;
587 icmp6h->icmp6_cksum = 0;
588 icmp6h->icmp6_mtu = htonl(mtu);
589
590 nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
591 nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
592 payload, payload_length,
593 nskb->csum);
594 icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
595 sizeof(struct icmp6hdr)
596 + payload_length,
597 ipv6h->nexthdr, nskb->csum);
598}
599#endif /* IPv6 */
600
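/*
 * Synthesizes an ICMP "fragmentation needed" (IPv4) or "packet too big"
 * (IPv6) message for an oversized packet and feeds it back into the
 * datapath as if it had arrived on this tunnel port.  As used by
 * check_mtu(), a true return value means the oversized packet should be
 * dropped; false means no error could (or should) be generated.
 */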
601bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
602 struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
603{
604 unsigned int eth_hdr_len = ETH_HLEN;
605 unsigned int total_length = 0, header_length = 0, payload_length;
606 struct ethhdr *eh, *old_eh = eth_hdr(skb);
607 struct sk_buff *nskb;
608
609 /* Sanity check */
610 if (skb->protocol == htons(ETH_P_IP)) {
611 if (mtu < IP_MIN_MTU)
612 return false;
613
614 if (!ipv4_should_icmp(skb))
615 return true;
616 }
617#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
618 else if (skb->protocol == htons(ETH_P_IPV6)) {
619 if (mtu < IPV6_MIN_MTU)
620 return false;
621
622 /*
623 * In theory we should do PMTUD on IPv6 multicast messages but
624 * we don't have an address to send from so just fragment.
625 */
626 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
627 return false;
628
629 if (!ipv6_should_icmp(skb))
630 return true;
631 }
632#endif
633 else
634 return false;
635
636 /* Allocate */
637 if (old_eh->h_proto == htons(ETH_P_8021Q))
638 eth_hdr_len = VLAN_ETH_HLEN;
639
640 payload_length = skb->len - eth_hdr_len;
641 if (skb->protocol == htons(ETH_P_IP)) {
642 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
643 total_length = min_t(unsigned int, header_length +
644 payload_length, 576);
645 }
646#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
647 else {
648 header_length = sizeof(struct ipv6hdr) +
649 sizeof(struct icmp6hdr);
650 total_length = min_t(unsigned int, header_length +
651 payload_length, IPV6_MIN_MTU);
652 }
653#endif
654
655 total_length = min(total_length, mutable->mtu);
656 payload_length = total_length - header_length;
657
658 nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
659 payload_length);
660 if (!nskb)
661 return false;
662
663 skb_reserve(nskb, NET_IP_ALIGN);
664
665 /* Ethernet / VLAN */
666 eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
667 memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
668 memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
669 nskb->protocol = eh->h_proto = old_eh->h_proto;
670 if (old_eh->h_proto == htons(ETH_P_8021Q)) {
671 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
672
673 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
674 vh->h_vlan_encapsulated_proto = skb->protocol;
675 }
676 skb_reset_mac_header(nskb);
677
678 /* Protocol */
679 if (skb->protocol == htons(ETH_P_IP))
680 ipv4_build_icmp(skb, nskb, mtu, payload_length);
681#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
682 else
683 ipv6_build_icmp(skb, nskb, mtu, payload_length);
684#endif
685
686 /*
687 * Assume that flow based keys are symmetric with respect to input
688 * and output and use the key that we were going to put on the
689 * outgoing packet for the fake received packet. If the keys are
690 * not symmetric then PMTUD needs to be disabled since we won't have
691 * any way of synthesizing packets.
692 */
693 if ((mutable->port_config.flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
694 (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
695 OVS_CB(nskb)->tun_id = flow_key;
696
697 compute_ip_summed(nskb, false);
698 vport_receive(vport, nskb);
699
700 return true;
701}
702
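/*
 * Computes the effective path MTU (accounting for the tunnel header and any
 * VLAN tag), generates a PMTUD error via tnl_frag_needed() when the packet
 * is too big, and returns false if the packet must be dropped.  On success
 * *frag_offp holds the DF flag to set in the outer IP header.
 */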
703static bool check_mtu(struct sk_buff *skb,
704 struct vport *vport,
705 const struct tnl_mutable_config *mutable,
706 const struct rtable *rt, __be16 *frag_offp)
d1eb60cc 707{
842cf6f4
JG
708 int mtu;
709 __be16 frag_off;
710
711 frag_off = (mutable->port_config.flags & TNL_F_PMTUD) ? htons(IP_DF) : 0;
712 if (frag_off)
713 mtu = dst_mtu(&rt_dst(rt))
714 - ETH_HLEN
715 - mutable->tunnel_hlen
716 - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
717 else
718 mtu = mutable->mtu;
719
720 if (skb->protocol == htons(ETH_P_IP)) {
721 struct iphdr *old_iph = ip_hdr(skb);
722
723 frag_off |= old_iph->frag_off & htons(IP_DF);
724 mtu = max(mtu, IP_MIN_MTU);
725
726 if ((old_iph->frag_off & htons(IP_DF)) &&
727 mtu < ntohs(old_iph->tot_len)) {
728 if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
729 goto drop;
730 }
731 }
732#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
733 else if (skb->protocol == htons(ETH_P_IPV6)) {
734 unsigned int packet_length = skb->len - ETH_HLEN
735 - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
736
737 mtu = max(mtu, IPV6_MIN_MTU);
738
739 /* IPv6 requires PMTUD if the packet is above the minimum MTU. */
740 if (packet_length > IPV6_MIN_MTU)
741 frag_off = htons(IP_DF);
742
743 if (mtu < packet_length) {
744 if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
745 goto drop;
746 }
747 }
748#endif
749
750 *frag_offp = frag_off;
751 return true;
752
753drop:
754 *frag_offp = 0;
755 return false;
756}
757
758static void create_tunnel_header(const struct vport *vport,
759 const struct tnl_mutable_config *mutable,
760 const struct rtable *rt, void *header)
761{
762 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
763 struct iphdr *iph = header;
764
765 iph->version = 4;
766 iph->ihl = sizeof(struct iphdr) >> 2;
767 iph->frag_off = htons(IP_DF);
768 iph->protocol = tnl_vport->tnl_ops->ipproto;
769 iph->tos = mutable->port_config.tos;
770 iph->daddr = rt->rt_dst;
771 iph->saddr = rt->rt_src;
772 iph->ttl = mutable->port_config.ttl;
773 if (!iph->ttl)
774 iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);
775
776 tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
777}
778
779static inline void *get_cached_header(const struct tnl_cache *cache)
780{
781 return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
782}
783
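/*
 * A cached header may be reused only while nothing it depends on has
 * changed: the optional expiration time, the routing cache generation, the
 * hard header (ARP) sequence, the port's mutable config sequence number
 * and, for routes through internal devices, the liveness of the cached
 * flow are all checked here.
 */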
784static inline bool check_cache_valid(const struct tnl_cache *cache,
785 const struct tnl_mutable_config *mutable)
786{
787 return cache &&
788#ifdef NEED_CACHE_TIMEOUT
789 time_before(jiffies, cache->expiration) &&
790#endif
791#ifdef HAVE_RT_GENID
792 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
793#endif
794#ifdef HAVE_HH_SEQ
795 rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
796#endif
797 mutable->seq == cache->mutable_seq &&
798 (!is_internal_dev(rt_dst(cache->rt).dev) ||
799 (cache->flow && !cache->flow->dead));
800}
801
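/*
 * Periodic cleaner: walks the port table and drops any header cache that is
 * no longer valid so that the route, flow and other references it holds can
 * be released.  The trylock keeps it from ever blocking the transmit path.
 */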
802static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
803{
804 struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
805 const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
806 const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
807
808 if (cache && !check_cache_valid(cache, mutable) &&
809 spin_trylock_bh(&tnl_vport->cache_lock)) {
810 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
811 spin_unlock_bh(&tnl_vport->cache_lock);
812 }
813
814 return 0;
815}
816
817static void cache_cleaner(struct work_struct *work)
818{
819 schedule_cache_cleaner();
820
821 rcu_read_lock();
822 tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
823 rcu_read_unlock();
824}
825
826static inline void create_eth_hdr(struct tnl_cache *cache,
827 const struct rtable *rt)
828{
829 void *cache_data = get_cached_header(cache);
830 int hh_len = rt_dst(rt).hh->hh_len;
831 int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;
832
833#ifdef HAVE_HH_SEQ
834 unsigned hh_seq;
835
836 do {
837 hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
838 memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
839 } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));
840
841 cache->hh_seq = hh_seq;
842#else
843 read_lock_bh(&rt_dst(rt).hh->hh_lock);
844 memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
845 read_unlock_bh(&rt_dst(rt).hh->hh_lock);
846#endif
847}
848
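/*
 * Opportunistically builds a ready-to-copy header cache (link-layer header
 * plus tunnel IP header) for this port under cache_lock.  If the route
 * points at one of our own internal devices, the matching datapath flow is
 * cached as well so that cached transmits can be handed straight back to
 * the datapath.
 */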
849static struct tnl_cache *build_cache(struct vport *vport,
850 const struct tnl_mutable_config *mutable,
851 struct rtable *rt)
852{
853 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
854 struct tnl_cache *cache;
855 void *cache_data;
856 int cache_len;
857
858 if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))
859 return NULL;
860
861 /*
862 * If there is no entry in the ARP cache or if this device does not
863 * support hard header caching, just fall back to the IP stack.
864 */
865 if (!rt_dst(rt).hh)
866 return NULL;
867
868 /*
869 * If lock is contended fall back to directly building the header.
870 * We're not going to help performance by sitting here spinning.
871 */
872 if (!spin_trylock_bh(&tnl_vport->cache_lock))
873 return NULL;
874
875 cache = tnl_vport->cache;
876 if (check_cache_valid(cache, mutable))
877 goto unlock;
878 else
879 cache = NULL;
880
881 cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;
882
883 cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
884 cache_len, GFP_ATOMIC);
885 if (!cache)
886 goto unlock;
887
888 cache->len = cache_len;
889
890 create_eth_hdr(cache, rt);
891 cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;
892
893 create_tunnel_header(vport, mutable, rt, cache_data);
894
895 cache->mutable_seq = mutable->seq;
896 cache->rt = rt;
897#ifdef NEED_CACHE_TIMEOUT
898 cache->expiration = jiffies + tnl_vport->cache_exp_interval;
899#endif
900
901 if (is_internal_dev(rt_dst(rt).dev)) {
902 struct odp_flow_key flow_key;
903 struct tbl_node *flow_node;
904 struct vport *vport;
905 struct sk_buff *skb;
906 bool is_frag;
907 int err;
908
909 vport = internal_dev_get_vport(rt_dst(rt).dev);
910 if (!vport)
911 goto done;
912
913 skb = alloc_skb(cache->len, GFP_ATOMIC);
914 if (!skb)
915 goto done;
916
917 __skb_put(skb, cache->len);
918 memcpy(skb->data, get_cached_header(cache), cache->len);
919
920 err = flow_extract(skb, vport->port_no, &flow_key, &is_frag);
921
922 kfree_skb(skb);
923 if (err || is_frag)
924 goto done;
925
926 flow_node = tbl_lookup(rcu_dereference(vport->dp->table),
927 &flow_key, flow_hash(&flow_key),
928 flow_cmp);
929 if (flow_node) {
930 struct sw_flow *flow = flow_cast(flow_node);
931
932 cache->flow = flow;
933 flow_hold(flow);
934 }
935 }
936
937done:
938 assign_cache_rcu(vport, cache);
939
940unlock:
941 spin_unlock_bh(&tnl_vport->cache_lock);
942
943 return cache;
944}
945
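/*
 * Returns the route for the tunnel destination, preferring the cached route
 * when the ToS matches and the cache is still valid; otherwise performs a
 * fresh ip_route_output_key() and, if possible, rebuilds the header cache.
 * *cache is set to the cache entry used, or NULL if the route is uncached.
 */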
946static struct rtable *find_route(struct vport *vport,
947 const struct tnl_mutable_config *mutable,
948 u8 tos, struct tnl_cache **cache)
949{
950 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
951 struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
952
953 *cache = NULL;
954 tos = RT_TOS(tos);
955
956 if (likely(tos == mutable->port_config.tos &&
957 check_cache_valid(cur_cache, mutable))) {
958 *cache = cur_cache;
959 return cur_cache->rt;
960 } else {
961 struct rtable *rt;
962 struct flowi fl = { .nl_u = { .ip4_u =
963 { .daddr = mutable->port_config.daddr,
964 .saddr = mutable->port_config.saddr,
965 .tos = tos } },
966 .proto = tnl_vport->tnl_ops->ipproto };
967
968 if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
969 return NULL;
970
971 if (likely(tos == mutable->port_config.tos))
972 *cache = build_cache(vport, mutable, rt);
973
974 return rt;
975 }
976}
977
978static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
979{
980 if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
981 struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
982 if (unlikely(!nskb)) {
983 kfree_skb(skb);
984 return ERR_PTR(-ENOMEM);
985 }
986
987 set_skb_csum_bits(skb, nskb);
988
989 if (skb->sk)
990 skb_set_owner_w(nskb, skb->sk);
991
992 kfree_skb(skb);
993 return nskb;
994 }
995
996 return skb;
997}
998
999static inline bool need_linearize(const struct sk_buff *skb)
1000{
1001 int i;
1002
1003 if (unlikely(skb_shinfo(skb)->frag_list))
1004 return true;
1005
1006 /*
1007 * Generally speaking we should linearize if there are paged frags.
1008 * However, if all of the refcounts are 1 we know nobody else can
1009 * change them from underneath us and we can skip the linearization.
1010 */
1011 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1012 if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
1013 return true;
1014
1015 return false;
1016}
1017
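/*
 * Prepares an skb for encapsulation: fixes up checksum offload state,
 * guarantees enough headroom for the tunnel header, segments GSO packets
 * (returning a list linked through skb->next) and falls back to software
 * checksumming where the offload state cannot survive encapsulation.
 * Returns an ERR_PTR on failure.
 */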
1018static struct sk_buff *handle_offloads(struct sk_buff *skb,
1019 const struct tnl_mutable_config *mutable,
1020 const struct rtable *rt)
1021{
1022 int min_headroom;
1023 int err;
1024
1025 forward_ip_summed(skb);
1026
1027 err = vswitch_skb_checksum_setup(skb);
1028 if (unlikely(err))
1029 goto error_free;
1030
1031 min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
1032 + mutable->tunnel_hlen;
1033
1034 if (skb_is_gso(skb)) {
1035 struct sk_buff *nskb;
1036
1037 /*
1038 * If we are doing GSO on a pskb it is better to make sure that
1039 * the headroom is correct now. We will only have to copy the
1040 * portion in the linear data area and GSO will preserve
1041 * headroom when it creates the segments. This is particularly
1042 * beneficial on Xen where we get a lot of GSO pskbs.
1043 * Conversely, we avoid copying if it is just to get our own
1044 * writable clone because GSO will do the copy for us.
1045 */
1046 if (skb_headroom(skb) < min_headroom) {
1047 skb = check_headroom(skb, min_headroom);
1048 if (IS_ERR(skb)) {
1049 err = PTR_ERR(skb);
1050 goto error;
1051 }
1052 }
1053
1054 nskb = skb_gso_segment(skb, 0);
1055 kfree_skb(skb);
1056 if (IS_ERR(nskb)) {
1057 err = PTR_ERR(nskb);
1058 goto error;
1059 }
1060
1061 skb = nskb;
1062 } else {
1063 skb = check_headroom(skb, min_headroom);
1064 if (IS_ERR(skb)) {
1065 err = PTR_ERR(skb);
1066 goto error;
1067 }
1068
1069 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1070 /*
1071 * Pages aren't locked and could change at any time.
1072 * If this happens after we compute the checksum, the
1073 * checksum will be wrong. We linearize now to avoid
1074 * this problem.
1075 */
1076 if (unlikely(need_linearize(skb))) {
1077 err = __skb_linearize(skb);
1078 if (unlikely(err))
1079 goto error_free;
1080 }
1081
1082 err = skb_checksum_help(skb);
1083 if (unlikely(err))
1084 goto error_free;
1085 } else if (skb->ip_summed == CHECKSUM_COMPLETE)
1086 skb->ip_summed = CHECKSUM_NONE;
1087 }
1088
1089 return skb;
1090
1091error_free:
1092 kfree_skb(skb);
1093error:
1094 return ERR_PTR(err);
1095}
1096
1097static int send_frags(struct sk_buff *skb,
1098 const struct tnl_mutable_config *mutable)
1099{
1100 int sent_len;
1101 int err;
1102
1103 sent_len = 0;
1104 while (skb) {
1105 struct sk_buff *next = skb->next;
1106 int frag_len = skb->len - mutable->tunnel_hlen;
1107
1108 skb->next = NULL;
1109 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1110
1111 err = ip_local_out(skb);
1112 if (likely(net_xmit_eval(err) == 0))
1113 sent_len += frag_len;
1114 else {
1115 skb = next;
1116 goto free_frags;
1117 }
1118
1119 skb = next;
1120 }
1121
1122 return sent_len;
1123
1124free_frags:
1125 /*
1126 * There's no point in continuing to send fragments once one has been
1127 * dropped so just free the rest. This may help improve the congestion
1128 * that caused the first packet to be dropped.
1129 */
1130 tnl_free_linked_skbs(skb);
1131 return sent_len;
1132}
1133
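/*
 * Transmit path: validates the inner headers, chooses ToS/TTL (optionally
 * inherited from the inner packet), finds a route (ideally via the header
 * cache), applies offload and MTU handling, prepends the cached or freshly
 * built headers and finally sends each resulting packet either back into
 * the datapath (internal devices) or out through the IP stack.  Returns the
 * number of payload bytes sent, excluding the tunnel header.
 */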
1134int tnl_send(struct vport *vport, struct sk_buff *skb)
1135{
1136 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1137 const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1138
1139 enum vport_err_type err = VPORT_E_TX_ERROR;
1140 struct rtable *rt;
1141 struct dst_entry *unattached_dst = NULL;
1142 struct tnl_cache *cache;
1143 int sent_len = 0;
1144 __be16 frag_off;
1145 u8 ttl;
1146 u8 inner_tos;
1147 u8 tos;
1148
1149 /* Validate the protocol headers before we try to use them. */
1150 if (skb->protocol == htons(ETH_P_8021Q)) {
1151 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1152 goto error_free;
1153
1154 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1155 skb_set_network_header(skb, VLAN_ETH_HLEN);
1156 }
1157
1158 if (skb->protocol == htons(ETH_P_IP)) {
1159 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1160 + sizeof(struct iphdr))))
1161 skb->protocol = 0;
1162 }
1163#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1164 else if (skb->protocol == htons(ETH_P_IPV6)) {
1165 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1166 + sizeof(struct ipv6hdr))))
1167 skb->protocol = 0;
1168 }
1169#endif
1170
1171 /* ToS */
1172 if (skb->protocol == htons(ETH_P_IP))
1173 inner_tos = ip_hdr(skb)->tos;
1174#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1175 else if (skb->protocol == htons(ETH_P_IPV6))
1176 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1177#endif
1178 else
1179 inner_tos = 0;
1180
1181 if (mutable->port_config.flags & TNL_F_TOS_INHERIT)
1182 tos = inner_tos;
1183 else
1184 tos = mutable->port_config.tos;
1185
1186 tos = INET_ECN_encapsulate(tos, inner_tos);
1187
1188 /* Route lookup */
1189 rt = find_route(vport, mutable, tos, &cache);
1190 if (unlikely(!rt))
1191 goto error_free;
1192 if (unlikely(!cache))
1193 unattached_dst = &rt_dst(rt);
1194
1195 /* Reset SKB */
1196 nf_reset(skb);
1197 secpath_reset(skb);
1198 skb_dst_drop(skb);
1199
1200 /* Offloading */
1201 skb = handle_offloads(skb, mutable, rt);
1202 if (IS_ERR(skb))
1203 goto error;
1204
1205 /* MTU */
1206 if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1207 err = VPORT_E_TX_DROPPED;
1208 goto error_free;
1209 }
1210
1211 /*
1212 * If we are over the MTU, allow the IP stack to handle fragmentation.
1213 * Fragmentation is a slow path anyway.
1214 */
1215 if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1216 cache)) {
1217 unattached_dst = &rt_dst(rt);
1218 dst_hold(unattached_dst);
1219 cache = NULL;
1220 }
1221
1222 /* TTL */
1223 ttl = mutable->port_config.ttl;
1224 if (!ttl)
1225 ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);
1226
1227 if (mutable->port_config.flags & TNL_F_TTL_INHERIT) {
1228 if (skb->protocol == htons(ETH_P_IP))
1229 ttl = ip_hdr(skb)->ttl;
1230#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1231 else if (skb->protocol == htons(ETH_P_IPV6))
1232 ttl = ipv6_hdr(skb)->hop_limit;
1233#endif
1234 }
1235
1236 while (skb) {
1237 struct iphdr *iph;
1238 struct sk_buff *next_skb = skb->next;
1239 skb->next = NULL;
1240
1241 if (likely(cache)) {
1242 skb_push(skb, cache->len);
1243 memcpy(skb->data, get_cached_header(cache), cache->len);
1244 skb_reset_mac_header(skb);
1245 skb_set_network_header(skb, rt_dst(rt).hh->hh_len);
1246
1247 } else {
1248 skb_push(skb, mutable->tunnel_hlen);
1249 create_tunnel_header(vport, mutable, rt, skb->data);
1250 skb_reset_network_header(skb);
1251
1252 if (next_skb)
1253 skb_dst_set(skb, dst_clone(unattached_dst));
1254 else {
1255 skb_dst_set(skb, unattached_dst);
1256 unattached_dst = NULL;
1257 }
1258 }
1259 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1260
1261 iph = ip_hdr(skb);
1262 iph->tos = tos;
1263 iph->ttl = ttl;
1264 iph->frag_off = frag_off;
1265 ip_select_ident(iph, &rt_dst(rt), NULL);
1266
1267 skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
1268 if (unlikely(!skb))
1269 goto next;
1270
1271 if (likely(cache)) {
1272 int orig_len = skb->len - cache->len;
1273 struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
1274
1275 skb->protocol = htons(ETH_P_IP);
1276 iph->tot_len = htons(skb->len - skb_network_offset(skb));
1277 ip_send_check(iph);
1278
1279 if (cache_vport) {
1280 OVS_CB(skb)->flow = cache->flow;
1281 compute_ip_summed(skb, true);
1282 vport_receive(cache_vport, skb);
1283 sent_len += orig_len;
1284 } else {
1285 int err;
1286
1287 skb->dev = rt_dst(rt).dev;
1288 err = dev_queue_xmit(skb);
1289
1290 if (likely(net_xmit_eval(err) == 0))
1291 sent_len += orig_len;
1292 }
1293 } else
1294 sent_len += send_frags(skb, mutable);
1295
1296next:
1297 skb = next_skb;
1298 }
1299
1300 if (unlikely(sent_len == 0))
1301 vport_record_error(vport, VPORT_E_TX_DROPPED);
1302
1303 goto out;
1304
1305error_free:
1306 tnl_free_linked_skbs(skb);
1307error:
1308 dst_release(unattached_dst);
1309 vport_record_error(vport, err);
1310out:
1311 return sent_len;
1312}
1313
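/*
 * Parses and validates a tnl_port_config: requires a destination address,
 * rejects ToS values that RT_TOS() would not preserve, computes the tunnel
 * header length and derives the tunnel_type lookup class from
 * TNL_F_IN_KEY_MATCH, then makes sure the result does not collide with an
 * existing tunnel port.
 */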
1314static int set_config(const void *config, const struct tnl_ops *tnl_ops,
1315 const struct vport *cur_vport,
1316 struct tnl_mutable_config *mutable)
1317{
1318 const struct vport *old_vport;
1319 const struct tnl_mutable_config *old_mutable;
1320
1321 mutable->port_config = *(struct tnl_port_config *)config;
1322
1323 if (mutable->port_config.daddr == 0)
1324 return -EINVAL;
1325
1326 if (mutable->port_config.tos != RT_TOS(mutable->port_config.tos))
1327 return -EINVAL;
1328
1329 mutable->tunnel_hlen = tnl_ops->hdr_len(&mutable->port_config);
1330 if (mutable->tunnel_hlen < 0)
1331 return mutable->tunnel_hlen;
1332
1333 mutable->tunnel_hlen += sizeof(struct iphdr);
1334
1335 mutable->tunnel_type = tnl_ops->tunnel_type;
1336 if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
1337 mutable->tunnel_type |= TNL_T_KEY_MATCH;
1338 mutable->port_config.in_key = 0;
1339 } else
1340 mutable->tunnel_type |= TNL_T_KEY_EXACT;
1341
1342 old_vport = tnl_find_port(mutable->port_config.saddr,
1343 mutable->port_config.daddr,
1344 mutable->port_config.in_key,
1345 mutable->tunnel_type,
1346 &old_mutable);
1347
1348 if (old_vport && old_vport != cur_vport)
1349 return -EEXIST;
1350
1351 if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
1352 mutable->port_config.out_key = 0;
1353
1354 return 0;
1355}
1356
1357struct vport *tnl_create(const struct vport_parms *parms,
1358 const struct vport_ops *vport_ops,
1359 const struct tnl_ops *tnl_ops)
1360{
1361 struct vport *vport;
1362 struct tnl_vport *tnl_vport;
1363 int initial_frag_id;
1364 int err;
1365
1366 vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1367 if (IS_ERR(vport)) {
1368 err = PTR_ERR(vport);
1369 goto error;
1370 }
1371
1372 tnl_vport = tnl_vport_priv(vport);
1373
1374 strcpy(tnl_vport->name, parms->name);
1375 tnl_vport->tnl_ops = tnl_ops;
1376
1377 tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1378 if (!tnl_vport->mutable) {
1379 err = -ENOMEM;
1380 goto error_free_vport;
1381 }
1382
1383 vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
1384 tnl_vport->mutable->mtu = ETH_DATA_LEN;
1385
1386 get_random_bytes(&initial_frag_id, sizeof(int));
1387 atomic_set(&tnl_vport->frag_id, initial_frag_id);
1388
1389 err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
1390 if (err)
1391 goto error_free_mutable;
1392
1393 spin_lock_init(&tnl_vport->cache_lock);
1394
1395#ifdef NEED_CACHE_TIMEOUT
1396 tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1397 (net_random() % (MAX_CACHE_EXP / 2));
1398#endif
1399
1400 err = add_port(vport);
1401 if (err)
1402 goto error_free_mutable;
1403
1404 return vport;
1405
1406error_free_mutable:
1407 kfree(tnl_vport->mutable);
1408error_free_vport:
1409 vport_free(vport);
1410error:
1411 return ERR_PTR(err);
1412}
1413
1414int tnl_modify(struct vport *vport, struct odp_port *port)
1415{
1416 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1417 struct tnl_mutable_config *mutable;
1418 int err;
1419
1420 mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1421 if (!mutable) {
1422 err = -ENOMEM;
1423 goto error;
1424 }
1425
1426 err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
1427 if (err)
1428 goto error_free;
1429
1430 mutable->seq++;
1431
1432 err = move_port(vport, mutable);
1433 if (err)
1434 goto error_free;
1435
1436 return 0;
1437
1438error_free:
1439 kfree(mutable);
1440error:
1441 return err;
1442}
1443
1444static void free_port_rcu(struct rcu_head *rcu)
1445{
1446 struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);
1447
1448 spin_lock_bh(&tnl_vport->cache_lock);
1449 free_cache(tnl_vport->cache);
1450 spin_unlock_bh(&tnl_vport->cache_lock);
1451
1452 kfree(tnl_vport->mutable);
1453 vport_free(tnl_vport_to_vport(tnl_vport));
1454}
1455
1456int tnl_destroy(struct vport *vport)
1457{
1458 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1459 const struct tnl_mutable_config *old_mutable;
1460
1461 if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
1462 tnl_vport->mutable->port_config.daddr,
1463 tnl_vport->mutable->port_config.in_key,
1464 tnl_vport->mutable->tunnel_type,
1465 &old_mutable))
1466 del_port(vport);
1467
1468 call_rcu(&tnl_vport->rcu, free_port_rcu);
1469
1470 return 0;
1471}
1472
1473int tnl_set_mtu(struct vport *vport, int mtu)
1474{
1475 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1476 struct tnl_mutable_config *mutable;
1477
1478 mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1479 if (!mutable)
1480 return -ENOMEM;
1481
1482 mutable->mtu = mtu;
1483 assign_config_rcu(vport, mutable);
1484
1485 return 0;
1486}
1487
1488int tnl_set_addr(struct vport *vport, const unsigned char *addr)
1489{
1490 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1491 struct tnl_mutable_config *mutable;
1492
1493 mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1494 if (!mutable)
1495 return -ENOMEM;
1496
1497 memcpy(mutable->eth_addr, addr, ETH_ALEN);
1498 assign_config_rcu(vport, mutable);
1499
1500 return 0;
1501}
1502
1503const char *tnl_get_name(const struct vport *vport)
1504{
1505 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1506 return tnl_vport->name;
1507}
1508
1509const unsigned char *tnl_get_addr(const struct vport *vport)
1510{
1511 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1512 return rcu_dereference(tnl_vport->mutable)->eth_addr;
1513}
1514
1515int tnl_get_mtu(const struct vport *vport)
1516{
1517 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1518 return rcu_dereference(tnl_vport->mutable)->mtu;
1519}
1520
1521void tnl_free_linked_skbs(struct sk_buff *skb)
1522{
1523 if (unlikely(!skb))
1524 return;
1525
1526 while (skb) {
1527 struct sk_buff *next = skb->next;
1528 kfree_skb(skb);
1529 skb = next;
1530 }
1531}