/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "table.h"
#include "tunnel.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time increases performance by reducing the frequency with which
 * the cache needs to be rebuilt.  A variety of factors may cause the cache to
 * be invalidated before the expiration time, but this is the maximum.  The
 * time is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif

/*
 * Interval at which to check for and remove caches that are no longer valid.
 * Caches are checked for validity before they are used for packet
 * encapsulation and old caches are removed at that time.  However, if no
 * packets are sent through the tunnel then the cache will never be destroyed.
 * Since it holds references to a number of system objects, the cache will
 * continue to use system resources by not allowing those objects to be
 * destroyed.  The cache cleaner is periodically run to free invalid caches.
 * It does not significantly affect system performance.  A lower interval will
 * release resources faster but will itself consume resources by requiring
 * more frequent checks.  A longer interval may result in messages being
 * printed to the kernel message buffer about unreleased resources.  The
 * interval is expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16

static struct tbl __rcu *port_table __read_mostly;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
{
	return container_of(node, struct tnl_vport, tbl_node);
}

/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
	return rcu_dereference_protected(tnl_vport->cache,
					 lockdep_is_held(&tnl_vport->cache_lock));
}

static inline void schedule_cache_cleaner(void)
{
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
	if (!cache)
		return;

	flow_put(cache->flow);
	ip_rt_put(cache->rt);
	kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
	kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
	free_cache(c);
}

static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = cache_dereference(tnl_vport);
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
}

static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
	}
}

struct port_lookup_key {
	const struct tnl_mutable_config *mutable;
	__be64 key;
	u32 tunnel_type;
	__be32 saddr;
	__be32 daddr;
};

/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target)
{
	const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
	struct port_lookup_key *lookup = target;

	lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
		lookup->mutable->daddr == lookup->daddr &&
		lookup->mutable->in_key == lookup->key &&
		lookup->mutable->saddr == lookup->saddr);
}

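/*
 * Note that the hash mixes the 32-bit endpoint addresses and the tunnel type
 * first, then folds in the 64-bit key as two 32-bit halves, so every field of
 * the lookup key contributes to the bucket choice.
 */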
static u32 port_hash(struct port_lookup_key *k)
{
	u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
			     k->tunnel_type, 0);
	return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);
}

static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
	struct port_lookup_key lookup;

	lookup.saddr = mutable->saddr;
	lookup.daddr = mutable->daddr;
	lookup.key = mutable->in_key;
	lookup.tunnel_type = mutable->tunnel_type;

	return port_hash(&lookup);
}

static void check_table_empty(void)
{
	struct tbl *old_table = rtnl_dereference(port_table);

	if (tbl_count(old_table) == 0) {
		cancel_delayed_work_sync(&cache_cleaner_wq);
		rcu_assign_pointer(port_table, NULL);
		tbl_deferred_destroy(old_table, NULL);
	}
}

static int add_port(struct vport *vport)
{
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	if (!port_table) {
		struct tbl *new_table;

		new_table = tbl_create(TBL_MIN_BUCKETS);
		if (!new_table)
			return -ENOMEM;

		rcu_assign_pointer(port_table, new_table);
		schedule_cache_cleaner();

	} else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
		struct tbl *new_table;

		new_table = tbl_expand(cur_table);
		if (IS_ERR(new_table))
			return PTR_ERR(new_table);

		rcu_assign_pointer(port_table, new_table);
		tbl_deferred_destroy(cur_table, NULL);
	}

	err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
			 mutable_hash(rtnl_dereference(tnl_vport->mutable)));
	if (err) {
		check_table_empty();
		return err;
	}

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

	return 0;
}

static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
	int err;
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = mutable_hash(new_mutable);
	if (hash == tnl_vport->tbl_node.hash)
		goto table_updated;

	/*
	 * Ideally we should make this move atomic to avoid having gaps in
	 * finding tunnels or the possibility of failure.  However, if we do
	 * find a tunnel it will always be consistent.
	 */
	err = tbl_remove(cur_table, &tnl_vport->tbl_node);
	if (err)
		return err;

	err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
	if (err) {
		(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
		check_table_empty();
		return err;
	}

table_updated:
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

	return 0;
}

static int del_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);
	if (err)
		return err;

	check_table_empty();
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;

	return 0;
}

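/*
 * Lookup proceeds from the most to the least specific match: an exact-key
 * tunnel with both endpoints set, then an exact-key tunnel with only the
 * remote address set, then the same two combinations for flow-based
 * (TNL_T_KEY_MATCH) tunnels.  The per-pool counters above let us skip hash
 * lookups entirely for combinations that have no configured ports.
 */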
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
			    int tunnel_type,
			    const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference_rtnl(port_table);
	struct tbl_node *tbl_node;

	if (unlikely(!table))
		return NULL;

	lookup.saddr = saddr;
	lookup.daddr = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.key = key;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (key_remote_ports) {
			lookup.saddr = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;

			lookup.saddr = saddr;
		}
	}

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.key = 0;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (remote_ports) {
			lookup.saddr = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}
	}

	return NULL;

found:
	*mutable = lookup.mutable;
	return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}

static inline void ecn_decapsulate(struct sk_buff *skb)
{
	/* This is accessing the outer IP header of the tunnel, which we've
	 * already validated to be OK.  skb->data is currently set to the start
	 * of the inner Ethernet header, and we've validated ETH_HLEN.
	 */
	if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (skb->protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}

/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
	/* Packets received by this function are in the following state:
	 * - skb->data points to the inner Ethernet header.
	 * - The inner Ethernet header is in the linear data area.
	 * - skb->csum does not include the inner Ethernet header.
	 * - The layer pointers point at the outer headers.
	 */

	struct ethhdr *eh = (struct ethhdr *)skb->data;

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	ecn_decapsulate(skb);
	compute_ip_summed(skb, false);

	vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}

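/*
 * The checks below roughly follow the RFC 1122 restrictions on generating
 * ICMP errors: never in response to another ICMP error, to a link-layer or
 * IP broadcast/multicast destination, or to a non-initial fragment.
 */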
static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);

		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}

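/*
 * Builds the synthesized reply directly into 'nskb':
 *
 *	[IPv4 header][ICMP "fragmentation needed" header][start of old packet]
 *
 * The next-hop MTU travels in the otherwise unused gateway field of the ICMP
 * header, per RFC 1191.
 */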
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);
	icmph->checksum = 0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							 icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      + payload_length,
					      ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

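/*
 * Emulates path MTU discovery for the encapsulated flow: instead of relying
 * on the IP stack, we synthesize an ICMP "fragmentation needed" or ICMPv6
 * "packet too big" reply and inject it into the datapath as if it had been
 * received on this tunnel port, so it travels back along the original
 * sender's path.  A return of true means the reply was generated (or
 * intentionally suppressed) and the oversized packet should be dropped.
 */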
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, IPV6_MIN_MTU);
	}
#endif

	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	}
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	/*
	 * Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	 */
	if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	compute_ip_summed(nskb, false);
	vport_receive(vport, nskb);

	return true;
}

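/*
 * The usable inner MTU is the route MTU minus everything encapsulation adds:
 *
 *	mtu = dst_mtu(route) - ETH_HLEN - tunnel_hlen [- VLAN_HLEN if tagged]
 *
 * For example, with a 1500-byte route MTU and a 24-byte tunnel_hlen (an outer
 * IPv4 header plus a basic 4-byte GRE header for a keyless tunnel), an
 * untagged inner IPv4 packet may be at most 1500 - 24 - 14 = 1462 bytes.
 */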
static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
{
	bool pmtud = mutable->flags & TNL_F_PMTUD;
	__be16 frag_off = 0;
	int mtu;

	if (pmtud) {
		frag_off = htons(IP_DF);

		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- mutable->tunnel_hlen
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ?
			   VLAN_HLEN : 0);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		frag_off |= iph->frag_off & htons(IP_DF);

		if (pmtud && iph->frag_off & htons(IP_DF)) {
			mtu = max(mtu, IP_MIN_MTU);

			if (ntohs(iph->tot_len) > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		unsigned int packet_length = skb->len - ETH_HLEN
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ?
			   VLAN_HLEN : 0);

		/* IPv6 requires PMTUD if the packet is above the minimum MTU. */
		if (packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (pmtud) {
			mtu = max(mtu, IPV6_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;
}

static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = htons(IP_DF);
	iph->protocol = tnl_vport->tnl_ops->ipproto;
	iph->tos = mutable->tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = mutable->ttl;
	if (!iph->ttl)
		iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

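/*
 * A header cache entry is allocated as a single block: the struct itself,
 * padding up to CACHE_DATA_ALIGN, then the prebuilt headers that get copied
 * in front of each packet on the fast path:
 *
 *	[struct tnl_cache][pad][L2 header][outer IP header][tunnel header]
 */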
static inline void *get_cached_header(const struct tnl_cache *cache)
{
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

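/*
 * A cache entry is usable only while nothing it depends on has changed: it
 * must not have timed out (on kernels without change notification), the
 * routing table generation and the cached hardware header must be current,
 * the port's mutable config must not have been replaced, and any flow cached
 * for an internal device must still be alive.
 */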
static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
{
	return cache &&
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		(cache->flow && !cache->flow->dead));
}

static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
{
	struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);
	}

	return 0;
}

static void cache_cleaner(struct work_struct *work)
{
	schedule_cache_cleaner();

	rcu_read_lock();
	tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
	rcu_read_unlock();
}

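/*
 * Snapshots the neighbour's hardware header into the cache.  On kernels with
 * hh_seq this is done under the seqlock's read side and retried until a
 * consistent copy is obtained; older kernels take the hh read lock instead.
 */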
static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
{
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
	unsigned hh_seq;

	do {
		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;
#else
	read_lock_bh(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}

static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
				     struct rtable *rt)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;
	void *cache_data;
	int cache_len;

	if (!(mutable->flags & TNL_F_HDR_CACHE))
		return NULL;

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching just fall back to the IP stack.
	 */
	if (!rt_dst(rt).hh)
		return NULL;

	/*
	 * If lock is contended fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	 */
	if (!spin_trylock_bh(&tnl_vport->cache_lock))
		return NULL;

	cache = cache_dereference(tnl_vport);
	if (check_cache_valid(cache, mutable))
		goto unlock;
	else
		cache = NULL;

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);
	if (!cache)
		goto unlock;

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;
	cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

	if (is_internal_dev(rt_dst(rt).dev)) {
		struct sw_flow_key flow_key;
		struct tbl_node *flow_node;
		struct vport *dst_vport;
		struct sk_buff *skb;
		bool is_frag;
		int err;

		dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
		if (!dst_vport)
			goto done;

		skb = alloc_skb(cache->len, GFP_ATOMIC);
		if (!skb)
			goto done;

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, dst_vport->port_no, &flow_key, &is_frag);

		kfree_skb(skb);
		if (err || is_frag)
			goto done;

		flow_node = tbl_lookup(rcu_dereference(dst_vport->dp->table),
				       &flow_key, flow_hash(&flow_key),
				       flow_cmp);
		if (flow_node) {
			struct sw_flow *flow = flow_cast(flow_node);

			cache->flow = flow;
			flow_hold(flow);
		}
	}

done:
	assign_cache_rcu(vport, cache);

unlock:
	spin_unlock_bh(&tnl_vport->cache_lock);

	return cache;
}

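/*
 * Returns the route for the tunnel's remote endpoint, preferring the cached
 * route when the header cache is still valid and the ToS matches the
 * configured value.  On a cache miss we do a normal routing lookup and, when
 * the ToS permits, opportunistically rebuild the cache for future packets.
 */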
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	*cache = NULL;
	tos = RT_TOS(tos);

	if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
		*cache = cur_cache;
		return cur_cache->rt;
	} else {
		struct rtable *rt;
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = mutable->daddr,
						.saddr = mutable->saddr,
						.tos = tos } },
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
			return NULL;

		if (likely(tos == mutable->tos))
			*cache = build_cache(vport, mutable, rt);

		return rt;
	}
}

static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
{
	if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
		if (unlikely(!nskb)) {
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}

		set_skb_csum_bits(skb, nskb);

		if (skb->sk)
			skb_set_owner_w(nskb, skb->sk);

		kfree_skb(skb);
		return nskb;
	}

	return skb;
}

static inline bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
			return true;

	return false;
}

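/*
 * Prepares the packet for encapsulation: ensures there is enough headroom
 * for the link-layer and tunnel headers, segments GSO packets in software,
 * and resolves any pending CHECKSUM_PARTIAL by linearizing (when the paged
 * data could change under us) and computing the checksum now, since the
 * inner checksum cannot be offloaded once the packet is encapsulated.
 */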
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
{
	int min_headroom;
	int err;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (unlikely(err))
		goto error_free;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
		       + mutable->tunnel_hlen;

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		/*
		 * If we are doing GSO on a pskb it is better to make sure that
		 * the headroom is correct now.  We will only have to copy the
		 * portion in the linear data area and GSO will preserve
		 * headroom when it creates the segments.  This is particularly
		 * beneficial on Xen where we get a lot of GSO pskbs.
		 * Conversely, we avoid copying if it is just to get our own
		 * writable clone because GSO will do the copy for us.
		 */
		if (skb_headroom(skb) < min_headroom) {
			skb = check_headroom(skb, min_headroom);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto error;
			}
		}

		nskb = skb_gso_segment(skb, 0);
		kfree_skb(skb);
		if (IS_ERR(nskb)) {
			err = PTR_ERR(nskb);
			goto error;
		}

		skb = nskb;
	} else {
		skb = check_headroom(skb, min_headroom);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			/*
			 * Pages aren't locked and could change at any time.
			 * If this happens after we compute the checksum, the
			 * checksum will be wrong.  We linearize now to avoid
			 * this problem.
			 */
			if (unlikely(need_linearize(skb))) {
				err = __skb_linearize(skb);
				if (unlikely(err))
					goto error_free;
			}

			err = skb_checksum_help(skb);
			if (unlikely(err))
				goto error_free;
		} else if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
	}

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}

static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
{
	int sent_len;
	int err;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		if (likely(net_xmit_eval(err) == 0))
			sent_len += frag_len;
		else {
			skb = next;
			goto free_frags;
		}

		skb = next;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped so just free the rest.  This may help improve the congestion
	 * that caused the first packet to be dropped.
	 */
	tnl_free_linked_skbs(skb);
	return sent_len;
}

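/*
 * Transmit path, in order: validate the inner protocol headers, pick the
 * outer ToS (optionally inherited from the inner packet), look up the route
 * and header cache, apply the offload fixups above, enforce the MTU, then
 * build the outer headers for each segment.  With a valid header cache the
 * prebuilt headers are memcpy'd in front of the packet and it is handed
 * straight to the output device (or to a local internal port); otherwise the
 * headers are constructed per packet and sent through ip_local_out() so the
 * IP stack can fragment.  Returns the number of payload bytes sent.
 */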
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	int sent_len = 0;
	__be16 frag_off = 0;
	u8 ttl;
	u8 inner_tos;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* ToS */
	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	else
		inner_tos = 0;

	if (mutable->flags & TNL_F_TOS_INHERIT)
		tos = inner_tos;
	else
		tos = mutable->tos;

	tos = INET_ECN_encapsulate(tos, inner_tos);

	/* Route lookup */
	rt = find_route(vport, mutable, tos, &cache);
	if (unlikely(!rt))
		goto error_free;
	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt);
	if (IS_ERR(skb))
		goto error;

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	 */
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		     cache)) {
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
		cache = NULL;
	}

	/* TTL */
	ttl = mutable->ttl;
	if (!ttl)
		ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	if (mutable->flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;
#endif
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

		} else {
			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

			if (next_skb)
				skb_dst_set(skb, dst_clone(unattached_dst));
			else {
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;
			}
		}
		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph = ip_hdr(skb);
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
		if (unlikely(!skb))
			goto next;

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));
			ip_send_check(iph);

			if (cache_vport) {
				OVS_CB(skb)->flow = cache->flow;
				compute_ip_summed(skb, true);
				vport_receive(cache_vport, skb);
				sent_len += orig_len;
			} else {
				int xmit_err;

				skb->dev = rt_dst(rt).dev;
				xmit_err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(xmit_err) == 0))
					sent_len += orig_len;
			}
		} else
			sent_len += send_frags(skb, mutable);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	goto out;

error_free:
	tnl_free_linked_skbs(skb);
error:
	dst_release(unattached_dst);
	vport_record_error(vport, err);
out:
	return sent_len;
}

static const struct nla_policy tnl_policy[ODP_TUNNEL_ATTR_MAX + 1] = {
	[ODP_TUNNEL_ATTR_FLAGS] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_OUT_KEY] = { .type = NLA_U64 },
	[ODP_TUNNEL_ATTR_IN_KEY] = { .type = NLA_U64 },
	[ODP_TUNNEL_ATTR_TOS] = { .type = NLA_U8 },
	[ODP_TUNNEL_ATTR_TTL] = { .type = NLA_U8 },
};

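/*
 * Of the attributes above, ODP_TUNNEL_ATTR_FLAGS and ODP_TUNNEL_ATTR_DST_IPV4
 * are mandatory; the rest are optional.  Omitting ODP_TUNNEL_ATTR_IN_KEY
 * selects flow-based key matching (TNL_F_IN_KEY_MATCH), and omitting
 * ODP_TUNNEL_ATTR_OUT_KEY means the output key is supplied by actions
 * (TNL_F_OUT_KEY_ACTION); see tnl_set_config() below.
 */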
/* Sets ODP_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[ODP_TUNNEL_ATTR_MAX + 1];
	int err;

	if (!options)
		return -EINVAL;

	err = nla_parse_nested(a, ODP_TUNNEL_ATTR_MAX, options, tnl_policy);
	if (err)
		return err;

	if (!a[ODP_TUNNEL_ATTR_FLAGS] || !a[ODP_TUNNEL_ATTR_DST_IPV4])
		return -EINVAL;

	mutable->flags = nla_get_u32(a[ODP_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

	if (a[ODP_TUNNEL_ATTR_SRC_IPV4])
		mutable->saddr = nla_get_be32(a[ODP_TUNNEL_ATTR_SRC_IPV4]);
	mutable->daddr = nla_get_be32(a[ODP_TUNNEL_ATTR_DST_IPV4]);

	if (a[ODP_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[ODP_TUNNEL_ATTR_TOS]);
		if (mutable->tos != RT_TOS(mutable->tos))
			return -EINVAL;
	}

	if (a[ODP_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[ODP_TUNNEL_ATTR_TTL]);

	mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	mutable->tunnel_type = tnl_ops->tunnel_type;
	if (!a[ODP_TUNNEL_ATTR_IN_KEY]) {
		mutable->tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
	} else {
		mutable->tunnel_type |= TNL_T_KEY_EXACT;
		mutable->in_key = nla_get_be64(a[ODP_TUNNEL_ATTR_IN_KEY]);
	}

	if (!a[ODP_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
	else
		mutable->out_key = nla_get_be64(a[ODP_TUNNEL_ATTR_OUT_KEY]);

	old_vport = tnl_find_port(mutable->saddr, mutable->daddr,
				  mutable->in_key, mutable->tunnel_type,
				  &old_mutable);

	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	return 0;
}

struct vport *tnl_create(const struct vport_parms *parms,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int initial_frag_id;
	int err;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	vport_gen_rand_ether_addr(mutable->eth_addr);

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
				       (net_random() % (MAX_CACHE_EXP / 2));
#endif

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	err = add_port(vport);
	if (err)
		goto error_free_mutable;

	return vport;

error_free_mutable:
	kfree(mutable);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}

int tnl_set_options(struct vport *vport, struct nlattr *options)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;
	int err;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	/* Copy fields whose values should be retained. */
	old_mutable = rtnl_dereference(tnl_vport->mutable);
	mutable->seq = old_mutable->seq + 1;
	memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

	/* Parse the others configured by userspace. */
	err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
	if (err)
		goto error_free;

	err = move_port(vport, mutable);
	if (err)
		goto error_free;

	return 0;

error_free:
	kfree(mutable);
error:
	return err;
}

int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	NLA_PUT_U32(skb, ODP_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
	NLA_PUT_BE32(skb, ODP_TUNNEL_ATTR_DST_IPV4, mutable->daddr);

	if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
		NLA_PUT_BE64(skb, ODP_TUNNEL_ATTR_IN_KEY, mutable->in_key);
	if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
		NLA_PUT_BE64(skb, ODP_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
	if (mutable->saddr)
		NLA_PUT_BE32(skb, ODP_TUNNEL_ATTR_SRC_IPV4, mutable->saddr);
	if (mutable->tos)
		NLA_PUT_U8(skb, ODP_TUNNEL_ATTR_TOS, mutable->tos);
	if (mutable->ttl)
		NLA_PUT_U8(skb, ODP_TUNNEL_ATTR_TTL, mutable->ttl);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu,
						   struct tnl_vport, rcu);

	free_cache((struct tnl_cache __force *)tnl_vport->cache);
	kfree((struct tnl_mutable __force *)tnl_vport->mutable);
	vport_free(tnl_vport_to_vport(tnl_vport));
}

int tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable, *old_mutable;

	mutable = rtnl_dereference(tnl_vport->mutable);

	if (vport == tnl_find_port(mutable->saddr, mutable->daddr,
				   mutable->in_key, mutable->tunnel_type,
				   &old_mutable))
		del_port(vport);

	call_rcu(&tnl_vport->rcu, free_port_rcu);

	return 0;
}

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
			  sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}

const char *tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}

void tnl_free_linked_skbs(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;

	while (skb) {
		struct sk_buff *next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}