1/*
2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
4 *
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
7 */
8
9#include <linux/if_arp.h>
10#include <linux/if_ether.h>
11#include <linux/ip.h>
12#include <linux/if_vlan.h>
13#include <linux/in.h>
14#include <linux/in_route.h>
15#include <linux/jhash.h>
16#include <linux/kernel.h>
17#include <linux/version.h>
18#include <linux/workqueue.h>
19
20#include <net/dsfield.h>
21#include <net/dst.h>
22#include <net/icmp.h>
23#include <net/inet_ecn.h>
24#include <net/ip.h>
25#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
26#include <net/ipv6.h>
27#endif
28#include <net/route.h>
29#include <net/xfrm.h>
30
31#include "actions.h"
32#include "datapath.h"
33#include "table.h"
34#include "tunnel.h"
35#include "vport.h"
36#include "vport-generic.h"
37#include "vport-internal_dev.h"
38
39#ifdef NEED_CACHE_TIMEOUT
40/*
41 * On kernels where we can't quickly detect changes in the rest of the system
42 * we use an expiration time to invalidate the cache. A shorter expiration
43 * reduces the length of time that we may potentially blackhole packets while
44 * a longer time increases performance by reducing the frequency that the
45 * cache needs to be rebuilt. A variety of factors may cause the cache to be
46 * invalidated before the expiration time but this is the maximum. The time
47 * is expressed in jiffies.
48 */
49#define MAX_CACHE_EXP HZ
50#endif
51
52/*
53 * Interval to check for and remove caches that are no longer valid. Caches
54 * are checked for validity before they are used for packet encapsulation and
55 * old caches are removed at that time. However, if no packets are sent through
56 * the tunnel then the cache will never be destroyed. Since it holds
57 * references to a number of system objects, the cache will continue to use
58 * system resources by not allowing those objects to be destroyed. The cache
59 * cleaner is periodically run to free invalid caches. It does not
60 * significantly affect system performance. A lower interval will release
61 * resources faster but will itself consume resources by requiring more frequent
62 * checks. A longer interval may result in messages being printed to the kernel
63 * message buffer about unreleased resources. The interval is expressed in
64 * jiffies.
65 */
66#define CACHE_CLEANER_INTERVAL (5 * HZ)
67
68#define CACHE_DATA_ALIGN 16
69
70/* Protected by RCU. */
71static struct tbl *port_table __read_mostly;
72
73static void cache_cleaner(struct work_struct *work);
74DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
75
76/*
77 * These are just used as an optimization: they don't require any kind of
78 * synchronization because we could have just as easily read the value before
79 * the port change happened.
80 */
81static unsigned int key_local_remote_ports __read_mostly;
82static unsigned int key_remote_ports __read_mostly;
83static unsigned int local_remote_ports __read_mostly;
84static unsigned int remote_ports __read_mostly;
85
86#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
87#define rt_dst(rt) (rt->dst)
88#else
89#define rt_dst(rt) (rt->u.dst)
90#endif
91
92static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
93{
94 return vport_from_priv(tnl_vport);
95}
96
97static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
98{
99 return container_of(node, struct tnl_vport, tbl_node);
100}
101
102static inline void schedule_cache_cleaner(void)
103{
104 schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
105}
106
107static void free_cache(struct tnl_cache *cache)
108{
109 if (!cache)
110 return;
111
112 flow_put(cache->flow);
113 ip_rt_put(cache->rt);
114 kfree(cache);
115}
116
117static void free_config_rcu(struct rcu_head *rcu)
118{
119 struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
120 kfree(c);
121}
122
123static void free_cache_rcu(struct rcu_head *rcu)
124{
125 struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
126 free_cache(c);
127}
128
129static void assign_config_rcu(struct vport *vport,
130 struct tnl_mutable_config *new_config)
131{
132 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
133 struct tnl_mutable_config *old_config;
134
135 old_config = tnl_vport->mutable;
136 rcu_assign_pointer(tnl_vport->mutable, new_config);
137 call_rcu(&old_config->rcu, free_config_rcu);
138}
139
140static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
141{
142 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
143 struct tnl_cache *old_cache;
144
145 old_cache = tnl_vport->cache;
146 rcu_assign_pointer(tnl_vport->cache, new_cache);
147
148 if (old_cache)
149 call_rcu(&old_cache->rcu, free_cache_rcu);
150}
151
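/*
 * Returns the counter for the class of lookups (keyed vs. flow-based,
 * specific vs. wildcard local address) that this configuration belongs to.
 * tnl_find_port() consults these counters so it can skip lookups in
 * classes that have no ports.
 */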
152static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
153{
154 if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
155 if (mutable->port_config.saddr)
156 return &local_remote_ports;
157 else
158 return &remote_ports;
159 } else {
160 if (mutable->port_config.saddr)
161 return &key_local_remote_ports;
162 else
163 return &key_remote_ports;
164 }
165}
166
167struct port_lookup_key {
168 u32 tunnel_type;
169 __be32 saddr;
170 __be32 daddr;
171 __be32 key;
172 const struct tnl_mutable_config *mutable;
173};
174
175/*
176 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
177 * the comparision.
178 */
179static int port_cmp(const struct tbl_node *node, void *target)
180{
181 const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
182 struct port_lookup_key *lookup = target;
183
184 lookup->mutable = rcu_dereference(tnl_vport->mutable);
185
186 return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
187 lookup->mutable->port_config.daddr == lookup->daddr &&
188 lookup->mutable->port_config.in_key == lookup->key &&
189 lookup->mutable->port_config.saddr == lookup->saddr);
190}
191
192static u32 port_hash(struct port_lookup_key *k)
193{
194 return jhash_3words(k->key, k->saddr, k->daddr, k->tunnel_type);
195}
196
197static u32 mutable_hash(const struct tnl_mutable_config *mutable)
198{
199 struct port_lookup_key lookup;
200
201 lookup.saddr = mutable->port_config.saddr;
202 lookup.daddr = mutable->port_config.daddr;
203 lookup.key = mutable->port_config.in_key;
204 lookup.tunnel_type = mutable->tunnel_type;
205
206 return port_hash(&lookup);
207}
208
209static void check_table_empty(void)
210{
211 if (tbl_count(port_table) == 0) {
212 struct tbl *old_table = port_table;
213
214 cancel_delayed_work_sync(&cache_cleaner_wq);
215 rcu_assign_pointer(port_table, NULL);
216 tbl_deferred_destroy(old_table, NULL);
217 }
218}
219
220static int add_port(struct vport *vport)
221{
222 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
223 int err;
224
225 if (!port_table) {
226 struct tbl *new_table;
227
228 new_table = tbl_create(0);
229 if (!new_table)
230 return -ENOMEM;
231
232 rcu_assign_pointer(port_table, new_table);
233 schedule_cache_cleaner();
234
235 } else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
236 struct tbl *old_table = port_table;
237 struct tbl *new_table;
238
239 new_table = tbl_expand(old_table);
240 if (IS_ERR(new_table))
241 return PTR_ERR(new_table);
242
243 rcu_assign_pointer(port_table, new_table);
244 tbl_deferred_destroy(old_table, NULL);
245 }
246
247 err = tbl_insert(port_table, &tnl_vport->tbl_node, mutable_hash(tnl_vport->mutable));
248 if (err) {
249 check_table_empty();
250 return err;
251 }
252
253 (*find_port_pool(tnl_vport->mutable))++;
254
255 return 0;
256}
257
258static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
259{
260 int err;
261 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
262 u32 hash;
263
264 hash = mutable_hash(new_mutable);
265 if (hash == tnl_vport->tbl_node.hash)
266 goto table_updated;
267
268 /*
269 * Ideally we should make this move atomic to avoid having gaps in
270 * finding tunnels or the possibility of failure. However, if we do
271 * find a tunnel it will always be consistent.
272 */
273 err = tbl_remove(port_table, &tnl_vport->tbl_node);
274 if (err)
275 return err;
276
277 err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
278 if (err) {
279 check_table_empty();
280 return err;
281 }
282
283table_updated:
284 assign_config_rcu(vport, new_mutable);
285
286 return 0;
287}
288
289static int del_port(struct vport *vport)
290{
291 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
292 int err;
293
294 err = tbl_remove(port_table, &tnl_vport->tbl_node);
295 if (err)
296 return err;
297
298 check_table_empty();
299 (*find_port_pool(tnl_vport->mutable))--;
300
301 return 0;
302}
303
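/*
 * Finds the tunnel vport matching the supplied addresses, key, and tunnel
 * type.  Exact-key ports take precedence over flow-based (key match)
 * ports, and ports bound to a specific local address take precedence over
 * those that accept any local address.
 */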
304struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
305 int tunnel_type,
306 const struct tnl_mutable_config **mutable)
307{
308 struct port_lookup_key lookup;
309 struct tbl *table = rcu_dereference(port_table);
310 struct tbl_node *tbl_node;
311
312 if (unlikely(!table))
313 return NULL;
314
315 lookup.saddr = saddr;
316 lookup.daddr = daddr;
317
318 if (tunnel_type & TNL_T_KEY_EXACT) {
319 lookup.key = key;
320 lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;
321
322 if (key_local_remote_ports) {
323 tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
324 if (tbl_node)
325 goto found;
326 }
327
328 if (key_remote_ports) {
329 lookup.saddr = 0;
330
331 tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
332 if (tbl_node)
333 goto found;
334
335 lookup.saddr = saddr;
336 }
337 }
338
339 if (tunnel_type & TNL_T_KEY_MATCH) {
340 lookup.key = 0;
341 lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;
342
343 if (local_remote_ports) {
344 tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
345 if (tbl_node)
346 goto found;
347 }
348
349 if (remote_ports) {
350 lookup.saddr = 0;
351
352 tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
353 if (tbl_node)
354 goto found;
355 }
356 }
357
358 return NULL;
359
360found:
361 *mutable = lookup.mutable;
362 return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
363}
364
365static inline void ecn_decapsulate(struct sk_buff *skb)
366{
367 u8 tos = ip_hdr(skb)->tos;
368
369 if (INET_ECN_is_ce(tos)) {
370 __be16 protocol = skb->protocol;
371 unsigned int nw_header = skb_network_offset(skb);
372
373 if (skb->protocol == htons(ETH_P_8021Q)) {
374 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
375 return;
376
377 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
378 nw_header += VLAN_HLEN;
379 }
380
381 if (protocol == htons(ETH_P_IP)) {
382 if (unlikely(!pskb_may_pull(skb, nw_header
383 + sizeof(struct iphdr))))
384 return;
385
386 IP_ECN_set_ce((struct iphdr *)(skb->data + nw_header));
387 }
388#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
389 else if (protocol == htons(ETH_P_IPV6)) {
390 if (unlikely(!pskb_may_pull(skb, nw_header
391 + sizeof(struct ipv6hdr))))
392 return;
393
394 IP6_ECN_set_ce((struct ipv6hdr *)(skb->data + nw_header));
395 }
396#endif
397 }
398}
399
400/* Called with rcu_read_lock. */
401void tnl_rcv(struct vport *vport, struct sk_buff *skb)
402{
403 skb->pkt_type = PACKET_HOST;
404 skb->protocol = eth_type_trans(skb, skb->dev);
405
406 skb_dst_drop(skb);
407 nf_reset(skb);
408 secpath_reset(skb);
409 skb_reset_network_header(skb);
410
411 ecn_decapsulate(skb);
412
413 skb_push(skb, ETH_HLEN);
414 compute_ip_summed(skb, false);
415
416 vport_receive(vport, skb);
417}
418
419static bool check_ipv4_address(__be32 addr)
420{
421 if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
422 || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
423 return false;
424
425 return true;
426}
427
428static bool ipv4_should_icmp(struct sk_buff *skb)
429{
430 struct iphdr *old_iph = ip_hdr(skb);
431
432 /* Don't respond to L2 broadcast. */
433 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
434 return false;
435
436 /* Don't respond to L3 broadcast or invalid addresses. */
437 if (!check_ipv4_address(old_iph->daddr) ||
438 !check_ipv4_address(old_iph->saddr))
439 return false;
440
441 /* Only respond to the first fragment. */
442 if (old_iph->frag_off & htons(IP_OFFSET))
443 return false;
444
445 /* Don't respond to ICMP error messages. */
446 if (old_iph->protocol == IPPROTO_ICMP) {
447 u8 icmp_type, *icmp_typep;
448
449 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
450 (old_iph->ihl << 2) +
451 offsetof(struct icmphdr, type) -
452 skb->data, sizeof(icmp_type),
453 &icmp_type);
454
455 if (!icmp_typep)
456 return false;
457
458 if (*icmp_typep > NR_ICMP_TYPES
459 || (*icmp_typep <= ICMP_PARAMETERPROB
460 && *icmp_typep != ICMP_ECHOREPLY
461 && *icmp_typep != ICMP_ECHO))
462 return false;
463 }
464
465 return true;
466}
467
468static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
469 unsigned int mtu, unsigned int payload_length)
470{
471 struct iphdr *iph, *old_iph = ip_hdr(skb);
472 struct icmphdr *icmph;
473 u8 *payload;
474
475 iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
476 icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
477 payload = skb_put(nskb, payload_length);
478
479 /* IP */
480 iph->version = 4;
481 iph->ihl = sizeof(struct iphdr) >> 2;
482 iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
483 IPTOS_PREC_INTERNETCONTROL;
484 iph->tot_len = htons(sizeof(struct iphdr)
485 + sizeof(struct icmphdr)
486 + payload_length);
487 get_random_bytes(&iph->id, sizeof(iph->id));
488 iph->frag_off = 0;
489 iph->ttl = IPDEFTTL;
490 iph->protocol = IPPROTO_ICMP;
491 iph->daddr = old_iph->saddr;
492 iph->saddr = old_iph->daddr;
493
494 ip_send_check(iph);
495
496 /* ICMP */
497 icmph->type = ICMP_DEST_UNREACH;
498 icmph->code = ICMP_FRAG_NEEDED;
499 icmph->un.gateway = htonl(mtu);
500 icmph->checksum = 0;
501
502 nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
503 nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
504 payload, payload_length,
505 nskb->csum);
506 icmph->checksum = csum_fold(nskb->csum);
507}
508
509#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
510static bool ipv6_should_icmp(struct sk_buff *skb)
511{
512 struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
513 int addr_type;
514 int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
515 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
516
517 /* Check source address is valid. */
518 addr_type = ipv6_addr_type(&old_ipv6h->saddr);
519 if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
520 return false;
521
522 /* Don't reply to unspecified addresses. */
523 if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
524 return false;
525
526 /* Don't respond to ICMP error messages. */
527 payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
528 if (payload_off < 0)
529 return false;
530
531 if (nexthdr == NEXTHDR_ICMP) {
532 u8 icmp_type, *icmp_typep;
533
534 icmp_typep = skb_header_pointer(skb, payload_off +
535 offsetof(struct icmp6hdr,
536 icmp6_type),
537 sizeof(icmp_type), &icmp_type);
538
539 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
540 return false;
541 }
542
543 return true;
544}
545
546static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
547 unsigned int mtu, unsigned int payload_length)
548{
549 struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
550 struct icmp6hdr *icmp6h;
551 u8 *payload;
552
553 ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
554 icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
555 payload = skb_put(nskb, payload_length);
556
557 /* IPv6 */
558 ipv6h->version = 6;
559 ipv6h->priority = 0;
560 memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
561 ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
562 + payload_length);
563 ipv6h->nexthdr = NEXTHDR_ICMP;
564 ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
565 ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
566 ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
567
568 /* ICMPv6 */
569 icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
570 icmp6h->icmp6_code = 0;
571 icmp6h->icmp6_cksum = 0;
572 icmp6h->icmp6_mtu = htonl(mtu);
573
574 nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
575 nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
576 payload, payload_length,
577 nskb->csum);
578 icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
579 sizeof(struct icmp6hdr)
580 + payload_length,
581 ipv6h->nexthdr, nskb->csum);
582}
583#endif /* IPv6 */
584
585bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
586 struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
587{
588 unsigned int eth_hdr_len = ETH_HLEN;
589 unsigned int total_length = 0, header_length = 0, payload_length;
590 struct ethhdr *eh, *old_eh = eth_hdr(skb);
591 struct sk_buff *nskb;
592
593 /* Sanity check */
594 if (skb->protocol == htons(ETH_P_IP)) {
595 if (mtu < IP_MIN_MTU)
596 return false;
597
598 if (!ipv4_should_icmp(skb))
599 return true;
600 }
601#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
602 else if (skb->protocol == htons(ETH_P_IPV6)) {
603 if (mtu < IPV6_MIN_MTU)
604 return false;
605
606 /*
607 * In theory we should do PMTUD on IPv6 multicast messages but
608 * we don't have an address to send from so just fragment.
609 */
610 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
611 return false;
612
613 if (!ipv6_should_icmp(skb))
614 return true;
615 }
616#endif
617 else
618 return false;
619
620 /* Allocate */
621 if (old_eh->h_proto == htons(ETH_P_8021Q))
622 eth_hdr_len = VLAN_ETH_HLEN;
623
624 payload_length = skb->len - eth_hdr_len;
625 if (skb->protocol == htons(ETH_P_IP)) {
626 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
627 total_length = min_t(unsigned int, header_length +
628 payload_length, 576);
629 }
630#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
631 else {
632 header_length = sizeof(struct ipv6hdr) +
633 sizeof(struct icmp6hdr);
634 total_length = min_t(unsigned int, header_length +
635 payload_length, IPV6_MIN_MTU);
636 }
637#endif
638
639 total_length = min(total_length, mutable->mtu);
640 payload_length = total_length - header_length;
641
642 nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
643 payload_length);
644 if (!nskb)
645 return false;
646
647 skb_reserve(nskb, NET_IP_ALIGN);
648
649 /* Ethernet / VLAN */
650 eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
651 memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
652 memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
653 nskb->protocol = eh->h_proto = old_eh->h_proto;
654 if (old_eh->h_proto == htons(ETH_P_8021Q)) {
655 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
656
657 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
658 vh->h_vlan_encapsulated_proto = skb->protocol;
659 }
660 skb_reset_mac_header(nskb);
661
662 /* Protocol */
663 if (skb->protocol == htons(ETH_P_IP))
664 ipv4_build_icmp(skb, nskb, mtu, payload_length);
665#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
666 else
667 ipv6_build_icmp(skb, nskb, mtu, payload_length);
668#endif
669
670 /*
671 * Assume that flow based keys are symmetric with respect to input
672 * and output and use the key that we were going to put on the
673 * outgoing packet for the fake received packet. If the keys are
674 * not symmetric then PMTUD needs to be disabled since we won't have
675 * any way of synthesizing packets.
676 */
677 if ((mutable->port_config.flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
678 (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
679 OVS_CB(nskb)->tun_id = flow_key;
680
681 compute_ip_summed(nskb, false);
682 vport_receive(vport, nskb);
683
684 return true;
685}
686
687static bool check_mtu(struct sk_buff *skb,
688 struct vport *vport,
689 const struct tnl_mutable_config *mutable,
690 const struct rtable *rt, __be16 *frag_offp)
691{
692 int mtu;
693 __be16 frag_off;
694
695 frag_off = (mutable->port_config.flags & TNL_F_PMTUD) ? htons(IP_DF) : 0;
696 if (frag_off)
697 mtu = dst_mtu(&rt_dst(rt))
698 - ETH_HLEN
699 - mutable->tunnel_hlen
700 - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
701 else
702 mtu = mutable->mtu;
703
704 if (skb->protocol == htons(ETH_P_IP)) {
705 struct iphdr *old_iph = ip_hdr(skb);
706
707 frag_off |= old_iph->frag_off & htons(IP_DF);
708 mtu = max(mtu, IP_MIN_MTU);
709
710 if ((old_iph->frag_off & htons(IP_DF)) &&
711 mtu < ntohs(old_iph->tot_len)) {
712 if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
713 goto drop;
714 }
715 }
716#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
717 else if (skb->protocol == htons(ETH_P_IPV6)) {
718 unsigned int packet_length = skb->len - ETH_HLEN
719 - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
720
721 mtu = max(mtu, IPV6_MIN_MTU);
722
723 /* IPv6 requires PMTUD if the packet is above the minimum MTU. */
724 if (packet_length > IPV6_MIN_MTU)
725 frag_off = htons(IP_DF);
726
727 if (mtu < packet_length) {
728 if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
729 goto drop;
730 }
731 }
732#endif
733
734 *frag_offp = frag_off;
735 return true;
736
737drop:
738 *frag_offp = 0;
739 return false;
740}
741
742static void create_tunnel_header(const struct vport *vport,
743 const struct tnl_mutable_config *mutable,
744 const struct rtable *rt, void *header)
745{
746 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
747 struct iphdr *iph = header;
748
749 iph->version = 4;
750 iph->ihl = sizeof(struct iphdr) >> 2;
751 iph->frag_off = htons(IP_DF);
752 iph->protocol = tnl_vport->tnl_ops->ipproto;
753 iph->tos = mutable->port_config.tos;
754 iph->daddr = rt->rt_dst;
755 iph->saddr = rt->rt_src;
756 iph->ttl = mutable->port_config.ttl;
757 if (!iph->ttl)
758 iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);
759
760 tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
761}
762
763static inline void *get_cached_header(const struct tnl_cache *cache)
764{
765 return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
766}
767
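/*
 * A cached header may be used only if it has not expired, the routing
 * table generation and the cached hard header are unchanged, the port
 * configuration sequence number still matches, and any cached flow is
 * still alive.
 */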
768static inline bool check_cache_valid(const struct tnl_cache *cache,
769 const struct tnl_mutable_config *mutable)
770{
771 return cache &&
772#ifdef NEED_CACHE_TIMEOUT
773 time_before(jiffies, cache->expiration) &&
774#endif
775#ifdef HAVE_RT_GENID
776 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
777#endif
778#ifdef HAVE_HH_SEQ
779 rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
780#endif
781 mutable->seq == cache->mutable_seq &&
782 (!is_internal_dev(rt_dst(cache->rt).dev) ||
783 (cache->flow && !cache->flow->dead));
784}
785
786static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
787{
788 struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
789 const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
790 const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
791
792 if (cache && !check_cache_valid(cache, mutable) &&
793 spin_trylock_bh(&tnl_vport->cache_lock)) {
794 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
795 spin_unlock_bh(&tnl_vport->cache_lock);
796 }
797
798 return 0;
799}
800
801static void cache_cleaner(struct work_struct *work)
802{
803 schedule_cache_cleaner();
804
805 rcu_read_lock();
806 tbl_foreach(port_table, cache_cleaner_cb, NULL);
807 rcu_read_unlock();
808}
809
810static inline void create_eth_hdr(struct tnl_cache *cache,
811 const struct rtable *rt)
812{
813 void *cache_data = get_cached_header(cache);
814 int hh_len = rt_dst(rt).hh->hh_len;
815 int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;
816
817#ifdef HAVE_HH_SEQ
818 unsigned hh_seq;
819
820 do {
821 hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
822 memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
823 } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));
824
825 cache->hh_seq = hh_seq;
826#else
827 read_lock_bh(&rt_dst(rt).hh->hh_lock);
828 memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
829 read_unlock_bh(&rt_dst(rt).hh->hh_lock);
830#endif
831}
832
833static struct tnl_cache *build_cache(struct vport *vport,
834 const struct tnl_mutable_config *mutable,
835 struct rtable *rt)
836{
837 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
838 struct tnl_cache *cache;
839 void *cache_data;
840 int cache_len;
841
842 if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))
843 return NULL;
844
845 /*
846 * If there is no entry in the ARP cache or if this device does not
847 * support hard header caching just fall back to the IP stack.
848 */
849 if (!rt_dst(rt).hh)
850 return NULL;
851
852 /*
853 * If lock is contended fall back to directly building the header.
854 * We're not going to help performance by sitting here spinning.
855 */
856 if (!spin_trylock_bh(&tnl_vport->cache_lock))
857 return NULL;
858
859 cache = tnl_vport->cache;
860 if (check_cache_valid(cache, mutable))
861 goto unlock;
862 else
863 cache = NULL;
864
865 cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;
866
867 cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
868 cache_len, GFP_ATOMIC);
869 if (!cache)
870 goto unlock;
871
872 cache->len = cache_len;
873
874 create_eth_hdr(cache, rt);
875 cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;
876
877 create_tunnel_header(vport, mutable, rt, cache_data);
878
879 cache->mutable_seq = mutable->seq;
880 cache->rt = rt;
881#ifdef NEED_CACHE_TIMEOUT
882 cache->expiration = jiffies + tnl_vport->cache_exp_interval;
883#endif
884
885 if (is_internal_dev(rt_dst(rt).dev)) {
886 int err;
887 struct vport *vport;
888 struct dp_port *dp_port;
889 struct sk_buff *skb;
890 bool is_frag;
891 struct odp_flow_key flow_key;
892 struct tbl_node *flow_node;
893
894 vport = internal_dev_get_vport(rt_dst(rt).dev);
895 if (!vport)
896 goto done;
897
898 dp_port = vport_get_dp_port(vport);
899 if (!dp_port)
900 goto done;
901
902 skb = alloc_skb(cache->len, GFP_ATOMIC);
903 if (!skb)
904 goto done;
905
906 __skb_put(skb, cache->len);
907 memcpy(skb->data, get_cached_header(cache), cache->len);
908
909 err = flow_extract(skb, dp_port->port_no, &flow_key, &is_frag);
910
911 kfree_skb(skb);
912 if (err || is_frag)
913 goto done;
914
915 flow_node = tbl_lookup(rcu_dereference(dp_port->dp->table),
916 &flow_key, flow_hash(&flow_key),
917 flow_cmp);
918 if (flow_node) {
919 struct sw_flow *flow = flow_cast(flow_node);
920
921 cache->flow = flow;
922 flow_hold(flow);
923 }
924 }
925
926done:
927 assign_cache_rcu(vport, cache);
928
929unlock:
930 spin_unlock_bh(&tnl_vport->cache_lock);
931
932 return cache;
933}
934
935static struct rtable *find_route(struct vport *vport,
936 const struct tnl_mutable_config *mutable,
937 u8 tos, struct tnl_cache **cache)
938{
939 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
940 struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
941
942 *cache = NULL;
943 tos = RT_TOS(tos);
944
945 if (likely(tos == mutable->port_config.tos &&
946 check_cache_valid(cur_cache, mutable))) {
947 *cache = cur_cache;
948 return cur_cache->rt;
949 } else {
950 struct rtable *rt;
951 struct flowi fl = { .nl_u = { .ip4_u =
952 { .daddr = mutable->port_config.daddr,
953 .saddr = mutable->port_config.saddr,
954 .tos = tos } },
955 .proto = tnl_vport->tnl_ops->ipproto };
956
957 if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
958 return NULL;
959
960 if (likely(tos == mutable->port_config.tos))
961 *cache = build_cache(vport, mutable, rt);
962
963 return rt;
964 }
965}
966
967static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
968{
969 if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
970 struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
971 if (unlikely(!nskb)) {
972 kfree_skb(skb);
973 return ERR_PTR(-ENOMEM);
974 }
975
976 set_skb_csum_bits(skb, nskb);
977
978 if (skb->sk)
979 skb_set_owner_w(nskb, skb->sk);
980
981 kfree_skb(skb);
982 return nskb;
983 }
984
985 return skb;
986}
987
988static inline bool need_linearize(const struct sk_buff *skb)
989{
990 int i;
991
992 if (unlikely(skb_shinfo(skb)->frag_list))
993 return true;
994
995 /*
996 * Generally speaking we should linearize if there are paged frags.
997 * However, if all of the refcounts are 1 we know nobody else can
998 * change them from underneath us and we can skip the linearization.
999 */
1000 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1001 if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
1002 return true;
1003
1004 return false;
1005}
1006
1007static struct sk_buff *handle_offloads(struct sk_buff *skb,
1008 const struct tnl_mutable_config *mutable,
1009 const struct rtable *rt)
1010{
1011 int min_headroom;
1012 int err;
1013
1014 forward_ip_summed(skb);
1015
1016 err = vswitch_skb_checksum_setup(skb);
1017 if (unlikely(err))
1018 goto error_free;
1019
1020 min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
1021 + mutable->tunnel_hlen;
1022
1023 if (skb_is_gso(skb)) {
1024 struct sk_buff *nskb;
1025
1026 /*
1027 * If we are doing GSO on a pskb it is better to make sure that
1028 * the headroom is correct now. We will only have to copy the
1029 * portion in the linear data area and GSO will preserve
1030 * headroom when it creates the segments. This is particularly
1031 * beneficial on Xen where we get a lot of GSO pskbs.
1032 * Conversely, we avoid copying if it is just to get our own
1033 * writable clone because GSO will do the copy for us.
1034 */
1035 if (skb_headroom(skb) < min_headroom) {
1036 skb = check_headroom(skb, min_headroom);
1037 if (unlikely(IS_ERR(skb))) {
1038 err = PTR_ERR(skb);
1039 goto error;
1040 }
1041 }
1042
1043 nskb = skb_gso_segment(skb, 0);
1044 kfree_skb(skb);
1045 if (unlikely(IS_ERR(nskb))) {
1046 err = PTR_ERR(nskb);
1047 goto error;
1048 }
1049
1050 skb = nskb;
1051 } else {
1052 skb = check_headroom(skb, min_headroom);
1053 if (unlikely(IS_ERR(skb))) {
1054 err = PTR_ERR(skb);
1055 goto error;
1056 }
1057
1058 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1059 /*
1060 * Pages aren't locked and could change at any time.
1061 * If this happens after we compute the checksum, the
1062 * checksum will be wrong. We linearize now to avoid
1063 * this problem.
1064 */
1065 if (unlikely(need_linearize(skb))) {
1066 err = __skb_linearize(skb);
1067 if (unlikely(err))
1068 goto error_free;
1069 }
1070
1071 err = skb_checksum_help(skb);
1072 if (unlikely(err))
1073 goto error_free;
1074 } else if (skb->ip_summed == CHECKSUM_COMPLETE)
1075 skb->ip_summed = CHECKSUM_NONE;
1076 }
1077
1078 return skb;
1079
1080error_free:
1081 kfree_skb(skb);
1082error:
1083 return ERR_PTR(err);
1084}
1085
1086static int send_frags(struct sk_buff *skb,
1087 const struct tnl_mutable_config *mutable)
1088{
1089 int sent_len;
1090 int err;
1091
1092 sent_len = 0;
1093 while (skb) {
1094 struct sk_buff *next = skb->next;
1095 int frag_len = skb->len - mutable->tunnel_hlen;
1096
1097 skb->next = NULL;
1098 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1099
1100 err = ip_local_out(skb);
1101 if (likely(net_xmit_eval(err) == 0))
1102 sent_len += frag_len;
1103 else {
1104 skb = next;
1105 goto free_frags;
1106 }
1107
1108 skb = next;
1109 }
1110
1111 return sent_len;
1112
1113free_frags:
1114 /*
1115 * There's no point in continuing to send fragments once one has been
1116 * dropped so just free the rest. This may help improve the congestion
1117 * that caused the first packet to be dropped.
1118 */
1119 tnl_free_linked_skbs(skb);
1120 return sent_len;
1121}
1122
1123int tnl_send(struct vport *vport, struct sk_buff *skb)
1124{
1125 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1126 const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1127
1128 enum vport_err_type err = VPORT_E_TX_ERROR;
1129 struct rtable *rt;
1130 struct dst_entry *unattached_dst = NULL;
1131 struct tnl_cache *cache;
1132 int sent_len = 0;
1133 __be16 frag_off;
1134 u8 ttl;
1135 u8 inner_tos;
1136 u8 tos;
1137
1138 /* Validate the protocol headers before we try to use them. */
1139 if (skb->protocol == htons(ETH_P_8021Q)) {
1140 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1141 goto error_free;
1142
1143 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1144 skb_set_network_header(skb, VLAN_ETH_HLEN);
1145 }
1146
1147 if (skb->protocol == htons(ETH_P_IP)) {
1148 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1149 + sizeof(struct iphdr))))
1150 skb->protocol = 0;
1151 }
1152#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1153 else if (skb->protocol == htons(ETH_P_IPV6)) {
1154 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1155 + sizeof(struct ipv6hdr))))
1156 skb->protocol = 0;
1157 }
1158#endif
1159
1160 /* ToS */
1161 if (skb->protocol == htons(ETH_P_IP))
1162 inner_tos = ip_hdr(skb)->tos;
1163#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1164 else if (skb->protocol == htons(ETH_P_IPV6))
1165 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1166#endif
1167 else
1168 inner_tos = 0;
1169
1170 if (mutable->port_config.flags & TNL_F_TOS_INHERIT)
1171 tos = inner_tos;
1172 else
1173 tos = mutable->port_config.tos;
1174
1175 tos = INET_ECN_encapsulate(tos, inner_tos);
1176
1177 /* Route lookup */
1178 rt = find_route(vport, mutable, tos, &cache);
1179 if (unlikely(!rt))
1180 goto error_free;
1181 if (unlikely(!cache))
1182 unattached_dst = &rt_dst(rt);
1183
1184 /* Reset SKB */
1185 nf_reset(skb);
1186 secpath_reset(skb);
1187 skb_dst_drop(skb);
1188
1189 /* Offloading */
1190 skb = handle_offloads(skb, mutable, rt);
1191 if (unlikely(IS_ERR(skb)))
1192 goto error;
1193
1194 /* MTU */
1195 if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1196 err = VPORT_E_TX_DROPPED;
1197 goto error_free;
1198 }
1199
1200 /*
1201 * If we are over the MTU, allow the IP stack to handle fragmentation.
1202 * Fragmentation is a slow path anyway.
1203 */
1204 if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1205 cache)) {
1206 unattached_dst = &rt_dst(rt);
1207 dst_hold(unattached_dst);
1208 cache = NULL;
1209 }
1210
1211 /* TTL */
1212 ttl = mutable->port_config.ttl;
1213 if (!ttl)
1214 ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);
1215
1216 if (mutable->port_config.flags & TNL_F_TTL_INHERIT) {
1217 if (skb->protocol == htons(ETH_P_IP))
1218 ttl = ip_hdr(skb)->ttl;
1219#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1220 else if (skb->protocol == htons(ETH_P_IPV6))
1221 ttl = ipv6_hdr(skb)->hop_limit;
1222#endif
1223 }
1224
1225 while (skb) {
1226 struct iphdr *iph;
1227 struct sk_buff *next_skb = skb->next;
1228 skb->next = NULL;
1229
1230 if (likely(cache)) {
1231 skb_push(skb, cache->len);
1232 memcpy(skb->data, get_cached_header(cache), cache->len);
1233 skb_reset_mac_header(skb);
1234 skb_set_network_header(skb, rt_dst(rt).hh->hh_len);
1235
1236 } else {
1237 skb_push(skb, mutable->tunnel_hlen);
1238 create_tunnel_header(vport, mutable, rt, skb->data);
1239 skb_reset_network_header(skb);
1240
1241 if (next_skb)
1242 skb_dst_set(skb, dst_clone(unattached_dst));
1243 else {
1244 skb_dst_set(skb, unattached_dst);
1245 unattached_dst = NULL;
1246 }
1247 }
1248 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1249
1250 iph = ip_hdr(skb);
1251 iph->tos = tos;
1252 iph->ttl = ttl;
1253 iph->frag_off = frag_off;
1254 ip_select_ident(iph, &rt_dst(rt), NULL);
1255
1256 skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
1257 if (unlikely(!skb))
1258 goto next;
1259
1260 if (likely(cache)) {
1261 int orig_len = skb->len - cache->len;
1262 struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
1263
1264 skb->protocol = htons(ETH_P_IP);
1265 iph->tot_len = htons(skb->len - skb_network_offset(skb));
1266 ip_send_check(iph);
1267
1268 if (cache_vport) {
1269 OVS_CB(skb)->flow = cache->flow;
1270 compute_ip_summed(skb, true);
1271 vport_receive(cache_vport, skb);
1272 sent_len += orig_len;
1273 } else {
1274 int err;
1275
1276 skb->dev = rt_dst(rt).dev;
1277 err = dev_queue_xmit(skb);
1278
1279 if (likely(net_xmit_eval(err) == 0))
1280 sent_len += orig_len;
1281 }
1282 } else
1283 sent_len += send_frags(skb, mutable);
1284
1285next:
1286 skb = next_skb;
1287 }
1288
1289 if (unlikely(sent_len == 0))
1290 vport_record_error(vport, VPORT_E_TX_DROPPED);
1291
1292 goto out;
1293
1294error_free:
1295 tnl_free_linked_skbs(skb);
1296error:
1297 dst_release(unattached_dst);
1298 vport_record_error(vport, err);
1299out:
1300 return sent_len;
1301}
1302
1303static int set_config(const void *config, const struct tnl_ops *tnl_ops,
1304 const struct vport *cur_vport,
1305 struct tnl_mutable_config *mutable)
1306{
1307 const struct vport *old_vport;
1308 const struct tnl_mutable_config *old_mutable;
1309
1310 mutable->port_config = *(struct tnl_port_config *)config;
1311
1312 if (mutable->port_config.daddr == 0)
1313 return -EINVAL;
1314
1315 if (mutable->port_config.tos != RT_TOS(mutable->port_config.tos))
1316 return -EINVAL;
1317
d1eb60cc
JG
1318 mutable->tunnel_hlen = tnl_ops->hdr_len(&mutable->port_config);
1319 if (mutable->tunnel_hlen < 0)
1320 return mutable->tunnel_hlen;
1321
1322 mutable->tunnel_hlen += sizeof(struct iphdr);
1323
1324 mutable->tunnel_type = tnl_ops->tunnel_type;
1325 if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
1326 mutable->tunnel_type |= TNL_T_KEY_MATCH;
1327 mutable->port_config.in_key = 0;
1328 } else
1329 mutable->tunnel_type |= TNL_T_KEY_EXACT;
1330
1331 old_vport = tnl_find_port(mutable->port_config.saddr,
1332 mutable->port_config.daddr,
1333 mutable->port_config.in_key,
1334 mutable->tunnel_type,
1335 &old_mutable);
1336
1337 if (old_vport && old_vport != cur_vport)
1338 return -EEXIST;
1339
1340 if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
1341 mutable->port_config.out_key = 0;
1342
1343 return 0;
1344}
1345
1346struct vport *tnl_create(const struct vport_parms *parms,
1347 const struct vport_ops *vport_ops,
1348 const struct tnl_ops *tnl_ops)
1349{
1350 struct vport *vport;
1351 struct tnl_vport *tnl_vport;
1352 int initial_frag_id;
1353 int err;
1354
1355 vport = vport_alloc(sizeof(struct tnl_vport), vport_ops);
1356 if (IS_ERR(vport)) {
1357 err = PTR_ERR(vport);
1358 goto error;
1359 }
1360
1361 tnl_vport = tnl_vport_priv(vport);
1362
1363 strcpy(tnl_vport->name, parms->name);
1364 tnl_vport->tnl_ops = tnl_ops;
1365
1366 tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1367 if (!tnl_vport->mutable) {
1368 err = -ENOMEM;
1369 goto error_free_vport;
1370 }
1371
1372 vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
1373 tnl_vport->mutable->mtu = ETH_DATA_LEN;
1374
1375 get_random_bytes(&initial_frag_id, sizeof(int));
1376 atomic_set(&tnl_vport->frag_id, initial_frag_id);
1377
1378 err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
1379 if (err)
1380 goto error_free_mutable;
1381
1382 spin_lock_init(&tnl_vport->cache_lock);
1383
1384#ifdef NEED_CACHE_TIMEOUT
1385 tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1386 (net_random() % (MAX_CACHE_EXP / 2));
1387#endif
1388
1389 err = add_port(vport);
1390 if (err)
1391 goto error_free_mutable;
1392
1393 return vport;
1394
1395error_free_mutable:
1396 kfree(tnl_vport->mutable);
1397error_free_vport:
1398 vport_free(vport);
1399error:
1400 return ERR_PTR(err);
1401}
1402
1403int tnl_modify(struct vport *vport, struct odp_port *port)
1404{
1405 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1406 struct tnl_mutable_config *mutable;
1407 int err;
1408
1409 mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1410 if (!mutable) {
1411 err = -ENOMEM;
1412 goto error;
1413 }
1414
1415 err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
1416 if (err)
1417 goto error_free;
1418
1419 mutable->seq++;
1420
1421 err = move_port(vport, mutable);
1422 if (err)
1423 goto error_free;
1424
1425 return 0;
1426
1427error_free:
1428 kfree(mutable);
1429error:
1430 return err;
1431}
1432
1433static void free_port_rcu(struct rcu_head *rcu)
1434{
1435 struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);
1436
1437 spin_lock_bh(&tnl_vport->cache_lock);
1438 free_cache(tnl_vport->cache);
1439 spin_unlock_bh(&tnl_vport->cache_lock);
1440
1441 kfree(tnl_vport->mutable);
1442 vport_free(tnl_vport_to_vport(tnl_vport));
1443}
1444
1445int tnl_destroy(struct vport *vport)
1446{
1447 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1448 const struct tnl_mutable_config *old_mutable;
1449
1450 if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
1451 tnl_vport->mutable->port_config.daddr,
1452 tnl_vport->mutable->port_config.in_key,
1453 tnl_vport->mutable->tunnel_type,
1454 &old_mutable))
1455 del_port(vport);
1456
1457 call_rcu(&tnl_vport->rcu, free_port_rcu);
1458
1459 return 0;
1460}
1461
1462int tnl_set_mtu(struct vport *vport, int mtu)
1463{
1464 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1465 struct tnl_mutable_config *mutable;
1466
1467 mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1468 if (!mutable)
1469 return -ENOMEM;
1470
1471 mutable->mtu = mtu;
1472 assign_config_rcu(vport, mutable);
1473
1474 return 0;
1475}
1476
1477int tnl_set_addr(struct vport *vport, const unsigned char *addr)
1478{
1479 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1480 struct tnl_mutable_config *mutable;
1481
1482 mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1483 if (!mutable)
1484 return -ENOMEM;
1485
1486 memcpy(mutable->eth_addr, addr, ETH_ALEN);
1487 assign_config_rcu(vport, mutable);
1488
1489 return 0;
1490}
1491
1492const char *tnl_get_name(const struct vport *vport)
1493{
1494 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1495 return tnl_vport->name;
1496}
1497
1498const unsigned char *tnl_get_addr(const struct vport *vport)
1499{
1500 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1501 return rcu_dereference(tnl_vport->mutable)->eth_addr;
1502}
1503
1504int tnl_get_mtu(const struct vport *vport)
1505{
1506 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1507 return rcu_dereference(tnl_vport->mutable)->mtu;
1508}
1509
1510void tnl_free_linked_skbs(struct sk_buff *skb)
1511{
1512 if (unlikely(!skb))
1513 return;
1514
1515 while (skb) {
1516 struct sk_buff *next = skb->next;
1517 kfree_skb(skb);
1518 skb = next;
1519 }
1520}