/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/inet_ecn.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>

#include "vport-generic.h"
#include "vport-internal_dev.h"
#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif
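
/*
 * MAX_CACHE_EXP above and CACHE_CLEANER_INTERVAL below are both expressed in
 * jiffies, so with the default HZ the maximum cache lifetime works out to
 * roughly one second.  For example, build_cache() stamps a new entry with
 *
 *      cache->expiration = jiffies + tnl_vport->cache_exp_interval;
 *
 * and check_cache_valid() later tests time_before(jiffies, cache->expiration).
 * tnl_create() randomizes cache_exp_interval between MAX_CACHE_EXP / 2 and
 * MAX_CACHE_EXP so that all ports do not rebuild their caches at once.
 */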
/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent
 * through the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by preventing those objects from being destroyed.  The
 * cache cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster but will itself consume resources by requiring more
 * frequent checks.  A longer interval may result in messages being printed to
 * the kernel message buffer about unreleased resources.  The interval is
 * expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)
#define CACHE_DATA_ALIGN 16

/* Protected by RCU. */
static struct tbl *port_table __read_mostly;
static void cache_cleaner(struct work_struct *work);
DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
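
/*
 * tnl_find_port() consults these counters before attempting the corresponding
 * hash lookup; if, say, key_remote_ports is zero, then no tunnel with an exact
 * in_key and a wildcarded local address exists and that probe is skipped.  A
 * sketch of the resulting fast path for a key-exact lookup:
 *
 *      if (key_local_remote_ports)
 *              tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
 *
 * Reading a momentarily stale counter is harmless, which is why no extra
 * synchronization is required.
 */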
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif
static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
        return vport_from_priv(tnl_vport);
}
static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
{
        return container_of(node, struct tnl_vport, tbl_node);
}
static inline void schedule_cache_cleaner(void)
{
        schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}
static void free_cache(struct tnl_cache *cache)
{
        if (!cache)
                return;

        flow_put(cache->flow);
        ip_rt_put(cache->rt);
        kfree(cache);
}
static void free_config_rcu(struct rcu_head *rcu)
{
        struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
        kfree(c);
}
static void free_cache_rcu(struct rcu_head *rcu)
{
        struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
        free_cache(c);
}
static void assign_config_rcu(struct vport *vport,
                              struct tnl_mutable_config *new_config)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_config;

        old_config = tnl_vport->mutable;
        rcu_assign_pointer(tnl_vport->mutable, new_config);
        call_rcu(&old_config->rcu, free_config_rcu);
}
static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *old_cache;

        old_cache = tnl_vport->cache;
        rcu_assign_pointer(tnl_vport->cache, new_cache);

        if (old_cache)
                call_rcu(&old_cache->rcu, free_cache_rcu);
}
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
        if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
                if (mutable->port_config.saddr)
                        return &local_remote_ports;
                else
                        return &remote_ports;
        } else {
                if (mutable->port_config.saddr)
                        return &key_local_remote_ports;
                else
                        return &key_remote_ports;
        }
}
struct port_lookup_key {
        u32 tunnel_type;
        __be32 saddr;
        __be32 daddr;
        __be32 key;
        const struct tnl_mutable_config *mutable;
};

/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target)
{
        const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
        struct port_lookup_key *lookup = target;

        lookup->mutable = rcu_dereference(tnl_vport->mutable);

        return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
                lookup->mutable->port_config.daddr == lookup->daddr &&
                lookup->mutable->port_config.in_key == lookup->key &&
                lookup->mutable->port_config.saddr == lookup->saddr);
}
static u32 port_hash(struct port_lookup_key *k)
{
        return jhash_3words(k->key, k->saddr, k->daddr, k->tunnel_type);
}
static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
        struct port_lookup_key lookup;

        lookup.saddr = mutable->port_config.saddr;
        lookup.daddr = mutable->port_config.daddr;
        lookup.key = mutable->port_config.in_key;
        lookup.tunnel_type = mutable->tunnel_type;

        return port_hash(&lookup);
}
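
/*
 * Both port_hash() and port_cmp() key only on (saddr, daddr, in_key,
 * tunnel_type); the output key and the remaining flags take no part in the
 * lookup.  This is also why set_config() refuses a configuration whose
 * (saddr, daddr, in_key, tunnel_type) tuple already belongs to another vport.
 */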
static void check_table_empty(void)
{
        if (tbl_count(port_table) == 0) {
                struct tbl *old_table = port_table;

                cancel_delayed_work_sync(&cache_cleaner_wq);
                rcu_assign_pointer(port_table, NULL);
                tbl_deferred_destroy(old_table, NULL);
        }
}
static int add_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        int err;

        if (!port_table) {
                struct tbl *new_table;

                new_table = tbl_create(0);
                if (!new_table)
                        return -ENOMEM;

                rcu_assign_pointer(port_table, new_table);
                schedule_cache_cleaner();
        } else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
                struct tbl *old_table = port_table;
                struct tbl *new_table;

                new_table = tbl_expand(old_table);
                if (IS_ERR(new_table))
                        return PTR_ERR(new_table);

                rcu_assign_pointer(port_table, new_table);
                tbl_deferred_destroy(old_table, NULL);
        }

        err = tbl_insert(port_table, &tnl_vport->tbl_node, mutable_hash(tnl_vport->mutable));
        if (err)
                return err;

        (*find_port_pool(tnl_vport->mutable))++;

        return 0;
}
static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
        int err;
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        u32 hash;

        hash = mutable_hash(new_mutable);
        if (hash == tnl_vport->tbl_node.hash)
                goto table_updated;

        /*
         * Ideally we should make this move atomic to avoid having gaps in
         * finding tunnels or the possibility of failure.  However, if we do
         * find a tunnel it will always be consistent.
         */
        err = tbl_remove(port_table, &tnl_vport->tbl_node);
        if (err)
                return err;

        err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
        if (err)
                return err;

table_updated:
        assign_config_rcu(vport, new_mutable);

        return 0;
}
static int del_port(struct vport *vport)
{
        int err;
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

        err = tbl_remove(port_table, &tnl_vport->tbl_node);
        if (err)
                return err;

        check_table_empty();
        (*find_port_pool(tnl_vport->mutable))--;

        return 0;
}
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
                            int tunnel_type,
                            const struct tnl_mutable_config **mutable)
{
        struct port_lookup_key lookup;
        struct tbl *table = rcu_dereference(port_table);
        struct tbl_node *tbl_node;

        if (unlikely(!table))
                return NULL;

        lookup.saddr = saddr;
        lookup.daddr = daddr;

        if (tunnel_type & TNL_T_KEY_EXACT) {
                lookup.key = key;
                lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

                if (key_local_remote_ports) {
                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }

                if (key_remote_ports) {
                        lookup.saddr = 0;

                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;

                        lookup.saddr = saddr;
                }
        }

        if (tunnel_type & TNL_T_KEY_MATCH) {
                lookup.key = 0;
                lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

                if (local_remote_ports) {
                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }

                if (remote_ports) {
                        lookup.saddr = 0;

                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }
        }

        return NULL;

found:
        *mutable = lookup.mutable;
        return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}
static inline void ecn_decapsulate(struct sk_buff *skb)
{
        u8 tos = ip_hdr(skb)->tos;

        if (INET_ECN_is_ce(tos)) {
                __be16 protocol = skb->protocol;
                unsigned int nw_header = skb_network_offset(skb);

                if (skb->protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;

                        protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                        nw_header += VLAN_HLEN;
                }

                if (protocol == htons(ETH_P_IP)) {
                        if (unlikely(!pskb_may_pull(skb, nw_header
                                                    + sizeof(struct iphdr))))
                                return;

                        IP_ECN_set_ce((struct iphdr *)(skb->data + nw_header));
                }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (protocol == htons(ETH_P_IPV6)) {
                        if (unlikely(!pskb_may_pull(skb, nw_header
                                                    + sizeof(struct ipv6hdr))))
                                return;

                        IP6_ECN_set_ce((struct ipv6hdr *)(skb->data + nw_header));
                }
#endif
        }
}
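
/*
 * Example of the ECN handling above: when the outer IP header carries
 * Congestion Experienced, i.e. (tos & INET_ECN_MASK) == INET_ECN_CE, the CE
 * mark is copied onto the encapsulated IPv4 or IPv6 header with
 * IP_ECN_set_ce()/IP6_ECN_set_ce(), so congestion signalled on the tunnel
 * path survives decapsulation.
 */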
/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, skb->dev);

        skb_reset_network_header(skb);

        ecn_decapsulate(skb);

        skb_push(skb, ETH_HLEN);
        compute_ip_summed(skb, false);

        vport_receive(vport, skb);
}
static bool check_ipv4_address(__be32 addr)
{
        if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
            || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
                return false;

        return true;
}
static bool ipv4_should_icmp(struct sk_buff *skb)
{
        struct iphdr *old_iph = ip_hdr(skb);

        /* Don't respond to L2 broadcast. */
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        /* Don't respond to L3 broadcast or invalid addresses. */
        if (!check_ipv4_address(old_iph->daddr) ||
            !check_ipv4_address(old_iph->saddr))
                return false;

        /* Only respond to the first fragment. */
        if (old_iph->frag_off & htons(IP_OFFSET))
                return false;

        /* Don't respond to ICMP error messages. */
        if (old_iph->protocol == IPPROTO_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
                                                (old_iph->ihl << 2) +
                                                offsetof(struct icmphdr, type) -
                                                skb->data, sizeof(icmp_type),
                                                &icmp_type);

                if (!icmp_typep)
                        return false;

                if (*icmp_typep > NR_ICMP_TYPES
                    || (*icmp_typep <= ICMP_PARAMETERPROB
                        && *icmp_typep != ICMP_ECHOREPLY
                        && *icmp_typep != ICMP_ECHO))
                        return false;
        }

        return true;
}
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct iphdr *iph, *old_iph = ip_hdr(skb);
        struct icmphdr *icmph;
        u8 *payload;

        iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
        payload = skb_put(nskb, payload_length);

        /* IP */
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
                   IPTOS_PREC_INTERNETCONTROL;
        iph->tot_len = htons(sizeof(struct iphdr)
                             + sizeof(struct icmphdr)
                             + payload_length);
        get_random_bytes(&iph->id, sizeof(iph->id));
        iph->protocol = IPPROTO_ICMP;
        iph->daddr = old_iph->saddr;
        iph->saddr = old_iph->daddr;

        /* ICMP */
        icmph->type = ICMP_DEST_UNREACH;
        icmph->code = ICMP_FRAG_NEEDED;
        icmph->un.gateway = htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmph->checksum = csum_fold(nskb->csum);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
        struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
        int addr_type;
        int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
        u8 nexthdr = ipv6_hdr(skb)->nexthdr;

        /* Check source address is valid. */
        addr_type = ipv6_addr_type(&old_ipv6h->saddr);
        if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
                return false;

        /* Don't reply to unspecified addresses. */
        if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
                return false;

        /* Don't respond to ICMP error messages. */
        payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
        if (nexthdr == NEXTHDR_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, payload_off +
                                                offsetof(struct icmp6hdr,
                                                         icmp6_type),
                                                sizeof(icmp_type), &icmp_type);

                if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
                        return false;
        }

        return true;
}
static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
        struct icmp6hdr *icmp6h;
        u8 *payload;

        ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
        payload = skb_put(nskb, payload_length);

        /* IPv6 */
        memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
        ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
                                   + payload_length);
        ipv6h->nexthdr = NEXTHDR_ICMP;
        ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
        ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
        ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

        /* ICMPv6 */
        icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
        icmp6h->icmp6_code = 0;
        icmp6h->icmp6_cksum = 0;
        icmp6h->icmp6_mtu = htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                              sizeof(struct icmp6hdr)
                                              + payload_length,
                                              ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
                     struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
{
        unsigned int eth_hdr_len = ETH_HLEN;
        unsigned int total_length = 0, header_length = 0, payload_length;
        struct ethhdr *eh, *old_eh = eth_hdr(skb);
        struct sk_buff *nskb;

        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU)
                        return false;

                if (!ipv4_should_icmp(skb))
                        return true;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU)
                        return false;

                /*
                 * In theory we should do PMTUD on IPv6 multicast messages but
                 * we don't have an address to send from so just fragment.
                 */
                if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
                        return false;

                if (!ipv6_should_icmp(skb))
                        return true;
        }
#endif
        else
                return false;

        if (old_eh->h_proto == htons(ETH_P_8021Q))
                eth_hdr_len = VLAN_ETH_HLEN;

        payload_length = skb->len - eth_hdr_len;
        if (skb->protocol == htons(ETH_P_IP)) {
                header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
                total_length = min_t(unsigned int, header_length +
                                                   payload_length, 576);
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else {
                header_length = sizeof(struct ipv6hdr) +
                                sizeof(struct icmp6hdr);
                total_length = min_t(unsigned int, header_length +
                                                   payload_length, IPV6_MIN_MTU);
        }
#endif

        total_length = min(total_length, mutable->mtu);
        payload_length = total_length - header_length;

        nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
                             payload_length);
        if (!nskb)
                return false;

        skb_reserve(nskb, NET_IP_ALIGN);

        /* Ethernet / VLAN */
        eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
        memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
        memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
        nskb->protocol = eh->h_proto = old_eh->h_proto;
        if (old_eh->h_proto == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

                vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
                vh->h_vlan_encapsulated_proto = skb->protocol;
        }
        skb_reset_mac_header(nskb);

        if (skb->protocol == htons(ETH_P_IP))
                ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else
                ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

        /*
         * Assume that flow based keys are symmetric with respect to input
         * and output and use the key that we were going to put on the
         * outgoing packet for the fake received packet.  If the keys are
         * not symmetric then PMTUD needs to be disabled since we won't have
         * any way of synthesizing packets.
         */
        if ((mutable->port_config.flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
            (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
                OVS_CB(nskb)->tun_id = flow_key;

        compute_ip_summed(nskb, false);
        vport_receive(vport, nskb);

        return true;
}
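
/*
 * Sizing example for the synthesized ICMP above, assuming a 1500 byte inner
 * IPv4 packet behind an untagged Ethernet header and mutable->mtu of at least
 * 576: payload_length starts at 1500, header_length is 20 + 8 = 28 bytes
 * (IP plus ICMP), total_length is clamped to 576 and the quoted payload ends
 * up as 576 - 28 = 548 bytes.  The finished frame is then fed back through
 * vport_receive() as if the ICMP had arrived from the far end of the tunnel.
 */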
static bool check_mtu(struct sk_buff *skb,
                      struct vport *vport,
                      const struct tnl_mutable_config *mutable,
                      const struct rtable *rt, __be16 *frag_offp)
{
        int mtu;
        __be16 frag_off;

        frag_off = (mutable->port_config.flags & TNL_F_PMTUD) ? htons(IP_DF) : 0;

        mtu = dst_mtu(&rt_dst(rt))
              - ETH_HLEN
              - mutable->tunnel_hlen
              - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *old_iph = ip_hdr(skb);

                frag_off |= old_iph->frag_off & htons(IP_DF);
                mtu = max(mtu, IP_MIN_MTU);

                if ((old_iph->frag_off & htons(IP_DF)) &&
                    mtu < ntohs(old_iph->tot_len)) {
                        if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
                                return false;
                }
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                unsigned int packet_length = skb->len - ETH_HLEN
                        - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

                mtu = max(mtu, IPV6_MIN_MTU);

                /* IPv6 requires PMTUD if the packet is above the minimum MTU. */
                if (packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);

                if (mtu < packet_length) {
                        if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
                                return false;
                }
        }
#endif

        *frag_offp = frag_off;
        return true;
}
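
/*
 * Worked example for the MTU arithmetic above, assuming a 1500 byte route MTU,
 * no VLAN tag and a tunnel_hlen of 24 bytes (outer IP header plus a 4 byte
 * encapsulation header): the inner IP packet may be at most
 * 1500 - 14 - 24 = 1462 bytes.  An IPv4 packet with DF set and tot_len 1480
 * therefore triggers tnl_frag_needed() with mtu == 1462 and is dropped by the
 * caller.
 */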
static void create_tunnel_header(const struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 const struct rtable *rt, void *header)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct iphdr *iph = header;

        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = htons(IP_DF);
        iph->protocol = tnl_vport->tnl_ops->ipproto;
        iph->tos = mutable->port_config.tos;
        iph->daddr = rt->rt_dst;
        iph->saddr = rt->rt_src;
        iph->ttl = mutable->port_config.ttl;
        if (!iph->ttl)
                iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

        tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}
static inline void *get_cached_header(const struct tnl_cache *cache)
{
        return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}
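
/*
 * Cached header layout: a single kzalloc'd block holds the struct tnl_cache
 * followed, at the next CACHE_DATA_ALIGN (16 byte) boundary, by the prebuilt
 * headers.  build_cache() places the L2 header taken from the route's hard
 * header cache at offset 0 of that area and the tunnel IP header right after
 * it, so the cached transmit path only has to memcpy cache->len bytes in
 * front of the payload.
 */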
static inline bool check_cache_valid(const struct tnl_cache *cache,
                                     const struct tnl_mutable_config *mutable)
{
        return cache &&
#ifdef NEED_CACHE_TIMEOUT
                time_before(jiffies, cache->expiration) &&
#endif
                atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#ifdef HAVE_HH_SEQ
                rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
                mutable->seq == cache->mutable_seq &&
                (!is_internal_dev(rt_dst(cache->rt).dev) ||
                 (cache->flow && !cache->flow->dead));
}
static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
{
        struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
        const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

        if (cache && !check_cache_valid(cache, mutable) &&
            spin_trylock_bh(&tnl_vport->cache_lock)) {
                assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
                spin_unlock_bh(&tnl_vport->cache_lock);
        }

        return 0;
}
static void cache_cleaner(struct work_struct *work)
{
        schedule_cache_cleaner();

        rcu_read_lock();
        tbl_foreach(port_table, cache_cleaner_cb, NULL);
        rcu_read_unlock();
}
static inline void create_eth_hdr(struct tnl_cache *cache,
                                  const struct rtable *rt)
{
        void *cache_data = get_cached_header(cache);
        int hh_len = rt_dst(rt).hh->hh_len;
        int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
        unsigned hh_seq;

        do {
                hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
                memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
        } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

        cache->hh_seq = hh_seq;
#else
        read_lock_bh(&rt_dst(rt).hh->hh_lock);
        memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
        read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}
static struct tnl_cache *build_cache(struct vport *vport,
                                     const struct tnl_mutable_config *mutable,
                                     struct rtable *rt)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cache;
        void *cache_data;
        int cache_len;

        if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))
                return NULL;

        /*
         * If there is no entry in the ARP cache or if this device does not
         * support hard header caching just fall back to the IP stack.
         */
        if (!rt_dst(rt).hh)
                return NULL;

        /*
         * If lock is contended fall back to directly building the header.
         * We're not going to help performance by sitting here spinning.
         */
        if (!spin_trylock_bh(&tnl_vport->cache_lock))
                return NULL;

        cache = tnl_vport->cache;
        if (check_cache_valid(cache, mutable))
                goto unlock;
        cache = NULL;

        cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

        cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
                        cache_len, GFP_ATOMIC);
        if (!cache)
                goto unlock;

        cache->len = cache_len;

        create_eth_hdr(cache, rt);
        cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

        create_tunnel_header(vport, mutable, rt, cache_data);

        cache->mutable_seq = mutable->seq;
        cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
        cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

        if (is_internal_dev(rt_dst(rt).dev)) {
                struct odp_flow_key flow_key;
                struct tbl_node *flow_node;
                struct vport *vport;
                struct sk_buff *skb;
                bool is_frag;
                int err;

                vport = internal_dev_get_vport(rt_dst(rt).dev);
                if (!vport)
                        goto done;

                skb = alloc_skb(cache->len, GFP_ATOMIC);
                if (!skb)
                        goto done;

                __skb_put(skb, cache->len);
                memcpy(skb->data, get_cached_header(cache), cache->len);

                err = flow_extract(skb, vport->port_no, &flow_key, &is_frag);
                kfree_skb(skb);
                if (err || is_frag)
                        goto done;

                flow_node = tbl_lookup(rcu_dereference(vport->dp->table),
                                       &flow_key, flow_hash(&flow_key),
                                       flow_cmp);
                if (flow_node) {
                        struct sw_flow *flow = flow_cast(flow_node);

                        cache->flow = flow;
                        flow_hold(flow);
                }
        }

done:
        assign_cache_rcu(vport, cache);

unlock:
        spin_unlock_bh(&tnl_vport->cache_lock);

        return cache;
}
static struct rtable *find_route(struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 u8 tos, struct tnl_cache **cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

        *cache = NULL;

        if (likely(tos == mutable->port_config.tos &&
                   check_cache_valid(cur_cache, mutable))) {
                *cache = cur_cache;
                return cur_cache->rt;
        } else {
                struct rtable *rt;
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = mutable->port_config.daddr,
                                                .saddr = mutable->port_config.saddr,
                                                .tos = tos } },
                                    .proto = tnl_vport->tnl_ops->ipproto };

                if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
                        return NULL;

                if (likely(tos == mutable->port_config.tos))
                        *cache = build_cache(vport, mutable, rt);

                return rt;
        }
}
static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
{
        if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
                struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
                if (unlikely(!nskb)) {
                        kfree_skb(skb);
                        return ERR_PTR(-ENOMEM);
                }

                set_skb_csum_bits(skb, nskb);

                if (skb->sk)
                        skb_set_owner_w(nskb, skb->sk);

                kfree_skb(skb);
                return nskb;
        }

        return skb;
}
static inline bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking we should linearize if there are paged frags.
         * However, if all of the refcounts are 1 we know nobody else can
         * change them from underneath us and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
                        return true;

        return false;
}
static struct sk_buff *handle_offloads(struct sk_buff *skb,
                                       const struct tnl_mutable_config *mutable,
                                       const struct rtable *rt)
{
        int min_headroom;
        int err;

        forward_ip_summed(skb);

        err = vswitch_skb_checksum_setup(skb);
        if (unlikely(err))
                goto error_free;

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                       + mutable->tunnel_hlen;

        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

                /*
                 * If we are doing GSO on a pskb it is better to make sure that
                 * the headroom is correct now.  We will only have to copy the
                 * portion in the linear data area and GSO will preserve
                 * headroom when it creates the segments.  This is particularly
                 * beneficial on Xen where we get a lot of GSO pskbs.
                 * Conversely, we avoid copying if it is just to get our own
                 * writable clone because GSO will do the copy for us.
                 */
                if (skb_headroom(skb) < min_headroom) {
                        skb = check_headroom(skb, min_headroom);
                        if (unlikely(IS_ERR(skb))) {
                                err = PTR_ERR(skb);
                                goto error;
                        }
                }

                nskb = skb_gso_segment(skb, 0);
                kfree_skb(skb);
                if (unlikely(IS_ERR(nskb))) {
                        err = PTR_ERR(nskb);
                        goto error;
                }

                skb = nskb;
        } else {
                skb = check_headroom(skb, min_headroom);
                if (unlikely(IS_ERR(skb))) {
                        err = PTR_ERR(skb);
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        /*
                         * Pages aren't locked and could change at any time.
                         * If this happens after we compute the checksum, the
                         * checksum will be wrong.  We linearize now to avoid
                         * this problem.
                         */
                        if (unlikely(need_linearize(skb))) {
                                err = __skb_linearize(skb);
                                if (unlikely(err))
                                        goto error_free;
                        }

                        err = skb_checksum_help(skb);
                        if (unlikely(err))
                                goto error_free;
                } else if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->ip_summed = CHECKSUM_NONE;
        }

        return skb;

error_free:
        kfree_skb(skb);
error:
        return ERR_PTR(err);
}
static int send_frags(struct sk_buff *skb,
                      const struct tnl_mutable_config *mutable)
{
        int sent_len = 0;
        int err;

        while (skb) {
                struct sk_buff *next = skb->next;
                int frag_len = skb->len - mutable->tunnel_hlen;

                skb->next = NULL;
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                if (likely(net_xmit_eval(err) == 0))
                        sent_len += frag_len;
                else {
                        skb = next;
                        goto free_frags;
                }

                skb = next;
        }

        return sent_len;

free_frags:
        /*
         * There's no point in continuing to send fragments once one has been
         * dropped so just free the rest.  This may help improve the congestion
         * that caused the first packet to be dropped.
         */
        tnl_free_linked_skbs(skb);
        return sent_len;
}
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
        enum vport_err_type err = VPORT_E_TX_ERROR;
        struct rtable *rt;
        struct dst_entry *unattached_dst = NULL;
        struct tnl_cache *cache;
        int sent_len = 0;
        __be16 frag_off;
        u8 ttl;
        u8 inner_tos;
        u8 tos;

        /* Validate the protocol headers before we try to use them. */
        if (skb->protocol == htons(ETH_P_8021Q)) {
                if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                        goto error_free;

                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, VLAN_ETH_HLEN);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                                            + sizeof(struct iphdr))))
                        skb->protocol = 0;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                                            + sizeof(struct ipv6hdr))))
                        skb->protocol = 0;
        }
#endif

        /* ToS */
        if (skb->protocol == htons(ETH_P_IP))
                inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6))
                inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
        else
                inner_tos = 0;

        if (mutable->port_config.flags & TNL_F_TOS_INHERIT)
                tos = inner_tos;
        else
                tos = mutable->port_config.tos;

        tos = INET_ECN_encapsulate(tos, inner_tos);

        /* Route lookup */
        rt = find_route(vport, mutable, tos, &cache);
        if (unlikely(!rt))
                goto error_free;
        if (unlikely(!cache))
                unattached_dst = &rt_dst(rt);

        /* Offloading */
        skb = handle_offloads(skb, mutable, rt);
        if (unlikely(IS_ERR(skb)))
                goto error;

        /* MTU */
        if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
                err = VPORT_E_TX_DROPPED;
                goto error_free;
        }

        /*
         * If we are over the MTU, allow the IP stack to handle fragmentation.
         * Fragmentation is a slow path anyways.
         */
        if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
                     cache)) {
                unattached_dst = &rt_dst(rt);
                dst_hold(unattached_dst);
                cache = NULL;
        }

        /* TTL */
        ttl = mutable->port_config.ttl;
        if (!ttl)
                ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

        if (mutable->port_config.flags & TNL_F_TTL_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ipv6_hdr(skb)->hop_limit;
#endif
        }

        while (skb) {
                struct sk_buff *next_skb = skb->next;
                struct iphdr *iph;

                skb->next = NULL;

                if (likely(cache)) {
                        skb_push(skb, cache->len);
                        memcpy(skb->data, get_cached_header(cache), cache->len);
                        skb_reset_mac_header(skb);
                        skb_set_network_header(skb, rt_dst(rt).hh->hh_len);
                } else {
                        skb_push(skb, mutable->tunnel_hlen);
                        create_tunnel_header(vport, mutable, rt, skb->data);
                        skb_reset_network_header(skb);

                        if (next_skb)
                                skb_dst_set(skb, dst_clone(unattached_dst));
                        else {
                                skb_dst_set(skb, unattached_dst);
                                unattached_dst = NULL;
                        }
                }
                skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

                iph = ip_hdr(skb);
                iph->tos = tos;
                iph->ttl = ttl;
                iph->frag_off = frag_off;
                ip_select_ident(iph, &rt_dst(rt), NULL);

                skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);

                if (likely(cache)) {
                        int orig_len = skb->len - cache->len;
                        struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

                        skb->protocol = htons(ETH_P_IP);
                        iph->tot_len = htons(skb->len - skb_network_offset(skb));

                        if (cache_vport) {
                                OVS_CB(skb)->flow = cache->flow;
                                compute_ip_summed(skb, true);
                                vport_receive(cache_vport, skb);
                                sent_len += orig_len;
                        } else {
                                skb->dev = rt_dst(rt).dev;
                                err = dev_queue_xmit(skb);

                                if (likely(net_xmit_eval(err) == 0))
                                        sent_len += orig_len;
                        }
                } else
                        sent_len += send_frags(skb, mutable);

                skb = next_skb;
        }

        if (unlikely(sent_len == 0))
                vport_record_error(vport, VPORT_E_TX_DROPPED);

        return sent_len;

error_free:
        tnl_free_linked_skbs(skb);
error:
        dst_release(unattached_dst);
        vport_record_error(vport, err);

        return sent_len;
}
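
/*
 * Transmit path summary: tnl_send() validates the inner headers, resolves a
 * route via find_route(), fixes up checksums and GSO state in
 * handle_offloads() and then, for each resulting skb, either prepends the
 * cached Ethernet plus tunnel header and hands the packet to the internal
 * device or dev_queue_xmit(), or builds the tunnel header in place and lets
 * the IP stack transmit (and, if needed, fragment) it through send_frags().
 */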
static int set_config(const void *config, const struct tnl_ops *tnl_ops,
                      const struct vport *cur_vport,
                      struct tnl_mutable_config *mutable)
{
        const struct vport *old_vport;
        const struct tnl_mutable_config *old_mutable;

        mutable->port_config = *(struct tnl_port_config *)config;

        if (mutable->port_config.daddr == 0)
                return -EINVAL;

        if (mutable->port_config.tos != RT_TOS(mutable->port_config.tos))
                return -EINVAL;

        mutable->tunnel_hlen = tnl_ops->hdr_len(&mutable->port_config);
        if (mutable->tunnel_hlen < 0)
                return mutable->tunnel_hlen;

        mutable->tunnel_hlen += sizeof(struct iphdr);

        mutable->tunnel_type = tnl_ops->tunnel_type;
        if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
                mutable->tunnel_type |= TNL_T_KEY_MATCH;
                mutable->port_config.in_key = 0;
        } else
                mutable->tunnel_type |= TNL_T_KEY_EXACT;

        old_vport = tnl_find_port(mutable->port_config.saddr,
                                  mutable->port_config.daddr,
                                  mutable->port_config.in_key,
                                  mutable->tunnel_type,
                                  &old_mutable);

        if (old_vport && old_vport != cur_vport)
                return -EEXIST;

        if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
                mutable->port_config.out_key = 0;

        return 0;
}
struct vport *tnl_create(const struct vport_parms *parms,
                         const struct vport_ops *vport_ops,
                         const struct tnl_ops *tnl_ops)
{
        struct vport *vport;
        struct tnl_vport *tnl_vport;
        int initial_frag_id;
        int err;

        vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        tnl_vport = tnl_vport_priv(vport);

        strcpy(tnl_vport->name, parms->name);
        tnl_vport->tnl_ops = tnl_ops;

        tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!tnl_vport->mutable) {
                err = -ENOMEM;
                goto error_free_vport;
        }

        vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
        tnl_vport->mutable->mtu = ETH_DATA_LEN;

        get_random_bytes(&initial_frag_id, sizeof(int));
        atomic_set(&tnl_vport->frag_id, initial_frag_id);

        err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
        if (err)
                goto error_free_mutable;

        spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
        tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
                                        (net_random() % (MAX_CACHE_EXP / 2));
#endif

        err = add_port(vport);
        if (err)
                goto error_free_mutable;

        return vport;

error_free_mutable:
        kfree(tnl_vport->mutable);
error_free_vport:
        vport_free(vport);
error:
        return ERR_PTR(err);
}
int tnl_modify(struct vport *vport, struct odp_port *port)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;
        int err;

        mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
        if (err)
                goto error_free;

        err = move_port(vport, mutable);
        if (err)
                goto error_free;

        return 0;

error_free:
        kfree(mutable);
        return err;
}
static void free_port_rcu(struct rcu_head *rcu)
{
        struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);

        spin_lock_bh(&tnl_vport->cache_lock);
        free_cache(tnl_vport->cache);
        spin_unlock_bh(&tnl_vport->cache_lock);

        kfree(tnl_vport->mutable);
        vport_free(tnl_vport_to_vport(tnl_vport));
}
int tnl_destroy(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *old_mutable;

        if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
                                   tnl_vport->mutable->port_config.daddr,
                                   tnl_vport->mutable->port_config.in_key,
                                   tnl_vport->mutable->tunnel_type,
                                   &old_mutable))
                del_port(vport);

        call_rcu(&tnl_vport->rcu, free_port_rcu);

        return 0;
}
int tnl_set_mtu(struct vport *vport, int mtu)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;

        mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        mutable->mtu = mtu;
        assign_config_rcu(vport, mutable);

        return 0;
}
int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;

        mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        memcpy(mutable->eth_addr, addr, ETH_ALEN);
        assign_config_rcu(vport, mutable);

        return 0;
}
const char *tnl_get_name(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return tnl_vport->name;
}
const unsigned char *tnl_get_addr(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return rcu_dereference(tnl_vport->mutable)->eth_addr;
}
int tnl_get_mtu(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return rcu_dereference(tnl_vport->mutable)->mtu;
}
void tnl_free_linked_skbs(struct sk_buff *skb)
{
        while (skb) {
                struct sk_buff *next = skb->next;

                kfree_skb(skb);
                skb = next;
        }
}