/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/inet_ecn.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/route.h>

#include "vport-generic.h"
#include "vport-internal_dev.h"
#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
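
/* Note added for this excerpt: HZ is the number of jiffies (timer ticks) per
 * second, so MAX_CACHE_EXP caps the cache lifetime at roughly one second.  A
 * cache entry built at time t is treated as stale once jiffies passes
 * t + the port's cache_exp_interval; check_cache_valid() below enforces this
 * with time_before().
 */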
/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent through
 * the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster but will itself consume resources by requiring more
 * frequent checks.  A longer interval may result in messages being printed to
 * the kernel message buffer about unreleased resources.  The interval is
 * expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)
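
/* Note added for this excerpt: with the usual definition of HZ this works out
 * to a cleaner pass roughly every five seconds.  cache_cleaner() re-arms the
 * delayed work itself via schedule_cache_cleaner(), so the scan keeps running
 * for as long as any tunnel ports exist; check_table_empty() cancels it when
 * the last port goes away.
 */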
#define CACHE_DATA_ALIGN 16

static struct tbl __rcu *port_table __read_mostly;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
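
/* Note added for this excerpt: the four counters above track how many
 * configured ports fall into each lookup category (exact key vs. flow-based
 * key, with or without a specific local address).  tnl_find_port() consults
 * them so it can skip hash-table lookups for categories that currently have
 * no ports; a stale read is harmless because port_cmp() re-checks the port's
 * actual configuration during the lookup.
 */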
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
	return vport_from_priv(tnl_vport);

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
	return container_of(node, struct tnl_vport, tbl_node);

/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
	return rcu_dereference_protected(tnl_vport->cache,
					 lockdep_is_held(&tnl_vport->cache_lock));
static inline void schedule_cache_cleaner(void)
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);

static void free_cache(struct tnl_cache *cache)
	flow_put(cache->flow);
	ip_rt_put(cache->rt);

static void free_config_rcu(struct rcu_head *rcu)
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);

static void free_cache_rcu(struct rcu_head *rcu)
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = cache_dereference(tnl_vport);
	rcu_assign_pointer(tnl_vport->cache, new_cache);
	call_rcu(&old_cache->rcu, free_cache_rcu);
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
			return &local_remote_ports;
			return &remote_ports;
		return &key_local_remote_ports;
		return &key_remote_ports;

struct port_lookup_key {
	const struct tnl_mutable_config *mutable;
/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target)
	const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
	struct port_lookup_key *lookup = target;

	lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
		lookup->mutable->daddr == lookup->daddr &&
		lookup->mutable->in_key == lookup->key &&
		lookup->mutable->saddr == lookup->saddr);
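
/* Note added for this excerpt: port_hash() below feeds the lookup key through
 * two jhash passes -- jhash_3words() mixes the IPv4 source and destination
 * addresses (the elided remainder of that call presumably mixes in the tunnel
 * type), and jhash_2words() then folds the 64-bit tunnel key in as two 32-bit
 * halves, using the first result as the seed.  mutable_hash() simply builds a
 * port_lookup_key from a port's configuration and reuses the same function,
 * so insertions and lookups always hash identical fields.
 */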
static u32 port_hash(struct port_lookup_key *k)
	u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
	return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);

static u32 mutable_hash(const struct tnl_mutable_config *mutable)
	struct port_lookup_key lookup;

	lookup.saddr = mutable->saddr;
	lookup.daddr = mutable->daddr;
	lookup.key = mutable->in_key;
	lookup.tunnel_type = mutable->tunnel_type;

	return port_hash(&lookup);
static void check_table_empty(void)
	struct tbl *old_table = rtnl_dereference(port_table);

	if (tbl_count(old_table) == 0) {
		cancel_delayed_work_sync(&cache_cleaner_wq);
		rcu_assign_pointer(port_table, NULL);
		tbl_deferred_destroy(old_table, NULL);

static int add_port(struct vport *vport)
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

		struct tbl *new_table;

		new_table = tbl_create(TBL_MIN_BUCKETS);

		rcu_assign_pointer(port_table, new_table);
		schedule_cache_cleaner();
	} else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
		struct tbl *new_table;

		new_table = tbl_expand(cur_table);
		if (IS_ERR(new_table))
			return PTR_ERR(new_table);

		rcu_assign_pointer(port_table, new_table);
		tbl_deferred_destroy(cur_table, NULL);

	err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
			 mutable_hash(rtnl_dereference(tnl_vport->mutable)));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	hash = mutable_hash(new_mutable);
	if (hash == tnl_vport->tbl_node.hash)

	/*
	 * Ideally we should make this move atomic to avoid having gaps in
	 * finding tunnels or the possibility of failure.  However, if we do
	 * find a tunnel it will always be consistent.
	 */
	err = tbl_remove(cur_table, &tnl_vport->tbl_node);

	err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
		(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

static int del_port(struct vport *vport)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
			    const struct tnl_mutable_config **mutable)
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference_rtnl(port_table);
	struct tbl_node *tbl_node;

	if (unlikely(!table))

	lookup.saddr = saddr;
	lookup.daddr = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);

		if (key_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);

			lookup.saddr = saddr;

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);

	*mutable = lookup.mutable;
	return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
static inline void ecn_decapsulate(struct sk_buff *skb)
	/* This is accessing the outer IP header of the tunnel, which we've
	 * already validated to be OK.  skb->data is currently set to the start
	 * of the inner Ethernet header, and we've validated ETH_HLEN.
	 */
	if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (skb->protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))

			IP_ECN_set_ce(ip_hdr(skb));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))

			IP6_ECN_set_ce(ipv6_hdr(skb));
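
/* Note added for this excerpt: this implements the standard ECN decapsulation
 * rule -- if the outer (tunnel) IP header arrived with the Congestion
 * Experienced codepoint set, the same marking is copied onto the inner IPv4
 * or IPv6 header so the congestion signal is not lost when the outer header
 * is stripped.
 */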
/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
	/* Packets received by this function are in the following state:
	 * - skb->data points to the inner Ethernet header.
	 * - The inner Ethernet header is in the linear data area.
	 * - skb->csum does not include the inner Ethernet header.
	 * - The layer pointers point at the outer headers.
	 */
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	ecn_decapsulate(skb);
	compute_ip_summed(skb, false);

	vport_receive(vport, skb);

static bool check_ipv4_address(__be32 addr)
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
static bool ipv4_should_icmp(struct sk_buff *skb)
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
	icmph->checksum = csum_fold(nskb->csum);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      ipv6h->nexthdr, nskb->csum);
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)

		if (!ipv4_should_icmp(skb))
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)

		if (!ipv6_should_icmp(skb))

	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
				     payload_length, 576);
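		/* Note added for this excerpt: 576 octets is the minimum
		 * datagram size every IPv4 host is required to accept
		 * (RFC 791), so the synthesized ICMP error is capped there to
		 * keep it deliverable; the IPv6 branch below uses
		 * IPV6_MIN_MTU (1280) for the same reason.
		 */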
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
				     payload_length, IPV6_MIN_MTU);

	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	skb_reset_mac_header(nskb);

	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);

	/*
	 * Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	 */
	if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	compute_ip_summed(nskb, false);
	vport_receive(vport, nskb);
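
/* Note added for this excerpt: rather than transmitting the ICMP
 * "fragmentation needed" / "packet too big" message out a physical interface,
 * tnl_frag_needed() hands the synthesized frame to vport_receive() as if it
 * had arrived on the tunnel port itself.  The datapath then forwards it like
 * any other received packet, so the error reaches the original sender along
 * whatever path its traffic normally takes.
 */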
static bool check_mtu(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
	bool pmtud = mutable->flags & TNL_F_PMTUD;

		frag_off = htons(IP_DF);

		mtu = dst_mtu(&rt_dst(rt))
			- mutable->tunnel_hlen
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ?

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		frag_off |= iph->frag_off & htons(IP_DF);

		if (pmtud && iph->frag_off & htons(IP_DF)) {
			mtu = max(mtu, IP_MIN_MTU);

			if (ntohs(iph->tot_len) > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		unsigned int packet_length = skb->len - ETH_HLEN
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ?

		/* IPv6 requires PMTUD if the packet is above the minimum MTU. */
		if (packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

			mtu = max(mtu, IPV6_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))

	*frag_offp = frag_off;
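	/* Note added for this excerpt: the caller (tnl_send) copies *frag_offp
	 * into the outer IP header's frag_off field, so whenever PMTUD is
	 * enabled, or an inner IPv6 packet exceeds the 1280-byte minimum MTU,
	 * the encapsulated packet is sent with Don't Fragment set and path MTU
	 * discovery can operate on the tunnel traffic.
	 */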
static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = htons(IP_DF);
	iph->protocol = tnl_vport->tnl_ops->ipproto;
	iph->tos = mutable->tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = mutable->ttl;
		iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);

static inline void *get_cached_header(const struct tnl_cache *cache)
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		 (cache->flow && !cache->flow->dead));

static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
	struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);

static void cache_cleaner(struct work_struct *work)
	schedule_cache_cleaner();

	tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;

	read_lock_bh(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock_bh(&rt_dst(rt).hh->hh_lock);
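
/* Note added for this excerpt: the two copies above are alternative code paths
 * selected by elided preprocessor conditionals -- on kernels where the hardware
 * header cache is protected by a seqlock the header is snapshotted with
 * read_seqbegin()/read_seqretry(), while on kernels that guard it with a
 * rwlock the copy is done under read_lock_bh().
 */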
static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;

	if (!(mutable->flags & TNL_F_HDR_CACHE))

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching just fall back to the IP stack.
	 */

	/*
	 * If lock is contended fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	 */
	if (!spin_trylock_bh(&tnl_vport->cache_lock))

	cache = cache_dereference(tnl_vport);
	if (check_cache_valid(cache, mutable))

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;

	if (is_internal_dev(rt_dst(rt).dev)) {
		struct sw_flow_key flow_key;
		struct tbl_node *flow_node;
		struct vport *dst_vport;

		dst_vport = internal_dev_get_vport(rt_dst(rt).dev);

		skb = alloc_skb(cache->len, GFP_ATOMIC);

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, dst_vport->port_no, &flow_key, &is_frag);

		flow_node = tbl_lookup(rcu_dereference(dst_vport->dp->table),
				       &flow_key, flow_hash(&flow_key),
			struct sw_flow *flow = flow_cast(flow_node);

	assign_cache_rcu(vport, cache);

	spin_unlock_bh(&tnl_vport->cache_lock);
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
		return cur_cache->rt;

		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = mutable->daddr,
						.saddr = mutable->saddr,
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))

		if (likely(tos == mutable->tos))
			*cache = build_cache(vport, mutable, rt);
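		/* Note added for this excerpt: the header cache is only built
		 * when the ToS being used matches the port's configured ToS.
		 * With TNL_F_TOS_INHERIT the outer ToS varies per packet, so a
		 * single cached header could not be reused and the route and
		 * header are instead constructed for each transmission.
		 */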
static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
	if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
		if (unlikely(!nskb)) {
			return ERR_PTR(-ENOMEM);

		set_skb_csum_bits(skb, nskb);
			skb_set_owner_w(nskb, skb->sk);
static inline bool need_linearize(const struct sk_buff *skb)
	if (unlikely(skb_shinfo(skb)->frag_list))

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen;

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		/*
		 * If we are doing GSO on a pskb it is better to make sure that
		 * the headroom is correct now.  We will only have to copy the
		 * portion in the linear data area and GSO will preserve
		 * headroom when it creates the segments.  This is particularly
		 * beneficial on Xen where we get a lot of GSO pskbs.
		 * Conversely, we avoid copying if it is just to get our own
		 * writable clone because GSO will do the copy for us.
		 */
		if (skb_headroom(skb) < min_headroom) {
			skb = check_headroom(skb, min_headroom);

		nskb = skb_gso_segment(skb, 0);
			err = PTR_ERR(nskb);

		skb = check_headroom(skb, min_headroom);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			/*
			 * Pages aren't locked and could change at any time.
			 * If this happens after we compute the checksum, the
			 * checksum will be wrong.  We linearize now to avoid
			 */
			if (unlikely(need_linearize(skb))) {
				err = __skb_linearize(skb);

			err = skb_checksum_help(skb);
		} else if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
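		/* Note added for this excerpt: a CHECKSUM_COMPLETE value covers
		 * the packet as it was originally received; once tunnel headers
		 * are pushed in front of it that sum no longer describes the
		 * frame, so it is presumably discarded here rather than carried
		 * forward incorrectly.
		 */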
	return ERR_PTR(err);

static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;

		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		if (likely(net_xmit_eval(err) == 0))
			sent_len += frag_len;

	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped so just free the rest.  This may help improve the congestion
	 * that caused the first packet to be dropped.
	 */
	tnl_free_linked_skbs(skb);
int tnl_send(struct vport *vport, struct sk_buff *skb)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	__be16 frag_off = 0;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))

	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));

	if (mutable->flags & TNL_F_TOS_INHERIT)

	tos = INET_ECN_encapsulate(tos, inner_tos);

	rt = find_route(vport, mutable, tos, &cache);

	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	skb = handle_offloads(skb, mutable, rt);

	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyways.
	 */
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
	ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

	if (mutable->flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;

		struct sk_buff *next_skb = skb->next;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

				skb_dst_set(skb, dst_clone(unattached_dst));
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;

		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);
		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));

				OVS_CB(skb)->flow = cache->flow;
				compute_ip_summed(skb, true);
				vport_receive(cache_vport, skb);
				sent_len += orig_len;

				skb->dev = rt_dst(rt).dev;
				xmit_err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(xmit_err) == 0))
					sent_len += orig_len;

			sent_len += send_frags(skb, mutable);

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	tnl_free_linked_skbs(skb);

	dst_release(unattached_dst);
	vport_record_error(vport, err);
static const struct nla_policy tnl_policy[ODP_TUNNEL_ATTR_MAX + 1] = {
	[ODP_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
	[ODP_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
	[ODP_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
	[ODP_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
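
/* Note added for this excerpt: this nla_policy table is what
 * nla_parse_nested() in tnl_set_config() validates the ODP_TUNNEL_ATTR_*
 * attributes against, so each attribute's payload is checked for the expected
 * size (u32, u64, or u8) before any of the values are read out of it.
 */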
/* Sets ODP_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[ODP_TUNNEL_ATTR_MAX + 1];

	err = nla_parse_nested(a, ODP_TUNNEL_ATTR_MAX, options, tnl_policy);

	if (!a[ODP_TUNNEL_ATTR_FLAGS] || !a[ODP_TUNNEL_ATTR_DST_IPV4])

	mutable->flags = nla_get_u32(a[ODP_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

	if (a[ODP_TUNNEL_ATTR_SRC_IPV4])
		mutable->saddr = nla_get_be32(a[ODP_TUNNEL_ATTR_SRC_IPV4]);
	mutable->daddr = nla_get_be32(a[ODP_TUNNEL_ATTR_DST_IPV4]);

	if (a[ODP_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[ODP_TUNNEL_ATTR_TOS]);
		if (mutable->tos != RT_TOS(mutable->tos))

	if (a[ODP_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[ODP_TUNNEL_ATTR_TTL]);

	mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	mutable->tunnel_type = tnl_ops->tunnel_type;
	if (!a[ODP_TUNNEL_ATTR_IN_KEY]) {
		mutable->tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
		mutable->tunnel_type |= TNL_T_KEY_EXACT;
		mutable->in_key = nla_get_be64(a[ODP_TUNNEL_ATTR_IN_KEY]);

	if (!a[ODP_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
		mutable->out_key = nla_get_be64(a[ODP_TUNNEL_ATTR_OUT_KEY]);

	old_vport = tnl_find_port(mutable->saddr, mutable->daddr,
				  mutable->in_key, mutable->tunnel_type,
	if (old_vport && old_vport != cur_vport)
struct vport *tnl_create(const struct vport_parms *parms,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int initial_frag_id;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
		goto error_free_vport;

	vport_gen_rand_ether_addr(mutable->eth_addr);

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
				       (net_random() % (MAX_CACHE_EXP / 2));

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	err = add_port(vport);
		goto error_free_mutable;

	return ERR_PTR(err);
int tnl_set_options(struct vport *vport, struct nlattr *options)
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);

	/* Copy fields whose values should be retained. */
	old_mutable = rtnl_dereference(tnl_vport->mutable);
	mutable->seq = old_mutable->seq + 1;
	memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

	/* Parse the others configured by userspace. */
	err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);

	err = move_port(vport, mutable);
*vport
, struct sk_buff
*skb
)
1498 const struct tnl_vport
*tnl_vport
= tnl_vport_priv(vport
);
1499 const struct tnl_mutable_config
*mutable = rcu_dereference_rtnl(tnl_vport
->mutable);
1501 NLA_PUT_U32(skb
, ODP_TUNNEL_ATTR_FLAGS
, mutable->flags
& TNL_F_PUBLIC
);
1502 NLA_PUT_BE32(skb
, ODP_TUNNEL_ATTR_DST_IPV4
, mutable->daddr
);
1504 if (!(mutable->flags
& TNL_F_IN_KEY_MATCH
))
1505 NLA_PUT_BE64(skb
, ODP_TUNNEL_ATTR_IN_KEY
, mutable->in_key
);
1506 if (!(mutable->flags
& TNL_F_OUT_KEY_ACTION
))
1507 NLA_PUT_BE64(skb
, ODP_TUNNEL_ATTR_OUT_KEY
, mutable->out_key
);
1509 NLA_PUT_BE32(skb
, ODP_TUNNEL_ATTR_SRC_IPV4
, mutable->saddr
);
1511 NLA_PUT_U8(skb
, ODP_TUNNEL_ATTR_TOS
, mutable->tos
);
1513 NLA_PUT_U8(skb
, ODP_TUNNEL_ATTR_TTL
, mutable->ttl
);
1521 static void free_port_rcu(struct rcu_head
*rcu
)
1523 struct tnl_vport
*tnl_vport
= container_of(rcu
,
1524 struct tnl_vport
, rcu
);
1526 free_cache((struct tnl_cache __force
*)tnl_vport
->cache
);
1527 kfree((struct tnl_mutable __force
*)tnl_vport
->mutable);
1528 vport_free(tnl_vport_to_vport(tnl_vport
));
1531 int tnl_destroy(struct vport
*vport
)
1533 struct tnl_vport
*tnl_vport
= tnl_vport_priv(vport
);
1534 const struct tnl_mutable_config
*mutable, *old_mutable
;
1536 mutable = rtnl_dereference(tnl_vport
->mutable);
1538 if (vport
== tnl_find_port(mutable->saddr
, mutable->daddr
,
1539 mutable->in_key
, mutable->tunnel_type
,
1543 call_rcu(&tnl_vport
->rcu
, free_port_rcu
);
1548 int tnl_set_addr(struct vport
*vport
, const unsigned char *addr
)
1550 struct tnl_vport
*tnl_vport
= tnl_vport_priv(vport
);
1551 struct tnl_mutable_config
*mutable;
1553 mutable = kmemdup(rtnl_dereference(tnl_vport
->mutable),
1554 sizeof(struct tnl_mutable_config
), GFP_KERNEL
);
1558 memcpy(mutable->eth_addr
, addr
, ETH_ALEN
);
1559 assign_config_rcu(vport
, mutable);
1564 const char *tnl_get_name(const struct vport
*vport
)
1566 const struct tnl_vport
*tnl_vport
= tnl_vport_priv(vport
);
1567 return tnl_vport
->name
;
1570 const unsigned char *tnl_get_addr(const struct vport
*vport
)
1572 const struct tnl_vport
*tnl_vport
= tnl_vport_priv(vport
);
1573 return rcu_dereference_rtnl(tnl_vport
->mutable)->eth_addr
;
1576 void tnl_free_linked_skbs(struct sk_buff
*skb
)
1582 struct sk_buff
*next
= skb
->next
;