/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"
#include "gso.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_gso_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 4
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
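
/* Action lists may recurse, e.g. through recirculation and sample
 * actions.  exec_actions_level tracks the current depth: past
 * OVS_DEFERRED_ACTION_THRESHOLD the work is queued on the per-CPU
 * 'action_fifos' and replayed later, and past OVS_RECURSION_LIMIT it
 * is dropped altogether (see clone_execute() and ovs_execute_actions()
 * below).
 */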
/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
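
/* In the FIFO above, 'head' is the producer cursor (action_fifo_put)
 * and 'tail' the consumer cursor (action_fifo_get); both are reset
 * together by action_fifo_init() once the queue has been drained.
 */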
/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}
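
/* SW_FLOW_KEY_INVALID is a flag bit carried in key->mac_proto.  Actions
 * that rewrite the packet beyond what the extracted key describes set
 * it, and consumers that need an accurate key (recirculation,
 * conntrack) call ovs_flow_key_update() to re-extract it first.
 */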
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}
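
/* The CHECKSUM_COMPLETE update above relies on the usual incremental
 * checksum identity (cf. RFC 1624): folding { ~old, new } into the
 * complemented running sum substitutes the new 16-bit value for the
 * old one without re-summing the rest of the packet.
 */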
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!ovs_skb_get_inner_protocol(skb)) {
		skb_set_inner_network_header(skb, skb->mac_len);
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
#ifdef MPLS_HEADER_IS_L3
	skb_set_network_header(skb, skb->mac_len);
#endif

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
		update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
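
/* Net effect of push_mpls(): the L2 header is shifted down by
 * MPLS_HLEN, the new label stack entry fills the gap, and the
 * EtherType is rewritten, so the frame becomes
 * | MAC (mpls_ethertype) | MPLS LSE | original payload |.
 */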
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
		struct ethhdr *hdr;

		/* mpls_hdr() is used to locate the ethertype
		 * field correctly in the presence of VLAN tags.
		 */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		update_ethertype(skb, hdr, ethertype);
	}
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
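
/* OVS_SET_MASKED(x, value, mask) keeps the bits of 'x' where 'mask' is
 * clear and takes 'value' where it is set, roughly
 * x = (value & mask) | (x & ~mask).  A mask of ff:ff:ff:00:00:00, for
 * example, rewrites only the OUI half of a MAC address.
 */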
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}
/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
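
/* IPv4 addresses are part of the TCP/UDP pseudo-header, so rewriting
 * them must patch the L4 checksum as well.  Non-first fragments carry
 * no L4 header (hence the IP_OFFSET check), and because a UDP checksum
 * of zero means "none", a recomputed zero is stored as CSUM_MANGLED_0
 * (0xffff) instead.
 */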
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (likely(recalculate_csum))
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
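
/* TTL shares a 16-bit checksum word of the IPv4 header with the
 * protocol field; shifting the old and new TTL into the high byte lets
 * csum_replace2() fix the header checksum for that byte alone.
 */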
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}
static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}
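
/* With an IPv6 routing extension header present, the L4 checksum
 * covers the final destination rather than the address in the IPv6
 * header, so set_ipv6() above only recalculates the checksum when the
 * ipv6_find_hdr() probe finds no routing header.
 */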
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
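
/* The XOR in set_sctp() deliberately preserves any pre-existing
 * checksum error: old_csum ^ old_correct_csum is the (possibly zero)
 * error term of the incoming packet, re-applied on top of the CRC32c
 * computed for the rewritten ports.
 */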
static int ovs_vport_output(OVS_VPORT_OUTPUT_PARAMS)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_GSO_CB(skb) = data->cb;
	ovs_skb_set_inner_protocol(skb, data->inner_protocol);
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};
/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = (unsigned long) skb_dst(skb);
	data->vport = vport;
	data->cb = *OVS_GSO_CB(skb);
	data->inner_protocol = ovs_skb_get_inner_protocol(skb);
	data->network_offset = orig_network_offset;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = (unsigned long) skb_dst(skb);
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = (unsigned long) skb_dst(skb);
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;
#ifdef HAVE_IP_LOCAL_OUT_TAKES_NET
		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
#else
		v6ops->fragment(skb->sk, skb, ovs_vport_output);
#endif
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}
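
/* Fragmentation borrows the stack's own fragmenters (ip_do_fragment()
 * and the netfilter IPv6 hook), which expect a dst on the skb.  A
 * throwaway on-stack dst (DST_NOCOUNT, attached via
 * skb_dst_set_noref()) stands in just long enough to answer MTU
 * queries, while ovs_vport_output() restores the saved L2 header on
 * every fragment produced.
 */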
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = ovs_dp_get_net(dp);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			OVS_NLERR(true, "Cannot fragment IP frames");
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem, err;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	SKB_INIT_FILL_METADATA_DST(skb);
	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info =
						skb_tunnel_info(skb);
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	err = ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
	SKB_RESTORE_FILL_METADATA_DST(skb);
	return err;
}
/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}
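
/* arg->probability is a fixed-point fraction of U32_MAX: U32_MAX means
 * "sample every packet", zero means "never", and anything in between
 * is compared against a fresh prandom_u32() draw per packet.
 */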
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		ovs_skb_dst_drop(skb);
		ovs_dst_hold((struct dst_entry *)tun->tun_dst);
		ovs_skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}
/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
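
/* A masked set attribute carries the key and its mask back to back,
 * e.g. OVS_KEY_ATTR_IPV4 holds { struct ovs_key_ipv4 key, mask }:
 * casting nla_data() to the key type and adding one lands exactly on
 * the mask half.
 */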
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;
			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}
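
/* Note that do_execute_actions() always takes ownership of 'skb':
 * every path out of the loop either hands it to an output, recirc or
 * upcall consumer, frees it on error, or consumes it once the action
 * list is exhausted.
 */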
/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}

		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}

	return 0;
}
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
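
/* Deferred entries replayed here can themselves defer again: the
 * do {} while loop keeps draining until the FIFO is stable, and only
 * the outermost ovs_execute_actions() invocation triggers the replay.
 */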
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}