/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};
static struct action_fifo __percpu *action_fifos;
#define EXEC_ACTIONS_LEVEL_LIMIT 4   /* limit used to detect packet
				      * looping by the network stack
				      */
static DEFINE_PER_CPU(int, exec_actions_level);
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}
static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}
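
/* Added note (illustrative, restating how the callers below use this):
 * recirc and sample actions do not recurse into do_execute_actions();
 * they park the skb plus a clone of its flow key here, e.g.
 *
 *	da = add_deferred_actions(skb, key, NULL);
 *	if (da)
 *		da->pkt_key.recirc_id = nla_get_u32(a);
 *
 * and only the outermost ovs_execute_actions() call drains the FIFO via
 * process_deferred_actions() once the current action list finishes.
 */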
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}
static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb_encapsulation(skb))
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}
/* 'KEY' must not have any bits set outside of the 'MASK' */
#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
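
/* Worked example (added, illustrative only): with OLD = 0xab, KEY = 0x05
 * and MASK = 0x0f, MASKED() keeps the unmasked high nibble of OLD and
 * takes the masked low nibble from KEY:
 *
 *	(0x05) | (0xab & ~0x0f) == 0x05 | 0xa0 == 0xa5
 */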
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
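
/* Added note (assumes the standard OVS userspace encoding): the
 * vlan_tci passed down with OVS_ACTION_ATTR_PUSH_VLAN carries the
 * VLAN_TAG_PRESENT flag as an in-band "tag present" marker, which is why
 * it is masked out above before the TCI goes into the pushed tag.
 */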
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	SET_MASKED(dst[0], src[0], mask[0]);
	SET_MASKED(dst[1], src[1], mask[1]);
	SET_MASKED(dst[2], src[2], mask[2]);
}
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
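
/* Ordering note (added): both checksum fixups above read the old address
 * through '*addr', so the new address must only be stored afterwards.
 */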
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = MASKED(old[0], addr[0], mask[0]);
	masked[1] = MASKED(old[1], addr[1], mask[1]);
	masked[2] = MASKED(old[2], addr[2], mask[2]);
	masked[3] = MASKED(old[3], addr[3], mask[3]);
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (likely(recalculate_csum))
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}
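
/* Layout reminder (added): the 20-bit flow label spans the low nibble of
 * flow_lbl[0] plus all of flow_lbl[1] and flow_lbl[2]; the high nibble of
 * flow_lbl[0] (bits 21-24 above) belongs to the traffic class and is
 * preserved because the caller never sets those mask bits.
 */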
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}
static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
}
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
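
/* Added note on the XOR above: if the packet arrived with a corrupt
 * checksum, old_csum != old_correct_csum, and the same error bits get
 * folded into the recomputed value, i.e.
 *
 *	error = old_csum ^ old_correct_csum;
 *	sh->checksum == new_csum ^ error;
 *
 * so a previously bad checksum stays bad instead of being "repaired".
 */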
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);
	else
		kfree_skb(skb);
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct ovs_tunnel_info info;
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &info);
				if (!err)
					upcall.egress_tun_info = &info;
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		OVS_CB(skb)->egress_tun_info = nla_data(a);
		return 0;
	}

	return -EINVAL;
}
/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
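
/* Attribute layout (added, illustrative): a masked-set attribute carries
 * the value and its mask back to back, so for OVS_KEY_ATTR_IPV4 the
 * payload is two struct ovs_key_ipv4, and
 *
 *	nla_data(a)				-> value
 *	get_mask(a, struct ovs_key_ipv4 *)	-> mask
 */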
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;
	}

	return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful. So the following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
		if (net_ratelimit())
			pr_warn("%s: packet loop detected, dropping.\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
		return -ELOOP;
	}

	this_cpu_inc(exec_actions_level);
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);

	/* This return status currently does not reflect the errors
	 * encountered during deferred actions execution. Probably needs to
	 * be fixed in the future.
	 */
	return err;
}
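
/* Recursion accounting (added note): exec_actions_level counts nested
 * entries into ovs_execute_actions() on this CPU, e.g. when an output
 * re-enters the datapath via the network stack, which is the looping
 * that EXEC_ACTIONS_LEVEL_LIMIT guards against. Only the outermost call
 * (level == 0) drains the deferred-action FIFO.
 */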
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}
void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}