/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};
static struct action_fifo __percpu *action_fifos;

#define EXEC_ACTIONS_LEVEL_LIMIT 4   /* limit used to detect packet
				      * looping by the network stack
				      */
static DEFINE_PER_CPU(int, exec_actions_level);
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}
static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}
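
/* Illustrative deferral pattern (a sketch, following the callers below):
 *
 *	da = add_deferred_actions(clone, key, actions);
 *	if (!da)
 *		kfree_skb(clone);	(FIFO full: drop rather than recurse)
 *
 * A NULL 'actions' means "recirculate": process_deferred_actions() then
 * feeds the packet back through ovs_dp_process_packet().
 */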
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}
static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}
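
/* A zeroed Ethernet type serves as a sentinel: actions that rewrite headers
 * beyond what the key tracks (MPLS, VLAN pop/push) clear it, and
 * execute_recirc() re-extracts the key before recirculation when needed.
 */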
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
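
/* make_writable() guarantees that the first 'write_len' bytes are linear
 * and private to this skb before any header is mangled, copying the head
 * only when the data is shared with a clone. Every set_*() helper below
 * calls it first; note that it can move the skb data, so header pointers
 * (e.g. from udp_hdr()) must be taken after the call.
 */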
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* The networking stack does not allow simultaneous tunnel and
	 * MPLS GSO.
	 */
	if (skb_encapsulation(skb))
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
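
/* Checksum note: CHECKSUM_COMPLETE means skb->csum already covers all of
 * the packet data, so bytes inserted in front of the payload must be folded
 * in (csum_add on push) or subtracted out (csum_sub on pop) to keep the
 * stored sum valid.
 */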
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(skb_mpls_header(skb),
						  MPLS_HLEN, 0));

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be32 *mpls_lse)
{
	__be32 *stack;
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), *mpls_lse };
		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = *mpls_lse;
	key->mpls.top_lse = *mpls_lse;
	return 0;
}
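
/* The diff[] trick above updates a 1's-complement sum in place: feeding
 * ~old_lse and new_lse into ~csum is equivalent to csum - old + new,
 * without walking the packet again. The same identity underlies the
 * csum_replace*() and inet_proto_csum_replace*() helpers used below.
 */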
/* Remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	/* Update mac_len for subsequent MPLS actions */
	skb->mac_len -= VLAN_HLEN;

	return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		vlan_set_tci(skb, 0);
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* Move the next VLAN tag to the hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN)) {
		key->eth.tci = 0;
		return 0;
	}

	invalidate_flow_key(key);
	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* Push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;

		/* Update mac_len for subsequent MPLS actions */
		skb->mac_len += VLAN_HLEN;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

		invalidate_flow_key(key);
	} else {
		key->eth.tci = vlan->vlan_tci;
	}
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid,
			       ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
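
/* VLAN handling keeps at most one tag in the hardware-accelerated slot
 * (skb->vlan_tci) and writes any older tag back into the packet data, so a
 * double push leaves the outer tag accelerated and the inner tag in-line.
 * pop_vlan() does the inverse, promoting the next in-line tag to the
 * accelerated slot.
 */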
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
			const struct ovs_key_ethernet *eth_key)
{
	int err;

	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(key->eth.src, eth_key->eth_src);
	ether_addr_copy(key->eth.dst, eth_key->eth_dst);
	return 0;
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
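
/* TCP and UDP checksums cover an IP pseudo-header, so rewriting an address
 * must patch the transport checksum as well (the final '1' asks the helper
 * to update the pseudo-header part). A UDP checksum of zero means "none",
 * so a recomputed value of zero is stored as CSUM_MANGLED_0 (0xffff),
 * which is equivalent in 1's-complement arithmetic.
 */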
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (likely(recalculate_csum))
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
	nh->flow_lbl[2] = fl & 0x000000FF;
}
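
/* The first 32 bits of an IPv6 header are laid out as
 * version(4) | traffic class(8) | flow label(20), but struct ipv6hdr
 * splits them into priority(4) and flow_lbl[3]; the traffic class thus
 * straddles 'priority' and the top nibble of flow_lbl[0], which is why
 * set_ipv6_tc() and set_ipv6_fl() both mask and merge into flow_lbl[0].
 */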
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr) {
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
		key->ipv4.addr.src = ipv4_key->ipv4_src;
	}

	if (ipv4_key->ipv4_dst != nh->daddr) {
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
		key->ipv4.addr.dst = ipv4_key->ipv4_dst;
	}

	if (ipv4_key->ipv4_tos != nh->tos) {
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
		key->ip.tos = nh->tos;
	}

	if (ipv4_key->ipv4_ttl != nh->ttl) {
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
		key->ip.ttl = ipv4_key->ipv4_ttl;
	}

	return 0;
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	__be32 *saddr;
	__be32 *daddr;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);
		memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(ipv6_key->ipv6_src));
	}

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;

		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
		memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(ipv6_key->ipv6_dst));
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	key->ip.tos = ipv6_get_dsfield(nh);

	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);

	nh->hop_limit = ipv6_key->ipv6_hlimit;
	key->ip.ttl = ipv6_key->ipv6_hlimit;
	return 0;
}
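
/* When a routing header is present, the L4 checksum is computed over the
 * final destination rather than the address in the fixed header, so
 * rewriting 'daddr' must skip the checksum update in that case;
 * ipv6_find_hdr() with IP6_FH_F_SKIP_RH detects it.
 */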
/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}
static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
		   const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source) {
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);
		key->tp.src = udp_port_key->udp_src;
	}

	if (udp_port_key->udp_dst != uh->dest) {
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
		key->tp.dst = udp_port_key->udp_dst;
	}

	return 0;
}
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
		   const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source) {
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
		key->tp.src = tcp_port_key->tcp_src;
	}

	if (tcp_port_key->tcp_dst != th->dest) {
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
		key->tp.dst = tcp_port_key->tcp_dst;
	}

	return 0;
}
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
		key->tp.src = sctp_port_key->sctp_src;
		key->tp.dst = sctp_port_key->sctp_dst;
	}

	return 0;
}
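
/* SCTP uses a CRC32c checksum, which has no cheap incremental update, so
 * the full checksum is computed before and after the rewrite. XOR-ing
 * old_csum ^ old_correct_csum into the new value preserves any
 * pre-existing checksum error instead of silently repairing a corrupt
 * packet.
 */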
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);
	else
		kfree_skb(skb);
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr)
{
	struct ovs_tunnel_info info;
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.userdata = NULL;
	upcall.portid = 0;
	upcall.egress_tun_info = NULL;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &info);
				if (!err)
					upcall.egress_tun_info = &info;
			}
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}
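
/* OVS_SAMPLE_ATTR_PROBABILITY is a uniform threshold over the full u32
 * range: UINT32_MAX samples (almost) every packet, UINT32_MAX / 2 roughly
 * half. Packets that fail the draw return early without touching the
 * nested action list.
 */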
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, key, nla_data(nested_attr));
		break;
	}

	return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action
		 * of the action list; we need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so cloning and then freeing
	 * the original skbuff would be wasteful. The following code is
	 * slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}
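
/* The 'prev_port' dance above implements a lazy clone: an output action
 * only records the port, and the clone happens at the start of the next
 * iteration. The final output (the common, single-output case) therefore
 * consumes the original skb with no clone at all.
 */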
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
		if (net_ratelimit())
			pr_warn("%s: packet loop detected, dropping.\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
		return -ELOOP;
	}

	this_cpu_inc(exec_actions_level);
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);

	/* This return status currently does not reflect the errors
	 * encountered during deferred actions execution. Probably needs to
	 * be fixed in the future.
	 */
	return err;
}
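
/* Only the outermost invocation (level == 0) drains the deferred-action
 * FIFO; nested invocations just append to it. Combined with the FIFO's
 * fixed size, this bounds both stack depth and total work per packet.
 */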
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}