/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct recirc_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct recirc_keys __percpu *recirc_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

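/* Note that action_fifo_put() refuses the last slot (head >= SIZE - 1), so
 * at most DEFERRED_ACTION_FIFO_SIZE - 1 actions can be deferred for one
 * packet; callers treat a NULL return as "deferred action limit reached".
 */
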
/* Return the deferred action, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

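/* A zero Ethernet type marks the flow key as stale: actions that rewrite
 * headers cached in the key (MPLS and VLAN push/pop, for instance)
 * invalidate it, and consumers such as execute_recirc() re-extract it on
 * demand via ovs_flow_key_update().
 */
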
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}

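/* The CHECKSUM_COMPLETE fixup above folds { ~old, new } into skb->csum:
 * adding the one's complement of the old ethertype cancels its
 * contribution while the new value replaces it, so the already computed
 * checksum stays valid without rescanning the packet.
 */
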
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	/* mpls_hdr() is used to locate the ethertype field correctly in the
	 * presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

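/* The 16-bit wide copies above rely on Ethernet addresses being 2-byte
 * aligned, which holds both for the netlink-supplied key/mask and for the
 * addresses inside the frame's Ethernet header.
 */
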
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

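/* A UDP checksum of zero means "no checksum", so a recomputed checksum
 * that folds to zero is stored as CSUM_MANGLED_0 (0xffff) instead, which
 * is equivalent on the wire but unambiguous.
 */
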
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

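/* The XOR above works because old_csum ^ old_correct_csum is the XOR
 * difference between the stored and the correct checksum (zero for a
 * valid packet); folding it into new_csum preserves exactly that
 * difference across the rewrite instead of silently repairing corrupt
 * packets.
 */
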
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

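/* ovs_dst_ops backs the stack-allocated, refcount-free dst entries that
 * ovs_fragment() temporarily attaches to the skb, so that the IPv4/IPv6
 * fragmentation paths can read an MTU and an output device without a
 * real route lookup.
 */
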
/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ETH_HLEN)
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ETH_HLEN);
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(net, vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

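/* do_output() transmits directly when the (possibly truncated) packet fits
 * within the MRU plus the device's hard header; fragmentation is attempted
 * only when the MRU itself fits the egress MTU, and anything else is
 * dropped.
 */
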
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info =
						skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action, or having a truncate action followed by a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);

	/* Skip the sample action when out of memory. */
	if (!skb)
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

*skb
, struct sw_flow_key
*key
,
939 const struct nlattr
*attr
)
941 struct ovs_action_hash
*hash_act
= nla_data(attr
);
944 /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
945 hash
= skb_get_hash(skb
);
946 hash
= jhash_1word(hash
, hash_act
->hash_basis
);
950 key
->ovs_flow_hash
= hash
;
static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

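/* Illustrative example: an OVS_ACTION_ATTR_SET_MASKED attribute for IPv4
 * carries two packed ovs_key_ipv4 blocks, the value followed by the mask,
 * so get_mask(a, struct ovs_key_ipv4 *) lands on the second block.  The
 * "+ 1" advances by one element of the caller-supplied pointer type,
 * which is why each caller below passes its own type.
 */
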
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int level;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action
		 * of the action list, so the skb needs to be cloned.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	level = this_cpu_read(exec_actions_level);
	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
		struct sw_flow_key *recirc_key = &rks->key[level - 1];

		*recirc_key = *key;
		recirc_key->recirc_id = nla_get_u32(a);
		ovs_dp_process_packet(skb, recirc_key);

		return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and
	 * then freeing the original skbuff would be wasteful. The following
	 * code is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

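/* Only the outermost invocation (level == 1) drains the deferred-action
 * FIFO; nested invocations merely queue work, which bounds stack depth
 * while still executing every deferred action before the packet is done.
 */
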
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	recirc_keys = alloc_percpu(struct recirc_keys);
	if (!recirc_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(recirc_keys);
}