datapath/actions.c (ovs.git)
/*
 * Copyright (c) 2007-2015 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "conntrack.h"
#include "gso.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_gso_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

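/* Deferred actions (recirculation and nested sample actions) are not
 * executed recursively.  Instead, the skb, its flow key and the remaining
 * actions are queued on this per-CPU FIFO, which is drained by
 * process_deferred_actions() once the top-level action list has finished.
 */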
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;

static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

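/* Rewrite the Ethernet type field.  For CHECKSUM_COMPLETE skbs the change is
 * folded into skb->csum: csum_partial() over the { ~old, new } pair yields
 * the checksum delta, which is combined with the existing sum.
 */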
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

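/* Adjust the L4 checksum for an IPv4 address rewrite.  Non-first fragments
 * carry no L4 header, so they are skipped; UDP is only touched when the
 * datagram has a checksum or the skb is CHECKSUM_PARTIAL, preserving
 * "no checksum" datagrams.
 */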
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (likely(recalculate_csum))
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

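/* Output callback handed to the IPv4/IPv6 fragmentation code.  It runs once
 * per fragment: the L2 header and skb metadata saved by prepare_frag() are
 * restored from the per-CPU ovs_frag_data before the fragment is sent out
 * on the stored vport.
 */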
static int ovs_vport_output(OVS_VPORT_OUTPUT_PARAMS)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_GSO_CB(skb) = data->cb;
	ovs_skb_set_inner_protocol(skb, data->inner_protocol);
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = (unsigned long) skb_dst(skb);
	data->vport = vport;
	data->cb = *OVS_GSO_CB(skb);
	data->inner_protocol = ovs_skb_get_inner_protocol(skb);
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = (unsigned long) skb_dst(skb);
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops) {
			goto err;
		}

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = (unsigned long) skb_dst(skb);
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;
#ifdef HAVE_IP_LOCAL_OUT_TAKES_NET
		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
#else
		v6ops->fragment(skb->sk, skb, ovs_vport_output);
#endif
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ETH_HLEN)
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ETH_HLEN);
		}

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = ovs_dp_get_net(dp);
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = ovs_skb_get_inner_protocol(skb);
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(net, vport, skb, mru, ethertype);
		} else {
			OVS_NLERR(true, "Cannot fragment IP frames");
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem, err;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	SKB_INIT_FILL_METADATA_DST(skb);
	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	err = ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
	SKB_RESTORE_FILL_METADATA_DST(skb);
	return err;
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action, or having a truncate action followed by a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		ovs_skb_dst_drop(skb);
		ovs_dst_hold((struct dst_entry *)tun->tun_dst);
		ovs_skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

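/* A masked set attribute carries the value immediately followed by a mask of
 * the same size, e.g. for OVS_KEY_ATTR_IPV4 the payload is a
 * struct ovs_key_ipv4 value followed by a struct ovs_key_ipv4 mask, so
 * get_mask() steps one whole struct past nla_data() to reach the mask.
 */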
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action
		 * of the action list, so we need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff is wasteful.  The following code is
	 * slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
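/* The per-CPU exec_actions_level counter bounds how deeply action execution
 * may nest (nesting can happen, for instance, when output to an internal
 * device re-enters the datapath); deferred actions are only processed once
 * the outermost level returns.
 */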
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	static const int ovs_recursion_limit = 4;
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > ovs_recursion_limit)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}