/* datapath/actions.c */

/*
 * Copyright (c) 2007-2015 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "conntrack.h"
#include "gso.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
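/* Per-CPU scratch area that carries the original L2 header and transmit
 * metadata across IP/IPv6 fragmentation: prepare_frag() fills it in and
 * ovs_vport_output() uses it to rebuild each fragment.
 */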
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_gso_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 4
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
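/* Per-CPU queue of actions (e.g. nested sample or deep recirculation) whose
 * execution is postponed until the outermost ovs_execute_actions() call
 * unwinds; see process_deferred_actions().
 */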
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct recirc_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct recirc_keys __percpu *recirc_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

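/* Rewrite the Ethernet type field, adjusting skb->csum when the packet
 * carries a full hardware checksum (CHECKSUM_COMPLETE).
 */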
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}

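/* Push one MPLS label stack entry between the Ethernet header and the
 * network header, and switch skb->protocol to the MPLS ethertype carried
 * in the action.
 */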
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

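/* Remove the outermost MPLS label stack entry and restore the ethertype
 * supplied by the action; MPLS itself does not encode the payload type.
 */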
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

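/* Rewrite masked fields of the outermost MPLS label stack entry, fixing up
 * skb->csum for CHECKSUM_COMPLETE packets.
 */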
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

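/* Fix up the TCP or UDP checksum after an IPv4 address rewrite.  Fragments
 * other than the first carry no L4 header and are left untouched.
 */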
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (likely(recalculate_csum))
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

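/* Rewrite masked SCTP ports.  The CRC32c checksum is recomputed in such a way
 * that a packet that arrived with an incorrect checksum keeps the same
 * checksum error afterwards.
 */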
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

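/* Output callback passed to the IP/IPv6 fragmentation code.  It runs once per
 * fragment, restores the L2 header and metadata saved by prepare_frag() and
 * transmits the fragment on the stashed vport.
 */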
static int ovs_vport_output(OVS_VPORT_OUTPUT_PARAMS)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_GSO_CB(skb) = data->cb;
	ovs_skb_set_inner_protocol(skb, data->inner_protocol);
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = (unsigned long) skb_dst(skb);
	data->vport = vport;
	data->cb = *OVS_GSO_CB(skb);
	data->inner_protocol = ovs_skb_get_inner_protocol(skb);
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

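/* Fragment an over-MTU IPv4 or IPv6 packet and send the fragments through
 * ovs_vport_output(); packets of any other ethertype are dropped.
 */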
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = (unsigned long) skb_dst(skb);
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = (unsigned long) skb_dst(skb);
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;
#ifdef HAVE_IP_LOCAL_OUT_TAKES_NET
		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
#else
		v6ops->fragment(skb->sk, skb, ovs_vport_output);
#endif
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

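/* Send 'skb' out of 'out_port', truncating it first if a preceding truncate
 * action requested it, and fragmenting it if it exceeds the MRU recorded for
 * the flow.
 */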
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = ovs_dp_get_net(dp);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			OVS_NLERR(true, "Cannot fragment IP frames");
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

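/* Build an OVS_PACKET_CMD_ACTION upcall from the attributes of the userspace
 * action and hand the packet to ovs_dp_upcall().
 */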
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem, err;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	SKB_INIT_FILL_METADATA_DST(skb);
	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	err = ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
	SKB_RESTORE_FILL_METADATA_DST(skb);
	return err;
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action, or having a truncate action followed by a single user-space
	 * action. Treat this usage as a special case.
	 * output_userspace() should clone the skb to be sent to user space;
	 * this skb will be consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

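/* Execute OVS_ACTION_ATTR_HASH: compute an L4 flow hash and store it in the
 * flow key.
 */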
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		ovs_skb_dst_drop(skb);
		ovs_dst_hold((struct dst_entry *)tun->tun_dst);
		ovs_skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

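/* Execute OVS_ACTION_ATTR_RECIRC: re-run the packet through flow lookup with
 * a new recirc_id.  Shallow recursion levels recirculate directly using a
 * per-CPU key; beyond OVS_DEFERRED_ACTION_THRESHOLD the packet is queued on
 * the deferred action FIFO instead.
 */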
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int level;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action of the action
		 * list, so the skb needs to be cloned.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	level = this_cpu_read(exec_actions_level);
	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
		struct sw_flow_key *recirc_key = &rks->key[level - 1];

		*recirc_key = *key;
		recirc_key->recirc_id = nla_get_u32(a);
		ovs_dp_process_packet(skb, recirc_key);

		return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so cloning and then freeing
	 * the original skbuff would be wasteful.  The following code is
	 * therefore slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

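/* Drain the per-CPU deferred action FIFO.  Only the outermost
 * ovs_execute_actions() invocation calls this, after its own action list has
 * finished executing.
 */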
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	recirc_keys = alloc_percpu(struct recirc_keys);
	if (!recirc_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(recirc_keys);
}