net/openvswitch/flow_netlink.c
1/*
2 * Copyright (c) 2007-2014 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21#include "flow.h"
22#include "datapath.h"
23#include <linux/uaccess.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
28#include <net/llc_pdu.h>
29#include <linux/kernel.h>
30#include <linux/jhash.h>
31#include <linux/jiffies.h>
32#include <linux/llc.h>
33#include <linux/module.h>
34#include <linux/in.h>
35#include <linux/rcupdate.h>
36#include <linux/if_arp.h>
37#include <linux/ip.h>
38#include <linux/ipv6.h>
39#include <linux/sctp.h>
40#include <linux/tcp.h>
41#include <linux/udp.h>
42#include <linux/icmp.h>
43#include <linux/icmpv6.h>
44#include <linux/rculist.h>
45#include <net/geneve.h>
46#include <net/ip.h>
47#include <net/ipv6.h>
48#include <net/ndisc.h>
49
50#include "flow_netlink.h"
51
52static void update_range__(struct sw_flow_match *match,
53 size_t offset, size_t size, bool is_mask)
54{
55 struct sw_flow_key_range *range = NULL;
56 size_t start = rounddown(offset, sizeof(long));
57 size_t end = roundup(offset + size, sizeof(long));
58
59 if (!is_mask)
60 range = &match->range;
61 else if (match->mask)
62 range = &match->mask->range;
63
64 if (!range)
65 return;
66
67 if (range->start == range->end) {
68 range->start = start;
69 range->end = end;
70 return;
71 }
72
73 if (range->start > start)
74 range->start = start;
75
76 if (range->end < end)
77 range->end = end;
78}
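/* Illustrative note (added commentary, not from the original source):
 * update_range__() widens the dirty range in sizeof(long) steps. For example,
 * on a 64-bit build (sizeof(long) == 8), a field at offset 13 with size 2
 * gives start = rounddown(13, 8) = 8 and end = roundup(15, 8) = 16, so bytes
 * [8, 16) of struct sw_flow_key are marked as relevant for matching.
 */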
79
80#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
81 do { \
82 update_range__(match, offsetof(struct sw_flow_key, field), \
83 sizeof((match)->key->field), is_mask); \
84 if (is_mask) { \
85 if ((match)->mask) \
86 (match)->mask->key.field = value; \
87 } else { \
88 (match)->key->field = value; \
89 } \
90 } while (0)
91
92#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
93 do { \
94 update_range__(match, offset, len, is_mask); \
95 if (is_mask) \
96 memcpy((u8 *)&(match)->mask->key + offset, value_p, \
97 len); \
98 else \
99 memcpy((u8 *)(match)->key + offset, value_p, len); \
100 } while (0)
101
102#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
103 SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
104 value_p, len, is_mask)
105
106#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
107 do { \
108 update_range__(match, offsetof(struct sw_flow_key, field), \
109 sizeof((match)->key->field), is_mask); \
110 if (is_mask) { \
111 if ((match)->mask) \
112 memset((u8 *)&(match)->mask->key.field, value,\
113 sizeof((match)->mask->key.field)); \
114 } else { \
115 memset((u8 *)&(match)->key->field, value, \
116 sizeof((match)->key->field)); \
117 } \
118 } while (0)
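/* Usage sketch (added commentary, not part of the original file): the helpers
 * above are what the attribute parsers below use to fill in a match, e.g.
 *
 *     SW_FLOW_KEY_PUT(match, ip.proto, ipv4_key->ipv4_proto, is_mask);
 *     SW_FLOW_KEY_MEMCPY(match, eth.src, eth_key->eth_src, ETH_ALEN, is_mask);
 *
 * With is_mask == false the value is written to match->key; with
 * is_mask == true it goes to match->mask->key (when a mask is attached).
 * In both cases the touched byte range is folded into the key range via
 * update_range__().
 */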
119
120static bool match_validate(const struct sw_flow_match *match,
121 u64 key_attrs, u64 mask_attrs)
122{
123 u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
124 u64 mask_allowed = key_attrs; /* At most allow all key attributes */
125
126 /* The following mask attributes are allowed only if they
127 * pass the validation tests. */
128 mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
129 | (1 << OVS_KEY_ATTR_IPV6)
130 | (1 << OVS_KEY_ATTR_TCP)
131 | (1 << OVS_KEY_ATTR_TCP_FLAGS)
132 | (1 << OVS_KEY_ATTR_UDP)
133 | (1 << OVS_KEY_ATTR_SCTP)
134 | (1 << OVS_KEY_ATTR_ICMP)
135 | (1 << OVS_KEY_ATTR_ICMPV6)
136 | (1 << OVS_KEY_ATTR_ARP)
137 | (1 << OVS_KEY_ATTR_ND));
138
139 /* Always allowed mask fields. */
140 mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
141 | (1 << OVS_KEY_ATTR_IN_PORT)
142 | (1 << OVS_KEY_ATTR_ETHERTYPE));
143
144 /* Check key attributes. */
145 if (match->key->eth.type == htons(ETH_P_ARP)
146 || match->key->eth.type == htons(ETH_P_RARP)) {
147 key_expected |= 1 << OVS_KEY_ATTR_ARP;
148 if (match->mask && (match->mask->key.tp.src == htons(0xff)))
149 mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
150 }
151
152 if (match->key->eth.type == htons(ETH_P_IP)) {
153 key_expected |= 1 << OVS_KEY_ATTR_IPV4;
154 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
155 mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
156
157 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
158 if (match->key->ip.proto == IPPROTO_UDP) {
159 key_expected |= 1 << OVS_KEY_ATTR_UDP;
160 if (match->mask && (match->mask->key.ip.proto == 0xff))
161 mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
162 }
163
164 if (match->key->ip.proto == IPPROTO_SCTP) {
165 key_expected |= 1 << OVS_KEY_ATTR_SCTP;
166 if (match->mask && (match->mask->key.ip.proto == 0xff))
167 mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
168 }
169
170 if (match->key->ip.proto == IPPROTO_TCP) {
171 key_expected |= 1 << OVS_KEY_ATTR_TCP;
172 key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
173 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
174 mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
175 mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
176 }
177 }
178
179 if (match->key->ip.proto == IPPROTO_ICMP) {
180 key_expected |= 1 << OVS_KEY_ATTR_ICMP;
181 if (match->mask && (match->mask->key.ip.proto == 0xff))
182 mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
183 }
184 }
185 }
186
187 if (match->key->eth.type == htons(ETH_P_IPV6)) {
188 key_expected |= 1 << OVS_KEY_ATTR_IPV6;
189 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
190 mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
191
192 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
193 if (match->key->ip.proto == IPPROTO_UDP) {
194 key_expected |= 1 << OVS_KEY_ATTR_UDP;
195 if (match->mask && (match->mask->key.ip.proto == 0xff))
196 mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
197 }
198
199 if (match->key->ip.proto == IPPROTO_SCTP) {
200 key_expected |= 1 << OVS_KEY_ATTR_SCTP;
201 if (match->mask && (match->mask->key.ip.proto == 0xff))
202 mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
203 }
204
205 if (match->key->ip.proto == IPPROTO_TCP) {
206 key_expected |= 1 << OVS_KEY_ATTR_TCP;
207 key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
208 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
209 mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
210 mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
211 }
212 }
213
214 if (match->key->ip.proto == IPPROTO_ICMPV6) {
215 key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
216 if (match->mask && (match->mask->key.ip.proto == 0xff))
217 mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
218
219 if (match->key->tp.src ==
220 htons(NDISC_NEIGHBOUR_SOLICITATION) ||
221 match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
222 key_expected |= 1 << OVS_KEY_ATTR_ND;
223 if (match->mask && (match->mask->key.tp.src == htons(0xffff)))
224 mask_allowed |= 1 << OVS_KEY_ATTR_ND;
225 }
226 }
227 }
228 }
229
230 if ((key_attrs & key_expected) != key_expected) {
231 /* Key attributes check failed. */
232 OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
233 (unsigned long long)key_attrs, (unsigned long long)key_expected);
234 return false;
235 }
236
237 if ((mask_attrs & mask_allowed) != mask_attrs) {
238 /* Mask attributes check failed. */
239 OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
240 (unsigned long long)mask_attrs, (unsigned long long)mask_allowed);
241 return false;
242 }
243
244 return true;
245}
246
247/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
248static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
249 [OVS_KEY_ATTR_ENCAP] = -1,
250 [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
251 [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
252 [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
253 [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
254 [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
255 [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
256 [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
257 [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
258 [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
259 [OVS_KEY_ATTR_TCP_FLAGS] = sizeof(__be16),
260 [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
261 [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
262 [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
263 [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
264 [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
265 [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
266 [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32),
267 [OVS_KEY_ATTR_DP_HASH] = sizeof(u32),
268 [OVS_KEY_ATTR_TUNNEL] = -1,
269};
270
271static bool is_all_zero(const u8 *fp, size_t size)
272{
273 int i;
274
275 if (!fp)
276 return false;
277
278 for (i = 0; i < size; i++)
279 if (fp[i])
280 return false;
281
282 return true;
283}
284
285static int __parse_flow_nlattrs(const struct nlattr *attr,
286 const struct nlattr *a[],
287 u64 *attrsp, bool nz)
288{
289 const struct nlattr *nla;
290 u64 attrs;
291 int rem;
292
293 attrs = *attrsp;
294 nla_for_each_nested(nla, attr, rem) {
295 u16 type = nla_type(nla);
296 int expected_len;
297
298 if (type > OVS_KEY_ATTR_MAX) {
299 OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
300 type, OVS_KEY_ATTR_MAX);
301 return -EINVAL;
302 }
303
304 if (attrs & (1 << type)) {
305 OVS_NLERR("Duplicate key attribute (type %d).\n", type);
306 return -EINVAL;
307 }
308
309 expected_len = ovs_key_lens[type];
310 if (nla_len(nla) != expected_len && expected_len != -1) {
311 OVS_NLERR("Key attribute has unexpected length (type=%d"
312 ", length=%d, expected=%d).\n", type,
313 nla_len(nla), expected_len);
314 return -EINVAL;
315 }
316
317 if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
318 attrs |= 1 << type;
319 a[type] = nla;
320 }
321 }
322 if (rem) {
323 OVS_NLERR("Message has %d unknown bytes.\n", rem);
324 return -EINVAL;
325 }
326
327 *attrsp = attrs;
328 return 0;
329}
330
331static int parse_flow_mask_nlattrs(const struct nlattr *attr,
332 const struct nlattr *a[], u64 *attrsp)
333{
334 return __parse_flow_nlattrs(attr, a, attrsp, true);
335}
336
337static int parse_flow_nlattrs(const struct nlattr *attr,
338 const struct nlattr *a[], u64 *attrsp)
339{
340 return __parse_flow_nlattrs(attr, a, attrsp, false);
341}
342
343static int ipv4_tun_from_nlattr(const struct nlattr *attr,
344 struct sw_flow_match *match, bool is_mask)
345{
346 struct nlattr *a;
347 int rem;
348 bool ttl = false;
349 __be16 tun_flags = 0;
350 unsigned long opt_key_offset;
351
352 nla_for_each_nested(a, attr, rem) {
353 int type = nla_type(a);
354 static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
355 [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
356 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
357 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
358 [OVS_TUNNEL_KEY_ATTR_TOS] = 1,
359 [OVS_TUNNEL_KEY_ATTR_TTL] = 1,
360 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
361 [OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
362 [OVS_TUNNEL_KEY_ATTR_OAM] = 0,
363 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1,
364 };
365
366 if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
367 OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
368 type, OVS_TUNNEL_KEY_ATTR_MAX);
369 return -EINVAL;
370 }
371
372 if (ovs_tunnel_key_lens[type] != nla_len(a) &&
373 ovs_tunnel_key_lens[type] != -1) {
374 OVS_NLERR("IPv4 tunnel attribute type has unexpected "
375 " length (type=%d, length=%d, expected=%d).\n",
376 type, nla_len(a), ovs_tunnel_key_lens[type]);
377 return -EINVAL;
378 }
379
380 switch (type) {
381 case OVS_TUNNEL_KEY_ATTR_ID:
382 SW_FLOW_KEY_PUT(match, tun_key.tun_id,
383 nla_get_be64(a), is_mask);
384 tun_flags |= TUNNEL_KEY;
385 break;
386 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
387 SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
388 nla_get_be32(a), is_mask);
389 break;
390 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
391 SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
392 nla_get_be32(a), is_mask);
393 break;
394 case OVS_TUNNEL_KEY_ATTR_TOS:
395 SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
396 nla_get_u8(a), is_mask);
397 break;
398 case OVS_TUNNEL_KEY_ATTR_TTL:
399 SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
400 nla_get_u8(a), is_mask);
401 ttl = true;
402 break;
403 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
404 tun_flags |= TUNNEL_DONT_FRAGMENT;
405 break;
406 case OVS_TUNNEL_KEY_ATTR_CSUM:
407 tun_flags |= TUNNEL_CSUM;
408 break;
409 case OVS_TUNNEL_KEY_ATTR_OAM:
410 tun_flags |= TUNNEL_OAM;
411 break;
412 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
413 tun_flags |= TUNNEL_OPTIONS_PRESENT;
414 if (nla_len(a) > sizeof(match->key->tun_opts)) {
415 OVS_NLERR("Geneve option length exceeds maximum size (len %d, max %zu).\n",
416 nla_len(a),
417 sizeof(match->key->tun_opts));
418 return -EINVAL;
419 }
420
421 if (nla_len(a) % 4 != 0) {
422 OVS_NLERR("Geneve option length is not a multiple of 4 (len %d).\n",
423 nla_len(a));
424 return -EINVAL;
425 }
426
427 /* We need to record the length of the options passed
428 * down, otherwise packets with the same format but
429 * additional options will be silently matched.
430 */
431 if (!is_mask) {
432 SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
433 false);
434 } else {
435 /* This is somewhat unusual because it looks at
436 * both the key and mask while parsing the
437 * attributes (and by extension assumes the key
438 * is parsed first). Normally, we would verify
439 * that each is the correct length and that the
440 * attributes line up in the validate function.
441 * However, that is difficult because this is
442 * variable length and we won't have the
443 * information later.
444 */
445 if (match->key->tun_opts_len != nla_len(a)) {
446 OVS_NLERR("Geneve option key length (%d) is different from mask length (%d).",
447 match->key->tun_opts_len,
448 nla_len(a));
449 return -EINVAL;
450 }
451
452 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff,
453 true);
454 }
455
456 opt_key_offset = (unsigned long)GENEVE_OPTS(
457 (struct sw_flow_key *)0,
458 nla_len(a));
459 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset,
460 nla_data(a), nla_len(a),
461 is_mask);
462 break;
463 default:
464 OVS_NLERR("Unknown IPv4 tunnel attribute (%d).\n",
465 type);
466 return -EINVAL;
467 }
468 }
469
470 SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
471
472 if (rem > 0) {
473 OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
474 return -EINVAL;
475 }
476
477 if (!is_mask) {
478 if (!match->key->tun_key.ipv4_dst) {
479 OVS_NLERR("IPv4 tunnel destination address is zero.\n");
480 return -EINVAL;
481 }
482
483 if (!ttl) {
484 OVS_NLERR("IPv4 tunnel TTL not specified.\n");
485 return -EINVAL;
486 }
487 }
488
489 return 0;
490}
491
492static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
493 const struct ovs_key_ipv4_tunnel *output,
494 const struct geneve_opt *tun_opts,
495 int swkey_tun_opts_len)
496{
497 if (output->tun_flags & TUNNEL_KEY &&
498 nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
499 return -EMSGSIZE;
500 if (output->ipv4_src &&
501 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
502 return -EMSGSIZE;
503 if (output->ipv4_dst &&
504 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
505 return -EMSGSIZE;
506 if (output->ipv4_tos &&
507 nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
508 return -EMSGSIZE;
509 if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
510 return -EMSGSIZE;
511 if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
512 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
513 return -EMSGSIZE;
514 if ((output->tun_flags & TUNNEL_CSUM) &&
515 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
516 return -EMSGSIZE;
517 if ((output->tun_flags & TUNNEL_OAM) &&
518 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
519 return -EMSGSIZE;
520 if (tun_opts &&
521 nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
522 swkey_tun_opts_len, tun_opts))
523 return -EMSGSIZE;
524
525 return 0;
526}
527
528
529static int ipv4_tun_to_nlattr(struct sk_buff *skb,
530 const struct ovs_key_ipv4_tunnel *output,
531 const struct geneve_opt *tun_opts,
532 int swkey_tun_opts_len)
533{
534 struct nlattr *nla;
535 int err;
536
537 nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
538 if (!nla)
539 return -EMSGSIZE;
540
541 err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
542 if (err)
543 return err;
544
545 nla_nest_end(skb, nla);
546 return 0;
547}
548
549static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
550 const struct nlattr **a, bool is_mask)
551{
552 if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
553 u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
554
555 SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
556 *attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
557 }
558
559 if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
560 u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);
561
562 SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
563 *attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
564 }
565
566 if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
567 SW_FLOW_KEY_PUT(match, phy.priority,
568 nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
569 *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
570 }
571
572 if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
573 u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
574
575 if (is_mask)
576 in_port = 0xffffffff; /* Always exact match in_port. */
577 else if (in_port >= DP_MAX_PORTS)
578 return -EINVAL;
579
580 SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
581 *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
582 } else if (!is_mask) {
583 SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
584 }
585
586 if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
587 uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
588
589 SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
590 *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
591 }
592 if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
593 if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
594 is_mask))
595 return -EINVAL;
596 *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
597 }
598 return 0;
599}
600
601static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
602 const struct nlattr **a, bool is_mask)
603{
604 int err;
605 u64 orig_attrs = attrs;
606
607 err = metadata_from_nlattrs(match, &attrs, a, is_mask);
608 if (err)
609 return err;
610
611 if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
612 const struct ovs_key_ethernet *eth_key;
613
614 eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
615 SW_FLOW_KEY_MEMCPY(match, eth.src,
616 eth_key->eth_src, ETH_ALEN, is_mask);
617 SW_FLOW_KEY_MEMCPY(match, eth.dst,
618 eth_key->eth_dst, ETH_ALEN, is_mask);
619 attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
620 }
621
622 if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
623 __be16 tci;
624
625 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
626 if (!(tci & htons(VLAN_TAG_PRESENT))) {
627 if (is_mask)
628 OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
629 else
630 OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
631
632 return -EINVAL;
633 }
634
635 SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
636 attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
637 } else if (!is_mask)
638 SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
639
640 if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
641 __be16 eth_type;
642
643 eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
644 if (is_mask) {
645 /* Always exact match EtherType. */
646 eth_type = htons(0xffff);
647 } else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
648 OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
649 ntohs(eth_type), ETH_P_802_3_MIN);
650 return -EINVAL;
651 }
652
653 SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
654 attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
655 } else if (!is_mask) {
656 SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
657 }
658
659 if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
660 const struct ovs_key_ipv4 *ipv4_key;
661
662 ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
663 if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
664 OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
665 ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
666 return -EINVAL;
667 }
668 SW_FLOW_KEY_PUT(match, ip.proto,
669 ipv4_key->ipv4_proto, is_mask);
670 SW_FLOW_KEY_PUT(match, ip.tos,
671 ipv4_key->ipv4_tos, is_mask);
672 SW_FLOW_KEY_PUT(match, ip.ttl,
673 ipv4_key->ipv4_ttl, is_mask);
674 SW_FLOW_KEY_PUT(match, ip.frag,
675 ipv4_key->ipv4_frag, is_mask);
676 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
677 ipv4_key->ipv4_src, is_mask);
678 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
679 ipv4_key->ipv4_dst, is_mask);
680 attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
681 }
682
683 if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
684 const struct ovs_key_ipv6 *ipv6_key;
685
686 ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
687 if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
688 OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
689 ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
690 return -EINVAL;
691 }
692 SW_FLOW_KEY_PUT(match, ipv6.label,
693 ipv6_key->ipv6_label, is_mask);
694 SW_FLOW_KEY_PUT(match, ip.proto,
695 ipv6_key->ipv6_proto, is_mask);
696 SW_FLOW_KEY_PUT(match, ip.tos,
697 ipv6_key->ipv6_tclass, is_mask);
698 SW_FLOW_KEY_PUT(match, ip.ttl,
699 ipv6_key->ipv6_hlimit, is_mask);
700 SW_FLOW_KEY_PUT(match, ip.frag,
701 ipv6_key->ipv6_frag, is_mask);
702 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
703 ipv6_key->ipv6_src,
704 sizeof(match->key->ipv6.addr.src),
705 is_mask);
706 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
707 ipv6_key->ipv6_dst,
708 sizeof(match->key->ipv6.addr.dst),
709 is_mask);
710
711 attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
712 }
713
714 if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
715 const struct ovs_key_arp *arp_key;
716
717 arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
718 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
719 OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
720 arp_key->arp_op);
721 return -EINVAL;
722 }
723
724 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
725 arp_key->arp_sip, is_mask);
726 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
727 arp_key->arp_tip, is_mask);
728 SW_FLOW_KEY_PUT(match, ip.proto,
729 ntohs(arp_key->arp_op), is_mask);
730 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
731 arp_key->arp_sha, ETH_ALEN, is_mask);
732 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
733 arp_key->arp_tha, ETH_ALEN, is_mask);
734
735 attrs &= ~(1 << OVS_KEY_ATTR_ARP);
736 }
737
738 if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
739 const struct ovs_key_tcp *tcp_key;
740
741 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
742 SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
743 SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
744 attrs &= ~(1 << OVS_KEY_ATTR_TCP);
745 }
746
747 if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
748 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
749 SW_FLOW_KEY_PUT(match, tp.flags,
750 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
751 is_mask);
752 } else {
753 SW_FLOW_KEY_PUT(match, tp.flags,
754 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
755 is_mask);
756 }
757 attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
758 }
759
760 if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
761 const struct ovs_key_udp *udp_key;
762
763 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
764 SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
765 SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
766 attrs &= ~(1 << OVS_KEY_ATTR_UDP);
767 }
768
769 if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
770 const struct ovs_key_sctp *sctp_key;
771
772 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
773 SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
774 SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
775 attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
776 }
777
778 if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
779 const struct ovs_key_icmp *icmp_key;
780
781 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
782 SW_FLOW_KEY_PUT(match, tp.src,
783 htons(icmp_key->icmp_type), is_mask);
784 SW_FLOW_KEY_PUT(match, tp.dst,
785 htons(icmp_key->icmp_code), is_mask);
786 attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
787 }
788
789 if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
790 const struct ovs_key_icmpv6 *icmpv6_key;
791
792 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
793 SW_FLOW_KEY_PUT(match, tp.src,
794 htons(icmpv6_key->icmpv6_type), is_mask);
795 SW_FLOW_KEY_PUT(match, tp.dst,
796 htons(icmpv6_key->icmpv6_code), is_mask);
797 attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
798 }
799
800 if (attrs & (1 << OVS_KEY_ATTR_ND)) {
801 const struct ovs_key_nd *nd_key;
802
803 nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
804 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
805 nd_key->nd_target,
806 sizeof(match->key->ipv6.nd.target),
807 is_mask);
808 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
809 nd_key->nd_sll, ETH_ALEN, is_mask);
810 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
811 nd_key->nd_tll, ETH_ALEN, is_mask);
812 attrs &= ~(1 << OVS_KEY_ATTR_ND);
813 }
814
815 if (attrs != 0)
816 return -EINVAL;
817
818 return 0;
819}
820
821static void nlattr_set(struct nlattr *attr, u8 val, bool is_attr_mask_key)
822{
823 struct nlattr *nla;
824 int rem;
825
826 /* The nlattr stream should already have been validated */
827 nla_for_each_nested(nla, attr, rem) {
828 /* We assume that ovs_key_lens[type] == -1 means that type is a
829 * nested attribute
830 */
831 if (is_attr_mask_key && ovs_key_lens[nla_type(nla)] == -1)
832 nlattr_set(nla, val, false);
833 else
834 memset(nla_data(nla), val, nla_len(nla));
835 }
836}
837
838static void mask_set_nlattr(struct nlattr *attr, u8 val)
839{
840 nlattr_set(attr, val, true);
841}
842
843/**
844 * ovs_nla_get_match - parses Netlink attributes into a flow key and
845 * mask. In case the 'mask' is NULL, the flow is treated as an exact match
846 * flow. Otherwise, it is treated as a wildcarded flow, except that the mask
847 * does not include any don't-care bits.
848 * @match: receives the extracted flow match information.
849 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
850 * sequence. The fields should be those of the packet that triggered the creation
851 * of this flow.
852 * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
853 * attribute specifies the mask field of the wildcarded flow.
854 */
855int ovs_nla_get_match(struct sw_flow_match *match,
856 const struct nlattr *key,
857 const struct nlattr *mask)
858{
859 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
860 const struct nlattr *encap;
861 struct nlattr *newmask = NULL;
862 u64 key_attrs = 0;
863 u64 mask_attrs = 0;
864 bool encap_valid = false;
865 int err;
866
867 err = parse_flow_nlattrs(key, a, &key_attrs);
868 if (err)
869 return err;
870
871 if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
872 (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
873 (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
874 __be16 tci;
875
876 if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
877 (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
878 OVS_NLERR("Invalid Vlan frame.\n");
879 return -EINVAL;
880 }
881
882 key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
883 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
884 encap = a[OVS_KEY_ATTR_ENCAP];
885 key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
886 encap_valid = true;
887
888 if (tci & htons(VLAN_TAG_PRESENT)) {
889 err = parse_flow_nlattrs(encap, a, &key_attrs);
890 if (err)
891 return err;
892 } else if (!tci) {
893 /* Corner case for truncated 802.1Q header. */
894 if (nla_len(encap)) {
895 OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
896 return -EINVAL;
897 }
898 } else {
899 OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
900 return -EINVAL;
901 }
902 }
903
904 err = ovs_key_from_nlattrs(match, key_attrs, a, false);
905 if (err)
906 return err;
907
908 if (match->mask && !mask) {
909 /* Create an exact match mask. We need to set to 0xff all the
910 * 'match->mask' fields that have been touched in 'match->key'.
911 * We cannot simply memset 'match->mask', because padding bytes
912 * and fields not specified in 'match->key' should be left to 0.
913 * Instead, we use a stream of netlink attributes, copied from
914 * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care
915 * of filling 'match->mask' appropriately.
916 */
917 newmask = kmemdup(key, nla_total_size(nla_len(key)),
918 GFP_KERNEL);
919 if (!newmask)
920 return -ENOMEM;
921
922 mask_set_nlattr(newmask, 0xff);
923
924 /* The userspace does not send tunnel attributes that are 0,
925 * but we should not wildcard them nonetheless.
926 */
927 if (match->key->tun_key.ipv4_dst)
928 SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true);
929
930 mask = newmask;
931 }
932
e6445719
PS
933 if (mask) {
934 err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
935 if (err)
936 goto free_newmask;
937
938 if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
939 __be16 eth_type = 0;
940 __be16 tci = 0;
941
942 if (!encap_valid) {
943 OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
944 err = -EINVAL;
945 goto free_newmask;
946 }
947
948 mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
949 if (a[OVS_KEY_ATTR_ETHERTYPE])
950 eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
951
952 if (eth_type == htons(0xffff)) {
953 mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
954 encap = a[OVS_KEY_ATTR_ENCAP];
955 err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
956 if (err)
957 goto free_newmask;
958 } else {
959 OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
960 ntohs(eth_type));
961 err = -EINVAL;
962 goto free_newmask;
963 }
964
965 if (a[OVS_KEY_ATTR_VLAN])
966 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
967
968 if (!(tci & htons(VLAN_TAG_PRESENT))) {
969 OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
970 err = -EINVAL;
971 goto free_newmask;
972 }
973 }
974
975 err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
976 if (err)
977 goto free_newmask;
978 }
979
980 if (!match_validate(match, key_attrs, mask_attrs))
981 err = -EINVAL;
982
983free_newmask:
984 kfree(newmask);
985 return err;
986}
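/* Minimal usage sketch (added commentary; the variable names are illustrative
 * and not taken from this file): a flow-install path would typically pair
 * ovs_match_init() with ovs_nla_get_match(), e.g.
 *
 *     struct sw_flow_key key;
 *     struct sw_flow_mask mask;
 *     struct sw_flow_match match;
 *     int error;
 *
 *     ovs_match_init(&match, &key, &mask);
 *     error = ovs_nla_get_match(&match, key_attr, mask_attr);
 *
 * where key_attr/mask_attr are the nested OVS_KEY_ATTR_* blobs taken from the
 * Netlink request. If mask_attr is NULL while match.mask is set, the helper
 * synthesizes an exact-match mask from the key attributes, as described above.
 */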
987
988/**
989 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
990 * @key: Receives extracted in_port, priority, tun_key and skb_mark.
991 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
992 * sequence.
993 *
994 * This parses a series of Netlink attributes that form a flow key, which must
995 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
996 * get the metadata, that is, the parts of the flow key that cannot be
997 * extracted from the packet itself.
998 */
999
1000int ovs_nla_get_flow_metadata(const struct nlattr *attr,
1001 struct sw_flow_key *key)
1002{
1003 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
1004 struct sw_flow_match match;
1005 u64 attrs = 0;
1006 int err;
1007
1008 err = parse_flow_nlattrs(attr, a, &attrs);
1009 if (err)
1010 return -EINVAL;
1011
1012 memset(&match, 0, sizeof(match));
1013 match.key = key;
1014
1015 key->phy.in_port = DP_MAX_PORTS;
1016
1017 return metadata_from_nlattrs(&match, &attrs, a, false);
1018}
1019
1020int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1021 const struct sw_flow_key *output, struct sk_buff *skb)
1022{
1023 struct ovs_key_ethernet *eth_key;
1024 struct nlattr *nla, *encap;
1025 bool is_mask = (swkey != output);
1026
1027 if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
1028 goto nla_put_failure;
1029
1030 if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
1031 goto nla_put_failure;
1032
1033 if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
1034 goto nla_put_failure;
1035
1036 if ((swkey->tun_key.ipv4_dst || is_mask)) {
1037 const struct geneve_opt *opts = NULL;
1038
1039 if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
1040 opts = GENEVE_OPTS(output, swkey->tun_opts_len);
1041
1042 if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
1043 swkey->tun_opts_len))
1044 goto nla_put_failure;
1045 }
1046
1047 if (swkey->phy.in_port == DP_MAX_PORTS) {
1048 if (is_mask && (output->phy.in_port == 0xffff))
1049 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
1050 goto nla_put_failure;
1051 } else {
1052 u16 upper_u16;
1053 upper_u16 = !is_mask ? 0 : 0xffff;
1054
1055 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
1056 (upper_u16 << 16) | output->phy.in_port))
1057 goto nla_put_failure;
1058 }
1059
1060 if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
1061 goto nla_put_failure;
1062
1063 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
1064 if (!nla)
1065 goto nla_put_failure;
1066
1067 eth_key = nla_data(nla);
1068 ether_addr_copy(eth_key->eth_src, output->eth.src);
1069 ether_addr_copy(eth_key->eth_dst, output->eth.dst);
1070
1071 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
1072 __be16 eth_type;
1073 eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
1074 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
1075 nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
1076 goto nla_put_failure;
1077 encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
1078 if (!swkey->eth.tci)
1079 goto unencap;
1080 } else
1081 encap = NULL;
1082
1083 if (swkey->eth.type == htons(ETH_P_802_2)) {
1084 /*
1085 * Ethertype 802.2 is represented in the netlink with omitted
1086 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
1087 * 0xffff in the mask attribute. Ethertype can also
1088 * be wildcarded.
1089 */
1090 if (is_mask && output->eth.type)
1091 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
1092 output->eth.type))
1093 goto nla_put_failure;
1094 goto unencap;
1095 }
1096
1097 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
1098 goto nla_put_failure;
1099
1100 if (swkey->eth.type == htons(ETH_P_IP)) {
1101 struct ovs_key_ipv4 *ipv4_key;
1102
1103 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
1104 if (!nla)
1105 goto nla_put_failure;
1106 ipv4_key = nla_data(nla);
1107 ipv4_key->ipv4_src = output->ipv4.addr.src;
1108 ipv4_key->ipv4_dst = output->ipv4.addr.dst;
1109 ipv4_key->ipv4_proto = output->ip.proto;
1110 ipv4_key->ipv4_tos = output->ip.tos;
1111 ipv4_key->ipv4_ttl = output->ip.ttl;
1112 ipv4_key->ipv4_frag = output->ip.frag;
1113 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1114 struct ovs_key_ipv6 *ipv6_key;
1115
1116 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
1117 if (!nla)
1118 goto nla_put_failure;
1119 ipv6_key = nla_data(nla);
1120 memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
1121 sizeof(ipv6_key->ipv6_src));
1122 memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
1123 sizeof(ipv6_key->ipv6_dst));
1124 ipv6_key->ipv6_label = output->ipv6.label;
1125 ipv6_key->ipv6_proto = output->ip.proto;
1126 ipv6_key->ipv6_tclass = output->ip.tos;
1127 ipv6_key->ipv6_hlimit = output->ip.ttl;
1128 ipv6_key->ipv6_frag = output->ip.frag;
1129 } else if (swkey->eth.type == htons(ETH_P_ARP) ||
1130 swkey->eth.type == htons(ETH_P_RARP)) {
1131 struct ovs_key_arp *arp_key;
1132
1133 nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
1134 if (!nla)
1135 goto nla_put_failure;
1136 arp_key = nla_data(nla);
1137 memset(arp_key, 0, sizeof(struct ovs_key_arp));
1138 arp_key->arp_sip = output->ipv4.addr.src;
1139 arp_key->arp_tip = output->ipv4.addr.dst;
1140 arp_key->arp_op = htons(output->ip.proto);
1141 ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
1142 ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
1143 }
1144
1145 if ((swkey->eth.type == htons(ETH_P_IP) ||
1146 swkey->eth.type == htons(ETH_P_IPV6)) &&
1147 swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
1148
1149 if (swkey->ip.proto == IPPROTO_TCP) {
1150 struct ovs_key_tcp *tcp_key;
1151
1152 nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
1153 if (!nla)
1154 goto nla_put_failure;
1155 tcp_key = nla_data(nla);
1156 tcp_key->tcp_src = output->tp.src;
1157 tcp_key->tcp_dst = output->tp.dst;
1158 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
1159 output->tp.flags))
1160 goto nla_put_failure;
1161 } else if (swkey->ip.proto == IPPROTO_UDP) {
1162 struct ovs_key_udp *udp_key;
1163
1164 nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
1165 if (!nla)
1166 goto nla_put_failure;
1167 udp_key = nla_data(nla);
1168 udp_key->udp_src = output->tp.src;
1169 udp_key->udp_dst = output->tp.dst;
1170 } else if (swkey->ip.proto == IPPROTO_SCTP) {
1171 struct ovs_key_sctp *sctp_key;
1172
1173 nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
1174 if (!nla)
1175 goto nla_put_failure;
1176 sctp_key = nla_data(nla);
1177 sctp_key->sctp_src = output->tp.src;
1178 sctp_key->sctp_dst = output->tp.dst;
1179 } else if (swkey->eth.type == htons(ETH_P_IP) &&
1180 swkey->ip.proto == IPPROTO_ICMP) {
1181 struct ovs_key_icmp *icmp_key;
1182
1183 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
1184 if (!nla)
1185 goto nla_put_failure;
1186 icmp_key = nla_data(nla);
1187 icmp_key->icmp_type = ntohs(output->tp.src);
1188 icmp_key->icmp_code = ntohs(output->tp.dst);
1189 } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
1190 swkey->ip.proto == IPPROTO_ICMPV6) {
1191 struct ovs_key_icmpv6 *icmpv6_key;
1192
1193 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
1194 sizeof(*icmpv6_key));
1195 if (!nla)
1196 goto nla_put_failure;
1197 icmpv6_key = nla_data(nla);
1198 icmpv6_key->icmpv6_type = ntohs(output->tp.src);
1199 icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
1200
1201 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
1202 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
1203 struct ovs_key_nd *nd_key;
1204
1205 nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
1206 if (!nla)
1207 goto nla_put_failure;
1208 nd_key = nla_data(nla);
1209 memcpy(nd_key->nd_target, &output->ipv6.nd.target,
1210 sizeof(nd_key->nd_target));
1211 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
1212 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
1213 }
1214 }
1215 }
1216
1217unencap:
1218 if (encap)
1219 nla_nest_end(skb, encap);
1220
1221 return 0;
1222
1223nla_put_failure:
1224 return -EMSGSIZE;
1225}
1226
1227#define MAX_ACTIONS_BUFSIZE (32 * 1024)
1228
1229struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
1230{
1231 struct sw_flow_actions *sfa;
1232
1233 if (size > MAX_ACTIONS_BUFSIZE)
1234 return ERR_PTR(-EINVAL);
1235
1236 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
1237 if (!sfa)
1238 return ERR_PTR(-ENOMEM);
1239
1240 sfa->actions_len = 0;
1241 return sfa;
1242}
1243
1244/* Schedules 'sf_acts' to be freed after the next RCU grace period.
1245 * The caller must hold rcu_read_lock for this to be sensible. */
1246void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
1247{
1248 kfree_rcu(sf_acts, rcu);
1249}
1250
1251static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
1252 int attr_len)
1253{
1254
1255 struct sw_flow_actions *acts;
1256 int new_acts_size;
1257 int req_size = NLA_ALIGN(attr_len);
1258 int next_offset = offsetof(struct sw_flow_actions, actions) +
1259 (*sfa)->actions_len;
1260
1261 if (req_size <= (ksize(*sfa) - next_offset))
1262 goto out;
1263
1264 new_acts_size = ksize(*sfa) * 2;
1265
1266 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
1267 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
1268 return ERR_PTR(-EMSGSIZE);
1269 new_acts_size = MAX_ACTIONS_BUFSIZE;
1270 }
1271
1272 acts = ovs_nla_alloc_flow_actions(new_acts_size);
1273 if (IS_ERR(acts))
1274 return (void *)acts;
1275
1276 memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
1277 acts->actions_len = (*sfa)->actions_len;
1278 kfree(*sfa);
1279 *sfa = acts;
1280
1281out:
1282 (*sfa)->actions_len += req_size;
1283 return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
1284}
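/* Note (added commentary): reserve_sfa_size() doubles the current allocation
 * (as reported by ksize()) when the tail cannot hold the new attribute, e.g.
 * a buffer whose ksize() is 2 KB is reallocated to 4 KB. If the doubled size
 * would exceed MAX_ACTIONS_BUFSIZE (32 KB), the request must still fit below
 * that cap or the call fails with -EMSGSIZE.
 */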
1285
1286static struct nlattr *__add_action(struct sw_flow_actions **sfa,
1287 int attrtype, void *data, int len)
1288{
1289 struct nlattr *a;
1290
1291 a = reserve_sfa_size(sfa, nla_attr_size(len));
1292 if (IS_ERR(a))
1293 return a;
1294
1295 a->nla_type = attrtype;
1296 a->nla_len = nla_attr_size(len);
1297
1298 if (data)
1299 memcpy(nla_data(a), data, len);
1300 memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
1301
1302 return a;
1303}
1304
1305static int add_action(struct sw_flow_actions **sfa, int attrtype,
1306 void *data, int len)
1307{
1308 struct nlattr *a;
1309
1310 a = __add_action(sfa, attrtype, data, len);
1311 if (IS_ERR(a))
1312 return PTR_ERR(a);
1313
1314 return 0;
1315}
1316
1317static inline int add_nested_action_start(struct sw_flow_actions **sfa,
1318 int attrtype)
1319{
1320 int used = (*sfa)->actions_len;
1321 int err;
1322
1323 err = add_action(sfa, attrtype, NULL, 0);
1324 if (err)
1325 return err;
1326
1327 return used;
1328}
1329
1330static inline void add_nested_action_end(struct sw_flow_actions *sfa,
1331 int st_offset)
1332{
1333 struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
1334 st_offset);
1335
1336 a->nla_len = sfa->actions_len - st_offset;
1337}
1338
1339static int validate_and_copy_sample(const struct nlattr *attr,
1340 const struct sw_flow_key *key, int depth,
1341 struct sw_flow_actions **sfa)
1342{
1343 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
1344 const struct nlattr *probability, *actions;
1345 const struct nlattr *a;
1346 int rem, start, err, st_acts;
1347
1348 memset(attrs, 0, sizeof(attrs));
1349 nla_for_each_nested(a, attr, rem) {
1350 int type = nla_type(a);
1351 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
1352 return -EINVAL;
1353 attrs[type] = a;
1354 }
1355 if (rem)
1356 return -EINVAL;
1357
1358 probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
1359 if (!probability || nla_len(probability) != sizeof(u32))
1360 return -EINVAL;
1361
1362 actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
1363 if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
1364 return -EINVAL;
1365
1366 /* validation done, copy sample action. */
1367 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
1368 if (start < 0)
1369 return start;
1370 err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
1371 nla_data(probability), sizeof(u32));
1372 if (err)
1373 return err;
1374 st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
1375 if (st_acts < 0)
1376 return st_acts;
1377
1378 err = ovs_nla_copy_actions(actions, key, depth + 1, sfa);
1379 if (err)
1380 return err;
1381
1382 add_nested_action_end(*sfa, st_acts);
1383 add_nested_action_end(*sfa, start);
1384
1385 return 0;
1386}
1387
1388static int validate_tp_port(const struct sw_flow_key *flow_key)
1389{
1390 if ((flow_key->eth.type == htons(ETH_P_IP) ||
1391 flow_key->eth.type == htons(ETH_P_IPV6)) &&
1392 (flow_key->tp.src || flow_key->tp.dst))
1393 return 0;
1394
1395 return -EINVAL;
1396}
1397
1398void ovs_match_init(struct sw_flow_match *match,
1399 struct sw_flow_key *key,
1400 struct sw_flow_mask *mask)
1401{
1402 memset(match, 0, sizeof(*match));
1403 match->key = key;
1404 match->mask = mask;
1405
1406 memset(key, 0, sizeof(*key));
1407
1408 if (mask) {
1409 memset(&mask->key, 0, sizeof(mask->key));
1410 mask->range.start = mask->range.end = 0;
1411 }
1412}
1413
1414static int validate_and_copy_set_tun(const struct nlattr *attr,
1415 struct sw_flow_actions **sfa)
1416{
1417 struct sw_flow_match match;
1418 struct sw_flow_key key;
1419 struct ovs_tunnel_info *tun_info;
1420 struct nlattr *a;
1421 int err, start;
1422
1423 ovs_match_init(&match, &key, NULL);
1424 err = ipv4_tun_from_nlattr(nla_data(attr), &match, false);
1425 if (err)
1426 return err;
1427
1428 if (key.tun_opts_len) {
1429 struct geneve_opt *option = GENEVE_OPTS(&key,
1430 key.tun_opts_len);
1431 int opts_len = key.tun_opts_len;
1432 bool crit_opt = false;
1433
1434 while (opts_len > 0) {
1435 int len;
1436
1437 if (opts_len < sizeof(*option))
1438 return -EINVAL;
1439
1440 len = sizeof(*option) + option->length * 4;
1441 if (len > opts_len)
1442 return -EINVAL;
1443
1444 crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
1445
1446 option = (struct geneve_opt *)((u8 *)option + len);
1447 opts_len -= len;
1448 };
1449
1450 key.tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
1451 };
1452
1453 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
1454 if (start < 0)
1455 return start;
1456
1457 a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
1458 sizeof(*tun_info) + key.tun_opts_len);
1459 if (IS_ERR(a))
1460 return PTR_ERR(a);
1461
1462 tun_info = nla_data(a);
1463 tun_info->tunnel = key.tun_key;
1464 tun_info->options_len = key.tun_opts_len;
1465
1466 if (tun_info->options_len) {
1467 /* We need to store the options in the action itself since
1468 * everything else will go away after flow setup. We can append
1469 * it to tun_info and then point there.
1470 */
1471 memcpy((tun_info + 1), GENEVE_OPTS(&key, key.tun_opts_len),
1472 key.tun_opts_len);
1473 tun_info->options = (struct geneve_opt *)(tun_info + 1);
1474 } else {
1475 tun_info->options = NULL;
1476 }
1477
1478 add_nested_action_end(*sfa, start);
1479
1480 return err;
1481}
1482
1483static int validate_set(const struct nlattr *a,
1484 const struct sw_flow_key *flow_key,
1485 struct sw_flow_actions **sfa,
1486 bool *set_tun)
1487{
1488 const struct nlattr *ovs_key = nla_data(a);
1489 int key_type = nla_type(ovs_key);
1490
1491 /* There can be only one key in an action */
1492 if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
1493 return -EINVAL;
1494
1495 if (key_type > OVS_KEY_ATTR_MAX ||
1496 (ovs_key_lens[key_type] != nla_len(ovs_key) &&
1497 ovs_key_lens[key_type] != -1))
1498 return -EINVAL;
1499
1500 switch (key_type) {
1501 const struct ovs_key_ipv4 *ipv4_key;
1502 const struct ovs_key_ipv6 *ipv6_key;
1503 int err;
1504
1505 case OVS_KEY_ATTR_PRIORITY:
1506 case OVS_KEY_ATTR_SKB_MARK:
1507 case OVS_KEY_ATTR_ETHERNET:
1508 break;
1509
1510 case OVS_KEY_ATTR_TUNNEL:
1511 *set_tun = true;
1512 err = validate_and_copy_set_tun(a, sfa);
1513 if (err)
1514 return err;
1515 break;
1516
1517 case OVS_KEY_ATTR_IPV4:
1518 if (flow_key->eth.type != htons(ETH_P_IP))
1519 return -EINVAL;
1520
1521 if (!flow_key->ip.proto)
1522 return -EINVAL;
1523
1524 ipv4_key = nla_data(ovs_key);
1525 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
1526 return -EINVAL;
1527
1528 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
1529 return -EINVAL;
1530
1531 break;
1532
1533 case OVS_KEY_ATTR_IPV6:
1534 if (flow_key->eth.type != htons(ETH_P_IPV6))
1535 return -EINVAL;
1536
1537 if (!flow_key->ip.proto)
1538 return -EINVAL;
1539
1540 ipv6_key = nla_data(ovs_key);
1541 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
1542 return -EINVAL;
1543
1544 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
1545 return -EINVAL;
1546
1547 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
1548 return -EINVAL;
1549
1550 break;
1551
1552 case OVS_KEY_ATTR_TCP:
1553 if (flow_key->ip.proto != IPPROTO_TCP)
1554 return -EINVAL;
1555
1556 return validate_tp_port(flow_key);
1557
1558 case OVS_KEY_ATTR_UDP:
1559 if (flow_key->ip.proto != IPPROTO_UDP)
1560 return -EINVAL;
1561
1562 return validate_tp_port(flow_key);
1563
1564 case OVS_KEY_ATTR_SCTP:
1565 if (flow_key->ip.proto != IPPROTO_SCTP)
1566 return -EINVAL;
1567
1568 return validate_tp_port(flow_key);
1569
1570 default:
1571 return -EINVAL;
1572 }
1573
1574 return 0;
1575}
1576
1577static int validate_userspace(const struct nlattr *attr)
1578{
1579 static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
1580 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
1581 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
1582 };
1583 struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
1584 int error;
1585
1586 error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
1587 attr, userspace_policy);
1588 if (error)
1589 return error;
1590
1591 if (!a[OVS_USERSPACE_ATTR_PID] ||
1592 !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
1593 return -EINVAL;
1594
1595 return 0;
1596}
1597
1598static int copy_action(const struct nlattr *from,
1599 struct sw_flow_actions **sfa)
1600{
1601 int totlen = NLA_ALIGN(from->nla_len);
1602 struct nlattr *to;
1603
1604 to = reserve_sfa_size(sfa, from->nla_len);
1605 if (IS_ERR(to))
1606 return PTR_ERR(to);
1607
1608 memcpy(to, from, totlen);
1609 return 0;
1610}
1611
1612int ovs_nla_copy_actions(const struct nlattr *attr,
1613 const struct sw_flow_key *key,
1614 int depth,
1615 struct sw_flow_actions **sfa)
1616{
1617 const struct nlattr *a;
1618 int rem, err;
1619
1620 if (depth >= SAMPLE_ACTION_DEPTH)
1621 return -EOVERFLOW;
1622
1623 nla_for_each_nested(a, attr, rem) {
1624 /* Expected argument lengths, (u32)-1 for variable length. */
1625 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
1626 [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
1627 [OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
1628 [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
1629 [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
1630 [OVS_ACTION_ATTR_POP_VLAN] = 0,
1631 [OVS_ACTION_ATTR_SET] = (u32)-1,
1632 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
1633 [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
1634 };
1635 const struct ovs_action_push_vlan *vlan;
1636 int type = nla_type(a);
1637 bool skip_copy;
1638
1639 if (type > OVS_ACTION_ATTR_MAX ||
1640 (action_lens[type] != nla_len(a) &&
1641 action_lens[type] != (u32)-1))
1642 return -EINVAL;
1643
1644 skip_copy = false;
1645 switch (type) {
1646 case OVS_ACTION_ATTR_UNSPEC:
1647 return -EINVAL;
1648
1649 case OVS_ACTION_ATTR_USERSPACE:
1650 err = validate_userspace(a);
1651 if (err)
1652 return err;
1653 break;
1654
1655 case OVS_ACTION_ATTR_OUTPUT:
1656 if (nla_get_u32(a) >= DP_MAX_PORTS)
1657 return -EINVAL;
1658 break;
1659
1660 case OVS_ACTION_ATTR_HASH: {
1661 const struct ovs_action_hash *act_hash = nla_data(a);
1662
1663 switch (act_hash->hash_alg) {
1664 case OVS_HASH_ALG_L4:
1665 break;
1666 default:
1667 return -EINVAL;
1668 }
1669
1670 break;
1671 }
1672
1673 case OVS_ACTION_ATTR_POP_VLAN:
1674 break;
1675
1676 case OVS_ACTION_ATTR_PUSH_VLAN:
1677 vlan = nla_data(a);
1678 if (vlan->vlan_tpid != htons(ETH_P_8021Q))
1679 return -EINVAL;
1680 if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
1681 return -EINVAL;
1682 break;
1683
1684 case OVS_ACTION_ATTR_RECIRC:
1685 break;
1686
1687 case OVS_ACTION_ATTR_SET:
1688 err = validate_set(a, key, sfa, &skip_copy);
1689 if (err)
1690 return err;
1691 break;
1692
1693 case OVS_ACTION_ATTR_SAMPLE:
1694 err = validate_and_copy_sample(a, key, depth, sfa);
1695 if (err)
1696 return err;
1697 skip_copy = true;
1698 break;
1699
1700 default:
1701 return -EINVAL;
1702 }
1703 if (!skip_copy) {
1704 err = copy_action(a, sfa);
1705 if (err)
1706 return err;
1707 }
1708 }
1709
1710 if (rem > 0)
1711 return -EINVAL;
1712
1713 return 0;
1714}
1715
1716static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
1717{
1718 const struct nlattr *a;
1719 struct nlattr *start;
1720 int err = 0, rem;
1721
1722 start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
1723 if (!start)
1724 return -EMSGSIZE;
1725
1726 nla_for_each_nested(a, attr, rem) {
1727 int type = nla_type(a);
1728 struct nlattr *st_sample;
1729
1730 switch (type) {
1731 case OVS_SAMPLE_ATTR_PROBABILITY:
1732 if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
1733 sizeof(u32), nla_data(a)))
1734 return -EMSGSIZE;
1735 break;
1736 case OVS_SAMPLE_ATTR_ACTIONS:
1737 st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
1738 if (!st_sample)
1739 return -EMSGSIZE;
1740 err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
1741 if (err)
1742 return err;
1743 nla_nest_end(skb, st_sample);
1744 break;
1745 }
1746 }
1747
1748 nla_nest_end(skb, start);
1749 return err;
1750}
1751
1752static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
1753{
1754 const struct nlattr *ovs_key = nla_data(a);
1755 int key_type = nla_type(ovs_key);
1756 struct nlattr *start;
1757 int err;
1758
1759 switch (key_type) {
1760 case OVS_KEY_ATTR_TUNNEL_INFO: {
1761 struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
1762
1763 start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
1764 if (!start)
1765 return -EMSGSIZE;
1766
1767 err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
1768 tun_info->options_len ?
1769 tun_info->options : NULL,
1770 tun_info->options_len);
1771 if (err)
1772 return err;
1773 nla_nest_end(skb, start);
1774 break;
1775 }
1776 default:
1777 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
1778 return -EMSGSIZE;
1779 break;
1780 }
1781
1782 return 0;
1783}
1784
1785int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
1786{
1787 const struct nlattr *a;
1788 int rem, err;
1789
1790 nla_for_each_attr(a, attr, len, rem) {
1791 int type = nla_type(a);
1792
1793 switch (type) {
1794 case OVS_ACTION_ATTR_SET:
1795 err = set_action_to_attr(a, skb);
1796 if (err)
1797 return err;
1798 break;
1799
1800 case OVS_ACTION_ATTR_SAMPLE:
1801 err = sample_action_to_attr(a, skb);
1802 if (err)
1803 return err;
1804 break;
1805 default:
1806 if (nla_put(skb, type, nla_len(a), nla_data(a)))
1807 return -EMSGSIZE;
1808 break;
1809 }
1810 }
1811
1812 return 0;
1813}