/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#include "flow_netlink.h"

static void update_range__(struct sw_flow_match *match,
			   size_t offset, size_t size, bool is_mask)
{
	struct sw_flow_key_range *range = NULL;
	size_t start = rounddown(offset, sizeof(long));
	size_t end = roundup(offset + size, sizeof(long));

	if (!is_mask)
		range = &match->range;
	else if (match->mask)
		range = &match->mask->range;

	if (!range)
		return;

	if (range->start == range->end) {
		range->start = start;
		range->end = end;
		return;
	}

	if (range->start > start)
		range->start = start;

	if (range->end < end)
		range->end = end;
}

#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
	do { \
		update_range__(match, offsetof(struct sw_flow_key, field), \
			       sizeof((match)->key->field), is_mask); \
		if (is_mask) { \
			if ((match)->mask) \
				(match)->mask->key.field = value; \
		} else { \
			(match)->key->field = value; \
		} \
	} while (0)

#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
	do { \
		update_range__(match, offset, len, is_mask); \
		if (is_mask) \
			memcpy((u8 *)&(match)->mask->key + offset, value_p, \
			       len); \
		else \
			memcpy((u8 *)(match)->key + offset, value_p, len); \
	} while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
	SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
				  value_p, len, is_mask)

#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
	do { \
		update_range__(match, offsetof(struct sw_flow_key, field), \
			       sizeof((match)->key->field), is_mask); \
		if (is_mask) { \
			if ((match)->mask) \
				memset((u8 *)&(match)->mask->key.field, value,\
				       sizeof((match)->mask->key.field)); \
		} else { \
			memset((u8 *)&(match)->key->field, value, \
			       sizeof((match)->key->field)); \
		} \
	} while (0)

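/*
 * Illustrative example (not part of the original file): a caller that has
 * parsed an OVS_KEY_ATTR_PRIORITY attribute would typically store it with
 * SW_FLOW_KEY_PUT(), once for the key and once for the mask:
 *
 *	SW_FLOW_KEY_PUT(match, phy.priority,
 *			nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
 *
 * Besides assigning the field, the macro widens match->range (or
 * match->mask->range) to cover the field, rounded out to sizeof(long)
 * boundaries by update_range__(), so that later flow comparisons only
 * touch the bytes that were actually populated.
 */
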
static bool match_validate(const struct sw_flow_match *match,
			   u64 key_attrs, u64 mask_attrs)
{
	u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

	/* The following mask attributes are allowed only if they
	 * pass the validation tests. */
	mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
			| (1 << OVS_KEY_ATTR_IPV6)
			| (1 << OVS_KEY_ATTR_TCP)
			| (1 << OVS_KEY_ATTR_TCP_FLAGS)
			| (1 << OVS_KEY_ATTR_UDP)
			| (1 << OVS_KEY_ATTR_SCTP)
			| (1 << OVS_KEY_ATTR_ICMP)
			| (1 << OVS_KEY_ATTR_ICMPV6)
			| (1 << OVS_KEY_ATTR_ARP)
			| (1 << OVS_KEY_ATTR_ND));

	/* Always allowed mask fields. */
	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
		       | (1 << OVS_KEY_ATTR_IN_PORT)
		       | (1 << OVS_KEY_ATTR_ETHERTYPE));

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1 << OVS_KEY_ATTR_ARP;
		if (match->mask && (match->mask->key.tp.src == htons(0xff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV6;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;

				if (match->key->tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1 << OVS_KEY_ATTR_ND;
					if (match->mask && (match->mask->key.tp.src == htons(0xffff)))
						mask_allowed |= 1 << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if ((key_attrs & key_expected) != key_expected) {
		/* Key attributes check failed. */
		OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
			  (unsigned long long)key_attrs, (unsigned long long)key_expected);
		return false;
	}

	if ((mask_attrs & mask_allowed) != mask_attrs) {
		/* Mask attributes check failed. */
		OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
			  (unsigned long long)mask_attrs, (unsigned long long)mask_allowed);
		return false;
	}

	return true;
}

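/*
 * Example of the bitmap logic above (illustrative only): for a TCP-over-IPv4
 * key, key_expected accumulates
 *
 *	(1 << OVS_KEY_ATTR_ETHERNET) | (1 << OVS_KEY_ATTR_IPV4) |
 *	(1 << OVS_KEY_ATTR_TCP) | (1 << OVS_KEY_ATTR_TCP_FLAGS)
 *
 * and validation fails unless every one of those bits is also set in
 * key_attrs.  Conversely, a mask may only set bits present in mask_allowed,
 * so e.g. an OVS_KEY_ATTR_TCP mask is rejected unless the ip.proto mask is
 * exact (0xff).
 */
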
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_TCP_FLAGS] = sizeof(__be16),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
	[OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32),
	[OVS_KEY_ATTR_DP_HASH] = sizeof(u32),
	[OVS_KEY_ATTR_TUNNEL] = -1,
};

static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	if (!fp)
		return false;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;

	return true;
}

static int __parse_flow_nlattrs(const struct nlattr *attr,
				const struct nlattr *a[],
				u64 *attrsp, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX) {
			OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
				  type, OVS_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (attrs & (1 << type)) {
			OVS_NLERR("Duplicate key attribute (type %d).\n", type);
			return -EINVAL;
		}

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1) {
			OVS_NLERR("Key attribute has unexpected length (type=%d"
				  ", length=%d, expected=%d).\n", type,
				  nla_len(nla), expected_len);
			return -EINVAL;
		}

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1 << type;
			a[type] = nla;
		}
	}
	if (rem) {
		OVS_NLERR("Message has %d unknown bytes.\n", rem);
		return -EINVAL;
	}

	*attrsp = attrs;
	return 0;
}

static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, true);
}

static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, false);
}

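/*
 * Note on the two wrappers above: both produce a bitmap in *attrsp with one
 * bit per OVS_KEY_ATTR_* type seen.  For masks (nz == true), attributes whose
 * payload is all zeroes are skipped, so a mask that wildcards a field
 * completely simply leaves the corresponding bit clear.  A sketch of the
 * resulting state for a key carrying in_port and Ethernet attributes
 * (illustrative only):
 *
 *	attrs == (1 << OVS_KEY_ATTR_IN_PORT) | (1 << OVS_KEY_ATTR_ETHERNET);
 *	a[OVS_KEY_ATTR_IN_PORT] and a[OVS_KEY_ATTR_ETHERNET] point at the
 *	corresponding attributes inside 'attr'.
 */
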
static int ipv4_tun_from_nlattr(const struct nlattr *attr,
				struct sw_flow_match *match, bool is_mask)
{
	struct nlattr *a;
	int rem;
	bool ttl = false;
	__be16 tun_flags = 0;
	unsigned long opt_key_offset;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
			[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
			[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_TOS] = 1,
			[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
			[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
			[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
			[OVS_TUNNEL_KEY_ATTR_OAM] = 0,
			[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1,
		};

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
				  type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (ovs_tunnel_key_lens[type] != nla_len(a) &&
		    ovs_tunnel_key_lens[type] != -1) {
			OVS_NLERR("IPv4 tunnel attribute type has unexpected "
				  " length (type=%d, length=%d, expected=%d).\n",
				  type, nla_len(a), ovs_tunnel_key_lens[type]);
			return -EINVAL;
		}

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		case OVS_TUNNEL_KEY_ATTR_OAM:
			tun_flags |= TUNNEL_OAM;
			break;
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			tun_flags |= TUNNEL_OPTIONS_PRESENT;
			if (nla_len(a) > sizeof(match->key->tun_opts)) {
				OVS_NLERR("Geneve option length exceeds maximum size (len %d, max %zu).\n",
					  nla_len(a),
					  sizeof(match->key->tun_opts));
				return -EINVAL;
			}

			if (nla_len(a) % 4 != 0) {
				OVS_NLERR("Geneve option length is not a multiple of 4 (len %d).\n",
					  nla_len(a));
				return -EINVAL;
			}

			/* We need to record the length of the options passed
			 * down, otherwise packets with the same format but
			 * additional options will be silently matched.
			 */
			if (!is_mask) {
				SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
						false);
			} else {
				/* This is somewhat unusual because it looks at
				 * both the key and mask while parsing the
				 * attributes (and by extension assumes the key
				 * is parsed first). Normally, we would verify
				 * that each is the correct length and that the
				 * attributes line up in the validate function.
				 * However, that is difficult because this is
				 * variable length and we won't have the
				 * information later.
				 */
				if (match->key->tun_opts_len != nla_len(a)) {
					OVS_NLERR("Geneve option key length (%d) is different from mask length (%d).",
						  match->key->tun_opts_len,
						  nla_len(a));
					return -EINVAL;
				}

				SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff,
						true);
			}

			opt_key_offset = (unsigned long)GENEVE_OPTS(
					  (struct sw_flow_key *)0,
					  nla_len(a));
			SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset,
						  nla_data(a), nla_len(a),
						  is_mask);
			break;
		default:
			OVS_NLERR("Unknown IPv4 tunnel attribute (%d).\n",
				  type);
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);

	if (rem > 0) {
		OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
		return -EINVAL;
	}

	if (!is_mask) {
		if (!match->key->tun_key.ipv4_dst) {
			OVS_NLERR("IPv4 tunnel destination address is zero.\n");
			return -EINVAL;
		}

		if (!ttl) {
			OVS_NLERR("IPv4 tunnel TTL not specified.\n");
			return -EINVAL;
		}
	}

	return 0;
}

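/*
 * For reference, the nested attribute stream parsed above looks roughly like
 * this on the wire (sketch, sizes taken from ovs_tunnel_key_lens):
 *
 *	OVS_KEY_ATTR_TUNNEL (nested)
 *	    OVS_TUNNEL_KEY_ATTR_ID			(u64, optional)
 *	    OVS_TUNNEL_KEY_ATTR_IPV4_SRC		(u32, optional)
 *	    OVS_TUNNEL_KEY_ATTR_IPV4_DST		(u32, required for a key)
 *	    OVS_TUNNEL_KEY_ATTR_TOS			(u8,  optional)
 *	    OVS_TUNNEL_KEY_ATTR_TTL			(u8,  required for a key)
 *	    OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT		(flag, optional)
 *	    OVS_TUNNEL_KEY_ATTR_CSUM			(flag, optional)
 *	    OVS_TUNNEL_KEY_ATTR_OAM			(flag, optional)
 *	    OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS		(variable, multiple of 4)
 *
 * "required for a key" reflects the !is_mask checks at the end of
 * ipv4_tun_from_nlattr(): a zero destination address or a missing TTL is
 * rejected.
 */
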
static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
				const struct ovs_key_ipv4_tunnel *output,
				const struct geneve_opt *tun_opts,
				int swkey_tun_opts_len)
{
	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
		return -EMSGSIZE;
	if (output->ipv4_src &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
		return -EMSGSIZE;
	if (output->ipv4_dst &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
		return -EMSGSIZE;
	if (output->ipv4_tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	if (tun_opts &&
	    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
		    swkey_tun_opts_len, tun_opts))
		return -EMSGSIZE;

	return 0;
}

static int ipv4_tun_to_nlattr(struct sk_buff *skb,
			      const struct ovs_key_ipv4_tunnel *output,
			      const struct geneve_opt *tun_opts,
			      int swkey_tun_opts_len)
{
	struct nlattr *nla;
	int err;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}

static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
				 const struct nlattr **a, bool is_mask)
{
	if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
		u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);

		SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
		u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);

		SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
				nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask)
			in_port = 0xffffffff; /* Always exact match in_port. */
		else if (in_port >= DP_MAX_PORTS)
			return -EINVAL;

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
					 is_mask))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
	}
	return 0;
}

static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
				const struct nlattr **a, bool is_mask)
{
	int err;
	u64 orig_attrs = attrs;

	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
	if (err)
		return err;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				   eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				   eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
	}

	if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
		__be16 tci;

		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (!(tci & htons(VLAN_TAG_PRESENT))) {
			if (is_mask)
				OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
			else
				OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");

			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	} else if (!is_mask)
		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		__be16 eth_type;

		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (is_mask) {
			/* Always exact match EtherType. */
			eth_type = htons(0xffff);
		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
			OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
				  ntohs(eth_type), ETH_P_802_3_MIN);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
				  ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
				  ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}

		if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
			OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n",
				  ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				   ipv6_key->ipv6_src,
				   sizeof(match->key->ipv6.addr.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				   ipv6_key->ipv6_dst,
				   sizeof(match->key->ipv6.addr.dst),
				   is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				   arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				   arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, tp.flags,
					nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
					is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, tp.flags,
					nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
					is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
	}

	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
				   nd_key->nd_target,
				   sizeof(match->key->ipv6.nd.target),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
				   nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				   nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ND);
	}

	if (attrs != 0)
		return -EINVAL;

	return 0;
}

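/*
 * Note how the transport-layer attributes above all funnel into the same
 * key->tp fields; roughly (illustrative summary):
 *
 *	TCP:    tp.src/tp.dst <- tcp_src/tcp_dst, tp.flags <- TCP flags
 *	UDP:    tp.src/tp.dst <- udp_src/udp_dst
 *	SCTP:   tp.src/tp.dst <- sctp_src/sctp_dst
 *	ICMP:   tp.src <- htons(icmp_type),   tp.dst <- htons(icmp_code)
 *	ICMPv6: tp.src <- htons(icmpv6_type), tp.dst <- htons(icmpv6_code)
 *
 * Which interpretation applies is decided by ip.proto, so a mask on tp.src
 * is only meaningful together with an exact-match ip.proto (see
 * match_validate()).
 */
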
static void nlattr_set(struct nlattr *attr, u8 val, bool is_attr_mask_key)
{
	struct nlattr *nla;
	int rem;

	/* The nlattr stream should already have been validated */
	nla_for_each_nested(nla, attr, rem) {
		/* We assume that ovs_key_lens[type] == -1 means that type is a
		 * nested attribute
		 */
		if (is_attr_mask_key && ovs_key_lens[nla_type(nla)] == -1)
			nlattr_set(nla, val, false);
		else
			memset(nla_data(nla), val, nla_len(nla));
	}
}

static void mask_set_nlattr(struct nlattr *attr, u8 val)
{
	nlattr_set(attr, val, true);
}

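/*
 * mask_set_nlattr() is the helper behind the "exact match mask" path in
 * ovs_nla_get_match() below: the key attribute stream is duplicated, every
 * leaf payload is overwritten with 0xff, and the result is fed back through
 * the normal mask-parsing code.  Rough sketch of the transformation
 * (illustrative only):
 *
 *	key:   OVS_KEY_ATTR_PRIORITY = 0x00000007
 *	mask:  OVS_KEY_ATTR_PRIORITY = 0xffffffff
 *
 * Nested attributes (ovs_key_lens[type] == -1) are recursed into rather than
 * overwritten, so the nesting structure survives.
 */
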
/**
 * ovs_nla_get_match - parses Netlink attributes into a flow key and
 * mask. In case the 'mask' is NULL, the flow is treated as an exact match
 * flow. Otherwise, it is treated as a wildcarded flow, except that the mask
 * does not include any don't-care bits.
 * @match: receives the extracted flow match information.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the
 * creation of this flow.
 * @mask: Optional. Netlink attribute holding a nested %OVS_KEY_ATTR_* Netlink
 * attribute sequence that specifies the mask field of the wildcarded flow.
 */
int ovs_nla_get_match(struct sw_flow_match *match,
		      const struct nlattr *key,
		      const struct nlattr *mask)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct nlattr *encap;
	struct nlattr *newmask = NULL;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	bool encap_valid = false;
	int err;

	err = parse_flow_nlattrs(key, a, &key_attrs);
	if (err)
		return err;

	if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
	    (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
	    (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
		__be16 tci;

		if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
		      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
			OVS_NLERR("Invalid Vlan frame.\n");
			return -EINVAL;
		}

		key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		encap = a[OVS_KEY_ATTR_ENCAP];
		key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
		encap_valid = true;

		if (tci & htons(VLAN_TAG_PRESENT)) {
			err = parse_flow_nlattrs(encap, a, &key_attrs);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap)) {
				OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
				return -EINVAL;
			}
		} else {
			OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
			return -EINVAL;
		}
	}

	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
	if (err)
		return err;

	if (match->mask && !mask) {
		/* Create an exact match mask. We need to set to 0xff all the
		 * 'match->mask' fields that have been touched in 'match->key'.
		 * We cannot simply memset 'match->mask', because padding bytes
		 * and fields not specified in 'match->key' should be left to 0.
		 * Instead, we use a stream of netlink attributes, copied from
		 * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care
		 * of filling 'match->mask' appropriately.
		 */
		newmask = kmemdup(key, nla_total_size(nla_len(key)),
				  GFP_KERNEL);
		if (!newmask)
			return -ENOMEM;

		mask_set_nlattr(newmask, 0xff);

		/* The userspace does not send tunnel attributes that are 0,
		 * but we should not wildcard them nonetheless.
		 */
		if (match->key->tun_key.ipv4_dst)
			SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true);

		mask = newmask;
	}

	if (mask) {
		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
		if (err)
			goto free_newmask;

		if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
			__be16 eth_type = 0;
			__be16 tci = 0;

			if (!encap_valid) {
				OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
				err = -EINVAL;
				goto free_newmask;
			}

			mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
			if (a[OVS_KEY_ATTR_ETHERTYPE])
				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

			if (eth_type == htons(0xffff)) {
				mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
				encap = a[OVS_KEY_ATTR_ENCAP];
				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
				if (err)
					goto free_newmask;
			} else {
				OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
					  ntohs(eth_type));
				err = -EINVAL;
				goto free_newmask;
			}

			if (a[OVS_KEY_ATTR_VLAN])
				tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

			if (!(tci & htons(VLAN_TAG_PRESENT))) {
				OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
				err = -EINVAL;
				goto free_newmask;
			}
		}

		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
		if (err)
			goto free_newmask;
	}

	if (!match_validate(match, key_attrs, mask_attrs))
		err = -EINVAL;

free_newmask:
	kfree(newmask);
	return err;
}

/**
 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
 * @key: Receives extracted in_port, priority, tun_key and skb_mark.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */

int ovs_nla_get_flow_metadata(const struct nlattr *attr,
			      struct sw_flow_key *key)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	struct sw_flow_match match;
	u64 attrs = 0;
	int err;

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return -EINVAL;

	memset(&match, 0, sizeof(match));
	match.key = key;

	key->phy.in_port = DP_MAX_PORTS;

	return metadata_from_nlattrs(&match, &attrs, a, false);
}

int ovs_nla_put_flow(const struct sw_flow_key *swkey,
		     const struct sw_flow_key *output, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;
	bool is_mask = (swkey != output);

	if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
		goto nla_put_failure;

	if ((swkey->tun_key.ipv4_dst || is_mask)) {
		const struct geneve_opt *opts = NULL;

		if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
			opts = GENEVE_OPTS(output, swkey->tun_opts_len);

		if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
				       swkey->tun_opts_len))
			goto nla_put_failure;
	}

	if (swkey->phy.in_port == DP_MAX_PORTS) {
		if (is_mask && (output->phy.in_port == 0xffff))
			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
				goto nla_put_failure;
	} else {
		u16 upper_u16;
		upper_u16 = !is_mask ? 0 : 0xffff;

		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
				(upper_u16 << 16) | output->phy.in_port))
			goto nla_put_failure;
	}

	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;

	eth_key = nla_data(nla);
	ether_addr_copy(eth_key->eth_src, output->eth.src);
	ether_addr_copy(eth_key->eth_dst, output->eth.dst);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		__be16 eth_type;
		eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else
		encap = NULL;

	if (swkey->eth.type == htons(ETH_P_802_2)) {
		/*
		 * Ethertype 802.2 is represented in the netlink with omitted
		 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
		 * 0xffff in the mask attribute.  Ethertype can also
		 * be wildcarded.
		 */
		if (is_mask && output->eth.type)
			if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
					 output->eth.type))
				goto nla_put_failure;
		goto unencap;
	}

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
		goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = output->ipv4.addr.src;
		ipv4_key->ipv4_dst = output->ipv4.addr.dst;
		ipv4_key->ipv4_proto = output->ip.proto;
		ipv4_key->ipv4_tos = output->ip.tos;
		ipv4_key->ipv4_ttl = output->ip.ttl;
		ipv4_key->ipv4_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = output->ipv6.label;
		ipv6_key->ipv6_proto = output->ip.proto;
		ipv6_key->ipv6_tclass = output->ip.tos;
		ipv6_key->ipv6_hlimit = output->ip.ttl;
		ipv6_key->ipv6_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = output->ipv4.addr.src;
		arp_key->arp_tip = output->ipv4.addr.dst;
		arp_key->arp_op = htons(output->ip.proto);
		ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
		ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			tcp_key->tcp_src = output->tp.src;
			tcp_key->tcp_dst = output->tp.dst;
			if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
					 output->tp.flags))
				goto nla_put_failure;
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			udp_key->udp_src = output->tp.src;
			udp_key->udp_dst = output->tp.dst;
		} else if (swkey->ip.proto == IPPROTO_SCTP) {
			struct ovs_key_sctp *sctp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
			if (!nla)
				goto nla_put_failure;
			sctp_key = nla_data(nla);
			sctp_key->sctp_src = output->tp.src;
			sctp_key->sctp_dst = output->tp.dst;
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(output->tp.src);
			icmp_key->icmp_code = ntohs(output->tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(output->tp.src);
			icmpv6_key->icmpv6_code = ntohs(output->tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
				ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

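/*
 * The attribute stream emitted by ovs_nla_put_flow() mirrors what
 * ovs_key_from_nlattrs() accepts.  For a plain IPv4/TCP flow the dump is
 * roughly (sketch, payloads omitted):
 *
 *	OVS_KEY_ATTR_RECIRC_ID, OVS_KEY_ATTR_DP_HASH, OVS_KEY_ATTR_PRIORITY,
 *	[OVS_KEY_ATTR_TUNNEL], [OVS_KEY_ATTR_IN_PORT], OVS_KEY_ATTR_SKB_MARK,
 *	OVS_KEY_ATTR_ETHERNET, OVS_KEY_ATTR_ETHERTYPE,
 *	OVS_KEY_ATTR_IPV4, OVS_KEY_ATTR_TCP, OVS_KEY_ATTR_TCP_FLAGS
 *
 * For masks (swkey != output) the same attributes are emitted but filled
 * from the mask, with the special cases handled above (e.g. an exact
 * in_port mask is dumped as 0xffffffff).
 */
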
#define MAX_ACTIONS_BUFSIZE	(32 * 1024)

struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
{
	struct sw_flow_actions *sfa;

	if (size > MAX_ACTIONS_BUFSIZE)
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = 0;
	return sfa;
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
	kfree_rcu(sf_acts, rcu);
}

static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
				       int attr_len)
{

	struct sw_flow_actions *acts;
	int new_acts_size;
	int req_size = NLA_ALIGN(attr_len);
	int next_offset = offsetof(struct sw_flow_actions, actions) +
			  (*sfa)->actions_len;

	if (req_size <= (ksize(*sfa) - next_offset))
		goto out;

	new_acts_size = ksize(*sfa) * 2;

	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
			return ERR_PTR(-EMSGSIZE);
		new_acts_size = MAX_ACTIONS_BUFSIZE;
	}

	acts = ovs_nla_alloc_flow_actions(new_acts_size);
	if (IS_ERR(acts))
		return (void *)acts;

	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
	acts->actions_len = (*sfa)->actions_len;
	kfree(*sfa);
	*sfa = acts;

out:
	(*sfa)->actions_len += req_size;
	return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}

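/*
 * reserve_sfa_size() grows the actions buffer geometrically.  A worked
 * example (illustrative only): if ksize(*sfa) is 256 bytes, the header plus
 * already-copied actions occupy 200 bytes and a 64-byte attribute is
 * requested, then 64 > 256 - 200, so a new buffer of 512 bytes is allocated,
 * the old actions are copied across and actions_len is advanced by
 * NLA_ALIGN(64).  Growth stops at MAX_ACTIONS_BUFSIZE (32 KiB); a request
 * that cannot fit below that cap fails with -EMSGSIZE.
 */
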
static struct nlattr *__add_action(struct sw_flow_actions **sfa,
				   int attrtype, void *data, int len)
{
	struct nlattr *a;

	a = reserve_sfa_size(sfa, nla_attr_size(len));
	if (IS_ERR(a))
		return a;

	a->nla_type = attrtype;
	a->nla_len = nla_attr_size(len);

	if (data)
		memcpy(nla_data(a), data, len);
	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));

	return a;
}

static int add_action(struct sw_flow_actions **sfa, int attrtype,
		      void *data, int len)
{
	struct nlattr *a;

	a = __add_action(sfa, attrtype, data, len);
	if (IS_ERR(a))
		return PTR_ERR(a);

	return 0;
}

static inline int add_nested_action_start(struct sw_flow_actions **sfa,
					  int attrtype)
{
	int used = (*sfa)->actions_len;
	int err;

	err = add_action(sfa, attrtype, NULL, 0);
	if (err)
		return err;

	return used;
}

static inline void add_nested_action_end(struct sw_flow_actions *sfa,
					 int st_offset)
{
	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
					      st_offset);

	a->nla_len = sfa->actions_len - st_offset;
}

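/*
 * The helpers above are used in pairs to build nested attributes inside the
 * copied action list.  validate_and_copy_sample() below is the canonical
 * caller; the pattern is roughly:
 *
 *	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
 *	...add_action()/ovs_nla_copy_actions() for the nested contents...
 *	add_nested_action_end(*sfa, start);
 *
 * add_nested_action_start() records the offset of the (initially empty)
 * container attribute; add_nested_action_end() patches its nla_len once the
 * nested payload is in place.
 */
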
static int validate_and_copy_sample(const struct nlattr *attr,
				    const struct sw_flow_key *key, int depth,
				    struct sw_flow_actions **sfa)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem, start, err, st_acts;

	memset(attrs, 0, sizeof(attrs));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;
		attrs[type] = a;
	}
	if (rem)
		return -EINVAL;

	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
	if (!probability || nla_len(probability) != sizeof(u32))
		return -EINVAL;

	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
		return -EINVAL;

	/* validation done, copy sample action. */
	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
	if (start < 0)
		return start;
	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
			 nla_data(probability), sizeof(u32));
	if (err)
		return err;
	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
	if (st_acts < 0)
		return st_acts;

	err = ovs_nla_copy_actions(actions, key, depth + 1, sfa);
	if (err)
		return err;

	add_nested_action_end(*sfa, st_acts);
	add_nested_action_end(*sfa, start);

	return 0;
}

static int validate_tp_port(const struct sw_flow_key *flow_key)
{
	if ((flow_key->eth.type == htons(ETH_P_IP) ||
	     flow_key->eth.type == htons(ETH_P_IPV6)) &&
	    (flow_key->tp.src || flow_key->tp.dst))
		return 0;

	return -EINVAL;
}

void ovs_match_init(struct sw_flow_match *match,
		    struct sw_flow_key *key,
		    struct sw_flow_mask *mask)
{
	memset(match, 0, sizeof(*match));
	match->key = key;
	match->mask = mask;

	memset(key, 0, sizeof(*key));

	if (mask) {
		memset(&mask->key, 0, sizeof(mask->key));
		mask->range.start = mask->range.end = 0;
	}
}

static int validate_and_copy_set_tun(const struct nlattr *attr,
				     struct sw_flow_actions **sfa)
{
	struct sw_flow_match match;
	struct sw_flow_key key;
	struct ovs_tunnel_info *tun_info;
	struct nlattr *a;
	int err, start;

	ovs_match_init(&match, &key, NULL);
	err = ipv4_tun_from_nlattr(nla_data(attr), &match, false);
	if (err)
		return err;

	if (key.tun_opts_len) {
		struct geneve_opt *option = GENEVE_OPTS(&key,
							key.tun_opts_len);
		int opts_len = key.tun_opts_len;
		bool crit_opt = false;

		while (opts_len > 0) {
			int len;

			if (opts_len < sizeof(*option))
				return -EINVAL;

			len = sizeof(*option) + option->length * 4;
			if (len > opts_len)
				return -EINVAL;

			crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);

			option = (struct geneve_opt *)((u8 *)option + len);
			opts_len -= len;
		};

		key.tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
	};

	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
	if (start < 0)
		return start;

	a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
			 sizeof(*tun_info) + key.tun_opts_len);
	if (IS_ERR(a))
		return PTR_ERR(a);

	tun_info = nla_data(a);
	tun_info->tunnel = key.tun_key;
	tun_info->options_len = key.tun_opts_len;

	if (tun_info->options_len) {
		/* We need to store the options in the action itself since
		 * everything else will go away after flow setup. We can append
		 * it to tun_info and then point there.
		 */
		memcpy((tun_info + 1), GENEVE_OPTS(&key, key.tun_opts_len),
		       key.tun_opts_len);
		tun_info->options = (struct geneve_opt *)(tun_info + 1);
	} else {
		tun_info->options = NULL;
	}

	add_nested_action_end(*sfa, start);

	return err;
}

static int validate_set(const struct nlattr *a,
			const struct sw_flow_key *flow_key,
			struct sw_flow_actions **sfa,
			bool *set_tun)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);

	/* There can be only one key in an action */
	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
		return -EINVAL;

	if (key_type > OVS_KEY_ATTR_MAX ||
	    (ovs_key_lens[key_type] != nla_len(ovs_key) &&
	     ovs_key_lens[key_type] != -1))
		return -EINVAL;

	switch (key_type) {
	const struct ovs_key_ipv4 *ipv4_key;
	const struct ovs_key_ipv6 *ipv6_key;
	int err;

	case OVS_KEY_ATTR_PRIORITY:
	case OVS_KEY_ATTR_SKB_MARK:
	case OVS_KEY_ATTR_ETHERNET:
		break;

	case OVS_KEY_ATTR_TUNNEL:
		*set_tun = true;
		err = validate_and_copy_set_tun(a, sfa);
		if (err)
			return err;
		break;

	case OVS_KEY_ATTR_IPV4:
		if (flow_key->eth.type != htons(ETH_P_IP))
			return -EINVAL;

		if (!flow_key->ip.proto)
			return -EINVAL;

		ipv4_key = nla_data(ovs_key);
		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_IPV6:
		if (flow_key->eth.type != htons(ETH_P_IPV6))
			return -EINVAL;

		if (!flow_key->ip.proto)
			return -EINVAL;

		ipv6_key = nla_data(ovs_key);
		if (ipv6_key->ipv6_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv6_key->ipv6_frag != flow_key->ip.frag)
			return -EINVAL;

		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_TCP:
		if (flow_key->ip.proto != IPPROTO_TCP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	case OVS_KEY_ATTR_UDP:
		if (flow_key->ip.proto != IPPROTO_UDP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	case OVS_KEY_ATTR_SCTP:
		if (flow_key->ip.proto != IPPROTO_SCTP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	default:
		return -EINVAL;
	}

	return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
	};
	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
	int error;

	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
				 attr, userspace_policy);
	if (error)
		return error;

	if (!a[OVS_USERSPACE_ATTR_PID] ||
	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
		return -EINVAL;

	return 0;
}

static int copy_action(const struct nlattr *from,
		       struct sw_flow_actions **sfa)
{
	int totlen = NLA_ALIGN(from->nla_len);
	struct nlattr *to;

	to = reserve_sfa_size(sfa, from->nla_len);
	if (IS_ERR(to))
		return PTR_ERR(to);

	memcpy(to, from, totlen);
	return 0;
}

int ovs_nla_copy_actions(const struct nlattr *attr,
			 const struct sw_flow_key *key,
			 int depth,
			 struct sw_flow_actions **sfa)
{
	const struct nlattr *a;
	int rem, err;

	if (depth >= SAMPLE_ACTION_DEPTH)
		return -EOVERFLOW;

	nla_for_each_nested(a, attr, rem) {
		/* Expected argument lengths, (u32)-1 for variable length. */
		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
			[OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
			[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
		};
		const struct ovs_action_push_vlan *vlan;
		int type = nla_type(a);
		bool skip_copy;

		if (type > OVS_ACTION_ATTR_MAX ||
		    (action_lens[type] != nla_len(a) &&
		     action_lens[type] != (u32)-1))
			return -EINVAL;

		skip_copy = false;
		switch (type) {
		case OVS_ACTION_ATTR_UNSPEC:
			return -EINVAL;

		case OVS_ACTION_ATTR_USERSPACE:
			err = validate_userspace(a);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_HASH: {
			const struct ovs_action_hash *act_hash = nla_data(a);

			switch (act_hash->hash_alg) {
			case OVS_HASH_ALG_L4:
				break;
			default:
				return -EINVAL;
			}

			break;
		}

		case OVS_ACTION_ATTR_POP_VLAN:
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			vlan = nla_data(a);
			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
				return -EINVAL;
			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_RECIRC:
			break;

		case OVS_ACTION_ATTR_SET:
			err = validate_set(a, key, sfa, &skip_copy);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = validate_and_copy_sample(a, key, depth, sfa);
			if (err)
				return err;
			skip_copy = true;
			break;

		default:
			return -EINVAL;
		}
		if (!skip_copy) {
			err = copy_action(a, sfa);
			if (err)
				return err;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}

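/*
 * Recursion note: OVS_ACTION_ATTR_SAMPLE is the only action that nests
 * further actions, and validate_and_copy_sample() calls back into
 * ovs_nla_copy_actions() with depth + 1, so SAMPLE_ACTION_DEPTH bounds how
 * deeply samples may be nested.  A sample nested one level too deep is
 * rejected with -EOVERFLOW before any copying happens at that level.
 */
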
static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
	const struct nlattr *a;
	struct nlattr *start;
	int err = 0, rem;

	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
	if (!start)
		return -EMSGSIZE;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		struct nlattr *st_sample;

		switch (type) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
				    sizeof(u32), nla_data(a)))
				return -EMSGSIZE;
			break;
		case OVS_SAMPLE_ATTR_ACTIONS:
			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
			if (!st_sample)
				return -EMSGSIZE;
			err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
			if (err)
				return err;
			nla_nest_end(skb, st_sample);
			break;
		}
	}

	nla_nest_end(skb, start);
	return err;
}

static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	struct nlattr *start;
	int err;

	switch (key_type) {
	case OVS_KEY_ATTR_TUNNEL_INFO: {
		struct ovs_tunnel_info *tun_info = nla_data(ovs_key);

		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
		if (!start)
			return -EMSGSIZE;

		err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
					 tun_info->options_len ?
						tun_info->options : NULL,
					 tun_info->options_len);
		if (err)
			return err;
		nla_nest_end(skb, start);
		break;
	}
	default:
		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
			return -EMSGSIZE;
		break;
	}

	return 0;
}

int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
{
	const struct nlattr *a;
	int rem, err;

	nla_for_each_attr(a, attr, len, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_ACTION_ATTR_SET:
			err = set_action_to_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample_action_to_attr(a, skb);
			if (err)
				return err;
			break;
		default:
			if (nla_put(skb, type, nla_len(a), nla_data(a)))
				return -EMSGSIZE;
			break;
		}
	}

	return 0;
}