]> git.proxmox.com Git - ovs.git/blame - datapath/flow_netlink.c
tunneling: Track recursion levels across ARP generation.
[ovs.git] / datapath / flow_netlink.c
CommitLineData
a097c0b2 1/*
05499369 2 * Copyright (c) 2007-2014 Nicira, Inc.
a097c0b2
PS
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
0a0857df
JP
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
a097c0b2
PS
21#include <linux/uaccess.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/if_ether.h>
25#include <linux/if_vlan.h>
26#include <net/llc_pdu.h>
27#include <linux/kernel.h>
28#include <linux/jhash.h>
29#include <linux/jiffies.h>
30#include <linux/llc.h>
31#include <linux/module.h>
32#include <linux/in.h>
33#include <linux/rcupdate.h>
34#include <linux/if_arp.h>
35#include <linux/ip.h>
36#include <linux/ipv6.h>
37#include <linux/sctp.h>
38#include <linux/tcp.h>
39#include <linux/udp.h>
40#include <linux/icmp.h>
41#include <linux/icmpv6.h>
42#include <linux/rculist.h>
c1fc1411 43#include <net/geneve.h>
a097c0b2
PS
44#include <net/ip.h>
45#include <net/ipv6.h>
46#include <net/ndisc.h>
2baf0e0c 47#include <net/mpls.h>
a097c0b2 48
2baf0e0c
PS
49#include "datapath.h"
50#include "flow.h"
a097c0b2 51#include "flow_netlink.h"
0c7930a3 52#include "vport-vxlan.h"
a097c0b2 53
ec959cdc
TG
54struct ovs_len_tbl {
55 int len;
56 const struct ovs_len_tbl *next;
57};
58
59#define OVS_ATTR_NESTED -1
60
f3ccd17d
PS
61static void update_range(struct sw_flow_match *match,
62 size_t offset, size_t size, bool is_mask)
a097c0b2 63{
f3ccd17d 64 struct sw_flow_key_range *range;
a097c0b2
PS
65 size_t start = rounddown(offset, sizeof(long));
66 size_t end = roundup(offset + size, sizeof(long));
67
68 if (!is_mask)
69 range = &match->range;
f3ccd17d 70 else
a097c0b2
PS
71 range = &match->mask->range;
72
a097c0b2
PS
73 if (range->start == range->end) {
74 range->start = start;
75 range->end = end;
76 return;
77 }
78
79 if (range->start > start)
80 range->start = start;
81
82 if (range->end < end)
83 range->end = end;
84}
85
86#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
87 do { \
f3ccd17d
PS
88 update_range(match, offsetof(struct sw_flow_key, field), \
89 sizeof((match)->key->field), is_mask); \
90 if (is_mask) \
91 (match)->mask->key.field = value; \
92 else \
a097c0b2 93 (match)->key->field = value; \
a097c0b2
PS
94 } while (0)
95
f3ccd17d
PS
96#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
97 do { \
98 update_range(match, offset, len, is_mask); \
99 if (is_mask) \
100 memcpy((u8 *)&(match)->mask->key + offset, value_p, len);\
101 else \
102 memcpy((u8 *)(match)->key + offset, value_p, len); \
a097c0b2
PS
103 } while (0)
104
f3ccd17d 105#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
c1fc1411
JG
106 SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
107 value_p, len, is_mask)
108
f3ccd17d
PS
109#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
110 do { \
111 update_range(match, offsetof(struct sw_flow_key, field), \
112 sizeof((match)->key->field), is_mask); \
113 if (is_mask) \
114 memset((u8 *)&(match)->mask->key.field, value, \
115 sizeof((match)->mask->key.field)); \
116 else \
0b496cda
DDP
117 memset((u8 *)&(match)->key->field, value, \
118 sizeof((match)->key->field)); \
0b496cda
DDP
119 } while (0)
120
a097c0b2 121static bool match_validate(const struct sw_flow_match *match,
9233cef7 122 u64 key_attrs, u64 mask_attrs, bool log)
a097c0b2
PS
123{
124 u64 key_expected = 1ULL << OVS_KEY_ATTR_ETHERNET;
125 u64 mask_allowed = key_attrs; /* At most allow all key attributes */
126
127 /* The following mask attributes allowed only if they
af465b67
PS
128 * pass the validation tests.
129 */
a097c0b2
PS
130 mask_allowed &= ~((1ULL << OVS_KEY_ATTR_IPV4)
131 | (1ULL << OVS_KEY_ATTR_IPV6)
132 | (1ULL << OVS_KEY_ATTR_TCP)
dc235f7f 133 | (1ULL << OVS_KEY_ATTR_TCP_FLAGS)
a097c0b2
PS
134 | (1ULL << OVS_KEY_ATTR_UDP)
135 | (1ULL << OVS_KEY_ATTR_SCTP)
136 | (1ULL << OVS_KEY_ATTR_ICMP)
137 | (1ULL << OVS_KEY_ATTR_ICMPV6)
138 | (1ULL << OVS_KEY_ATTR_ARP)
ccf43786
SH
139 | (1ULL << OVS_KEY_ATTR_ND)
140 | (1ULL << OVS_KEY_ATTR_MPLS));
a097c0b2
PS
141
142 /* Always allowed mask fields. */
143 mask_allowed |= ((1ULL << OVS_KEY_ATTR_TUNNEL)
144 | (1ULL << OVS_KEY_ATTR_IN_PORT)
145 | (1ULL << OVS_KEY_ATTR_ETHERTYPE));
146
147 /* Check key attributes. */
148 if (match->key->eth.type == htons(ETH_P_ARP)
149 || match->key->eth.type == htons(ETH_P_RARP)) {
150 key_expected |= 1ULL << OVS_KEY_ATTR_ARP;
151 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
152 mask_allowed |= 1ULL << OVS_KEY_ATTR_ARP;
153 }
154
ccf43786
SH
155 if (eth_p_mpls(match->key->eth.type)) {
156 key_expected |= 1ULL << OVS_KEY_ATTR_MPLS;
157 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
158 mask_allowed |= 1ULL << OVS_KEY_ATTR_MPLS;
159 }
160
a097c0b2
PS
161 if (match->key->eth.type == htons(ETH_P_IP)) {
162 key_expected |= 1ULL << OVS_KEY_ATTR_IPV4;
163 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
164 mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV4;
165
166 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
167 if (match->key->ip.proto == IPPROTO_UDP) {
168 key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
169 if (match->mask && (match->mask->key.ip.proto == 0xff))
170 mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
171 }
172
173 if (match->key->ip.proto == IPPROTO_SCTP) {
174 key_expected |= 1ULL << OVS_KEY_ATTR_SCTP;
175 if (match->mask && (match->mask->key.ip.proto == 0xff))
176 mask_allowed |= 1ULL << OVS_KEY_ATTR_SCTP;
177 }
178
179 if (match->key->ip.proto == IPPROTO_TCP) {
180 key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
dc235f7f
JR
181 key_expected |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
182 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
a097c0b2 183 mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
dc235f7f
JR
184 mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
185 }
a097c0b2
PS
186 }
187
188 if (match->key->ip.proto == IPPROTO_ICMP) {
189 key_expected |= 1ULL << OVS_KEY_ATTR_ICMP;
190 if (match->mask && (match->mask->key.ip.proto == 0xff))
191 mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMP;
192 }
193 }
194 }
195
196 if (match->key->eth.type == htons(ETH_P_IPV6)) {
197 key_expected |= 1ULL << OVS_KEY_ATTR_IPV6;
198 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
199 mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV6;
200
201 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
202 if (match->key->ip.proto == IPPROTO_UDP) {
203 key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
204 if (match->mask && (match->mask->key.ip.proto == 0xff))
205 mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
206 }
207
208 if (match->key->ip.proto == IPPROTO_SCTP) {
209 key_expected |= 1ULL << OVS_KEY_ATTR_SCTP;
210 if (match->mask && (match->mask->key.ip.proto == 0xff))
211 mask_allowed |= 1ULL << OVS_KEY_ATTR_SCTP;
212 }
213
214 if (match->key->ip.proto == IPPROTO_TCP) {
215 key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
dc235f7f
JR
216 key_expected |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
217 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
a097c0b2 218 mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
dc235f7f
JR
219 mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
220 }
a097c0b2
PS
221 }
222
223 if (match->key->ip.proto == IPPROTO_ICMPV6) {
224 key_expected |= 1ULL << OVS_KEY_ATTR_ICMPV6;
225 if (match->mask && (match->mask->key.ip.proto == 0xff))
226 mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMPV6;
227
708fb4c5 228 if (match->key->tp.src ==
a097c0b2 229 htons(NDISC_NEIGHBOUR_SOLICITATION) ||
708fb4c5 230 match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
a097c0b2 231 key_expected |= 1ULL << OVS_KEY_ATTR_ND;
560f3099 232 if (match->mask && (match->mask->key.tp.src == htons(0xff)))
a097c0b2
PS
233 mask_allowed |= 1ULL << OVS_KEY_ATTR_ND;
234 }
235 }
236 }
237 }
238
239 if ((key_attrs & key_expected) != key_expected) {
240 /* Key attributes check failed. */
7d16c847 241 OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
9233cef7
JR
242 (unsigned long long)key_attrs,
243 (unsigned long long)key_expected);
a097c0b2
PS
244 return false;
245 }
246
247 if ((mask_attrs & mask_allowed) != mask_attrs) {
248 /* Mask attributes check failed. */
7d16c847 249 OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
9233cef7
JR
250 (unsigned long long)mask_attrs,
251 (unsigned long long)mask_allowed);
a097c0b2
PS
252 return false;
253 }
254
255 return true;
256}
257
8b7ea2d4
WZ
258size_t ovs_tun_key_attr_size(void)
259{
260 /* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
af465b67
PS
261 * updating this function.
262 */
8b7ea2d4
WZ
263 return nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */
264 + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
265 + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
266 + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */
267 + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
268 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
269 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
270 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
271 + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
0c7930a3
TG
272 /* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
273 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
274 */
8b7ea2d4
WZ
275 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
276 + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
277}
278
4e25b8c1
JS
279size_t ovs_key_attr_size(void)
280{
281 /* Whenever adding new OVS_KEY_ FIELDS, we should consider
af465b67
PS
282 * updating this function.
283 */
4e25b8c1
JS
284 BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);
285
286 return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
287 + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
8b7ea2d4 288 + ovs_tun_key_attr_size()
4e25b8c1
JS
289 + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
290 + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
291 + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */
292 + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */
293 + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
294 + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
295 + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */
296 + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */
297 + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
298 + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */
299 + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */
300 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
301}
302
ec959cdc
TG
303static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
304 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) },
305 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) },
306 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = sizeof(u32) },
307 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
308 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
309 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
310 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
311 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) },
312 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) },
313 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
314 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_NESTED },
0c7930a3 315 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED },
a097c0b2
PS
316};
317
ec959cdc
TG
318/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
319static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
320 [OVS_KEY_ATTR_ENCAP] = { .len = OVS_ATTR_NESTED },
321 [OVS_KEY_ATTR_PRIORITY] = { .len = sizeof(u32) },
322 [OVS_KEY_ATTR_IN_PORT] = { .len = sizeof(u32) },
323 [OVS_KEY_ATTR_SKB_MARK] = { .len = sizeof(u32) },
324 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
325 [OVS_KEY_ATTR_VLAN] = { .len = sizeof(__be16) },
326 [OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
327 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
328 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
329 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
330 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
331 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
332 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
333 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
334 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
335 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
336 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
337 [OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
338 [OVS_KEY_ATTR_DP_HASH] = { .len = sizeof(u32) },
339 [OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
340 .next = ovs_tunnel_key_lens, },
341 [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) },
342};
a0fb56c1 343
a097c0b2
PS
344static bool is_all_zero(const u8 *fp, size_t size)
345{
346 int i;
347
348 if (!fp)
349 return false;
350
351 for (i = 0; i < size; i++)
352 if (fp[i])
353 return false;
354
355 return true;
356}
357
358static int __parse_flow_nlattrs(const struct nlattr *attr,
359 const struct nlattr *a[],
9233cef7 360 u64 *attrsp, bool log, bool nz)
a097c0b2
PS
361{
362 const struct nlattr *nla;
363 u64 attrs;
364 int rem;
365
366 attrs = *attrsp;
367 nla_for_each_nested(nla, attr, rem) {
368 u16 type = nla_type(nla);
369 int expected_len;
370
371 if (type > OVS_KEY_ATTR_MAX) {
7d16c847 372 OVS_NLERR(log, "Key type %d is out of range max %d",
a097c0b2
PS
373 type, OVS_KEY_ATTR_MAX);
374 return -EINVAL;
375 }
376
377 if (attrs & (1ULL << type)) {
7d16c847 378 OVS_NLERR(log, "Duplicate key (type %d).", type);
a097c0b2
PS
379 return -EINVAL;
380 }
381
ec959cdc
TG
382 expected_len = ovs_key_lens[type].len;
383 if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) {
7d16c847
PS
384 OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
385 type, nla_len(nla), expected_len);
a097c0b2
PS
386 return -EINVAL;
387 }
388
389 if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
390 attrs |= 1ULL << type;
391 a[type] = nla;
392 }
393 }
394 if (rem) {
9233cef7 395 OVS_NLERR(log, "Message has %d unknown bytes.", rem);
a097c0b2
PS
396 return -EINVAL;
397 }
398
399 *attrsp = attrs;
400 return 0;
401}
402
403static int parse_flow_mask_nlattrs(const struct nlattr *attr,
9233cef7
JR
404 const struct nlattr *a[], u64 *attrsp,
405 bool log)
a097c0b2 406{
9233cef7 407 return __parse_flow_nlattrs(attr, a, attrsp, log, true);
a097c0b2
PS
408}
409
410static int parse_flow_nlattrs(const struct nlattr *attr,
9233cef7
JR
411 const struct nlattr *a[], u64 *attrsp,
412 bool log)
a097c0b2 413{
9233cef7 414 return __parse_flow_nlattrs(attr, a, attrsp, log, false);
a097c0b2
PS
415}
416
7d16c847
PS
417static int genev_tun_opt_from_nlattr(const struct nlattr *a,
418 struct sw_flow_match *match, bool is_mask,
419 bool log)
420{
421 unsigned long opt_key_offset;
422
423 if (nla_len(a) > sizeof(match->key->tun_opts)) {
424 OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
425 nla_len(a), sizeof(match->key->tun_opts));
426 return -EINVAL;
427 }
428
429 if (nla_len(a) % 4 != 0) {
430 OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
431 nla_len(a));
432 return -EINVAL;
433 }
434
435 /* We need to record the length of the options passed
436 * down, otherwise packets with the same format but
437 * additional options will be silently matched.
438 */
439 if (!is_mask) {
440 SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
441 false);
442 } else {
443 /* This is somewhat unusual because it looks at
444 * both the key and mask while parsing the
445 * attributes (and by extension assumes the key
446 * is parsed first). Normally, we would verify
447 * that each is the correct length and that the
448 * attributes line up in the validate function.
449 * However, that is difficult because this is
450 * variable length and we won't have the
451 * information later.
452 */
453 if (match->key->tun_opts_len != nla_len(a)) {
454 OVS_NLERR(log, "Geneve option len %d != mask len %d",
455 match->key->tun_opts_len, nla_len(a));
456 return -EINVAL;
457 }
458
459 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
460 }
461
4b163224 462 opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
7d16c847
PS
463 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
464 nla_len(a), is_mask);
465 return 0;
466}
467
0c7930a3
TG
468static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = {
469 [OVS_VXLAN_EXT_GBP] = { .type = NLA_U32 },
470};
471
472static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
473 struct sw_flow_match *match, bool is_mask,
474 bool log)
475{
476 struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
477 unsigned long opt_key_offset;
478 struct ovs_vxlan_opts opts;
479 int err;
480
481 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
482
483 err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy);
484 if (err < 0)
485 return err;
486
487 memset(&opts, 0, sizeof(opts));
488
489 if (tb[OVS_VXLAN_EXT_GBP])
490 opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]);
491
492 if (!is_mask)
493 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
494 else
495 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
496
497 opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
498 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
499 is_mask);
500 return 0;
501}
502
a097c0b2 503static int ipv4_tun_from_nlattr(const struct nlattr *attr,
9233cef7
JR
504 struct sw_flow_match *match, bool is_mask,
505 bool log)
a097c0b2
PS
506{
507 struct nlattr *a;
508 int rem;
509 bool ttl = false;
510 __be16 tun_flags = 0;
0c7930a3 511 int opts_type = 0;
a097c0b2
PS
512
513 nla_for_each_nested(a, attr, rem) {
514 int type = nla_type(a);
7d16c847
PS
515 int err;
516
a097c0b2 517 if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
7d16c847 518 OVS_NLERR(log, "Tunnel attr %d out of range max %d",
9233cef7 519 type, OVS_TUNNEL_KEY_ATTR_MAX);
a097c0b2
PS
520 return -EINVAL;
521 }
522
ec959cdc
TG
523 if (ovs_tunnel_key_lens[type].len != nla_len(a) &&
524 ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) {
7d16c847 525 OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
ec959cdc 526 type, nla_len(a), ovs_tunnel_key_lens[type].len);
a097c0b2
PS
527 return -EINVAL;
528 }
529
530 switch (type) {
531 case OVS_TUNNEL_KEY_ATTR_ID:
532 SW_FLOW_KEY_PUT(match, tun_key.tun_id,
533 nla_get_be64(a), is_mask);
534 tun_flags |= TUNNEL_KEY;
535 break;
536 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
537 SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
dd693f9b 538 nla_get_in_addr(a), is_mask);
a097c0b2
PS
539 break;
540 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
541 SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
dd693f9b 542 nla_get_in_addr(a), is_mask);
a097c0b2
PS
543 break;
544 case OVS_TUNNEL_KEY_ATTR_TOS:
545 SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
546 nla_get_u8(a), is_mask);
547 break;
548 case OVS_TUNNEL_KEY_ATTR_TTL:
549 SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
550 nla_get_u8(a), is_mask);
551 ttl = true;
552 break;
553 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
554 tun_flags |= TUNNEL_DONT_FRAGMENT;
555 break;
556 case OVS_TUNNEL_KEY_ATTR_CSUM:
557 tun_flags |= TUNNEL_CSUM;
558 break;
8b7ea2d4
WZ
559 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
560 SW_FLOW_KEY_PUT(match, tun_key.tp_src,
561 nla_get_be16(a), is_mask);
562 break;
563 case OVS_TUNNEL_KEY_ATTR_TP_DST:
564 SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
565 nla_get_be16(a), is_mask);
566 break;
94872594
JG
567 case OVS_TUNNEL_KEY_ATTR_OAM:
568 tun_flags |= TUNNEL_OAM;
569 break;
c1fc1411 570 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
0c7930a3
TG
571 if (opts_type) {
572 OVS_NLERR(log, "Multiple metadata blocks provided");
573 return -EINVAL;
574 }
575
7d16c847
PS
576 err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
577 if (err)
578 return err;
c1fc1411 579
0c7930a3
TG
580 tun_flags |= TUNNEL_GENEVE_OPT;
581 opts_type = type;
582 break;
583 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
584 if (opts_type) {
585 OVS_NLERR(log, "Multiple metadata blocks provided");
586 return -EINVAL;
587 }
588
589 err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
590 if (err)
591 return err;
592
593 tun_flags |= TUNNEL_VXLAN_OPT;
594 opts_type = type;
c1fc1411 595 break;
a097c0b2 596 default:
7d16c847 597 OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
9233cef7 598 type);
a097c0b2
PS
599 return -EINVAL;
600 }
601 }
602
603 SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
604
605 if (rem > 0) {
9233cef7
JR
606 OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.",
607 rem);
a097c0b2
PS
608 return -EINVAL;
609 }
610
611 if (!is_mask) {
612 if (!match->key->tun_key.ipv4_dst) {
7d16c847 613 OVS_NLERR(log, "IPv4 tunnel dst address is zero");
a097c0b2
PS
614 return -EINVAL;
615 }
616
617 if (!ttl) {
9233cef7 618 OVS_NLERR(log, "IPv4 tunnel TTL not specified.");
a097c0b2
PS
619 return -EINVAL;
620 }
621 }
622
0c7930a3
TG
623 return opts_type;
624}
625
626static int vxlan_opt_to_nlattr(struct sk_buff *skb,
627 const void *tun_opts, int swkey_tun_opts_len)
628{
629 const struct ovs_vxlan_opts *opts = tun_opts;
630 struct nlattr *nla;
631
632 nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
633 if (!nla)
634 return -EMSGSIZE;
635
636 if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
637 return -EMSGSIZE;
638
639 nla_nest_end(skb, nla);
a097c0b2
PS
640 return 0;
641}
642
8b7ea2d4
WZ
643static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
644 const struct ovs_key_ipv4_tunnel *output,
4b163224 645 const void *tun_opts, int swkey_tun_opts_len)
a097c0b2 646{
a097c0b2
PS
647 if (output->tun_flags & TUNNEL_KEY &&
648 nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
649 return -EMSGSIZE;
650 if (output->ipv4_src &&
dd693f9b 651 nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
a097c0b2
PS
652 return -EMSGSIZE;
653 if (output->ipv4_dst &&
dd693f9b 654 nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
a097c0b2
PS
655 return -EMSGSIZE;
656 if (output->ipv4_tos &&
7d16c847 657 nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
a097c0b2
PS
658 return -EMSGSIZE;
659 if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
660 return -EMSGSIZE;
661 if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
7d16c847 662 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
a097c0b2
PS
663 return -EMSGSIZE;
664 if ((output->tun_flags & TUNNEL_CSUM) &&
7d16c847 665 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
a097c0b2 666 return -EMSGSIZE;
8b7ea2d4 667 if (output->tp_src &&
7d16c847 668 nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
8b7ea2d4
WZ
669 return -EMSGSIZE;
670 if (output->tp_dst &&
7d16c847 671 nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
8b7ea2d4 672 return -EMSGSIZE;
94872594 673 if ((output->tun_flags & TUNNEL_OAM) &&
7d16c847 674 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
94872594 675 return -EMSGSIZE;
0c7930a3
TG
676 if (tun_opts) {
677 if (output->tun_flags & TUNNEL_GENEVE_OPT &&
678 nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
679 swkey_tun_opts_len, tun_opts))
680 return -EMSGSIZE;
681 else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
682 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
683 return -EMSGSIZE;
684 }
a097c0b2 685
8b7ea2d4
WZ
686 return 0;
687}
688
8b7ea2d4
WZ
689static int ipv4_tun_to_nlattr(struct sk_buff *skb,
690 const struct ovs_key_ipv4_tunnel *output,
4b163224 691 const void *tun_opts, int swkey_tun_opts_len)
8b7ea2d4
WZ
692{
693 struct nlattr *nla;
694 int err;
695
696 nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
697 if (!nla)
698 return -EMSGSIZE;
699
700 err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
701 if (err)
702 return err;
703
a097c0b2
PS
704 nla_nest_end(skb, nla);
705 return 0;
706}
707
8b7ea2d4
WZ
708int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
709 const struct ovs_tunnel_info *egress_tun_info)
710{
711 return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
712 egress_tun_info->options,
713 egress_tun_info->options_len);
714}
a097c0b2
PS
715
716static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
9233cef7
JR
717 const struct nlattr **a, bool is_mask,
718 bool log)
a097c0b2 719{
7804df20
AZ
720 if (*attrs & (1ULL << OVS_KEY_ATTR_DP_HASH)) {
721 u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
722
723 SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
724 *attrs &= ~(1ULL << OVS_KEY_ATTR_DP_HASH);
725 }
726
a6059080
AZ
727 if (*attrs & (1ULL << OVS_KEY_ATTR_RECIRC_ID)) {
728 u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);
729
730 SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
731 *attrs &= ~(1ULL << OVS_KEY_ATTR_RECIRC_ID);
732 }
733
a097c0b2
PS
734 if (*attrs & (1ULL << OVS_KEY_ATTR_PRIORITY)) {
735 SW_FLOW_KEY_PUT(match, phy.priority,
736 nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
737 *attrs &= ~(1ULL << OVS_KEY_ATTR_PRIORITY);
738 }
739
740 if (*attrs & (1ULL << OVS_KEY_ATTR_IN_PORT)) {
741 u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
742
a473df5b 743 if (is_mask) {
a097c0b2 744 in_port = 0xffffffff; /* Always exact match in_port. */
a473df5b 745 } else if (in_port >= DP_MAX_PORTS) {
7d16c847 746 OVS_NLERR(log, "Port %d exceeds max allowable %d",
a473df5b 747 in_port, DP_MAX_PORTS);
a097c0b2 748 return -EINVAL;
a473df5b 749 }
a097c0b2
PS
750
751 SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
752 *attrs &= ~(1ULL << OVS_KEY_ATTR_IN_PORT);
753 } else if (!is_mask) {
754 SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
755 }
756
757 if (*attrs & (1ULL << OVS_KEY_ATTR_SKB_MARK)) {
758 uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
759
760 SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
761 *attrs &= ~(1ULL << OVS_KEY_ATTR_SKB_MARK);
762 }
763 if (*attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
764 if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
0c7930a3 765 is_mask, log) < 0)
a097c0b2
PS
766 return -EINVAL;
767 *attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);
768 }
769 return 0;
770}
771
df65fec1 772static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
9233cef7
JR
773 const struct nlattr **a, bool is_mask,
774 bool log)
a097c0b2
PS
775{
776 int err;
a097c0b2 777
9233cef7 778 err = metadata_from_nlattrs(match, &attrs, a, is_mask, log);
a097c0b2
PS
779 if (err)
780 return err;
781
782 if (attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) {
783 const struct ovs_key_ethernet *eth_key;
784
785 eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
786 SW_FLOW_KEY_MEMCPY(match, eth.src,
787 eth_key->eth_src, ETH_ALEN, is_mask);
788 SW_FLOW_KEY_MEMCPY(match, eth.dst,
789 eth_key->eth_dst, ETH_ALEN, is_mask);
790 attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERNET);
791 }
792
793 if (attrs & (1ULL << OVS_KEY_ATTR_VLAN)) {
794 __be16 tci;
795
796 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
797 if (!(tci & htons(VLAN_TAG_PRESENT))) {
798 if (is_mask)
7d16c847 799 OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.");
a097c0b2 800 else
7d16c847 801 OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set.");
a097c0b2
PS
802
803 return -EINVAL;
804 }
805
806 SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
807 attrs &= ~(1ULL << OVS_KEY_ATTR_VLAN);
f3ccd17d 808 }
a097c0b2
PS
809
810 if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
811 __be16 eth_type;
812
813 eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
814 if (is_mask) {
815 /* Always exact match EtherType. */
816 eth_type = htons(0xffff);
935fc582 817 } else if (!eth_proto_is_802_3(eth_type)) {
7d16c847 818 OVS_NLERR(log, "EtherType %x is less than min %x",
9233cef7 819 ntohs(eth_type), ETH_P_802_3_MIN);
a097c0b2
PS
820 return -EINVAL;
821 }
822
823 SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
824 attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
825 } else if (!is_mask) {
826 SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
827 }
828
829 if (attrs & (1ULL << OVS_KEY_ATTR_IPV4)) {
830 const struct ovs_key_ipv4 *ipv4_key;
831
832 ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
833 if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
7d16c847 834 OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
9233cef7 835 ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
a097c0b2
PS
836 return -EINVAL;
837 }
838 SW_FLOW_KEY_PUT(match, ip.proto,
839 ipv4_key->ipv4_proto, is_mask);
840 SW_FLOW_KEY_PUT(match, ip.tos,
841 ipv4_key->ipv4_tos, is_mask);
842 SW_FLOW_KEY_PUT(match, ip.ttl,
843 ipv4_key->ipv4_ttl, is_mask);
844 SW_FLOW_KEY_PUT(match, ip.frag,
845 ipv4_key->ipv4_frag, is_mask);
846 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
847 ipv4_key->ipv4_src, is_mask);
848 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
849 ipv4_key->ipv4_dst, is_mask);
850 attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4);
851 }
852
853 if (attrs & (1ULL << OVS_KEY_ATTR_IPV6)) {
854 const struct ovs_key_ipv6 *ipv6_key;
855
856 ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
857 if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
7d16c847 858 OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
9233cef7 859 ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
a097c0b2
PS
860 return -EINVAL;
861 }
a0fb56c1 862
65721038 863 if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
9233cef7
JR
864 OVS_NLERR(log,
865 "Invalid IPv6 flow label value (value=%x, max=%x).",
67782539
JR
866 ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
867 return -EINVAL;
868 }
a0fb56c1 869
a097c0b2
PS
870 SW_FLOW_KEY_PUT(match, ipv6.label,
871 ipv6_key->ipv6_label, is_mask);
872 SW_FLOW_KEY_PUT(match, ip.proto,
873 ipv6_key->ipv6_proto, is_mask);
874 SW_FLOW_KEY_PUT(match, ip.tos,
875 ipv6_key->ipv6_tclass, is_mask);
876 SW_FLOW_KEY_PUT(match, ip.ttl,
877 ipv6_key->ipv6_hlimit, is_mask);
878 SW_FLOW_KEY_PUT(match, ip.frag,
879 ipv6_key->ipv6_frag, is_mask);
880 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
881 ipv6_key->ipv6_src,
882 sizeof(match->key->ipv6.addr.src),
883 is_mask);
884 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
885 ipv6_key->ipv6_dst,
886 sizeof(match->key->ipv6.addr.dst),
887 is_mask);
888
889 attrs &= ~(1ULL << OVS_KEY_ATTR_IPV6);
890 }
891
892 if (attrs & (1ULL << OVS_KEY_ATTR_ARP)) {
893 const struct ovs_key_arp *arp_key;
894
895 arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
896 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
9233cef7 897 OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
a097c0b2
PS
898 arp_key->arp_op);
899 return -EINVAL;
900 }
901
902 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
903 arp_key->arp_sip, is_mask);
904 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
905 arp_key->arp_tip, is_mask);
906 SW_FLOW_KEY_PUT(match, ip.proto,
907 ntohs(arp_key->arp_op), is_mask);
908 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
909 arp_key->arp_sha, ETH_ALEN, is_mask);
910 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
911 arp_key->arp_tha, ETH_ALEN, is_mask);
912
913 attrs &= ~(1ULL << OVS_KEY_ATTR_ARP);
914 }
915
ccf43786
SH
916 if (attrs & (1ULL << OVS_KEY_ATTR_MPLS)) {
917 const struct ovs_key_mpls *mpls_key;
918
919 mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
920 SW_FLOW_KEY_PUT(match, mpls.top_lse,
921 mpls_key->mpls_lse, is_mask);
922
923 attrs &= ~(1ULL << OVS_KEY_ATTR_MPLS);
62974663 924 }
ccf43786 925
a097c0b2
PS
926 if (attrs & (1ULL << OVS_KEY_ATTR_TCP)) {
927 const struct ovs_key_tcp *tcp_key;
928
929 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
708fb4c5
JR
930 SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
931 SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
a097c0b2
PS
932 attrs &= ~(1ULL << OVS_KEY_ATTR_TCP);
933 }
934
dc235f7f 935 if (attrs & (1ULL << OVS_KEY_ATTR_TCP_FLAGS)) {
cab29271
JS
936 SW_FLOW_KEY_PUT(match, tp.flags,
937 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
938 is_mask);
dc235f7f
JR
939 attrs &= ~(1ULL << OVS_KEY_ATTR_TCP_FLAGS);
940 }
941
a097c0b2
PS
942 if (attrs & (1ULL << OVS_KEY_ATTR_UDP)) {
943 const struct ovs_key_udp *udp_key;
944
945 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
708fb4c5
JR
946 SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
947 SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
a097c0b2
PS
948 attrs &= ~(1ULL << OVS_KEY_ATTR_UDP);
949 }
950
951 if (attrs & (1ULL << OVS_KEY_ATTR_SCTP)) {
952 const struct ovs_key_sctp *sctp_key;
953
954 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
708fb4c5
JR
955 SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
956 SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
a097c0b2
PS
957 attrs &= ~(1ULL << OVS_KEY_ATTR_SCTP);
958 }
959
960 if (attrs & (1ULL << OVS_KEY_ATTR_ICMP)) {
961 const struct ovs_key_icmp *icmp_key;
962
963 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
708fb4c5 964 SW_FLOW_KEY_PUT(match, tp.src,
a097c0b2 965 htons(icmp_key->icmp_type), is_mask);
708fb4c5 966 SW_FLOW_KEY_PUT(match, tp.dst,
a097c0b2
PS
967 htons(icmp_key->icmp_code), is_mask);
968 attrs &= ~(1ULL << OVS_KEY_ATTR_ICMP);
969 }
970
971 if (attrs & (1ULL << OVS_KEY_ATTR_ICMPV6)) {
972 const struct ovs_key_icmpv6 *icmpv6_key;
973
974 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
708fb4c5 975 SW_FLOW_KEY_PUT(match, tp.src,
a097c0b2 976 htons(icmpv6_key->icmpv6_type), is_mask);
708fb4c5 977 SW_FLOW_KEY_PUT(match, tp.dst,
a097c0b2
PS
978 htons(icmpv6_key->icmpv6_code), is_mask);
979 attrs &= ~(1ULL << OVS_KEY_ATTR_ICMPV6);
980 }
981
982 if (attrs & (1ULL << OVS_KEY_ATTR_ND)) {
983 const struct ovs_key_nd *nd_key;
984
985 nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
986 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
987 nd_key->nd_target,
988 sizeof(match->key->ipv6.nd.target),
989 is_mask);
990 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
991 nd_key->nd_sll, ETH_ALEN, is_mask);
992 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
993 nd_key->nd_tll, ETH_ALEN, is_mask);
994 attrs &= ~(1ULL << OVS_KEY_ATTR_ND);
995 }
996
a473df5b 997 if (attrs != 0) {
7d16c847 998 OVS_NLERR(log, "Unknown key attributes %llx",
a473df5b 999 (unsigned long long)attrs);
a097c0b2 1000 return -EINVAL;
a473df5b 1001 }
a097c0b2
PS
1002
1003 return 0;
1004}
1005
ec959cdc
TG
1006static void nlattr_set(struct nlattr *attr, u8 val,
1007 const struct ovs_len_tbl *tbl)
a097c0b2 1008{
62974663
DDP
1009 struct nlattr *nla;
1010 int rem;
a097c0b2 1011
62974663
DDP
1012 /* The nlattr stream should already have been validated */
1013 nla_for_each_nested(nla, attr, rem) {
ec959cdc
TG
1014 if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
1015 nlattr_set(nla, val, tbl[nla_type(nla)].next);
62974663
DDP
1016 else
1017 memset(nla_data(nla), val, nla_len(nla));
1018 }
1019}
1020
1021static void mask_set_nlattr(struct nlattr *attr, u8 val)
1022{
ec959cdc 1023 nlattr_set(attr, val, ovs_key_lens);
a097c0b2
PS
1024}
1025
1026/**
1027 * ovs_nla_get_match - parses Netlink attributes into a flow key and
1028 * mask. In case the 'mask' is NULL, the flow is treated as exact match
1029 * flow. Otherwise, it is treated as a wildcarded flow, except the mask
1030 * does not include any don't care bit.
1031 * @match: receives the extracted flow match information.
1032 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
1033 * sequence. The fields should of the packet that triggered the creation
1034 * of this flow.
1035 * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
1036 * attribute specifies the mask field of the wildcarded flow.
9233cef7
JR
1037 * @log: Boolean to allow kernel error logging. Normally true, but when
1038 * probing for feature compatibility this should be passed in as false to
1039 * suppress unnecessary error logging.
a097c0b2
PS
1040 */
1041int ovs_nla_get_match(struct sw_flow_match *match,
f3ccd17d 1042 const struct nlattr *nla_key,
9233cef7
JR
1043 const struct nlattr *nla_mask,
1044 bool log)
a097c0b2
PS
1045{
1046 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
1047 const struct nlattr *encap;
62974663 1048 struct nlattr *newmask = NULL;
a097c0b2
PS
1049 u64 key_attrs = 0;
1050 u64 mask_attrs = 0;
1051 bool encap_valid = false;
1052 int err;
1053
9233cef7 1054 err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
a097c0b2
PS
1055 if (err)
1056 return err;
1057
1058 if ((key_attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) &&
1059 (key_attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) &&
1060 (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
1061 __be16 tci;
1062
1063 if (!((key_attrs & (1ULL << OVS_KEY_ATTR_VLAN)) &&
1064 (key_attrs & (1ULL << OVS_KEY_ATTR_ENCAP)))) {
9233cef7 1065 OVS_NLERR(log, "Invalid Vlan frame.");
a097c0b2
PS
1066 return -EINVAL;
1067 }
1068
1069 key_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
1070 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
1071 encap = a[OVS_KEY_ATTR_ENCAP];
1072 key_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
1073 encap_valid = true;
1074
1075 if (tci & htons(VLAN_TAG_PRESENT)) {
9233cef7 1076 err = parse_flow_nlattrs(encap, a, &key_attrs, log);
a097c0b2
PS
1077 if (err)
1078 return err;
1079 } else if (!tci) {
1080 /* Corner case for truncated 802.1Q header. */
1081 if (nla_len(encap)) {
7d16c847 1082 OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute.");
a097c0b2
PS
1083 return -EINVAL;
1084 }
1085 } else {
7d16c847 1086 OVS_NLERR(log, "Encap attr is set for non-VLAN frame");
a097c0b2
PS
1087 return -EINVAL;
1088 }
1089 }
1090
9233cef7 1091 err = ovs_key_from_nlattrs(match, key_attrs, a, false, log);
a097c0b2
PS
1092 if (err)
1093 return err;
1094
f3ccd17d 1095 if (match->mask) {
f3ccd17d
PS
1096 if (!nla_mask) {
1097 /* Create an exact match mask. We need to set to 0xff
1098 * all the 'match->mask' fields that have been touched
1099 * in 'match->key'. We cannot simply memset
1100 * 'match->mask', because padding bytes and fields not
1101 * specified in 'match->key' should be left to 0.
1102 * Instead, we use a stream of netlink attributes,
7d16c847
PS
1103 * copied from 'key' and set to 0xff.
1104 * ovs_key_from_nlattrs() will take care of filling
1105 * 'match->mask' appropriately.
f3ccd17d
PS
1106 */
1107 newmask = kmemdup(nla_key,
1108 nla_total_size(nla_len(nla_key)),
1109 GFP_KERNEL);
1110 if (!newmask)
1111 return -ENOMEM;
62974663 1112
f3ccd17d 1113 mask_set_nlattr(newmask, 0xff);
62974663 1114
f3ccd17d
PS
1115 /* The userspace does not send tunnel attributes that
1116 * are 0, but we should not wildcard them nonetheless.
1117 */
1118 if (match->key->tun_key.ipv4_dst)
1119 SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
1120 0xff, true);
0b496cda 1121
f3ccd17d
PS
1122 nla_mask = newmask;
1123 }
62974663 1124
9233cef7 1125 err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
a097c0b2 1126 if (err)
62974663 1127 goto free_newmask;
a097c0b2 1128
f3ccd17d
PS
1129 /* Always match on tci. */
1130 SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
1131
62974663 1132 if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) {
a097c0b2
PS
1133 __be16 eth_type = 0;
1134 __be16 tci = 0;
1135
1136 if (!encap_valid) {
7d16c847 1137 OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame.");
62974663
DDP
1138 err = -EINVAL;
1139 goto free_newmask;
a097c0b2
PS
1140 }
1141
1142 mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
1143 if (a[OVS_KEY_ATTR_ETHERTYPE])
1144 eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
1145
1146 if (eth_type == htons(0xffff)) {
1147 mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
1148 encap = a[OVS_KEY_ATTR_ENCAP];
9233cef7
JR
1149 err = parse_flow_mask_nlattrs(encap, a,
1150 &mask_attrs, log);
3854ab21
AW
1151 if (err)
1152 goto free_newmask;
a097c0b2 1153 } else {
7d16c847 1154 OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).",
9233cef7 1155 ntohs(eth_type));
62974663
DDP
1156 err = -EINVAL;
1157 goto free_newmask;
a097c0b2
PS
1158 }
1159
1160 if (a[OVS_KEY_ATTR_VLAN])
1161 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
1162
1163 if (!(tci & htons(VLAN_TAG_PRESENT))) {
7d16c847 1164 OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).",
9233cef7 1165 ntohs(tci));
62974663
DDP
1166 err = -EINVAL;
1167 goto free_newmask;
a097c0b2
PS
1168 }
1169 }
1170
9233cef7 1171 err = ovs_key_from_nlattrs(match, mask_attrs, a, true, log);
a097c0b2 1172 if (err)
62974663 1173 goto free_newmask;
a097c0b2
PS
1174 }
1175
9233cef7 1176 if (!match_validate(match, key_attrs, mask_attrs, log))
62974663 1177 err = -EINVAL;
a097c0b2 1178
62974663
DDP
1179free_newmask:
1180 kfree(newmask);
1181 return err;
a097c0b2
PS
1182}
1183
bc619e29
JS
1184static size_t get_ufid_len(const struct nlattr *attr, bool log)
1185{
1186 size_t len;
1187
1188 if (!attr)
1189 return 0;
1190
1191 len = nla_len(attr);
1192 if (len < 1 || len > MAX_UFID_LENGTH) {
1193 OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
1194 nla_len(attr), MAX_UFID_LENGTH);
1195 return 0;
1196 }
1197
1198 return len;
1199}
1200
1201/* Initializes 'flow->ufid', returning true if 'attr' contains a valid UFID,
1202 * or false otherwise.
1203 */
1204bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
1205 bool log)
1206{
1207 sfid->ufid_len = get_ufid_len(attr, log);
1208 if (sfid->ufid_len)
1209 memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);
1210
1211 return sfid->ufid_len;
1212}
1213
1214int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
1215 const struct sw_flow_key *key, bool log)
1216{
1217 struct sw_flow_key *new_key;
1218
1219 if (ovs_nla_get_ufid(sfid, ufid, log))
1220 return 0;
1221
1222 /* If UFID was not provided, use unmasked key. */
1223 new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
1224 if (!new_key)
1225 return -ENOMEM;
1226 memcpy(new_key, key, sizeof(*key));
1227 sfid->unmasked_key = new_key;
1228
1229 return 0;
1230}
1231
1232u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
1233{
1234 return attr ? nla_get_u32(attr) : 0;
1235}
1236
a097c0b2
PS
1237/**
1238 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
c135bba1 1239 * @key: Receives extracted in_port, priority, tun_key and skb_mark.
a097c0b2
PS
1240 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
1241 * sequence.
9233cef7
JR
1242 * @log: Boolean to allow kernel error logging. Normally true, but when
1243 * probing for feature compatibility this should be passed in as false to
1244 * suppress unnecessary error logging.
a097c0b2
PS
1245 *
1246 * This parses a series of Netlink attributes that form a flow key, which must
1247 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
1248 * get the metadata, that is, the parts of the flow key that cannot be
1249 * extracted from the packet itself.
1250 */
7d16c847 1251
c135bba1 1252int ovs_nla_get_flow_metadata(const struct nlattr *attr,
9233cef7
JR
1253 struct sw_flow_key *key,
1254 bool log)
a097c0b2 1255{
a097c0b2 1256 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
c135bba1 1257 struct sw_flow_match match;
a097c0b2
PS
1258 u64 attrs = 0;
1259 int err;
a097c0b2 1260
9233cef7 1261 err = parse_flow_nlattrs(attr, a, &attrs, log);
a097c0b2
PS
1262 if (err)
1263 return -EINVAL;
1264
1265 memset(&match, 0, sizeof(match));
c135bba1 1266 match.key = key;
a097c0b2 1267
7f45215a 1268 memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
c135bba1 1269 key->phy.in_port = DP_MAX_PORTS;
a097c0b2 1270
9233cef7 1271 return metadata_from_nlattrs(&match, &attrs, a, false, log);
a097c0b2
PS
1272}
1273
db7f2238
JS
1274static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
1275 const struct sw_flow_key *output, bool is_mask,
1276 struct sk_buff *skb)
a097c0b2
PS
1277{
1278 struct ovs_key_ethernet *eth_key;
1279 struct nlattr *nla, *encap;
a097c0b2 1280
7d16c847 1281 if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
7804df20
AZ
1282 goto nla_put_failure;
1283
7d16c847 1284 if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
a6059080
AZ
1285 goto nla_put_failure;
1286
a097c0b2
PS
1287 if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
1288 goto nla_put_failure;
1289
c1fc1411 1290 if ((swkey->tun_key.ipv4_dst || is_mask)) {
4b163224 1291 const void *opts = NULL;
c1fc1411 1292
fa6395df 1293 if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
4b163224 1294 opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
c1fc1411
JG
1295
1296 if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
7d16c847 1297 swkey->tun_opts_len))
c1fc1411
JG
1298 goto nla_put_failure;
1299 }
a097c0b2
PS
1300
1301 if (swkey->phy.in_port == DP_MAX_PORTS) {
1302 if (is_mask && (output->phy.in_port == 0xffff))
1303 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
1304 goto nla_put_failure;
1305 } else {
1306 u16 upper_u16;
1307 upper_u16 = !is_mask ? 0 : 0xffff;
1308
1309 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
1310 (upper_u16 << 16) | output->phy.in_port))
1311 goto nla_put_failure;
1312 }
1313
1314 if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
1315 goto nla_put_failure;
1316
1317 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
1318 if (!nla)
1319 goto nla_put_failure;
1320
1321 eth_key = nla_data(nla);
982a47ec
JP
1322 ether_addr_copy(eth_key->eth_src, output->eth.src);
1323 ether_addr_copy(eth_key->eth_dst, output->eth.dst);
a097c0b2
PS
1324
1325 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
1326 __be16 eth_type;
1327 eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
1328 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
1329 nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
1330 goto nla_put_failure;
1331 encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
1332 if (!swkey->eth.tci)
1333 goto unencap;
1334 } else
1335 encap = NULL;
1336
1337 if (swkey->eth.type == htons(ETH_P_802_2)) {
1338 /*
1339 * Ethertype 802.2 is represented in the netlink with omitted
1340 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
1341 * 0xffff in the mask attribute. Ethertype can also
1342 * be wildcarded.
1343 */
1344 if (is_mask && output->eth.type)
1345 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
1346 output->eth.type))
1347 goto nla_put_failure;
1348 goto unencap;
1349 }
1350
1351 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
1352 goto nla_put_failure;
1353
1354 if (swkey->eth.type == htons(ETH_P_IP)) {
1355 struct ovs_key_ipv4 *ipv4_key;
1356
1357 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
1358 if (!nla)
1359 goto nla_put_failure;
1360 ipv4_key = nla_data(nla);
1361 ipv4_key->ipv4_src = output->ipv4.addr.src;
1362 ipv4_key->ipv4_dst = output->ipv4.addr.dst;
1363 ipv4_key->ipv4_proto = output->ip.proto;
1364 ipv4_key->ipv4_tos = output->ip.tos;
1365 ipv4_key->ipv4_ttl = output->ip.ttl;
1366 ipv4_key->ipv4_frag = output->ip.frag;
1367 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1368 struct ovs_key_ipv6 *ipv6_key;
1369
1370 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
1371 if (!nla)
1372 goto nla_put_failure;
1373 ipv6_key = nla_data(nla);
1374 memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
1375 sizeof(ipv6_key->ipv6_src));
1376 memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
1377 sizeof(ipv6_key->ipv6_dst));
1378 ipv6_key->ipv6_label = output->ipv6.label;
1379 ipv6_key->ipv6_proto = output->ip.proto;
1380 ipv6_key->ipv6_tclass = output->ip.tos;
1381 ipv6_key->ipv6_hlimit = output->ip.ttl;
1382 ipv6_key->ipv6_frag = output->ip.frag;
1383 } else if (swkey->eth.type == htons(ETH_P_ARP) ||
1384 swkey->eth.type == htons(ETH_P_RARP)) {
1385 struct ovs_key_arp *arp_key;
1386
1387 nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
1388 if (!nla)
1389 goto nla_put_failure;
1390 arp_key = nla_data(nla);
1391 memset(arp_key, 0, sizeof(struct ovs_key_arp));
1392 arp_key->arp_sip = output->ipv4.addr.src;
1393 arp_key->arp_tip = output->ipv4.addr.dst;
1394 arp_key->arp_op = htons(output->ip.proto);
982a47ec
JP
1395 ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
1396 ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
ccf43786
SH
1397 } else if (eth_p_mpls(swkey->eth.type)) {
1398 struct ovs_key_mpls *mpls_key;
1399
1400 nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
1401 if (!nla)
1402 goto nla_put_failure;
1403 mpls_key = nla_data(nla);
1404 mpls_key->mpls_lse = output->mpls.top_lse;
a097c0b2
PS
1405 }
1406
1407 if ((swkey->eth.type == htons(ETH_P_IP) ||
1408 swkey->eth.type == htons(ETH_P_IPV6)) &&
1409 swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
1410
1411 if (swkey->ip.proto == IPPROTO_TCP) {
1412 struct ovs_key_tcp *tcp_key;
1413
1414 nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
1415 if (!nla)
1416 goto nla_put_failure;
1417 tcp_key = nla_data(nla);
708fb4c5
JR
1418 tcp_key->tcp_src = output->tp.src;
1419 tcp_key->tcp_dst = output->tp.dst;
1420 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
1421 output->tp.flags))
1422 goto nla_put_failure;
a097c0b2
PS
1423 } else if (swkey->ip.proto == IPPROTO_UDP) {
1424 struct ovs_key_udp *udp_key;
1425
1426 nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
1427 if (!nla)
1428 goto nla_put_failure;
1429 udp_key = nla_data(nla);
708fb4c5
JR
1430 udp_key->udp_src = output->tp.src;
1431 udp_key->udp_dst = output->tp.dst;
a097c0b2
PS
1432 } else if (swkey->ip.proto == IPPROTO_SCTP) {
1433 struct ovs_key_sctp *sctp_key;
1434
1435 nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
1436 if (!nla)
1437 goto nla_put_failure;
1438 sctp_key = nla_data(nla);
708fb4c5
JR
1439 sctp_key->sctp_src = output->tp.src;
1440 sctp_key->sctp_dst = output->tp.dst;
a097c0b2
PS
1441 } else if (swkey->eth.type == htons(ETH_P_IP) &&
1442 swkey->ip.proto == IPPROTO_ICMP) {
1443 struct ovs_key_icmp *icmp_key;
1444
1445 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
1446 if (!nla)
1447 goto nla_put_failure;
1448 icmp_key = nla_data(nla);
708fb4c5
JR
1449 icmp_key->icmp_type = ntohs(output->tp.src);
1450 icmp_key->icmp_code = ntohs(output->tp.dst);
a097c0b2
PS
1451 } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
1452 swkey->ip.proto == IPPROTO_ICMPV6) {
1453 struct ovs_key_icmpv6 *icmpv6_key;
1454
1455 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
1456 sizeof(*icmpv6_key));
1457 if (!nla)
1458 goto nla_put_failure;
1459 icmpv6_key = nla_data(nla);
708fb4c5
JR
1460 icmpv6_key->icmpv6_type = ntohs(output->tp.src);
1461 icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
a097c0b2
PS
1462
1463 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
1464 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
1465 struct ovs_key_nd *nd_key;
1466
1467 nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
1468 if (!nla)
1469 goto nla_put_failure;
1470 nd_key = nla_data(nla);
1471 memcpy(nd_key->nd_target, &output->ipv6.nd.target,
1472 sizeof(nd_key->nd_target));
982a47ec
JP
1473 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
1474 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
a097c0b2
PS
1475 }
1476 }
1477 }
1478
1479unencap:
1480 if (encap)
1481 nla_nest_end(skb, encap);
1482
1483 return 0;
1484
1485nla_put_failure:
1486 return -EMSGSIZE;
1487}
1488
db7f2238
JS
1489int ovs_nla_put_key(const struct sw_flow_key *swkey,
1490 const struct sw_flow_key *output, int attr, bool is_mask,
1491 struct sk_buff *skb)
1492{
1493 int err;
1494 struct nlattr *nla;
1495
1496 nla = nla_nest_start(skb, attr);
1497 if (!nla)
1498 return -EMSGSIZE;
1499 err = __ovs_nla_put_key(swkey, output, is_mask, skb);
1500 if (err)
1501 return err;
1502 nla_nest_end(skb, nla);
1503
1504 return 0;
1505}
1506
1507/* Called with ovs_mutex or RCU read lock. */
bc619e29
JS
1508int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
1509{
1510 if (ovs_identifier_is_ufid(&flow->id))
1511 return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
1512 flow->id.ufid);
1513
1514 return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
1515 OVS_FLOW_ATTR_KEY, false, skb);
1516}
1517
1518/* Called with ovs_mutex or RCU read lock. */
1519int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
db7f2238 1520{
b037bc5a 1521 return ovs_nla_put_key(&flow->key, &flow->key,
db7f2238
JS
1522 OVS_FLOW_ATTR_KEY, false, skb);
1523}
1524
1525/* Called with ovs_mutex or RCU read lock. */
1526int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
1527{
1528 return ovs_nla_put_key(&flow->key, &flow->mask->key,
1529 OVS_FLOW_ATTR_MASK, true, skb);
1530}
1531
a097c0b2
PS
1532#define MAX_ACTIONS_BUFSIZE (32 * 1024)
1533
9233cef7 1534static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
a097c0b2
PS
1535{
1536 struct sw_flow_actions *sfa;
1537
a473df5b 1538 if (size > MAX_ACTIONS_BUFSIZE) {
7d16c847 1539 OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
a097c0b2 1540 return ERR_PTR(-EINVAL);
a473df5b 1541 }
a097c0b2
PS
1542
1543 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
1544 if (!sfa)
1545 return ERR_PTR(-ENOMEM);
1546
1547 sfa->actions_len = 0;
1548 return sfa;
1549}
1550
1551/* RCU callback used by ovs_nla_free_flow_actions. */
1552static void rcu_free_acts_callback(struct rcu_head *rcu)
1553{
1554 struct sw_flow_actions *sf_acts = container_of(rcu,
1555 struct sw_flow_actions, rcu);
1556 kfree(sf_acts);
1557}
1558
1559/* Schedules 'sf_acts' to be freed after the next RCU grace period.
af465b67
PS
1560 * The caller must hold rcu_read_lock for this to be sensible.
1561 */
a097c0b2
PS
1562void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
1563{
1564 call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
1565}
1566
1567static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
9233cef7 1568 int attr_len, bool log)
a097c0b2
PS
1569{
1570
1571 struct sw_flow_actions *acts;
1572 int new_acts_size;
1573 int req_size = NLA_ALIGN(attr_len);
1574 int next_offset = offsetof(struct sw_flow_actions, actions) +
1575 (*sfa)->actions_len;
1576
1577 if (req_size <= (ksize(*sfa) - next_offset))
1578 goto out;
1579
1580 new_acts_size = ksize(*sfa) * 2;
1581
1582 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
1583 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
1584 return ERR_PTR(-EMSGSIZE);
1585 new_acts_size = MAX_ACTIONS_BUFSIZE;
1586 }
1587
9233cef7 1588 acts = nla_alloc_flow_actions(new_acts_size, log);
a097c0b2
PS
1589 if (IS_ERR(acts))
1590 return (void *)acts;
1591
1592 memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
1593 acts->actions_len = (*sfa)->actions_len;
1594 kfree(*sfa);
1595 *sfa = acts;
1596
1597out:
1598 (*sfa)->actions_len += req_size;
1599 return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
1600}
1601
7d16c847
PS
1602static struct nlattr *__add_action(struct sw_flow_actions **sfa,
1603 int attrtype, void *data, int len, bool log)
a097c0b2
PS
1604{
1605 struct nlattr *a;
1606
9233cef7 1607 a = reserve_sfa_size(sfa, nla_attr_size(len), log);
a097c0b2 1608 if (IS_ERR(a))
f0cd669f 1609 return a;
a097c0b2
PS
1610
1611 a->nla_type = attrtype;
1612 a->nla_len = nla_attr_size(len);
1613
1614 if (data)
1615 memcpy(nla_data(a), data, len);
1616 memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
1617
f0cd669f
JG
1618 return a;
1619}
1620
1621static int add_action(struct sw_flow_actions **sfa, int attrtype,
9233cef7 1622 void *data, int len, bool log)
f0cd669f
JG
1623{
1624 struct nlattr *a;
1625
9233cef7 1626 a = __add_action(sfa, attrtype, data, len, log);
f0cd669f
JG
1627 if (IS_ERR(a))
1628 return PTR_ERR(a);
1629
a097c0b2
PS
1630 return 0;
1631}
1632
1633static inline int add_nested_action_start(struct sw_flow_actions **sfa,
9233cef7 1634 int attrtype, bool log)
a097c0b2
PS
1635{
1636 int used = (*sfa)->actions_len;
1637 int err;
1638
9233cef7 1639 err = add_action(sfa, attrtype, NULL, 0, log);
a097c0b2
PS
1640 if (err)
1641 return err;
1642
1643 return used;
1644}
1645
1646static inline void add_nested_action_end(struct sw_flow_actions *sfa,
1647 int st_offset)
1648{
1649 struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
1650 st_offset);
1651
1652 a->nla_len = sfa->actions_len - st_offset;
1653}
1654
ff27161e 1655static int __ovs_nla_copy_actions(const struct nlattr *attr,
ccf43786
SH
1656 const struct sw_flow_key *key,
1657 int depth, struct sw_flow_actions **sfa,
9233cef7 1658 __be16 eth_type, __be16 vlan_tci, bool log);
ccf43786 1659
a097c0b2
PS
1660static int validate_and_copy_sample(const struct nlattr *attr,
1661 const struct sw_flow_key *key, int depth,
ccf43786 1662 struct sw_flow_actions **sfa,
9233cef7 1663 __be16 eth_type, __be16 vlan_tci, bool log)
a097c0b2
PS
1664{
1665 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
1666 const struct nlattr *probability, *actions;
1667 const struct nlattr *a;
1668 int rem, start, err, st_acts;
1669
1670 memset(attrs, 0, sizeof(attrs));
1671 nla_for_each_nested(a, attr, rem) {
1672 int type = nla_type(a);
1673 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
1674 return -EINVAL;
1675 attrs[type] = a;
1676 }
1677 if (rem)
1678 return -EINVAL;
1679
1680 probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
1681 if (!probability || nla_len(probability) != sizeof(u32))
1682 return -EINVAL;
1683
1684 actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
1685 if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
1686 return -EINVAL;
1687
1688 /* validation done, copy sample action. */
9233cef7 1689 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
a097c0b2
PS
1690 if (start < 0)
1691 return start;
1692 err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
9233cef7 1693 nla_data(probability), sizeof(u32), log);
a097c0b2
PS
1694 if (err)
1695 return err;
9233cef7 1696 st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
a097c0b2
PS
1697 if (st_acts < 0)
1698 return st_acts;
1699
ff27161e 1700 err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa,
9233cef7 1701 eth_type, vlan_tci, log);
a097c0b2
PS
1702 if (err)
1703 return err;
1704
1705 add_nested_action_end(*sfa, st_acts);
1706 add_nested_action_end(*sfa, start);
1707
1708 return 0;
1709}
1710
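Illustrative only: the nesting validated above is OVS_ACTION_ATTR_SAMPLE carrying OVS_SAMPLE_ATTR_PROBABILITY plus OVS_SAMPLE_ATTR_ACTIONS. Per the uapi description, the probability is a 32-bit fraction where 0 samples no packets and U32_MAX samples all of them; the conversion helper below is hypothetical.

static u32 example_sample_probability(u32 percent)
{
	/* (U32_MAX / 100) * percent stays within 32 bits for percent <= 100;
	 * the result is slightly below U32_MAX at 100% due to rounding.
	 */
	return (U32_MAX / 100) * percent;
}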
a097c0b2
PS
1711void ovs_match_init(struct sw_flow_match *match,
1712 struct sw_flow_key *key,
1713 struct sw_flow_mask *mask)
1714{
1715 memset(match, 0, sizeof(*match));
1716 match->key = key;
1717 match->mask = mask;
1718
1719 memset(key, 0, sizeof(*key));
1720
1721 if (mask) {
1722 memset(&mask->key, 0, sizeof(mask->key));
1723 mask->range.start = mask->range.end = 0;
1724 }
1725}
1726
4b163224
TG
1727static int validate_geneve_opts(struct sw_flow_key *key)
1728{
1729 struct geneve_opt *option;
1730 int opts_len = key->tun_opts_len;
1731 bool crit_opt = false;
1732
1733 option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
1734 while (opts_len > 0) {
1735 int len;
1736
1737 if (opts_len < sizeof(*option))
1738 return -EINVAL;
1739
1740 len = sizeof(*option) + option->length * 4;
1741 if (len > opts_len)
1742 return -EINVAL;
1743
1744 crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
1745
1746 option = (struct geneve_opt *)((u8 *)option + len);
1747 opts_len -= len;
1748 }
1749
1750 key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
1751
1752 return 0;
1753}
1754
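Illustrative only: each Geneve option is a 4-byte header followed by option->length 4-byte words of data, which is where the sizeof(*option) + option->length * 4 stride above comes from. A made-up example that fills one 8-byte critical option into a caller-provided buffer (class and type values are arbitrary):

static size_t example_fill_geneve_opt(u8 *buf)
{
	/* 'buf' must have room for at least 8 bytes. */
	struct geneve_opt *opt = (struct geneve_opt *)buf;

	opt->opt_class = htons(0x0102);		/* Arbitrary option class. */
	opt->type = GENEVE_CRIT_OPT_TYPE | 1;	/* High bit marks it critical. */
	opt->length = 1;			/* One 4-byte data word. */
	memset(opt->opt_data, 0xab, 4);

	return sizeof(*opt) + opt->length * 4;	/* 8 bytes total. */
}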
a097c0b2 1755static int validate_and_copy_set_tun(const struct nlattr *attr,
9233cef7 1756 struct sw_flow_actions **sfa, bool log)
a097c0b2
PS
1757{
1758 struct sw_flow_match match;
1759 struct sw_flow_key key;
f0cd669f
JG
1760 struct ovs_tunnel_info *tun_info;
1761 struct nlattr *a;
d08113d6
AW
1762 int start, opts_type;
1763 int err = 0;
a097c0b2
PS
1764
1765 ovs_match_init(&match, &key, NULL);
0c7930a3
TG
1766 opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
1767 if (opts_type < 0)
1768 return opts_type;
a097c0b2 1769
c1fc1411 1770 if (key.tun_opts_len) {
0c7930a3
TG
1771 switch (opts_type) {
1772 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
1773 err = validate_geneve_opts(&key);
1774 if (err < 0)
1775 return err;
1776 break;
1777 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
1778 break;
1779 }
c1fc1411
JG
1780 }
1781
9233cef7 1782 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
a097c0b2
PS
1783 if (start < 0)
1784 return start;
1785
f0cd669f 1786 a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
9233cef7 1787 sizeof(*tun_info) + key.tun_opts_len, log);
f0cd669f
JG
1788 if (IS_ERR(a))
1789 return PTR_ERR(a);
1790
1791 tun_info = nla_data(a);
1792 tun_info->tunnel = key.tun_key;
c1fc1411
JG
1793 tun_info->options_len = key.tun_opts_len;
1794
1795 if (tun_info->options_len) {
1796 /* We need to store the options in the action itself since
1797 * everything else will go away after flow setup. We can append
1798 * them to tun_info and then point there.
1799 */
4b163224
TG
1800 memcpy((tun_info + 1),
1801 TUN_METADATA_OPTS(&key, key.tun_opts_len), key.tun_opts_len);
1802 tun_info->options = (tun_info + 1);
c1fc1411
JG
1803 } else {
1804 tun_info->options = NULL;
1805 }
f0cd669f 1806
a097c0b2
PS
1807 add_nested_action_end(*sfa, start);
1808
1809 return err;
1810}
1811
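Not in the original file: a small accessor sketch making the memory layout explicit. The OVS_KEY_ATTR_TUNNEL_INFO attribute holds the struct ovs_tunnel_info with the option bytes copied immediately behind it, so the metadata outlives the on-stack 'key' used only during validation.

static const void *example_tun_opts(const struct ovs_tunnel_info *tun_info)
{
	/* Options, when present, start right after the struct itself. */
	return tun_info->options_len ? (const void *)(tun_info + 1) : NULL;
}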
b940b3d7
JR
1812/* Return false if there are any non-masked bits set.
1813 * Mask follows data immediately, before any netlink padding.
1814 */
1815static bool validate_masked(u8 *data, int len)
1816{
1817 u8 *mask = data + len;
1818
1819 while (len--)
1820 if (*data++ & ~*mask++)
1821 return false;
1822
1823 return true;
1824}
1825
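Illustrative only: for a masked set the attribute payload is the key immediately followed by an equally sized mask, and validate_masked() rejects any key bit that the mask does not cover. A tiny made-up example:

static bool example_masked_ok(void)
{
	/* Two key bytes, then their two mask bytes. */
	u8 buf[4] = { 0x0a, 0x00,	/* key: second byte left untouched */
		      0xff, 0x00 };	/* mask: only the first byte writable */

	return validate_masked(buf, 2);	/* true: no key bit escapes the mask */
}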
a097c0b2
PS
1826static int validate_set(const struct nlattr *a,
1827 const struct sw_flow_key *flow_key,
1828 struct sw_flow_actions **sfa,
b940b3d7 1829 bool *skip_copy, __be16 eth_type, bool masked, bool log)
a097c0b2
PS
1830{
1831 const struct nlattr *ovs_key = nla_data(a);
1832 int key_type = nla_type(ovs_key);
b940b3d7 1833 size_t key_len;
a097c0b2
PS
1834
1835 /* There can be only one key in an action */
1836 if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
1837 return -EINVAL;
1838
b940b3d7
JR
1839 key_len = nla_len(ovs_key);
1840 if (masked)
1841 key_len /= 2;
1842
a097c0b2 1843 if (key_type > OVS_KEY_ATTR_MAX ||
b940b3d7 1844 (ovs_key_lens[key_type].len != key_len &&
ec959cdc 1845 ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
a097c0b2
PS
1846 return -EINVAL;
1847
b940b3d7
JR
1848 if (masked && !validate_masked(nla_data(ovs_key), key_len))
1849 return -EINVAL;
1850
a097c0b2
PS
1851 switch (key_type) {
1852 const struct ovs_key_ipv4 *ipv4_key;
1853 const struct ovs_key_ipv6 *ipv6_key;
1854 int err;
1855
1856 case OVS_KEY_ATTR_PRIORITY:
1857 case OVS_KEY_ATTR_SKB_MARK:
1858 case OVS_KEY_ATTR_ETHERNET:
1859 break;
1860
1861 case OVS_KEY_ATTR_TUNNEL:
2baf0e0c
PS
1862 if (eth_p_mpls(eth_type))
1863 return -EINVAL;
1864
b940b3d7
JR
1865 if (masked)
1866 return -EINVAL; /* Masked tunnel set not supported. */
1867
1868 *skip_copy = true;
9233cef7 1869 err = validate_and_copy_set_tun(a, sfa, log);
a097c0b2
PS
1870 if (err)
1871 return err;
1872 break;
1873
1874 case OVS_KEY_ATTR_IPV4:
ccf43786 1875 if (eth_type != htons(ETH_P_IP))
a097c0b2
PS
1876 return -EINVAL;
1877
a097c0b2 1878 ipv4_key = nla_data(ovs_key);
a097c0b2 1879
b940b3d7
JR
1880 if (masked) {
1881 const struct ovs_key_ipv4 *mask = ipv4_key + 1;
a097c0b2 1882
b940b3d7
JR
1883 /* Non-writeable fields. */
1884 if (mask->ipv4_proto || mask->ipv4_frag)
1885 return -EINVAL;
1886 } else {
1887 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
1888 return -EINVAL;
1889
1890 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
1891 return -EINVAL;
1892 }
a097c0b2
PS
1893 break;
1894
1895 case OVS_KEY_ATTR_IPV6:
ccf43786 1896 if (eth_type != htons(ETH_P_IPV6))
a097c0b2
PS
1897 return -EINVAL;
1898
a097c0b2 1899 ipv6_key = nla_data(ovs_key);
a0fb56c1 1900
b940b3d7
JR
1901 if (masked) {
1902 const struct ovs_key_ipv6 *mask = ipv6_key + 1;
a097c0b2 1903
b940b3d7
JR
1904 /* Non-writeable fields. */
1905 if (mask->ipv6_proto || mask->ipv6_frag)
1906 return -EINVAL;
a097c0b2 1907
b940b3d7
JR
1908 /* Invalid bits in the flow label mask? */
1909 if (ntohl(mask->ipv6_label) & 0xFFF00000)
1910 return -EINVAL;
1911 } else {
1912 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
1913 return -EINVAL;
1914
1915 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
1916 return -EINVAL;
1917 }
a097c0b2
PS
1918 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
1919 return -EINVAL;
1920
1921 break;
1922
1923 case OVS_KEY_ATTR_TCP:
b940b3d7
JR
1924 if ((eth_type != htons(ETH_P_IP) &&
1925 eth_type != htons(ETH_P_IPV6)) ||
1926 flow_key->ip.proto != IPPROTO_TCP)
a097c0b2
PS
1927 return -EINVAL;
1928
b940b3d7 1929 break;
a097c0b2
PS
1930
1931 case OVS_KEY_ATTR_UDP:
b940b3d7
JR
1932 if ((eth_type != htons(ETH_P_IP) &&
1933 eth_type != htons(ETH_P_IPV6)) ||
1934 flow_key->ip.proto != IPPROTO_UDP)
a097c0b2
PS
1935 return -EINVAL;
1936
b940b3d7 1937 break;
ccf43786
SH
1938
1939 case OVS_KEY_ATTR_MPLS:
1940 if (!eth_p_mpls(eth_type))
1941 return -EINVAL;
1942 break;
a097c0b2
PS
1943
1944 case OVS_KEY_ATTR_SCTP:
b940b3d7
JR
1945 if ((eth_type != htons(ETH_P_IP) &&
1946 eth_type != htons(ETH_P_IPV6)) ||
1947 flow_key->ip.proto != IPPROTO_SCTP)
a097c0b2
PS
1948 return -EINVAL;
1949
b940b3d7 1950 break;
a097c0b2
PS
1951
1952 default:
1953 return -EINVAL;
1954 }
1955
b940b3d7
JR
1956 /* Convert non-masked non-tunnel set actions to masked set actions. */
1957 if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
1958 int start, len = key_len * 2;
1959 struct nlattr *at;
1960
1961 *skip_copy = true;
1962
1963 start = add_nested_action_start(sfa,
1964 OVS_ACTION_ATTR_SET_TO_MASKED,
1965 log);
1966 if (start < 0)
1967 return start;
1968
1969 at = __add_action(sfa, key_type, NULL, len, log);
1970 if (IS_ERR(at))
1971 return PTR_ERR(at);
1972
1973 memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
1974 memset(nla_data(at) + key_len, 0xff, key_len); /* Mask. */
1975 /* Clear non-writeable bits from otherwise writeable fields. */
1976 if (key_type == OVS_KEY_ATTR_IPV6) {
1977 struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
1978
1979 mask->ipv6_label &= htonl(0x000FFFFF);
1980 }
1981 add_nested_action_end(*sfa, start);
1982 }
1983
a097c0b2
PS
1984 return 0;
1985}
1986
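Illustrative only: the conversion above means a plain set(ipv4(...)) from userspace is stored internally as a masked set whose payload is the key followed by an all-ones mask (with non-writeable bits, such as the reserved IPv6 flow-label bits, cleared afterwards), so the fast path only ever has to execute masked sets. The doubling itself is just:

static void example_key_to_masked(const void *key, size_t key_len, u8 *out)
{
	/* 'out' must have room for 2 * key_len bytes. */
	memcpy(out, key, key_len);		/* First half: the key as given. */
	memset(out + key_len, 0xff, key_len);	/* Second half: write every bit. */
}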
1987static int validate_userspace(const struct nlattr *attr)
1988{
1989 static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
1990 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
1991 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
8b7ea2d4 1992 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
a097c0b2
PS
1993 };
1994 struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
1995 int error;
1996
1997 error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
1998 attr, userspace_policy);
1999 if (error)
2000 return error;
2001
2002 if (!a[OVS_USERSPACE_ATTR_PID] ||
2003 !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
2004 return -EINVAL;
2005
2006 return 0;
2007}
2008
2009static int copy_action(const struct nlattr *from,
9233cef7 2010 struct sw_flow_actions **sfa, bool log)
a097c0b2
PS
2011{
2012 int totlen = NLA_ALIGN(from->nla_len);
2013 struct nlattr *to;
2014
9233cef7 2015 to = reserve_sfa_size(sfa, from->nla_len, log);
a097c0b2
PS
2016 if (IS_ERR(to))
2017 return PTR_ERR(to);
2018
2019 memcpy(to, from, totlen);
2020 return 0;
2021}
2022
ff27161e 2023static int __ovs_nla_copy_actions(const struct nlattr *attr,
ccf43786
SH
2024 const struct sw_flow_key *key,
2025 int depth, struct sw_flow_actions **sfa,
9233cef7 2026 __be16 eth_type, __be16 vlan_tci, bool log)
a097c0b2
PS
2027{
2028 const struct nlattr *a;
2029 int rem, err;
2030
2031 if (depth >= SAMPLE_ACTION_DEPTH)
2032 return -EOVERFLOW;
2033
2034 nla_for_each_nested(a, attr, rem) {
2035 /* Expected argument lengths, (u32)-1 for variable length. */
2036 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
2037 [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
a6059080 2038 [OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
a097c0b2 2039 [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
ccf43786
SH
2040 [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
2041 [OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
a097c0b2
PS
2042 [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
2043 [OVS_ACTION_ATTR_POP_VLAN] = 0,
2044 [OVS_ACTION_ATTR_SET] = (u32)-1,
b940b3d7 2045 [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
7804df20
AZ
2046 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
2047 [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
a097c0b2
PS
2048 };
2049 const struct ovs_action_push_vlan *vlan;
2050 int type = nla_type(a);
2051 bool skip_copy;
2052
2053 if (type > OVS_ACTION_ATTR_MAX ||
2054 (action_lens[type] != nla_len(a) &&
2055 action_lens[type] != (u32)-1))
2056 return -EINVAL;
2057
2058 skip_copy = false;
2059 switch (type) {
2060 case OVS_ACTION_ATTR_UNSPEC:
2061 return -EINVAL;
2062
2063 case OVS_ACTION_ATTR_USERSPACE:
2064 err = validate_userspace(a);
2065 if (err)
2066 return err;
2067 break;
2068
2069 case OVS_ACTION_ATTR_OUTPUT:
2070 if (nla_get_u32(a) >= DP_MAX_PORTS)
2071 return -EINVAL;
2072 break;
2073
7804df20
AZ
2074 case OVS_ACTION_ATTR_HASH: {
2075 const struct ovs_action_hash *act_hash = nla_data(a);
2076
2077 switch (act_hash->hash_alg) {
2078 case OVS_HASH_ALG_L4:
2079 break;
2080 default:
2081 return -EINVAL;
2082 }
2083
2084 break;
2085 }
a097c0b2
PS
2086
2087 case OVS_ACTION_ATTR_POP_VLAN:
e0b8f73f 2088 vlan_tci = htons(0);
a097c0b2
PS
2089 break;
2090
2091 case OVS_ACTION_ATTR_PUSH_VLAN:
2092 vlan = nla_data(a);
2093 if (vlan->vlan_tpid != htons(ETH_P_8021Q))
2094 return -EINVAL;
2095 if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
2096 return -EINVAL;
ccf43786 2097 vlan_tci = vlan->vlan_tci;
a097c0b2
PS
2098 break;
2099
a6059080
AZ
2100 case OVS_ACTION_ATTR_RECIRC:
2101 break;
2102
ccf43786
SH
2103 case OVS_ACTION_ATTR_PUSH_MPLS: {
2104 const struct ovs_action_push_mpls *mpls = nla_data(a);
2105
2106 if (!eth_p_mpls(mpls->mpls_ethertype))
2107 return -EINVAL;
2108 /* Prohibit pushing MPLS except onto a whitelist of ethertypes,
2109 * i.e. packets whose tag order is known.
e0b8f73f 2110 */
ccf43786
SH
2111 if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
2112 (eth_type != htons(ETH_P_IP) &&
2113 eth_type != htons(ETH_P_IPV6) &&
2114 eth_type != htons(ETH_P_ARP) &&
2115 eth_type != htons(ETH_P_RARP) &&
2116 !eth_p_mpls(eth_type)))
2117 return -EINVAL;
2118 eth_type = mpls->mpls_ethertype;
2119 break;
2120 }
2121
2122 case OVS_ACTION_ATTR_POP_MPLS:
2123 if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
2124 !eth_p_mpls(eth_type))
2125 return -EINVAL;
2126
2127 /* Disallow subsequent L2.5+ set and mpls_pop actions
2128 * as there is no check here to ensure that the new
2129 * eth_type is valid and thus set actions could
2130 * write off the end of the packet or otherwise
2131 * corrupt it.
2132 *
2133 * Support for these actions is planned using packet
2134 * recirculation.
2135 */
2136 eth_type = htons(0);
2137 break;
2138
a097c0b2 2139 case OVS_ACTION_ATTR_SET:
2baf0e0c 2140 err = validate_set(a, key, sfa,
b940b3d7
JR
2141 &skip_copy, eth_type, false, log);
2142 if (err)
2143 return err;
2144 break;
2145
2146 case OVS_ACTION_ATTR_SET_MASKED:
2147 err = validate_set(a, key, sfa,
2148 &skip_copy, eth_type, true, log);
a097c0b2
PS
2149 if (err)
2150 return err;
2151 break;
2152
2153 case OVS_ACTION_ATTR_SAMPLE:
ccf43786 2154 err = validate_and_copy_sample(a, key, depth, sfa,
9233cef7 2155 eth_type, vlan_tci, log);
a097c0b2
PS
2156 if (err)
2157 return err;
2158 skip_copy = true;
2159 break;
2160
2161 default:
7d16c847 2162 OVS_NLERR(log, "Unknown action type %d", type);
a097c0b2
PS
2163 return -EINVAL;
2164 }
2165 if (!skip_copy) {
9233cef7 2166 err = copy_action(a, sfa, log);
a097c0b2
PS
2167 if (err)
2168 return err;
2169 }
2170 }
2171
2172 if (rem > 0)
2173 return -EINVAL;
2174
2175 return 0;
2176}
2177
b940b3d7 2178/* 'key' must be the masked key. */
ccf43786
SH
2179int ovs_nla_copy_actions(const struct nlattr *attr,
2180 const struct sw_flow_key *key,
9233cef7 2181 struct sw_flow_actions **sfa, bool log)
ccf43786 2182{
ff27161e
PS
2183 int err;
2184
9233cef7 2185 *sfa = nla_alloc_flow_actions(nla_len(attr), log);
ff27161e
PS
2186 if (IS_ERR(*sfa))
2187 return PTR_ERR(*sfa);
2188
2189 err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
9233cef7 2190 key->eth.tci, log);
ff27161e
PS
2191 if (err)
2192 kfree(*sfa);
2193
2194 return err;
ccf43786
SH
2195}
2196
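Caller sketch (hypothetical, modeled on the flow setup path in datapath.c): the OVS_FLOW_ATTR_ACTIONS payload is validated and copied against the already masked flow key; on success the caller owns the buffer until it is installed, on failure it has already been freed.

static int example_install_actions(const struct nlattr *actions_attr,
				   const struct sw_flow_key *masked_key,
				   struct sw_flow_actions **acts)
{
	int err;

	err = ovs_nla_copy_actions(actions_attr, masked_key, acts, true);
	if (err)
		return err;

	/* *acts is now ready to be attached to the flow, e.g. with
	 * rcu_assign_pointer(), under ovs_lock.
	 */
	return 0;
}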
a097c0b2
PS
2197static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
2198{
2199 const struct nlattr *a;
2200 struct nlattr *start;
2201 int err = 0, rem;
2202
2203 start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
2204 if (!start)
2205 return -EMSGSIZE;
2206
2207 nla_for_each_nested(a, attr, rem) {
2208 int type = nla_type(a);
2209 struct nlattr *st_sample;
2210
2211 switch (type) {
2212 case OVS_SAMPLE_ATTR_PROBABILITY:
2213 if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
2214 sizeof(u32), nla_data(a)))
2215 return -EMSGSIZE;
2216 break;
2217 case OVS_SAMPLE_ATTR_ACTIONS:
2218 st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
2219 if (!st_sample)
2220 return -EMSGSIZE;
2221 err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
2222 if (err)
2223 return err;
2224 nla_nest_end(skb, st_sample);
2225 break;
2226 }
2227 }
2228
2229 nla_nest_end(skb, start);
2230 return err;
2231}
2232
2233static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
2234{
2235 const struct nlattr *ovs_key = nla_data(a);
2236 int key_type = nla_type(ovs_key);
2237 struct nlattr *start;
2238 int err;
2239
2240 switch (key_type) {
f0cd669f
JG
2241 case OVS_KEY_ATTR_TUNNEL_INFO: {
2242 struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
2243
a097c0b2
PS
2244 start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
2245 if (!start)
2246 return -EMSGSIZE;
2247
f0cd669f 2248 err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
c1fc1411
JG
2249 tun_info->options_len ?
2250 tun_info->options : NULL,
2251 tun_info->options_len);
a097c0b2
PS
2252 if (err)
2253 return err;
2254 nla_nest_end(skb, start);
2255 break;
f0cd669f 2256 }
a097c0b2
PS
2257 default:
2258 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
2259 return -EMSGSIZE;
2260 break;
2261 }
2262
2263 return 0;
2264}
2265
b940b3d7
JR
2266static int masked_set_action_to_set_action_attr(const struct nlattr *a,
2267 struct sk_buff *skb)
2268{
2269 const struct nlattr *ovs_key = nla_data(a);
2270 size_t key_len = nla_len(ovs_key) / 2;
2271
2272 /* Revert the conversion we did from a non-masked set action to
2273 * a masked set action.
2274 */
2275 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key))
2276 return -EMSGSIZE;
2277
2278 return 0;
2279}
2280
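Illustrative only: because the stored payload is key-then-mask of equal size, nla_len(a) - key_len above is exactly the key length, so re-emitting just that first half reproduces the OVS_ACTION_ATTR_SET payload userspace originally sent.

/* Dump-side view of the stored attribute:
 *
 *   |<---------- nla_len(a) --------->|
 *   +----------------+----------------+
 *   |      key       |      mask      |
 *   +----------------+----------------+
 *   |<-- key_len --->|
 *
 * Only the key half is copied back out as OVS_ACTION_ATTR_SET.
 */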
a097c0b2
PS
2281int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
2282{
2283 const struct nlattr *a;
2284 int rem, err;
2285
2286 nla_for_each_attr(a, attr, len, rem) {
2287 int type = nla_type(a);
2288
2289 switch (type) {
2290 case OVS_ACTION_ATTR_SET:
2291 err = set_action_to_attr(a, skb);
2292 if (err)
2293 return err;
2294 break;
2295
b940b3d7
JR
2296 case OVS_ACTION_ATTR_SET_TO_MASKED:
2297 err = masked_set_action_to_set_action_attr(a, skb);
2298 if (err)
2299 return err;
2300 break;
2301
a097c0b2
PS
2302 case OVS_ACTION_ATTR_SAMPLE:
2303 err = sample_action_to_attr(a, skb);
2304 if (err)
2305 return err;
2306 break;
2307 default:
2308 if (nla_put(skb, type, nla_len(a), nla_data(a)))
2309 return -EMSGSIZE;
2310 break;
2311 }
2312 }
2313
2314 return 0;
2315}