/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/mpls.h>

#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport-vxlan.h"

struct ovs_len_tbl {
	int len;
	const struct ovs_len_tbl *next;
};

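/* Sentinel for ovs_len_tbl.len: the attribute is nested or variable
 * length, so its payload is validated by type-specific code rather
 * than by a fixed-length check.
 */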
#define OVS_ATTR_NESTED -1

static void update_range(struct sw_flow_match *match,
			 size_t offset, size_t size, bool is_mask)
{
	struct sw_flow_key_range *range;
	size_t start = rounddown(offset, sizeof(long));
	size_t end = roundup(offset + size, sizeof(long));

	if (!is_mask)
		range = &match->range;
	else
		range = &match->mask->range;

	if (range->start == range->end) {
		range->start = start;
		range->end = end;
		return;
	}

	if (range->start > start)
		range->start = start;

	if (range->end < end)
		range->end = end;
}

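/* The helpers below write a field into either the key or the mask and
 * grow the match range to cover it. An illustrative use, as seen later
 * in this file when parsing a tunnel attribute 'a':
 *
 *	SW_FLOW_KEY_PUT(match, tun_key.tun_id, nla_get_be64(a), is_mask);
 */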
#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
	do { \
		update_range(match, offsetof(struct sw_flow_key, field), \
			     sizeof((match)->key->field), is_mask); \
		if (is_mask) \
			(match)->mask->key.field = value; \
		else \
			(match)->key->field = value; \
	} while (0)

#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
	do { \
		update_range(match, offset, len, is_mask); \
		if (is_mask) \
			memcpy((u8 *)&(match)->mask->key + offset, value_p, len); \
		else \
			memcpy((u8 *)(match)->key + offset, value_p, len); \
	} while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
	SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
				  value_p, len, is_mask)

#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
	do { \
		update_range(match, offsetof(struct sw_flow_key, field), \
			     sizeof((match)->key->field), is_mask); \
		if (is_mask) \
			memset((u8 *)&(match)->mask->key.field, value, \
			       sizeof((match)->mask->key.field)); \
		else \
			memset((u8 *)&(match)->key->field, value, \
			       sizeof((match)->key->field)); \
	} while (0)

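/* Checks that the key attributes supplied are consistent with the
 * EtherType and IP protocol recorded in the key, and that the mask
 * only covers attributes whose exact-match prerequisites (EtherType,
 * IP protocol, ICMPv6 type) are themselves matched exactly.
 */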
static bool match_validate(const struct sw_flow_match *match,
			   u64 key_attrs, u64 mask_attrs, bool log)
{
	u64 key_expected = 1ULL << OVS_KEY_ATTR_ETHERNET;
	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

	/* The following mask attributes allowed only if they
	 * pass the validation tests.
	 */
	mask_allowed &= ~((1ULL << OVS_KEY_ATTR_IPV4)
			| (1ULL << OVS_KEY_ATTR_IPV6)
			| (1ULL << OVS_KEY_ATTR_TCP)
			| (1ULL << OVS_KEY_ATTR_TCP_FLAGS)
			| (1ULL << OVS_KEY_ATTR_UDP)
			| (1ULL << OVS_KEY_ATTR_SCTP)
			| (1ULL << OVS_KEY_ATTR_ICMP)
			| (1ULL << OVS_KEY_ATTR_ICMPV6)
			| (1ULL << OVS_KEY_ATTR_ARP)
			| (1ULL << OVS_KEY_ATTR_ND)
			| (1ULL << OVS_KEY_ATTR_MPLS));

	/* Always allowed mask fields. */
	mask_allowed |= ((1ULL << OVS_KEY_ATTR_TUNNEL)
		       | (1ULL << OVS_KEY_ATTR_IN_PORT)
		       | (1ULL << OVS_KEY_ATTR_ETHERTYPE));

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_ARP;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_ARP;
	}

	if (eth_p_mpls(match->key->eth.type)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_MPLS;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_MPLS;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_IPV4;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV4;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_IPV6;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV6;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1ULL << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMPV6;

				if (match->key->tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1ULL << OVS_KEY_ATTR_ND;
					if (match->mask && (match->mask->key.tp.src == htons(0xff)))
						mask_allowed |= 1ULL << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if ((key_attrs & key_expected) != key_expected) {
		/* Key attributes check failed. */
		OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
			  (unsigned long long)key_attrs,
			  (unsigned long long)key_expected);
		return false;
	}

	if ((mask_attrs & mask_allowed) != mask_attrs) {
		/* Mask attributes check failed. */
		OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
			  (unsigned long long)mask_attrs,
			  (unsigned long long)mask_allowed);
		return false;
	}

	return true;
}

size_t ovs_tun_key_attr_size(void)
{
	/* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	return    nla_total_size(8)    /* OVS_TUNNEL_KEY_ATTR_ID */
		+ nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
		+ nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_OAM */
		+ nla_total_size(256)  /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
		/* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
		 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
		 */
		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
		+ nla_total_size(2);   /* OVS_TUNNEL_KEY_ATTR_TP_DST */
}

size_t ovs_key_attr_size(void)
{
	/* Whenever adding new OVS_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);

	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + ovs_tun_key_attr_size()
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}

static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
	[OVS_TUNNEL_KEY_ATTR_ID]	    = { .len = sizeof(u64) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_SRC]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_DST]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_TOS]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_TTL]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_CSUM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_TP_SRC]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_TP_DST]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_OAM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_NESTED },
	[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED },
};

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP]	 = { .len = OVS_ATTR_NESTED },
	[OVS_KEY_ATTR_PRIORITY]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_IN_PORT]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_SKB_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_ETHERNET]	 = { .len = sizeof(struct ovs_key_ethernet) },
	[OVS_KEY_ATTR_VLAN]	 = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_IPV4]	 = { .len = sizeof(struct ovs_key_ipv4) },
	[OVS_KEY_ATTR_IPV6]	 = { .len = sizeof(struct ovs_key_ipv6) },
	[OVS_KEY_ATTR_TCP]	 = { .len = sizeof(struct ovs_key_tcp) },
	[OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_UDP]	 = { .len = sizeof(struct ovs_key_udp) },
	[OVS_KEY_ATTR_SCTP]	 = { .len = sizeof(struct ovs_key_sctp) },
	[OVS_KEY_ATTR_ICMP]	 = { .len = sizeof(struct ovs_key_icmp) },
	[OVS_KEY_ATTR_ICMPV6]	 = { .len = sizeof(struct ovs_key_icmpv6) },
	[OVS_KEY_ATTR_ARP]	 = { .len = sizeof(struct ovs_key_arp) },
	[OVS_KEY_ATTR_ND]	 = { .len = sizeof(struct ovs_key_nd) },
	[OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_DP_HASH]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_TUNNEL]	 = { .len = OVS_ATTR_NESTED,
				     .next = ovs_tunnel_key_lens, },
	[OVS_KEY_ATTR_MPLS]	 = { .len = sizeof(struct ovs_key_mpls) },
};

static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	if (!fp)
		return false;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;

	return true;
}

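/* Walks the nested attributes in 'attr', checks each one's type and
 * length, stores it in 'a' and sets the matching bit in '*attrsp'.
 * With 'nz' set (mask parsing), attributes whose payload is all zeroes
 * are skipped, since an all-zero mask is equivalent to an absent one.
 */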
static int __parse_flow_nlattrs(const struct nlattr *attr,
				const struct nlattr *a[],
				u64 *attrsp, bool log, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Key type %d is out of range max %d",
				  type, OVS_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (attrs & (1ULL << type)) {
			OVS_NLERR(log, "Duplicate key (type %d).", type);
			return -EINVAL;
		}

		expected_len = ovs_key_lens[type].len;
		if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) {
			OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
				  type, nla_len(nla), expected_len);
			return -EINVAL;
		}

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1ULL << type;
			a[type] = nla;
		}
	}
	if (rem) {
		OVS_NLERR(log, "Message has %d unknown bytes.", rem);
		return -EINVAL;
	}

	*attrsp = attrs;
	return 0;
}

static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp,
				   bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, true);
}

static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp,
			      bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, false);
}

static int genev_tun_opt_from_nlattr(const struct nlattr *a,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	unsigned long opt_key_offset;

	if (nla_len(a) > sizeof(match->key->tun_opts)) {
		OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
			  nla_len(a), sizeof(match->key->tun_opts));
		return -EINVAL;
	}

	if (nla_len(a) % 4 != 0) {
		OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
			  nla_len(a));
		return -EINVAL;
	}

	/* We need to record the length of the options passed
	 * down, otherwise packets with the same format but
	 * additional options will be silently matched.
	 */
	if (!is_mask) {
		SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
				false);
	} else {
		/* This is somewhat unusual because it looks at
		 * both the key and mask while parsing the
		 * attributes (and by extension assumes the key
		 * is parsed first). Normally, we would verify
		 * that each is the correct length and that the
		 * attributes line up in the validate function.
		 * However, that is difficult because this is
		 * variable length and we won't have the
		 * information later.
		 */
		if (match->key->tun_opts_len != nla_len(a)) {
			OVS_NLERR(log, "Geneve option len %d != mask len %d",
				  match->key->tun_opts_len, nla_len(a));
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
	}

	opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
				  nla_len(a), is_mask);
	return 0;
}

static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = {
	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_U32 },
};

static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
	unsigned long opt_key_offset;
	struct ovs_vxlan_opts opts;
	int err;

	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));

	err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy);
	if (err < 0)
		return err;

	memset(&opts, 0, sizeof(opts));

	if (tb[OVS_VXLAN_EXT_GBP])
		opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]);

	if (!is_mask)
		SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
	else
		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);

	opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
				  is_mask);
	return 0;
}

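/* Parses an OVS_KEY_ATTR_TUNNEL nest into the match. On success the
 * return value is the attribute type of the tunnel metadata block that
 * was present (OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS or
 * OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS), 0 if there was none, or a negative
 * errno on failure.
 */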
static int ipv4_tun_from_nlattr(const struct nlattr *attr,
				struct sw_flow_match *match, bool is_mask,
				bool log)
{
	struct nlattr *a;
	int rem;
	bool ttl = false;
	__be16 tun_flags = 0;
	int opts_type = 0;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int err;

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Tunnel attr %d out of range max %d",
				  type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (ovs_tunnel_key_lens[type].len != nla_len(a) &&
		    ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) {
			OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
				  type, nla_len(a), ovs_tunnel_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.tp_src,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_DST:
			SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_OAM:
			tun_flags |= TUNNEL_OAM;
			break;
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_GENEVE_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_VXLAN_OPT;
			opts_type = type;
			break;
		default:
			OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
				  type);
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);

	if (rem > 0) {
		OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (!is_mask) {
		if (!match->key->tun_key.ipv4_dst) {
			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
			return -EINVAL;
		}

		if (!ttl) {
			OVS_NLERR(log, "IPv4 tunnel TTL not specified.");
			return -EINVAL;
		}
	}

	return opts_type;
}

static int vxlan_opt_to_nlattr(struct sk_buff *skb,
			       const void *tun_opts, int swkey_tun_opts_len)
{
	const struct ovs_vxlan_opts *opts = tun_opts;
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}

static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
				const struct ovs_key_ipv4_tunnel *output,
				const void *tun_opts, int swkey_tun_opts_len)
{
	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
		return -EMSGSIZE;
	if (output->ipv4_src &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
		return -EMSGSIZE;
	if (output->ipv4_dst &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
		return -EMSGSIZE;
	if (output->ipv4_tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (output->tp_src &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
		return -EMSGSIZE;
	if (output->tp_dst &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	if (tun_opts) {
		if (output->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    swkey_tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
			return -EMSGSIZE;
	}

	return 0;
}

static int ipv4_tun_to_nlattr(struct sk_buff *skb,
			      const struct ovs_key_ipv4_tunnel *output,
			      const void *tun_opts, int swkey_tun_opts_len)
{
	struct nlattr *nla;
	int err;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}

int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
				  const struct ovs_tunnel_info *egress_tun_info)
{
	return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
				    egress_tun_info->options,
				    egress_tun_info->options_len);
}

static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
				 const struct nlattr **a, bool is_mask,
				 bool log)
{
	if (*attrs & (1ULL << OVS_KEY_ATTR_DP_HASH)) {
		u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);

		SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_DP_HASH);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_RECIRC_ID)) {
		u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);

		SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_RECIRC_ID);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
				nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask) {
			in_port = 0xffffffff; /* Always exact match in_port. */
		} else if (in_port >= DP_MAX_PORTS) {
			OVS_NLERR(log, "Port %d exceeds max allowable %d",
				  in_port, DP_MAX_PORTS);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
					 is_mask, log) < 0)
			return -EINVAL;
		*attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);
	}
	return 0;
}

static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
				const struct nlattr **a, bool is_mask,
				bool log)
{
	int err;

	err = metadata_from_nlattrs(match, &attrs, a, is_mask, log);
	if (err)
		return err;

	if (attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				   eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				   eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERNET);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_VLAN)) {
		__be16 tci;

		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (!(tci & htons(VLAN_TAG_PRESENT))) {
			if (is_mask)
				OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.");
			else
				OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set.");

			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_VLAN);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
		__be16 eth_type;

		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (is_mask) {
			/* Always exact match EtherType. */
			eth_type = htons(0xffff);
		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
			OVS_NLERR(log, "EtherType %x is less than min %x",
				  ntohs(eth_type), ETH_P_802_3_MIN);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
				  ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
				  ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
			OVS_NLERR(log,
				  "Invalid IPv6 flow label value (value=%x, max=%x).",
				  ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				   ipv6_key->ipv6_src,
				   sizeof(match->key->ipv6.addr.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				   ipv6_key->ipv6_dst,
				   sizeof(match->key->ipv6.addr.dst),
				   is_mask);

		attrs &= ~(1ULL << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				   arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				   arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1ULL << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_MPLS)) {
		const struct ovs_key_mpls *mpls_key;

		mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
		SW_FLOW_KEY_PUT(match, mpls.top_lse,
				mpls_key->mpls_lse, is_mask);

		attrs &= ~(1ULL << OVS_KEY_ATTR_MPLS);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_TCP_FLAGS)) {
		SW_FLOW_KEY_PUT(match, tp.flags,
				nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
				is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TCP_FLAGS);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
				   nd_key->nd_target,
				   sizeof(match->key->ipv6.nd.target),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
				   nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				   nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ND);
	}

	if (attrs != 0) {
		OVS_NLERR(log, "Unknown key attributes %llx",
			  (unsigned long long)attrs);
		return -EINVAL;
	}

	return 0;
}

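/* Recursively overwrites the payload of every attribute in a stream
 * with 'val', descending into nested attributes according to the
 * length table. Used below to turn a copy of the key attributes into
 * an exact-match (all-0xff) mask.
 */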
static void nlattr_set(struct nlattr *attr, u8 val,
		       const struct ovs_len_tbl *tbl)
{
	struct nlattr *nla;
	int rem;

	/* The nlattr stream should already have been validated */
	nla_for_each_nested(nla, attr, rem) {
		if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
			nlattr_set(nla, val, tbl[nla_type(nla)].next);
		else
			memset(nla_data(nla), val, nla_len(nla));
	}
}

static void mask_set_nlattr(struct nlattr *attr, u8 val)
{
	nlattr_set(attr, val, ovs_key_lens);
}

/**
 * ovs_nla_get_match - parses Netlink attributes into a flow key and
 * mask. In case the 'mask' is NULL, the flow is treated as an exact
 * match flow. Otherwise, it is treated as a wildcarded flow, except
 * when the mask does not include any don't-care bits.
 * @match: receives the extracted flow match information.
 * @nla_key: Netlink attribute holding a nested %OVS_KEY_ATTR_* Netlink
 * attribute sequence. The fields should be the values from the packet
 * that triggered the creation of this flow.
 * @nla_mask: Optional. Netlink attribute holding a nested %OVS_KEY_ATTR_*
 * attribute sequence that specifies the mask field of the wildcarded flow.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 */
int ovs_nla_get_match(struct sw_flow_match *match,
		      const struct nlattr *nla_key,
		      const struct nlattr *nla_mask,
		      bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct nlattr *encap;
	struct nlattr *newmask = NULL;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	bool encap_valid = false;
	int err;

	err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
	if (err)
		return err;

	if ((key_attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) &&
	    (key_attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) &&
	    (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
		__be16 tci;

		if (!((key_attrs & (1ULL << OVS_KEY_ATTR_VLAN)) &&
		      (key_attrs & (1ULL << OVS_KEY_ATTR_ENCAP)))) {
			OVS_NLERR(log, "Invalid Vlan frame.");
			return -EINVAL;
		}

		key_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		encap = a[OVS_KEY_ATTR_ENCAP];
		key_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
		encap_valid = true;

		if (tci & htons(VLAN_TAG_PRESENT)) {
			err = parse_flow_nlattrs(encap, a, &key_attrs, log);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap)) {
				OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute.");
				return -EINVAL;
			}
		} else {
			OVS_NLERR(log, "Encap attr is set for non-VLAN frame");
			return -EINVAL;
		}
	}

	err = ovs_key_from_nlattrs(match, key_attrs, a, false, log);
	if (err)
		return err;

	if (match->mask) {
		if (!nla_mask) {
			/* Create an exact match mask. We need to set to 0xff
			 * all the 'match->mask' fields that have been touched
			 * in 'match->key'. We cannot simply memset
			 * 'match->mask', because padding bytes and fields not
			 * specified in 'match->key' should be left to 0.
			 * Instead, we use a stream of netlink attributes,
			 * copied from 'key' and set to 0xff.
			 * ovs_key_from_nlattrs() will take care of filling
			 * 'match->mask' appropriately.
			 */
			newmask = kmemdup(nla_key,
					  nla_total_size(nla_len(nla_key)),
					  GFP_KERNEL);
			if (!newmask)
				return -ENOMEM;

			mask_set_nlattr(newmask, 0xff);

			/* The userspace does not send tunnel attributes that
			 * are 0, but we should not wildcard them nonetheless.
			 */
			if (match->key->tun_key.ipv4_dst)
				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
							 0xff, true);

			nla_mask = newmask;
		}

		err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
		if (err)
			goto free_newmask;

		/* Always match on tci. */
		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);

		if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) {
			__be16 eth_type = 0;
			__be16 tci = 0;

			if (!encap_valid) {
				OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame.");
				err = -EINVAL;
				goto free_newmask;
			}

			mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
			if (a[OVS_KEY_ATTR_ETHERTYPE])
				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

			if (eth_type == htons(0xffff)) {
				mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
				encap = a[OVS_KEY_ATTR_ENCAP];
				err = parse_flow_mask_nlattrs(encap, a,
							      &mask_attrs, log);
				if (err)
					goto free_newmask;
			} else {
				OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).",
					  ntohs(eth_type));
				err = -EINVAL;
				goto free_newmask;
			}

			if (a[OVS_KEY_ATTR_VLAN])
				tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

			if (!(tci & htons(VLAN_TAG_PRESENT))) {
				OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).",
					  ntohs(tci));
				err = -EINVAL;
				goto free_newmask;
			}
		}

		err = ovs_key_from_nlattrs(match, mask_attrs, a, true, log);
		if (err)
			goto free_newmask;
	}

	if (!match_validate(match, key_attrs, mask_attrs, log))
		err = -EINVAL;

free_newmask:
	kfree(newmask);
	return err;
}

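/* Returns the length of the user-supplied flow identifier (UFID)
 * attribute, or 0 if the attribute is absent or its length falls
 * outside [1, MAX_UFID_LENGTH].
 */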
static size_t get_ufid_len(const struct nlattr *attr, bool log)
{
	size_t len;

	if (!attr)
		return 0;

	len = nla_len(attr);
	if (len < 1 || len > MAX_UFID_LENGTH) {
		OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
			  nla_len(attr), MAX_UFID_LENGTH);
		return 0;
	}

	return len;
}

/* Initializes 'sfid', returning true if 'attr' contains a valid UFID,
 * or false otherwise.
 */
bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
		      bool log)
{
	sfid->ufid_len = get_ufid_len(attr, log);
	if (sfid->ufid_len)
		memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);

	return sfid->ufid_len;
}

int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
			   const struct sw_flow_key *key, bool log)
{
	struct sw_flow_key *new_key;

	if (ovs_nla_get_ufid(sfid, ufid, log))
		return 0;

	/* If UFID was not provided, use unmasked key. */
	new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
	if (!new_key)
		return -ENOMEM;
	memcpy(new_key, key, sizeof(*key));
	sfid->unmasked_key = new_key;

	return 0;
}

u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
{
	return attr ? nla_get_u32(attr) : 0;
}

/**
 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
 * @attr: Netlink attribute holding a nested %OVS_KEY_ATTR_* Netlink
 * attribute sequence.
 * @key: Receives the extracted in_port, priority, tun_key and skb_mark.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_nla_get_flow_metadata(const struct nlattr *attr,
			      struct sw_flow_key *key,
			      bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	struct sw_flow_match match;
	u64 attrs = 0;
	int err;

	err = parse_flow_nlattrs(attr, a, &attrs, log);
	if (err)
		return -EINVAL;

	memset(&match, 0, sizeof(match));
	match.key = key;

	memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
	key->phy.in_port = DP_MAX_PORTS;

	return metadata_from_nlattrs(&match, &attrs, a, false, log);
}

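/* Serializes 'output' as a nested stream of OVS_KEY_ATTR_* attributes,
 * using 'swkey' to decide which attributes apply. With 'is_mask' the
 * same layout is emitted, but the values are taken from the mask.
 */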
static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
			     const struct sw_flow_key *output, bool is_mask,
			     struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
		goto nla_put_failure;

	if ((swkey->tun_key.ipv4_dst || is_mask)) {
		const void *opts = NULL;

		if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
			opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);

		if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
				       swkey->tun_opts_len))
			goto nla_put_failure;
	}

	if (swkey->phy.in_port == DP_MAX_PORTS) {
		if (is_mask && (output->phy.in_port == 0xffff))
			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
				goto nla_put_failure;
	} else {
		u16 upper_u16;
		upper_u16 = !is_mask ? 0 : 0xffff;

		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
				(upper_u16 << 16) | output->phy.in_port))
			goto nla_put_failure;
	}

	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;

	eth_key = nla_data(nla);
	ether_addr_copy(eth_key->eth_src, output->eth.src);
	ether_addr_copy(eth_key->eth_dst, output->eth.dst);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		__be16 eth_type;
		eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else
		encap = NULL;

	if (swkey->eth.type == htons(ETH_P_802_2)) {
		/*
		 * Ethertype 802.2 is represented in the netlink with omitted
		 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
		 * 0xffff in the mask attribute. Ethertype can also
		 * be wildcarded.
		 */
		if (is_mask && output->eth.type)
			if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
					 output->eth.type))
				goto nla_put_failure;
		goto unencap;
	}

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
		goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = output->ipv4.addr.src;
		ipv4_key->ipv4_dst = output->ipv4.addr.dst;
		ipv4_key->ipv4_proto = output->ip.proto;
		ipv4_key->ipv4_tos = output->ip.tos;
		ipv4_key->ipv4_ttl = output->ip.ttl;
		ipv4_key->ipv4_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = output->ipv6.label;
		ipv6_key->ipv6_proto = output->ip.proto;
		ipv6_key->ipv6_tclass = output->ip.tos;
		ipv6_key->ipv6_hlimit = output->ip.ttl;
		ipv6_key->ipv6_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = output->ipv4.addr.src;
		arp_key->arp_tip = output->ipv4.addr.dst;
		arp_key->arp_op = htons(output->ip.proto);
		ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
		ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
	} else if (eth_p_mpls(swkey->eth.type)) {
		struct ovs_key_mpls *mpls_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
		if (!nla)
			goto nla_put_failure;
		mpls_key = nla_data(nla);
		mpls_key->mpls_lse = output->mpls.top_lse;
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			tcp_key->tcp_src = output->tp.src;
			tcp_key->tcp_dst = output->tp.dst;
			if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
					 output->tp.flags))
				goto nla_put_failure;
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			udp_key->udp_src = output->tp.src;
			udp_key->udp_dst = output->tp.dst;
		} else if (swkey->ip.proto == IPPROTO_SCTP) {
			struct ovs_key_sctp *sctp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
			if (!nla)
				goto nla_put_failure;
			sctp_key = nla_data(nla);
			sctp_key->sctp_src = output->tp.src;
			sctp_key->sctp_dst = output->tp.dst;
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(output->tp.src);
			icmp_key->icmp_code = ntohs(output->tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(output->tp.src);
			icmpv6_key->icmpv6_code = ntohs(output->tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
				ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

int ovs_nla_put_key(const struct sw_flow_key *swkey,
		    const struct sw_flow_key *output, int attr, bool is_mask,
		    struct sk_buff *skb)
{
	int err;
	struct nlattr *nla;

	nla = nla_nest_start(skb, attr);
	if (!nla)
		return -EMSGSIZE;
	err = __ovs_nla_put_key(swkey, output, is_mask, skb);
	if (err)
		return err;
	nla_nest_end(skb, nla);

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
			       flow->id.ufid);

	return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
			       OVS_FLOW_ATTR_KEY, false, skb);
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
{
	return ovs_nla_put_key(&flow->key, &flow->key,
			       OVS_FLOW_ATTR_KEY, false, skb);
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
{
	return ovs_nla_put_key(&flow->key, &flow->mask->key,
			       OVS_FLOW_ATTR_MASK, true, skb);
}

#define MAX_ACTIONS_BUFSIZE	(32 * 1024)

static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
{
	struct sw_flow_actions *sfa;

	if (size > MAX_ACTIONS_BUFSIZE) {
		OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
		return ERR_PTR(-EINVAL);
	}

	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = 0;
	return sfa;
}

/* RCU callback used by ovs_nla_free_flow_actions. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible.
 */
void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

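/* Ensures that '*sfa' has room for 'attr_len' more bytes of actions,
 * doubling the allocation (capped at MAX_ACTIONS_BUFSIZE) and copying
 * the existing actions over if it does not. Returns a pointer to the
 * newly reserved space.
 */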
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
				       int attr_len, bool log)
{
	struct sw_flow_actions *acts;
	int new_acts_size;
	int req_size = NLA_ALIGN(attr_len);
	int next_offset = offsetof(struct sw_flow_actions, actions) +
			  (*sfa)->actions_len;

	if (req_size <= (ksize(*sfa) - next_offset))
		goto out;

	new_acts_size = ksize(*sfa) * 2;

	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
			return ERR_PTR(-EMSGSIZE);
		new_acts_size = MAX_ACTIONS_BUFSIZE;
	}

	acts = nla_alloc_flow_actions(new_acts_size, log);
	if (IS_ERR(acts))
		return (void *)acts;

	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
	acts->actions_len = (*sfa)->actions_len;
	kfree(*sfa);
	*sfa = acts;

out:
	(*sfa)->actions_len += req_size;
	return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}

static struct nlattr *__add_action(struct sw_flow_actions **sfa,
				   int attrtype, void *data, int len, bool log)
{
	struct nlattr *a;

	a = reserve_sfa_size(sfa, nla_attr_size(len), log);
	if (IS_ERR(a))
		return a;

	a->nla_type = attrtype;
	a->nla_len = nla_attr_size(len);

	if (data)
		memcpy(nla_data(a), data, len);
	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));

	return a;
}

static int add_action(struct sw_flow_actions **sfa, int attrtype,
		      void *data, int len, bool log)
{
	struct nlattr *a;

	a = __add_action(sfa, attrtype, data, len, log);
	if (IS_ERR(a))
		return PTR_ERR(a);

	return 0;
}

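/* Nested actions are built in two steps: add_nested_action_start()
 * emits an empty attribute header and returns its offset, the nested
 * payload is then copied in, and add_nested_action_end() patches the
 * header's nla_len to cover everything added since the start (see
 * validate_and_copy_sample() below for a complete use).
 */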
static inline int add_nested_action_start(struct sw_flow_actions **sfa,
					  int attrtype, bool log)
{
	int used = (*sfa)->actions_len;
	int err;

	err = add_action(sfa, attrtype, NULL, 0, log);
	if (err)
		return err;

	return used;
}

static inline void add_nested_action_end(struct sw_flow_actions *sfa,
					 int st_offset)
{
	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
					      st_offset);

	a->nla_len = sfa->actions_len - st_offset;
}

static int __ovs_nla_copy_actions(const struct nlattr *attr,
				  const struct sw_flow_key *key,
				  int depth, struct sw_flow_actions **sfa,
				  __be16 eth_type, __be16 vlan_tci, bool log);

static int validate_and_copy_sample(const struct nlattr *attr,
				    const struct sw_flow_key *key, int depth,
				    struct sw_flow_actions **sfa,
				    __be16 eth_type, __be16 vlan_tci, bool log)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem, start, err, st_acts;

	memset(attrs, 0, sizeof(attrs));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;
		attrs[type] = a;
	}
	if (rem)
		return -EINVAL;

	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
	if (!probability || nla_len(probability) != sizeof(u32))
		return -EINVAL;

	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
		return -EINVAL;

	/* validation done, copy sample action. */
	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
	if (start < 0)
		return start;
	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
			 nla_data(probability), sizeof(u32), log);
	if (err)
		return err;
	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
	if (st_acts < 0)
		return st_acts;

	err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa,
				     eth_type, vlan_tci, log);
	if (err)
		return err;

	add_nested_action_end(*sfa, st_acts);
	add_nested_action_end(*sfa, start);

	return 0;
}

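/* A transport-port set action is only valid if the flow key shows the
 * packet actually has a transport header: an IPv4 or IPv6 EtherType
 * and a nonzero source or destination port.
 */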
static int validate_tp_port(const struct sw_flow_key *flow_key,
			    __be16 eth_type)
{
	if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) &&
	    (flow_key->tp.src || flow_key->tp.dst))
		return 0;

	return -EINVAL;
}

void ovs_match_init(struct sw_flow_match *match,
		    struct sw_flow_key *key,
		    struct sw_flow_mask *mask)
{
	memset(match, 0, sizeof(*match));
	match->key = key;
	match->mask = mask;

	memset(key, 0, sizeof(*key));

	if (mask) {
		memset(&mask->key, 0, sizeof(mask->key));
		mask->range.start = mask->range.end = 0;
	}
}

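/* Walks the Geneve options copied into the key, bounds-checks each
 * option TLV, and sets TUNNEL_CRIT_OPT in the tunnel flags if any
 * option has its critical bit set.
 */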
static int validate_geneve_opts(struct sw_flow_key *key)
{
	struct geneve_opt *option;
	int opts_len = key->tun_opts_len;
	bool crit_opt = false;

	option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
	while (opts_len > 0) {
		int len;

		if (opts_len < sizeof(*option))
			return -EINVAL;

		len = sizeof(*option) + option->length * 4;
		if (len > opts_len)
			return -EINVAL;

		crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);

		option = (struct geneve_opt *)((u8 *)option + len);
		opts_len -= len;
	}

	key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;

	return 0;
}

static int validate_and_copy_set_tun(const struct nlattr *attr,
				     struct sw_flow_actions **sfa, bool log)
{
	struct sw_flow_match match;
	struct sw_flow_key key;
	struct ovs_tunnel_info *tun_info;
	struct nlattr *a;
	int start, opts_type;
	int err = 0;

	ovs_match_init(&match, &key, NULL);
	opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
	if (opts_type < 0)
		return opts_type;

	if (key.tun_opts_len) {
		switch (opts_type) {
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			err = validate_geneve_opts(&key);
			if (err < 0)
				return err;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			break;
		}
	}

	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
	if (start < 0)
		return start;

	a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
			 sizeof(*tun_info) + key.tun_opts_len, log);
	if (IS_ERR(a))
		return PTR_ERR(a);

	tun_info = nla_data(a);
	tun_info->tunnel = key.tun_key;
	tun_info->options_len = key.tun_opts_len;

	if (tun_info->options_len) {
		/* We need to store the options in the action itself since
		 * everything else will go away after flow setup. We can append
		 * it to tun_info and then point there.
		 */
		memcpy((tun_info + 1),
		       TUN_METADATA_OPTS(&key, key.tun_opts_len), key.tun_opts_len);
		tun_info->options = (tun_info + 1);
	} else {
		tun_info->options = NULL;
	}

	add_nested_action_end(*sfa, start);

	return err;
}

static int validate_set(const struct nlattr *a,
			const struct sw_flow_key *flow_key,
			struct sw_flow_actions **sfa,
			bool *set_tun, __be16 eth_type, bool log)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);

	/* There can be only one key in an action. */
	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
		return -EINVAL;

1832 if (key_type > OVS_KEY_ATTR_MAX ||
1833 (ovs_key_lens[key_type].len != nla_len(ovs_key) &&
1834 ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
1835 return -EINVAL;
1836
1837 switch (key_type) {
1838 const struct ovs_key_ipv4 *ipv4_key;
1839 const struct ovs_key_ipv6 *ipv6_key;
1840 int err;
1841
1842 case OVS_KEY_ATTR_PRIORITY:
1843 case OVS_KEY_ATTR_SKB_MARK:
1844 case OVS_KEY_ATTR_ETHERNET:
1845 break;
1846
1847 case OVS_KEY_ATTR_TUNNEL:
1848 if (eth_p_mpls(eth_type))
1849 return -EINVAL;
1850
1851 *set_tun = true;
1852 err = validate_and_copy_set_tun(a, sfa, log);
1853 if (err)
1854 return err;
1855 break;
1856
1857 case OVS_KEY_ATTR_IPV4:
1858 if (eth_type != htons(ETH_P_IP))
1859 return -EINVAL;
1860
1861 if (!flow_key->ip.proto)
1862 return -EINVAL;
1863
1864 ipv4_key = nla_data(ovs_key);
1865 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
1866 return -EINVAL;
1867
1868 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
1869 return -EINVAL;
1870
1871 break;
1872
1873 case OVS_KEY_ATTR_IPV6:
1874 if (eth_type != htons(ETH_P_IPV6))
1875 return -EINVAL;
1876
1877 if (!flow_key->ip.proto)
1878 return -EINVAL;
1879
1880 ipv6_key = nla_data(ovs_key);
1881 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
1882 return -EINVAL;
1883
1884 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
1885 return -EINVAL;
1886
1887 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
1888 return -EINVAL;
1889
1890 break;
1891
1892 case OVS_KEY_ATTR_TCP:
1893 if (flow_key->ip.proto != IPPROTO_TCP)
1894 return -EINVAL;
1895
1896 return validate_tp_port(flow_key, eth_type);
1897
1898 case OVS_KEY_ATTR_UDP:
1899 if (flow_key->ip.proto != IPPROTO_UDP)
1900 return -EINVAL;
1901
1902 return validate_tp_port(flow_key, eth_type);
1903
1904 case OVS_KEY_ATTR_MPLS:
1905 if (!eth_p_mpls(eth_type))
1906 return -EINVAL;
1907 break;
1908
1909 case OVS_KEY_ATTR_SCTP:
1910 if (flow_key->ip.proto != IPPROTO_SCTP)
1911 return -EINVAL;
1912
1913 return validate_tp_port(flow_key, eth_type);
1914
1915 default:
1916 return -EINVAL;
1917 }
1918
1919 return 0;
1920 }
1921
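/* A userspace action must carry a non-zero netlink PID identifying the
 * socket that is to receive the upcall; the userdata and egress tunnel
 * port attributes are optional.
 */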
static int validate_userspace(const struct nlattr *attr)
{
	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
		[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
	};
	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
	int error;

	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
				 attr, userspace_policy);
	if (error)
		return error;

	if (!a[OVS_USERSPACE_ATTR_PID] ||
	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
		return -EINVAL;

	return 0;
}

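/* Append a verbatim copy of @from to the action buffer, growing it if
 * needed.  The copy length is rounded up with NLA_ALIGN(), so the
 * attribute's trailing padding travels with it.
 */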
static int copy_action(const struct nlattr *from,
		       struct sw_flow_actions **sfa, bool log)
{
	int totlen = NLA_ALIGN(from->nla_len);
	struct nlattr *to;

	to = reserve_sfa_size(sfa, from->nla_len, log);
	if (IS_ERR(to))
		return PTR_ERR(to);

	memcpy(to, from, totlen);
	return 0;
}

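/* Validate a nested list of actions and copy them into @sfa.
 * @eth_type and @vlan_tci track rewrites made by earlier push/pop VLAN
 * and MPLS actions in the list, so each action is checked against the
 * headers the packet will actually have when it executes.  @depth
 * bounds recursion through nested sample() actions.
 */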
static int __ovs_nla_copy_actions(const struct nlattr *attr,
				  const struct sw_flow_key *key,
				  int depth, struct sw_flow_actions **sfa,
				  __be16 eth_type, __be16 vlan_tci, bool log)
{
	const struct nlattr *a;
	int rem, err;

	if (depth >= SAMPLE_ACTION_DEPTH)
		return -EOVERFLOW;

	nla_for_each_nested(a, attr, rem) {
		/* Expected argument lengths, (u32)-1 for variable length. */
		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
			[OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
			[OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
			[OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
			[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
		};
		const struct ovs_action_push_vlan *vlan;
		int type = nla_type(a);
		bool skip_copy;

		if (type > OVS_ACTION_ATTR_MAX ||
		    (action_lens[type] != nla_len(a) &&
		     action_lens[type] != (u32)-1))
			return -EINVAL;

		skip_copy = false;
		switch (type) {
		case OVS_ACTION_ATTR_UNSPEC:
			return -EINVAL;

		case OVS_ACTION_ATTR_USERSPACE:
			err = validate_userspace(a);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;

			break;

		case OVS_ACTION_ATTR_HASH: {
			const struct ovs_action_hash *act_hash = nla_data(a);

			switch (act_hash->hash_alg) {
			case OVS_HASH_ALG_L4:
				break;
			default:
				return -EINVAL;
			}

			break;
		}

		case OVS_ACTION_ATTR_POP_VLAN:
			vlan_tci = htons(0);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			vlan = nla_data(a);
			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
				return -EINVAL;
			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
				return -EINVAL;
			vlan_tci = vlan->vlan_tci;
			break;

		case OVS_ACTION_ATTR_RECIRC:
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			const struct ovs_action_push_mpls *mpls = nla_data(a);

			if (!eth_p_mpls(mpls->mpls_ethertype))
				return -EINVAL;

			/* Prohibit push MPLS unless the current EtherType
			 * is on a whitelist, so that MPLS is only pushed
			 * onto packets with a known tag order.
			 */
			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
			    (eth_type != htons(ETH_P_IP) &&
			     eth_type != htons(ETH_P_IPV6) &&
			     eth_type != htons(ETH_P_ARP) &&
			     eth_type != htons(ETH_P_RARP) &&
			     !eth_p_mpls(eth_type)))
				return -EINVAL;
			eth_type = mpls->mpls_ethertype;
			break;
		}

		case OVS_ACTION_ATTR_POP_MPLS:
			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
			    !eth_p_mpls(eth_type))
				return -EINVAL;

			/* Disallow subsequent L2.5+ set and mpls_pop actions
			 * as there is no check here to ensure that the new
			 * eth_type is valid and thus set actions could
			 * write off the end of the packet or otherwise
			 * corrupt it.
			 *
			 * Support for these actions is planned using packet
			 * recirculation.
			 */
			eth_type = htons(0);
			break;

		case OVS_ACTION_ATTR_SET:
			err = validate_set(a, key, sfa,
					   &skip_copy, eth_type, log);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = validate_and_copy_sample(a, key, depth, sfa,
						       eth_type, vlan_tci, log);
			if (err)
				return err;
			skip_copy = true;
			break;

		default:
			OVS_NLERR(log, "Unknown Action type %d", type);
			return -EINVAL;
		}
		if (!skip_copy) {
			err = copy_action(a, sfa, log);
			if (err)
				return err;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}

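/* Top-level entry point for action validation: allocate the action
 * buffer, then validate and copy starting from the EtherType and VLAN
 * TCI of the flow key.  On error the partially filled buffer is freed
 * here, so callers see either a complete *sfa or an error code.
 */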
int ovs_nla_copy_actions(const struct nlattr *attr,
			 const struct sw_flow_key *key,
			 struct sw_flow_actions **sfa, bool log)
{
	int err;

	*sfa = nla_alloc_flow_actions(nla_len(attr), log);
	if (IS_ERR(*sfa))
		return PTR_ERR(*sfa);

	err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
				     key->eth.tci, log);
	if (err)
		kfree(*sfa);

	return err;
}

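/* Re-serialize a sample() action for a flow dump, emitting:
 *
 *   OVS_ACTION_ATTR_SAMPLE
 *     OVS_SAMPLE_ATTR_PROBABILITY  (u32)
 *     OVS_SAMPLE_ATTR_ACTIONS      (nested action list)
 *
 * The nested action list is translated back via ovs_nla_put_actions().
 */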
static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
	const struct nlattr *a;
	struct nlattr *start;
	int err = 0, rem;

	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
	if (!start)
		return -EMSGSIZE;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		struct nlattr *st_sample;

		switch (type) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
				    sizeof(u32), nla_data(a)))
				return -EMSGSIZE;
			break;
		case OVS_SAMPLE_ATTR_ACTIONS:
			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
			if (!st_sample)
				return -EMSGSIZE;
			err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
			if (err)
				return err;
			nla_nest_end(skb, st_sample);
			break;
		}
	}

	nla_nest_end(skb, start);
	return err;
}

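/* Re-serialize a set() action.  The internal OVS_KEY_ATTR_TUNNEL_INFO
 * form created by validate_and_copy_set_tun() is converted back into
 * the OVS_KEY_ATTR_TUNNEL netlink form that userspace originally
 * supplied; all other keys are emitted verbatim.
 */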
static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	struct nlattr *start;
	int err;

	switch (key_type) {
	case OVS_KEY_ATTR_TUNNEL_INFO: {
		struct ovs_tunnel_info *tun_info = nla_data(ovs_key);

		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
		if (!start)
			return -EMSGSIZE;

		err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
					 tun_info->options_len ?
					 tun_info->options : NULL,
					 tun_info->options_len);
		if (err)
			return err;
		nla_nest_end(skb, start);
		break;
	}
	default:
		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
			return -EMSGSIZE;
		break;
	}

	return 0;
}

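/* Serialize an action list into @skb for a flow dump.  set() and
 * sample() actions need translation back to their userspace forms;
 * everything else is copied through unchanged.
 */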
int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
{
	const struct nlattr *a;
	int rem, err;

	nla_for_each_attr(a, attr, len, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_ACTION_ATTR_SET:
			err = set_action_to_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample_action_to_attr(a, skb);
			if (err)
				return err;
			break;
		default:
			if (nla_put(skb, type, nla_len(a), nla_data(a)))
				return -EMSGSIZE;
			break;
		}
	}

	return 0;
}