1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include <sys/types.h>
19 #include <netinet/in.h>
20 #include <arpa/inet.h>
21 #include "odp-util.h"
22 #include <errno.h>
23 #include <inttypes.h>
24 #include <math.h>
25 #include <netinet/icmp6.h>
26 #include <netinet/ip6.h>
27 #include <stdlib.h>
28 #include <string.h>
29
30 #include "byte-order.h"
31 #include "coverage.h"
32 #include "dpif.h"
33 #include "openvswitch/dynamic-string.h"
34 #include "flow.h"
35 #include "netlink.h"
36 #include "openvswitch/ofpbuf.h"
37 #include "packets.h"
38 #include "simap.h"
39 #include "timeval.h"
40 #include "tun-metadata.h"
41 #include "unaligned.h"
42 #include "util.h"
43 #include "uuid.h"
44 #include "openvswitch/vlog.h"
45 #include "openvswitch/match.h"
46
47 VLOG_DEFINE_THIS_MODULE(odp_util);
48
49 /* The interface between userspace and kernel uses an "OVS_*" prefix.
50 * Since this is fairly non-specific for the OVS userspace components,
51 * "ODP_*" (Open vSwitch Datapath) is used as the prefix for
52 * interactions with the datapath.
53 */
54
55 /* The set of characters that may separate one action or one key attribute
56 * from another. */
57 static const char *delimiters = ", \t\r\n";
58 static const char *delimiters_end = ", \t\r\n)";
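/* For example, in the ODP string "ct(commit),recirc(0x1),2" the commas (and
 * any surrounding whitespace) separate the three actions, while the ')' in
 * delimiters_end additionally terminates a token inside a nested argument,
 * e.g. the helper name in "ct(helper=ftp)". */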
59
60 #define MAX_ODP_NESTED 32
61
62 struct parse_odp_context {
63 const struct simap *port_names;
64 int depth; /* Current nested depth of odp string. */
65 };
66
67 static int parse_odp_key_mask_attr(struct parse_odp_context *, const char *,
68 struct ofpbuf *, struct ofpbuf *);
69 static void format_odp_key_attr(const struct nlattr *a,
70 const struct nlattr *ma,
71 const struct hmap *portno_names, struct ds *ds,
72 bool verbose);
73
74 struct geneve_scan {
75 struct geneve_opt d[63];
76 int len;
77 };
78
79 static int scan_geneve(const char *s, struct geneve_scan *key,
80 struct geneve_scan *mask);
81 static void format_geneve_opts(const struct geneve_opt *opt,
82 const struct geneve_opt *mask, int opts_len,
83 struct ds *, bool verbose);
84
85 static struct nlattr *generate_all_wildcard_mask(const struct attr_len_tbl tbl[],
86 int max, struct ofpbuf *,
87 const struct nlattr *key);
88 static void format_u128(struct ds *d, const ovs_32aligned_u128 *key,
89 const ovs_32aligned_u128 *mask, bool verbose);
90 static int scan_u128(const char *s, ovs_u128 *value, ovs_u128 *mask);
91
92 static int parse_odp_action(const char *s, const struct simap *port_names,
93 struct ofpbuf *actions);
94
95 /* Returns one of the following for the action with the given OVS_ACTION_ATTR_*
96 * 'type':
97 *
98 * - For an action whose argument has a fixed length, returns that
99 * nonnegative length in bytes.
100 *
101 * - For an action with a variable-length argument, returns ATTR_LEN_VARIABLE.
102 *
103 * - For an invalid 'type', returns ATTR_LEN_INVALID. */
104 static int
105 odp_action_len(uint16_t type)
106 {
107 if (type > OVS_ACTION_ATTR_MAX) {
108 return -1;
109 }
110
111 switch ((enum ovs_action_attr) type) {
112 case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
113 case OVS_ACTION_ATTR_TRUNC: return sizeof(struct ovs_action_trunc);
114 case OVS_ACTION_ATTR_TUNNEL_PUSH: return ATTR_LEN_VARIABLE;
115 case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
116 case OVS_ACTION_ATTR_METER: return sizeof(uint32_t);
117 case OVS_ACTION_ATTR_USERSPACE: return ATTR_LEN_VARIABLE;
118 case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
119 case OVS_ACTION_ATTR_POP_VLAN: return 0;
120 case OVS_ACTION_ATTR_PUSH_MPLS: return sizeof(struct ovs_action_push_mpls);
121 case OVS_ACTION_ATTR_POP_MPLS: return sizeof(ovs_be16);
122 case OVS_ACTION_ATTR_RECIRC: return sizeof(uint32_t);
123 case OVS_ACTION_ATTR_HASH: return sizeof(struct ovs_action_hash);
124 case OVS_ACTION_ATTR_SET: return ATTR_LEN_VARIABLE;
125 case OVS_ACTION_ATTR_SET_MASKED: return ATTR_LEN_VARIABLE;
126 case OVS_ACTION_ATTR_SAMPLE: return ATTR_LEN_VARIABLE;
127 case OVS_ACTION_ATTR_CT: return ATTR_LEN_VARIABLE;
128 case OVS_ACTION_ATTR_CT_CLEAR: return 0;
129 case OVS_ACTION_ATTR_PUSH_ETH: return sizeof(struct ovs_action_push_eth);
130 case OVS_ACTION_ATTR_POP_ETH: return 0;
131 case OVS_ACTION_ATTR_CLONE: return ATTR_LEN_VARIABLE;
132 case OVS_ACTION_ATTR_PUSH_NSH: return ATTR_LEN_VARIABLE;
133 case OVS_ACTION_ATTR_POP_NSH: return 0;
134
135 case OVS_ACTION_ATTR_UNSPEC:
136 case __OVS_ACTION_ATTR_MAX:
137 return ATTR_LEN_INVALID;
138 }
139
140 return ATTR_LEN_INVALID;
141 }
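/* For instance, odp_action_len(OVS_ACTION_ATTR_OUTPUT) is sizeof(uint32_t)
 * (the fixed-size datapath port number), while actions with nested or
 * variable-size arguments, such as OVS_ACTION_ATTR_SET, report
 * ATTR_LEN_VARIABLE and must be sized with nl_attr_get_size() instead. */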
142
143 /* Returns a string form of 'attr'. The return value is either a statically
144 * allocated constant string or the 'bufsize'-byte buffer 'namebuf'. 'bufsize'
145 * should be at least OVS_KEY_ATTR_BUFSIZE. */
146 enum { OVS_KEY_ATTR_BUFSIZE = 3 + INT_STRLEN(unsigned int) + 1 };
147 static const char *
148 ovs_key_attr_to_string(enum ovs_key_attr attr, char *namebuf, size_t bufsize)
149 {
150 switch (attr) {
151 case OVS_KEY_ATTR_UNSPEC: return "unspec";
152 case OVS_KEY_ATTR_ENCAP: return "encap";
153 case OVS_KEY_ATTR_PRIORITY: return "skb_priority";
154 case OVS_KEY_ATTR_SKB_MARK: return "skb_mark";
155 case OVS_KEY_ATTR_CT_STATE: return "ct_state";
156 case OVS_KEY_ATTR_CT_ZONE: return "ct_zone";
157 case OVS_KEY_ATTR_CT_MARK: return "ct_mark";
158 case OVS_KEY_ATTR_CT_LABELS: return "ct_label";
159 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: return "ct_tuple4";
160 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: return "ct_tuple6";
161 case OVS_KEY_ATTR_TUNNEL: return "tunnel";
162 case OVS_KEY_ATTR_IN_PORT: return "in_port";
163 case OVS_KEY_ATTR_ETHERNET: return "eth";
164 case OVS_KEY_ATTR_VLAN: return "vlan";
165 case OVS_KEY_ATTR_ETHERTYPE: return "eth_type";
166 case OVS_KEY_ATTR_IPV4: return "ipv4";
167 case OVS_KEY_ATTR_IPV6: return "ipv6";
168 case OVS_KEY_ATTR_TCP: return "tcp";
169 case OVS_KEY_ATTR_TCP_FLAGS: return "tcp_flags";
170 case OVS_KEY_ATTR_UDP: return "udp";
171 case OVS_KEY_ATTR_SCTP: return "sctp";
172 case OVS_KEY_ATTR_ICMP: return "icmp";
173 case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
174 case OVS_KEY_ATTR_ARP: return "arp";
175 case OVS_KEY_ATTR_ND: return "nd";
176 case OVS_KEY_ATTR_ND_EXTENSIONS: return "nd_ext";
177 case OVS_KEY_ATTR_MPLS: return "mpls";
178 case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
179 case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
180 case OVS_KEY_ATTR_PACKET_TYPE: return "packet_type";
181 case OVS_KEY_ATTR_NSH: return "nsh";
182
183 case __OVS_KEY_ATTR_MAX:
184 default:
185 snprintf(namebuf, bufsize, "key%u", (unsigned int) attr);
186 return namebuf;
187 }
188 }
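/* E.g. OVS_KEY_ATTR_IPV4 formats as "ipv4" and OVS_KEY_ATTR_TUNNEL as
 * "tunnel", while an attribute unknown to this build falls through to the
 * generic "key<N>" form written into 'namebuf', e.g. "key60". */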
189
190 static void
191 format_generic_odp_action(struct ds *ds, const struct nlattr *a)
192 {
193 size_t len = nl_attr_get_size(a);
194
195 ds_put_format(ds, "action%d", nl_attr_type(a));
196 if (len) {
197 const uint8_t *unspec;
198 unsigned int i;
199
200 unspec = nl_attr_get(a);
201 for (i = 0; i < len; i++) {
202 ds_put_char(ds, i ? ' ': '(');
203 ds_put_format(ds, "%02x", unspec[i]);
204 }
205 ds_put_char(ds, ')');
206 }
207 }
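/* An unknown action of type 42 with payload bytes 00 01 02 is thus rendered
 * as "action42(00 01 02)"; an empty payload yields just "action42". */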
208
209 static void
210 format_odp_sample_action(struct ds *ds, const struct nlattr *attr,
211 const struct hmap *portno_names)
212 {
213 static const struct nl_policy ovs_sample_policy[] = {
214 [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
215 [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
216 };
217 struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
218 double percentage;
219 const struct nlattr *nla_acts;
220 int len;
221
222 ds_put_cstr(ds, "sample");
223
224 if (!nl_parse_nested(attr, ovs_sample_policy, a, ARRAY_SIZE(a))) {
225 ds_put_cstr(ds, "(error)");
226 return;
227 }
228
229 percentage = (100.0 * nl_attr_get_u32(a[OVS_SAMPLE_ATTR_PROBABILITY])) /
230 UINT32_MAX;
231
232 ds_put_format(ds, "(sample=%.1f%%,", percentage);
233
234 ds_put_cstr(ds, "actions(");
235 nla_acts = nl_attr_get(a[OVS_SAMPLE_ATTR_ACTIONS]);
236 len = nl_attr_get_size(a[OVS_SAMPLE_ATTR_ACTIONS]);
237 format_odp_actions(ds, nla_acts, len, portno_names);
238 ds_put_format(ds, "))");
239 }
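/* With a probability attribute of roughly UINT32_MAX/2 and a nested output to
 * port 3, this prints "sample(sample=50.0%,actions(3))". */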
240
241 static void
242 format_odp_clone_action(struct ds *ds, const struct nlattr *attr,
243 const struct hmap *portno_names)
244 {
245 const struct nlattr *nla_acts = nl_attr_get(attr);
246 int len = nl_attr_get_size(attr);
247
248 ds_put_cstr(ds, "clone");
249 ds_put_format(ds, "(");
250 format_odp_actions(ds, nla_acts, len, portno_names);
251 ds_put_format(ds, ")");
252 }
253
254 static void
255 format_nsh_key(struct ds *ds, const struct ovs_key_nsh *key)
256 {
257 ds_put_format(ds, "flags=%d", key->flags);
258 ds_put_format(ds, "ttl=%d", key->ttl);
259 ds_put_format(ds, ",mdtype=%d", key->mdtype);
260 ds_put_format(ds, ",np=%d", key->np);
261 ds_put_format(ds, ",spi=0x%x",
262 nsh_path_hdr_to_spi_uint32(key->path_hdr));
263 ds_put_format(ds, ",si=%d",
264 nsh_path_hdr_to_si(key->path_hdr));
265
266 switch (key->mdtype) {
267 case NSH_M_TYPE1:
268 for (int i = 0; i < 4; i++) {
269 ds_put_format(ds, ",c%d=0x%x", i + 1, ntohl(key->context[i]));
270 }
271 break;
272 case NSH_M_TYPE2:
273 default:
274 /* No support for matching other metadata formats yet. */
275 break;
276 }
277 }
278
279 static void
280 format_uint8_masked(struct ds *s, bool *first, const char *name,
281 uint8_t value, uint8_t mask)
282 {
283 if (mask != 0) {
284 if (!*first) {
285 ds_put_char(s, ',');
286 }
287 ds_put_format(s, "%s=", name);
288 if (mask == UINT8_MAX) {
289 ds_put_format(s, "%"PRIu8, value);
290 } else {
291 ds_put_format(s, "0x%02"PRIx8"/0x%02"PRIx8, value, mask);
292 }
293 *first = false;
294 }
295 }
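/* A fully masked field prints as plain decimal ("ttl=64"), a partially masked
 * one prints value/mask in hex ("ttl=0x40/0xf0"), and an all-zero mask
 * suppresses the field entirely. */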
296
297 static void
298 format_be32_masked(struct ds *s, bool *first, const char *name,
299 ovs_be32 value, ovs_be32 mask)
300 {
301 if (mask != htonl(0)) {
302 if (!*first) {
303 ds_put_char(s, ',');
304 }
305 ds_put_format(s, "%s=", name);
306 if (mask == OVS_BE32_MAX) {
307 ds_put_format(s, "0x%"PRIx32, ntohl(value));
308 } else {
309 ds_put_format(s, "0x%"PRIx32"/0x%08"PRIx32,
310 ntohl(value), ntohl(mask));
311 }
312 *first = false;
313 }
314 }
315
316 static void
317 format_nsh_key_mask(struct ds *ds, const struct ovs_key_nsh *key,
318 const struct ovs_key_nsh *mask)
319 {
320 if (!mask) {
321 format_nsh_key(ds, key);
322 } else {
323 bool first = true;
324 uint32_t spi = nsh_path_hdr_to_spi_uint32(key->path_hdr);
325 uint32_t spi_mask = nsh_path_hdr_to_spi_uint32(mask->path_hdr);
326 if (spi_mask == (NSH_SPI_MASK >> NSH_SPI_SHIFT)) {
327 spi_mask = UINT32_MAX;
328 }
329 uint8_t si = nsh_path_hdr_to_si(key->path_hdr);
330 uint8_t si_mask = nsh_path_hdr_to_si(mask->path_hdr);
331
332 format_uint8_masked(ds, &first, "flags", key->flags, mask->flags);
333 format_uint8_masked(ds, &first, "ttl", key->ttl, mask->ttl);
334 format_uint8_masked(ds, &first, "mdtype", key->mdtype, mask->mdtype);
335 format_uint8_masked(ds, &first, "np", key->np, mask->np);
336 format_be32_masked(ds, &first, "spi", htonl(spi), htonl(spi_mask));
337 format_uint8_masked(ds, &first, "si", si, si_mask);
338 format_be32_masked(ds, &first, "c1", key->context[0],
339 mask->context[0]);
340 format_be32_masked(ds, &first, "c2", key->context[1],
341 mask->context[1]);
342 format_be32_masked(ds, &first, "c3", key->context[2],
343 mask->context[2]);
344 format_be32_masked(ds, &first, "c4", key->context[3],
345 mask->context[3]);
346 }
347 }
348
349 static void
350 format_odp_push_nsh_action(struct ds *ds,
351 const struct nsh_hdr *nsh_hdr)
352 {
353 size_t mdlen = nsh_hdr_len(nsh_hdr) - NSH_BASE_HDR_LEN;
354 uint32_t spi = ntohl(nsh_get_spi(nsh_hdr));
355 uint8_t si = nsh_get_si(nsh_hdr);
356 uint8_t flags = nsh_get_flags(nsh_hdr);
357 uint8_t ttl = nsh_get_ttl(nsh_hdr);
358
359 ds_put_cstr(ds, "push_nsh(");
360 ds_put_format(ds, "flags=%d", flags);
361 ds_put_format(ds, ",ttl=%d", ttl);
362 ds_put_format(ds, ",mdtype=%d", nsh_hdr->md_type);
363 ds_put_format(ds, ",np=%d", nsh_hdr->next_proto);
364 ds_put_format(ds, ",spi=0x%x", spi);
365 ds_put_format(ds, ",si=%d", si);
366 switch (nsh_hdr->md_type) {
367 case NSH_M_TYPE1: {
368 const struct nsh_md1_ctx *md1_ctx = &nsh_hdr->md1;
369 for (int i = 0; i < 4; i++) {
370 ds_put_format(ds, ",c%d=0x%x", i + 1,
371 ntohl(get_16aligned_be32(&md1_ctx->context[i])));
372 }
373 break;
374 }
375 case NSH_M_TYPE2: {
376 const struct nsh_md2_tlv *md2_ctx = &nsh_hdr->md2;
377 ds_put_cstr(ds, ",md2=");
378 ds_put_hex(ds, md2_ctx, mdlen);
379 break;
380 }
381 default:
382 OVS_NOT_REACHED();
383 }
384 ds_put_format(ds, ")");
385 }
386
387 static const char *
388 slow_path_reason_to_string(uint32_t reason)
389 {
390 switch ((enum slow_path_reason) reason) {
391 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
392 SLOW_PATH_REASONS
393 #undef SPR
394 }
395
396 return NULL;
397 }
398
399 const char *
400 slow_path_reason_to_explanation(enum slow_path_reason reason)
401 {
402 switch (reason) {
403 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
404 SLOW_PATH_REASONS
405 #undef SPR
406 }
407
408 return "<unknown>";
409 }
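/* Each SPR(ENUM, STRING, EXPLANATION) entry in SLOW_PATH_REASONS expands to a
 * "case ENUM:" that returns the short STRING in slow_path_reason_to_string()
 * (used when formatting the slow_path() cookie in
 * format_odp_userspace_action() below) and the longer EXPLANATION here. */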
410
411 static int
412 parse_odp_flags(const char *s, const char *(*bit_to_string)(uint32_t),
413 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
414 {
415 return parse_flags(s, bit_to_string, ')', NULL, NULL,
416 res_flags, allowed, res_mask);
417 }
418
419 static void
420 format_odp_userspace_action(struct ds *ds, const struct nlattr *attr,
421 const struct hmap *portno_names)
422 {
423 static const struct nl_policy ovs_userspace_policy[] = {
424 [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
425 [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
426 .optional = true },
427 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = { .type = NL_A_U32,
428 .optional = true },
429 [OVS_USERSPACE_ATTR_ACTIONS] = { .type = NL_A_UNSPEC,
430 .optional = true },
431 };
432 struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
433 const struct nlattr *userdata_attr;
434 const struct nlattr *tunnel_out_port_attr;
435
436 if (!nl_parse_nested(attr, ovs_userspace_policy, a, ARRAY_SIZE(a))) {
437 ds_put_cstr(ds, "userspace(error)");
438 return;
439 }
440
441 ds_put_format(ds, "userspace(pid=%"PRIu32,
442 nl_attr_get_u32(a[OVS_USERSPACE_ATTR_PID]));
443
444 userdata_attr = a[OVS_USERSPACE_ATTR_USERDATA];
445
446 if (userdata_attr) {
447 const uint8_t *userdata = nl_attr_get(userdata_attr);
448 size_t userdata_len = nl_attr_get_size(userdata_attr);
449 bool userdata_unspec = true;
450 struct user_action_cookie cookie;
451
452 if (userdata_len == sizeof cookie) {
453 memcpy(&cookie, userdata, sizeof cookie);
454
455 userdata_unspec = false;
456
457 if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
458 ds_put_format(ds, ",sFlow("
459 "vid=%"PRIu16",pcp=%d,output=%"PRIu32")",
460 vlan_tci_to_vid(cookie.sflow.vlan_tci),
461 vlan_tci_to_pcp(cookie.sflow.vlan_tci),
462 cookie.sflow.output);
463 } else if (cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
464 ds_put_cstr(ds, ",slow_path(");
465 format_flags(ds, slow_path_reason_to_string,
466 cookie.slow_path.reason, ',');
467 ds_put_format(ds, ")");
468 } else if (cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
469 ds_put_format(ds, ",flow_sample(probability=%"PRIu16
470 ",collector_set_id=%"PRIu32
471 ",obs_domain_id=%"PRIu32
472 ",obs_point_id=%"PRIu32
473 ",output_port=",
474 cookie.flow_sample.probability,
475 cookie.flow_sample.collector_set_id,
476 cookie.flow_sample.obs_domain_id,
477 cookie.flow_sample.obs_point_id);
478 odp_portno_name_format(portno_names,
479 cookie.flow_sample.output_odp_port, ds);
480 if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_INGRESS) {
481 ds_put_cstr(ds, ",ingress");
482 } else if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_EGRESS) {
483 ds_put_cstr(ds, ",egress");
484 }
485 ds_put_char(ds, ')');
486 } else if (cookie.type == USER_ACTION_COOKIE_IPFIX) {
487 ds_put_format(ds, ",ipfix(output_port=");
488 odp_portno_name_format(portno_names,
489 cookie.ipfix.output_odp_port, ds);
490 ds_put_char(ds, ')');
491 } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
492 ds_put_format(ds, ",controller(reason=%"PRIu16
493 ",dont_send=%d"
494 ",continuation=%d"
495 ",recirc_id=%"PRIu32
496 ",rule_cookie=%#"PRIx64
497 ",controller_id=%"PRIu16
498 ",max_len=%"PRIu16,
499 cookie.controller.reason,
500 !!cookie.controller.dont_send,
501 !!cookie.controller.continuation,
502 cookie.controller.recirc_id,
503 ntohll(get_32aligned_be64(
504 &cookie.controller.rule_cookie)),
505 cookie.controller.controller_id,
506 cookie.controller.max_len);
507 ds_put_char(ds, ')');
508 } else {
509 userdata_unspec = true;
510 }
511 }
512
513 if (userdata_unspec) {
514 size_t i;
515 ds_put_format(ds, ",userdata(");
516 for (i = 0; i < userdata_len; i++) {
517 ds_put_format(ds, "%02x", userdata[i]);
518 }
519 ds_put_char(ds, ')');
520 }
521 }
522
523 if (a[OVS_USERSPACE_ATTR_ACTIONS]) {
524 ds_put_cstr(ds, ",actions");
525 }
526
527 tunnel_out_port_attr = a[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT];
528 if (tunnel_out_port_attr) {
529 ds_put_format(ds, ",tunnel_out_port=");
530 odp_portno_name_format(portno_names,
531 nl_attr_get_odp_port(tunnel_out_port_attr), ds);
532 }
533
534 ds_put_char(ds, ')');
535 }
536
537 static void
538 format_vlan_tci(struct ds *ds, ovs_be16 tci, ovs_be16 mask, bool verbose)
539 {
540 if (verbose || vlan_tci_to_vid(tci) || vlan_tci_to_vid(mask)) {
541 ds_put_format(ds, "vid=%"PRIu16, vlan_tci_to_vid(tci));
542 if (vlan_tci_to_vid(mask) != VLAN_VID_MASK) { /* Partially masked. */
543 ds_put_format(ds, "/0x%"PRIx16, vlan_tci_to_vid(mask));
544         }
545 ds_put_char(ds, ',');
546 }
547 if (verbose || vlan_tci_to_pcp(tci) || vlan_tci_to_pcp(mask)) {
548 ds_put_format(ds, "pcp=%d", vlan_tci_to_pcp(tci));
549 if (vlan_tci_to_pcp(mask) != (VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
550 ds_put_format(ds, "/0x%x", vlan_tci_to_pcp(mask));
551 }
552 ds_put_char(ds, ',');
553 }
554 if (!(tci & htons(VLAN_CFI))) {
555 ds_put_cstr(ds, "cfi=0");
556 ds_put_char(ds, ',');
557 }
558 ds_chomp(ds, ',');
559 }
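/* Typical output is "vid=100,pcp=3"; a partially masked VID appends its mask,
 * e.g. "vid=100/0xff0", and ",cfi=0" is added when the CFI bit is clear.
 * Unless 'verbose' is set, vid and pcp are omitted when both the value and
 * the mask are zero. */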
560
561 static void
562 format_mpls_lse(struct ds *ds, ovs_be32 mpls_lse)
563 {
564 ds_put_format(ds, "label=%"PRIu32",tc=%d,ttl=%d,bos=%d",
565 mpls_lse_to_label(mpls_lse),
566 mpls_lse_to_tc(mpls_lse),
567 mpls_lse_to_ttl(mpls_lse),
568 mpls_lse_to_bos(mpls_lse));
569 }
570
571 static void
572 format_mpls(struct ds *ds, const struct ovs_key_mpls *mpls_key,
573 const struct ovs_key_mpls *mpls_mask, int n)
574 {
575 for (int i = 0; i < n; i++) {
576 ovs_be32 key = mpls_key[i].mpls_lse;
577
578 if (mpls_mask == NULL) {
579 format_mpls_lse(ds, key);
580 } else {
581 ovs_be32 mask = mpls_mask[i].mpls_lse;
582
583 ds_put_format(ds, "label=%"PRIu32"/0x%x,tc=%d/%x,ttl=%d/0x%x,bos=%d/%x",
584 mpls_lse_to_label(key), mpls_lse_to_label(mask),
585 mpls_lse_to_tc(key), mpls_lse_to_tc(mask),
586 mpls_lse_to_ttl(key), mpls_lse_to_ttl(mask),
587 mpls_lse_to_bos(key), mpls_lse_to_bos(mask));
588 }
589 ds_put_char(ds, ',');
590 }
591 ds_chomp(ds, ',');
592 }
593
594 static void
595 format_odp_recirc_action(struct ds *ds, uint32_t recirc_id)
596 {
597 ds_put_format(ds, "recirc(%#"PRIx32")", recirc_id);
598 }
599
600 static void
601 format_odp_hash_action(struct ds *ds, const struct ovs_action_hash *hash_act)
602 {
603 ds_put_format(ds, "hash(");
604
605 if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
606 ds_put_format(ds, "l4(%"PRIu32")", hash_act->hash_basis);
607 } else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
608 ds_put_format(ds, "sym_l4(%"PRIu32")", hash_act->hash_basis);
609 } else {
610 ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
611 hash_act->hash_alg);
612 }
613 ds_put_format(ds, ")");
614 }
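/* E.g. an OVS_HASH_ALG_L4 hash with basis 0 prints as "hash(l4(0))", and an
 * unrecognized algorithm falls back to
 * "hash(Unknown hash algorithm(<alg>))". */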
615
616 static const void *
617 format_udp_tnl_push_header(struct ds *ds, const struct udp_header *udp)
618 {
619 ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16",csum=0x%"PRIx16"),",
620 ntohs(udp->udp_src), ntohs(udp->udp_dst),
621 ntohs(udp->udp_csum));
622
623 return udp + 1;
624 }
625
626 static void
627 format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
628 {
629 const struct eth_header *eth;
630 const void *l3;
631 const void *l4;
632 const struct udp_header *udp;
633
634 eth = (const struct eth_header *)data->header;
635
636 l3 = eth + 1;
637
638 /* Ethernet */
639 ds_put_format(ds, "header(size=%"PRIu32",type=%"PRIu32",eth(dst=",
640 data->header_len, data->tnl_type);
641 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
642 ds_put_format(ds, ",src=");
643 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
644 ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
645
646 if (eth->eth_type == htons(ETH_TYPE_IP)) {
647 /* IPv4 */
648 const struct ip_header *ip = l3;
649 ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
650 ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
651 IP_ARGS(get_16aligned_be32(&ip->ip_src)),
652 IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
653 ip->ip_proto, ip->ip_tos,
654 ip->ip_ttl,
655 ntohs(ip->ip_frag_off));
656 l4 = (ip + 1);
657 } else {
658 const struct ovs_16aligned_ip6_hdr *ip6 = l3;
659 struct in6_addr src, dst;
660 memcpy(&src, &ip6->ip6_src, sizeof src);
661 memcpy(&dst, &ip6->ip6_dst, sizeof dst);
662 uint32_t ipv6_flow = ntohl(get_16aligned_be32(&ip6->ip6_flow));
663
664 ds_put_format(ds, "ipv6(src=");
665 ipv6_format_addr(&src, ds);
666 ds_put_format(ds, ",dst=");
667 ipv6_format_addr(&dst, ds);
668 ds_put_format(ds, ",label=%i,proto=%"PRIu8",tclass=0x%"PRIx32
669 ",hlimit=%"PRIu8"),",
670 ipv6_flow & IPV6_LABEL_MASK, ip6->ip6_nxt,
671 (ipv6_flow >> 20) & 0xff, ip6->ip6_hlim);
672 l4 = (ip6 + 1);
673 }
674
675 udp = (const struct udp_header *) l4;
676
677 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
678 const struct vxlanhdr *vxh;
679
680 vxh = format_udp_tnl_push_header(ds, udp);
681
682 ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
683 ntohl(get_16aligned_be32(&vxh->vx_flags)),
684 ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
685 } else if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
686 const struct genevehdr *gnh;
687
688 gnh = format_udp_tnl_push_header(ds, udp);
689
690 ds_put_format(ds, "geneve(%s%svni=0x%"PRIx32,
691 gnh->oam ? "oam," : "",
692 gnh->critical ? "crit," : "",
693 ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
694
695 if (gnh->opt_len) {
696 ds_put_cstr(ds, ",options(");
697 format_geneve_opts(gnh->options, NULL, gnh->opt_len * 4,
698 ds, false);
699 ds_put_char(ds, ')');
700 }
701
702 ds_put_char(ds, ')');
703 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE ||
704 data->tnl_type == OVS_VPORT_TYPE_IP6GRE) {
705 const struct gre_base_hdr *greh;
706 ovs_16aligned_be32 *options;
707
708 greh = (const struct gre_base_hdr *) l4;
709
710 ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
711 ntohs(greh->flags), ntohs(greh->protocol));
712 options = (ovs_16aligned_be32 *)(greh + 1);
713 if (greh->flags & htons(GRE_CSUM)) {
714 ds_put_format(ds, ",csum=0x%"PRIx16, ntohs(*((ovs_be16 *)options)));
715 options++;
716 }
717 if (greh->flags & htons(GRE_KEY)) {
718 ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
719 options++;
720 }
721 if (greh->flags & htons(GRE_SEQ)) {
722 ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
723 options++;
724 }
725 ds_put_format(ds, ")");
726 } else if (data->tnl_type == OVS_VPORT_TYPE_ERSPAN ||
727 data->tnl_type == OVS_VPORT_TYPE_IP6ERSPAN) {
728 const struct gre_base_hdr *greh;
729 const struct erspan_base_hdr *ersh;
730
731 greh = (const struct gre_base_hdr *) l4;
732 ersh = ERSPAN_HDR(greh);
733
734 if (ersh->ver == 1) {
735 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
736 ersh + 1);
737 ds_put_format(ds, "erspan(ver=1,sid=0x%"PRIx16",idx=0x%"PRIx32")",
738 get_sid(ersh), ntohl(get_16aligned_be32(index)));
739 } else if (ersh->ver == 2) {
740 struct erspan_md2 *md2 = ALIGNED_CAST(struct erspan_md2 *,
741 ersh + 1);
742 ds_put_format(ds, "erspan(ver=2,sid=0x%"PRIx16
743 ",dir=%"PRIu8",hwid=0x%"PRIx8")",
744 get_sid(ersh), md2->dir, get_hwid(md2));
745 } else {
746 VLOG_WARN("%s Invalid ERSPAN version %d\n", __func__, ersh->ver);
747 }
748 }
749 ds_put_format(ds, ")");
750 }
751
752 static void
753 format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr,
754 const struct hmap *portno_names)
755 {
756 struct ovs_action_push_tnl *data;
757
758 data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
759
760 ds_put_cstr(ds, "tnl_push(tnl_port(");
761 odp_portno_name_format(portno_names, data->tnl_port, ds);
762 ds_put_cstr(ds, "),");
763 format_odp_tnl_push_header(ds, data);
764 ds_put_format(ds, ",out_port(");
765 odp_portno_name_format(portno_names, data->out_port, ds);
766 ds_put_cstr(ds, "))");
767 }
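/* The resulting form mirrors what ovs_parse_tnl_push() below accepts, roughly
 * "tnl_push(tnl_port(<port>),header(size=...,type=...,eth(...),ipv4(...),
 * udp(...),vxlan(...)),out_port(<port>))". */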
768
769 static const struct nl_policy ovs_nat_policy[] = {
770 [OVS_NAT_ATTR_SRC] = { .type = NL_A_FLAG, .optional = true, },
771 [OVS_NAT_ATTR_DST] = { .type = NL_A_FLAG, .optional = true, },
772 [OVS_NAT_ATTR_IP_MIN] = { .type = NL_A_UNSPEC, .optional = true,
773 .min_len = sizeof(struct in_addr),
774 .max_len = sizeof(struct in6_addr)},
775 [OVS_NAT_ATTR_IP_MAX] = { .type = NL_A_UNSPEC, .optional = true,
776 .min_len = sizeof(struct in_addr),
777 .max_len = sizeof(struct in6_addr)},
778 [OVS_NAT_ATTR_PROTO_MIN] = { .type = NL_A_U16, .optional = true, },
779 [OVS_NAT_ATTR_PROTO_MAX] = { .type = NL_A_U16, .optional = true, },
780 [OVS_NAT_ATTR_PERSISTENT] = { .type = NL_A_FLAG, .optional = true, },
781 [OVS_NAT_ATTR_PROTO_HASH] = { .type = NL_A_FLAG, .optional = true, },
782 [OVS_NAT_ATTR_PROTO_RANDOM] = { .type = NL_A_FLAG, .optional = true, },
783 };
784
785 static void
786 format_odp_ct_nat(struct ds *ds, const struct nlattr *attr)
787 {
788 struct nlattr *a[ARRAY_SIZE(ovs_nat_policy)];
789 size_t addr_len;
790 ovs_be32 ip_min, ip_max;
791 struct in6_addr ip6_min, ip6_max;
792 uint16_t proto_min, proto_max;
793
794 if (!nl_parse_nested(attr, ovs_nat_policy, a, ARRAY_SIZE(a))) {
795 ds_put_cstr(ds, "nat(error: nl_parse_nested() failed.)");
796 return;
797 }
798 /* If no type, then nothing else either. */
799 if (!(a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST])
800 && (a[OVS_NAT_ATTR_IP_MIN] || a[OVS_NAT_ATTR_IP_MAX]
801 || a[OVS_NAT_ATTR_PROTO_MIN] || a[OVS_NAT_ATTR_PROTO_MAX]
802 || a[OVS_NAT_ATTR_PERSISTENT] || a[OVS_NAT_ATTR_PROTO_HASH]
803 || a[OVS_NAT_ATTR_PROTO_RANDOM])) {
804 ds_put_cstr(ds, "nat(error: options allowed only with \"src\" or \"dst\")");
805 return;
806 }
807     /* SNAT and DNAT must not both be specified. */
808 if (a[OVS_NAT_ATTR_SRC] && a[OVS_NAT_ATTR_DST]) {
809 ds_put_cstr(ds, "nat(error: Only one of \"src\" or \"dst\" may be present.)");
810 return;
811 }
812 /* proto may not appear without ip. */
813 if (!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_PROTO_MIN]) {
814 ds_put_cstr(ds, "nat(error: proto but no IP.)");
815 return;
816 }
817 /* MAX may not appear without MIN. */
818 if ((!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX])
819 || (!a[OVS_NAT_ATTR_PROTO_MIN] && a[OVS_NAT_ATTR_PROTO_MAX])) {
820 ds_put_cstr(ds, "nat(error: range max without min.)");
821 return;
822 }
823 /* Address sizes must match. */
824 if ((a[OVS_NAT_ATTR_IP_MIN]
825 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(ovs_be32) &&
826 nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(struct in6_addr)))
827 || (a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX]
828 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN])
829 != nl_attr_get_size(a[OVS_NAT_ATTR_IP_MAX])))) {
830 ds_put_cstr(ds, "nat(error: IP address sizes do not match)");
831 return;
832 }
833
834 addr_len = a[OVS_NAT_ATTR_IP_MIN]
835 ? nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) : 0;
836 ip_min = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MIN]
837 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MIN]) : 0;
838 ip_max = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MAX]
839 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MAX]) : 0;
840 if (addr_len == sizeof ip6_min) {
841 ip6_min = a[OVS_NAT_ATTR_IP_MIN]
842 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MIN])
843 : in6addr_any;
844 ip6_max = a[OVS_NAT_ATTR_IP_MAX]
845 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MAX])
846 : in6addr_any;
847 }
848 proto_min = a[OVS_NAT_ATTR_PROTO_MIN]
849 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MIN]) : 0;
850 proto_max = a[OVS_NAT_ATTR_PROTO_MAX]
851 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MAX]) : 0;
852
853 if ((addr_len == sizeof(ovs_be32)
854 && ip_max && ntohl(ip_min) > ntohl(ip_max))
855 || (addr_len == sizeof(struct in6_addr)
856 && !ipv6_mask_is_any(&ip6_max)
857 && memcmp(&ip6_min, &ip6_max, sizeof ip6_min) > 0)
858 || (proto_max && proto_min > proto_max)) {
859 ds_put_cstr(ds, "nat(range error)");
860 return;
861 }
862
863 ds_put_cstr(ds, "nat");
864 if (a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST]) {
865 ds_put_char(ds, '(');
866 if (a[OVS_NAT_ATTR_SRC]) {
867 ds_put_cstr(ds, "src");
868 } else if (a[OVS_NAT_ATTR_DST]) {
869 ds_put_cstr(ds, "dst");
870 }
871
872 if (addr_len > 0) {
873 ds_put_cstr(ds, "=");
874
875 if (addr_len == sizeof ip_min) {
876 ds_put_format(ds, IP_FMT, IP_ARGS(ip_min));
877
878 if (ip_max && ip_max != ip_min) {
879 ds_put_format(ds, "-"IP_FMT, IP_ARGS(ip_max));
880 }
881 } else if (addr_len == sizeof ip6_min) {
882 ipv6_format_addr_bracket(&ip6_min, ds, proto_min);
883
884 if (!ipv6_mask_is_any(&ip6_max) &&
885 memcmp(&ip6_max, &ip6_min, sizeof ip6_max) != 0) {
886 ds_put_char(ds, '-');
887 ipv6_format_addr_bracket(&ip6_max, ds, proto_min);
888 }
889 }
890 if (proto_min) {
891 ds_put_format(ds, ":%"PRIu16, proto_min);
892
893 if (proto_max && proto_max != proto_min) {
894 ds_put_format(ds, "-%"PRIu16, proto_max);
895 }
896 }
897 }
898 ds_put_char(ds, ',');
899 if (a[OVS_NAT_ATTR_PERSISTENT]) {
900 ds_put_cstr(ds, "persistent,");
901 }
902 if (a[OVS_NAT_ATTR_PROTO_HASH]) {
903 ds_put_cstr(ds, "hash,");
904 }
905 if (a[OVS_NAT_ATTR_PROTO_RANDOM]) {
906 ds_put_cstr(ds, "random,");
907 }
908 ds_chomp(ds, ',');
909 ds_put_char(ds, ')');
910 }
911 }
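/* Example renderings: "nat(src=10.0.0.1-10.0.0.254:4000-5000,persistent)" for
 * an IPv4 SNAT range with a port range, or simply "nat" when no attributes
 * are present. */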
912
913 static const struct nl_policy ovs_conntrack_policy[] = {
914 [OVS_CT_ATTR_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
915 [OVS_CT_ATTR_FORCE_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
916 [OVS_CT_ATTR_ZONE] = { .type = NL_A_U16, .optional = true, },
917 [OVS_CT_ATTR_MARK] = { .type = NL_A_UNSPEC, .optional = true,
918 .min_len = sizeof(uint32_t) * 2 },
919 [OVS_CT_ATTR_LABELS] = { .type = NL_A_UNSPEC, .optional = true,
920 .min_len = sizeof(struct ovs_key_ct_labels) * 2 },
921 [OVS_CT_ATTR_HELPER] = { .type = NL_A_STRING, .optional = true,
922 .min_len = 1, .max_len = 16 },
923 [OVS_CT_ATTR_NAT] = { .type = NL_A_UNSPEC, .optional = true },
924 };
925
926 static void
927 format_odp_conntrack_action(struct ds *ds, const struct nlattr *attr)
928 {
929 struct nlattr *a[ARRAY_SIZE(ovs_conntrack_policy)];
930 const struct {
931 ovs_32aligned_u128 value;
932 ovs_32aligned_u128 mask;
933 } *label;
934 const uint32_t *mark;
935 const char *helper;
936 uint16_t zone;
937 bool commit, force;
938 const struct nlattr *nat;
939
940 if (!nl_parse_nested(attr, ovs_conntrack_policy, a, ARRAY_SIZE(a))) {
941 ds_put_cstr(ds, "ct(error)");
942 return;
943 }
944
945 commit = a[OVS_CT_ATTR_COMMIT] ? true : false;
946 force = a[OVS_CT_ATTR_FORCE_COMMIT] ? true : false;
947 zone = a[OVS_CT_ATTR_ZONE] ? nl_attr_get_u16(a[OVS_CT_ATTR_ZONE]) : 0;
948 mark = a[OVS_CT_ATTR_MARK] ? nl_attr_get(a[OVS_CT_ATTR_MARK]) : NULL;
949 label = a[OVS_CT_ATTR_LABELS] ? nl_attr_get(a[OVS_CT_ATTR_LABELS]): NULL;
950 helper = a[OVS_CT_ATTR_HELPER] ? nl_attr_get(a[OVS_CT_ATTR_HELPER]) : NULL;
951 nat = a[OVS_CT_ATTR_NAT];
952
953 ds_put_format(ds, "ct");
954 if (commit || force || zone || mark || label || helper || nat) {
955 ds_put_cstr(ds, "(");
956 if (commit) {
957 ds_put_format(ds, "commit,");
958 }
959 if (force) {
960 ds_put_format(ds, "force_commit,");
961 }
962 if (zone) {
963 ds_put_format(ds, "zone=%"PRIu16",", zone);
964 }
965 if (mark) {
966 ds_put_format(ds, "mark=%#"PRIx32"/%#"PRIx32",", *mark,
967 *(mark + 1));
968 }
969 if (label) {
970 ds_put_format(ds, "label=");
971 format_u128(ds, &label->value, &label->mask, true);
972 ds_put_char(ds, ',');
973 }
974 if (helper) {
975 ds_put_format(ds, "helper=%s,", helper);
976 }
977 if (nat) {
978 format_odp_ct_nat(ds, nat);
979 }
980 ds_chomp(ds, ',');
981 ds_put_cstr(ds, ")");
982 }
983 }
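/* E.g. "ct(commit,zone=5,mark=0x1/0xff,nat(src=10.0.0.1))"; with no nested
 * attributes at all the action is printed as a bare "ct". */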
984
985 static const struct attr_len_tbl
986 ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
987 [OVS_NSH_KEY_ATTR_BASE] = { .len = 8 },
988 [OVS_NSH_KEY_ATTR_MD1] = { .len = 16 },
989 [OVS_NSH_KEY_ATTR_MD2] = { .len = ATTR_LEN_VARIABLE },
990 };
991
992 static void
993 format_odp_set_nsh(struct ds *ds, const struct nlattr *attr)
994 {
995 unsigned int left;
996 const struct nlattr *a;
997 struct ovs_key_nsh nsh;
998 struct ovs_key_nsh nsh_mask;
999
1000 memset(&nsh, 0, sizeof nsh);
1001 memset(&nsh_mask, 0xff, sizeof nsh_mask);
1002
1003 NL_NESTED_FOR_EACH (a, left, attr) {
1004 enum ovs_nsh_key_attr type = nl_attr_type(a);
1005 size_t len = nl_attr_get_size(a);
1006
1007 if (type >= OVS_NSH_KEY_ATTR_MAX) {
1008 return;
1009 }
1010
1011 int expected_len = ovs_nsh_key_attr_lens[type].len;
1012 if ((expected_len != ATTR_LEN_VARIABLE) && (len != 2 * expected_len)) {
1013 return;
1014 }
1015
1016 switch (type) {
1017 case OVS_NSH_KEY_ATTR_UNSPEC:
1018 break;
1019 case OVS_NSH_KEY_ATTR_BASE: {
1020 const struct ovs_nsh_key_base *base = nl_attr_get(a);
1021 const struct ovs_nsh_key_base *base_mask = base + 1;
1022 memcpy(&nsh, base, sizeof(*base));
1023 memcpy(&nsh_mask, base_mask, sizeof(*base_mask));
1024 break;
1025 }
1026 case OVS_NSH_KEY_ATTR_MD1: {
1027 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
1028 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
1029 memcpy(&nsh.context, &md1->context, sizeof(*md1));
1030 memcpy(&nsh_mask.context, &md1_mask->context, sizeof(*md1_mask));
1031 break;
1032 }
1033 case OVS_NSH_KEY_ATTR_MD2:
1034 case __OVS_NSH_KEY_ATTR_MAX:
1035 default:
1036 /* No support for matching other metadata formats yet. */
1037 break;
1038 }
1039 }
1040
1041 ds_put_cstr(ds, "set(nsh(");
1042 format_nsh_key_mask(ds, &nsh, &nsh_mask);
1043 ds_put_cstr(ds, "))");
1044 }
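/* Each nested OVS_NSH_KEY_ATTR_* attribute carries the key immediately
 * followed by an equal-sized mask, which is why the length check above
 * expects len == 2 * expected_len and why the masks are read from
 * "base + 1" and "md1 + 1". */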
1045
1046
1047 static void
1048 format_odp_action(struct ds *ds, const struct nlattr *a,
1049 const struct hmap *portno_names)
1050 {
1051 int expected_len;
1052 enum ovs_action_attr type = nl_attr_type(a);
1053 size_t size;
1054
1055 expected_len = odp_action_len(nl_attr_type(a));
1056 if (expected_len != ATTR_LEN_VARIABLE &&
1057 nl_attr_get_size(a) != expected_len) {
1058 ds_put_format(ds, "bad length %"PRIuSIZE", expected %d for: ",
1059 nl_attr_get_size(a), expected_len);
1060 format_generic_odp_action(ds, a);
1061 return;
1062 }
1063
1064 switch (type) {
1065 case OVS_ACTION_ATTR_METER:
1066 ds_put_format(ds, "meter(%"PRIu32")", nl_attr_get_u32(a));
1067 break;
1068 case OVS_ACTION_ATTR_OUTPUT:
1069 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1070 break;
1071 case OVS_ACTION_ATTR_TRUNC: {
1072 const struct ovs_action_trunc *trunc =
1073 nl_attr_get_unspec(a, sizeof *trunc);
1074
1075 ds_put_format(ds, "trunc(%"PRIu32")", trunc->max_len);
1076 break;
1077 }
1078 case OVS_ACTION_ATTR_TUNNEL_POP:
1079 ds_put_cstr(ds, "tnl_pop(");
1080 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1081 ds_put_char(ds, ')');
1082 break;
1083 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1084 format_odp_tnl_push_action(ds, a, portno_names);
1085 break;
1086 case OVS_ACTION_ATTR_USERSPACE:
1087 format_odp_userspace_action(ds, a, portno_names);
1088 break;
1089 case OVS_ACTION_ATTR_RECIRC:
1090 format_odp_recirc_action(ds, nl_attr_get_u32(a));
1091 break;
1092 case OVS_ACTION_ATTR_HASH:
1093 format_odp_hash_action(ds, nl_attr_get(a));
1094 break;
1095 case OVS_ACTION_ATTR_SET_MASKED:
1096 a = nl_attr_get(a);
1097         /* OVS_KEY_ATTR_NSH is a nested attribute, so it needs special processing. */
1098 if (nl_attr_type(a) == OVS_KEY_ATTR_NSH) {
1099 format_odp_set_nsh(ds, a);
1100 break;
1101 }
1102 size = nl_attr_get_size(a) / 2;
1103 ds_put_cstr(ds, "set(");
1104
1105         /* A masked set action is not supported for the tunnel key, which is
1106          * larger than struct ovs_key_ipv6. */
1106 if (size <= sizeof(struct ovs_key_ipv6)) {
1107 struct nlattr attr[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1108 sizeof(struct nlattr))];
1109 struct nlattr mask[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1110 sizeof(struct nlattr))];
1111
1112 mask->nla_type = attr->nla_type = nl_attr_type(a);
1113 mask->nla_len = attr->nla_len = NLA_HDRLEN + size;
1114 memcpy(attr + 1, (char *)(a + 1), size);
1115 memcpy(mask + 1, (char *)(a + 1) + size, size);
1116 format_odp_key_attr(attr, mask, NULL, ds, false);
1117 } else {
1118 format_odp_key_attr(a, NULL, NULL, ds, false);
1119 }
1120 ds_put_cstr(ds, ")");
1121 break;
1122 case OVS_ACTION_ATTR_SET:
1123 ds_put_cstr(ds, "set(");
1124 format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
1125 ds_put_cstr(ds, ")");
1126 break;
1127 case OVS_ACTION_ATTR_PUSH_ETH: {
1128 const struct ovs_action_push_eth *eth = nl_attr_get(a);
1129 ds_put_format(ds, "push_eth(src="ETH_ADDR_FMT",dst="ETH_ADDR_FMT")",
1130 ETH_ADDR_ARGS(eth->addresses.eth_src),
1131 ETH_ADDR_ARGS(eth->addresses.eth_dst));
1132 break;
1133 }
1134 case OVS_ACTION_ATTR_POP_ETH:
1135 ds_put_cstr(ds, "pop_eth");
1136 break;
1137 case OVS_ACTION_ATTR_PUSH_VLAN: {
1138 const struct ovs_action_push_vlan *vlan = nl_attr_get(a);
1139 ds_put_cstr(ds, "push_vlan(");
1140 if (vlan->vlan_tpid != htons(ETH_TYPE_VLAN)) {
1141 ds_put_format(ds, "tpid=0x%04"PRIx16",", ntohs(vlan->vlan_tpid));
1142 }
1143 format_vlan_tci(ds, vlan->vlan_tci, OVS_BE16_MAX, false);
1144 ds_put_char(ds, ')');
1145 break;
1146 }
1147 case OVS_ACTION_ATTR_POP_VLAN:
1148 ds_put_cstr(ds, "pop_vlan");
1149 break;
1150 case OVS_ACTION_ATTR_PUSH_MPLS: {
1151 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
1152 ds_put_cstr(ds, "push_mpls(");
1153 format_mpls_lse(ds, mpls->mpls_lse);
1154 ds_put_format(ds, ",eth_type=0x%"PRIx16")", ntohs(mpls->mpls_ethertype));
1155 break;
1156 }
1157 case OVS_ACTION_ATTR_POP_MPLS: {
1158 ovs_be16 ethertype = nl_attr_get_be16(a);
1159 ds_put_format(ds, "pop_mpls(eth_type=0x%"PRIx16")", ntohs(ethertype));
1160 break;
1161 }
1162 case OVS_ACTION_ATTR_SAMPLE:
1163 format_odp_sample_action(ds, a, portno_names);
1164 break;
1165 case OVS_ACTION_ATTR_CT:
1166 format_odp_conntrack_action(ds, a);
1167 break;
1168 case OVS_ACTION_ATTR_CT_CLEAR:
1169 ds_put_cstr(ds, "ct_clear");
1170 break;
1171 case OVS_ACTION_ATTR_CLONE:
1172 format_odp_clone_action(ds, a, portno_names);
1173 break;
1174 case OVS_ACTION_ATTR_PUSH_NSH: {
1175 uint32_t buffer[NSH_HDR_MAX_LEN / 4];
1176 struct nsh_hdr *nsh_hdr = ALIGNED_CAST(struct nsh_hdr *, buffer);
1177 nsh_reset_ver_flags_ttl_len(nsh_hdr);
1178 odp_nsh_hdr_from_attr(nl_attr_get(a), nsh_hdr, NSH_HDR_MAX_LEN);
1179 format_odp_push_nsh_action(ds, nsh_hdr);
1180 break;
1181 }
1182 case OVS_ACTION_ATTR_POP_NSH:
1183 ds_put_cstr(ds, "pop_nsh()");
1184 break;
1185 case OVS_ACTION_ATTR_UNSPEC:
1186 case __OVS_ACTION_ATTR_MAX:
1187 default:
1188 format_generic_odp_action(ds, a);
1189 break;
1190 }
1191 }
1192
1193 void
1194 format_odp_actions(struct ds *ds, const struct nlattr *actions,
1195 size_t actions_len, const struct hmap *portno_names)
1196 {
1197 if (actions_len) {
1198 const struct nlattr *a;
1199 unsigned int left;
1200
1201 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
1202 if (a != actions) {
1203 ds_put_char(ds, ',');
1204 }
1205 format_odp_action(ds, a, portno_names);
1206 }
1207 if (left) {
1208 int i;
1209
1210 if (left == actions_len) {
1211 ds_put_cstr(ds, "<empty>");
1212 }
1213 ds_put_format(ds, ",***%u leftover bytes*** (", left);
1214 for (i = 0; i < left; i++) {
1215 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
1216 }
1217 ds_put_char(ds, ')');
1218 }
1219 } else {
1220 ds_put_cstr(ds, "drop");
1221 }
1222 }
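/* A complete action list therefore looks like "ct(commit),recirc(0x1),2"; an
 * empty list is shown as "drop", and trailing garbage is dumped as
 * ",***N leftover bytes*** (...)" so truncated buffers remain visible. */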
1223
1224 /* Parsing of the userspace() action is separated out into its own function. */
1225 static int
1226 parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
1227 {
1228 uint32_t pid;
1229 struct user_action_cookie cookie;
1230 struct ofpbuf buf;
1231 odp_port_t tunnel_out_port;
1232 int n = -1;
1233 void *user_data = NULL;
1234 size_t user_data_size = 0;
1235 bool include_actions = false;
1236 int res;
1237
1238 if (!ovs_scan(s, "userspace(pid=%"SCNi32"%n", &pid, &n)) {
1239 return -EINVAL;
1240 }
1241
1242 ofpbuf_init(&buf, 16);
1243 memset(&cookie, 0, sizeof cookie);
1244
1245 user_data = &cookie;
1246 user_data_size = sizeof cookie;
1247 {
1248 uint32_t output;
1249 uint32_t probability;
1250 uint32_t collector_set_id;
1251 uint32_t obs_domain_id;
1252 uint32_t obs_point_id;
1253
1254 /* USER_ACTION_COOKIE_CONTROLLER. */
1255 uint8_t dont_send;
1256 uint8_t continuation;
1257 uint16_t reason;
1258 uint32_t recirc_id;
1259 uint64_t rule_cookie;
1260 uint16_t controller_id;
1261 uint16_t max_len;
1262
1263 int vid, pcp;
1264 int n1 = -1;
1265 if (ovs_scan(&s[n], ",sFlow(vid=%i,"
1266 "pcp=%i,output=%"SCNi32")%n",
1267 &vid, &pcp, &output, &n1)) {
1268 uint16_t tci;
1269
1270 n += n1;
1271 tci = vid | (pcp << VLAN_PCP_SHIFT);
1272 if (tci) {
1273 tci |= VLAN_CFI;
1274 }
1275
1276 cookie.type = USER_ACTION_COOKIE_SFLOW;
1277 cookie.ofp_in_port = OFPP_NONE;
1278 cookie.ofproto_uuid = UUID_ZERO;
1279 cookie.sflow.vlan_tci = htons(tci);
1280 cookie.sflow.output = output;
1281 } else if (ovs_scan(&s[n], ",slow_path(%n",
1282 &n1)) {
1283 n += n1;
1284 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
1285 cookie.ofp_in_port = OFPP_NONE;
1286 cookie.ofproto_uuid = UUID_ZERO;
1287 cookie.slow_path.reason = 0;
1288
1289 res = parse_odp_flags(&s[n], slow_path_reason_to_string,
1290 &cookie.slow_path.reason,
1291 SLOW_PATH_REASON_MASK, NULL);
1292 if (res < 0 || s[n + res] != ')') {
1293 goto out;
1294 }
1295 n += res + 1;
1296 } else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
1297 "collector_set_id=%"SCNi32","
1298 "obs_domain_id=%"SCNi32","
1299 "obs_point_id=%"SCNi32","
1300 "output_port=%"SCNi32"%n",
1301 &probability, &collector_set_id,
1302 &obs_domain_id, &obs_point_id,
1303 &output, &n1)) {
1304 n += n1;
1305
1306 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
1307 cookie.ofp_in_port = OFPP_NONE;
1308 cookie.ofproto_uuid = UUID_ZERO;
1309 cookie.flow_sample.probability = probability;
1310 cookie.flow_sample.collector_set_id = collector_set_id;
1311 cookie.flow_sample.obs_domain_id = obs_domain_id;
1312 cookie.flow_sample.obs_point_id = obs_point_id;
1313 cookie.flow_sample.output_odp_port = u32_to_odp(output);
1314
1315 if (ovs_scan(&s[n], ",ingress%n", &n1)) {
1316 cookie.flow_sample.direction = NX_ACTION_SAMPLE_INGRESS;
1317 n += n1;
1318 } else if (ovs_scan(&s[n], ",egress%n", &n1)) {
1319 cookie.flow_sample.direction = NX_ACTION_SAMPLE_EGRESS;
1320 n += n1;
1321 } else {
1322 cookie.flow_sample.direction = NX_ACTION_SAMPLE_DEFAULT;
1323 }
1324 if (s[n] != ')') {
1325 res = -EINVAL;
1326 goto out;
1327 }
1328 n++;
1329 } else if (ovs_scan(&s[n], ",ipfix(output_port=%"SCNi32")%n",
1330 &output, &n1) ) {
1331 n += n1;
1332 cookie.type = USER_ACTION_COOKIE_IPFIX;
1333 cookie.ofp_in_port = OFPP_NONE;
1334 cookie.ofproto_uuid = UUID_ZERO;
1335 cookie.ipfix.output_odp_port = u32_to_odp(output);
1336 } else if (ovs_scan(&s[n], ",controller(reason=%"SCNu16
1337 ",dont_send=%"SCNu8
1338 ",continuation=%"SCNu8
1339 ",recirc_id=%"SCNu32
1340 ",rule_cookie=%"SCNx64
1341 ",controller_id=%"SCNu16
1342 ",max_len=%"SCNu16")%n",
1343 &reason, &dont_send, &continuation, &recirc_id,
1344 &rule_cookie, &controller_id, &max_len, &n1)) {
1345 n += n1;
1346 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
1347 cookie.ofp_in_port = OFPP_NONE;
1348 cookie.ofproto_uuid = UUID_ZERO;
1349 cookie.controller.dont_send = dont_send ? true : false;
1350 cookie.controller.continuation = continuation ? true : false;
1351 cookie.controller.reason = reason;
1352 cookie.controller.recirc_id = recirc_id;
1353 put_32aligned_be64(&cookie.controller.rule_cookie,
1354 htonll(rule_cookie));
1355 cookie.controller.controller_id = controller_id;
1356 cookie.controller.max_len = max_len;
1357 } else if (ovs_scan(&s[n], ",userdata(%n", &n1)) {
1358 char *end;
1359
1360 n += n1;
1361 end = ofpbuf_put_hex(&buf, &s[n], NULL);
1362 if (end[0] != ')') {
1363 res = -EINVAL;
1364 goto out;
1365 }
1366 user_data = buf.data;
1367 user_data_size = buf.size;
1368 n = (end + 1) - s;
1369 }
1370 }
1371
1372 {
1373 int n1 = -1;
1374 if (ovs_scan(&s[n], ",actions%n", &n1)) {
1375 n += n1;
1376 include_actions = true;
1377 }
1378 }
1379
1380 {
1381 int n1 = -1;
1382 if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
1383 &tunnel_out_port, &n1)) {
1384 odp_put_userspace_action(pid, user_data, user_data_size,
1385 tunnel_out_port, include_actions, actions);
1386 res = n + n1;
1387 goto out;
1388 } else if (s[n] == ')') {
1389 odp_put_userspace_action(pid, user_data, user_data_size,
1390 ODPP_NONE, include_actions, actions);
1391 res = n + 1;
1392 goto out;
1393 }
1394 }
1395
1396 {
1397 struct ovs_action_push_eth push;
1398 int eth_type = 0;
1399 int n1 = -1;
1400
1401 if (ovs_scan(&s[n], "push_eth(src="ETH_ADDR_SCAN_FMT","
1402 "dst="ETH_ADDR_SCAN_FMT",type=%i)%n",
1403 ETH_ADDR_SCAN_ARGS(push.addresses.eth_src),
1404 ETH_ADDR_SCAN_ARGS(push.addresses.eth_dst),
1405 &eth_type, &n1)) {
1406
1407 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_ETH,
1408 &push, sizeof push);
1409
1410 res = n + n1;
1411 goto out;
1412 }
1413 }
1414
1415 if (!strncmp(&s[n], "pop_eth", 7)) {
1416 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_ETH);
1417 res = 7;
1418 goto out;
1419 }
1420
1421 res = -EINVAL;
1422 out:
1423 ofpbuf_uninit(&buf);
1424 return res;
1425 }
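/* This accepts strings in the shape that format_odp_userspace_action() emits,
 * e.g. "userspace(pid=1234,userdata(0102deadbeef))" or
 * "userspace(pid=1234,tunnel_out_port=5)". */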
1426
1427 static int
1428 ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
1429 {
1430 struct eth_header *eth;
1431 struct ip_header *ip;
1432 struct ovs_16aligned_ip6_hdr *ip6;
1433 struct udp_header *udp;
1434 struct gre_base_hdr *greh;
1435 struct erspan_base_hdr *ersh;
1436 struct erspan_md2 *md2;
1437 uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, csum, sid;
1438 ovs_be32 sip, dip;
1439 uint32_t tnl_type = 0, header_len = 0, ip_len = 0, erspan_idx = 0;
1440 void *l3, *l4;
1441 int n = 0;
1442 uint8_t hwid, dir;
1443
1444 if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
1445 return -EINVAL;
1446 }
1447 eth = (struct eth_header *) data->header;
1448 l3 = (struct ip_header *) (eth + 1);
1449 ip = (struct ip_header *) l3;
1450 ip6 = (struct ovs_16aligned_ip6_hdr *) l3;
1451 if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
1452 "eth(dst="ETH_ADDR_SCAN_FMT",",
1453 &data->header_len,
1454 &data->tnl_type,
1455 ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
1456 return -EINVAL;
1457 }
1458
1459 if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
1460 ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
1461 return -EINVAL;
1462 }
1463 if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
1464 return -EINVAL;
1465 }
1466 eth->eth_type = htons(dl_type);
1467
1468 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1469 /* IPv4 */
1470 uint16_t ip_frag_off;
1471 if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
1472 ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
1473 IP_SCAN_ARGS(&sip),
1474 IP_SCAN_ARGS(&dip),
1475 &ip->ip_proto, &ip->ip_tos,
1476 &ip->ip_ttl, &ip_frag_off)) {
1477 return -EINVAL;
1478 }
1479 put_16aligned_be32(&ip->ip_src, sip);
1480 put_16aligned_be32(&ip->ip_dst, dip);
1481 ip->ip_frag_off = htons(ip_frag_off);
1482 ip_len = sizeof *ip;
1483 } else {
1484 char sip6_s[IPV6_SCAN_LEN + 1];
1485 char dip6_s[IPV6_SCAN_LEN + 1];
1486 struct in6_addr sip6, dip6;
1487 uint8_t tclass;
1488 uint32_t label;
1489 if (!ovs_scan_len(s, &n, "ipv6(src="IPV6_SCAN_FMT",dst="IPV6_SCAN_FMT
1490 ",label=%i,proto=%"SCNi8",tclass=0x%"SCNx8
1491 ",hlimit=%"SCNi8"),",
1492 sip6_s, dip6_s, &label, &ip6->ip6_nxt,
1493 &tclass, &ip6->ip6_hlim)
1494 || (label & ~IPV6_LABEL_MASK) != 0
1495 || inet_pton(AF_INET6, sip6_s, &sip6) != 1
1496 || inet_pton(AF_INET6, dip6_s, &dip6) != 1) {
1497 return -EINVAL;
1498 }
1499 put_16aligned_be32(&ip6->ip6_flow, htonl(6 << 28) |
1500 htonl(tclass << 20) | htonl(label));
1501 memcpy(&ip6->ip6_src, &sip6, sizeof(ip6->ip6_src));
1502 memcpy(&ip6->ip6_dst, &dip6, sizeof(ip6->ip6_dst));
1503 ip_len = sizeof *ip6;
1504 }
1505
1506 /* Tunnel header */
1507 l4 = ((uint8_t *) l3 + ip_len);
1508 udp = (struct udp_header *) l4;
1509 greh = (struct gre_base_hdr *) l4;
1510 if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
1511 &udp_src, &udp_dst, &csum)) {
1512 uint32_t vx_flags, vni;
1513
1514 udp->udp_src = htons(udp_src);
1515 udp->udp_dst = htons(udp_dst);
1516 udp->udp_len = 0;
1517 udp->udp_csum = htons(csum);
1518
1519 if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
1520 &vx_flags, &vni)) {
1521 struct vxlanhdr *vxh = (struct vxlanhdr *) (udp + 1);
1522
1523 put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
1524 put_16aligned_be32(&vxh->vx_vni, htonl(vni << 8));
1525 tnl_type = OVS_VPORT_TYPE_VXLAN;
1526 header_len = sizeof *eth + ip_len +
1527 sizeof *udp + sizeof *vxh;
1528 } else if (ovs_scan_len(s, &n, "geneve(")) {
1529 struct genevehdr *gnh = (struct genevehdr *) (udp + 1);
1530
1531 memset(gnh, 0, sizeof *gnh);
1532 header_len = sizeof *eth + ip_len +
1533 sizeof *udp + sizeof *gnh;
1534
1535 if (ovs_scan_len(s, &n, "oam,")) {
1536 gnh->oam = 1;
1537 }
1538 if (ovs_scan_len(s, &n, "crit,")) {
1539 gnh->critical = 1;
1540 }
1541 if (!ovs_scan_len(s, &n, "vni=%"SCNi32, &vni)) {
1542 return -EINVAL;
1543 }
1544 if (ovs_scan_len(s, &n, ",options(")) {
1545 struct geneve_scan options;
1546 int len;
1547
1548 memset(&options, 0, sizeof options);
1549 len = scan_geneve(s + n, &options, NULL);
1550 if (!len) {
1551 return -EINVAL;
1552 }
1553
1554 memcpy(gnh->options, options.d, options.len);
1555 gnh->opt_len = options.len / 4;
1556 header_len += options.len;
1557
1558 n += len;
1559 }
1560 if (!ovs_scan_len(s, &n, "))")) {
1561 return -EINVAL;
1562 }
1563
1564 gnh->proto_type = htons(ETH_TYPE_TEB);
1565 put_16aligned_be32(&gnh->vni, htonl(vni << 8));
1566 tnl_type = OVS_VPORT_TYPE_GENEVE;
1567 } else {
1568 return -EINVAL;
1569 }
1570 } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
1571 &gre_flags, &gre_proto)){
1572
1573 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1574 tnl_type = OVS_VPORT_TYPE_GRE;
1575 } else {
1576 tnl_type = OVS_VPORT_TYPE_IP6GRE;
1577 }
1578 greh->flags = htons(gre_flags);
1579 greh->protocol = htons(gre_proto);
1580 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
1581
1582 if (greh->flags & htons(GRE_CSUM)) {
1583 if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
1584 return -EINVAL;
1585 }
1586
1587 memset(options, 0, sizeof *options);
1588 *((ovs_be16 *)options) = htons(csum);
1589 options++;
1590 }
1591 if (greh->flags & htons(GRE_KEY)) {
1592 uint32_t key;
1593
1594 if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
1595 return -EINVAL;
1596 }
1597
1598 put_16aligned_be32(options, htonl(key));
1599 options++;
1600 }
1601 if (greh->flags & htons(GRE_SEQ)) {
1602 uint32_t seq;
1603
1604 if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
1605 return -EINVAL;
1606 }
1607 put_16aligned_be32(options, htonl(seq));
1608 options++;
1609 }
1610
1611 if (!ovs_scan_len(s, &n, "))")) {
1612 return -EINVAL;
1613 }
1614
1615 header_len = sizeof *eth + ip_len +
1616 ((uint8_t *) options - (uint8_t *) greh);
1617 } else if (ovs_scan_len(s, &n, "erspan(ver=1,sid="SCNx16",idx=0x"SCNx32")",
1618 &sid, &erspan_idx)) {
1619 ersh = ERSPAN_HDR(greh);
1620 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
1621 ersh + 1);
1622
1623 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1624 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1625 } else {
1626 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1627 }
1628
1629 greh->flags = htons(GRE_SEQ);
1630 greh->protocol = htons(ETH_TYPE_ERSPAN1);
1631
1632 ersh->ver = 1;
1633 set_sid(ersh, sid);
1634 put_16aligned_be32(index, htonl(erspan_idx));
1635
1636 if (!ovs_scan_len(s, &n, ")")) {
1637 return -EINVAL;
1638 }
1639 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1640 sizeof *ersh + ERSPAN_V1_MDSIZE;
1641
1642 } else if (ovs_scan_len(s, &n, "erspan(ver=2,sid="SCNx16"dir="SCNu8
1643 ",hwid=0x"SCNx8")", &sid, &dir, &hwid)) {
1644
1645 ersh = ERSPAN_HDR(greh);
1646 md2 = ALIGNED_CAST(struct erspan_md2 *, ersh + 1);
1647
1648 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1649 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1650 } else {
1651 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1652 }
1653
1654 greh->flags = htons(GRE_SEQ);
1655 greh->protocol = htons(ETH_TYPE_ERSPAN2);
1656
1657 ersh->ver = 2;
1658 set_sid(ersh, sid);
1659 set_hwid(md2, hwid);
1660 md2->dir = dir;
1661
1662 if (!ovs_scan_len(s, &n, ")")) {
1663 return -EINVAL;
1664 }
1665
1666 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1667 sizeof *ersh + ERSPAN_V2_MDSIZE;
1668 } else {
1669 return -EINVAL;
1670 }
1671
1672     /* Check that the parsed header is consistent with the tunnel metadata. */
1673 if (data->tnl_type != tnl_type) {
1674 return -EINVAL;
1675 }
1676 if (data->header_len != header_len) {
1677 return -EINVAL;
1678 }
1679
1680 /* Out port */
1681 if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
1682 return -EINVAL;
1683 }
1684
1685 return n;
1686 }
1687
1688 struct ct_nat_params {
1689 bool snat;
1690 bool dnat;
1691 size_t addr_len;
1692 union {
1693 ovs_be32 ip;
1694 struct in6_addr ip6;
1695 } addr_min;
1696 union {
1697 ovs_be32 ip;
1698 struct in6_addr ip6;
1699 } addr_max;
1700 uint16_t proto_min;
1701 uint16_t proto_max;
1702 bool persistent;
1703 bool proto_hash;
1704 bool proto_random;
1705 };
1706
1707 static int
1708 scan_ct_nat_range(const char *s, int *n, struct ct_nat_params *p)
1709 {
1710 if (ovs_scan_len(s, n, "=")) {
1711 char ipv6_s[IPV6_SCAN_LEN + 1];
1712 struct in6_addr ipv6;
1713
1714 if (ovs_scan_len(s, n, IP_SCAN_FMT, IP_SCAN_ARGS(&p->addr_min.ip))) {
1715 p->addr_len = sizeof p->addr_min.ip;
1716 if (ovs_scan_len(s, n, "-")) {
1717 if (!ovs_scan_len(s, n, IP_SCAN_FMT,
1718 IP_SCAN_ARGS(&p->addr_max.ip))) {
1719 return -EINVAL;
1720 }
1721 }
1722 } else if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1723 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1724 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1725 p->addr_len = sizeof p->addr_min.ip6;
1726 p->addr_min.ip6 = ipv6;
1727 if (ovs_scan_len(s, n, "-")) {
1728 if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1729 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1730 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1731 p->addr_max.ip6 = ipv6;
1732 } else {
1733 return -EINVAL;
1734 }
1735 }
1736 } else {
1737 return -EINVAL;
1738 }
1739 if (ovs_scan_len(s, n, ":%"SCNu16, &p->proto_min)) {
1740 if (ovs_scan_len(s, n, "-")) {
1741 if (!ovs_scan_len(s, n, "%"SCNu16, &p->proto_max)) {
1742 return -EINVAL;
1743 }
1744 }
1745 }
1746 }
1747 return 0;
1748 }
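/* Accepted forms include "=10.0.0.1", "=10.0.0.1-10.0.0.254:4000-5000", and
 * bracketed IPv6 ranges such as "=[fc00::1]-[fc00::2]:4000"; an empty string
 * (no '=') is also valid and leaves the range unrestricted. */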
1749
1750 static int
1751 scan_ct_nat(const char *s, struct ct_nat_params *p)
1752 {
1753 int n = 0;
1754
1755 if (ovs_scan_len(s, &n, "nat")) {
1756 memset(p, 0, sizeof *p);
1757
1758 if (ovs_scan_len(s, &n, "(")) {
1759 char *end;
1760 int end_n;
1761
1762 end = strchr(s + n, ')');
1763 if (!end) {
1764 return -EINVAL;
1765 }
1766 end_n = end - s;
1767
1768 while (n < end_n) {
1769 n += strspn(s + n, delimiters);
1770 if (ovs_scan_len(s, &n, "src")) {
1771 int err = scan_ct_nat_range(s, &n, p);
1772 if (err) {
1773 return err;
1774 }
1775 p->snat = true;
1776 continue;
1777 }
1778 if (ovs_scan_len(s, &n, "dst")) {
1779 int err = scan_ct_nat_range(s, &n, p);
1780 if (err) {
1781 return err;
1782 }
1783 p->dnat = true;
1784 continue;
1785 }
1786 if (ovs_scan_len(s, &n, "persistent")) {
1787 p->persistent = true;
1788 continue;
1789 }
1790 if (ovs_scan_len(s, &n, "hash")) {
1791 p->proto_hash = true;
1792 continue;
1793 }
1794 if (ovs_scan_len(s, &n, "random")) {
1795 p->proto_random = true;
1796 continue;
1797 }
1798 return -EINVAL;
1799 }
1800
1801 if (p->snat && p->dnat) {
1802 return -EINVAL;
1803 }
1804 if ((p->addr_len != 0 &&
1805 memcmp(&p->addr_max, &in6addr_any, p->addr_len) &&
1806 memcmp(&p->addr_max, &p->addr_min, p->addr_len) < 0) ||
1807 (p->proto_max && p->proto_max < p->proto_min)) {
1808 return -EINVAL;
1809 }
1810 if (p->proto_hash && p->proto_random) {
1811 return -EINVAL;
1812 }
1813 n++;
1814 }
1815 }
1816 return n;
1817 }
1818
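/* Appends a nested OVS_CT_ATTR_NAT attribute built from 'p' to 'actions'.
 * If 'p' requests neither SNAT nor DNAT, the nested attribute is left
 * empty (a bare "nat"). */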
1819 static void
1820 nl_msg_put_ct_nat(struct ct_nat_params *p, struct ofpbuf *actions)
1821 {
1822 size_t start = nl_msg_start_nested(actions, OVS_CT_ATTR_NAT);
1823
1824 if (p->snat) {
1825 nl_msg_put_flag(actions, OVS_NAT_ATTR_SRC);
1826 } else if (p->dnat) {
1827 nl_msg_put_flag(actions, OVS_NAT_ATTR_DST);
1828 } else {
1829 goto out;
1830 }
1831 if (p->addr_len != 0) {
1832 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MIN, &p->addr_min,
1833 p->addr_len);
1834 if (memcmp(&p->addr_max, &p->addr_min, p->addr_len) > 0) {
1835 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MAX, &p->addr_max,
1836 p->addr_len);
1837 }
1838 if (p->proto_min) {
1839 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MIN, p->proto_min);
1840 if (p->proto_max && p->proto_max > p->proto_min) {
1841 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MAX, p->proto_max);
1842 }
1843 }
1844 if (p->persistent) {
1845 nl_msg_put_flag(actions, OVS_NAT_ATTR_PERSISTENT);
1846 }
1847 if (p->proto_hash) {
1848 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_HASH);
1849 }
1850 if (p->proto_random) {
1851 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_RANDOM);
1852 }
1853 }
1854 out:
1855 nl_msg_end_nested(actions, start);
1856 }
1857
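/* Parses a ct() datapath action from 's' and appends it to 'actions' as a
 * nested OVS_ACTION_ATTR_CT attribute.  Illustrative examples of accepted
 * syntax (the zone, mark, label and helper values are arbitrary samples):
 *
 *     ct
 *     ct(commit)
 *     ct(commit,zone=1,mark=0x1/0xffffffff,label=0x2/0x3,helper=ftp)
 *     ct(commit,nat(src=10.0.0.1:1024-65535,hash))
 *
 * Returns the number of bytes consumed, 0 if 's' does not begin with "ct",
 * or a negative errno value on error. */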
1858 static int
1859 parse_conntrack_action(const char *s_, struct ofpbuf *actions)
1860 {
1861 const char *s = s_;
1862
1863 if (ovs_scan(s, "ct")) {
1864 const char *helper = NULL;
1865 size_t helper_len = 0;
1866 bool commit = false;
1867 bool force_commit = false;
1868 uint16_t zone = 0;
1869 struct {
1870 uint32_t value;
1871 uint32_t mask;
1872 } ct_mark = { 0, 0 };
1873 struct {
1874 ovs_u128 value;
1875 ovs_u128 mask;
1876 } ct_label;
1877 struct ct_nat_params nat_params;
1878 bool have_nat = false;
1879 size_t start;
1880 char *end;
1881
1882 memset(&ct_label, 0, sizeof(ct_label));
1883
1884 s += 2;
1885 if (ovs_scan(s, "(")) {
1886 s++;
1887 find_end:
1888 end = strchr(s, ')');
1889 if (!end) {
1890 return -EINVAL;
1891 }
1892
1893 while (s != end) {
1894 int n;
1895
1896 s += strspn(s, delimiters);
1897 if (ovs_scan(s, "commit%n", &n)) {
1898 commit = true;
1899 s += n;
1900 continue;
1901 }
1902 if (ovs_scan(s, "force_commit%n", &n)) {
1903 force_commit = true;
1904 s += n;
1905 continue;
1906 }
1907 if (ovs_scan(s, "zone=%"SCNu16"%n", &zone, &n)) {
1908 s += n;
1909 continue;
1910 }
1911 if (ovs_scan(s, "mark=%"SCNx32"%n", &ct_mark.value, &n)) {
1912 s += n;
1913 n = -1;
1914 if (ovs_scan(s, "/%"SCNx32"%n", &ct_mark.mask, &n)) {
1915 s += n;
1916 } else {
1917 ct_mark.mask = UINT32_MAX;
1918 }
1919 continue;
1920 }
1921 if (ovs_scan(s, "label=%n", &n)) {
1922 int retval;
1923
1924 s += n;
1925 retval = scan_u128(s, &ct_label.value, &ct_label.mask);
1926 if (retval == 0) {
1927 return -EINVAL;
1928 }
1929 s += retval;
1930 continue;
1931 }
1932 if (ovs_scan(s, "helper=%n", &n)) {
1933 s += n;
1934 helper_len = strcspn(s, delimiters_end);
1935 if (!helper_len || helper_len > 15) {
1936 return -EINVAL;
1937 }
1938 helper = s;
1939 s += helper_len;
1940 continue;
1941 }
1942
1943 n = scan_ct_nat(s, &nat_params);
1944 if (n > 0) {
1945 s += n;
1946 have_nat = true;
1947
1948 /* 'end' points to the end of the nested nat() action;
1949 * find the real end of the enclosing ct() action. */
1950 goto find_end;
1951 }
1952 /* Nothing matched. */
1953 return -EINVAL;
1954 }
1955 s++;
1956 }
1957 if (commit && force_commit) {
1958 return -EINVAL;
1959 }
1960
1961 start = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CT);
1962 if (commit) {
1963 nl_msg_put_flag(actions, OVS_CT_ATTR_COMMIT);
1964 } else if (force_commit) {
1965 nl_msg_put_flag(actions, OVS_CT_ATTR_FORCE_COMMIT);
1966 }
1967 if (zone) {
1968 nl_msg_put_u16(actions, OVS_CT_ATTR_ZONE, zone);
1969 }
1970 if (ct_mark.mask) {
1971 nl_msg_put_unspec(actions, OVS_CT_ATTR_MARK, &ct_mark,
1972 sizeof(ct_mark));
1973 }
1974 if (!ovs_u128_is_zero(ct_label.mask)) {
1975 nl_msg_put_unspec(actions, OVS_CT_ATTR_LABELS, &ct_label,
1976 sizeof ct_label);
1977 }
1978 if (helper) {
1979 nl_msg_put_string__(actions, OVS_CT_ATTR_HELPER, helper,
1980 helper_len);
1981 }
1982 if (have_nat) {
1983 nl_msg_put_ct_nat(&nat_params, actions);
1984 }
1985 nl_msg_end_nested(actions, start);
1986 }
1987
1988 return s - s_;
1989 }
1990
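/* Appends a nested OVS_KEY_ATTR_NSH attribute built from 'nsh' to 'buf'.
 * For an MD type 2 key, 'metadata' and 'md_size' supply the TLV metadata to
 * include; they are ignored for MD type 1.  If 'is_mask' is true, the
 * context words are always emitted as an OVS_NSH_KEY_ATTR_MD1 attribute. */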
1991 static void
1992 nsh_key_to_attr(struct ofpbuf *buf, const struct ovs_key_nsh *nsh,
1993 uint8_t * metadata, size_t md_size,
1994 bool is_mask)
1995 {
1996 size_t nsh_key_ofs;
1997 struct ovs_nsh_key_base base;
1998
1999 base.flags = nsh->flags;
2000 base.ttl = nsh->ttl;
2001 base.mdtype = nsh->mdtype;
2002 base.np = nsh->np;
2003 base.path_hdr = nsh->path_hdr;
2004
2005 nsh_key_ofs = nl_msg_start_nested(buf, OVS_KEY_ATTR_NSH);
2006 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_BASE, &base, sizeof base);
2007
2008 if (is_mask) {
2009 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2010 sizeof nsh->context);
2011 } else {
2012 switch (nsh->mdtype) {
2013 case NSH_M_TYPE1:
2014 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2015 sizeof nsh->context);
2016 break;
2017 case NSH_M_TYPE2:
2018 if (metadata && md_size > 0) {
2019 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD2, metadata,
2020 md_size);
2021 }
2022 break;
2023 default:
2024 /* No match support for other MD formats yet. */
2025 break;
2026 }
2027 }
2028 nl_msg_end_nested(buf, nsh_key_ofs);
2029 }
2030
2031
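/* Parses a push_nsh() datapath action from 's' and appends it to 'actions'.
 * Illustrative examples of accepted syntax (the SPI, SI and context values
 * are arbitrary samples; an explicit mdtype=2 must precede md2=):
 *
 *     push_nsh(flags=0,ttl=63,mdtype=1,np=3,spi=0x100,si=255,c1=0x1,c2=0x2)
 *     push_nsh(mdtype=2,np=3,spi=0x100,si=254,md2=0x10000a04aabbccdd)
 *
 * Returns the number of bytes consumed, not counting the closing ')', or a
 * negative errno value on error. */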
2032 static int
2033 parse_odp_push_nsh_action(const char *s, struct ofpbuf *actions)
2034 {
2035 int n = 0;
2036 int ret = 0;
2037 uint32_t spi = 0;
2038 uint8_t si = 255;
2039 uint32_t cd;
2040 struct ovs_key_nsh nsh;
2041 uint8_t metadata[NSH_CTX_HDRS_MAX_LEN];
2042 uint8_t md_size = 0;
2043
2044 if (!ovs_scan_len(s, &n, "push_nsh(")) {
2045 ret = -EINVAL;
2046 goto out;
2047 }
2048
2049 /* The default is NSH_M_TYPE1 */
2050 nsh.flags = 0;
2051 nsh.ttl = 63;
2052 nsh.mdtype = NSH_M_TYPE1;
2053 nsh.np = NSH_P_ETHERNET;
2054 nsh.path_hdr = nsh_spi_si_to_path_hdr(0, 255);
2055 memset(nsh.context, 0, NSH_M_TYPE1_MDLEN);
2056
2057 for (;;) {
2058 n += strspn(s + n, delimiters);
2059 if (s[n] == ')') {
2060 break;
2061 }
2062
2063 if (ovs_scan_len(s, &n, "flags=%"SCNi8, &nsh.flags)) {
2064 continue;
2065 }
2066 if (ovs_scan_len(s, &n, "ttl=%"SCNi8, &nsh.ttl)) {
2067 continue;
2068 }
2069 if (ovs_scan_len(s, &n, "mdtype=%"SCNi8, &nsh.mdtype)) {
2070 switch (nsh.mdtype) {
2071 case NSH_M_TYPE1:
2072 /* This is the default format. */
2073 break;
2074 case NSH_M_TYPE2:
2075 /* Length will be updated later. */
2076 md_size = 0;
2077 break;
2078 default:
2079 ret = -EINVAL;
2080 goto out;
2081 }
2082 continue;
2083 }
2084 if (ovs_scan_len(s, &n, "np=%"SCNi8, &nsh.np)) {
2085 continue;
2086 }
2087 if (ovs_scan_len(s, &n, "spi=0x%"SCNx32, &spi)) {
2088 continue;
2089 }
2090 if (ovs_scan_len(s, &n, "si=%"SCNi8, &si)) {
2091 continue;
2092 }
2093 if (nsh.mdtype == NSH_M_TYPE1) {
2094 if (ovs_scan_len(s, &n, "c1=0x%"SCNx32, &cd)) {
2095 nsh.context[0] = htonl(cd);
2096 continue;
2097 }
2098 if (ovs_scan_len(s, &n, "c2=0x%"SCNx32, &cd)) {
2099 nsh.context[1] = htonl(cd);
2100 continue;
2101 }
2102 if (ovs_scan_len(s, &n, "c3=0x%"SCNx32, &cd)) {
2103 nsh.context[2] = htonl(cd);
2104 continue;
2105 }
2106 if (ovs_scan_len(s, &n, "c4=0x%"SCNx32, &cd)) {
2107 nsh.context[3] = htonl(cd);
2108 continue;
2109 }
2110 }
2111 else if (nsh.mdtype == NSH_M_TYPE2) {
2112 struct ofpbuf b;
2113 char buf[512];
2114 size_t mdlen, padding;
2115 if (ovs_scan_len(s, &n, "md2=0x%511[0-9a-fA-F]", buf)
2116 && n/2 <= sizeof metadata) {
2117 ofpbuf_use_stub(&b, metadata, sizeof metadata);
2118 ofpbuf_put_hex(&b, buf, &mdlen);
2119 /* Pad metadata to a multiple of 4 bytes. */
2120 padding = PAD_SIZE(mdlen, 4);
2121 if (padding > 0) {
2122 ofpbuf_put_zeros(&b, padding);
2123 }
2124 md_size = mdlen + padding;
2125 ofpbuf_uninit(&b);
2126 continue;
2127 }
2128 }
2129
2130 ret = -EINVAL;
2131 goto out;
2132 }
2133 out:
2134 if (ret >= 0) {
2135 nsh.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
2136 size_t offset = nl_msg_start_nested(actions, OVS_ACTION_ATTR_PUSH_NSH);
2137 nsh_key_to_attr(actions, &nsh, metadata, md_size, false);
2138 nl_msg_end_nested(actions, offset);
2139 ret = n;
2140 }
2141 return ret;
2142 }
2143
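/* Parses a comma-separated list of datapath actions from 's', stopping at
 * (but not consuming) a closing ')', and appends them to 'actions'.
 * Returns the number of bytes consumed, or a negative errno value on
 * error. */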
2144 static int
2145 parse_action_list(const char *s, const struct simap *port_names,
2146 struct ofpbuf *actions)
2147 {
2148 int n = 0;
2149
2150 for (;;) {
2151 int retval;
2152
2153 n += strspn(s + n, delimiters);
2154 if (s[n] == ')') {
2155 break;
2156 }
2157 retval = parse_odp_action(s + n, port_names, actions);
2158 if (retval < 0) {
2159 return retval;
2160 }
2161 n += retval;
2162 }
2163
2164 return n;
2165 }
2166
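/* Parses a single datapath action from 's' and appends it to 'actions' as a
 * Netlink attribute.  Returns the number of bytes of 's' that were
 * consumed, or a negative errno value on error. */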
2167 static int
2168 parse_odp_action(const char *s, const struct simap *port_names,
2169 struct ofpbuf *actions)
2170 {
2171 {
2172 uint32_t port;
2173 int n;
2174
2175 if (ovs_scan(s, "%"SCNi32"%n", &port, &n)) {
2176 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, port);
2177 return n;
2178 }
2179 }
2180
2181 {
2182 uint32_t max_len;
2183 int n;
2184
2185 if (ovs_scan(s, "trunc(%"SCNi32")%n", &max_len, &n)) {
2186 struct ovs_action_trunc *trunc;
2187
2188 trunc = nl_msg_put_unspec_uninit(actions,
2189 OVS_ACTION_ATTR_TRUNC, sizeof *trunc);
2190 trunc->max_len = max_len;
2191 return n;
2192 }
2193 }
2194
2195 if (port_names) {
2196 int len = strcspn(s, delimiters);
2197 struct simap_node *node;
2198
2199 node = simap_find_len(port_names, s, len);
2200 if (node) {
2201 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
2202 return len;
2203 }
2204 }
2205
2206 {
2207 uint32_t recirc_id;
2208 int n = -1;
2209
2210 if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
2211 nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
2212 return n;
2213 }
2214 }
2215
2216 if (!strncmp(s, "userspace(", 10)) {
2217 return parse_odp_userspace_action(s, actions);
2218 }
2219
2220 if (!strncmp(s, "set(", 4)) {
2221 size_t start_ofs;
2222 int retval;
2223 struct nlattr mask[1024 / sizeof(struct nlattr)];
2224 struct ofpbuf maskbuf = OFPBUF_STUB_INITIALIZER(mask);
2225 struct nlattr *nested, *key;
2226 size_t size;
2227 struct parse_odp_context context = (struct parse_odp_context) {
2228 .port_names = port_names,
2229 };
2230
2231 start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
2232 retval = parse_odp_key_mask_attr(&context, s + 4, actions, &maskbuf);
2233 if (retval < 0) {
2234 ofpbuf_uninit(&maskbuf);
2235 return retval;
2236 }
2237 if (s[retval + 4] != ')') {
2238 ofpbuf_uninit(&maskbuf);
2239 return -EINVAL;
2240 }
2241
2242 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2243 key = nested + 1;
2244
2245 size = nl_attr_get_size(mask);
2246 if (size == nl_attr_get_size(key)) {
2247 /* Change to masked set action if not fully masked. */
2248 if (!is_all_ones(mask + 1, size)) {
2249 /* Remove padding of the earlier key payload. */
2250 actions->size -= NLA_ALIGN(key->nla_len) - key->nla_len;
2251
2252 /* Put mask payload right after key payload */
2253 key->nla_len += size;
2254 ofpbuf_put(actions, mask + 1, size);
2255
2256 /* 'actions' may have been reallocated by ofpbuf_put(). */
2257 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2258 nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
2259
2260 key = nested + 1;
2261 /* Add new padding as needed */
2262 ofpbuf_put_zeros(actions, NLA_ALIGN(key->nla_len) -
2263 key->nla_len);
2264 }
2265 }
2266 ofpbuf_uninit(&maskbuf);
2267
2268 nl_msg_end_nested(actions, start_ofs);
2269 return retval + 5;
2270 }
2271
2272 {
2273 struct ovs_action_push_vlan push;
2274 int tpid = ETH_TYPE_VLAN;
2275 int vid, pcp;
2276 int cfi = 1;
2277 int n = -1;
2278
2279 if (ovs_scan(s, "push_vlan(vid=%i,pcp=%i)%n", &vid, &pcp, &n)
2280 || ovs_scan(s, "push_vlan(vid=%i,pcp=%i,cfi=%i)%n",
2281 &vid, &pcp, &cfi, &n)
2282 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i)%n",
2283 &tpid, &vid, &pcp, &n)
2284 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
2285 &tpid, &vid, &pcp, &cfi, &n)) {
2286 if ((vid & ~(VLAN_VID_MASK >> VLAN_VID_SHIFT)) != 0
2287 || (pcp & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) != 0) {
2288 return -EINVAL;
2289 }
2290 push.vlan_tpid = htons(tpid);
2291 push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
2292 | (pcp << VLAN_PCP_SHIFT)
2293 | (cfi ? VLAN_CFI : 0));
2294 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_VLAN,
2295 &push, sizeof push);
2296
2297 return n;
2298 }
2299 }
2300
2301 if (!strncmp(s, "pop_vlan", 8)) {
2302 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_VLAN);
2303 return 8;
2304 }
2305
2306 {
2307 unsigned long long int meter_id;
2308 int n = -1;
2309
2310 if (sscanf(s, "meter(%lli)%n", &meter_id, &n) > 0 && n > 0) {
2311 nl_msg_put_u32(actions, OVS_ACTION_ATTR_METER, meter_id);
2312 return n;
2313 }
2314 }
2315
2316 {
2317 double percentage;
2318 int n = -1;
2319
2320 if (ovs_scan(s, "sample(sample=%lf%%,actions(%n", &percentage, &n)
2321 && percentage >= 0. && percentage <= 100.0) {
2322 size_t sample_ofs, actions_ofs;
2323 double probability;
2324
2325 probability = floor(UINT32_MAX * (percentage / 100.0) + .5);
2326 sample_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
2327 nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY,
2328 (probability <= 0 ? 0
2329 : probability >= UINT32_MAX ? UINT32_MAX
2330 : probability));
2331
2332 actions_ofs = nl_msg_start_nested(actions,
2333 OVS_SAMPLE_ATTR_ACTIONS);
2334 int retval = parse_action_list(s + n, port_names, actions);
2335 if (retval < 0)
2336 return retval;
2337
2338 n += retval;
2339 nl_msg_end_nested(actions, actions_ofs);
2340 nl_msg_end_nested(actions, sample_ofs);
2341
2342 return s[n + 1] == ')' ? n + 2 : -EINVAL;
2343 }
2344 }
2345
2346 {
2347 if (!strncmp(s, "clone(", 6)) {
2348 size_t actions_ofs;
2349 int n = 6;
2350
2351 actions_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CLONE);
2352 int retval = parse_action_list(s + n, port_names, actions);
2353 if (retval < 0) {
2354 return retval;
2355 }
2356 n += retval;
2357 nl_msg_end_nested(actions, actions_ofs);
2358 return n + 1;
2359 }
2360 }
2361
2362 {
2363 if (!strncmp(s, "push_nsh(", 9)) {
2364 int retval = parse_odp_push_nsh_action(s, actions);
2365 if (retval < 0) {
2366 return retval;
2367 }
2368 return retval + 1;
2369 }
2370 }
2371
2372 {
2373 int n;
2374 if (ovs_scan(s, "pop_nsh()%n", &n)) {
2375 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_NSH);
2376 return n;
2377 }
2378 }
2379
2380 {
2381 uint32_t port;
2382 int n;
2383
2384 if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
2385 nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
2386 return n;
2387 }
2388 }
2389
2390 {
2391 if (!strncmp(s, "ct_clear", 8)) {
2392 nl_msg_put_flag(actions, OVS_ACTION_ATTR_CT_CLEAR);
2393 return 8;
2394 }
2395 }
2396
2397 {
2398 int retval;
2399
2400 retval = parse_conntrack_action(s, actions);
2401 if (retval) {
2402 return retval;
2403 }
2404 }
2405
2406 {
2407 struct ovs_action_push_tnl data;
2408 int n;
2409
2410 n = ovs_parse_tnl_push(s, &data);
2411 if (n > 0) {
2412 odp_put_tnl_push_action(actions, &data);
2413 return n;
2414 } else if (n < 0) {
2415 return n;
2416 }
2417 }
2418 return -EINVAL;
2419 }
2420
2421 /* Parses the string representation of datapath actions, in the format output
2422 * by format_odp_action(). Returns 0 if successful, otherwise a positive errno
2423 * value. On success, the ODP actions are appended to 'actions' as a series of
2424 * Netlink attributes. On failure, no data is appended to 'actions'. Either
2425 * way, 'actions''s data might be reallocated. */
2426 int
2427 odp_actions_from_string(const char *s, const struct simap *port_names,
2428 struct ofpbuf *actions)
2429 {
2430 size_t old_size;
2431
2432 if (!strcasecmp(s, "drop")) {
2433 return 0;
2434 }
2435
2436 old_size = actions->size;
2437 for (;;) {
2438 int retval;
2439
2440 s += strspn(s, delimiters);
2441 if (!*s) {
2442 return 0;
2443 }
2444
2445 retval = parse_odp_action(s, port_names, actions);
2446 if (retval < 0 || !strchr(delimiters, s[retval])) {
2447 actions->size = old_size;
2448 return -retval;
2449 }
2450 s += retval;
2451 }
2452
2453 return 0;
2454 }
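/* Usage sketch (illustrative only; the action string and initial buffer
 * size are arbitrary):
 *
 *     struct ofpbuf actions;
 *     ofpbuf_init(&actions, 64);
 *     int error = odp_actions_from_string("2,recirc(1)", NULL, &actions);
 *     if (!error) {
 *         ... 'actions' now holds OVS_ACTION_ATTR_OUTPUT and
 *         OVS_ACTION_ATTR_RECIRC Netlink attributes ...
 *     }
 *     ofpbuf_uninit(&actions);
 */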
2455 \f
2456 static const struct attr_len_tbl ovs_vxlan_ext_attr_lens[OVS_VXLAN_EXT_MAX + 1] = {
2457 [OVS_VXLAN_EXT_GBP] = { .len = 4 },
2458 };
2459
2460 static const struct attr_len_tbl ovs_tun_key_attr_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
2461 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = 8 },
2462 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = 4 },
2463 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = 4 },
2464 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
2465 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
2466 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
2467 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
2468 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = 2 },
2469 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = 2 },
2470 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
2471 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = ATTR_LEN_VARIABLE },
2472 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = ATTR_LEN_NESTED,
2473 .next = ovs_vxlan_ext_attr_lens ,
2474 .next_max = OVS_VXLAN_EXT_MAX},
2475 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = 16 },
2476 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = 16 },
2477 [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = ATTR_LEN_VARIABLE },
2478 };
2479
2480 const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
2481 [OVS_KEY_ATTR_ENCAP] = { .len = ATTR_LEN_NESTED },
2482 [OVS_KEY_ATTR_PRIORITY] = { .len = 4 },
2483 [OVS_KEY_ATTR_SKB_MARK] = { .len = 4 },
2484 [OVS_KEY_ATTR_DP_HASH] = { .len = 4 },
2485 [OVS_KEY_ATTR_RECIRC_ID] = { .len = 4 },
2486 [OVS_KEY_ATTR_TUNNEL] = { .len = ATTR_LEN_NESTED,
2487 .next = ovs_tun_key_attr_lens,
2488 .next_max = OVS_TUNNEL_KEY_ATTR_MAX },
2489 [OVS_KEY_ATTR_IN_PORT] = { .len = 4 },
2490 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
2491 [OVS_KEY_ATTR_VLAN] = { .len = 2 },
2492 [OVS_KEY_ATTR_ETHERTYPE] = { .len = 2 },
2493 [OVS_KEY_ATTR_MPLS] = { .len = ATTR_LEN_VARIABLE },
2494 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
2495 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
2496 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
2497 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = 2 },
2498 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
2499 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
2500 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
2501 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
2502 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
2503 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
2504 [OVS_KEY_ATTR_ND_EXTENSIONS] = { .len = sizeof(struct ovs_key_nd_extensions) },
2505 [OVS_KEY_ATTR_CT_STATE] = { .len = 4 },
2506 [OVS_KEY_ATTR_CT_ZONE] = { .len = 2 },
2507 [OVS_KEY_ATTR_CT_MARK] = { .len = 4 },
2508 [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
2509 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = { .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
2510 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = { .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
2511 [OVS_KEY_ATTR_PACKET_TYPE] = { .len = 4 },
2512 [OVS_KEY_ATTR_NSH] = { .len = ATTR_LEN_NESTED,
2513 .next = ovs_nsh_key_attr_lens,
2514 .next_max = OVS_NSH_KEY_ATTR_MAX },
2515 };
2516
2517 /* Returns the correct length of the payload for a flow key attribute of the
2518 * specified 'type', ATTR_LEN_INVALID if 'type' is unknown, ATTR_LEN_VARIABLE
2519 * if the attribute's payload is variable length, or ATTR_LEN_NESTED if the
2520 * payload is a nested type. */
2521 static int
2522 odp_key_attr_len(const struct attr_len_tbl tbl[], int max_type, uint16_t type)
2523 {
2524 if (type > max_type) {
2525 return ATTR_LEN_INVALID;
2526 }
2527
2528 return tbl[type].len;
2529 }
2530
2531 static void
2532 format_generic_odp_key(const struct nlattr *a, struct ds *ds)
2533 {
2534 size_t len = nl_attr_get_size(a);
2535 if (len) {
2536 const uint8_t *unspec;
2537 unsigned int i;
2538
2539 unspec = nl_attr_get(a);
2540 for (i = 0; i < len; i++) {
2541 if (i) {
2542 ds_put_char(ds, ' ');
2543 }
2544 ds_put_format(ds, "%02x", unspec[i]);
2545 }
2546 }
2547 }
2548
2549 static const char *
2550 ovs_frag_type_to_string(enum ovs_frag_type type)
2551 {
2552 switch (type) {
2553 case OVS_FRAG_TYPE_NONE:
2554 return "no";
2555 case OVS_FRAG_TYPE_FIRST:
2556 return "first";
2557 case OVS_FRAG_TYPE_LATER:
2558 return "later";
2559 case __OVS_FRAG_TYPE_MAX:
2560 default:
2561 return "<error>";
2562 }
2563 }
2564
2565 enum odp_key_fitness
2566 odp_nsh_hdr_from_attr(const struct nlattr *attr,
2567 struct nsh_hdr *nsh_hdr, size_t size)
2568 {
2569 unsigned int left;
2570 const struct nlattr *a;
2571 bool unknown = false;
2572 uint8_t flags = 0;
2573 uint8_t ttl = 63;
2574 size_t mdlen = 0;
2575 bool has_md1 = false;
2576 bool has_md2 = false;
2577
2578 memset(nsh_hdr, 0, size);
2579
2580 NL_NESTED_FOR_EACH (a, left, attr) {
2581 uint16_t type = nl_attr_type(a);
2582 size_t len = nl_attr_get_size(a);
2583 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2584 OVS_NSH_KEY_ATTR_MAX, type);
2585
2586 if (len != expected_len && expected_len >= 0) {
2587 return ODP_FIT_ERROR;
2588 }
2589
2590 switch (type) {
2591 case OVS_NSH_KEY_ATTR_BASE: {
2592 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2593 nsh_hdr->next_proto = base->np;
2594 nsh_hdr->md_type = base->mdtype;
2595 put_16aligned_be32(&nsh_hdr->path_hdr, base->path_hdr);
2596 flags = base->flags;
2597 ttl = base->ttl;
2598 break;
2599 }
2600 case OVS_NSH_KEY_ATTR_MD1: {
2601 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2602 struct nsh_md1_ctx *md1_dst = &nsh_hdr->md1;
2603 has_md1 = true;
2604 mdlen = nl_attr_get_size(a);
2605 if ((mdlen + NSH_BASE_HDR_LEN != NSH_M_TYPE1_LEN) ||
2606 (mdlen + NSH_BASE_HDR_LEN > size)) {
2607 return ODP_FIT_ERROR;
2608 }
2609 memcpy(md1_dst, md1, mdlen);
2610 break;
2611 }
2612 case OVS_NSH_KEY_ATTR_MD2: {
2613 struct nsh_md2_tlv *md2_dst = &nsh_hdr->md2;
2614 const uint8_t *md2 = nl_attr_get(a);
2615 has_md2 = true;
2616 mdlen = nl_attr_get_size(a);
2617 if (mdlen + NSH_BASE_HDR_LEN > size) {
2618 return ODP_FIT_ERROR;
2619 }
2620 memcpy(md2_dst, md2, mdlen);
2621 break;
2622 }
2623 default:
2624 /* Allow this to show up as unexpected, if there are unknown
2625 * attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
2626 unknown = true;
2627 break;
2628 }
2629 }
2630
2631 if (unknown) {
2632 return ODP_FIT_TOO_MUCH;
2633 }
2634
2635 if ((has_md1 && nsh_hdr->md_type != NSH_M_TYPE1)
2636 || (has_md2 && nsh_hdr->md_type != NSH_M_TYPE2)) {
2637 return ODP_FIT_ERROR;
2638 }
2639
2640 /* nsh header length = NSH_BASE_HDR_LEN + mdlen */
2641 nsh_set_flags_ttl_len(nsh_hdr, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
2642
2643 return ODP_FIT_PERFECT;
2644 }
2645
2646 enum odp_key_fitness
2647 odp_nsh_key_from_attr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
2648 struct ovs_key_nsh *nsh_mask)
2649 {
2650 unsigned int left;
2651 const struct nlattr *a;
2652 bool unknown = false;
2653 bool has_md1 = false;
2654
2655 NL_NESTED_FOR_EACH (a, left, attr) {
2656 uint16_t type = nl_attr_type(a);
2657 size_t len = nl_attr_get_size(a);
2658 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2659 OVS_NSH_KEY_ATTR_MAX, type);
2660
2661 /* The attribute may carry a mask appended to the key, in which case
2662 * 'len' is 2 * expected_len. */
2663 if ((len != expected_len) && (len != 2 * expected_len) &&
2664 (expected_len >= 0)) {
2665 return ODP_FIT_ERROR;
2666 }
2667
2668 if ((nsh_mask && (expected_len >= 0) && (len != 2 * expected_len)) ||
2669 (!nsh_mask && (expected_len >= 0) && (len == 2 * expected_len))) {
2670 return ODP_FIT_ERROR;
2671 }
2672
2673 switch (type) {
2674 case OVS_NSH_KEY_ATTR_UNSPEC:
2675 break;
2676 case OVS_NSH_KEY_ATTR_BASE: {
2677 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2678 nsh->flags = base->flags;
2679 nsh->ttl = base->ttl;
2680 nsh->mdtype = base->mdtype;
2681 nsh->np = base->np;
2682 nsh->path_hdr = base->path_hdr;
2683 if (nsh_mask && (len == 2 * sizeof(*base))) {
2684 const struct ovs_nsh_key_base *base_mask = base + 1;
2685 nsh_mask->flags = base_mask->flags;
2686 nsh_mask->ttl = base_mask->ttl;
2687 nsh_mask->mdtype = base_mask->mdtype;
2688 nsh_mask->np = base_mask->np;
2689 nsh_mask->path_hdr = base_mask->path_hdr;
2690 }
2691 break;
2692 }
2693 case OVS_NSH_KEY_ATTR_MD1: {
2694 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2695 has_md1 = true;
2696 memcpy(nsh->context, md1->context, sizeof md1->context);
2697 if (len == 2 * sizeof(*md1)) {
2698 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
2699 memcpy(nsh_mask->context, md1_mask->context,
2700 sizeof(*md1_mask));
2701 }
2702 break;
2703 }
2704 case OVS_NSH_KEY_ATTR_MD2:
2705 default:
2706 /* Allow this to show up as unexpected, if there are unknown
2707 * attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
2708 unknown = true;
2709 break;
2710 }
2711 }
2712
2713 if (unknown) {
2714 return ODP_FIT_TOO_MUCH;
2715 }
2716
2717 if (has_md1 && nsh->mdtype != NSH_M_TYPE1 && !nsh_mask) {
2718 return ODP_FIT_ERROR;
2719 }
2720
2721 return ODP_FIT_PERFECT;
2722 }
2723
2724 static enum odp_key_fitness
2725 odp_tun_key_from_attr__(const struct nlattr *attr, bool is_mask,
2726 struct flow_tnl *tun)
2727 {
2728 unsigned int left;
2729 const struct nlattr *a;
2730 bool ttl = false;
2731 bool unknown = false;
2732
2733 NL_NESTED_FOR_EACH(a, left, attr) {
2734 uint16_t type = nl_attr_type(a);
2735 size_t len = nl_attr_get_size(a);
2736 int expected_len = odp_key_attr_len(ovs_tun_key_attr_lens,
2737 OVS_TUNNEL_KEY_ATTR_MAX, type);
2738
2739 if (len != expected_len && expected_len >= 0) {
2740 return ODP_FIT_ERROR;
2741 }
2742
2743 switch (type) {
2744 case OVS_TUNNEL_KEY_ATTR_ID:
2745 tun->tun_id = nl_attr_get_be64(a);
2746 tun->flags |= FLOW_TNL_F_KEY;
2747 break;
2748 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
2749 tun->ip_src = nl_attr_get_be32(a);
2750 break;
2751 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
2752 tun->ip_dst = nl_attr_get_be32(a);
2753 break;
2754 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
2755 tun->ipv6_src = nl_attr_get_in6_addr(a);
2756 break;
2757 case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
2758 tun->ipv6_dst = nl_attr_get_in6_addr(a);
2759 break;
2760 case OVS_TUNNEL_KEY_ATTR_TOS:
2761 tun->ip_tos = nl_attr_get_u8(a);
2762 break;
2763 case OVS_TUNNEL_KEY_ATTR_TTL:
2764 tun->ip_ttl = nl_attr_get_u8(a);
2765 ttl = true;
2766 break;
2767 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
2768 tun->flags |= FLOW_TNL_F_DONT_FRAGMENT;
2769 break;
2770 case OVS_TUNNEL_KEY_ATTR_CSUM:
2771 tun->flags |= FLOW_TNL_F_CSUM;
2772 break;
2773 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
2774 tun->tp_src = nl_attr_get_be16(a);
2775 break;
2776 case OVS_TUNNEL_KEY_ATTR_TP_DST:
2777 tun->tp_dst = nl_attr_get_be16(a);
2778 break;
2779 case OVS_TUNNEL_KEY_ATTR_OAM:
2780 tun->flags |= FLOW_TNL_F_OAM;
2781 break;
2782 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
2783 static const struct nl_policy vxlan_opts_policy[] = {
2784 [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
2785 };
2786 struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
2787
2788 if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
2789 return ODP_FIT_ERROR;
2790 }
2791
2792 if (ext[OVS_VXLAN_EXT_GBP]) {
2793 uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
2794
2795 tun->gbp_id = htons(gbp & 0xFFFF);
2796 tun->gbp_flags = (gbp >> 16) & 0xFF;
2797 }
2798
2799 break;
2800 }
2801 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
2802 tun_metadata_from_geneve_nlattr(a, is_mask, tun);
2803 break;
2804 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: {
2805 const struct erspan_metadata *opts = nl_attr_get(a);
2806
2807 tun->erspan_ver = opts->version;
2808 if (tun->erspan_ver == 1) {
2809 tun->erspan_idx = ntohl(opts->u.index);
2810 } else if (tun->erspan_ver == 2) {
2811 tun->erspan_dir = opts->u.md2.dir;
2812 tun->erspan_hwid = get_hwid(&opts->u.md2);
2813 } else {
2814 VLOG_WARN("%s: invalid erspan version", __func__);
2815 }
2816 break;
2817 }
2818
2819 default:
2820 /* Allow this to show up as unexpected, if there are unknown
2821 * tunnel attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
2822 unknown = true;
2823 break;
2824 }
2825 }
2826
2827 if (!ttl) {
2828 return ODP_FIT_ERROR;
2829 }
2830 if (unknown) {
2831 return ODP_FIT_TOO_MUCH;
2832 }
2833 return ODP_FIT_PERFECT;
2834 }
2835
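/* Parses the OVS_KEY_ATTR_TUNNEL nested attribute 'attr' into 'tun', which
 * is zeroed first.  Returns an odp_key_fitness value describing how well
 * 'attr' fit our expectations. */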
2836 enum odp_key_fitness
2837 odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun)
2838 {
2839 memset(tun, 0, sizeof *tun);
2840 return odp_tun_key_from_attr__(attr, false, tun);
2841 }
2842
2843 static void
2844 tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key,
2845 const struct flow_tnl *tun_flow_key,
2846 const struct ofpbuf *key_buf, const char *tnl_type)
2847 {
2848 size_t tun_key_ofs;
2849
2850 tun_key_ofs = nl_msg_start_nested(a, OVS_KEY_ATTR_TUNNEL);
2851
2852 /* tun_id != 0 without FLOW_TNL_F_KEY is valid if tun_key is a mask. */
2853 if (tun_key->tun_id || tun_key->flags & FLOW_TNL_F_KEY) {
2854 nl_msg_put_be64(a, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id);
2855 }
2856 if (tun_key->ip_src) {
2857 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ip_src);
2858 }
2859 if (tun_key->ip_dst) {
2860 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
2861 }
2862 if (ipv6_addr_is_set(&tun_key->ipv6_src)) {
2863 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_SRC, &tun_key->ipv6_src);
2864 }
2865 if (ipv6_addr_is_set(&tun_key->ipv6_dst)) {
2866 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_DST, &tun_key->ipv6_dst);
2867 }
2868 if (tun_key->ip_tos) {
2869 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
2870 }
2871 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ip_ttl);
2872 if (tun_key->flags & FLOW_TNL_F_DONT_FRAGMENT) {
2873 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
2874 }
2875 if (tun_key->flags & FLOW_TNL_F_CSUM) {
2876 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
2877 }
2878 if (tun_key->tp_src) {
2879 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src);
2880 }
2881 if (tun_key->tp_dst) {
2882 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst);
2883 }
2884 if (tun_key->flags & FLOW_TNL_F_OAM) {
2885 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
2886 }
2887
2888 /* If tnl_type names a particular type of output tunnel, put only the
2889 * tunnel metadata relevant to that type into the nlattr.
2890 * If tnl_type is NULL, put tunnel metadata according to the contents
2891 * of 'tun_key'.
2892 */
2893 if ((!tnl_type || !strcmp(tnl_type, "vxlan")) &&
2894 (tun_key->gbp_flags || tun_key->gbp_id)) {
2895 size_t vxlan_opts_ofs;
2896
2897 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
2898 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
2899 (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
2900 nl_msg_end_nested(a, vxlan_opts_ofs);
2901 }
2902
2903 if (!tnl_type || !strcmp(tnl_type, "geneve")) {
2904 tun_metadata_to_geneve_nlattr(tun_key, tun_flow_key, key_buf, a);
2905 }
2906
2907 if ((!tnl_type || !strcmp(tnl_type, "erspan") ||
2908 !strcmp(tnl_type, "ip6erspan")) &&
2909 (tun_key->erspan_ver == 1 || tun_key->erspan_ver == 2)) {
2910 struct erspan_metadata opts;
2911
2912 opts.version = tun_key->erspan_ver;
2913 if (opts.version == 1) {
2914 opts.u.index = htonl(tun_key->erspan_idx);
2915 } else {
2916 opts.u.md2.dir = tun_key->erspan_dir;
2917 set_hwid(&opts.u.md2, tun_key->erspan_hwid);
2918 }
2919 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
2920 &opts, sizeof(opts));
2921 }
2922
2923 nl_msg_end_nested(a, tun_key_ofs);
2924 }
2925
2926 static bool
2927 odp_mask_is_constant__(enum ovs_key_attr attr, const void *mask, size_t size,
2928 int constant)
2929 {
2930 /* Convert 'constant' to all the widths we need. C conversion rules ensure
2931 * that -1 becomes all-1-bits and 0 does not change. */
2932 ovs_be16 be16 = (OVS_FORCE ovs_be16) constant;
2933 uint32_t u32 = constant;
2934 uint8_t u8 = constant;
2935 const struct in6_addr *in6 = constant ? &in6addr_exact : &in6addr_any;
2936
2937 switch (attr) {
2938 case OVS_KEY_ATTR_UNSPEC:
2939 case OVS_KEY_ATTR_ENCAP:
2940 case __OVS_KEY_ATTR_MAX:
2941 default:
2942 return false;
2943
2944 case OVS_KEY_ATTR_PRIORITY:
2945 case OVS_KEY_ATTR_IN_PORT:
2946 case OVS_KEY_ATTR_ETHERNET:
2947 case OVS_KEY_ATTR_VLAN:
2948 case OVS_KEY_ATTR_ETHERTYPE:
2949 case OVS_KEY_ATTR_IPV4:
2950 case OVS_KEY_ATTR_TCP:
2951 case OVS_KEY_ATTR_UDP:
2952 case OVS_KEY_ATTR_ICMP:
2953 case OVS_KEY_ATTR_ICMPV6:
2954 case OVS_KEY_ATTR_ND:
2955 case OVS_KEY_ATTR_ND_EXTENSIONS:
2956 case OVS_KEY_ATTR_SKB_MARK:
2957 case OVS_KEY_ATTR_TUNNEL:
2958 case OVS_KEY_ATTR_SCTP:
2959 case OVS_KEY_ATTR_DP_HASH:
2960 case OVS_KEY_ATTR_RECIRC_ID:
2961 case OVS_KEY_ATTR_MPLS:
2962 case OVS_KEY_ATTR_CT_STATE:
2963 case OVS_KEY_ATTR_CT_ZONE:
2964 case OVS_KEY_ATTR_CT_MARK:
2965 case OVS_KEY_ATTR_CT_LABELS:
2966 case OVS_KEY_ATTR_PACKET_TYPE:
2967 case OVS_KEY_ATTR_NSH:
2968 return is_all_byte(mask, size, u8);
2969
2970 case OVS_KEY_ATTR_TCP_FLAGS:
2971 return TCP_FLAGS(*(ovs_be16 *) mask) == TCP_FLAGS(be16);
2972
2973 case OVS_KEY_ATTR_IPV6: {
2974 const struct ovs_key_ipv6 *ipv6_mask = mask;
2975 return ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
2976 == htonl(IPV6_LABEL_MASK & u32)
2977 && ipv6_mask->ipv6_proto == u8
2978 && ipv6_mask->ipv6_tclass == u8
2979 && ipv6_mask->ipv6_hlimit == u8
2980 && ipv6_mask->ipv6_frag == u8
2981 && ipv6_addr_equals(&ipv6_mask->ipv6_src, in6)
2982 && ipv6_addr_equals(&ipv6_mask->ipv6_dst, in6));
2983 }
2984
2985 case OVS_KEY_ATTR_ARP:
2986 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_arp, arp_tha), u8);
2987
2988 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
2989 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv4,
2990 ipv4_proto), u8);
2991
2992 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
2993 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv6,
2994 ipv6_proto), u8);
2995 }
2996 }
2997
2998 /* The caller must already have verified that 'ma' has a correct length.
2999 *
3000 * The main purpose of this function is formatting, to allow code to figure out
3001 * whether the mask can be omitted. It doesn't try hard for attributes that
3002 * contain sub-attributes, etc., because normally those would be broken down
3003 * further for formatting. */
3004 static bool
3005 odp_mask_attr_is_wildcard(const struct nlattr *ma)
3006 {
3007 return odp_mask_is_constant__(nl_attr_type(ma),
3008 nl_attr_get(ma), nl_attr_get_size(ma), 0);
3009 }
3010
3011 /* The caller must already have verified that 'size' is a correct length for
3012 * 'attr'.
3013 *
3014 * The main purpose of this function is formatting, to allow code to figure out
3015 * whether the mask can be omitted. It doesn't try hard for attributes that
3016 * contain sub-attributes, etc., because normally those would be broken down
3017 * further for formatting. */
3018 static bool
3019 odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
3020 {
3021 return odp_mask_is_constant__(attr, mask, size, -1);
3022 }
3023
3024 /* The caller must already have verified that 'ma' has a correct length. */
3025 static bool
3026 odp_mask_attr_is_exact(const struct nlattr *ma)
3027 {
3028 enum ovs_key_attr attr = nl_attr_type(ma);
3029 return odp_mask_is_exact(attr, nl_attr_get(ma), nl_attr_get_size(ma));
3030 }
3031
3032 void
3033 odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
3034 char *port_name)
3035 {
3036 struct odp_portno_names *odp_portno_names;
3037
3038 odp_portno_names = xmalloc(sizeof *odp_portno_names);
3039 odp_portno_names->port_no = port_no;
3040 odp_portno_names->name = xstrdup(port_name);
3041 hmap_insert(portno_names, &odp_portno_names->hmap_node,
3042 hash_odp_port(port_no));
3043 }
3044
3045 static char *
3046 odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
3047 {
3048 if (portno_names) {
3049 struct odp_portno_names *odp_portno_names;
3050
3051 HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
3052 hash_odp_port(port_no), portno_names) {
3053 if (odp_portno_names->port_no == port_no) {
3054 return odp_portno_names->name;
3055 }
3056 }
3057 }
3058 return NULL;
3059 }
3060
3061 void
3062 odp_portno_names_destroy(struct hmap *portno_names)
3063 {
3064 struct odp_portno_names *odp_portno_names;
3065
3066 HMAP_FOR_EACH_POP (odp_portno_names, hmap_node, portno_names) {
3067 free(odp_portno_names->name);
3068 free(odp_portno_names);
3069 }
3070 }
3071
3072 void
3073 odp_portno_name_format(const struct hmap *portno_names, odp_port_t port_no,
3074 struct ds *s)
3075 {
3076 const char *name = odp_portno_names_get(portno_names, port_no);
3077 if (name) {
3078 ds_put_cstr(s, name);
3079 } else {
3080 ds_put_format(s, "%"PRIu32, port_no);
3081 }
3082 }
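/* Usage sketch for the port-number/name map above (the port number and
 * name are arbitrary samples):
 *
 *     struct hmap portno_names = HMAP_INITIALIZER(&portno_names);
 *     struct ds ds = DS_EMPTY_INITIALIZER;
 *
 *     odp_portno_names_set(&portno_names, u32_to_odp(1), "eth0");
 *     odp_portno_name_format(&portno_names, u32_to_odp(1), &ds);
 *     ...
 *     ds_destroy(&ds);
 *     odp_portno_names_destroy(&portno_names);
 *     hmap_destroy(&portno_names);
 */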
3083
3084 /* Format helpers. */
3085
3086 static void
3087 format_eth(struct ds *ds, const char *name, const struct eth_addr key,
3088 const struct eth_addr *mask, bool verbose)
3089 {
3090 bool mask_empty = mask && eth_addr_is_zero(*mask);
3091
3092 if (verbose || !mask_empty) {
3093 bool mask_full = !mask || eth_mask_is_exact(*mask);
3094
3095 if (mask_full) {
3096 ds_put_format(ds, "%s="ETH_ADDR_FMT",", name, ETH_ADDR_ARGS(key));
3097 } else {
3098 ds_put_format(ds, "%s=", name);
3099 eth_format_masked(key, mask, ds);
3100 ds_put_char(ds, ',');
3101 }
3102 }
3103 }
3104
3105
3106 static void
3107 format_be64(struct ds *ds, const char *name, ovs_be64 key,
3108 const ovs_be64 *mask, bool verbose)
3109 {
3110 bool mask_empty = mask && !*mask;
3111
3112 if (verbose || !mask_empty) {
3113 bool mask_full = !mask || *mask == OVS_BE64_MAX;
3114
3115 ds_put_format(ds, "%s=0x%"PRIx64, name, ntohll(key));
3116 if (!mask_full) { /* Partially masked. */
3117 ds_put_format(ds, "/%#"PRIx64, ntohll(*mask));
3118 }
3119 ds_put_char(ds, ',');
3120 }
3121 }
3122
3123 static void
3124 format_ipv4(struct ds *ds, const char *name, ovs_be32 key,
3125 const ovs_be32 *mask, bool verbose)
3126 {
3127 bool mask_empty = mask && !*mask;
3128
3129 if (verbose || !mask_empty) {
3130 bool mask_full = !mask || *mask == OVS_BE32_MAX;
3131
3132 ds_put_format(ds, "%s="IP_FMT, name, IP_ARGS(key));
3133 if (!mask_full) { /* Partially masked. */
3134 ds_put_format(ds, "/"IP_FMT, IP_ARGS(*mask));
3135 }
3136 ds_put_char(ds, ',');
3137 }
3138 }
3139
3140 static void
3141 format_in6_addr(struct ds *ds, const char *name,
3142 const struct in6_addr *key,
3143 const struct in6_addr *mask,
3144 bool verbose)
3145 {
3146 char buf[INET6_ADDRSTRLEN];
3147 bool mask_empty = mask && ipv6_mask_is_any(mask);
3148
3149 if (verbose || !mask_empty) {
3150 bool mask_full = !mask || ipv6_mask_is_exact(mask);
3151
3152 inet_ntop(AF_INET6, key, buf, sizeof buf);
3153 ds_put_format(ds, "%s=%s", name, buf);
3154 if (!mask_full) { /* Partially masked. */
3155 inet_ntop(AF_INET6, mask, buf, sizeof buf);
3156 ds_put_format(ds, "/%s", buf);
3157 }
3158 ds_put_char(ds, ',');
3159 }
3160 }
3161
3162 static void
3163 format_ipv6_label(struct ds *ds, const char *name, ovs_be32 key,
3164 const ovs_be32 *mask, bool verbose)
3165 {
3166 bool mask_empty = mask && !*mask;
3167
3168 if (verbose || !mask_empty) {
3169 bool mask_full = !mask
3170 || (*mask & htonl(IPV6_LABEL_MASK)) == htonl(IPV6_LABEL_MASK);
3171
3172 ds_put_format(ds, "%s=%#"PRIx32, name, ntohl(key));
3173 if (!mask_full) { /* Partially masked. */
3174 ds_put_format(ds, "/%#"PRIx32, ntohl(*mask));
3175 }
3176 ds_put_char(ds, ',');
3177 }
3178 }
3179
3180 static void
3181 format_u8x(struct ds *ds, const char *name, uint8_t key,
3182 const uint8_t *mask, bool verbose)
3183 {
3184 bool mask_empty = mask && !*mask;
3185
3186 if (verbose || !mask_empty) {
3187 bool mask_full = !mask || *mask == UINT8_MAX;
3188
3189 ds_put_format(ds, "%s=%#"PRIx8, name, key);
3190 if (!mask_full) { /* Partially masked. */
3191 ds_put_format(ds, "/%#"PRIx8, *mask);
3192 }
3193 ds_put_char(ds, ',');
3194 }
3195 }
3196
3197 static void
3198 format_u8u(struct ds *ds, const char *name, uint8_t key,
3199 const uint8_t *mask, bool verbose)
3200 {
3201 bool mask_empty = mask && !*mask;
3202
3203 if (verbose || !mask_empty) {
3204 bool mask_full = !mask || *mask == UINT8_MAX;
3205
3206 ds_put_format(ds, "%s=%"PRIu8, name, key);
3207 if (!mask_full) { /* Partially masked. */
3208 ds_put_format(ds, "/%#"PRIx8, *mask);
3209 }
3210 ds_put_char(ds, ',');
3211 }
3212 }
3213
3214 static void
3215 format_be16(struct ds *ds, const char *name, ovs_be16 key,
3216 const ovs_be16 *mask, bool verbose)
3217 {
3218 bool mask_empty = mask && !*mask;
3219
3220 if (verbose || !mask_empty) {
3221 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3222
3223 ds_put_format(ds, "%s=%"PRIu16, name, ntohs(key));
3224 if (!mask_full) { /* Partially masked. */
3225 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3226 }
3227 ds_put_char(ds, ',');
3228 }
3229 }
3230
3231 static void
3232 format_be16x(struct ds *ds, const char *name, ovs_be16 key,
3233 const ovs_be16 *mask, bool verbose)
3234 {
3235 bool mask_empty = mask && !*mask;
3236
3237 if (verbose || !mask_empty) {
3238 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3239
3240 ds_put_format(ds, "%s=%#"PRIx16, name, ntohs(key));
3241 if (!mask_full) { /* Partially masked. */
3242 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3243 }
3244 ds_put_char(ds, ',');
3245 }
3246 }
3247
3248 static void
3249 format_tun_flags(struct ds *ds, const char *name, uint16_t key,
3250 const uint16_t *mask, bool verbose)
3251 {
3252 bool mask_empty = mask && !*mask;
3253
3254 if (verbose || !mask_empty) {
3255 ds_put_cstr(ds, name);
3256 ds_put_char(ds, '(');
3257 if (mask) {
3258 format_flags_masked(ds, NULL, flow_tun_flag_to_string, key,
3259 *mask & FLOW_TNL_F_MASK, FLOW_TNL_F_MASK);
3260 } else { /* Fully masked. */
3261 format_flags(ds, flow_tun_flag_to_string, key, '|');
3262 }
3263 ds_put_cstr(ds, "),");
3264 }
3265 }
3266
3267 static bool
3268 check_attr_len(struct ds *ds, const struct nlattr *a, const struct nlattr *ma,
3269 const struct attr_len_tbl tbl[], int max_type, bool need_key)
3270 {
3271 int expected_len;
3272
3273 expected_len = odp_key_attr_len(tbl, max_type, nl_attr_type(a));
3274 if (expected_len != ATTR_LEN_VARIABLE &&
3275 expected_len != ATTR_LEN_NESTED) {
3276
3277 bool bad_key_len = nl_attr_get_size(a) != expected_len;
3278 bool bad_mask_len = ma && nl_attr_get_size(ma) != expected_len;
3279
3280 if (bad_key_len || bad_mask_len) {
3281 if (need_key) {
3282 ds_put_format(ds, "key%u", nl_attr_type(a));
3283 }
3284 if (bad_key_len) {
3285 ds_put_format(ds, "(bad key length %"PRIuSIZE", expected %d)(",
3286 nl_attr_get_size(a), expected_len);
3287 }
3288 format_generic_odp_key(a, ds);
3289 if (ma) {
3290 ds_put_char(ds, '/');
3291 if (bad_mask_len) {
3292 ds_put_format(ds, "(bad mask length %"PRIuSIZE", expected %d)(",
3293 nl_attr_get_size(ma), expected_len);
3294 }
3295 format_generic_odp_key(ma, ds);
3296 }
3297 ds_put_char(ds, ')');
3298 return false;
3299 }
3300 }
3301
3302 return true;
3303 }
3304
3305 static void
3306 format_unknown_key(struct ds *ds, const struct nlattr *a,
3307 const struct nlattr *ma)
3308 {
3309 ds_put_format(ds, "key%u(", nl_attr_type(a));
3310 format_generic_odp_key(a, ds);
3311 if (ma && !odp_mask_attr_is_exact(ma)) {
3312 ds_put_char(ds, '/');
3313 format_generic_odp_key(ma, ds);
3314 }
3315 ds_put_cstr(ds, "),");
3316 }
3317
3318 static void
3319 format_odp_tun_vxlan_opt(const struct nlattr *attr,
3320 const struct nlattr *mask_attr, struct ds *ds,
3321 bool verbose)
3322 {
3323 unsigned int left;
3324 const struct nlattr *a;
3325 struct ofpbuf ofp;
3326
3327 ofpbuf_init(&ofp, 100);
3328 NL_NESTED_FOR_EACH(a, left, attr) {
3329 uint16_t type = nl_attr_type(a);
3330 const struct nlattr *ma = NULL;
3331
3332 if (mask_attr) {
3333 ma = nl_attr_find__(nl_attr_get(mask_attr),
3334 nl_attr_get_size(mask_attr), type);
3335 if (!ma) {
3336 ma = generate_all_wildcard_mask(ovs_vxlan_ext_attr_lens,
3337 OVS_VXLAN_EXT_MAX,
3338 &ofp, a);
3339 }
3340 }
3341
3342 if (!check_attr_len(ds, a, ma, ovs_vxlan_ext_attr_lens,
3343 OVS_VXLAN_EXT_MAX, true)) {
3344 continue;
3345 }
3346
3347 switch (type) {
3348 case OVS_VXLAN_EXT_GBP: {
3349 uint32_t key = nl_attr_get_u32(a);
3350 ovs_be16 id, id_mask;
3351 uint8_t flags, flags_mask = 0;
3352
3353 id = htons(key & 0xFFFF);
3354 flags = (key >> 16) & 0xFF;
3355 if (ma) {
3356 uint32_t mask = nl_attr_get_u32(ma);
3357 id_mask = htons(mask & 0xFFFF);
3358 flags_mask = (mask >> 16) & 0xFF;
3359 }
3360
3361 ds_put_cstr(ds, "gbp(");
3362 format_be16(ds, "id", id, ma ? &id_mask : NULL, verbose);
3363 format_u8x(ds, "flags", flags, ma ? &flags_mask : NULL, verbose);
3364 ds_chomp(ds, ',');
3365 ds_put_cstr(ds, "),");
3366 break;
3367 }
3368
3369 default:
3370 format_unknown_key(ds, a, ma);
3371 }
3372 ofpbuf_clear(&ofp);
3373 }
3374
3375 ds_chomp(ds, ',');
3376 ofpbuf_uninit(&ofp);
3377 }
3378
3379 static void
3380 format_odp_tun_erspan_opt(const struct nlattr *attr,
3381 const struct nlattr *mask_attr, struct ds *ds,
3382 bool verbose)
3383 {
3384 const struct erspan_metadata *opts, *mask;
3385 uint8_t ver, ver_ma, dir, dir_ma, hwid, hwid_ma;
3386
3387 opts = nl_attr_get(attr);
3388 mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
3389
3390 ver = (uint8_t)opts->version;
3391 if (mask) {
3392 ver_ma = (uint8_t)mask->version;
3393 }
3394
3395 format_u8u(ds, "ver", ver, mask ? &ver_ma : NULL, verbose);
3396
3397 if (opts->version == 1) {
3398 if (mask) {
3399 ds_put_format(ds, "idx=%#"PRIx32"/%#"PRIx32",",
3400 ntohl(opts->u.index),
3401 ntohl(mask->u.index));
3402 } else {
3403 ds_put_format(ds, "idx=%#"PRIx32",", ntohl(opts->u.index));
3404 }
3405 } else if (opts->version == 2) {
3406 dir = opts->u.md2.dir;
3407 hwid = opts->u.md2.hwid;
3408 if (mask) {
3409 dir_ma = mask->u.md2.dir;
3410 hwid_ma = mask->u.md2.hwid;
3411 }
3412
3413 format_u8u(ds, "dir", dir, mask ? &dir_ma : NULL, verbose);
3414 format_u8x(ds, "hwid", hwid, mask ? &hwid_ma : NULL, verbose);
3415 }
3416 ds_chomp(ds, ',');
3417 }
3418
3419 #define MASK(PTR, FIELD) ((PTR) ? &(PTR)->FIELD : NULL)
3420
3421 static void
3422 format_geneve_opts(const struct geneve_opt *opt,
3423 const struct geneve_opt *mask, int opts_len,
3424 struct ds *ds, bool verbose)
3425 {
3426 while (opts_len > 0) {
3427 unsigned int len;
3428 uint8_t data_len, data_len_mask;
3429
3430 if (opts_len < sizeof *opt) {
3431 ds_put_format(ds, "opt len %u less than minimum %"PRIuSIZE,
3432 opts_len, sizeof *opt);
3433 return;
3434 }
3435
3436 data_len = opt->length * 4;
3437 if (mask) {
3438 if (mask->length == 0x1f) {
3439 data_len_mask = UINT8_MAX;
3440 } else {
3441 data_len_mask = mask->length;
3442 }
3443 }
3444 len = sizeof *opt + data_len;
3445 if (len > opts_len) {
3446 ds_put_format(ds, "opt len %u greater than remaining %u",
3447 len, opts_len);
3448 return;
3449 }
3450
3451 ds_put_char(ds, '{');
3452 format_be16x(ds, "class", opt->opt_class, MASK(mask, opt_class),
3453 verbose);
3454 format_u8x(ds, "type", opt->type, MASK(mask, type), verbose);
3455 format_u8u(ds, "len", data_len, mask ? &data_len_mask : NULL, verbose);
3456 if (data_len &&
3457 (verbose || !mask || !is_all_zeros(mask + 1, data_len))) {
3458 ds_put_hex(ds, opt + 1, data_len);
3459 if (mask && !is_all_ones(mask + 1, data_len)) {
3460 ds_put_char(ds, '/');
3461 ds_put_hex(ds, mask + 1, data_len);
3462 }
3463 } else {
3464 ds_chomp(ds, ',');
3465 }
3466 ds_put_char(ds, '}');
3467
3468 opt += len / sizeof(*opt);
3469 if (mask) {
3470 mask += len / sizeof(*opt);
3471 }
3472 opts_len -= len;
3473 }
3474 }
3475
3476 static void
3477 format_odp_tun_geneve(const struct nlattr *attr,
3478 const struct nlattr *mask_attr, struct ds *ds,
3479 bool verbose)
3480 {
3481 int opts_len = nl_attr_get_size(attr);
3482 const struct geneve_opt *opt = nl_attr_get(attr);
3483 const struct geneve_opt *mask = mask_attr ?
3484 nl_attr_get(mask_attr) : NULL;
3485
3486 if (mask && nl_attr_get_size(attr) != nl_attr_get_size(mask_attr)) {
3487 ds_put_format(ds, "value len %"PRIuSIZE" different from mask len %"PRIuSIZE,
3488 nl_attr_get_size(attr), nl_attr_get_size(mask_attr));
3489 return;
3490 }
3491
3492 format_geneve_opts(opt, mask, opts_len, ds, verbose);
3493 }
3494
3495 static void
3496 format_odp_nsh_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3497 struct ds *ds)
3498 {
3499 unsigned int left;
3500 const struct nlattr *a;
3501 struct ovs_key_nsh nsh;
3502 struct ovs_key_nsh nsh_mask;
3503
3504 memset(&nsh, 0, sizeof nsh);
3505 memset(&nsh_mask, 0xff, sizeof nsh_mask);
3506
3507 NL_NESTED_FOR_EACH (a, left, attr) {
3508 enum ovs_nsh_key_attr type = nl_attr_type(a);
3509 const struct nlattr *ma = NULL;
3510
3511 if (mask_attr) {
3512 ma = nl_attr_find__(nl_attr_get(mask_attr),
3513 nl_attr_get_size(mask_attr), type);
3514 }
3515
3516 if (!check_attr_len(ds, a, ma, ovs_nsh_key_attr_lens,
3517 OVS_NSH_KEY_ATTR_MAX, true)) {
3518 continue;
3519 }
3520
3521 switch (type) {
3522 case OVS_NSH_KEY_ATTR_UNSPEC:
3523 break;
3524 case OVS_NSH_KEY_ATTR_BASE: {
3525 const struct ovs_nsh_key_base *base = nl_attr_get(a);
3526 const struct ovs_nsh_key_base *base_mask
3527 = ma ? nl_attr_get(ma) : NULL;
3528 nsh.flags = base->flags;
3529 nsh.ttl = base->ttl;
3530 nsh.mdtype = base->mdtype;
3531 nsh.np = base->np;
3532 nsh.path_hdr = base->path_hdr;
3533 if (base_mask) {
3534 nsh_mask.flags = base_mask->flags;
3535 nsh_mask.ttl = base_mask->ttl;
3536 nsh_mask.mdtype = base_mask->mdtype;
3537 nsh_mask.np = base_mask->np;
3538 nsh_mask.path_hdr = base_mask->path_hdr;
3539 }
3540 break;
3541 }
3542 case OVS_NSH_KEY_ATTR_MD1: {
3543 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
3544 const struct ovs_nsh_key_md1 *md1_mask
3545 = ma ? nl_attr_get(ma) : NULL;
3546 memcpy(nsh.context, md1->context, sizeof md1->context);
3547 if (md1_mask) {
3548 memcpy(nsh_mask.context, md1_mask->context,
3549 sizeof md1_mask->context);
3550 }
3551 break;
3552 }
3553 case OVS_NSH_KEY_ATTR_MD2:
3554 case __OVS_NSH_KEY_ATTR_MAX:
3555 default:
3556 /* No support for matching other metadata formats yet. */
3557 break;
3558 }
3559 }
3560
3561 if (mask_attr) {
3562 format_nsh_key_mask(ds, &nsh, &nsh_mask);
3563 } else {
3564 format_nsh_key(ds, &nsh);
3565 }
3566 }
3567
3568 static void
3569 format_odp_tun_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3570 struct ds *ds, bool verbose)
3571 {
3572 unsigned int left;
3573 const struct nlattr *a;
3574 uint16_t flags = 0;
3575 uint16_t mask_flags = 0;
3576 struct ofpbuf ofp;
3577
3578 ofpbuf_init(&ofp, 100);
3579 NL_NESTED_FOR_EACH(a, left, attr) {
3580 enum ovs_tunnel_key_attr type = nl_attr_type(a);
3581 const struct nlattr *ma = NULL;
3582
3583 if (mask_attr) {
3584 ma = nl_attr_find__(nl_attr_get(mask_attr),
3585 nl_attr_get_size(mask_attr), type);
3586 if (!ma) {
3587 ma = generate_all_wildcard_mask(ovs_tun_key_attr_lens,
3588 OVS_TUNNEL_KEY_ATTR_MAX,
3589 &ofp, a);
3590 }
3591 }
3592
3593 if (!check_attr_len(ds, a, ma, ovs_tun_key_attr_lens,
3594 OVS_TUNNEL_KEY_ATTR_MAX, true)) {
3595 continue;
3596 }
3597
3598 switch (type) {
3599 case OVS_TUNNEL_KEY_ATTR_ID:
3600 format_be64(ds, "tun_id", nl_attr_get_be64(a),
3601 ma ? nl_attr_get(ma) : NULL, verbose);
3602 flags |= FLOW_TNL_F_KEY;
3603 if (ma) {
3604 mask_flags |= FLOW_TNL_F_KEY;
3605 }
3606 break;
3607 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
3608 format_ipv4(ds, "src", nl_attr_get_be32(a),
3609 ma ? nl_attr_get(ma) : NULL, verbose);
3610 break;
3611 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
3612 format_ipv4(ds, "dst", nl_attr_get_be32(a),
3613 ma ? nl_attr_get(ma) : NULL, verbose);
3614 break;
3615 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
3616 struct in6_addr ipv6_src;
3617 ipv6_src = nl_attr_get_in6_addr(a);
3618 format_in6_addr(ds, "ipv6_src", &ipv6_src,
3619 ma ? nl_attr_get(ma) : NULL, verbose);
3620 break;
3621 }
3622 case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
3623 struct in6_addr ipv6_dst;
3624 ipv6_dst = nl_attr_get_in6_addr(a);
3625 format_in6_addr(ds, "ipv6_dst", &ipv6_dst,
3626 ma ? nl_attr_get(ma) : NULL, verbose);
3627 break;
3628 }
3629 case OVS_TUNNEL_KEY_ATTR_TOS:
3630 format_u8x(ds, "tos", nl_attr_get_u8(a),
3631 ma ? nl_attr_get(ma) : NULL, verbose);
3632 break;
3633 case OVS_TUNNEL_KEY_ATTR_TTL:
3634 format_u8u(ds, "ttl", nl_attr_get_u8(a),
3635 ma ? nl_attr_get(ma) : NULL, verbose);
3636 break;
3637 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3638 flags |= FLOW_TNL_F_DONT_FRAGMENT;
3639 break;
3640 case OVS_TUNNEL_KEY_ATTR_CSUM:
3641 flags |= FLOW_TNL_F_CSUM;
3642 break;
3643 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
3644 format_be16(ds, "tp_src", nl_attr_get_be16(a),
3645 ma ? nl_attr_get(ma) : NULL, verbose);
3646 break;
3647 case OVS_TUNNEL_KEY_ATTR_TP_DST:
3648 format_be16(ds, "tp_dst", nl_attr_get_be16(a),
3649 ma ? nl_attr_get(ma) : NULL, verbose);
3650 break;
3651 case OVS_TUNNEL_KEY_ATTR_OAM:
3652 flags |= FLOW_TNL_F_OAM;
3653 break;
3654 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
3655 ds_put_cstr(ds, "vxlan(");
3656 format_odp_tun_vxlan_opt(a, ma, ds, verbose);
3657 ds_put_cstr(ds, "),");
3658 break;
3659 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
3660 ds_put_cstr(ds, "geneve(");
3661 format_odp_tun_geneve(a, ma, ds, verbose);
3662 ds_put_cstr(ds, "),");
3663 break;
3664 case OVS_TUNNEL_KEY_ATTR_PAD:
3665 break;
3666 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
3667 ds_put_cstr(ds, "erspan(");
3668 format_odp_tun_erspan_opt(a, ma, ds, verbose);
3669 ds_put_cstr(ds, "),");
3670 break;
3671 case __OVS_TUNNEL_KEY_ATTR_MAX:
3672 default:
3673 format_unknown_key(ds, a, ma);
3674 }
3675 ofpbuf_clear(&ofp);
3676 }
3677
3678 /* Flags can have a valid mask even if the attribute is not set, so
3679 * we need to collect these separately. */
3680 if (mask_attr) {
3681 NL_NESTED_FOR_EACH(a, left, mask_attr) {
3682 switch (nl_attr_type(a)) {
3683 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3684 mask_flags |= FLOW_TNL_F_DONT_FRAGMENT;
3685 break;
3686 case OVS_TUNNEL_KEY_ATTR_CSUM:
3687 mask_flags |= FLOW_TNL_F_CSUM;
3688 break;
3689 case OVS_TUNNEL_KEY_ATTR_OAM:
3690 mask_flags |= FLOW_TNL_F_OAM;
3691 break;
3692 }
3693 }
3694 }
3695
3696 format_tun_flags(ds, "flags", flags, mask_attr ? &mask_flags : NULL,
3697 verbose);
3698 ds_chomp(ds, ',');
3699 ofpbuf_uninit(&ofp);
3700 }
3701
3702 static const char *
3703 odp_ct_state_to_string(uint32_t flag)
3704 {
3705 switch (flag) {
3706 case OVS_CS_F_REPLY_DIR:
3707 return "rpl";
3708 case OVS_CS_F_TRACKED:
3709 return "trk";
3710 case OVS_CS_F_NEW:
3711 return "new";
3712 case OVS_CS_F_ESTABLISHED:
3713 return "est";
3714 case OVS_CS_F_RELATED:
3715 return "rel";
3716 case OVS_CS_F_INVALID:
3717 return "inv";
3718 case OVS_CS_F_SRC_NAT:
3719 return "snat";
3720 case OVS_CS_F_DST_NAT:
3721 return "dnat";
3722 default:
3723 return NULL;
3724 }
3725 }
3726
3727 static void
3728 format_frag(struct ds *ds, const char *name, uint8_t key,
3729 const uint8_t *mask, bool verbose OVS_UNUSED)
3730 {
3731 bool mask_empty = mask && !*mask;
3732 bool mask_full = !mask || *mask == UINT8_MAX;
3733
3734 /* ODP frag is an enumeration field; partial masks are not meaningful. */
3735 if (!mask_empty && !mask_full) {
3736 ds_put_format(ds, "error: partial mask not supported for frag (%#"
3737 PRIx8"),", *mask);
3738 } else if (!mask_empty) {
3739 ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
3740 }
3741 }
3742
3743 static bool
3744 mask_empty(const struct nlattr *ma)
3745 {
3746 const void *mask;
3747 size_t n;
3748
3749 if (!ma) {
3750 return true;
3751 }
3752 mask = nl_attr_get(ma);
3753 n = nl_attr_get_size(ma);
3754
3755 return is_all_zeros(mask, n);
3756 }
3757
3758 /* The caller must have already verified that 'a' and 'ma' have correct
3759 * lengths. */
3760 static void
3761 format_odp_key_attr__(const struct nlattr *a, const struct nlattr *ma,
3762 const struct hmap *portno_names, struct ds *ds,
3763 bool verbose)
3764 {
3765 enum ovs_key_attr attr = nl_attr_type(a);
3766 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3767 bool is_exact;
3768
3769 is_exact = ma ? odp_mask_attr_is_exact(ma) : true;
3770
3771 ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
3772
3773 ds_put_char(ds, '(');
3774 switch (attr) {
3775 case OVS_KEY_ATTR_ENCAP:
3776 if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
3777 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
3778 nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
3779 verbose);
3780 } else if (nl_attr_get_size(a)) {
3781 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
3782 ds, verbose);
3783 }
3784 break;
3785
3786 case OVS_KEY_ATTR_PRIORITY:
3787 case OVS_KEY_ATTR_SKB_MARK:
3788 case OVS_KEY_ATTR_DP_HASH:
3789 case OVS_KEY_ATTR_RECIRC_ID:
3790 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
3791 if (!is_exact) {
3792 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
3793 }
3794 break;
3795
3796 case OVS_KEY_ATTR_CT_MARK:
3797 if (verbose || !mask_empty(ma)) {
3798 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
3799 if (!is_exact) {
3800 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
3801 }
3802 }
3803 break;
3804
3805 case OVS_KEY_ATTR_CT_STATE:
3806 if (verbose) {
3807 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
3808 if (!is_exact) {
3809 ds_put_format(ds, "/%#"PRIx32,
3810 mask_empty(ma) ? 0 : nl_attr_get_u32(ma));
3811 }
3812 } else if (!is_exact) {
3813 format_flags_masked(ds, NULL, odp_ct_state_to_string,
3814 nl_attr_get_u32(a),
3815 mask_empty(ma) ? 0 : nl_attr_get_u32(ma),
3816 UINT32_MAX);
3817 } else {
3818 format_flags(ds, odp_ct_state_to_string, nl_attr_get_u32(a), '|');
3819 }
3820 break;
3821
3822 case OVS_KEY_ATTR_CT_ZONE:
3823 if (verbose || !mask_empty(ma)) {
3824 ds_put_format(ds, "%#"PRIx16, nl_attr_get_u16(a));
3825 if (!is_exact) {
3826 ds_put_format(ds, "/%#"PRIx16, nl_attr_get_u16(ma));
3827 }
3828 }
3829 break;
3830
3831 case OVS_KEY_ATTR_CT_LABELS: {
3832 const ovs_32aligned_u128 *value = nl_attr_get(a);
3833 const ovs_32aligned_u128 *mask = ma ? nl_attr_get(ma) : NULL;
3834
3835 format_u128(ds, value, mask, verbose);
3836 break;
3837 }
3838
3839 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
3840 const struct ovs_key_ct_tuple_ipv4 *key = nl_attr_get(a);
3841 const struct ovs_key_ct_tuple_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
3842
3843 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
3844 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
3845 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
3846 verbose);
3847 format_be16(ds, "tp_src", key->src_port, MASK(mask, src_port),
3848 verbose);
3849 format_be16(ds, "tp_dst", key->dst_port, MASK(mask, dst_port),
3850 verbose);
3851 ds_chomp(ds, ',');
3852 break;
3853 }
3854
3855 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
3856 const struct ovs_key_ct_tuple_ipv6 *key = nl_attr_get(a);
3857 const struct ovs_key_ct_tuple_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
3858
3859 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
3860 verbose);
3861 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
3862 verbose);
3863 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
3864 verbose);
3865 format_be16(ds, "src_port", key->src_port, MASK(mask, src_port),
3866 verbose);
3867 format_be16(ds, "dst_port", key->dst_port, MASK(mask, dst_port),
3868 verbose);
3869 ds_chomp(ds, ',');
3870 break;
3871 }
3872
3873 case OVS_KEY_ATTR_TUNNEL:
3874 format_odp_tun_attr(a, ma, ds, verbose);
3875 break;
3876
3877 case OVS_KEY_ATTR_IN_PORT:
3878 if (is_exact) {
3879 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
3880 } else {
3881 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
3882 /* 'is_exact' is false in this branch, so always print the mask. */
3883 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
3885 }
3886 break;
3887
3888 case OVS_KEY_ATTR_PACKET_TYPE: {
3889 ovs_be32 value = nl_attr_get_be32(a);
3890 ovs_be32 mask = ma ? nl_attr_get_be32(ma) : OVS_BE32_MAX;
3891
3892 ovs_be16 ns = htons(pt_ns(value));
3893 ovs_be16 ns_mask = htons(pt_ns(mask));
3894 format_be16(ds, "ns", ns, &ns_mask, verbose);
3895
3896 ovs_be16 ns_type = pt_ns_type_be(value);
3897 ovs_be16 ns_type_mask = pt_ns_type_be(mask);
3898 format_be16x(ds, "id", ns_type, &ns_type_mask, verbose);
3899
3900 ds_chomp(ds, ',');
3901 break;
3902 }
3903
3904 case OVS_KEY_ATTR_ETHERNET: {
3905 const struct ovs_key_ethernet *mask = ma ? nl_attr_get(ma) : NULL;
3906 const struct ovs_key_ethernet *key = nl_attr_get(a);
3907
3908 format_eth(ds, "src", key->eth_src, MASK(mask, eth_src), verbose);
3909 format_eth(ds, "dst", key->eth_dst, MASK(mask, eth_dst), verbose);
3910 ds_chomp(ds, ',');
3911 break;
3912 }
3913 case OVS_KEY_ATTR_VLAN:
3914 format_vlan_tci(ds, nl_attr_get_be16(a),
3915 ma ? nl_attr_get_be16(ma) : OVS_BE16_MAX, verbose);
3916 break;
3917
3918 case OVS_KEY_ATTR_MPLS: {
3919 const struct ovs_key_mpls *mpls_key = nl_attr_get(a);
3920 const struct ovs_key_mpls *mpls_mask = NULL;
3921 size_t size = nl_attr_get_size(a);
3922
3923 if (!size || size % sizeof *mpls_key) {
3924 ds_put_format(ds, "(bad key length %"PRIuSIZE")", size);
3925 return;
3926 }
3927 if (!is_exact) {
3928 mpls_mask = nl_attr_get(ma);
3929 if (size != nl_attr_get_size(ma)) {
3930 ds_put_format(ds, "(key length %"PRIuSIZE" != "
3931 "mask length %"PRIuSIZE")",
3932 size, nl_attr_get_size(ma));
3933 return;
3934 }
3935 }
3936 format_mpls(ds, mpls_key, mpls_mask, size / sizeof *mpls_key);
3937 break;
3938 }
3939 case OVS_KEY_ATTR_ETHERTYPE:
3940 ds_put_format(ds, "0x%04"PRIx16, ntohs(nl_attr_get_be16(a)));
3941 if (!is_exact) {
3942 ds_put_format(ds, "/0x%04"PRIx16, ntohs(nl_attr_get_be16(ma)));
3943 }
3944 break;
3945
3946 case OVS_KEY_ATTR_IPV4: {
3947 const struct ovs_key_ipv4 *key = nl_attr_get(a);
3948 const struct ovs_key_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
3949
3950 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
3951 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
3952 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
3953 verbose);
3954 format_u8x(ds, "tos", key->ipv4_tos, MASK(mask, ipv4_tos), verbose);
3955 format_u8u(ds, "ttl", key->ipv4_ttl, MASK(mask, ipv4_ttl), verbose);
3956 format_frag(ds, "frag", key->ipv4_frag, MASK(mask, ipv4_frag),
3957 verbose);
3958 ds_chomp(ds, ',');
3959 break;
3960 }
3961 case OVS_KEY_ATTR_IPV6: {
3962 const struct ovs_key_ipv6 *key = nl_attr_get(a);
3963 const struct ovs_key_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
3964
3965 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
3966 verbose);
3967 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
3968 verbose);
3969 format_ipv6_label(ds, "label", key->ipv6_label, MASK(mask, ipv6_label),
3970 verbose);
3971 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
3972 verbose);
3973 format_u8x(ds, "tclass", key->ipv6_tclass, MASK(mask, ipv6_tclass),
3974 verbose);
3975 format_u8u(ds, "hlimit", key->ipv6_hlimit, MASK(mask, ipv6_hlimit),
3976 verbose);
3977 format_frag(ds, "frag", key->ipv6_frag, MASK(mask, ipv6_frag),
3978 verbose);
3979 ds_chomp(ds, ',');
3980 break;
3981 }
3982 /* These have the same structure and format. */
3983 case OVS_KEY_ATTR_TCP:
3984 case OVS_KEY_ATTR_UDP:
3985 case OVS_KEY_ATTR_SCTP: {
3986 const struct ovs_key_tcp *key = nl_attr_get(a);
3987 const struct ovs_key_tcp *mask = ma ? nl_attr_get(ma) : NULL;
3988
3989 format_be16(ds, "src", key->tcp_src, MASK(mask, tcp_src), verbose);
3990 format_be16(ds, "dst", key->tcp_dst, MASK(mask, tcp_dst), verbose);
3991 ds_chomp(ds, ',');
3992 break;
3993 }
3994 case OVS_KEY_ATTR_TCP_FLAGS:
3995 if (!is_exact) {
3996 format_flags_masked(ds, NULL, packet_tcp_flag_to_string,
3997 ntohs(nl_attr_get_be16(a)),
3998 TCP_FLAGS(nl_attr_get_be16(ma)),
3999 TCP_FLAGS(OVS_BE16_MAX));
4000 } else {
4001 format_flags(ds, packet_tcp_flag_to_string,
4002 ntohs(nl_attr_get_be16(a)), '|');
4003 }
4004 break;
4005
4006 case OVS_KEY_ATTR_ICMP: {
4007 const struct ovs_key_icmp *key = nl_attr_get(a);
4008 const struct ovs_key_icmp *mask = ma ? nl_attr_get(ma) : NULL;
4009
4010 format_u8u(ds, "type", key->icmp_type, MASK(mask, icmp_type), verbose);
4011 format_u8u(ds, "code", key->icmp_code, MASK(mask, icmp_code), verbose);
4012 ds_chomp(ds, ',');
4013 break;
4014 }
4015 case OVS_KEY_ATTR_ICMPV6: {
4016 const struct ovs_key_icmpv6 *key = nl_attr_get(a);
4017 const struct ovs_key_icmpv6 *mask = ma ? nl_attr_get(ma) : NULL;
4018
4019 format_u8u(ds, "type", key->icmpv6_type, MASK(mask, icmpv6_type),
4020 verbose);
4021 format_u8u(ds, "code", key->icmpv6_code, MASK(mask, icmpv6_code),
4022 verbose);
4023 ds_chomp(ds, ',');
4024 break;
4025 }
4026 case OVS_KEY_ATTR_ARP: {
4027 const struct ovs_key_arp *mask = ma ? nl_attr_get(ma) : NULL;
4028 const struct ovs_key_arp *key = nl_attr_get(a);
4029
4030 format_ipv4(ds, "sip", key->arp_sip, MASK(mask, arp_sip), verbose);
4031 format_ipv4(ds, "tip", key->arp_tip, MASK(mask, arp_tip), verbose);
4032 format_be16(ds, "op", key->arp_op, MASK(mask, arp_op), verbose);
4033 format_eth(ds, "sha", key->arp_sha, MASK(mask, arp_sha), verbose);
4034 format_eth(ds, "tha", key->arp_tha, MASK(mask, arp_tha), verbose);
4035 ds_chomp(ds, ',');
4036 break;
4037 }
4038 case OVS_KEY_ATTR_ND: {
4039 const struct ovs_key_nd *mask = ma ? nl_attr_get(ma) : NULL;
4040 const struct ovs_key_nd *key = nl_attr_get(a);
4041
4042 format_in6_addr(ds, "target", &key->nd_target, MASK(mask, nd_target),
4043 verbose);
4044 format_eth(ds, "sll", key->nd_sll, MASK(mask, nd_sll), verbose);
4045 format_eth(ds, "tll", key->nd_tll, MASK(mask, nd_tll), verbose);
4046
4047 ds_chomp(ds, ',');
4048 break;
4049 }
4050 case OVS_KEY_ATTR_ND_EXTENSIONS: {
4051 const struct ovs_key_nd_extensions *mask = ma ? nl_attr_get(ma) : NULL;
4052 const struct ovs_key_nd_extensions *key = nl_attr_get(a);
4053
4054 bool first = true;
4055 format_be32_masked(ds, &first, "nd_reserved", key->nd_reserved,
4056 OVS_BE32_MAX);
4057 ds_put_char(ds, ',');
4058
4059 format_u8u(ds, "nd_options_type", key->nd_options_type,
4060 MASK(mask, nd_options_type), verbose);
4061
4062 ds_chomp(ds, ',');
4063 break;
4064 }
4065 case OVS_KEY_ATTR_NSH: {
4066 format_odp_nsh_attr(a, ma, ds);
4067 break;
4068 }
4069 case OVS_KEY_ATTR_UNSPEC:
4070 case __OVS_KEY_ATTR_MAX:
4071 default:
4072 format_generic_odp_key(a, ds);
4073 if (!is_exact) {
4074 ds_put_char(ds, '/');
4075 format_generic_odp_key(ma, ds);
4076 }
4077 break;
4078 }
4079 ds_put_char(ds, ')');
4080 }
4081
4082 static void
4083 format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
4084 const struct hmap *portno_names, struct ds *ds,
4085 bool verbose)
4086 {
4087 if (check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4088 OVS_KEY_ATTR_MAX, false)) {
4089 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4090 }
4091 }
4092
4093 static struct nlattr *
4094 generate_all_wildcard_mask(const struct attr_len_tbl tbl[], int max,
4095 struct ofpbuf *ofp, const struct nlattr *key)
4096 {
4097 const struct nlattr *a;
4098 unsigned int left;
4099 int type = nl_attr_type(key);
4100 int size = nl_attr_get_size(key);
4101
4102 if (odp_key_attr_len(tbl, max, type) != ATTR_LEN_NESTED) {
4103 nl_msg_put_unspec_zero(ofp, type, size);
4104 } else {
4105 size_t nested_mask;
4106
4107 if (tbl[type].next) {
4108 const struct attr_len_tbl *entry = &tbl[type];
4109 tbl = entry->next;
4110 max = entry->next_max;
4111 }
4112
4113 nested_mask = nl_msg_start_nested(ofp, type);
4114 NL_ATTR_FOR_EACH(a, left, key, nl_attr_get_size(key)) {
4115 generate_all_wildcard_mask(tbl, max, ofp, nl_attr_get(a));
4116 }
4117 nl_msg_end_nested(ofp, nested_mask);
4118 }
4119
4120 return ofp->base;
4121 }
4122
4123 static void
4124 format_u128(struct ds *ds, const ovs_32aligned_u128 *key,
4125 const ovs_32aligned_u128 *mask, bool verbose)
4126 {
4127 if (verbose || (mask && !ovs_u128_is_zero(get_32aligned_u128(mask)))) {
4128 ovs_be128 value = hton128(get_32aligned_u128(key));
4129 ds_put_hex(ds, &value, sizeof value);
4130 if (mask && !(ovs_u128_is_ones(get_32aligned_u128(mask)))) {
4131 value = hton128(get_32aligned_u128(mask));
4132 ds_put_char(ds, '/');
4133 ds_put_hex(ds, &value, sizeof value);
4134 }
4135 }
4136 }
4137
4138 /* Read the string from 's_' as a 128-bit value. If the string contains
4139 * a "/", the rest of the string will be treated as a 128-bit mask.
4140 *
4141 * If either the value or mask is larger than 64 bits, the string must
4142 * be in hexadecimal.
4143 */
4144 static int
4145 scan_u128(const char *s_, ovs_u128 *value, ovs_u128 *mask)
4146 {
4147 char *s = CONST_CAST(char *, s_);
4148 ovs_be128 be_value;
4149 ovs_be128 be_mask;
4150
4151 if (!parse_int_string(s, (uint8_t *)&be_value, sizeof be_value, &s)) {
4152 *value = ntoh128(be_value);
4153
4154 if (mask) {
4155 int n;
4156
4157 if (ovs_scan(s, "/%n", &n)) {
4158 int error;
4159
4160 s += n;
4161 error = parse_int_string(s, (uint8_t *)&be_mask,
4162 sizeof be_mask, &s);
4163 if (error) {
4164 return 0;
4165 }
4166 *mask = ntoh128(be_mask);
4167 } else {
4168 *mask = OVS_U128_MAX;
4169 }
4170 }
4171 return s - s_;
4172 }
4173
4174 return 0;
4175 }
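
/* Usage sketch (illustrative): scan_u128() consumes "VALUE[/MASK]" and
 * returns the number of characters consumed, or 0 on error:
 *
 *     ovs_u128 value, mask;
 *     int n = scan_u128("0x1234/0xffff", &value, &mask);
 *     // n > 0 on success; when the "/MASK" part is absent, the mask
 *     // defaults to OVS_U128_MAX (all ones).
 */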
4176
4177 int
4178 odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
4179 {
4180 const char *s = s_;
4181
4182 if (ovs_scan(s, "ufid:")) {
4183 s += 5;
4184
4185 if (!uuid_from_string_prefix((struct uuid *)ufid, s)) {
4186 return -EINVAL;
4187 }
4188 s += UUID_LEN;
4189
4190 return s - s_;
4191 }
4192
4193 return 0;
4194 }
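
/* Example (UUID value is illustrative): odp_ufid_from_string() accepts the
 * same "ufid:" prefix that odp_format_ufid() below emits:
 *
 *     ovs_u128 ufid;
 *     int n = odp_ufid_from_string(
 *         "ufid:550e8400-e29b-41d4-a716-446655440000 in_port(1)", &ufid);
 *     // n is the length of the "ufid:<uuid>" prefix, 0 if no UFID prefix
 *     // is present, or -EINVAL if the UUID is malformed.
 */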
4195
4196 void
4197 odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
4198 {
4199 ds_put_format(ds, "ufid:"UUID_FMT, UUID_ARGS((struct uuid *)ufid));
4200 }
4201
4202 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4203 * OVS_KEY_ATTR_* attributes in 'key'. If non-null, additionally formats the
4204 * 'mask_len' bytes of 'mask' which apply to 'key'. If 'portno_names' is
4205 * non-null, translates odp port number to its name. */
4206 void
4207 odp_flow_format(const struct nlattr *key, size_t key_len,
4208 const struct nlattr *mask, size_t mask_len,
4209 const struct hmap *portno_names, struct ds *ds, bool verbose)
4210 {
4211 if (key_len) {
4212 const struct nlattr *a;
4213 unsigned int left;
4214 bool has_ethtype_key = false;
4215 bool has_packet_type_key = false;
4216 struct ofpbuf ofp;
4217 bool first_field = true;
4218
4219 ofpbuf_init(&ofp, 100);
4220 NL_ATTR_FOR_EACH (a, left, key, key_len) {
4221 int attr_type = nl_attr_type(a);
4222 const struct nlattr *ma = (mask && mask_len
4223 ? nl_attr_find__(mask, mask_len,
4224 attr_type)
4225 : NULL);
4226 if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4227 OVS_KEY_ATTR_MAX, false)) {
4228 continue;
4229 }
4230
4231 bool is_nested_attr;
4232 bool is_wildcard = false;
4233
4234 if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
4235 has_ethtype_key = true;
4236 } else if (attr_type == OVS_KEY_ATTR_PACKET_TYPE) {
4237 has_packet_type_key = true;
4238 }
4239
4240 is_nested_attr = odp_key_attr_len(ovs_flow_key_attr_lens,
4241 OVS_KEY_ATTR_MAX, attr_type) ==
4242 ATTR_LEN_NESTED;
4243
4244 if (mask && mask_len) {
4245 ma = nl_attr_find__(mask, mask_len, nl_attr_type(a));
4246 is_wildcard = ma ? odp_mask_attr_is_wildcard(ma) : true;
4247 }
4248
4249 if (verbose || !is_wildcard || is_nested_attr) {
4250 if (is_wildcard && !ma) {
4251 ma = generate_all_wildcard_mask(ovs_flow_key_attr_lens,
4252 OVS_KEY_ATTR_MAX,
4253 &ofp, a);
4254 }
4255 if (!first_field) {
4256 ds_put_char(ds, ',');
4257 }
4258 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4259 first_field = false;
4260 } else if (attr_type == OVS_KEY_ATTR_ETHERNET
4261 && !has_packet_type_key) {
4262 /* This special case reflects differences between the kernel
4263 * and userspace datapaths regarding the root type of the
4264 * packet being matched (typically Ethernet but some tunnels
4265 * can encapsulate IPv4 etc.). The kernel datapath does not
4266 * have an explicit way to indicate packet type; instead:
4267 *
4268 * - If OVS_KEY_ATTR_ETHERNET is present, the packet is an
4269 * Ethernet packet and OVS_KEY_ATTR_ETHERTYPE is the
4270 * Ethertype encoded in the Ethernet header.
4271 *
4272 * - If OVS_KEY_ATTR_ETHERNET is absent, then the packet's
4273 * root type is that encoded in OVS_KEY_ATTR_ETHERTYPE
4274 * (i.e. if OVS_KEY_ATTR_ETHERTYPE is 0x0800 then the
4275 * packet is an IPv4 packet).
4276 *
4277 * Thus, if OVS_KEY_ATTR_ETHERNET is present, even if it is
4278 * all-wildcarded, it is important to print it.
4279 *
4280 * On the other hand, the userspace datapath supports
4281 * OVS_KEY_ATTR_PACKET_TYPE and uses it to indicate the packet
4282 * type. Thus, if OVS_KEY_ATTR_PACKET_TYPE is present, we need
4283 * not print an all-wildcarded OVS_KEY_ATTR_ETHERNET. */
4284 if (!first_field) {
4285 ds_put_char(ds, ',');
4286 }
4287 ds_put_cstr(ds, "eth()");
4288 }
4289 ofpbuf_clear(&ofp);
4290 }
4291 ofpbuf_uninit(&ofp);
4292
4293 if (left) {
4294 int i;
4295
4296 if (left == key_len) {
4297 ds_put_cstr(ds, "<empty>");
4298 }
4299 ds_put_format(ds, ",***%u leftover bytes*** (", left);
4300 for (i = 0; i < left; i++) {
4301 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
4302 }
4303 ds_put_char(ds, ')');
4304 }
4305 if (!has_ethtype_key) {
4306 const struct nlattr *ma = nl_attr_find__(mask, mask_len,
4307 OVS_KEY_ATTR_ETHERTYPE);
4308 if (ma) {
4309 ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
4310 ntohs(nl_attr_get_be16(ma)));
4311 }
4312 }
4313 } else {
4314 ds_put_cstr(ds, "<empty>");
4315 }
4316 }
4317
4318 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4319 * OVS_KEY_ATTR_* attributes in 'key'. */
4320 void
4321 odp_flow_key_format(const struct nlattr *key,
4322 size_t key_len, struct ds *ds)
4323 {
4324 odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
4325 }
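
/* For reference, a flow key formatted by the functions above looks roughly
 * like the following single line (field values are illustrative, wrapped
 * here for readability):
 *
 *     in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),
 *     eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=6,tos=0,
 *     ttl=64,frag=no),tcp(src=8,dst=9)
 *
 * When a mask is supplied, non-exact fields are printed as "value/mask",
 * e.g. "ipv4(src=10.0.0.2/255.255.255.0,...)". */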
4326
4327 static bool
4328 ovs_frag_type_from_string(const char *s, enum ovs_frag_type *type)
4329 {
4330 if (!strcasecmp(s, "no")) {
4331 *type = OVS_FRAG_TYPE_NONE;
4332 } else if (!strcasecmp(s, "first")) {
4333 *type = OVS_FRAG_TYPE_FIRST;
4334 } else if (!strcasecmp(s, "later")) {
4335 *type = OVS_FRAG_TYPE_LATER;
4336 } else {
4337 return false;
4338 }
4339 return true;
4340 }
4341
4342 /* Parsing. */
4343
4344 static int
4345 scan_eth(const char *s, struct eth_addr *key, struct eth_addr *mask)
4346 {
4347 int n;
4348
4349 if (ovs_scan(s, ETH_ADDR_SCAN_FMT"%n",
4350 ETH_ADDR_SCAN_ARGS(*key), &n)) {
4351 int len = n;
4352
4353 if (mask) {
4354 if (ovs_scan(s + len, "/"ETH_ADDR_SCAN_FMT"%n",
4355 ETH_ADDR_SCAN_ARGS(*mask), &n)) {
4356 len += n;
4357 } else {
4358 memset(mask, 0xff, sizeof *mask);
4359 }
4360 }
4361 return len;
4362 }
4363 return 0;
4364 }
4365
4366 static int
4367 scan_ipv4(const char *s, ovs_be32 *key, ovs_be32 *mask)
4368 {
4369 int n;
4370
4371 if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(key), &n)) {
4372 int len = n;
4373
4374 if (mask) {
4375 if (ovs_scan(s + len, "/"IP_SCAN_FMT"%n",
4376 IP_SCAN_ARGS(mask), &n)) {
4377 len += n;
4378 } else {
4379 *mask = OVS_BE32_MAX;
4380 }
4381 }
4382 return len;
4383 }
4384 return 0;
4385 }
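
/* The scan_*() helpers in this section share one convention: parse "VALUE"
 * or "VALUE/MASK", default the mask to all ones when only a value is given,
 * and return the number of characters consumed (0 on failure).
 * Illustrative use:
 *
 *     ovs_be32 ip, ip_mask;
 *     int n = scan_ipv4("10.0.0.1/255.255.255.0", &ip, &ip_mask);
 *     // n > 0; ip_mask would have been OVS_BE32_MAX had the "/..." part
 *     // been absent.
 */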
4386
4387 static int
4388 scan_in6_addr(const char *s, struct in6_addr *key, struct in6_addr *mask)
4389 {
4390 int n;
4391 char ipv6_s[IPV6_SCAN_LEN + 1];
4392
4393 if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
4394 && inet_pton(AF_INET6, ipv6_s, key) == 1) {
4395 int len = n;
4396
4397 if (mask) {
4398 if (ovs_scan(s + len, "/"IPV6_SCAN_FMT"%n", ipv6_s, &n)
4399 && inet_pton(AF_INET6, ipv6_s, mask) == 1) {
4400 len += n;
4401 } else {
4402 memset(mask, 0xff, sizeof *mask);
4403 }
4404 }
4405 return len;
4406 }
4407 return 0;
4408 }
4409
4410 static int
4411 scan_ipv6_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
4412 {
4413 int key_, mask_;
4414 int n;
4415
4416 if (ovs_scan(s, "%i%n", &key_, &n)
4417 && (key_ & ~IPV6_LABEL_MASK) == 0) {
4418 int len = n;
4419
4420 *key = htonl(key_);
4421 if (mask) {
4422 if (ovs_scan(s + len, "/%i%n", &mask_, &n)
4423 && (mask_ & ~IPV6_LABEL_MASK) == 0) {
4424 len += n;
4425 *mask = htonl(mask_);
4426 } else {
4427 *mask = htonl(IPV6_LABEL_MASK);
4428 }
4429 }
4430 return len;
4431 }
4432 return 0;
4433 }
4434
4435 static int
4436 scan_u8(const char *s, uint8_t *key, uint8_t *mask)
4437 {
4438 int n;
4439
4440 if (ovs_scan(s, "%"SCNi8"%n", key, &n)) {
4441 int len = n;
4442
4443 if (mask) {
4444 if (ovs_scan(s + len, "/%"SCNi8"%n", mask, &n)) {
4445 len += n;
4446 } else {
4447 *mask = UINT8_MAX;
4448 }
4449 }
4450 return len;
4451 }
4452 return 0;
4453 }
4454
4455 static int
4456 scan_u16(const char *s, uint16_t *key, uint16_t *mask)
4457 {
4458 int n;
4459
4460 if (ovs_scan(s, "%"SCNi16"%n", key, &n)) {
4461 int len = n;
4462
4463 if (mask) {
4464 if (ovs_scan(s + len, "/%"SCNi16"%n", mask, &n)) {
4465 len += n;
4466 } else {
4467 *mask = UINT16_MAX;
4468 }
4469 }
4470 return len;
4471 }
4472 return 0;
4473 }
4474
4475 static int
4476 scan_u32(const char *s, uint32_t *key, uint32_t *mask)
4477 {
4478 int n;
4479
4480 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4481 int len = n;
4482
4483 if (mask) {
4484 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4485 len += n;
4486 } else {
4487 *mask = UINT32_MAX;
4488 }
4489 }
4490 return len;
4491 }
4492 return 0;
4493 }
4494
4495 static int
4496 scan_be16(const char *s, ovs_be16 *key, ovs_be16 *mask)
4497 {
4498 uint16_t key_, mask_;
4499 int n;
4500
4501 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
4502 int len = n;
4503
4504 *key = htons(key_);
4505 if (mask) {
4506 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
4507 len += n;
4508 *mask = htons(mask_);
4509 } else {
4510 *mask = OVS_BE16_MAX;
4511 }
4512 }
4513 return len;
4514 }
4515 return 0;
4516 }
4517
4518 static int
4519 scan_be32(const char *s, ovs_be32 *key, ovs_be32 *mask)
4520 {
4521 uint32_t key_, mask_;
4522 int n;
4523
4524 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
4525 int len = n;
4526
4527 *key = htonl(key_);
4528 if (mask) {
4529 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
4530 len += n;
4531 *mask = htonl(mask_);
4532 } else {
4533 *mask = OVS_BE32_MAX;
4534 }
4535 }
4536 return len;
4537 }
4538 return 0;
4539 }
4540
4541 static int
4542 scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
4543 {
4544 uint64_t key_, mask_;
4545 int n;
4546
4547 if (ovs_scan(s, "%"SCNi64"%n", &key_, &n)) {
4548 int len = n;
4549
4550 *key = htonll(key_);
4551 if (mask) {
4552 if (ovs_scan(s + len, "/%"SCNi64"%n", &mask_, &n)) {
4553 len += n;
4554 *mask = htonll(mask_);
4555 } else {
4556 *mask = OVS_BE64_MAX;
4557 }
4558 }
4559 return len;
4560 }
4561 return 0;
4562 }
4563
4564 static int
4565 scan_tun_flags(const char *s, uint16_t *key, uint16_t *mask)
4566 {
4567 uint32_t flags, fmask;
4568 int n;
4569
4570 n = parse_odp_flags(s, flow_tun_flag_to_string, &flags,
4571 FLOW_TNL_F_MASK, mask ? &fmask : NULL);
4572 if (n >= 0 && s[n] == ')') {
4573 *key = flags;
4574 if (mask) {
4575 *mask = fmask;
4576 }
4577 return n + 1;
4578 }
4579 return 0;
4580 }
4581
4582 static int
4583 scan_tcp_flags(const char *s, ovs_be16 *key, ovs_be16 *mask)
4584 {
4585 uint32_t flags, fmask;
4586 int n;
4587
4588 n = parse_odp_flags(s, packet_tcp_flag_to_string, &flags,
4589 TCP_FLAGS(OVS_BE16_MAX), mask ? &fmask : NULL);
4590 if (n >= 0) {
4591 *key = htons(flags);
4592 if (mask) {
4593 *mask = htons(fmask);
4594 }
4595 return n;
4596 }
4597 return 0;
4598 }
4599
4600 static uint32_t
4601 ovs_to_odp_ct_state(uint8_t state)
4602 {
4603 uint32_t odp = 0;
4604
4605 #define CS_STATE(ENUM, INDEX, NAME) \
4606 if (state & CS_##ENUM) { \
4607 odp |= OVS_CS_F_##ENUM; \
4608 }
4609 CS_STATES
4610 #undef CS_STATE
4611
4612 return odp;
4613 }
4614
4615 static uint8_t
4616 odp_to_ovs_ct_state(uint32_t flags)
4617 {
4618 uint32_t state = 0;
4619
4620 #define CS_STATE(ENUM, INDEX, NAME) \
4621 if (flags & OVS_CS_F_##ENUM) { \
4622 state |= CS_##ENUM; \
4623 }
4624 CS_STATES
4625 #undef CS_STATE
4626
4627 return state;
4628 }
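
/* CS_STATES is an X-macro list (defined in a header); each CS_STATE()
 * invocation in the two functions above expands to one flag translation.
 * For ovs_to_odp_ct_state() that is roughly:
 *
 *     if (state & CS_NEW)         { odp |= OVS_CS_F_NEW; }
 *     if (state & CS_ESTABLISHED) { odp |= OVS_CS_F_ESTABLISHED; }
 *     ...and so on for every connection-tracking state bit.
 */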
4629
4630 static int
4631 scan_ct_state(const char *s, uint32_t *key, uint32_t *mask)
4632 {
4633 uint32_t flags, fmask;
4634 int n;
4635
4636 n = parse_flags(s, odp_ct_state_to_string, ')', NULL, NULL, &flags,
4637 ovs_to_odp_ct_state(CS_SUPPORTED_MASK),
4638 mask ? &fmask : NULL);
4639
4640 if (n >= 0) {
4641 *key = flags;
4642 if (mask) {
4643 *mask = fmask;
4644 }
4645 return n;
4646 }
4647 return 0;
4648 }
4649
4650 static int
4651 scan_frag(const char *s, uint8_t *key, uint8_t *mask)
4652 {
4653 int n;
4654 char frag[8];
4655 enum ovs_frag_type frag_type;
4656
4657 if (ovs_scan(s, "%7[a-z]%n", frag, &n)
4658 && ovs_frag_type_from_string(frag, &frag_type)) {
4659 int len = n;
4660
4661 *key = frag_type;
4662 if (mask) {
4663 *mask = UINT8_MAX;
4664 }
4665 return len;
4666 }
4667 return 0;
4668 }
4669
4670 static int
4671 scan_port(const char *s, uint32_t *key, uint32_t *mask,
4672 const struct simap *port_names)
4673 {
4674 int n;
4675
4676 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4677 int len = n;
4678
4679 if (mask) {
4680 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4681 len += n;
4682 } else {
4683 *mask = UINT32_MAX;
4684 }
4685 }
4686 return len;
4687 } else if (port_names) {
4688 const struct simap_node *node;
4689 int len;
4690
4691 len = strcspn(s, ")");
4692 node = simap_find_len(port_names, s, len);
4693 if (node) {
4694 *key = node->data;
4695
4696 if (mask) {
4697 *mask = UINT32_MAX;
4698 }
4699 return len;
4700 }
4701 }
4702 return 0;
4703 }
4704
4705 /* Helper for vlan parsing. */
4706 struct ovs_key_vlan__ {
4707 ovs_be16 tci;
4708 };
4709
4710 static bool
4711 set_be16_bf(ovs_be16 *bf, uint8_t bits, uint8_t offset, uint16_t value)
4712 {
4713 const uint16_t mask = ((1U << bits) - 1) << offset;
4714
4715 if (value >> bits) {
4716 return false;
4717 }
4718
4719 *bf = htons((ntohs(*bf) & ~mask) | (value << offset));
4720 return true;
4721 }
4722
4723 static int
4724 scan_be16_bf(const char *s, ovs_be16 *key, ovs_be16 *mask, uint8_t bits,
4725 uint8_t offset)
4726 {
4727 uint16_t key_, mask_;
4728 int n;
4729
4730 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
4731 int len = n;
4732
4733 if (set_be16_bf(key, bits, offset, key_)) {
4734 if (mask) {
4735 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
4736 len += n;
4737
4738 if (!set_be16_bf(mask, bits, offset, mask_)) {
4739 return 0;
4740 }
4741 } else {
4742 *mask |= htons(((1U << bits) - 1) << offset);
4743 }
4744 }
4745 return len;
4746 }
4747 }
4748 return 0;
4749 }
4750
4751 static int
4752 scan_vid(const char *s, ovs_be16 *key, ovs_be16 *mask)
4753 {
4754 return scan_be16_bf(s, key, mask, 12, VLAN_VID_SHIFT);
4755 }
4756
4757 static int
4758 scan_pcp(const char *s, ovs_be16 *key, ovs_be16 *mask)
4759 {
4760 return scan_be16_bf(s, key, mask, 3, VLAN_PCP_SHIFT);
4761 }
4762
4763 static int
4764 scan_cfi(const char *s, ovs_be16 *key, ovs_be16 *mask)
4765 {
4766 return scan_be16_bf(s, key, mask, 1, VLAN_CFI_SHIFT);
4767 }
4768
4769 /* For MPLS. */
4770 static bool
4771 set_be32_bf(ovs_be32 *bf, uint8_t bits, uint8_t offset, uint32_t value)
4772 {
4773 const uint32_t mask = ((1U << bits) - 1) << offset;
4774
4775 if (value >> bits) {
4776 return false;
4777 }
4778
4779 *bf = htonl((ntohl(*bf) & ~mask) | (value << offset));
4780 return true;
4781 }
4782
4783 static int
4784 scan_be32_bf(const char *s, ovs_be32 *key, ovs_be32 *mask, uint8_t bits,
4785 uint8_t offset)
4786 {
4787 uint32_t key_, mask_;
4788 int n;
4789
4790 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
4791 int len = n;
4792
4793 if (set_be32_bf(key, bits, offset, key_)) {
4794 if (mask) {
4795 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
4796 len += n;
4797
4798 if (!set_be32_bf(mask, bits, offset, mask_)) {
4799 return 0;
4800 }
4801 } else {
4802 *mask |= htonl(((1U << bits) - 1) << offset);
4803 }
4804 }
4805 return len;
4806 }
4807 }
4808 return 0;
4809 }
4810
4811 static int
4812 scan_mpls_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
4813 {
4814 return scan_be32_bf(s, key, mask, 20, MPLS_LABEL_SHIFT);
4815 }
4816
4817 static int
4818 scan_mpls_tc(const char *s, ovs_be32 *key, ovs_be32 *mask)
4819 {
4820 return scan_be32_bf(s, key, mask, 3, MPLS_TC_SHIFT);
4821 }
4822
4823 static int
4824 scan_mpls_ttl(const char *s, ovs_be32 *key, ovs_be32 *mask)
4825 {
4826 return scan_be32_bf(s, key, mask, 8, MPLS_TTL_SHIFT);
4827 }
4828
4829 static int
4830 scan_mpls_bos(const char *s, ovs_be32 *key, ovs_be32 *mask)
4831 {
4832 return scan_be32_bf(s, key, mask, 1, MPLS_BOS_SHIFT);
4833 }
4834
4835 static int
4836 scan_vxlan_gbp(const char *s, uint32_t *key, uint32_t *mask)
4837 {
4838 const char *s_base = s;
4839 ovs_be16 id = 0, id_mask = 0;
4840 uint8_t flags = 0, flags_mask = 0;
4841 int len;
4842
4843 if (!strncmp(s, "id=", 3)) {
4844 s += 3;
4845 len = scan_be16(s, &id, mask ? &id_mask : NULL);
4846 if (len == 0) {
4847 return 0;
4848 }
4849 s += len;
4850 }
4851
4852 if (s[0] == ',') {
4853 s++;
4854 }
4855 if (!strncmp(s, "flags=", 6)) {
4856 s += 6;
4857 len = scan_u8(s, &flags, mask ? &flags_mask : NULL);
4858 if (len == 0) {
4859 return 0;
4860 }
4861 s += len;
4862 }
4863
4864 if (!strncmp(s, "))", 2)) {
4865 s += 2;
4866
4867 *key = (flags << 16) | ntohs(id);
4868 if (mask) {
4869 *mask = (flags_mask << 16) | ntohs(id_mask);
4870 }
4871
4872 return s - s_base;
4873 }
4874
4875 return 0;
4876 }
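
/* scan_vxlan_gbp() parses the inner part of a "vxlan(gbp(id=...,flags=...))"
 * tunnel option (values illustrative):
 *
 *     uint32_t gbp, gbp_mask;
 *     int n = scan_vxlan_gbp("id=100,flags=0x1))", &gbp, &gbp_mask);
 *     // Consumes up to and including the closing "))"; the GBP policy ID
 *     // ends up in the low 16 bits of 'gbp' and the flags in bits 16-23.
 */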
4877
4878 static int
4879 scan_erspan_metadata(const char *s,
4880 struct erspan_metadata *key,
4881 struct erspan_metadata *mask)
4882 {
4883 const char *s_base = s;
4884 uint32_t idx = 0, idx_mask = 0;
4885 uint8_t ver = 0, dir = 0, hwid = 0;
4886 uint8_t ver_mask = 0, dir_mask = 0, hwid_mask = 0;
4887 int len;
4888
4889 if (!strncmp(s, "ver=", 4)) {
4890 s += 4;
4891 len = scan_u8(s, &ver, mask ? &ver_mask : NULL);
4892 if (len == 0) {
4893 return 0;
4894 }
4895 s += len;
4896 }
4897
4898 if (s[0] == ',') {
4899 s++;
4900 }
4901
4902 if (ver == 1) {
4903 if (!strncmp(s, "idx=", 4)) {
4904 s += 4;
4905 len = scan_u32(s, &idx, mask ? &idx_mask : NULL);
4906 if (len == 0) {
4907 return 0;
4908 }
4909 s += len;
4910 }
4911
4912 if (!strncmp(s, ")", 1)) {
4913 s += 1;
4914 key->version = ver;
4915 key->u.index = htonl(idx);
4916 if (mask) {
4917 mask->u.index = htonl(idx_mask);
4918 }
4919 }
4920 return s - s_base;
4921
4922 } else if (ver == 2) {
4923 if (!strncmp(s, "dir=", 4)) {
4924 s += 4;
4925 len = scan_u8(s, &dir, mask ? &dir_mask : NULL);
4926 if (len == 0) {
4927 return 0;
4928 }
4929 s += len;
4930 }
4931 if (s[0] == ',') {
4932 s++;
4933 }
4934 if (!strncmp(s, "hwid=", 5)) {
4935 s += 5;
4936 len = scan_u8(s, &hwid, mask ? &hwid_mask : NULL);
4937 if (len == 0) {
4938 return 0;
4939 }
4940 s += len;
4941 }
4942
4943 if (!strncmp(s, ")", 1)) {
4944 s += 1;
4945 key->version = ver;
4946 key->u.md2.hwid = hwid;
4947 key->u.md2.dir = dir;
4948 if (mask) {
4949 mask->u.md2.hwid = hwid_mask;
4950 mask->u.md2.dir = dir_mask;
4951 }
4952 }
4953 return s - s_base;
4954 }
4955
4956 return 0;
4957 }
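
/* Within a tunnel() key the ERSPAN metadata takes one of two text forms
 * (values illustrative):
 *
 *     erspan(ver=1,idx=0x7)           version 1: index only
 *     erspan(ver=2,dir=1,hwid=0x4)    version 2: direction and hardware ID
 */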
4958
4959 static int
4960 scan_geneve(const char *s, struct geneve_scan *key, struct geneve_scan *mask)
4961 {
4962 const char *s_base = s;
4963 struct geneve_opt *opt = key->d;
4964 struct geneve_opt *opt_mask = mask ? mask->d : NULL;
4965 int len_remain = sizeof key->d;
4966 int len;
4967
4968 while (s[0] == '{' && len_remain >= sizeof *opt) {
4969 int data_len = 0;
4970
4971 s++;
4972 len_remain -= sizeof *opt;
4973
4974 if (!strncmp(s, "class=", 6)) {
4975 s += 6;
4976 len = scan_be16(s, &opt->opt_class,
4977 mask ? &opt_mask->opt_class : NULL);
4978 if (len == 0) {
4979 return 0;
4980 }
4981 s += len;
4982 } else if (mask) {
4983 memset(&opt_mask->opt_class, 0, sizeof opt_mask->opt_class);
4984 }
4985
4986 if (s[0] == ',') {
4987 s++;
4988 }
4989 if (!strncmp(s, "type=", 5)) {
4990 s += 5;
4991 len = scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
4992 if (len == 0) {
4993 return 0;
4994 }
4995 s += len;
4996 } else if (mask) {
4997 memset(&opt_mask->type, 0, sizeof opt_mask->type);
4998 }
4999
5000 if (s[0] == ',') {
5001 s++;
5002 }
5003 if (!strncmp(s, "len=", 4)) {
5004 uint8_t opt_len, opt_len_mask;
5005 s += 4;
5006 len = scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
5007 if (len == 0) {
5008 return 0;
5009 }
5010 s += len;
5011
5012 if (opt_len > 124 || opt_len % 4 || opt_len > len_remain) {
5013 return 0;
5014 }
5015 opt->length = opt_len / 4;
5016 if (mask) {
5017 opt_mask->length = opt_len_mask;
5018 }
5019 data_len = opt_len;
5020 } else if (mask) {
5021 memset(&opt_mask->type, 0, sizeof opt_mask->type);
5022 }
5023
5024 if (s[0] == ',') {
5025 s++;
5026 if (parse_int_string(s, (uint8_t *)(opt + 1),
5027 data_len, (char **)&s)) {
5028 return 0;
5029 }
5030 }
5031 if (mask) {
5032 if (s[0] == '/') {
5033 s++;
5034 if (parse_int_string(s, (uint8_t *)(opt_mask + 1),
5035 data_len, (char **)&s)) {
5036 return 0;
5037 }
5038 }
5039 opt_mask->r1 = 0;
5040 opt_mask->r2 = 0;
5041 opt_mask->r3 = 0;
5042 }
5043
5044 if (s[0] == '}') {
5045 s++;
5046 opt += 1 + data_len / 4;
5047 if (mask) {
5048 opt_mask += 1 + data_len / 4;
5049 }
5050 len_remain -= data_len;
5051 } else {
5052 return 0;
5053 }
5054 }
5055
5056 if (s[0] == ')') {
5057 len = sizeof key->d - len_remain;
5058
5059 s++;
5060 key->len = len;
5061 if (mask) {
5062 mask->len = len;
5063 }
5064 return s - s_base;
5065 }
5066
5067 return 0;
5068 }
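
/* scan_geneve() parses one or more "{...}" option blocks terminated by ')',
 * e.g. (values illustrative):
 *
 *     geneve({class=0xffff,type=0x80,len=4,0xa})
 *
 * "len" is the option body length in bytes (a multiple of 4, at most 124),
 * and the trailing number is the option data, optionally followed by
 * "/MASK". */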
5069
5070 static void
5071 tun_flags_to_attr(struct ofpbuf *a, const void *data_)
5072 {
5073 const uint16_t *flags = data_;
5074
5075 if (*flags & FLOW_TNL_F_DONT_FRAGMENT) {
5076 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
5077 }
5078 if (*flags & FLOW_TNL_F_CSUM) {
5079 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
5080 }
5081 if (*flags & FLOW_TNL_F_OAM) {
5082 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
5083 }
5084 }
5085
5086 static void
5087 vxlan_gbp_to_attr(struct ofpbuf *a, const void *data_)
5088 {
5089 const uint32_t *gbp = data_;
5090
5091 if (*gbp) {
5092 size_t vxlan_opts_ofs;
5093
5094 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
5095 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP, *gbp);
5096 nl_msg_end_nested(a, vxlan_opts_ofs);
5097 }
5098 }
5099
5100 static void
5101 geneve_to_attr(struct ofpbuf *a, const void *data_)
5102 {
5103 const struct geneve_scan *geneve = data_;
5104
5105 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, geneve->d,
5106 geneve->len);
5107 }
5108
5109 static void
5110 erspan_to_attr(struct ofpbuf *a, const void *data_)
5111 {
5112 const struct erspan_metadata *md = data_;
5113
5114 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, md,
5115 sizeof *md);
5116 }
5117
5118 #define SCAN_PUT_ATTR(BUF, ATTR, DATA, FUNC) \
5119 { \
5120 unsigned long call_fn = (unsigned long)FUNC; \
5121 if (call_fn) { \
5122 typedef void (*fn)(struct ofpbuf *, const void *); \
5123 fn func = FUNC; \
5124 func(BUF, &(DATA)); \
5125 } else { \
5126 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)); \
5127 } \
5128 }
5129
5130 #define SCAN_IF(NAME) \
5131 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5132 const char *start = s; \
5133 int len; \
5134 \
5135 s += strlen(NAME)
5136
5137 /* Usually no special initialization is needed. */
5138 #define SCAN_BEGIN(NAME, TYPE) \
5139 SCAN_IF(NAME); \
5140 TYPE skey, smask; \
5141 memset(&skey, 0, sizeof skey); \
5142 memset(&smask, 0, sizeof smask); \
5143 do { \
5144 len = 0;
5145
5146 /* Initialize as fully masked, since the mask will not be scanned. */
5147 #define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
5148 SCAN_IF(NAME); \
5149 TYPE skey, smask; \
5150 memset(&skey, 0, sizeof skey); \
5151 memset(&smask, 0xff, sizeof smask); \
5152 do { \
5153 len = 0;
5154
5155 /* VLAN needs special initialization. */
5156 #define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
5157 SCAN_IF(NAME); \
5158 TYPE skey = KEY_INIT; \
5159 TYPE smask = MASK_INIT; \
5160 do { \
5161 len = 0;
5162
5163 /* Scan an unnamed entry as 'TYPE'. */
5164 #define SCAN_TYPE(TYPE, KEY, MASK) \
5165 len = scan_##TYPE(s, KEY, MASK); \
5166 if (len == 0) { \
5167 return -EINVAL; \
5168 } \
5169 s += len
5170
5171 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
5172 #define SCAN_FIELD(NAME, TYPE, FIELD) \
5173 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5174 s += strlen(NAME); \
5175 SCAN_TYPE(TYPE, &skey.FIELD, mask ? &smask.FIELD : NULL); \
5176 continue; \
5177 }
5178
5179 #define SCAN_FINISH() \
5180 } while (*s++ == ',' && len != 0); \
5181 if (s[-1] != ')') { \
5182 return -EINVAL; \
5183 }
5184
5185 #define SCAN_FINISH_SINGLE() \
5186 } while (false); \
5187 if (*s++ != ')') { \
5188 return -EINVAL; \
5189 }
5190
5191 /* Beginning of nested attribute. */
5192 #define SCAN_BEGIN_NESTED(NAME, ATTR) \
5193 SCAN_IF(NAME); \
5194 size_t key_offset, mask_offset = 0; \
5195 key_offset = nl_msg_start_nested(key, ATTR); \
5196 if (mask) { \
5197 mask_offset = nl_msg_start_nested(mask, ATTR); \
5198 } \
5199 do { \
5200 len = 0;
5201
5202 #define SCAN_END_NESTED() \
5203 SCAN_FINISH(); \
5204 nl_msg_end_nested(key, key_offset); \
5205 if (mask) { \
5206 nl_msg_end_nested(mask, mask_offset); \
5207 } \
5208 return s - start; \
5209 }
5210
5211 #define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \
5212 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5213 TYPE skey, smask; \
5214 memset(&skey, 0, sizeof skey); \
5215 memset(&smask, 0xff, sizeof smask); \
5216 s += strlen(NAME); \
5217 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5218 SCAN_PUT(ATTR, FUNC); \
5219 continue; \
5220 }
5221
5222 #define SCAN_FIELD_NESTED(NAME, TYPE, SCAN_AS, ATTR) \
5223 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, NULL)
5224
5225 #define SCAN_FIELD_NESTED_FUNC(NAME, TYPE, SCAN_AS, FUNC) \
5226 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, 0, FUNC)
5227
5228 #define SCAN_PUT(ATTR, FUNC) \
5229 SCAN_PUT_ATTR(key, ATTR, skey, FUNC); \
5230 if (mask) \
5231 SCAN_PUT_ATTR(mask, ATTR, smask, FUNC); \
5232
5233 #define SCAN_END(ATTR) \
5234 SCAN_FINISH(); \
5235 SCAN_PUT(ATTR, NULL); \
5236 return s - start; \
5237 }
5238
5239 #define SCAN_BEGIN_ARRAY(NAME, TYPE, CNT) \
5240 SCAN_IF(NAME); \
5241 TYPE skey[CNT], smask[CNT]; \
5242 memset(&skey, 0, sizeof skey); \
5243 memset(&smask, 0, sizeof smask); \
5244 int idx = 0, cnt = CNT; \
5245 uint64_t fields = 0; \
5246 do { \
5247 int field = 0; \
5248 len = 0;
5249
5250 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
5251 #define SCAN_FIELD_ARRAY(NAME, TYPE, FIELD) \
5252 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5253 if (fields & (1UL << field)) { \
5254 fields = 0; \
5255 if (++idx == cnt) { \
5256 break; \
5257 } \
5258 } \
5259 s += strlen(NAME); \
5260 SCAN_TYPE(TYPE, &skey[idx].FIELD, mask ? &smask[idx].FIELD : NULL); \
5261 fields |= 1UL << field; \
5262 continue; \
5263 } \
5264 field++;
5265
5266 #define SCAN_PUT_ATTR_ARRAY(BUF, ATTR, DATA, CNT) \
5267 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)[0] * (CNT)); \
5268
5269 #define SCAN_PUT_ARRAY(ATTR, CNT) \
5270 SCAN_PUT_ATTR_ARRAY(key, ATTR, skey, CNT); \
5271 if (mask) { \
5272 SCAN_PUT_ATTR_ARRAY(mask, ATTR, smask, CNT); \
5273 }
5274
5275 #define SCAN_END_ARRAY(ATTR) \
5276 SCAN_FINISH(); \
5277 if (idx == cnt) { \
5278 return -EINVAL; \
5279 } \
5280 SCAN_PUT_ARRAY(ATTR, idx + 1); \
5281 return s - start; \
5282 }
5283
5284 #define SCAN_END_SINGLE(ATTR) \
5285 SCAN_FINISH_SINGLE(); \
5286 SCAN_PUT(ATTR, NULL); \
5287 return s - start; \
5288 }
5289
5290 #define SCAN_SINGLE(NAME, TYPE, SCAN_AS, ATTR) \
5291 SCAN_BEGIN(NAME, TYPE) { \
5292 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5293 } SCAN_END_SINGLE(ATTR)
5294
5295 #define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
5296 SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
5297 SCAN_TYPE(SCAN_AS, &skey, NULL); \
5298 } SCAN_END_SINGLE(ATTR)
5299
5300 /* scan_port needs one extra argument. */
5301 #define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
5302 SCAN_BEGIN(NAME, TYPE) { \
5303 len = scan_port(s, &skey, &smask, \
5304 context->port_names); \
5305 if (len == 0) { \
5306 return -EINVAL; \
5307 } \
5308 s += len; \
5309 } SCAN_END_SINGLE(ATTR)
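
/* Rough sketch (simplified) of how the SCAN_* macros compose:
 * SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH) expands to
 * approximately:
 *
 *     if (strncmp(s, "dp_hash(", strlen("dp_hash(")) == 0) {
 *         const char *start = s;
 *         int len;
 *         uint32_t skey, smask;
 *         memset(&skey, 0, sizeof skey);
 *         memset(&smask, 0, sizeof smask);
 *         s += strlen("dp_hash(");
 *         len = scan_u32(s, &skey, &smask);          // SCAN_TYPE
 *         if (len == 0) { return -EINVAL; }
 *         s += len;
 *         if (*s++ != ')') { return -EINVAL; }       // SCAN_FINISH_SINGLE
 *         nl_msg_put_unspec(key, OVS_KEY_ATTR_DP_HASH,
 *                           &skey, sizeof skey);     // SCAN_PUT
 *         if (mask) {
 *             nl_msg_put_unspec(mask, OVS_KEY_ATTR_DP_HASH,
 *                               &smask, sizeof smask);
 *         }
 *         return s - start;
 *     }
 */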
5310
5311 static int
5312 parse_odp_nsh_key_mask_attr(const char *s, struct ofpbuf *key,
5313 struct ofpbuf *mask)
5314 {
5315 if (strncmp(s, "nsh(", 4) == 0) {
5316 const char *start = s;
5317 int len;
5318 struct ovs_key_nsh skey, smask;
5319 uint32_t spi = 0, spi_mask = 0;
5320 uint8_t si = 0, si_mask = 0;
5321
5322 s += 4;
5323
5324 memset(&skey, 0, sizeof skey);
5325 memset(&smask, 0, sizeof smask);
5326 do {
5327 len = 0;
5328
5329 if (strncmp(s, "flags=", 6) == 0) {
5330 s += 6;
5331 len = scan_u8(s, &skey.flags, mask ? &smask.flags : NULL);
5332 if (len == 0) {
5333 return -EINVAL;
5334 }
5335 s += len;
5336 continue;
5337 }
5338
5339 if (strncmp(s, "mdtype=", 7) == 0) {
5340 s += 7;
5341 len = scan_u8(s, &skey.mdtype, mask ? &smask.mdtype : NULL);
5342 if (len == 0) {
5343 return -EINVAL;
5344 }
5345 s += len;
5346 continue;
5347 }
5348
5349 if (strncmp(s, "np=", 3) == 0) {
5350 s += 3;
5351 len = scan_u8(s, &skey.np, mask ? &smask.np : NULL);
5352 if (len == 0) {
5353 return -EINVAL;
5354 }
5355 s += len;
5356 continue;
5357 }
5358
5359 if (strncmp(s, "spi=", 4) == 0) {
5360 s += 4;
5361 len = scan_u32(s, &spi, mask ? &spi_mask : NULL);
5362 if (len == 0) {
5363 return -EINVAL;
5364 }
5365 s += len;
5366 continue;
5367 }
5368
5369 if (strncmp(s, "si=", 3) == 0) {
5370 s += 3;
5371 len = scan_u8(s, &si, mask ? &si_mask : NULL);
5372 if (len == 0) {
5373 return -EINVAL;
5374 }
5375 s += len;
5376 continue;
5377 }
5378
5379 if (strncmp(s, "c1=", 3) == 0) {
5380 s += 3;
5381 len = scan_be32(s, &skey.context[0],
5382 mask ? &smask.context[0] : NULL);
5383 if (len == 0) {
5384 return -EINVAL;
5385 }
5386 s += len;
5387 continue;
5388 }
5389
5390 if (strncmp(s, "c2=", 3) == 0) {
5391 s += 3;
5392 len = scan_be32(s, &skey.context[1],
5393 mask ? &smask.context[1] : NULL);
5394 if (len == 0) {
5395 return -EINVAL;
5396 }
5397 s += len;
5398 continue;
5399 }
5400
5401 if (strncmp(s, "c3=", 3) == 0) {
5402 s += 3;
5403 len = scan_be32(s, &skey.context[2],
5404 mask ? &smask.context[2] : NULL);
5405 if (len == 0) {
5406 return -EINVAL;
5407 }
5408 s += len;
5409 continue;
5410 }
5411
5412 if (strncmp(s, "c4=", 3) == 0) {
5413 s += 3;
5414 len = scan_be32(s, &skey.context[3],
5415 mask ? &smask.context[3] : NULL);
5416 if (len == 0) {
5417 return -EINVAL;
5418 }
5419 s += len;
5420 continue;
5421 }
5422 } while (*s++ == ',' && len != 0);
5423 if (s[-1] != ')') {
5424 return -EINVAL;
5425 }
5426
5427 skey.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
5428 smask.path_hdr = nsh_spi_si_to_path_hdr(spi_mask, si_mask);
5429
5430 nsh_key_to_attr(key, &skey, NULL, 0, false);
5431 if (mask) {
5432 nsh_key_to_attr(mask, &smask, NULL, 0, true);
5433 }
5434 return s - start;
5435 }
5436 return 0;
5437 }
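
/* Text form accepted above (field values illustrative):
 *
 *     nsh(flags=0,mdtype=1,np=3,spi=0x100,si=255,c1=0x1,c2=0x2,c3=0x3,c4=0x4)
 *
 * "spi" and "si" are folded into 'path_hdr' via nsh_spi_si_to_path_hdr()
 * before nsh_key_to_attr() emits the key (and, if requested, the mask) as
 * Netlink attributes. */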
5438
5439 static int
5440 parse_odp_key_mask_attr(struct parse_odp_context *context, const char *s,
5441 struct ofpbuf *key, struct ofpbuf *mask)
5442 {
5443 SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
5444 SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
5445 SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
5446 OVS_KEY_ATTR_RECIRC_ID);
5447 SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
5448
5449 SCAN_SINGLE("ct_state(", uint32_t, ct_state, OVS_KEY_ATTR_CT_STATE);
5450 SCAN_SINGLE("ct_zone(", uint16_t, u16, OVS_KEY_ATTR_CT_ZONE);
5451 SCAN_SINGLE("ct_mark(", uint32_t, u32, OVS_KEY_ATTR_CT_MARK);
5452 SCAN_SINGLE("ct_label(", ovs_u128, u128, OVS_KEY_ATTR_CT_LABELS);
5453
5454 SCAN_BEGIN("ct_tuple4(", struct ovs_key_ct_tuple_ipv4) {
5455 SCAN_FIELD("src=", ipv4, ipv4_src);
5456 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5457 SCAN_FIELD("proto=", u8, ipv4_proto);
5458 SCAN_FIELD("tp_src=", be16, src_port);
5459 SCAN_FIELD("tp_dst=", be16, dst_port);
5460 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
5461
5462 SCAN_BEGIN("ct_tuple6(", struct ovs_key_ct_tuple_ipv6) {
5463 SCAN_FIELD("src=", in6_addr, ipv6_src);
5464 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5465 SCAN_FIELD("proto=", u8, ipv6_proto);
5466 SCAN_FIELD("tp_src=", be16, src_port);
5467 SCAN_FIELD("tp_dst=", be16, dst_port);
5468 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
5469
5470 SCAN_BEGIN_NESTED("tunnel(", OVS_KEY_ATTR_TUNNEL) {
5471 SCAN_FIELD_NESTED("tun_id=", ovs_be64, be64, OVS_TUNNEL_KEY_ATTR_ID);
5472 SCAN_FIELD_NESTED("src=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_SRC);
5473 SCAN_FIELD_NESTED("dst=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_DST);
5474 SCAN_FIELD_NESTED("ipv6_src=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_SRC);
5475 SCAN_FIELD_NESTED("ipv6_dst=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_DST);
5476 SCAN_FIELD_NESTED("tos=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TOS);
5477 SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
5478 SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
5479 SCAN_FIELD_NESTED("tp_dst=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_DST);
5480 SCAN_FIELD_NESTED_FUNC("erspan(", struct erspan_metadata, erspan_metadata,
5481 erspan_to_attr);
5482 SCAN_FIELD_NESTED_FUNC("vxlan(gbp(", uint32_t, vxlan_gbp, vxlan_gbp_to_attr);
5483 SCAN_FIELD_NESTED_FUNC("geneve(", struct geneve_scan, geneve,
5484 geneve_to_attr);
5485 SCAN_FIELD_NESTED_FUNC("flags(", uint16_t, tun_flags, tun_flags_to_attr);
5486 } SCAN_END_NESTED();
5487
5488 SCAN_SINGLE_PORT("in_port(", uint32_t, OVS_KEY_ATTR_IN_PORT);
5489
5490 SCAN_BEGIN("eth(", struct ovs_key_ethernet) {
5491 SCAN_FIELD("src=", eth, eth_src);
5492 SCAN_FIELD("dst=", eth, eth_dst);
5493 } SCAN_END(OVS_KEY_ATTR_ETHERNET);
5494
5495 SCAN_BEGIN_INIT("vlan(", struct ovs_key_vlan__,
5496 { htons(VLAN_CFI) }, { htons(VLAN_CFI) }) {
5497 SCAN_FIELD("vid=", vid, tci);
5498 SCAN_FIELD("pcp=", pcp, tci);
5499 SCAN_FIELD("cfi=", cfi, tci);
5500 } SCAN_END(OVS_KEY_ATTR_VLAN);
5501
5502 SCAN_SINGLE("eth_type(", ovs_be16, be16, OVS_KEY_ATTR_ETHERTYPE);
5503
5504 SCAN_BEGIN_ARRAY("mpls(", struct ovs_key_mpls, FLOW_MAX_MPLS_LABELS) {
5505 SCAN_FIELD_ARRAY("label=", mpls_label, mpls_lse);
5506 SCAN_FIELD_ARRAY("tc=", mpls_tc, mpls_lse);
5507 SCAN_FIELD_ARRAY("ttl=", mpls_ttl, mpls_lse);
5508 SCAN_FIELD_ARRAY("bos=", mpls_bos, mpls_lse);
5509 } SCAN_END_ARRAY(OVS_KEY_ATTR_MPLS);
5510
5511 SCAN_BEGIN("ipv4(", struct ovs_key_ipv4) {
5512 SCAN_FIELD("src=", ipv4, ipv4_src);
5513 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5514 SCAN_FIELD("proto=", u8, ipv4_proto);
5515 SCAN_FIELD("tos=", u8, ipv4_tos);
5516 SCAN_FIELD("ttl=", u8, ipv4_ttl);
5517 SCAN_FIELD("frag=", frag, ipv4_frag);
5518 } SCAN_END(OVS_KEY_ATTR_IPV4);
5519
5520 SCAN_BEGIN("ipv6(", struct ovs_key_ipv6) {
5521 SCAN_FIELD("src=", in6_addr, ipv6_src);
5522 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5523 SCAN_FIELD("label=", ipv6_label, ipv6_label);
5524 SCAN_FIELD("proto=", u8, ipv6_proto);
5525 SCAN_FIELD("tclass=", u8, ipv6_tclass);
5526 SCAN_FIELD("hlimit=", u8, ipv6_hlimit);
5527 SCAN_FIELD("frag=", frag, ipv6_frag);
5528 } SCAN_END(OVS_KEY_ATTR_IPV6);
5529
5530 SCAN_BEGIN("tcp(", struct ovs_key_tcp) {
5531 SCAN_FIELD("src=", be16, tcp_src);
5532 SCAN_FIELD("dst=", be16, tcp_dst);
5533 } SCAN_END(OVS_KEY_ATTR_TCP);
5534
5535 SCAN_SINGLE("tcp_flags(", ovs_be16, tcp_flags, OVS_KEY_ATTR_TCP_FLAGS);
5536
5537 SCAN_BEGIN("udp(", struct ovs_key_udp) {
5538 SCAN_FIELD("src=", be16, udp_src);
5539 SCAN_FIELD("dst=", be16, udp_dst);
5540 } SCAN_END(OVS_KEY_ATTR_UDP);
5541
5542 SCAN_BEGIN("sctp(", struct ovs_key_sctp) {
5543 SCAN_FIELD("src=", be16, sctp_src);
5544 SCAN_FIELD("dst=", be16, sctp_dst);
5545 } SCAN_END(OVS_KEY_ATTR_SCTP);
5546
5547 SCAN_BEGIN("icmp(", struct ovs_key_icmp) {
5548 SCAN_FIELD("type=", u8, icmp_type);
5549 SCAN_FIELD("code=", u8, icmp_code);
5550 } SCAN_END(OVS_KEY_ATTR_ICMP);
5551
5552 SCAN_BEGIN("icmpv6(", struct ovs_key_icmpv6) {
5553 SCAN_FIELD("type=", u8, icmpv6_type);
5554 SCAN_FIELD("code=", u8, icmpv6_code);
5555 } SCAN_END(OVS_KEY_ATTR_ICMPV6);
5556
5557 SCAN_BEGIN("arp(", struct ovs_key_arp) {
5558 SCAN_FIELD("sip=", ipv4, arp_sip);
5559 SCAN_FIELD("tip=", ipv4, arp_tip);
5560 SCAN_FIELD("op=", be16, arp_op);
5561 SCAN_FIELD("sha=", eth, arp_sha);
5562 SCAN_FIELD("tha=", eth, arp_tha);
5563 } SCAN_END(OVS_KEY_ATTR_ARP);
5564
5565 SCAN_BEGIN("nd(", struct ovs_key_nd) {
5566 SCAN_FIELD("target=", in6_addr, nd_target);
5567 SCAN_FIELD("sll=", eth, nd_sll);
5568 SCAN_FIELD("tll=", eth, nd_tll);
5569 } SCAN_END(OVS_KEY_ATTR_ND);
5570
5571 SCAN_BEGIN("nd_ext(", struct ovs_key_nd_extensions) {
5572 SCAN_FIELD("nd_reserved=", be32, nd_reserved);
5573 SCAN_FIELD("nd_options_type=", u8, nd_options_type);
5574 } SCAN_END(OVS_KEY_ATTR_ND_EXTENSIONS);
5575
5576 struct packet_type {
5577 ovs_be16 ns;
5578 ovs_be16 id;
5579 };
5580 SCAN_BEGIN("packet_type(", struct packet_type) {
5581 SCAN_FIELD("ns=", be16, ns);
5582 SCAN_FIELD("id=", be16, id);
5583 } SCAN_END(OVS_KEY_ATTR_PACKET_TYPE);
5584
5585 /* NSH is nested, so it needs special processing. */
5586 int ret = parse_odp_nsh_key_mask_attr(s, key, mask);
5587 if (ret != 0) {
5588 /* Either a negative error code or the number of characters consumed;
5589 * in both cases the attribute has been handled completely. */
5590 return ret;
5591 }
5592
5593 /* Encap is open-coded rather than using the SCAN_* macros because it nests recursively. */
5594 if (!strncmp(s, "encap(", 6)) {
5595 const char *start = s;
5596 size_t encap, encap_mask = 0;
5597
5598 if (context->depth + 1 == MAX_ODP_NESTED) {
5599 return -EINVAL;
5600 }
5601 context->depth++;
5602
5603 encap = nl_msg_start_nested(key, OVS_KEY_ATTR_ENCAP);
5604 if (mask) {
5605 encap_mask = nl_msg_start_nested(mask, OVS_KEY_ATTR_ENCAP);
5606 }
5607
5608 s += 6;
5609 for (;;) {
5610 int retval;
5611
5612 s += strspn(s, delimiters);
5613 if (!*s) {
5614 context->depth--;
5615 return -EINVAL;
5616 } else if (*s == ')') {
5617 break;
5618 }
5619
5620 retval = parse_odp_key_mask_attr(context, s, key, mask);
5621 if (retval < 0) {
5622 context->depth--;
5623 return retval;
5624 }
5625
5626 if (nl_attr_oversized(key->size - encap - NLA_HDRLEN)) {
5627 return -E2BIG;
5628 }
5629 s += retval;
5630 }
5631 s++;
5632
5633 nl_msg_end_nested(key, encap);
5634 if (mask) {
5635 nl_msg_end_nested(mask, encap_mask);
5636 }
5637 context->depth--;
5638
5639 return s - start;
5640 }
5641
5642 return -EINVAL;
5643 }
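
/* Putting it together, a key string with VLAN nesting parses roughly like
 * this (values illustrative, wrapped for readability):
 *
 *     in_port(2),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),
 *     eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0800),
 *     ipv4(src=10.0.0.2,dst=10.0.0.1,proto=6,tos=0,ttl=64,frag=no))
 *
 * encap(...) recurses into parse_odp_key_mask_attr() with 'depth'
 * incremented, bounded by MAX_ODP_NESTED. */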
5644
5645 /* Parses the string representation of a datapath flow key, in the
5646 * format output by odp_flow_key_format(). Returns 0 if successful,
5647 * otherwise a positive errno value. On success, the flow key is
5648 * appended to 'key' as a series of Netlink attributes. On failure, no
5649 * data is appended to 'key'. Either way, 'key''s data might be
5650 * reallocated.
5651 *
5652 * If 'port_names' is nonnull, it points to a simap that maps from a port name
5653 * to a port number. (Port names may be used instead of port numbers in
5654 * in_port.)
5655 *
5656 * On success, the attributes appended to 'key' are individually syntactically
5657 * valid, but they may not be valid as a sequence. 'key' might, for example,
5658 * have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
5659 int
5660 odp_flow_from_string(const char *s, const struct simap *port_names,
5661 struct ofpbuf *key, struct ofpbuf *mask)
5662 {
5663 const size_t old_size = key->size;
5664 struct parse_odp_context context = (struct parse_odp_context) {
5665 .port_names = port_names,
5666 };
5667 for (;;) {
5668 int retval;
5669
5670 s += strspn(s, delimiters);
5671 if (!*s) {
5672 return 0;
5673 }
5674
5675 /* Skip UFID. */
5676 ovs_u128 ufid;
5677 retval = odp_ufid_from_string(s, &ufid);
5678 if (retval < 0) {
5679 key->size = old_size;
5680 return -retval;
5681 } else if (retval > 0) {
5682 s += retval;
5683 s += s[0] == ' ' ? 1 : 0;
5684 }
5685
5686 retval = parse_odp_key_mask_attr(&context, s, key, mask);
5687 if (retval < 0) {
5688 key->size = old_size;
5689 return -retval;
5690 }
5691 s += retval;
5692 }
5693
5694 return 0;
5695 }
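
/* Minimal usage sketch (illustrative; error handling omitted):
 *
 *     struct ofpbuf key, mask;
 *     ofpbuf_init(&key, 0);
 *     ofpbuf_init(&mask, 0);
 *     int error = odp_flow_from_string("in_port(1),eth_type(0x0800)",
 *                                      NULL, &key, &mask);
 *     // On success 'error' is 0 and 'key'/'mask' hold Netlink attributes
 *     // suitable for odp_flow_key_to_flow() or dpif flow operations.
 *     ofpbuf_uninit(&key);
 *     ofpbuf_uninit(&mask);
 */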
5696
5697 static uint8_t
5698 ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
5699 {
5700 if (is_mask) {
5701 /* Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration type,
5702 * not a set of flags or bitfields. Hence, if the struct flow nw_frag
5703 * mask, which is a set of bits, has the FLOW_NW_FRAG_ANY as zero, we
5704 * must use a zero mask for the netlink frag field, and all ones mask
5705 * otherwise. */
5706 return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
5707 }
5708 return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
5709 : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
5710 : OVS_FRAG_TYPE_FIRST;
5711 }
5712
5713 static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
5714 static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
5715 static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
5716 bool is_mask);
5717 static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
5718 bool is_mask);
5719 static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
5720 bool is_mask);
5721 static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
5722 bool is_mask);
5723 static void get_arp_key(const struct flow *, struct ovs_key_arp *);
5724 static void put_arp_key(const struct ovs_key_arp *, struct flow *);
5725 static void get_nd_key(const struct flow *, struct ovs_key_nd *);
5726 static void put_nd_key(const struct ovs_key_nd *, struct flow *);
5727 static void get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh,
5728 bool is_mask);
5729 static void put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
5730 bool is_mask);
5731
5732 /* These share the same layout. */
5733 union ovs_key_tp {
5734 struct ovs_key_tcp tcp;
5735 struct ovs_key_udp udp;
5736 struct ovs_key_sctp sctp;
5737 };
5738
5739 static void get_tp_key(const struct flow *, union ovs_key_tp *);
5740 static void put_tp_key(const union ovs_key_tp *, struct flow *);
5741
5742 static void
5743 odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms,
5744 bool export_mask, struct ofpbuf *buf)
5745 {
5746 struct ovs_key_ethernet *eth_key;
5747 size_t encap[FLOW_MAX_VLAN_HEADERS] = {0};
5748 size_t max_vlans;
5749 const struct flow *flow = parms->flow;
5750 const struct flow *mask = parms->mask;
5751 const struct flow *data = export_mask ? mask : flow;
5752
5753 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
5754
5755 if (flow_tnl_dst_is_set(&flow->tunnel) || export_mask) {
5756 tun_key_to_attr(buf, &data->tunnel, &parms->flow->tunnel,
5757 parms->key_buf, NULL);
5758 }
5759
5760 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
5761
5762 if (parms->support.ct_state) {
5763 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
5764 ovs_to_odp_ct_state(data->ct_state));
5765 }
5766 if (parms->support.ct_zone) {
5767 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, data->ct_zone);
5768 }
5769 if (parms->support.ct_mark) {
5770 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, data->ct_mark);
5771 }
5772 if (parms->support.ct_label) {
5773 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &data->ct_label,
5774 sizeof(data->ct_label));
5775 }
5776 if (flow->ct_nw_proto) {
5777 if (parms->support.ct_orig_tuple
5778 && flow->dl_type == htons(ETH_TYPE_IP)) {
5779 struct ovs_key_ct_tuple_ipv4 ct = {
5780 data->ct_nw_src,
5781 data->ct_nw_dst,
5782 data->ct_tp_src,
5783 data->ct_tp_dst,
5784 data->ct_nw_proto,
5785 };
5786 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, &ct,
5787 sizeof ct);
5788 } else if (parms->support.ct_orig_tuple6
5789 && flow->dl_type == htons(ETH_TYPE_IPV6)) {
5790 struct ovs_key_ct_tuple_ipv6 ct = {
5791 data->ct_ipv6_src,
5792 data->ct_ipv6_dst,
5793 data->ct_tp_src,
5794 data->ct_tp_dst,
5795 data->ct_nw_proto,
5796 };
5797 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, &ct,
5798 sizeof ct);
5799 }
5800 }
5801 if (parms->support.recirc) {
5802 nl_msg_put_u32(buf, OVS_KEY_ATTR_RECIRC_ID, data->recirc_id);
5803 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, data->dp_hash);
5804 }
5805
5806 /* Add an ingress port attribute if this is a mask or 'in_port.odp_port'
5807 * is not the magical value "ODPP_NONE". */
5808 if (export_mask || flow->in_port.odp_port != ODPP_NONE) {
5809 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, data->in_port.odp_port);
5810 }
5811
5812 nl_msg_put_be32(buf, OVS_KEY_ATTR_PACKET_TYPE, data->packet_type);
5813
5814 if (OVS_UNLIKELY(parms->probe)) {
5815 max_vlans = FLOW_MAX_VLAN_HEADERS;
5816 } else {
5817 max_vlans = MIN(parms->support.max_vlan_headers, flow_vlan_limit);
5818 }
5819
5820 /* Conditionally add L2 attributes for Ethernet packets */
5821 if (flow->packet_type == htonl(PT_ETH)) {
5822 eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
5823 sizeof *eth_key);
5824 get_ethernet_key(data, eth_key);
5825
5826 for (int encaps = 0; encaps < max_vlans; encaps++) {
5827 ovs_be16 tpid = flow->vlans[encaps].tpid;
5828
5829 if (flow->vlans[encaps].tci == htons(0)) {
5830 if (eth_type_vlan(flow->dl_type)) {
5831 /* If the VLAN header was truncated, the TPID is in dl_type. */
5832 tpid = flow->dl_type;
5833 } else {
5834 break;
5835 }
5836 }
5837
5838 if (export_mask) {
5839 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
5840 } else {
5841 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, tpid);
5842 }
5843 nl_msg_put_be16(buf, OVS_KEY_ATTR_VLAN, data->vlans[encaps].tci);
5844 encap[encaps] = nl_msg_start_nested(buf, OVS_KEY_ATTR_ENCAP);
5845 if (flow->vlans[encaps].tci == htons(0)) {
5846 goto unencap;
5847 }
5848 }
5849 }
5850
5851 if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
5852 /* For backwards compatibility with kernels that don't support
5853 * wildcarding, the following convention is used to encode the
5854 * OVS_KEY_ATTR_ETHERTYPE for key and mask:
5855 *
5856 * key mask matches
5857 * -------- -------- -------
5858 * >0x5ff 0xffff Specified Ethernet II Ethertype.
5859 * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
5860 * <none> 0xffff Any non-Ethernet II frame (except valid
5861 * 802.3 SNAP packet with valid eth_type).
5862 */
5863 if (export_mask) {
5864 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
5865 }
5866 goto unencap;
5867 }
5868
5869 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
5870
5871 if (eth_type_vlan(flow->dl_type)) {
5872 goto unencap;
5873 }
5874
5875 if (flow->dl_type == htons(ETH_TYPE_IP)) {
5876 struct ovs_key_ipv4 *ipv4_key;
5877
5878 ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
5879 sizeof *ipv4_key);
5880 get_ipv4_key(data, ipv4_key, export_mask);
5881 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
5882 struct ovs_key_ipv6 *ipv6_key;
5883
5884 ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
5885 sizeof *ipv6_key);
5886 get_ipv6_key(data, ipv6_key, export_mask);
5887 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
5888 flow->dl_type == htons(ETH_TYPE_RARP)) {
5889 struct ovs_key_arp *arp_key;
5890
5891 arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
5892 sizeof *arp_key);
5893 get_arp_key(data, arp_key);
5894 } else if (eth_type_mpls(flow->dl_type)) {
5895 struct ovs_key_mpls *mpls_key;
5896 int i, n;
5897
5898 n = flow_count_mpls_labels(flow, NULL);
5899 if (export_mask) {
5900 n = MIN(n, parms->support.max_mpls_depth);
5901 }
5902 mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
5903 n * sizeof *mpls_key);
5904 for (i = 0; i < n; i++) {
5905 mpls_key[i].mpls_lse = data->mpls_lse[i];
5906 }
5907 } else if (flow->dl_type == htons(ETH_TYPE_NSH)) {
5908 nsh_key_to_attr(buf, &data->nsh, NULL, 0, export_mask);
5909 }
5910
5911 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
5912 if (flow->nw_proto == IPPROTO_TCP) {
5913 union ovs_key_tp *tcp_key;
5914
5915 tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
5916 sizeof *tcp_key);
5917 get_tp_key(data, tcp_key);
5918 if (data->tcp_flags || (mask && mask->tcp_flags)) {
5919 nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
5920 }
5921 } else if (flow->nw_proto == IPPROTO_UDP) {
5922 union ovs_key_tp *udp_key;
5923
5924 udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
5925 sizeof *udp_key);
5926 get_tp_key(data, udp_key);
5927 } else if (flow->nw_proto == IPPROTO_SCTP) {
5928 union ovs_key_tp *sctp_key;
5929
5930 sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
5931 sizeof *sctp_key);
5932 get_tp_key(data, sctp_key);
5933 } else if (flow->dl_type == htons(ETH_TYPE_IP)
5934 && flow->nw_proto == IPPROTO_ICMP) {
5935 struct ovs_key_icmp *icmp_key;
5936
5937 icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
5938 sizeof *icmp_key);
5939 icmp_key->icmp_type = ntohs(data->tp_src);
5940 icmp_key->icmp_code = ntohs(data->tp_dst);
5941 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
5942 && flow->nw_proto == IPPROTO_ICMPV6) {
5943 struct ovs_key_icmpv6 *icmpv6_key;
5944
5945 icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
5946 sizeof *icmpv6_key);
5947 icmpv6_key->icmpv6_type = ntohs(data->tp_src);
5948 icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
5949
5950 if (is_nd(flow, NULL)
5951 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, ICMP
5952 * type and code are 8 bits wide. Therefore, an exact match
5953 * looks like htons(0xff), not htons(0xffff). See
5954 * xlate_wc_finish() for details. */
5955 && (!export_mask || (data->tp_src == htons(0xff)
5956 && data->tp_dst == htons(0xff)))) {
5957 struct ovs_key_nd *nd_key;
5958 struct ovs_key_nd_extensions *nd_ext_key;
5959 nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
5960 sizeof *nd_key);
5961 nd_key->nd_target = data->nd_target;
5962 nd_key->nd_sll = data->arp_sha;
5963 nd_key->nd_tll = data->arp_tha;
5964
5965 /* Add ND Extensions Attr only if reserved field
5966 * or options type is set. */
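/* ('struct flow' has no dedicated members for these fields, so the ND
 * reserved word is stashed in 'igmp_group_ip4' and the options type in
 * 'tcp_flags'; the parsing code further below makes the same
 * assumption.) */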
5967 if (data->igmp_group_ip4 != 0 ||
5968 data->tcp_flags != 0) {
5969 nd_ext_key =
5970 nl_msg_put_unspec_uninit(buf,
5971 OVS_KEY_ATTR_ND_EXTENSIONS,
5972 sizeof *nd_ext_key);
5973 nd_ext_key->nd_reserved = data->igmp_group_ip4;
5974 nd_ext_key->nd_options_type = ntohs(data->tcp_flags);
5975 }
5976 }
5977 }
5978 }
5979
5980 unencap:
5981 for (int encaps = max_vlans - 1; encaps >= 0; encaps--) {
5982 if (encap[encaps]) {
5983 nl_msg_end_nested(buf, encap[encaps]);
5984 }
5985 }
5986 }
5987
5988 /* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
5989 *
5990 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
5991 * capable of being expanded to allow for that much space. */
5992 void
5993 odp_flow_key_from_flow(const struct odp_flow_key_parms *parms,
5994 struct ofpbuf *buf)
5995 {
5996 odp_flow_key_from_flow__(parms, false, buf);
5997 }
5998
5999 /* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
6000 * 'buf'.
6001 *
6002 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
6003 * capable of being expanded to allow for that much space. */
6004 void
6005 odp_flow_key_from_mask(const struct odp_flow_key_parms *parms,
6006 struct ofpbuf *buf)
6007 {
6008 odp_flow_key_from_flow__(parms, true, buf);
6009 }
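
/*
 * Hedged usage sketch (not a specific OVS call site; 'flow', 'wc' and
 * 'dp_support' are assumed to exist in the caller, with 'dp_support' a
 * struct odp_support describing datapath features): serializing a flow
 * and its mask into Netlink attributes using stack-allocated stub buffers.
 *
 *     uint64_t key_stub[ODPUTIL_FLOW_KEY_BYTES / 8];
 *     uint64_t mask_stub[ODPUTIL_FLOW_KEY_BYTES / 8];
 *     struct odp_flow_key_parms parms = {
 *         .flow = &flow,
 *         .mask = &wc.masks,
 *         .support = dp_support,
 *     };
 *     struct ofpbuf keybuf, maskbuf;
 *
 *     ofpbuf_use_stub(&keybuf, key_stub, sizeof key_stub);
 *     odp_flow_key_from_flow(&parms, &keybuf);
 *
 *     ofpbuf_use_stub(&maskbuf, mask_stub, sizeof mask_stub);
 *     odp_flow_key_from_mask(&parms, &maskbuf);
 */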
6010
6011 /* Generate ODP flow key from the given packet metadata */
6012 void
6013 odp_key_from_dp_packet(struct ofpbuf *buf, const struct dp_packet *packet)
6014 {
6015 const struct pkt_metadata *md = &packet->md;
6016
6017 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
6018
6019 if (flow_tnl_dst_is_set(&md->tunnel)) {
6020 tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL, NULL);
6021 }
6022
6023 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
6024
6025 if (md->ct_state) {
6026 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
6027 ovs_to_odp_ct_state(md->ct_state));
6028 if (md->ct_zone) {
6029 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, md->ct_zone);
6030 }
6031 if (md->ct_mark) {
6032 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, md->ct_mark);
6033 }
6034 if (!ovs_u128_is_zero(md->ct_label)) {
6035 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &md->ct_label,
6036 sizeof(md->ct_label));
6037 }
6038 if (md->ct_orig_tuple_ipv6) {
6039 if (md->ct_orig_tuple.ipv6.ipv6_proto) {
6040 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
6041 &md->ct_orig_tuple.ipv6,
6042 sizeof md->ct_orig_tuple.ipv6);
6043 }
6044 } else {
6045 if (md->ct_orig_tuple.ipv4.ipv4_proto) {
6046 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
6047 &md->ct_orig_tuple.ipv4,
6048 sizeof md->ct_orig_tuple.ipv4);
6049 }
6050 }
6051 }
6052
6053 /* Add an ingress port attribute if 'odp_in_port' is not the magical
6054 * value "ODPP_NONE". */
6055 if (md->in_port.odp_port != ODPP_NONE) {
6056 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, md->in_port.odp_port);
6057 }
6058
6059 /* Add OVS_KEY_ATTR_ETHERTYPE for non-Ethernet packets. */
6060 if (pt_ns(packet->packet_type) == OFPHTN_ETHERTYPE) {
6061 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE,
6062 pt_ns_type_be(packet->packet_type));
6063 }
6064 }
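
/*
 * Minimal sketch, with an assumed caller context: building the
 * metadata-only key for a received packet before handing it back to the
 * datapath or logging it.
 *
 *     uint64_t stub[512 / 8];
 *     struct ofpbuf md_buf;
 *
 *     ofpbuf_use_stub(&md_buf, stub, sizeof stub);
 *     odp_key_from_dp_packet(&md_buf, packet);
 *     (md_buf.data and md_buf.size now hold the OVS_KEY_ATTR_* metadata.)
 */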
6065
6066 /* Generate packet metadata from the given ODP flow key. */
6067 void
6068 odp_key_to_dp_packet(const struct nlattr *key, size_t key_len,
6069 struct dp_packet *packet)
6070 {
6071 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6072 const struct nlattr *nla;
6073 struct pkt_metadata *md = &packet->md;
6074 ovs_be32 packet_type = htonl(PT_UNKNOWN);
6075 ovs_be16 ethertype = 0;
6076 size_t left;
6077
6078 pkt_metadata_init(md, ODPP_NONE);
6079
6080 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6081 enum ovs_key_attr type = nl_attr_type(nla);
6082 size_t len = nl_attr_get_size(nla);
6083 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6084 OVS_KEY_ATTR_MAX, type);
6085
6086 if (len != expected_len && expected_len >= 0) {
6087 continue;
6088 }
6089
6090 switch (type) {
6091 case OVS_KEY_ATTR_RECIRC_ID:
6092 md->recirc_id = nl_attr_get_u32(nla);
6093 break;
6094 case OVS_KEY_ATTR_DP_HASH:
6095 md->dp_hash = nl_attr_get_u32(nla);
6096 break;
6097 case OVS_KEY_ATTR_PRIORITY:
6098 md->skb_priority = nl_attr_get_u32(nla);
6099 break;
6100 case OVS_KEY_ATTR_SKB_MARK:
6101 md->pkt_mark = nl_attr_get_u32(nla);
6102 break;
6103 case OVS_KEY_ATTR_CT_STATE:
6104 md->ct_state = odp_to_ovs_ct_state(nl_attr_get_u32(nla));
6105 break;
6106 case OVS_KEY_ATTR_CT_ZONE:
6107 md->ct_zone = nl_attr_get_u16(nla);
6108 break;
6109 case OVS_KEY_ATTR_CT_MARK:
6110 md->ct_mark = nl_attr_get_u32(nla);
6111 break;
6112 case OVS_KEY_ATTR_CT_LABELS: {
6113 md->ct_label = nl_attr_get_u128(nla);
6114 break;
6115 }
6116 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
6117 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(nla);
6118 md->ct_orig_tuple.ipv4 = *ct;
6119 md->ct_orig_tuple_ipv6 = false;
6120 break;
6121 }
6122 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
6123 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(nla);
6124
6125 md->ct_orig_tuple.ipv6 = *ct;
6126 md->ct_orig_tuple_ipv6 = true;
6127 break;
6128 }
6129 case OVS_KEY_ATTR_TUNNEL: {
6130 enum odp_key_fitness res;
6131
6132 res = odp_tun_key_from_attr(nla, &md->tunnel);
6133 if (res == ODP_FIT_ERROR) {
6134 memset(&md->tunnel, 0, sizeof md->tunnel);
6135 }
6136 break;
6137 }
6138 case OVS_KEY_ATTR_IN_PORT:
6139 md->in_port.odp_port = nl_attr_get_odp_port(nla);
6140 break;
6141 case OVS_KEY_ATTR_ETHERNET:
6142 /* Presence of OVS_KEY_ATTR_ETHERNET indicates Ethernet packet. */
6143 packet_type = htonl(PT_ETH);
6144 break;
6145 case OVS_KEY_ATTR_ETHERTYPE:
6146 ethertype = nl_attr_get_be16(nla);
6147 break;
6148 case OVS_KEY_ATTR_UNSPEC:
6149 case OVS_KEY_ATTR_ENCAP:
6150 case OVS_KEY_ATTR_VLAN:
6151 case OVS_KEY_ATTR_IPV4:
6152 case OVS_KEY_ATTR_IPV6:
6153 case OVS_KEY_ATTR_TCP:
6154 case OVS_KEY_ATTR_UDP:
6155 case OVS_KEY_ATTR_ICMP:
6156 case OVS_KEY_ATTR_ICMPV6:
6157 case OVS_KEY_ATTR_ARP:
6158 case OVS_KEY_ATTR_ND:
6159 case OVS_KEY_ATTR_ND_EXTENSIONS:
6160 case OVS_KEY_ATTR_SCTP:
6161 case OVS_KEY_ATTR_TCP_FLAGS:
6162 case OVS_KEY_ATTR_MPLS:
6163 case OVS_KEY_ATTR_PACKET_TYPE:
6164 case OVS_KEY_ATTR_NSH:
6165 case __OVS_KEY_ATTR_MAX:
6166 default:
6167 break;
6168 }
6169 }
6170
6171 if (packet_type == htonl(PT_ETH)) {
6172 packet->packet_type = htonl(PT_ETH);
6173 } else if (packet_type == htonl(PT_UNKNOWN) && ethertype != 0) {
6174 packet->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
6175 ntohs(ethertype));
6176 } else {
6177 VLOG_ERR_RL(&rl, "Packet without ETHERTYPE. Unknown packet_type.");
6178 }
6179 }
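
/*
 * Hedged example (the 'upcall' structure and its members are assumptions,
 * not a particular OVS type): restoring packet metadata from the flow key
 * that accompanied an upcall, i.e. the inverse of odp_key_from_dp_packet()
 * above.
 *
 *     odp_key_to_dp_packet(upcall->key, upcall->key_len, packet);
 *     (packet->md now reflects in_port, tunnel, conntrack state, etc.)
 */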
6180
6181 uint32_t
6182 odp_flow_key_hash(const struct nlattr *key, size_t key_len)
6183 {
6184 BUILD_ASSERT_DECL(!(NLA_ALIGNTO % sizeof(uint32_t)));
6185 return hash_bytes32(ALIGNED_CAST(const uint32_t *, key), key_len, 0);
6186 }
6187
6188 static void
6189 log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
6190 uint64_t attrs, int out_of_range_attr,
6191 const struct nlattr *key, size_t key_len)
6192 {
6193 struct ds s;
6194 int i;
6195
6196 if (VLOG_DROP_DBG(rl)) {
6197 return;
6198 }
6199
6200 ds_init(&s);
6201 for (i = 0; i < 64; i++) {
6202 if (attrs & (UINT64_C(1) << i)) {
6203 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6204
6205 ds_put_format(&s, " %s",
6206 ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
6207 }
6208 }
6209 if (out_of_range_attr) {
6210 ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
6211 }
6212
6213 ds_put_cstr(&s, ": ");
6214 odp_flow_key_format(key, key_len, &s);
6215
6216 VLOG_DBG("%s:%s", title, ds_cstr(&s));
6217 ds_destroy(&s);
6218 }
6219
6220 static uint8_t
6221 odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
6222 {
6223 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6224
6225 if (is_mask) {
6226 return odp_frag ? FLOW_NW_FRAG_MASK : 0;
6227 }
6228
6229 if (odp_frag > OVS_FRAG_TYPE_LATER) {
6230 VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
6231 return 0xff; /* Error. */
6232 }
6233
6234 return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
6235 : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
6236 : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
6237 }
6238
6239 static bool
6240 parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
6241 const struct nlattr *attrs[], uint64_t *present_attrsp,
6242 int *out_of_range_attrp)
6243 {
6244 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6245 const struct nlattr *nla;
6246 uint64_t present_attrs;
6247 size_t left;
6248
6249 BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
6250 present_attrs = 0;
6251 *out_of_range_attrp = 0;
6252 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6253 uint16_t type = nl_attr_type(nla);
6254 size_t len = nl_attr_get_size(nla);
6255 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6256 OVS_KEY_ATTR_MAX, type);
6257
6258 if (len != expected_len && expected_len >= 0) {
6259 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6260
6261 VLOG_ERR_RL(&rl, "attribute %s has length %"PRIuSIZE" but should have "
6262 "length %d", ovs_key_attr_to_string(type, namebuf,
6263 sizeof namebuf),
6264 len, expected_len);
6265 return false;
6266 }
6267
6268 if (type > OVS_KEY_ATTR_MAX) {
6269 *out_of_range_attrp = type;
6270 } else {
6271 if (present_attrs & (UINT64_C(1) << type)) {
6272 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6273
6274 VLOG_ERR_RL(&rl, "duplicate %s attribute in flow key",
6275 ovs_key_attr_to_string(type,
6276 namebuf, sizeof namebuf));
6277 return false;
6278 }
6279
6280 present_attrs |= UINT64_C(1) << type;
6281 attrs[type] = nla;
6282 }
6283 }
6284 if (left) {
6285 VLOG_ERR_RL(&rl, "trailing garbage in flow key");
6286 return false;
6287 }
6288
6289 *present_attrsp = present_attrs;
6290 return true;
6291 }
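
/*
 * Simplified sketch of how the output of parse_flow_nlattrs() is typically
 * consumed (a generic pattern, not a specific call site):
 *
 *     const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
 *     uint64_t present_attrs;
 *     int out_of_range_attr;
 *
 *     if (!parse_flow_nlattrs(key, key_len, attrs,
 *                             &present_attrs, &out_of_range_attr)) {
 *         return ODP_FIT_ERROR;
 *     }
 *     if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
 *         flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
 *     }
 */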
6292
6293 static enum odp_key_fitness
6294 check_expectations(uint64_t present_attrs, int out_of_range_attr,
6295 uint64_t expected_attrs,
6296 const struct nlattr *key, size_t key_len)
6297 {
6298 uint64_t missing_attrs;
6299 uint64_t extra_attrs;
6300
6301 missing_attrs = expected_attrs & ~present_attrs;
6302 if (missing_attrs) {
6303 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6304 log_odp_key_attributes(&rl, "expected but not present",
6305 missing_attrs, 0, key, key_len);
6306 return ODP_FIT_TOO_LITTLE;
6307 }
6308
6309 extra_attrs = present_attrs & ~expected_attrs;
6310 if (extra_attrs || out_of_range_attr) {
6311 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6312 log_odp_key_attributes(&rl, "present but not expected",
6313 extra_attrs, out_of_range_attr, key, key_len);
6314 return ODP_FIT_TOO_MUCH;
6315 }
6316
6317 return ODP_FIT_PERFECT;
6318 }
6319
6320 static bool
6321 parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6322 uint64_t present_attrs, uint64_t *expected_attrs,
6323 struct flow *flow, const struct flow *src_flow)
6324 {
6325 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6326 bool is_mask = flow != src_flow;
6327
6328 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
6329 flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
6330 if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
6331 VLOG_ERR_RL(&rl, "invalid Ethertype %"PRIu16" in flow key",
6332 ntohs(flow->dl_type));
6333 return false;
6334 }
6335 if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
6336 flow->dl_type != htons(0xffff)) {
6337 return false;
6338 }
6339 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
6340 } else {
6341 if (!is_mask) {
6342 /* Default ethertype for well-known L3 packets. */
6343 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6344 flow->dl_type = htons(ETH_TYPE_IP);
6345 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6346 flow->dl_type = htons(ETH_TYPE_IPV6);
6347 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6348 flow->dl_type = htons(ETH_TYPE_MPLS);
6349 } else {
6350 flow->dl_type = htons(FLOW_DL_TYPE_NONE);
6351 }
6352 } else if (src_flow->packet_type != htonl(PT_ETH)) {
6353 /* dl_type is mandatory for non-Ethernet packets */
6354 flow->dl_type = htons(0xffff);
6355 } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
6356 /* See comments in odp_flow_key_from_flow__(). */
6357 VLOG_ERR_RL(&rl, "mask expected for non-Ethernet II frame");
6358 return false;
6359 }
6360 }
6361 return true;
6362 }
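
/* For example, a flow key that carries OVS_KEY_ATTR_IPV4 but omits
 * OVS_KEY_ATTR_ETHERTYPE is interpreted by the function above as an IPv4
 * packet with dl_type ETH_TYPE_IP; with no recognized L3 attribute at all,
 * dl_type falls back to FLOW_DL_TYPE_NONE. */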
6363
6364 static enum odp_key_fitness
6365 parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6366 uint64_t present_attrs, int out_of_range_attr,
6367 uint64_t *expected_attrs, struct flow *flow,
6368 const struct nlattr *key, size_t key_len,
6369 const struct flow *src_flow, bool need_check)
6370 {
6371 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6372 bool is_mask = src_flow != flow;
6373 const void *check_start = NULL;
6374 size_t check_len = 0;
6375 enum ovs_key_attr expected_bit = 0xff;
6376
6377 if (eth_type_mpls(src_flow->dl_type)) {
6378 if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6379 *expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
6380 }
6381 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6382 size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
6383 const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
6384 int n = size / sizeof(ovs_be32);
6385 int i;
6386
6387 if (!size || size % sizeof(ovs_be32)) {
6388 return ODP_FIT_ERROR;
6389 }
6390 if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
6391 return ODP_FIT_ERROR;
6392 }
6393
6394 for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
6395 flow->mpls_lse[i] = mpls_lse[i];
6396 }
6397 if (n > FLOW_MAX_MPLS_LABELS) {
6398 return ODP_FIT_TOO_MUCH;
6399 }
6400
6401 if (!is_mask) {
6402 /* BOS may be set only in the innermost label. */
6403 for (i = 0; i < n - 1; i++) {
6404 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
6405 return ODP_FIT_ERROR;
6406 }
6407 }
6408
6409 /* BOS must be set in the innermost label. */
6410 if (n < FLOW_MAX_MPLS_LABELS
6411 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
6412 return ODP_FIT_TOO_LITTLE;
6413 }
6414 }
6415 }
6416
6417 goto done;
6418 } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
6419 if (!is_mask) {
6420 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
6421 }
6422 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6423 const struct ovs_key_ipv4 *ipv4_key;
6424
6425 ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
6426 put_ipv4_key(ipv4_key, flow, is_mask);
6427 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6428 return ODP_FIT_ERROR;
6429 }
6430 if (is_mask) {
6431 check_start = ipv4_key;
6432 check_len = sizeof *ipv4_key;
6433 expected_bit = OVS_KEY_ATTR_IPV4;
6434 }
6435 }
6436 } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
6437 if (!is_mask) {
6438 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
6439 }
6440 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6441 const struct ovs_key_ipv6 *ipv6_key;
6442
6443 ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
6444 put_ipv6_key(ipv6_key, flow, is_mask);
6445 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6446 return ODP_FIT_ERROR;
6447 }
6448 if (is_mask) {
6449 check_start = ipv6_key;
6450 check_len = sizeof *ipv6_key;
6451 expected_bit = OVS_KEY_ATTR_IPV6;
6452 }
6453 }
6454 } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
6455 src_flow->dl_type == htons(ETH_TYPE_RARP)) {
6456 if (!is_mask) {
6457 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
6458 }
6459 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
6460 const struct ovs_key_arp *arp_key;
6461
6462 arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
6463 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
6464 VLOG_ERR_RL(&rl, "unsupported ARP opcode %"PRIu16" in flow "
6465 "key", ntohs(arp_key->arp_op));
6466 return ODP_FIT_ERROR;
6467 }
6468 put_arp_key(arp_key, flow);
6469 if (is_mask) {
6470 check_start = arp_key;
6471 check_len = sizeof *arp_key;
6472 expected_bit = OVS_KEY_ATTR_ARP;
6473 }
6474 }
6475 } else if (src_flow->dl_type == htons(ETH_TYPE_NSH)) {
6476 if (!is_mask) {
6477 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_NSH;
6478 }
6479 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_NSH)) {
6480 odp_nsh_key_from_attr(attrs[OVS_KEY_ATTR_NSH], &flow->nsh, NULL);
6481 if (is_mask) {
6482 check_start = nl_attr_get(attrs[OVS_KEY_ATTR_NSH]);
6483 check_len = nl_attr_get_size(attrs[OVS_KEY_ATTR_NSH]);
6484 expected_bit = OVS_KEY_ATTR_NSH;
6485 }
6486 }
6487 } else {
6488 goto done;
6489 }
6490 if (check_len > 0) { /* Happens only when 'is_mask'. */
6491 if (!is_all_zeros(check_start, check_len) &&
6492 flow->dl_type != htons(0xffff)) {
6493 return ODP_FIT_ERROR;
6494 } else {
6495 *expected_attrs |= UINT64_C(1) << expected_bit;
6496 }
6497 }
6498
6499 expected_bit = OVS_KEY_ATTR_UNSPEC;
6500 if (src_flow->nw_proto == IPPROTO_TCP
6501 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6502 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6503 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6504 if (!is_mask) {
6505 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
6506 }
6507 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
6508 const union ovs_key_tp *tcp_key;
6509
6510 tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
6511 put_tp_key(tcp_key, flow);
6512 expected_bit = OVS_KEY_ATTR_TCP;
6513 }
6514 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
6515 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
6516 flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
6517 }
6518 } else if (src_flow->nw_proto == IPPROTO_UDP
6519 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6520 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6521 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6522 if (!is_mask) {
6523 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
6524 }
6525 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
6526 const union ovs_key_tp *udp_key;
6527
6528 udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
6529 put_tp_key(udp_key, flow);
6530 expected_bit = OVS_KEY_ATTR_UDP;
6531 }
6532 } else if (src_flow->nw_proto == IPPROTO_SCTP
6533 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6534 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6535 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6536 if (!is_mask) {
6537 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
6538 }
6539 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
6540 const union ovs_key_tp *sctp_key;
6541
6542 sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
6543 put_tp_key(sctp_key, flow);
6544 expected_bit = OVS_KEY_ATTR_SCTP;
6545 }
6546 } else if (src_flow->nw_proto == IPPROTO_ICMP
6547 && src_flow->dl_type == htons(ETH_TYPE_IP)
6548 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6549 if (!is_mask) {
6550 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
6551 }
6552 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
6553 const struct ovs_key_icmp *icmp_key;
6554
6555 icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
6556 flow->tp_src = htons(icmp_key->icmp_type);
6557 flow->tp_dst = htons(icmp_key->icmp_code);
6558 expected_bit = OVS_KEY_ATTR_ICMP;
6559 }
6560 } else if (src_flow->nw_proto == IPPROTO_ICMPV6
6561 && src_flow->dl_type == htons(ETH_TYPE_IPV6)
6562 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6563 if (!is_mask) {
6564 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
6565 }
6566 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
6567 const struct ovs_key_icmpv6 *icmpv6_key;
6568
6569 icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
6570 flow->tp_src = htons(icmpv6_key->icmpv6_type);
6571 flow->tp_dst = htons(icmpv6_key->icmpv6_code);
6572 expected_bit = OVS_KEY_ATTR_ICMPV6;
6573 if (is_nd(src_flow, NULL)) {
6574 if (!is_mask) {
6575 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
6576 }
6577 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
6578 const struct ovs_key_nd *nd_key;
6579
6580 nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
6581 flow->nd_target = nd_key->nd_target;
6582 flow->arp_sha = nd_key->nd_sll;
6583 flow->arp_tha = nd_key->nd_tll;
6584 if (is_mask) {
6585 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
6586 * ICMP type and code are 8 bits wide. Therefore, an
6587 * exact match looks like htons(0xff), not
6588 * htons(0xffff). See xlate_wc_finish() for details.
6589 */
6590 if (!is_all_zeros(nd_key, sizeof *nd_key) &&
6591 (flow->tp_src != htons(0xff) ||
6592 flow->tp_dst != htons(0xff))) {
6593 return ODP_FIT_ERROR;
6594 } else {
6595 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
6596 }
6597 }
6598 }
6599 if (present_attrs &
6600 (UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS)) {
6601 const struct ovs_key_nd_extensions *nd_ext_key;
6602 if (!is_mask) {
6603 *expected_attrs |=
6604 UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
6605 }
6606
6607 nd_ext_key =
6608 nl_attr_get(attrs[OVS_KEY_ATTR_ND_EXTENSIONS]);
6609 flow->igmp_group_ip4 = nd_ext_key->nd_reserved;
6610 flow->tcp_flags = htons(nd_ext_key->nd_options_type);
6611
6612 if (is_mask) {
6613 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
6614 * ICMP type and code are 8 bits wide. Therefore, an
6615 * exact match looks like htons(0xff), not
6616 * htons(0xffff). See xlate_wc_finish() for details.
6617 */
6618 if (!is_all_zeros(nd_ext_key, sizeof *nd_ext_key) &&
6619 (flow->tp_src != htons(0xff) ||
6620 flow->tp_dst != htons(0xff))) {
6621 return ODP_FIT_ERROR;
6622 } else {
6623 *expected_attrs |=
6624 UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
6625 }
6626 }
6627 }
6628 }
6629 }
6630 } else if (src_flow->nw_proto == IPPROTO_IGMP
6631 && src_flow->dl_type == htons(ETH_TYPE_IP)) {
6632 /* OVS userspace parses the IGMP type, code, and group, but its
6633 * datapaths do not, so there is always missing information. */
6634 return ODP_FIT_TOO_LITTLE;
6635 }
6636 if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
6637 if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
6638 return ODP_FIT_ERROR;
6639 } else {
6640 *expected_attrs |= UINT64_C(1) << expected_bit;
6641 }
6642 }
6643
6644 done:
6645 return need_check ? check_expectations(present_attrs, out_of_range_attr,
6646 *expected_attrs, key, key_len) : ODP_FIT_PERFECT;
6647 }
6648
6649 /* Parse 802.1Q header then encapsulated L3 attributes. */
6650 static enum odp_key_fitness
6651 parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6652 uint64_t present_attrs, int out_of_range_attr,
6653 uint64_t expected_attrs, struct flow *flow,
6654 const struct nlattr *key, size_t key_len,
6655 const struct flow *src_flow)
6656 {
6657 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6658 bool is_mask = src_flow != flow;
6659
6660 const struct nlattr *encap;
6661 enum odp_key_fitness encap_fitness;
6662 enum odp_key_fitness fitness = ODP_FIT_ERROR;
6663 int encaps = 0;
6664
6665 while (encaps < flow_vlan_limit &&
6666 (is_mask
6667 ? (src_flow->vlans[encaps].tci & htons(VLAN_CFI)) != 0
6668 : eth_type_vlan(flow->dl_type))) {
6669
6670 encap = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
6671 ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
6672
6673 /* Calculate fitness of outer attributes. */
6674 if (!is_mask) {
6675 expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
6676 (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
6677 } else {
6678 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
6679 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
6680 }
6681 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
6682 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
6683 }
6684 }
6685 fitness = check_expectations(present_attrs, out_of_range_attr,
6686 expected_attrs, key, key_len);
6687
6688 /* Set vlan_tci.
6689 * Remove the TPID from dl_type since it's not the real Ethertype. */
6690 flow->vlans[encaps].tpid = flow->dl_type;
6691 flow->dl_type = htons(0);
6692 flow->vlans[encaps].tci =
6693 (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
6694 ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
6695 : htons(0));
6696 if (!is_mask) {
6697 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) ||
6698 !(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
6699 return ODP_FIT_TOO_LITTLE;
6700 } else if (flow->vlans[encaps].tci == htons(0)) {
6701 /* Corner case for a truncated 802.1Q header. */
6702 if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
6703 return ODP_FIT_TOO_MUCH;
6704 }
6705 return fitness;
6706 } else if (!(flow->vlans[encaps].tci & htons(VLAN_CFI))) {
6707 VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
6708 "but CFI bit is not set",
6709 ntohs(flow->vlans[encaps].tci));
6710 return ODP_FIT_ERROR;
6711 }
6712 } else {
6713 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
6714 return fitness;
6715 }
6716 }
6717
6718 /* Now parse the encapsulated attributes. */
6719 if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
6720 attrs, &present_attrs, &out_of_range_attr)) {
6721 return ODP_FIT_ERROR;
6722 }
6723 expected_attrs = 0;
6724
6725 if (!parse_ethertype(attrs, present_attrs, &expected_attrs,
6726 flow, src_flow)) {
6727 return ODP_FIT_ERROR;
6728 }
6729 encap_fitness = parse_l2_5_onward(attrs, present_attrs,
6730 out_of_range_attr,
6731 &expected_attrs,
6732 flow, key, key_len,
6733 src_flow, false);
6734 if (encap_fitness != ODP_FIT_PERFECT) {
6735 return encap_fitness;
6736 }
6737 encaps++;
6738 }
6739
6740 return check_expectations(present_attrs, out_of_range_attr,
6741 expected_attrs, key, key_len);
6742 }
6743
6744 static enum odp_key_fitness
6745 odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
6746 struct flow *flow, const struct flow *src_flow)
6747 {
6748 const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
6749 uint64_t expected_attrs;
6750 uint64_t present_attrs;
6751 int out_of_range_attr;
6752 bool is_mask = src_flow != flow;
6753
6754 memset(flow, 0, sizeof *flow);
6755
6756 /* Parse attributes. */
6757 if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
6758 &out_of_range_attr)) {
6759 return ODP_FIT_ERROR;
6760 }
6761 expected_attrs = 0;
6762
6763 /* Metadata. */
6764 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
6765 flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
6766 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
6767 } else if (is_mask) {
6768 /* Always exact match recirc_id if it is not specified. */
6769 flow->recirc_id = UINT32_MAX;
6770 }
6771
6772 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
6773 flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
6774 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
6775 }
6776 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
6777 flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
6778 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
6779 }
6780
6781 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
6782 flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
6783 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
6784 }
6785
6786 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_STATE)) {
6787 uint32_t odp_state = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_STATE]);
6788
6789 flow->ct_state = odp_to_ovs_ct_state(odp_state);
6790 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_STATE;
6791 }
6792 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE)) {
6793 flow->ct_zone = nl_attr_get_u16(attrs[OVS_KEY_ATTR_CT_ZONE]);
6794 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE;
6795 }
6796 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_MARK)) {
6797 flow->ct_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_MARK]);
6798 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_MARK;
6799 }
6800 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS)) {
6801 flow->ct_label = nl_attr_get_u128(attrs[OVS_KEY_ATTR_CT_LABELS]);
6802 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS;
6803 }
6804 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
6805 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
6806 flow->ct_nw_src = ct->ipv4_src;
6807 flow->ct_nw_dst = ct->ipv4_dst;
6808 flow->ct_nw_proto = ct->ipv4_proto;
6809 flow->ct_tp_src = ct->src_port;
6810 flow->ct_tp_dst = ct->dst_port;
6811 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
6812 }
6813 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
6814 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
6815
6816 flow->ct_ipv6_src = ct->ipv6_src;
6817 flow->ct_ipv6_dst = ct->ipv6_dst;
6818 flow->ct_nw_proto = ct->ipv6_proto;
6819 flow->ct_tp_src = ct->src_port;
6820 flow->ct_tp_dst = ct->dst_port;
6821 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
6822 }
6823
6824 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
6825 enum odp_key_fitness res;
6826
6827 res = odp_tun_key_from_attr__(attrs[OVS_KEY_ATTR_TUNNEL], is_mask,
6828 &flow->tunnel);
6829 if (res == ODP_FIT_ERROR) {
6830 return ODP_FIT_ERROR;
6831 } else if (res == ODP_FIT_PERFECT) {
6832 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
6833 }
6834 }
6835
6836 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
6837 flow->in_port.odp_port
6838 = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
6839 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
6840 } else if (!is_mask) {
6841 flow->in_port.odp_port = ODPP_NONE;
6842 }
6843
6844 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE)) {
6845 flow->packet_type
6846 = nl_attr_get_be32(attrs[OVS_KEY_ATTR_PACKET_TYPE]);
6847 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE;
6848 if (pt_ns(src_flow->packet_type) == OFPHTN_ETHERTYPE) {
6849 flow->dl_type = pt_ns_type_be(flow->packet_type);
6850 }
6851 } else if (!is_mask) {
6852 flow->packet_type = htonl(PT_ETH);
6853 }
6854
6855 /* Check for Ethernet header. */
6856 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
6857 const struct ovs_key_ethernet *eth_key;
6858
6859 eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
6860 put_ethernet_key(eth_key, flow);
6861 if (!is_mask) {
6862 flow->packet_type = htonl(PT_ETH);
6863 }
6864 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
6865 }
6866 else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
6867 ovs_be16 ethertype = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
6868 if (!is_mask) {
6869 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
6870 ntohs(ethertype));
6871 }
6872 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
6873 }
6874
6875 /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
6876 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
6877 src_flow)) {
6878 return ODP_FIT_ERROR;
6879 }
6880
6881 if (is_mask
6882 ? (src_flow->vlans[0].tci & htons(VLAN_CFI)) != 0
6883 : eth_type_vlan(src_flow->dl_type)) {
6884 return parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
6885 expected_attrs, flow, key, key_len, src_flow);
6886 }
6887 if (is_mask) {
6888 /* A missing VLAN mask means exact match on vlan_tci 0 (== no VLAN). */
6889 flow->vlans[0].tpid = htons(0xffff);
6890 flow->vlans[0].tci = htons(0xffff);
6891 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
6892 flow->vlans[0].tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
6893 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
6894 }
6895 }
6896 return parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
6897 &expected_attrs, flow, key, key_len,
6898 src_flow, true);
6899 }
6900
6901 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
6902 * structure in 'flow'. Returns an ODP_FIT_* value that indicates how well
6903 * 'key' fits our expectations for what a flow key should contain.
6904 *
6905 * The 'in_port' will be the datapath's understanding of the port. The
6906 * caller will need to translate with odp_port_to_ofp_port() if the
6907 * OpenFlow port is needed.
6908 *
6909 * This function doesn't take the packet itself as an argument because none of
6910 * the currently understood OVS_KEY_ATTR_* attributes require it. Currently,
6911 * it is always possible to infer which additional attribute(s) should appear
6912 * by looking at the attributes for lower-level protocols, e.g. if the network
6913 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
6914 * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
6915 * must be absent. */
6916 enum odp_key_fitness
6917 odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
6918 struct flow *flow)
6919 {
6920 return odp_flow_key_to_flow__(key, key_len, flow, flow);
6921 }
6922
6923 /* Converts the 'mask_key_len' bytes of OVS_KEY_ATTR_* attributes in 'mask_key'
6924 * to a mask structure in 'mask'. 'flow' must be a previously translated flow
6925 * corresponding to 'mask' and similarly flow_key/flow_key_len must be the
6926 * attributes from that flow. Returns an ODP_FIT_* value that indicates how
6927 * well 'key' fits our expectations for what a flow key should contain. */
6928 enum odp_key_fitness
6929 odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len,
6930 struct flow_wildcards *mask, const struct flow *src_flow)
6931 {
6932 if (mask_key_len) {
6933 return odp_flow_key_to_flow__(mask_key, mask_key_len,
6934 &mask->masks, src_flow);
6935
6936 } else {
6937 /* A missing mask means that the flow should be exact matched.
6938 * Generate an appropriate exact wildcard for the flow. */
6939 flow_wildcards_init_for_packet(mask, src_flow);
6940
6941 return ODP_FIT_PERFECT;
6942 }
6943 }
6944
6945 /* Converts the netlink-formatted key/mask to a match.  Fails if
6946 * odp_flow_key_from_flow()/from_mask() and odp_flow_key_to_flow()/to_mask()
6947 * disagree on the acceptable form of a flow. */
6948 int
6949 parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len,
6950 const struct nlattr *mask, size_t mask_len,
6951 struct match *match)
6952 {
6953 enum odp_key_fitness fitness;
6954
6955 fitness = odp_flow_key_to_flow(key, key_len, &match->flow);
6956 if (fitness) {
6957 /* This should not happen: it indicates that
6958 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
6959 * the acceptable form of a flow. Log the problem as an error,
6960 * with enough details to enable debugging. */
6961 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6962
6963 if (!VLOG_DROP_ERR(&rl)) {
6964 struct ds s;
6965
6966 ds_init(&s);
6967 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
6968 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
6969 ds_destroy(&s);
6970 }
6971
6972 return EINVAL;
6973 }
6974
6975 fitness = odp_flow_key_to_mask(mask, mask_len, &match->wc, &match->flow);
6976 if (fitness) {
6977 /* This should not happen: it indicates that
6978 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
6979 * disagree on the acceptable form of a mask. Log the problem
6980 * as an error, with enough details to enable debugging. */
6981 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6982
6983 if (!VLOG_DROP_ERR(&rl)) {
6984 struct ds s;
6985
6986 ds_init(&s);
6987 odp_flow_format(key, key_len, mask, mask_len, NULL, &s,
6988 true);
6989 VLOG_ERR("internal error parsing flow mask %s (%s)",
6990 ds_cstr(&s), odp_key_fitness_to_string(fitness));
6991 ds_destroy(&s);
6992 }
6993
6994 return EINVAL;
6995 }
6996
6997 return 0;
6998 }
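
/*
 * Illustrative caller sketch (the 'f' structure and its members are
 * assumptions, not a particular OVS structure): converting one datapath
 * flow dump entry into a 'struct match'.
 *
 *     struct match match;
 *
 *     if (parse_key_and_mask_to_match(f->key, f->key_len,
 *                                     f->mask, f->mask_len, &match)) {
 *         return;    (EINVAL; the problem was already logged above.)
 *     }
 */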
6999
7000 /* Returns 'fitness' as a string, for use in debug messages. */
7001 const char *
7002 odp_key_fitness_to_string(enum odp_key_fitness fitness)
7003 {
7004 switch (fitness) {
7005 case ODP_FIT_PERFECT:
7006 return "OK";
7007 case ODP_FIT_TOO_MUCH:
7008 return "too_much";
7009 case ODP_FIT_TOO_LITTLE:
7010 return "too_little";
7011 case ODP_FIT_ERROR:
7012 return "error";
7013 default:
7014 return "<unknown>";
7015 }
7016 }
7017
7018 /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
7019 * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
7020 * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
7021 * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
7022 * null, then the return value is not meaningful.) */
7023 size_t
7024 odp_put_userspace_action(uint32_t pid,
7025 const void *userdata, size_t userdata_size,
7026 odp_port_t tunnel_out_port,
7027 bool include_actions,
7028 struct ofpbuf *odp_actions)
7029 {
7030 size_t userdata_ofs;
7031 size_t offset;
7032
7033 offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
7034 nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
7035 if (userdata) {
7036 userdata_ofs = odp_actions->size + NLA_HDRLEN;
7037
7038 /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
7039 * module before Linux 3.10 required the userdata to be exactly 8 bytes
7040 * long:
7041 *
7042 * - The kernel rejected shorter userdata with -ERANGE.
7043 *
7044 * - The kernel silently dropped userdata beyond the first 8 bytes.
7045 *
7046 * Thus, for maximum compatibility, always put at least 8 bytes. (We
7047 * separately disable features that required more than 8 bytes.) */
7048 memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
7049 MAX(8, userdata_size)),
7050 userdata, userdata_size);
7051 } else {
7052 userdata_ofs = 0;
7053 }
7054 if (tunnel_out_port != ODPP_NONE) {
7055 nl_msg_put_odp_port(odp_actions, OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,
7056 tunnel_out_port);
7057 }
7058 if (include_actions) {
7059 nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS);
7060 }
7061 nl_msg_end_nested(odp_actions, offset);
7062
7063 return userdata_ofs;
7064 }
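
/*
 * Hedged example ('cookie' is a hypothetical 8-byte userdata blob, not a
 * specific OVS type): emitting a userspace action that steers matching
 * packets to the Netlink socket identified by 'pid' along with userdata.
 *
 *     uint64_t cookie = 0x1234;
 *
 *     odp_put_userspace_action(pid, &cookie, sizeof cookie,
 *                              ODPP_NONE, false, odp_actions);
 */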
7065
7066 void
7067 odp_put_pop_eth_action(struct ofpbuf *odp_actions)
7068 {
7069 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_ETH);
7070 }
7071
7072 void
7073 odp_put_push_eth_action(struct ofpbuf *odp_actions,
7074 const struct eth_addr *eth_src,
7075 const struct eth_addr *eth_dst)
7076 {
7077 struct ovs_action_push_eth eth;
7078
7079 memset(&eth, 0, sizeof eth);
7080 if (eth_src) {
7081 eth.addresses.eth_src = *eth_src;
7082 }
7083 if (eth_dst) {
7084 eth.addresses.eth_dst = *eth_dst;
7085 }
7086
7087 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_ETH,
7088 &eth, sizeof eth);
7089 }
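
/*
 * Minimal sketch (addresses are placeholders): adding an Ethernet header
 * to an L3 packet on output, the counterpart of the pop_eth action above.
 *
 *     const struct eth_addr src = ETH_ADDR_C(00,11,22,33,44,55);
 *     const struct eth_addr dst = ETH_ADDR_C(66,77,88,99,aa,bb);
 *
 *     odp_put_push_eth_action(odp_actions, &src, &dst);
 */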
7090
7091 void
7092 odp_put_tunnel_action(const struct flow_tnl *tunnel,
7093 struct ofpbuf *odp_actions, const char *tnl_type)
7094 {
7095 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7096 tun_key_to_attr(odp_actions, tunnel, tunnel, NULL, tnl_type);
7097 nl_msg_end_nested(odp_actions, offset);
7098 }
7099
7100 void
7101 odp_put_tnl_push_action(struct ofpbuf *odp_actions,
7102 struct ovs_action_push_tnl *data)
7103 {
7104 int size = offsetof(struct ovs_action_push_tnl, header);
7105
7106 size += data->header_len;
7107 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
7108 }
7109
7110 \f
7111 /* The commit_odp_actions() function and its helpers. */
7112
7113 static void
7114 commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
7115 const void *key, size_t key_size)
7116 {
7117 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7118 nl_msg_put_unspec(odp_actions, key_type, key, key_size);
7119 nl_msg_end_nested(odp_actions, offset);
7120 }
7121
7122 /* Masked set actions have a mask following the data within the netlink
7123 * attribute. The unmasked bits in the data will be cleared as the data
7124 * is copied to the action. */
7125 void
7126 commit_masked_set_action(struct ofpbuf *odp_actions,
7127 enum ovs_key_attr key_type,
7128 const void *key_, const void *mask_, size_t key_size)
7129 {
7130 size_t offset = nl_msg_start_nested(odp_actions,
7131 OVS_ACTION_ATTR_SET_MASKED);
7132 char *data = nl_msg_put_unspec_uninit(odp_actions, key_type, key_size * 2);
7133 const char *key = key_, *mask = mask_;
7134
7135 memcpy(data + key_size, mask, key_size);
7136 /* Clear unmasked bits while copying. */
7137 while (key_size--) {
7138 *data++ = *key++ & *mask++;
7139 }
7140 nl_msg_end_nested(odp_actions, offset);
7141 }
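
/*
 * Worked example (values are illustrative): rewriting only the low 16 bits
 * of the packet mark with a masked set action.
 *
 *     uint32_t mark = 0x1234;
 *     uint32_t mark_mask = 0x0000ffff;
 *
 *     commit_masked_set_action(odp_actions, OVS_KEY_ATTR_SKB_MARK,
 *                              &mark, &mark_mask, sizeof mark);
 *
 * This encodes a masked set of skb_mark (0x1234/0xffff); any bits of
 * 'mark' outside the mask are cleared in the copy placed in the action.
 */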
7142
7143 /* If any of the flow key data that ODP actions can modify are different in
7144 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
7145 * 'odp_actions' that change the flow tunneling information in key from
7146 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
7147 * same way. In other words, operates the same as commit_odp_actions(), but
7148 * only on tunneling information. */
7149 void
7150 commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
7151 struct ofpbuf *odp_actions, const char *tnl_type)
7152 {
7153 /* A valid IPV4_TUNNEL must have non-zero ip_dst; a valid IPv6 tunnel
7154 * must have non-zero ipv6_dst. */
7155 if (flow_tnl_dst_is_set(&flow->tunnel)) {
7156 if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
7157 return;
7158 }
7159 memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
7160 odp_put_tunnel_action(&base->tunnel, odp_actions, tnl_type);
7161 }
7162 }
7163
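/* Shared helper for the commit_set_*_action() functions below: if the
 * 'size' bytes at 'key' differ from those at 'base', appends either a
 * masked set action (when 'use_masked_set' is true and the mask is not
 * exact) or a plain set action to 'odp_actions', copies 'key' into 'base',
 * and returns true.  Returns false, emitting nothing, if the value is
 * unchanged. */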
7164 static bool
7165 commit(enum ovs_key_attr attr, bool use_masked_set,
7166 const void *key, void *base, void *mask, size_t size,
7167 struct ofpbuf *odp_actions)
7168 {
7169 if (memcmp(key, base, size)) {
7170 bool fully_masked = odp_mask_is_exact(attr, mask, size);
7171
7172 if (use_masked_set && !fully_masked) {
7173 commit_masked_set_action(odp_actions, attr, key, mask, size);
7174 } else {
7175 if (!fully_masked) {
7176 memset(mask, 0xff, size);
7177 }
7178 commit_set_action(odp_actions, attr, key, size);
7179 }
7180 memcpy(base, key, size);
7181 return true;
7182 } else {
7183 /* Mask bits are set when we have either read or set the corresponding
7184 * values. Masked bits will be exact-matched, no need to set them
7185 * if the value did not actually change. */
7186 return false;
7187 }
7188 }
7189
7190 static void
7191 get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
7192 {
7193 eth->eth_src = flow->dl_src;
7194 eth->eth_dst = flow->dl_dst;
7195 }
7196
7197 static void
7198 put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
7199 {
7200 flow->dl_src = eth->eth_src;
7201 flow->dl_dst = eth->eth_dst;
7202 }
7203
7204 static void
7205 commit_set_ether_action(const struct flow *flow, struct flow *base_flow,
7206 struct ofpbuf *odp_actions,
7207 struct flow_wildcards *wc,
7208 bool use_masked)
7209 {
7210 struct ovs_key_ethernet key, base, mask;
7211
7212 if (flow->packet_type != htonl(PT_ETH)) {
7213 return;
7214 }
7215
7216 get_ethernet_key(flow, &key);
7217 get_ethernet_key(base_flow, &base);
7218 get_ethernet_key(&wc->masks, &mask);
7219
7220 if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
7221 &key, &base, &mask, sizeof key, odp_actions)) {
7222 put_ethernet_key(&base, base_flow);
7223 put_ethernet_key(&mask, &wc->masks);
7224 }
7225 }
7226
7227 static void
7228 commit_vlan_action(const struct flow* flow, struct flow *base,
7229 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7230 {
7231 int base_n = flow_count_vlan_headers(base);
7232 int flow_n = flow_count_vlan_headers(flow);
7233 flow_skip_common_vlan_headers(base, &base_n, flow, &flow_n);
7234
7235 /* Pop all mismatching VLANs of base, push those of flow. */
7236 for (; base_n >= 0; base_n--) {
7237 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
7238 wc->masks.vlans[base_n].qtag = OVS_BE32_MAX;
7239 }
7240
7241 for (; flow_n >= 0; flow_n--) {
7242 struct ovs_action_push_vlan vlan;
7243
7244 vlan.vlan_tpid = flow->vlans[flow_n].tpid;
7245 vlan.vlan_tci = flow->vlans[flow_n].tci;
7246 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
7247 &vlan, sizeof vlan);
7248 }
7249 memcpy(base->vlans, flow->vlans, sizeof(base->vlans));
7250 }
7251
7252 /* Wildcarding already done at action translation time. */
7253 static void
7254 commit_mpls_action(const struct flow *flow, struct flow *base,
7255 struct ofpbuf *odp_actions)
7256 {
7257 int base_n = flow_count_mpls_labels(base, NULL);
7258 int flow_n = flow_count_mpls_labels(flow, NULL);
7259 int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
7260 NULL);
7261
7262 while (base_n > common_n) {
7263 if (base_n - 1 == common_n && flow_n > common_n) {
7264 /* If base has exactly one more LSE than it has in common with
7265 * flow, and flow has at least one more LSE than is common, then
7266 * the topmost LSE of base may be updated in place using a set
7267 * action. */
7268 struct ovs_key_mpls mpls_key;
7269
7270 mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
7271 commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
7272 &mpls_key, sizeof mpls_key);
7273 flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
7274 common_n++;
7275 } else {
7276 /* Otherwise, if there are more LSEs in base than are common between
7277 * base and flow then pop the topmost one. */
7278 ovs_be16 dl_type;
7279 /* If all the LSEs are to be popped and this is not the outermost
7280 * LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
7281 * POP_MPLS action instead of flow->dl_type.
7282 *
7283 * This is because the POP_MPLS action requires its ethertype
7284 * argument to be an MPLS ethernet type but in this case
7285 * flow->dl_type will be a non-MPLS ethernet type.
7286 *
7287 * When the final POP_MPLS action occurs it uses flow->dl_type, and
7288 * the resulting packet will have the desired dl_type. */
7289 if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
7290 dl_type = htons(ETH_TYPE_MPLS);
7291 } else {
7292 dl_type = flow->dl_type;
7293 }
7294 nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
7295 ovs_assert(flow_pop_mpls(base, base_n, flow->dl_type, NULL));
7296 base_n--;
7297 }
7298 }
7299
7300 /* If, after the above popping and setting, there are more LSEs in flow
7301 * than base then some LSEs need to be pushed. */
7302 while (base_n < flow_n) {
7303 struct ovs_action_push_mpls *mpls;
7304
7305 mpls = nl_msg_put_unspec_zero(odp_actions,
7306 OVS_ACTION_ATTR_PUSH_MPLS,
7307 sizeof *mpls);
7308 mpls->mpls_ethertype = flow->dl_type;
7309 mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
7310 /* Update base flow's MPLS stack, but do not clear L3. We need the L3
7311 * headers if the flow is restored later due to returning from a patch
7312 * port or group bucket. */
7313 flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL, false);
7314 flow_set_mpls_lse(base, 0, mpls->mpls_lse);
7315 base_n++;
7316 }
7317 }
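
/*
 * Worked example (illustrative, not taken from a test case): if 'base'
 * carries two MPLS LSEs and 'flow' carries none, the loop above emits two
 * POP_MPLS actions.  The first uses ETH_TYPE_MPLS as its ethertype
 * argument because another label is still underneath; the second uses
 * flow->dl_type, leaving the packet with its final ethertype.
 */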
7318
7319 static void
7320 get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
7321 {
7322 ipv4->ipv4_src = flow->nw_src;
7323 ipv4->ipv4_dst = flow->nw_dst;
7324 ipv4->ipv4_proto = flow->nw_proto;
7325 ipv4->ipv4_tos = flow->nw_tos;
7326 ipv4->ipv4_ttl = flow->nw_ttl;
7327 ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7328 }
7329
7330 static void
7331 put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
7332 {
7333 flow->nw_src = ipv4->ipv4_src;
7334 flow->nw_dst = ipv4->ipv4_dst;
7335 flow->nw_proto = ipv4->ipv4_proto;
7336 flow->nw_tos = ipv4->ipv4_tos;
7337 flow->nw_ttl = ipv4->ipv4_ttl;
7338 flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
7339 }
7340
7341 static void
7342 commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
7343 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7344 bool use_masked)
7345 {
7346 struct ovs_key_ipv4 key, mask, base;
7347
7348 /* Check that nw_proto and nw_frag remain unchanged. */
7349 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7350 flow->nw_frag == base_flow->nw_frag);
7351
7352 get_ipv4_key(flow, &key, false);
7353 get_ipv4_key(base_flow, &base, false);
7354 get_ipv4_key(&wc->masks, &mask, true);
7355 mask.ipv4_proto = 0; /* Not writable. */
7356 mask.ipv4_frag = 0; /* Not writable. */
7357
7358 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7359 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7360 mask.ipv4_tos &= ~IP_ECN_MASK;
7361 }
7362
7363 if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
7364 odp_actions)) {
7365 put_ipv4_key(&base, base_flow, false);
7366 if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
7367 put_ipv4_key(&mask, &wc->masks, true);
7368 }
7369 }
7370 }
7371
7372 static void
7373 get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
7374 {
7375 ipv6->ipv6_src = flow->ipv6_src;
7376 ipv6->ipv6_dst = flow->ipv6_dst;
7377 ipv6->ipv6_label = flow->ipv6_label;
7378 ipv6->ipv6_proto = flow->nw_proto;
7379 ipv6->ipv6_tclass = flow->nw_tos;
7380 ipv6->ipv6_hlimit = flow->nw_ttl;
7381 ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7382 }
7383
7384 static void
7385 put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
7386 {
7387 flow->ipv6_src = ipv6->ipv6_src;
7388 flow->ipv6_dst = ipv6->ipv6_dst;
7389 flow->ipv6_label = ipv6->ipv6_label;
7390 flow->nw_proto = ipv6->ipv6_proto;
7391 flow->nw_tos = ipv6->ipv6_tclass;
7392 flow->nw_ttl = ipv6->ipv6_hlimit;
7393 flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
7394 }
7395
7396 static void
7397 commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
7398 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7399 bool use_masked)
7400 {
7401 struct ovs_key_ipv6 key, mask, base;
7402
7403 /* Check that nw_proto and nw_frag remain unchanged. */
7404 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7405 flow->nw_frag == base_flow->nw_frag);
7406
7407 get_ipv6_key(flow, &key, false);
7408 get_ipv6_key(base_flow, &base, false);
7409 get_ipv6_key(&wc->masks, &mask, true);
7410 mask.ipv6_proto = 0; /* Not writable. */
7411 mask.ipv6_frag = 0; /* Not writable. */
7412 mask.ipv6_label &= htonl(IPV6_LABEL_MASK); /* Not writable. */
7413
7414 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7415 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7416 mask.ipv6_tclass &= ~IP_ECN_MASK;
7417 }
7418
7419 if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
7420 odp_actions)) {
7421 put_ipv6_key(&base, base_flow, false);
7422 if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
7423 put_ipv6_key(&mask, &wc->masks, true);
7424 }
7425 }
7426 }
7427
7428 static void
7429 get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
7430 {
7431 /* ARP key has padding, clear it. */
7432 memset(arp, 0, sizeof *arp);
7433
7434 arp->arp_sip = flow->nw_src;
7435 arp->arp_tip = flow->nw_dst;
7436 arp->arp_op = htons(flow->nw_proto);
7437 arp->arp_sha = flow->arp_sha;
7438 arp->arp_tha = flow->arp_tha;
7439 }
7440
7441 static void
7442 put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
7443 {
7444 flow->nw_src = arp->arp_sip;
7445 flow->nw_dst = arp->arp_tip;
7446 flow->nw_proto = ntohs(arp->arp_op);
7447 flow->arp_sha = arp->arp_sha;
7448 flow->arp_tha = arp->arp_tha;
7449 }
7450
7451 static enum slow_path_reason
7452 commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
7453 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7454 {
7455 struct ovs_key_arp key, mask, base;
7456
7457 get_arp_key(flow, &key);
7458 get_arp_key(base_flow, &base);
7459 get_arp_key(&wc->masks, &mask);
7460
7461 if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
7462 odp_actions)) {
7463 put_arp_key(&base, base_flow);
7464 put_arp_key(&mask, &wc->masks);
7465 return SLOW_ACTION;
7466 }
7467 return 0;
7468 }
7469
7470 static void
7471 get_icmp_key(const struct flow *flow, struct ovs_key_icmp *icmp)
7472 {
7473 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
7474 icmp->icmp_type = ntohs(flow->tp_src);
7475 icmp->icmp_code = ntohs(flow->tp_dst);
7476 }
7477
7478 static void
7479 put_icmp_key(const struct ovs_key_icmp *icmp, struct flow *flow)
7480 {
7481 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
7482 flow->tp_src = htons(icmp->icmp_type);
7483 flow->tp_dst = htons(icmp->icmp_code);
7484 }
7485
7486 static enum slow_path_reason
7487 commit_set_icmp_action(const struct flow *flow, struct flow *base_flow,
7488 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7489 {
7490 struct ovs_key_icmp key, mask, base;
7491 enum ovs_key_attr attr;
7492
7493 if (is_icmpv4(flow, NULL)) {
7494 attr = OVS_KEY_ATTR_ICMP;
7495 } else if (is_icmpv6(flow, NULL)) {
7496 attr = OVS_KEY_ATTR_ICMPV6;
7497 } else {
7498 return 0;
7499 }
7500
7501 get_icmp_key(flow, &key);
7502 get_icmp_key(base_flow, &base);
7503 get_icmp_key(&wc->masks, &mask);
7504
7505 if (commit(attr, false, &key, &base, &mask, sizeof key, odp_actions)) {
7506 put_icmp_key(&base, base_flow);
7507 put_icmp_key(&mask, &wc->masks);
7508 return SLOW_ACTION;
7509 }
7510 return 0;
7511 }
7512
7513 static void
7514 get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
7515 {
7516 nd->nd_target = flow->nd_target;
7517 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
7518 nd->nd_sll = flow->arp_sha;
7519 nd->nd_tll = flow->arp_tha;
7520 }
7521
7522 static void
7523 put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
7524 {
7525 flow->nd_target = nd->nd_target;
7526 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
7527 flow->arp_sha = nd->nd_sll;
7528 flow->arp_tha = nd->nd_tll;
7529 }
7530
7531 static void
7532 get_nd_extensions_key(const struct flow *flow,
7533 struct ovs_key_nd_extensions *nd_ext)
7534 {
7535 /* ND Extensions key has padding, clear it. */
7536 memset(nd_ext, 0, sizeof *nd_ext);
7537 nd_ext->nd_reserved = flow->igmp_group_ip4;
7538 nd_ext->nd_options_type = ntohs(flow->tcp_flags);
7539 }
7540
7541 static void
7542 put_nd_extensions_key(const struct ovs_key_nd_extensions *nd_ext,
7543 struct flow *flow)
7544 {
7545 flow->igmp_group_ip4 = nd_ext->nd_reserved;
7546 flow->tcp_flags = htons(nd_ext->nd_options_type);
7547 }
7548
7549 static enum slow_path_reason
7550 commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
7551 struct ofpbuf *odp_actions,
7552 struct flow_wildcards *wc, bool use_masked)
7553 {
7554 struct ovs_key_nd key, mask, base;
7555
7556 get_nd_key(flow, &key);
7557 get_nd_key(base_flow, &base);
7558 get_nd_key(&wc->masks, &mask);
7559
7560 if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
7561 odp_actions)) {
7562 put_nd_key(&base, base_flow);
7563 put_nd_key(&mask, &wc->masks);
7564 return SLOW_ACTION;
7565 }
7566
7567 return 0;
7568 }
7569
7570 static enum slow_path_reason
7571 commit_set_nd_extensions_action(const struct flow *flow,
7572 struct flow *base_flow,
7573 struct ofpbuf *odp_actions,
7574 struct flow_wildcards *wc, bool use_masked)
7575 {
7576 struct ovs_key_nd_extensions key, mask, base;
7577
7578 get_nd_extensions_key(flow, &key);
7579 get_nd_extensions_key(base_flow, &base);
7580 get_nd_extensions_key(&wc->masks, &mask);
7581
7582 if (commit(OVS_KEY_ATTR_ND_EXTENSIONS, use_masked, &key,
7583 &base, &mask, sizeof key, odp_actions)) {
7584 put_nd_extensions_key(&base, base_flow);
7585 put_nd_extensions_key(&mask, &wc->masks);
7586 return SLOW_ACTION;
7587 }
7588 return 0;
7589 }
7590
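/* Illustrative note (a sketch, not part of the original file): struct flow
 * has no dedicated members for the ICMPv6 ND reserved field and option type,
 * so they ride in fields that are unused for ICMPv6 traffic, as the get/put
 * helpers above show:
 *
 *     flow->igmp_group_ip4  <->  nd_ext->nd_reserved
 *     flow->tcp_flags       <->  nd_ext->nd_options_type
 *
 * Consequently a rewrite of either field is committed through
 * OVS_KEY_ATTR_ND_EXTENSIONS rather than through the plain ND key. */
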
7591 static enum slow_path_reason
7592 commit_set_nw_action(const struct flow *flow, struct flow *base,
7593 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7594 bool use_masked)
7595 {
7596 uint32_t reason;
7597
7598 /* Check if 'flow' really has an L3 header. */
7599 if (!flow->nw_proto) {
7600 return 0;
7601 }
7602
7603 switch (ntohs(base->dl_type)) {
7604 case ETH_TYPE_IP:
7605 commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
7606 break;
7607
7608 case ETH_TYPE_IPV6:
7609 commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
7610 if (base->nw_proto == IPPROTO_ICMPV6) {
7611 /* Commit extended attrs first to make sure
7612 * correct options are added. */
7613 reason = commit_set_nd_extensions_action(flow, base,
7614 odp_actions, wc, use_masked);
7615 reason |= commit_set_nd_action(flow, base, odp_actions,
7616 wc, use_masked);
7617 return reason;
7618 }
7619 break;
7620
7621 case ETH_TYPE_ARP:
7622 return commit_set_arp_action(flow, base, odp_actions, wc);
7623 }
7624
7625 return 0;
7626 }
7627
7628 static inline void
7629 get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh, bool is_mask)
7630 {
7631 *nsh = flow->nsh;
7632 if (!is_mask) {
7633 if (nsh->mdtype != NSH_M_TYPE1) {
7634 memset(nsh->context, 0, sizeof(nsh->context));
7635 }
7636 }
7637 }
7638
7639 static inline void
7640 put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
7641 bool is_mask OVS_UNUSED)
7642 {
7643 flow->nsh = *nsh;
7644 if (flow->nsh.mdtype != NSH_M_TYPE1) {
7645 memset(flow->nsh.context, 0, sizeof(flow->nsh.context));
7646 }
7647 }
7648
7649 static bool
7650 commit_nsh(const struct ovs_key_nsh *flow_nsh, bool use_masked_set,
7651 const struct ovs_key_nsh *key, struct ovs_key_nsh *base,
7652 struct ovs_key_nsh *mask, size_t size,
7653 struct ofpbuf *odp_actions)
7654 {
7655 enum ovs_key_attr attr = OVS_KEY_ATTR_NSH;
7656
7657 if (memcmp(key, base, size) == 0) {
7658 /* Mask bits are set when we have either read or set the corresponding
7659 * values. Masked bits will be exact-matched, no need to set them
7660 * if the value did not actually change. */
7661 return false;
7662 }
7663
7664 bool fully_masked = odp_mask_is_exact(attr, mask, size);
7665
7666 if (use_masked_set && !fully_masked) {
7667 size_t nsh_key_ofs;
7668 struct ovs_nsh_key_base nsh_base;
7669 struct ovs_nsh_key_base nsh_base_mask;
7670 struct ovs_nsh_key_md1 md1;
7671 struct ovs_nsh_key_md1 md1_mask;
7672 size_t offset = nl_msg_start_nested(odp_actions,
7673 OVS_ACTION_ATTR_SET_MASKED);
7674
7675 nsh_base.flags = key->flags;
7676 nsh_base.ttl = key->ttl;
7677 nsh_base.mdtype = key->mdtype;
7678 nsh_base.np = key->np;
7679 nsh_base.path_hdr = key->path_hdr;
7680
7681 nsh_base_mask.flags = mask->flags;
7682 nsh_base_mask.ttl = mask->ttl;
7683 nsh_base_mask.mdtype = mask->mdtype;
7684 nsh_base_mask.np = mask->np;
7685 nsh_base_mask.path_hdr = mask->path_hdr;
7686
7687 /* OVS_KEY_ATTR_NSH keys */
7688 nsh_key_ofs = nl_msg_start_nested(odp_actions, OVS_KEY_ATTR_NSH);
7689
7690 /* put value and mask for OVS_NSH_KEY_ATTR_BASE */
7691 char *data = nl_msg_put_unspec_uninit(odp_actions,
7692 OVS_NSH_KEY_ATTR_BASE,
7693 2 * sizeof(nsh_base));
7694 const char *lkey = (char *)&nsh_base, *lmask = (char *)&nsh_base_mask;
7695 size_t lkey_size = sizeof(nsh_base);
7696
7697 while (lkey_size--) {
7698 *data++ = *lkey++ & *lmask++;
7699 }
7700 lmask = (char *)&nsh_base_mask;
7701 memcpy(data, lmask, sizeof(nsh_base_mask));
7702
7703 switch (key->mdtype) {
7704 case NSH_M_TYPE1:
7705 memcpy(md1.context, key->context, sizeof key->context);
7706 memcpy(md1_mask.context, mask->context, sizeof mask->context);
7707
7708 /* put value and mask for OVS_NSH_KEY_ATTR_MD1 */
7709 data = nl_msg_put_unspec_uninit(odp_actions,
7710 OVS_NSH_KEY_ATTR_MD1,
7711 2 * sizeof(md1));
7712 lkey = (char *)&md1;
7713 lmask = (char *)&md1_mask;
7714 lkey_size = sizeof(md1);
7715
7716 while (lkey_size--) {
7717 *data++ = *lkey++ & *lmask++;
7718 }
7719 lmask = (char *)&md1_mask;
7720 memcpy(data, lmask, sizeof(md1_mask));
7721 break;
7722 case NSH_M_TYPE2:
7723 default:
7724 /* No match support for other MD formats yet. */
7725 break;
7726 }
7727
7728 nl_msg_end_nested(odp_actions, nsh_key_ofs);
7729
7730 nl_msg_end_nested(odp_actions, offset);
7731 } else {
7732 if (!fully_masked) {
7733 memset(mask, 0xff, size);
7734 }
7735 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7736 nsh_key_to_attr(odp_actions, flow_nsh, NULL, 0, false);
7737 nl_msg_end_nested(odp_actions, offset);
7738 }
7739 memcpy(base, key, size);
7740 return true;
7741 }
7742
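/* Illustrative sketch (not part of the original file): for a masked NSH
 * rewrite the nested netlink message built above looks roughly like this,
 * where each leaf attribute carries the value bytes ANDed with the mask,
 * immediately followed by the mask bytes:
 *
 *     OVS_ACTION_ATTR_SET_MASKED
 *       OVS_KEY_ATTR_NSH
 *         OVS_NSH_KEY_ATTR_BASE  = (value & mask) bytes, then mask bytes
 *                                  (flags, ttl, mdtype, np, path_hdr)
 *         OVS_NSH_KEY_ATTR_MD1   = (value & mask) bytes, then mask bytes
 *                                  (context[], NSH_M_TYPE1 metadata only)
 *
 * In the non-masked fallback branch the whole key is emitted instead as
 *
 *     OVS_ACTION_ATTR_SET
 *       OVS_KEY_ATTR_NSH(...)
 *
 * built by nsh_key_to_attr(). */
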
7743 static void
7744 commit_set_nsh_action(const struct flow *flow, struct flow *base_flow,
7745 struct ofpbuf *odp_actions,
7746 struct flow_wildcards *wc,
7747 bool use_masked)
7748 {
7749 struct ovs_key_nsh key, mask, base;
7750
7751 if (flow->dl_type != htons(ETH_TYPE_NSH) ||
7752 !memcmp(&base_flow->nsh, &flow->nsh, sizeof base_flow->nsh)) {
7753 return;
7754 }
7755
7756 /* Check that mdtype and np remain unchanged. */
7757 ovs_assert(flow->nsh.mdtype == base_flow->nsh.mdtype &&
7758 flow->nsh.np == base_flow->nsh.np);
7759
7760 get_nsh_key(flow, &key, false);
7761 get_nsh_key(base_flow, &base, false);
7762 get_nsh_key(&wc->masks, &mask, true);
7763 mask.mdtype = 0; /* Not writable. */
7764 mask.np = 0; /* Not writable. */
7765
7766 if (commit_nsh(&base_flow->nsh, use_masked, &key, &base, &mask,
7767 sizeof key, odp_actions)) {
7768 put_nsh_key(&base, base_flow, false);
7769 if (mask.mdtype != 0) { /* Mask was changed by commit_nsh(). */
7770 put_nsh_key(&mask, &wc->masks, true);
7771 }
7772 }
7773 }
7774
7775 /* TCP, UDP, and SCTP keys have the same layout. */
7776 BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
7777 sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
7778
7779 static void
7780 get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
7781 {
7782 tp->tcp.tcp_src = flow->tp_src;
7783 tp->tcp.tcp_dst = flow->tp_dst;
7784 }
7785
7786 static void
7787 put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
7788 {
7789 flow->tp_src = tp->tcp.tcp_src;
7790 flow->tp_dst = tp->tcp.tcp_dst;
7791 }
7792
7793 static void
7794 commit_set_port_action(const struct flow *flow, struct flow *base_flow,
7795 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7796 bool use_masked)
7797 {
7798 enum ovs_key_attr key_type;
7799 union ovs_key_tp key, mask, base;
7800
7801 /* Check if 'flow' really has an L3 header. */
7802 if (!flow->nw_proto) {
7803 return;
7804 }
7805
7806 if (!is_ip_any(base_flow)) {
7807 return;
7808 }
7809
7810 if (flow->nw_proto == IPPROTO_TCP) {
7811 key_type = OVS_KEY_ATTR_TCP;
7812 } else if (flow->nw_proto == IPPROTO_UDP) {
7813 key_type = OVS_KEY_ATTR_UDP;
7814 } else if (flow->nw_proto == IPPROTO_SCTP) {
7815 key_type = OVS_KEY_ATTR_SCTP;
7816 } else {
7817 return;
7818 }
7819
7820 get_tp_key(flow, &key);
7821 get_tp_key(base_flow, &base);
7822 get_tp_key(&wc->masks, &mask);
7823
7824 if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
7825 odp_actions)) {
7826 put_tp_key(&base, base_flow);
7827 put_tp_key(&mask, &wc->masks);
7828 }
7829 }
7830
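/* Illustrative note (a sketch, not part of the original file): because the
 * BUILD_ASSERT_DECL above guarantees that the TCP, UDP and SCTP keys share
 * one layout (a 16-bit source port followed by a 16-bit destination port),
 * a single get_tp_key()/put_tp_key() pair and a single commit() call serve
 * all three protocols; only the attribute type differs:
 *
 *     union ovs_key_tp tp;
 *     tp.tcp.tcp_src = flow->tp_src;   // the same bytes serve UDP and SCTP
 *     tp.tcp.tcp_dst = flow->tp_dst;
 *
 * so commit(OVS_KEY_ATTR_UDP, ...) or commit(OVS_KEY_ATTR_SCTP, ...) can be
 * handed the very same buffer. */
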
7831 static void
7832 commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
7833 struct ofpbuf *odp_actions,
7834 struct flow_wildcards *wc,
7835 bool use_masked)
7836 {
7837 uint32_t key, mask, base;
7838
7839 key = flow->skb_priority;
7840 base = base_flow->skb_priority;
7841 mask = wc->masks.skb_priority;
7842
7843 if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
7844 sizeof key, odp_actions)) {
7845 base_flow->skb_priority = base;
7846 wc->masks.skb_priority = mask;
7847 }
7848 }
7849
7850 static void
7851 commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
7852 struct ofpbuf *odp_actions,
7853 struct flow_wildcards *wc,
7854 bool use_masked)
7855 {
7856 uint32_t key, mask, base;
7857
7858 key = flow->pkt_mark;
7859 base = base_flow->pkt_mark;
7860 mask = wc->masks.pkt_mark;
7861
7862 if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
7863 sizeof key, odp_actions)) {
7864 base_flow->pkt_mark = base;
7865 wc->masks.pkt_mark = mask;
7866 }
7867 }
7868
7869 static void
7870 odp_put_pop_nsh_action(struct ofpbuf *odp_actions)
7871 {
7872 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_NSH);
7873 }
7874
7875 static void
7876 odp_put_push_nsh_action(struct ofpbuf *odp_actions,
7877 const struct flow *flow,
7878 struct ofpbuf *encap_data)
7879 {
7880 uint8_t *metadata = NULL;
7881 uint8_t md_size = 0;
7882
7883 switch (flow->nsh.mdtype) {
7884 case NSH_M_TYPE2:
7885 if (encap_data) {
7886 ovs_assert(encap_data->size < NSH_CTX_HDRS_MAX_LEN);
7887 metadata = encap_data->data;
7888 md_size = encap_data->size;
7889 } else {
7890 md_size = 0;
7891 }
7892 break;
7893 default:
7894 md_size = 0;
7895 break;
7896 }
7897 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_PUSH_NSH);
7898 nsh_key_to_attr(odp_actions, &flow->nsh, metadata, md_size, false);
7899 nl_msg_end_nested(odp_actions, offset);
7900 }
7901
7902 static void
7903 commit_encap_decap_action(const struct flow *flow,
7904 struct flow *base_flow,
7905 struct ofpbuf *odp_actions,
7906 struct flow_wildcards *wc,
7907 bool pending_encap, bool pending_decap,
7908 struct ofpbuf *encap_data)
7909 {
7910 if (pending_encap) {
7911 switch (ntohl(flow->packet_type)) {
7912 case PT_ETH: {
7913 /* push_eth */
7914 odp_put_push_eth_action(odp_actions, &flow->dl_src,
7915 &flow->dl_dst);
7916 base_flow->packet_type = flow->packet_type;
7917 base_flow->dl_src = flow->dl_src;
7918 base_flow->dl_dst = flow->dl_dst;
7919 break;
7920 }
7921 case PT_NSH:
7922 /* push_nsh */
7923 odp_put_push_nsh_action(odp_actions, flow, encap_data);
7924 base_flow->packet_type = flow->packet_type;
7925 /* Update all packet headers in base_flow. */
7926 memcpy(&base_flow->dl_dst, &flow->dl_dst,
7927 sizeof(*flow) - offsetof(struct flow, dl_dst));
7928 break;
7929 default:
7930 /* Only the above protocols are supported for encap.
7931 * The check is done at action translation. */
7932 OVS_NOT_REACHED();
7933 }
7934 } else if (pending_decap || flow->packet_type != base_flow->packet_type) {
7935 /* This is an explicit or implicit decap case. */
7936 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE &&
7937 base_flow->packet_type == htonl(PT_ETH)) {
7938 /* Generate pop_eth and continue without recirculation. */
7939 odp_put_pop_eth_action(odp_actions);
7940 base_flow->packet_type = flow->packet_type;
7941 base_flow->dl_src = eth_addr_zero;
7942 base_flow->dl_dst = eth_addr_zero;
7943 } else {
7944 /* All other decap cases require recirculation.
7945 * No need to update the base flow here. */
7946 switch (ntohl(base_flow->packet_type)) {
7947 case PT_NSH:
7948 /* pop_nsh. */
7949 odp_put_pop_nsh_action(odp_actions);
7950 break;
7951 default:
7952 /* Checks are done during translation. */
7953 OVS_NOT_REACHED();
7954 }
7955 }
7956 }
7957
7958 wc->masks.packet_type = OVS_BE32_MAX;
7959 }
7960
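/* Illustrative summary (a sketch, not part of the original file) of which
 * datapath action the function above emits:
 *
 *     pending_encap, packet_type PT_ETH    -> OVS_ACTION_ATTR_PUSH_ETH
 *     pending_encap, packet_type PT_NSH    -> OVS_ACTION_ATTR_PUSH_NSH
 *     decap to an Ethertype payload from
 *       a PT_ETH base flow                 -> OVS_ACTION_ATTR_POP_ETH
 *     decap from a PT_NSH base flow        -> OVS_ACTION_ATTR_POP_NSH
 *                                             (recirculation required,
 *                                              handled by the caller)
 *
 * In every case packet_type is un-wildcarded so that the resulting datapath
 * flow matches on it. */
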
7961 /* If any of the flow key data that ODP actions can modify are different in
7962 * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
7963 * key from 'base' into 'flow', and then changes 'base' the same way. Does not
7964 * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
7965 * in addition to this function if needed. Sets fields in 'wc' that are
7966 * used as part of the action.
7967 *
7968 * Returns a reason to force processing the flow's packets into the userspace
7969 * slow path, if there is one, otherwise 0. */
7970 enum slow_path_reason
7971 commit_odp_actions(const struct flow *flow, struct flow *base,
7972 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7973 bool use_masked, bool pending_encap, bool pending_decap,
7974 struct ofpbuf *encap_data)
7975 {
7976 enum slow_path_reason slow1, slow2;
7977 bool mpls_done = false;
7978
7979 commit_encap_decap_action(flow, base, odp_actions, wc,
7980 pending_encap, pending_decap, encap_data);
7981 commit_set_ether_action(flow, base, odp_actions, wc, use_masked);
7982 /* Make packet a non-MPLS packet before committing L3/4 actions,
7983 * which would otherwise do nothing. */
7984 if (eth_type_mpls(base->dl_type) && !eth_type_mpls(flow->dl_type)) {
7985 commit_mpls_action(flow, base, odp_actions);
7986 mpls_done = true;
7987 }
7988 commit_set_nsh_action(flow, base, odp_actions, wc, use_masked);
7989 slow1 = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
7990 commit_set_port_action(flow, base, odp_actions, wc, use_masked);
7991 slow2 = commit_set_icmp_action(flow, base, odp_actions, wc);
7992 if (!mpls_done) {
7993 commit_mpls_action(flow, base, odp_actions);
7994 }
7995 commit_vlan_action(flow, base, odp_actions, wc);
7996 commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
7997 commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);
7998
7999 return slow1 ? slow1 : slow2;
8000 }
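
/* Illustrative usage (a sketch, not part of the original file; the structure
 * and field names below are assumptions modeled on the xlate layer, not an
 * exact quote of it).  A typical caller accumulates the slow-path reason and
 * lets this function append to the datapath action list it is building:
 *
 *     static void
 *     example_commit(struct xlate_ctx *ctx)
 *     {
 *         bool use_masked = ctx->xbridge->support.masked_set_action;
 *
 *         ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow,
 *                                               &ctx->base_flow,
 *                                               ctx->odp_actions, ctx->wc,
 *                                               use_masked,
 *                                               ctx->pending_encap,
 *                                               ctx->pending_decap,
 *                                               ctx->encap_data);
 *         ctx->pending_encap = false;
 *         ctx->pending_decap = false;
 *     }
 */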