1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2019 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include <sys/types.h>
19 #include <netinet/in.h>
20 #include <arpa/inet.h>
21 #include "odp-util.h"
22 #include <errno.h>
23 #include <inttypes.h>
24 #include <math.h>
25 #include <netinet/icmp6.h>
26 #include <netinet/ip6.h>
27 #include <stdlib.h>
28 #include <string.h>
29
30 #include "byte-order.h"
31 #include "coverage.h"
32 #include "dpif.h"
33 #include "openvswitch/dynamic-string.h"
34 #include "flow.h"
35 #include "netlink.h"
36 #include "openvswitch/ofpbuf.h"
37 #include "packets.h"
38 #include "simap.h"
39 #include "timeval.h"
40 #include "tun-metadata.h"
41 #include "unaligned.h"
42 #include "util.h"
43 #include "uuid.h"
44 #include "openvswitch/vlog.h"
45 #include "openvswitch/match.h"
46 #include "odp-netlink-macros.h"
47 #include "csum.h"
48
49 VLOG_DEFINE_THIS_MODULE(odp_util);
50
51 /* The interface between userspace and kernel uses an "OVS_*" prefix.
52 * Since this is fairly non-specific for the OVS userspace components,
53 * "ODP_*" (Open vSwitch Datapath) is used as the prefix for
54 * interactions with the datapath.
55 */
56
57 /* The set of characters that may separate one action or one key attribute
58 * from another. */
59 static const char *delimiters = ", \t\r\n";
60 static const char *delimiters_end = ", \t\r\n)";
61
62 #define MAX_ODP_NESTED 32
63
64 struct parse_odp_context {
65 const struct simap *port_names;
66 int depth; /* Current nested depth of odp string. */
67 };
68
69 static int parse_odp_key_mask_attr(struct parse_odp_context *, const char *,
70 struct ofpbuf *, struct ofpbuf *);
71
72 static int parse_odp_key_mask_attr__(struct parse_odp_context *, const char *,
73 struct ofpbuf *, struct ofpbuf *);
74
75 static void format_odp_key_attr(const struct nlattr *a,
76 const struct nlattr *ma,
77 const struct hmap *portno_names, struct ds *ds,
78 bool verbose);
79
80 struct geneve_scan {
81 struct geneve_opt d[63];
82 int len;
83 };
84
85 static int scan_geneve(const char *s, struct geneve_scan *key,
86 struct geneve_scan *mask);
87 static void format_geneve_opts(const struct geneve_opt *opt,
88 const struct geneve_opt *mask, int opts_len,
89 struct ds *, bool verbose);
90
91 static struct nlattr *generate_all_wildcard_mask(const struct attr_len_tbl tbl[],
92 int max, struct ofpbuf *,
93 const struct nlattr *key);
94 static void format_u128(struct ds *d, const ovs_32aligned_u128 *key,
95 const ovs_32aligned_u128 *mask, bool verbose);
96 static int scan_u128(const char *s, ovs_u128 *value, ovs_u128 *mask);
97
98 static int parse_odp_action(struct parse_odp_context *context, const char *s,
99 struct ofpbuf *actions);
100
101 static int parse_odp_action__(struct parse_odp_context *context, const char *s,
102 struct ofpbuf *actions);
103
104 /* Returns one of the following for the action with the given OVS_ACTION_ATTR_*
105 * 'type':
106 *
107 * - For an action whose argument has a fixed length, returns that
108 * nonnegative length in bytes.
109 *
110 * - For an action with a variable-length argument, returns ATTR_LEN_VARIABLE.
111 *
112 * - For an invalid 'type', returns ATTR_LEN_INVALID. */
113 static int
114 odp_action_len(uint16_t type)
115 {
116 if (type > OVS_ACTION_ATTR_MAX) {
117 return -1;
118 }
119
120 switch ((enum ovs_action_attr) type) {
121 case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
122 case OVS_ACTION_ATTR_TRUNC: return sizeof(struct ovs_action_trunc);
123 case OVS_ACTION_ATTR_TUNNEL_PUSH: return ATTR_LEN_VARIABLE;
124 case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
125 case OVS_ACTION_ATTR_METER: return sizeof(uint32_t);
126 case OVS_ACTION_ATTR_USERSPACE: return ATTR_LEN_VARIABLE;
127 case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
128 case OVS_ACTION_ATTR_POP_VLAN: return 0;
129 case OVS_ACTION_ATTR_PUSH_MPLS: return sizeof(struct ovs_action_push_mpls);
130 case OVS_ACTION_ATTR_POP_MPLS: return sizeof(ovs_be16);
131 case OVS_ACTION_ATTR_RECIRC: return sizeof(uint32_t);
132 case OVS_ACTION_ATTR_HASH: return sizeof(struct ovs_action_hash);
133 case OVS_ACTION_ATTR_SET: return ATTR_LEN_VARIABLE;
134 case OVS_ACTION_ATTR_SET_MASKED: return ATTR_LEN_VARIABLE;
135 case OVS_ACTION_ATTR_SAMPLE: return ATTR_LEN_VARIABLE;
136 case OVS_ACTION_ATTR_CT: return ATTR_LEN_VARIABLE;
137 case OVS_ACTION_ATTR_CT_CLEAR: return 0;
138 case OVS_ACTION_ATTR_PUSH_ETH: return sizeof(struct ovs_action_push_eth);
139 case OVS_ACTION_ATTR_POP_ETH: return 0;
140 case OVS_ACTION_ATTR_CLONE: return ATTR_LEN_VARIABLE;
141 case OVS_ACTION_ATTR_PUSH_NSH: return ATTR_LEN_VARIABLE;
142 case OVS_ACTION_ATTR_POP_NSH: return 0;
143 case OVS_ACTION_ATTR_CHECK_PKT_LEN: return ATTR_LEN_VARIABLE;
144 case OVS_ACTION_ATTR_DROP: return sizeof(uint32_t);
145
146 case OVS_ACTION_ATTR_UNSPEC:
147 case __OVS_ACTION_ATTR_MAX:
148 return ATTR_LEN_INVALID;
149 }
150
151 return ATTR_LEN_INVALID;
152 }
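/* Usage sketch (minimal, mirroring format_odp_action() further below): callers
 * use odp_action_len() to validate an attribute's payload size before
 * interpreting it:
 *
 *     int expected_len = odp_action_len(nl_attr_type(a));
 *     if (expected_len != ATTR_LEN_VARIABLE
 *         && nl_attr_get_size(a) != expected_len) {
 *         ... reject or report the malformed attribute ...
 *     }
 *
 * For example, odp_action_len(OVS_ACTION_ATTR_OUTPUT) is sizeof(uint32_t),
 * while odp_action_len(OVS_ACTION_ATTR_SET) is ATTR_LEN_VARIABLE. */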
153
154 /* Returns a string form of 'attr'. The return value is either a statically
155 * allocated constant string or the 'bufsize'-byte buffer 'namebuf'. 'bufsize'
156 * should be at least OVS_KEY_ATTR_BUFSIZE. */
157 enum { OVS_KEY_ATTR_BUFSIZE = 3 + INT_STRLEN(unsigned int) + 1 };
158 static const char *
159 ovs_key_attr_to_string(enum ovs_key_attr attr, char *namebuf, size_t bufsize)
160 {
161 switch (attr) {
162 case OVS_KEY_ATTR_UNSPEC: return "unspec";
163 case OVS_KEY_ATTR_ENCAP: return "encap";
164 case OVS_KEY_ATTR_PRIORITY: return "skb_priority";
165 case OVS_KEY_ATTR_SKB_MARK: return "skb_mark";
166 case OVS_KEY_ATTR_CT_STATE: return "ct_state";
167 case OVS_KEY_ATTR_CT_ZONE: return "ct_zone";
168 case OVS_KEY_ATTR_CT_MARK: return "ct_mark";
169 case OVS_KEY_ATTR_CT_LABELS: return "ct_label";
170 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: return "ct_tuple4";
171 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: return "ct_tuple6";
172 case OVS_KEY_ATTR_TUNNEL: return "tunnel";
173 case OVS_KEY_ATTR_IN_PORT: return "in_port";
174 case OVS_KEY_ATTR_ETHERNET: return "eth";
175 case OVS_KEY_ATTR_VLAN: return "vlan";
176 case OVS_KEY_ATTR_ETHERTYPE: return "eth_type";
177 case OVS_KEY_ATTR_IPV4: return "ipv4";
178 case OVS_KEY_ATTR_IPV6: return "ipv6";
179 case OVS_KEY_ATTR_TCP: return "tcp";
180 case OVS_KEY_ATTR_TCP_FLAGS: return "tcp_flags";
181 case OVS_KEY_ATTR_UDP: return "udp";
182 case OVS_KEY_ATTR_SCTP: return "sctp";
183 case OVS_KEY_ATTR_ICMP: return "icmp";
184 case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
185 case OVS_KEY_ATTR_ARP: return "arp";
186 case OVS_KEY_ATTR_ND: return "nd";
187 case OVS_KEY_ATTR_ND_EXTENSIONS: return "nd_ext";
188 case OVS_KEY_ATTR_MPLS: return "mpls";
189 case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
190 case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
191 case OVS_KEY_ATTR_PACKET_TYPE: return "packet_type";
192 case OVS_KEY_ATTR_NSH: return "nsh";
193
194 case __OVS_KEY_ATTR_MAX:
195 default:
196 snprintf(namebuf, bufsize, "key%u", (unsigned int) attr);
197 return namebuf;
198 }
199 }
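/* Usage sketch (illustrative values only): the caller supplies the fallback
 * buffer, which is only used for attributes the switch does not know about:
 *
 *     char namebuf[OVS_KEY_ATTR_BUFSIZE];
 *
 *     ovs_key_attr_to_string(OVS_KEY_ATTR_IPV4, namebuf, sizeof namebuf);
 *         => "ipv4" (static string, 'namebuf' untouched)
 *     ovs_key_attr_to_string((enum ovs_key_attr) 99, namebuf, sizeof namebuf);
 *         => "key99" (formatted into 'namebuf'; 99 here is just a hypothetical
 *            out-of-range attribute number) */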
200
201 static void
202 format_generic_odp_action(struct ds *ds, const struct nlattr *a)
203 {
204 size_t len = nl_attr_get_size(a);
205
206 ds_put_format(ds, "action%d", nl_attr_type(a));
207 if (len) {
208 const uint8_t *unspec;
209 unsigned int i;
210
211 unspec = nl_attr_get(a);
212 for (i = 0; i < len; i++) {
213 ds_put_char(ds, i ? ' ': '(');
214 ds_put_format(ds, "%02x", unspec[i]);
215 }
216 ds_put_char(ds, ')');
217 }
218 }
219
220 static void
221 format_odp_sample_action(struct ds *ds, const struct nlattr *attr,
222 const struct hmap *portno_names)
223 {
224 static const struct nl_policy ovs_sample_policy[] = {
225 [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
226 [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
227 };
228 struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
229 double percentage;
230 const struct nlattr *nla_acts;
231 int len;
232
233 ds_put_cstr(ds, "sample");
234
235 if (!nl_parse_nested(attr, ovs_sample_policy, a, ARRAY_SIZE(a))) {
236 ds_put_cstr(ds, "(error)");
237 return;
238 }
239
240 percentage = (100.0 * nl_attr_get_u32(a[OVS_SAMPLE_ATTR_PROBABILITY])) /
241 UINT32_MAX;
242
243 ds_put_format(ds, "(sample=%.1f%%,", percentage);
244
245 ds_put_cstr(ds, "actions(");
246 nla_acts = nl_attr_get(a[OVS_SAMPLE_ATTR_ACTIONS]);
247 len = nl_attr_get_size(a[OVS_SAMPLE_ATTR_ACTIONS]);
248 format_odp_actions(ds, nla_acts, len, portno_names);
249 ds_put_format(ds, "))");
250 }
251
252 static void
253 format_odp_clone_action(struct ds *ds, const struct nlattr *attr,
254 const struct hmap *portno_names)
255 {
256 const struct nlattr *nla_acts = nl_attr_get(attr);
257 int len = nl_attr_get_size(attr);
258
259 ds_put_cstr(ds, "clone");
260 ds_put_format(ds, "(");
261 format_odp_actions(ds, nla_acts, len, portno_names);
262 ds_put_format(ds, ")");
263 }
264
265 static void
266 format_nsh_key(struct ds *ds, const struct ovs_key_nsh *key)
267 {
268 ds_put_format(ds, "flags=%d", key->flags);
269 ds_put_format(ds, ",ttl=%d", key->ttl);
270 ds_put_format(ds, ",mdtype=%d", key->mdtype);
271 ds_put_format(ds, ",np=%d", key->np);
272 ds_put_format(ds, ",spi=0x%x",
273 nsh_path_hdr_to_spi_uint32(key->path_hdr));
274 ds_put_format(ds, ",si=%d",
275 nsh_path_hdr_to_si(key->path_hdr));
276
277 switch (key->mdtype) {
278 case NSH_M_TYPE1:
279 for (int i = 0; i < 4; i++) {
280 ds_put_format(ds, ",c%d=0x%x", i + 1, ntohl(key->context[i]));
281 }
282 break;
283 case NSH_M_TYPE2:
284 default:
285 /* No support for matching other metadata formats yet. */
286 break;
287 }
288 }
289
290 static void
291 format_uint8_masked(struct ds *s, bool *first, const char *name,
292 uint8_t value, uint8_t mask)
293 {
294 if (mask != 0) {
295 if (!*first) {
296 ds_put_char(s, ',');
297 }
298 ds_put_format(s, "%s=", name);
299 if (mask == UINT8_MAX) {
300 ds_put_format(s, "%"PRIu8, value);
301 } else {
302 ds_put_format(s, "0x%02"PRIx8"/0x%02"PRIx8, value, mask);
303 }
304 *first = false;
305 }
306 }
307
308 static void
309 format_be32_masked(struct ds *s, bool *first, const char *name,
310 ovs_be32 value, ovs_be32 mask)
311 {
312 if (mask != htonl(0)) {
313 if (!*first) {
314 ds_put_char(s, ',');
315 }
316 ds_put_format(s, "%s=", name);
317 if (mask == OVS_BE32_MAX) {
318 ds_put_format(s, "0x%"PRIx32, ntohl(value));
319 } else {
320 ds_put_format(s, "0x%"PRIx32"/0x%08"PRIx32,
321 ntohl(value), ntohl(mask));
322 }
323 *first = false;
324 }
325 }
326
327 static void
328 format_nsh_key_mask(struct ds *ds, const struct ovs_key_nsh *key,
329 const struct ovs_key_nsh *mask)
330 {
331 if (!mask) {
332 format_nsh_key(ds, key);
333 } else {
334 bool first = true;
335 uint32_t spi = nsh_path_hdr_to_spi_uint32(key->path_hdr);
336 uint32_t spi_mask = nsh_path_hdr_to_spi_uint32(mask->path_hdr);
337 if (spi_mask == (NSH_SPI_MASK >> NSH_SPI_SHIFT)) {
338 spi_mask = UINT32_MAX;
339 }
340 uint8_t si = nsh_path_hdr_to_si(key->path_hdr);
341 uint8_t si_mask = nsh_path_hdr_to_si(mask->path_hdr);
342
343 format_uint8_masked(ds, &first, "flags", key->flags, mask->flags);
344 format_uint8_masked(ds, &first, "ttl", key->ttl, mask->ttl);
345 format_uint8_masked(ds, &first, "mdtype", key->mdtype, mask->mdtype);
346 format_uint8_masked(ds, &first, "np", key->np, mask->np);
347 format_be32_masked(ds, &first, "spi", htonl(spi), htonl(spi_mask));
348 format_uint8_masked(ds, &first, "si", si, si_mask);
349 format_be32_masked(ds, &first, "c1", key->context[0],
350 mask->context[0]);
351 format_be32_masked(ds, &first, "c2", key->context[1],
352 mask->context[1]);
353 format_be32_masked(ds, &first, "c3", key->context[2],
354 mask->context[2]);
355 format_be32_masked(ds, &first, "c4", key->context[3],
356 mask->context[3]);
357 }
358 }
359
360 static void
361 format_odp_push_nsh_action(struct ds *ds,
362 const struct nsh_hdr *nsh_hdr)
363 {
364 size_t mdlen = nsh_hdr_len(nsh_hdr) - NSH_BASE_HDR_LEN;
365 uint32_t spi = ntohl(nsh_get_spi(nsh_hdr));
366 uint8_t si = nsh_get_si(nsh_hdr);
367 uint8_t flags = nsh_get_flags(nsh_hdr);
368 uint8_t ttl = nsh_get_ttl(nsh_hdr);
369
370 ds_put_cstr(ds, "push_nsh(");
371 ds_put_format(ds, "flags=%d", flags);
372 ds_put_format(ds, ",ttl=%d", ttl);
373 ds_put_format(ds, ",mdtype=%d", nsh_hdr->md_type);
374 ds_put_format(ds, ",np=%d", nsh_hdr->next_proto);
375 ds_put_format(ds, ",spi=0x%x", spi);
376 ds_put_format(ds, ",si=%d", si);
377 switch (nsh_hdr->md_type) {
378 case NSH_M_TYPE1: {
379 const struct nsh_md1_ctx *md1_ctx = &nsh_hdr->md1;
380 for (int i = 0; i < 4; i++) {
381 ds_put_format(ds, ",c%d=0x%x", i + 1,
382 ntohl(get_16aligned_be32(&md1_ctx->context[i])));
383 }
384 break;
385 }
386 case NSH_M_TYPE2: {
387 const struct nsh_md2_tlv *md2_ctx = &nsh_hdr->md2;
388 ds_put_cstr(ds, ",md2=");
389 ds_put_hex(ds, md2_ctx, mdlen);
390 break;
391 }
392 default:
393 OVS_NOT_REACHED();
394 }
395 ds_put_format(ds, ")");
396 }
397
398 static const char *
399 slow_path_reason_to_string(uint32_t reason)
400 {
401 switch ((enum slow_path_reason) reason) {
402 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
403 SLOW_PATH_REASONS
404 #undef SPR
405 }
406
407 return NULL;
408 }
409
410 const char *
411 slow_path_reason_to_explanation(enum slow_path_reason reason)
412 {
413 switch (reason) {
414 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
415 SLOW_PATH_REASONS
416 #undef SPR
417 }
418
419 return "<unknown>";
420 }
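/* Both helpers above are generated from the SLOW_PATH_REASONS X-macro list
 * defined alongside enum slow_path_reason. Each SPR(ENUM, STRING, EXPLANATION)
 * entry expands into one switch case, so an entry along the lines of
 *
 *     SPR(SLOW_ACTION, "action", "Uses action(s) not supported by datapath")
 *
 * (shown here only as an illustration) makes slow_path_reason_to_string()
 * return "action" and slow_path_reason_to_explanation() return the longer
 * explanation text for that reason. */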
421
422 static int
423 parse_odp_flags(const char *s, const char *(*bit_to_string)(uint32_t),
424 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
425 {
426 return parse_flags(s, bit_to_string, ')', NULL, NULL,
427 res_flags, allowed, res_mask);
428 }
429
430 static void
431 format_odp_userspace_action(struct ds *ds, const struct nlattr *attr,
432 const struct hmap *portno_names)
433 {
434 static const struct nl_policy ovs_userspace_policy[] = {
435 [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
436 [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
437 .optional = true },
438 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = { .type = NL_A_U32,
439 .optional = true },
440 [OVS_USERSPACE_ATTR_ACTIONS] = { .type = NL_A_UNSPEC,
441 .optional = true },
442 };
443 struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
444 const struct nlattr *userdata_attr;
445 const struct nlattr *tunnel_out_port_attr;
446
447 if (!nl_parse_nested(attr, ovs_userspace_policy, a, ARRAY_SIZE(a))) {
448 ds_put_cstr(ds, "userspace(error)");
449 return;
450 }
451
452 ds_put_format(ds, "userspace(pid=%"PRIu32,
453 nl_attr_get_u32(a[OVS_USERSPACE_ATTR_PID]));
454
455 userdata_attr = a[OVS_USERSPACE_ATTR_USERDATA];
456
457 if (userdata_attr) {
458 const uint8_t *userdata = nl_attr_get(userdata_attr);
459 size_t userdata_len = nl_attr_get_size(userdata_attr);
460 bool userdata_unspec = true;
461 struct user_action_cookie cookie;
462
463 if (userdata_len == sizeof cookie) {
464 memcpy(&cookie, userdata, sizeof cookie);
465
466 userdata_unspec = false;
467
468 if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
469 ds_put_format(ds, ",sFlow("
470 "vid=%"PRIu16",pcp=%d,output=%"PRIu32")",
471 vlan_tci_to_vid(cookie.sflow.vlan_tci),
472 vlan_tci_to_pcp(cookie.sflow.vlan_tci),
473 cookie.sflow.output);
474 } else if (cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
475 ds_put_cstr(ds, ",slow_path(");
476 format_flags(ds, slow_path_reason_to_string,
477 cookie.slow_path.reason, ',');
478 ds_put_format(ds, ")");
479 } else if (cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
480 ds_put_format(ds, ",flow_sample(probability=%"PRIu16
481 ",collector_set_id=%"PRIu32
482 ",obs_domain_id=%"PRIu32
483 ",obs_point_id=%"PRIu32
484 ",output_port=",
485 cookie.flow_sample.probability,
486 cookie.flow_sample.collector_set_id,
487 cookie.flow_sample.obs_domain_id,
488 cookie.flow_sample.obs_point_id);
489 odp_portno_name_format(portno_names,
490 cookie.flow_sample.output_odp_port, ds);
491 if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_INGRESS) {
492 ds_put_cstr(ds, ",ingress");
493 } else if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_EGRESS) {
494 ds_put_cstr(ds, ",egress");
495 }
496 ds_put_char(ds, ')');
497 } else if (cookie.type == USER_ACTION_COOKIE_IPFIX) {
498 ds_put_format(ds, ",ipfix(output_port=");
499 odp_portno_name_format(portno_names,
500 cookie.ipfix.output_odp_port, ds);
501 ds_put_char(ds, ')');
502 } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
503 ds_put_format(ds, ",controller(reason=%"PRIu16
504 ",dont_send=%d"
505 ",continuation=%d"
506 ",recirc_id=%"PRIu32
507 ",rule_cookie=%#"PRIx64
508 ",controller_id=%"PRIu16
509 ",max_len=%"PRIu16,
510 cookie.controller.reason,
511 !!cookie.controller.dont_send,
512 !!cookie.controller.continuation,
513 cookie.controller.recirc_id,
514 ntohll(get_32aligned_be64(
515 &cookie.controller.rule_cookie)),
516 cookie.controller.controller_id,
517 cookie.controller.max_len);
518 ds_put_char(ds, ')');
519 } else {
520 userdata_unspec = true;
521 }
522 }
523
524 if (userdata_unspec) {
525 size_t i;
526 ds_put_format(ds, ",userdata(");
527 for (i = 0; i < userdata_len; i++) {
528 ds_put_format(ds, "%02x", userdata[i]);
529 }
530 ds_put_char(ds, ')');
531 }
532 }
533
534 if (a[OVS_USERSPACE_ATTR_ACTIONS]) {
535 ds_put_cstr(ds, ",actions");
536 }
537
538 tunnel_out_port_attr = a[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT];
539 if (tunnel_out_port_attr) {
540 ds_put_format(ds, ",tunnel_out_port=");
541 odp_portno_name_format(portno_names,
542 nl_attr_get_odp_port(tunnel_out_port_attr), ds);
543 }
544
545 ds_put_char(ds, ')');
546 }
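/* Examples of the resulting text (illustrative values only):
 *
 *     userspace(pid=4294631339,slow_path(controller))
 *     userspace(pid=1234,userdata(00010203))
 *
 * The first shows a decoded slow-path cookie; the second shows the raw
 * "userdata(...)" fallback used when the cookie is not recognized. */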
547
548 static void
549 format_vlan_tci(struct ds *ds, ovs_be16 tci, ovs_be16 mask, bool verbose)
550 {
551 if (verbose || vlan_tci_to_vid(tci) || vlan_tci_to_vid(mask)) {
552 ds_put_format(ds, "vid=%"PRIu16, vlan_tci_to_vid(tci));
553 if (vlan_tci_to_vid(mask) != VLAN_VID_MASK) { /* Partially masked. */
554 ds_put_format(ds, "/0x%"PRIx16, vlan_tci_to_vid(mask));
555 }
556 ds_put_char(ds, ',');
557 }
558 if (verbose || vlan_tci_to_pcp(tci) || vlan_tci_to_pcp(mask)) {
559 ds_put_format(ds, "pcp=%d", vlan_tci_to_pcp(tci));
560 if (vlan_tci_to_pcp(mask) != (VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
561 ds_put_format(ds, "/0x%x", vlan_tci_to_pcp(mask));
562 }
563 ds_put_char(ds, ',');
564 }
565 if (!(tci & htons(VLAN_CFI))) {
566 ds_put_cstr(ds, "cfi=0");
567 ds_put_char(ds, ',');
568 }
569 ds_chomp(ds, ',');
570 }
571
572 static void
573 format_mpls_lse(struct ds *ds, ovs_be32 mpls_lse)
574 {
575 ds_put_format(ds, "label=%"PRIu32",tc=%d,ttl=%d,bos=%d",
576 mpls_lse_to_label(mpls_lse),
577 mpls_lse_to_tc(mpls_lse),
578 mpls_lse_to_ttl(mpls_lse),
579 mpls_lse_to_bos(mpls_lse));
580 }
581
582 static void
583 format_mpls(struct ds *ds, const struct ovs_key_mpls *mpls_key,
584 const struct ovs_key_mpls *mpls_mask, int n)
585 {
586 for (int i = 0; i < n; i++) {
587 ovs_be32 key = mpls_key[i].mpls_lse;
588
589 if (mpls_mask == NULL) {
590 format_mpls_lse(ds, key);
591 } else {
592 ovs_be32 mask = mpls_mask[i].mpls_lse;
593
594 ds_put_format(ds, "label=%"PRIu32"/0x%x,tc=%d/%x,ttl=%d/0x%x,bos=%d/%x",
595 mpls_lse_to_label(key), mpls_lse_to_label(mask),
596 mpls_lse_to_tc(key), mpls_lse_to_tc(mask),
597 mpls_lse_to_ttl(key), mpls_lse_to_ttl(mask),
598 mpls_lse_to_bos(key), mpls_lse_to_bos(mask));
599 }
600 ds_put_char(ds, ',');
601 }
602 ds_chomp(ds, ',');
603 }
604
605 static void
606 format_odp_recirc_action(struct ds *ds, uint32_t recirc_id)
607 {
608 ds_put_format(ds, "recirc(%#"PRIx32")", recirc_id);
609 }
610
611 static void
612 format_odp_hash_action(struct ds *ds, const struct ovs_action_hash *hash_act)
613 {
614 ds_put_format(ds, "hash(");
615
616 if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
617 ds_put_format(ds, "l4(%"PRIu32")", hash_act->hash_basis);
618 } else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
619 ds_put_format(ds, "sym_l4(%"PRIu32")", hash_act->hash_basis);
620 } else {
621 ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
622 hash_act->hash_alg);
623 }
624 ds_put_format(ds, ")");
625 }
626
627 static const void *
628 format_udp_tnl_push_header(struct ds *ds, const struct udp_header *udp)
629 {
630 ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16",csum=0x%"PRIx16"),",
631 ntohs(udp->udp_src), ntohs(udp->udp_dst),
632 ntohs(udp->udp_csum));
633
634 return udp + 1;
635 }
636
637 static void
638 format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
639 {
640 const struct eth_header *eth;
641 const void *l3;
642 const void *l4;
643 const struct udp_header *udp;
644
645 eth = (const struct eth_header *)data->header;
646
647 l3 = eth + 1;
648
649 /* Ethernet */
650 ds_put_format(ds, "header(size=%"PRIu32",type=%"PRIu32",eth(dst=",
651 data->header_len, data->tnl_type);
652 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
653 ds_put_format(ds, ",src=");
654 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
655 ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
656
657 if (eth->eth_type == htons(ETH_TYPE_IP)) {
658 /* IPv4 */
659 const struct ip_header *ip = l3;
660 ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
661 ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
662 IP_ARGS(get_16aligned_be32(&ip->ip_src)),
663 IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
664 ip->ip_proto, ip->ip_tos,
665 ip->ip_ttl,
666 ntohs(ip->ip_frag_off));
667 l4 = (ip + 1);
668 } else {
669 const struct ovs_16aligned_ip6_hdr *ip6 = l3;
670 struct in6_addr src, dst;
671 memcpy(&src, &ip6->ip6_src, sizeof src);
672 memcpy(&dst, &ip6->ip6_dst, sizeof dst);
673 uint32_t ipv6_flow = ntohl(get_16aligned_be32(&ip6->ip6_flow));
674
675 ds_put_format(ds, "ipv6(src=");
676 ipv6_format_addr(&src, ds);
677 ds_put_format(ds, ",dst=");
678 ipv6_format_addr(&dst, ds);
679 ds_put_format(ds, ",label=%i,proto=%"PRIu8",tclass=0x%"PRIx32
680 ",hlimit=%"PRIu8"),",
681 ipv6_flow & IPV6_LABEL_MASK, ip6->ip6_nxt,
682 (ipv6_flow >> 20) & 0xff, ip6->ip6_hlim);
683 l4 = (ip6 + 1);
684 }
685
686 udp = (const struct udp_header *) l4;
687
688 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
689 const struct vxlanhdr *vxh;
690
691 vxh = format_udp_tnl_push_header(ds, udp);
692
693 ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
694 ntohl(get_16aligned_be32(&vxh->vx_flags)),
695 ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
696 } else if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
697 const struct genevehdr *gnh;
698
699 gnh = format_udp_tnl_push_header(ds, udp);
700
701 ds_put_format(ds, "geneve(%s%svni=0x%"PRIx32,
702 gnh->oam ? "oam," : "",
703 gnh->critical ? "crit," : "",
704 ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
705
706 if (gnh->opt_len) {
707 ds_put_cstr(ds, ",options(");
708 format_geneve_opts(gnh->options, NULL, gnh->opt_len * 4,
709 ds, false);
710 ds_put_char(ds, ')');
711 }
712
713 ds_put_char(ds, ')');
714 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE ||
715 data->tnl_type == OVS_VPORT_TYPE_IP6GRE) {
716 const struct gre_base_hdr *greh;
717 ovs_16aligned_be32 *options;
718
719 greh = (const struct gre_base_hdr *) l4;
720
721 ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
722 ntohs(greh->flags), ntohs(greh->protocol));
723 options = (ovs_16aligned_be32 *)(greh + 1);
724 if (greh->flags & htons(GRE_CSUM)) {
725 ds_put_format(ds, ",csum=0x%"PRIx16, ntohs(*((ovs_be16 *)options)));
726 options++;
727 }
728 if (greh->flags & htons(GRE_KEY)) {
729 ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
730 options++;
731 }
732 if (greh->flags & htons(GRE_SEQ)) {
733 ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
734 options++;
735 }
736 ds_put_format(ds, ")");
737 } else if (data->tnl_type == OVS_VPORT_TYPE_ERSPAN ||
738 data->tnl_type == OVS_VPORT_TYPE_IP6ERSPAN) {
739 const struct gre_base_hdr *greh;
740 const struct erspan_base_hdr *ersh;
741
742 greh = (const struct gre_base_hdr *) l4;
743 ersh = ERSPAN_HDR(greh);
744
745 if (ersh->ver == 1) {
746 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
747 ersh + 1);
748 ds_put_format(ds, "erspan(ver=1,sid=0x%"PRIx16",idx=0x%"PRIx32")",
749 get_sid(ersh), ntohl(get_16aligned_be32(index)));
750 } else if (ersh->ver == 2) {
751 struct erspan_md2 *md2 = ALIGNED_CAST(struct erspan_md2 *,
752 ersh + 1);
753 ds_put_format(ds, "erspan(ver=2,sid=0x%"PRIx16
754 ",dir=%"PRIu8",hwid=0x%"PRIx8")",
755 get_sid(ersh), md2->dir, get_hwid(md2));
756 } else {
757 VLOG_WARN("%s Invalid ERSPAN version %d\n", __func__, ersh->ver);
758 }
759 } else if (data->tnl_type == OVS_VPORT_TYPE_GTPU) {
760 const struct gtpuhdr *gtph;
761
762 gtph = format_udp_tnl_push_header(ds, udp);
763
764 ds_put_format(ds, "gtpu(flags=0x%"PRIx8
765 ",msgtype=%"PRIu8",teid=0x%"PRIx32")",
766 gtph->md.flags, gtph->md.msgtype,
767 ntohl(get_16aligned_be32(&gtph->teid)));
768 }
769
770 ds_put_format(ds, ")");
771 }
772
773 static void
774 format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr,
775 const struct hmap *portno_names)
776 {
777 struct ovs_action_push_tnl *data;
778
779 data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
780
781 ds_put_cstr(ds, "tnl_push(tnl_port(");
782 odp_portno_name_format(portno_names, data->tnl_port, ds);
783 ds_put_cstr(ds, "),");
784 format_odp_tnl_push_header(ds, data);
785 ds_put_format(ds, ",out_port(");
786 odp_portno_name_format(portno_names, data->out_port, ds);
787 ds_put_cstr(ds, "))");
788 }
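/* Example of the resulting text for a VXLAN push (illustrative addresses,
 * ports, and VNI; wrapped here for readability, the real output is one line):
 *
 *     tnl_push(tnl_port(4789),
 *              header(size=50,type=4,
 *                     eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,
 *                         dl_type=0x0800),
 *                     ipv4(src=1.1.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,
 *                          frag=0x4000),
 *                     udp(src=0,dst=4789,csum=0x0),
 *                     vxlan(flags=0x8000000,vni=0x7b)),
 *              out_port(100))
 *
 * ovs_parse_tnl_push() further below accepts strings in this same format. */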
789
790 static const struct nl_policy ovs_nat_policy[] = {
791 [OVS_NAT_ATTR_SRC] = { .type = NL_A_FLAG, .optional = true, },
792 [OVS_NAT_ATTR_DST] = { .type = NL_A_FLAG, .optional = true, },
793 [OVS_NAT_ATTR_IP_MIN] = { .type = NL_A_UNSPEC, .optional = true,
794 .min_len = sizeof(struct in_addr),
795 .max_len = sizeof(struct in6_addr)},
796 [OVS_NAT_ATTR_IP_MAX] = { .type = NL_A_UNSPEC, .optional = true,
797 .min_len = sizeof(struct in_addr),
798 .max_len = sizeof(struct in6_addr)},
799 [OVS_NAT_ATTR_PROTO_MIN] = { .type = NL_A_U16, .optional = true, },
800 [OVS_NAT_ATTR_PROTO_MAX] = { .type = NL_A_U16, .optional = true, },
801 [OVS_NAT_ATTR_PERSISTENT] = { .type = NL_A_FLAG, .optional = true, },
802 [OVS_NAT_ATTR_PROTO_HASH] = { .type = NL_A_FLAG, .optional = true, },
803 [OVS_NAT_ATTR_PROTO_RANDOM] = { .type = NL_A_FLAG, .optional = true, },
804 };
805
806 static void
807 format_odp_ct_nat(struct ds *ds, const struct nlattr *attr)
808 {
809 struct nlattr *a[ARRAY_SIZE(ovs_nat_policy)];
810 size_t addr_len;
811 ovs_be32 ip_min, ip_max;
812 struct in6_addr ip6_min, ip6_max;
813 uint16_t proto_min, proto_max;
814
815 if (!nl_parse_nested(attr, ovs_nat_policy, a, ARRAY_SIZE(a))) {
816 ds_put_cstr(ds, "nat(error: nl_parse_nested() failed.)");
817 return;
818 }
819 /* If no NAT type ("src" or "dst") is given, no other attribute may be present either. */
820 if (!(a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST])
821 && (a[OVS_NAT_ATTR_IP_MIN] || a[OVS_NAT_ATTR_IP_MAX]
822 || a[OVS_NAT_ATTR_PROTO_MIN] || a[OVS_NAT_ATTR_PROTO_MAX]
823 || a[OVS_NAT_ATTR_PERSISTENT] || a[OVS_NAT_ATTR_PROTO_HASH]
824 || a[OVS_NAT_ATTR_PROTO_RANDOM])) {
825 ds_put_cstr(ds, "nat(error: options allowed only with \"src\" or \"dst\")");
826 return;
827 }
828 /* SNAT and DNAT may not both be specified. */
829 if (a[OVS_NAT_ATTR_SRC] && a[OVS_NAT_ATTR_DST]) {
830 ds_put_cstr(ds, "nat(error: Only one of \"src\" or \"dst\" may be present.)");
831 return;
832 }
833 /* proto may not appear without ip. */
834 if (!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_PROTO_MIN]) {
835 ds_put_cstr(ds, "nat(error: proto but no IP.)");
836 return;
837 }
838 /* MAX may not appear without MIN. */
839 if ((!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX])
840 || (!a[OVS_NAT_ATTR_PROTO_MIN] && a[OVS_NAT_ATTR_PROTO_MAX])) {
841 ds_put_cstr(ds, "nat(error: range max without min.)");
842 return;
843 }
844 /* Address sizes must match. */
845 if ((a[OVS_NAT_ATTR_IP_MIN]
846 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(ovs_be32) &&
847 nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(struct in6_addr)))
848 || (a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX]
849 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN])
850 != nl_attr_get_size(a[OVS_NAT_ATTR_IP_MAX])))) {
851 ds_put_cstr(ds, "nat(error: IP address sizes do not match)");
852 return;
853 }
854
855 addr_len = a[OVS_NAT_ATTR_IP_MIN]
856 ? nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) : 0;
857 ip_min = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MIN]
858 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MIN]) : 0;
859 ip_max = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MAX]
860 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MAX]) : 0;
861 if (addr_len == sizeof ip6_min) {
862 ip6_min = a[OVS_NAT_ATTR_IP_MIN]
863 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MIN])
864 : in6addr_any;
865 ip6_max = a[OVS_NAT_ATTR_IP_MAX]
866 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MAX])
867 : in6addr_any;
868 }
869 proto_min = a[OVS_NAT_ATTR_PROTO_MIN]
870 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MIN]) : 0;
871 proto_max = a[OVS_NAT_ATTR_PROTO_MAX]
872 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MAX]) : 0;
873
874 if ((addr_len == sizeof(ovs_be32)
875 && ip_max && ntohl(ip_min) > ntohl(ip_max))
876 || (addr_len == sizeof(struct in6_addr)
877 && !ipv6_mask_is_any(&ip6_max)
878 && memcmp(&ip6_min, &ip6_max, sizeof ip6_min) > 0)
879 || (proto_max && proto_min > proto_max)) {
880 ds_put_cstr(ds, "nat(range error)");
881 return;
882 }
883
884 ds_put_cstr(ds, "nat");
885 if (a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST]) {
886 ds_put_char(ds, '(');
887 if (a[OVS_NAT_ATTR_SRC]) {
888 ds_put_cstr(ds, "src");
889 } else if (a[OVS_NAT_ATTR_DST]) {
890 ds_put_cstr(ds, "dst");
891 }
892
893 if (addr_len > 0) {
894 ds_put_cstr(ds, "=");
895
896 if (addr_len == sizeof ip_min) {
897 ds_put_format(ds, IP_FMT, IP_ARGS(ip_min));
898
899 if (ip_max && ip_max != ip_min) {
900 ds_put_format(ds, "-"IP_FMT, IP_ARGS(ip_max));
901 }
902 } else if (addr_len == sizeof ip6_min) {
903 ipv6_format_addr_bracket(&ip6_min, ds, proto_min);
904
905 if (!ipv6_mask_is_any(&ip6_max) &&
906 memcmp(&ip6_max, &ip6_min, sizeof ip6_max) != 0) {
907 ds_put_char(ds, '-');
908 ipv6_format_addr_bracket(&ip6_max, ds, proto_min);
909 }
910 }
911 if (proto_min) {
912 ds_put_format(ds, ":%"PRIu16, proto_min);
913
914 if (proto_max && proto_max != proto_min) {
915 ds_put_format(ds, "-%"PRIu16, proto_max);
916 }
917 }
918 }
919 ds_put_char(ds, ',');
920 if (a[OVS_NAT_ATTR_PERSISTENT]) {
921 ds_put_cstr(ds, "persistent,");
922 }
923 if (a[OVS_NAT_ATTR_PROTO_HASH]) {
924 ds_put_cstr(ds, "hash,");
925 }
926 if (a[OVS_NAT_ATTR_PROTO_RANDOM]) {
927 ds_put_cstr(ds, "random,");
928 }
929 ds_chomp(ds, ',');
930 ds_put_char(ds, ')');
931 }
932 }
933
934 static const struct nl_policy ovs_conntrack_policy[] = {
935 [OVS_CT_ATTR_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
936 [OVS_CT_ATTR_FORCE_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
937 [OVS_CT_ATTR_ZONE] = { .type = NL_A_U16, .optional = true, },
938 [OVS_CT_ATTR_MARK] = { .type = NL_A_UNSPEC, .optional = true,
939 .min_len = sizeof(uint32_t) * 2 },
940 [OVS_CT_ATTR_LABELS] = { .type = NL_A_UNSPEC, .optional = true,
941 .min_len = sizeof(struct ovs_key_ct_labels) * 2 },
942 [OVS_CT_ATTR_HELPER] = { .type = NL_A_STRING, .optional = true,
943 .min_len = 1, .max_len = 16 },
944 [OVS_CT_ATTR_NAT] = { .type = NL_A_UNSPEC, .optional = true },
945 [OVS_CT_ATTR_TIMEOUT] = { .type = NL_A_STRING, .optional = true,
946 .min_len = 1, .max_len = 32 },
947 };
948
949 static void
950 format_odp_conntrack_action(struct ds *ds, const struct nlattr *attr)
951 {
952 struct nlattr *a[ARRAY_SIZE(ovs_conntrack_policy)];
953 const struct {
954 ovs_32aligned_u128 value;
955 ovs_32aligned_u128 mask;
956 } *label;
957 const uint32_t *mark;
958 const char *helper, *timeout;
959 uint16_t zone;
960 bool commit, force;
961 const struct nlattr *nat;
962
963 if (!nl_parse_nested(attr, ovs_conntrack_policy, a, ARRAY_SIZE(a))) {
964 ds_put_cstr(ds, "ct(error)");
965 return;
966 }
967
968 commit = a[OVS_CT_ATTR_COMMIT] ? true : false;
969 force = a[OVS_CT_ATTR_FORCE_COMMIT] ? true : false;
970 zone = a[OVS_CT_ATTR_ZONE] ? nl_attr_get_u16(a[OVS_CT_ATTR_ZONE]) : 0;
971 mark = a[OVS_CT_ATTR_MARK] ? nl_attr_get(a[OVS_CT_ATTR_MARK]) : NULL;
972 label = a[OVS_CT_ATTR_LABELS] ? nl_attr_get(a[OVS_CT_ATTR_LABELS]): NULL;
973 helper = a[OVS_CT_ATTR_HELPER] ? nl_attr_get(a[OVS_CT_ATTR_HELPER]) : NULL;
974 timeout = a[OVS_CT_ATTR_TIMEOUT] ?
975 nl_attr_get(a[OVS_CT_ATTR_TIMEOUT]) : NULL;
976 nat = a[OVS_CT_ATTR_NAT];
977
978 ds_put_format(ds, "ct");
979 if (commit || force || zone || mark || label || helper || timeout || nat) {
980 ds_put_cstr(ds, "(");
981 if (commit) {
982 ds_put_format(ds, "commit,");
983 }
984 if (force) {
985 ds_put_format(ds, "force_commit,");
986 }
987 if (zone) {
988 ds_put_format(ds, "zone=%"PRIu16",", zone);
989 }
990 if (mark) {
991 ds_put_format(ds, "mark=%#"PRIx32"/%#"PRIx32",", *mark,
992 *(mark + 1));
993 }
994 if (label) {
995 ds_put_format(ds, "label=");
996 format_u128(ds, &label->value, &label->mask, true);
997 ds_put_char(ds, ',');
998 }
999 if (helper) {
1000 ds_put_format(ds, "helper=%s,", helper);
1001 }
1002 if (timeout) {
1003 ds_put_format(ds, "timeout=%s", timeout);
1004 }
1005 if (nat) {
1006 format_odp_ct_nat(ds, nat);
1007 }
1008 ds_chomp(ds, ',');
1009 ds_put_cstr(ds, ")");
1010 }
1011 }
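/* Example of the resulting text (illustrative values only): a commit with a
 * zone, a mark/mask pair and source NAT might be rendered as
 *
 *     ct(commit,zone=5,mark=0x3/0xff,nat(src=10.0.0.240-10.0.0.254:32768-65535))
 *
 * whereas a ct action carrying no attributes at all prints as just "ct". */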
1012
1013 static const struct attr_len_tbl
1014 ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
1015 [OVS_NSH_KEY_ATTR_BASE] = { .len = 8 },
1016 [OVS_NSH_KEY_ATTR_MD1] = { .len = 16 },
1017 [OVS_NSH_KEY_ATTR_MD2] = { .len = ATTR_LEN_VARIABLE },
1018 };
1019
1020 static void
1021 format_odp_set_nsh(struct ds *ds, const struct nlattr *attr)
1022 {
1023 unsigned int left;
1024 const struct nlattr *a;
1025 struct ovs_key_nsh nsh;
1026 struct ovs_key_nsh nsh_mask;
1027
1028 memset(&nsh, 0, sizeof nsh);
1029 memset(&nsh_mask, 0xff, sizeof nsh_mask);
1030
1031 NL_NESTED_FOR_EACH (a, left, attr) {
1032 enum ovs_nsh_key_attr type = nl_attr_type(a);
1033 size_t len = nl_attr_get_size(a);
1034
1035 if (type >= OVS_NSH_KEY_ATTR_MAX) {
1036 return;
1037 }
1038
1039 int expected_len = ovs_nsh_key_attr_lens[type].len;
1040 if ((expected_len != ATTR_LEN_VARIABLE) && (len != 2 * expected_len)) {
1041 return;
1042 }
1043
1044 switch (type) {
1045 case OVS_NSH_KEY_ATTR_UNSPEC:
1046 break;
1047 case OVS_NSH_KEY_ATTR_BASE: {
1048 const struct ovs_nsh_key_base *base = nl_attr_get(a);
1049 const struct ovs_nsh_key_base *base_mask = base + 1;
1050 memcpy(&nsh, base, sizeof(*base));
1051 memcpy(&nsh_mask, base_mask, sizeof(*base_mask));
1052 break;
1053 }
1054 case OVS_NSH_KEY_ATTR_MD1: {
1055 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
1056 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
1057 memcpy(&nsh.context, &md1->context, sizeof(*md1));
1058 memcpy(&nsh_mask.context, &md1_mask->context, sizeof(*md1_mask));
1059 break;
1060 }
1061 case OVS_NSH_KEY_ATTR_MD2:
1062 case __OVS_NSH_KEY_ATTR_MAX:
1063 default:
1064 /* No support for matching other metadata formats yet. */
1065 break;
1066 }
1067 }
1068
1069 ds_put_cstr(ds, "set(nsh(");
1070 format_nsh_key_mask(ds, &nsh, &nsh_mask);
1071 ds_put_cstr(ds, "))");
1072 }
1073
1074 static void
1075 format_odp_check_pkt_len_action(struct ds *ds, const struct nlattr *attr,
1076 const struct hmap *portno_names OVS_UNUSED)
1077 {
1078 static const struct nl_policy ovs_cpl_policy[] = {
1079 [OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = { .type = NL_A_U16 },
1080 [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = { .type = NL_A_NESTED },
1081 [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]
1082 = { .type = NL_A_NESTED },
1083 };
1084 struct nlattr *a[ARRAY_SIZE(ovs_cpl_policy)];
1085 ds_put_cstr(ds, "check_pkt_len");
1086 if (!nl_parse_nested(attr, ovs_cpl_policy, a, ARRAY_SIZE(a))) {
1087 ds_put_cstr(ds, "(error)");
1088 return;
1089 }
1090
1091 if (!a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] ||
1092 !a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]) {
1093 ds_put_cstr(ds, "(error)");
1094 return;
1095 }
1096
1097 uint16_t pkt_len = nl_attr_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
1098 ds_put_format(ds, "(size=%u,gt(", pkt_len);
1099 const struct nlattr *acts;
1100 acts = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
1101 format_odp_actions(ds, nl_attr_get(acts), nl_attr_get_size(acts),
1102 portno_names);
1103
1104 ds_put_cstr(ds, "),le(");
1105 acts = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
1106 format_odp_actions(ds, nl_attr_get(acts), nl_attr_get_size(acts),
1107 portno_names);
1108 ds_put_cstr(ds, "))");
1109 }
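/* Example of the resulting text (illustrative values only):
 *
 *     check_pkt_len(size=200,gt(4),le(5))
 *
 * i.e. execute "output to port 4" if the packet is longer than 200 bytes and
 * "output to port 5" otherwise. */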
1110
1111 static void
1112 format_odp_action(struct ds *ds, const struct nlattr *a,
1113 const struct hmap *portno_names)
1114 {
1115 int expected_len;
1116 enum ovs_action_attr type = nl_attr_type(a);
1117 size_t size;
1118
1119 expected_len = odp_action_len(nl_attr_type(a));
1120 if (expected_len != ATTR_LEN_VARIABLE &&
1121 nl_attr_get_size(a) != expected_len) {
1122 ds_put_format(ds, "bad length %"PRIuSIZE", expected %d for: ",
1123 nl_attr_get_size(a), expected_len);
1124 format_generic_odp_action(ds, a);
1125 return;
1126 }
1127
1128 switch (type) {
1129 case OVS_ACTION_ATTR_METER:
1130 ds_put_format(ds, "meter(%"PRIu32")", nl_attr_get_u32(a));
1131 break;
1132 case OVS_ACTION_ATTR_OUTPUT:
1133 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1134 break;
1135 case OVS_ACTION_ATTR_TRUNC: {
1136 const struct ovs_action_trunc *trunc =
1137 nl_attr_get_unspec(a, sizeof *trunc);
1138
1139 ds_put_format(ds, "trunc(%"PRIu32")", trunc->max_len);
1140 break;
1141 }
1142 case OVS_ACTION_ATTR_TUNNEL_POP:
1143 ds_put_cstr(ds, "tnl_pop(");
1144 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1145 ds_put_char(ds, ')');
1146 break;
1147 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1148 format_odp_tnl_push_action(ds, a, portno_names);
1149 break;
1150 case OVS_ACTION_ATTR_USERSPACE:
1151 format_odp_userspace_action(ds, a, portno_names);
1152 break;
1153 case OVS_ACTION_ATTR_RECIRC:
1154 format_odp_recirc_action(ds, nl_attr_get_u32(a));
1155 break;
1156 case OVS_ACTION_ATTR_HASH:
1157 format_odp_hash_action(ds, nl_attr_get(a));
1158 break;
1159 case OVS_ACTION_ATTR_SET_MASKED:
1160 a = nl_attr_get(a);
1161 /* OVS_KEY_ATTR_NSH is a nested attribute, so it needs special processing. */
1162 if (nl_attr_type(a) == OVS_KEY_ATTR_NSH) {
1163 format_odp_set_nsh(ds, a);
1164 break;
1165 }
1166 size = nl_attr_get_size(a) / 2;
1167 ds_put_cstr(ds, "set(");
1168
1169 /* Masked set action not supported for tunnel key, which is bigger. */
1170 if (size <= sizeof(struct ovs_key_ipv6)) {
1171 struct nlattr attr[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1172 sizeof(struct nlattr))];
1173 struct nlattr mask[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1174 sizeof(struct nlattr))];
1175
1176 mask->nla_type = attr->nla_type = nl_attr_type(a);
1177 mask->nla_len = attr->nla_len = NLA_HDRLEN + size;
1178 memcpy(attr + 1, (char *)(a + 1), size);
1179 memcpy(mask + 1, (char *)(a + 1) + size, size);
1180 format_odp_key_attr(attr, mask, NULL, ds, false);
1181 } else {
1182 format_odp_key_attr(a, NULL, NULL, ds, false);
1183 }
1184 ds_put_cstr(ds, ")");
1185 break;
1186 case OVS_ACTION_ATTR_SET:
1187 ds_put_cstr(ds, "set(");
1188 format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
1189 ds_put_cstr(ds, ")");
1190 break;
1191 case OVS_ACTION_ATTR_PUSH_ETH: {
1192 const struct ovs_action_push_eth *eth = nl_attr_get(a);
1193 ds_put_format(ds, "push_eth(src="ETH_ADDR_FMT",dst="ETH_ADDR_FMT")",
1194 ETH_ADDR_ARGS(eth->addresses.eth_src),
1195 ETH_ADDR_ARGS(eth->addresses.eth_dst));
1196 break;
1197 }
1198 case OVS_ACTION_ATTR_POP_ETH:
1199 ds_put_cstr(ds, "pop_eth");
1200 break;
1201 case OVS_ACTION_ATTR_PUSH_VLAN: {
1202 const struct ovs_action_push_vlan *vlan = nl_attr_get(a);
1203 ds_put_cstr(ds, "push_vlan(");
1204 if (vlan->vlan_tpid != htons(ETH_TYPE_VLAN)) {
1205 ds_put_format(ds, "tpid=0x%04"PRIx16",", ntohs(vlan->vlan_tpid));
1206 }
1207 format_vlan_tci(ds, vlan->vlan_tci, OVS_BE16_MAX, false);
1208 ds_put_char(ds, ')');
1209 break;
1210 }
1211 case OVS_ACTION_ATTR_POP_VLAN:
1212 ds_put_cstr(ds, "pop_vlan");
1213 break;
1214 case OVS_ACTION_ATTR_PUSH_MPLS: {
1215 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
1216 ds_put_cstr(ds, "push_mpls(");
1217 format_mpls_lse(ds, mpls->mpls_lse);
1218 ds_put_format(ds, ",eth_type=0x%"PRIx16")", ntohs(mpls->mpls_ethertype));
1219 break;
1220 }
1221 case OVS_ACTION_ATTR_POP_MPLS: {
1222 ovs_be16 ethertype = nl_attr_get_be16(a);
1223 ds_put_format(ds, "pop_mpls(eth_type=0x%"PRIx16")", ntohs(ethertype));
1224 break;
1225 }
1226 case OVS_ACTION_ATTR_SAMPLE:
1227 format_odp_sample_action(ds, a, portno_names);
1228 break;
1229 case OVS_ACTION_ATTR_CT:
1230 format_odp_conntrack_action(ds, a);
1231 break;
1232 case OVS_ACTION_ATTR_CT_CLEAR:
1233 ds_put_cstr(ds, "ct_clear");
1234 break;
1235 case OVS_ACTION_ATTR_CLONE:
1236 format_odp_clone_action(ds, a, portno_names);
1237 break;
1238 case OVS_ACTION_ATTR_PUSH_NSH: {
1239 uint32_t buffer[NSH_HDR_MAX_LEN / 4];
1240 struct nsh_hdr *nsh_hdr = ALIGNED_CAST(struct nsh_hdr *, buffer);
1241 nsh_reset_ver_flags_ttl_len(nsh_hdr);
1242 odp_nsh_hdr_from_attr(nl_attr_get(a), nsh_hdr, NSH_HDR_MAX_LEN);
1243 format_odp_push_nsh_action(ds, nsh_hdr);
1244 break;
1245 }
1246 case OVS_ACTION_ATTR_POP_NSH:
1247 ds_put_cstr(ds, "pop_nsh()");
1248 break;
1249 case OVS_ACTION_ATTR_CHECK_PKT_LEN:
1250 format_odp_check_pkt_len_action(ds, a, portno_names);
1251 break;
1252 case OVS_ACTION_ATTR_DROP:
1253 ds_put_cstr(ds, "drop");
1254 break;
1255 case OVS_ACTION_ATTR_UNSPEC:
1256 case __OVS_ACTION_ATTR_MAX:
1257 default:
1258 format_generic_odp_action(ds, a);
1259 break;
1260 }
1261 }
1262
1263 void
1264 format_odp_actions(struct ds *ds, const struct nlattr *actions,
1265 size_t actions_len, const struct hmap *portno_names)
1266 {
1267 if (actions_len) {
1268 const struct nlattr *a;
1269 unsigned int left;
1270
1271 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
1272 if (a != actions) {
1273 ds_put_char(ds, ',');
1274 }
1275 format_odp_action(ds, a, portno_names);
1276 }
1277 if (left) {
1278 int i;
1279
1280 if (left == actions_len) {
1281 ds_put_cstr(ds, "<empty>");
1282 }
1283 ds_put_format(ds, ",***%u leftover bytes*** (", left);
1284 for (i = 0; i < left; i++) {
1285 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
1286 }
1287 ds_put_char(ds, ')');
1288 }
1289 } else {
1290 ds_put_cstr(ds, "drop");
1291 }
1292 }
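/* Usage sketch (a minimal example, assuming the usual ofpbuf/dynamic-string
 * helpers declared in the headers included above):
 *
 *     struct ofpbuf acts;
 *     struct ds s = DS_EMPTY_INITIALIZER;
 *
 *     ofpbuf_init(&acts, 0);
 *     nl_msg_put_u32(&acts, OVS_ACTION_ATTR_RECIRC, 0x12);
 *     nl_msg_put_u32(&acts, OVS_ACTION_ATTR_OUTPUT, 3);
 *
 *     format_odp_actions(&s, acts.data, acts.size, NULL);
 *     // ds_cstr(&s) should now read "recirc(0x12),3".
 *
 *     ds_destroy(&s);
 *     ofpbuf_uninit(&acts);
 */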
1293
1294 /* Parses a userspace() action from 's', appending it to 'actions'. Returns the
1295 * number of bytes of 's' that were parsed on success, otherwise a negative errno value. */
1295 static int
1296 parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
1297 {
1298 uint32_t pid;
1299 struct user_action_cookie cookie;
1300 struct ofpbuf buf;
1301 odp_port_t tunnel_out_port;
1302 int n = -1;
1303 void *user_data = NULL;
1304 size_t user_data_size = 0;
1305 bool include_actions = false;
1306 int res;
1307
1308 if (!ovs_scan(s, "userspace(pid=%"SCNi32"%n", &pid, &n)) {
1309 return -EINVAL;
1310 }
1311
1312 ofpbuf_init(&buf, 16);
1313 memset(&cookie, 0, sizeof cookie);
1314
1315 user_data = &cookie;
1316 user_data_size = sizeof cookie;
1317 {
1318 uint32_t output;
1319 uint32_t probability;
1320 uint32_t collector_set_id;
1321 uint32_t obs_domain_id;
1322 uint32_t obs_point_id;
1323
1324 /* USER_ACTION_COOKIE_CONTROLLER. */
1325 uint8_t dont_send;
1326 uint8_t continuation;
1327 uint16_t reason;
1328 uint32_t recirc_id;
1329 uint64_t rule_cookie;
1330 uint16_t controller_id;
1331 uint16_t max_len;
1332
1333 int vid, pcp;
1334 int n1 = -1;
1335 if (ovs_scan(&s[n], ",sFlow(vid=%i,"
1336 "pcp=%i,output=%"SCNi32")%n",
1337 &vid, &pcp, &output, &n1)) {
1338 uint16_t tci;
1339
1340 n += n1;
1341 tci = vid | (pcp << VLAN_PCP_SHIFT);
1342 if (tci) {
1343 tci |= VLAN_CFI;
1344 }
1345
1346 cookie.type = USER_ACTION_COOKIE_SFLOW;
1347 cookie.ofp_in_port = OFPP_NONE;
1348 cookie.ofproto_uuid = UUID_ZERO;
1349 cookie.sflow.vlan_tci = htons(tci);
1350 cookie.sflow.output = output;
1351 } else if (ovs_scan(&s[n], ",slow_path(%n",
1352 &n1)) {
1353 n += n1;
1354 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
1355 cookie.ofp_in_port = OFPP_NONE;
1356 cookie.ofproto_uuid = UUID_ZERO;
1357 cookie.slow_path.reason = 0;
1358
1359 res = parse_odp_flags(&s[n], slow_path_reason_to_string,
1360 &cookie.slow_path.reason,
1361 SLOW_PATH_REASON_MASK, NULL);
1362 if (res < 0 || s[n + res] != ')') {
1363 goto out;
1364 }
1365 n += res + 1;
1366 } else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
1367 "collector_set_id=%"SCNi32","
1368 "obs_domain_id=%"SCNi32","
1369 "obs_point_id=%"SCNi32","
1370 "output_port=%"SCNi32"%n",
1371 &probability, &collector_set_id,
1372 &obs_domain_id, &obs_point_id,
1373 &output, &n1)) {
1374 n += n1;
1375
1376 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
1377 cookie.ofp_in_port = OFPP_NONE;
1378 cookie.ofproto_uuid = UUID_ZERO;
1379 cookie.flow_sample.probability = probability;
1380 cookie.flow_sample.collector_set_id = collector_set_id;
1381 cookie.flow_sample.obs_domain_id = obs_domain_id;
1382 cookie.flow_sample.obs_point_id = obs_point_id;
1383 cookie.flow_sample.output_odp_port = u32_to_odp(output);
1384
1385 if (ovs_scan(&s[n], ",ingress%n", &n1)) {
1386 cookie.flow_sample.direction = NX_ACTION_SAMPLE_INGRESS;
1387 n += n1;
1388 } else if (ovs_scan(&s[n], ",egress%n", &n1)) {
1389 cookie.flow_sample.direction = NX_ACTION_SAMPLE_EGRESS;
1390 n += n1;
1391 } else {
1392 cookie.flow_sample.direction = NX_ACTION_SAMPLE_DEFAULT;
1393 }
1394 if (s[n] != ')') {
1395 res = -EINVAL;
1396 goto out;
1397 }
1398 n++;
1399 } else if (ovs_scan(&s[n], ",ipfix(output_port=%"SCNi32")%n",
1400 &output, &n1) ) {
1401 n += n1;
1402 cookie.type = USER_ACTION_COOKIE_IPFIX;
1403 cookie.ofp_in_port = OFPP_NONE;
1404 cookie.ofproto_uuid = UUID_ZERO;
1405 cookie.ipfix.output_odp_port = u32_to_odp(output);
1406 } else if (ovs_scan(&s[n], ",controller(reason=%"SCNu16
1407 ",dont_send=%"SCNu8
1408 ",continuation=%"SCNu8
1409 ",recirc_id=%"SCNu32
1410 ",rule_cookie=%"SCNx64
1411 ",controller_id=%"SCNu16
1412 ",max_len=%"SCNu16")%n",
1413 &reason, &dont_send, &continuation, &recirc_id,
1414 &rule_cookie, &controller_id, &max_len, &n1)) {
1415 n += n1;
1416 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
1417 cookie.ofp_in_port = OFPP_NONE;
1418 cookie.ofproto_uuid = UUID_ZERO;
1419 cookie.controller.dont_send = dont_send ? true : false;
1420 cookie.controller.continuation = continuation ? true : false;
1421 cookie.controller.reason = reason;
1422 cookie.controller.recirc_id = recirc_id;
1423 put_32aligned_be64(&cookie.controller.rule_cookie,
1424 htonll(rule_cookie));
1425 cookie.controller.controller_id = controller_id;
1426 cookie.controller.max_len = max_len;
1427 } else if (ovs_scan(&s[n], ",userdata(%n", &n1)) {
1428 char *end;
1429
1430 n += n1;
1431 end = ofpbuf_put_hex(&buf, &s[n], NULL);
1432 if (end[0] != ')') {
1433 res = -EINVAL;
1434 goto out;
1435 }
1436 user_data = buf.data;
1437 user_data_size = buf.size;
1438 n = (end + 1) - s;
1439 }
1440 }
1441
1442 {
1443 int n1 = -1;
1444 if (ovs_scan(&s[n], ",actions%n", &n1)) {
1445 n += n1;
1446 include_actions = true;
1447 }
1448 }
1449
1450 {
1451 int n1 = -1;
1452 if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
1453 &tunnel_out_port, &n1)) {
1454 odp_put_userspace_action(pid, user_data, user_data_size,
1455 tunnel_out_port, include_actions, actions);
1456 res = n + n1;
1457 goto out;
1458 } else if (s[n] == ')') {
1459 odp_put_userspace_action(pid, user_data, user_data_size,
1460 ODPP_NONE, include_actions, actions);
1461 res = n + 1;
1462 goto out;
1463 }
1464 }
1465
1466 {
1467 struct ovs_action_push_eth push;
1468 int eth_type = 0;
1469 int n1 = -1;
1470
1471 if (ovs_scan(&s[n], "push_eth(src="ETH_ADDR_SCAN_FMT","
1472 "dst="ETH_ADDR_SCAN_FMT",type=%i)%n",
1473 ETH_ADDR_SCAN_ARGS(push.addresses.eth_src),
1474 ETH_ADDR_SCAN_ARGS(push.addresses.eth_dst),
1475 &eth_type, &n1)) {
1476
1477 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_ETH,
1478 &push, sizeof push);
1479
1480 res = n + n1;
1481 goto out;
1482 }
1483 }
1484
1485 if (!strncmp(&s[n], "pop_eth", 7)) {
1486 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_ETH);
1487 res = 7;
1488 goto out;
1489 }
1490
1491 res = -EINVAL;
1492 out:
1493 ofpbuf_uninit(&buf);
1494 return res;
1495 }
1496
1497 static int
1498 ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
1499 {
1500 struct eth_header *eth;
1501 struct ip_header *ip;
1502 struct ovs_16aligned_ip6_hdr *ip6;
1503 struct udp_header *udp;
1504 struct gre_base_hdr *greh;
1505 struct erspan_base_hdr *ersh;
1506 struct erspan_md2 *md2;
1507 uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, udp_csum, sid;
1508 ovs_be32 sip, dip;
1509 uint32_t tnl_type = 0, header_len = 0, ip_len = 0, erspan_idx = 0;
1510 void *l3, *l4;
1511 int n = 0;
1512 uint8_t hwid, dir;
1513 uint32_t teid;
1514 uint8_t gtpu_flags, gtpu_msgtype;
1515
1516 if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
1517 return -EINVAL;
1518 }
1519 eth = (struct eth_header *) data->header;
1520 l3 = (struct ip_header *) (eth + 1);
1521 ip = (struct ip_header *) l3;
1522 ip6 = (struct ovs_16aligned_ip6_hdr *) l3;
1523 if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
1524 "eth(dst="ETH_ADDR_SCAN_FMT",",
1525 &data->header_len,
1526 &data->tnl_type,
1527 ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
1528 return -EINVAL;
1529 }
1530
1531 if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
1532 ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
1533 return -EINVAL;
1534 }
1535 if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
1536 return -EINVAL;
1537 }
1538 eth->eth_type = htons(dl_type);
1539
1540 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1541 /* IPv4 */
1542 uint16_t ip_frag_off;
1543 memset(ip, 0, sizeof(*ip));
1544 if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
1545 ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
1546 IP_SCAN_ARGS(&sip),
1547 IP_SCAN_ARGS(&dip),
1548 &ip->ip_proto, &ip->ip_tos,
1549 &ip->ip_ttl, &ip_frag_off)) {
1550 return -EINVAL;
1551 }
1552 put_16aligned_be32(&ip->ip_src, sip);
1553 put_16aligned_be32(&ip->ip_dst, dip);
1554 ip->ip_frag_off = htons(ip_frag_off);
1555 ip->ip_ihl_ver = IP_IHL_VER(5, 4);
1556 ip_len = sizeof *ip;
1557 ip->ip_csum = csum(ip, ip_len);
1558 } else {
1559 char sip6_s[IPV6_SCAN_LEN + 1];
1560 char dip6_s[IPV6_SCAN_LEN + 1];
1561 struct in6_addr sip6, dip6;
1562 uint8_t tclass;
1563 uint32_t label;
1564 if (!ovs_scan_len(s, &n, "ipv6(src="IPV6_SCAN_FMT",dst="IPV6_SCAN_FMT
1565 ",label=%i,proto=%"SCNi8",tclass=0x%"SCNx8
1566 ",hlimit=%"SCNi8"),",
1567 sip6_s, dip6_s, &label, &ip6->ip6_nxt,
1568 &tclass, &ip6->ip6_hlim)
1569 || (label & ~IPV6_LABEL_MASK) != 0
1570 || inet_pton(AF_INET6, sip6_s, &sip6) != 1
1571 || inet_pton(AF_INET6, dip6_s, &dip6) != 1) {
1572 return -EINVAL;
1573 }
1574 put_16aligned_be32(&ip6->ip6_flow, htonl(6 << 28) |
1575 htonl(tclass << 20) | htonl(label));
1576 memcpy(&ip6->ip6_src, &sip6, sizeof(ip6->ip6_src));
1577 memcpy(&ip6->ip6_dst, &dip6, sizeof(ip6->ip6_dst));
1578 ip_len = sizeof *ip6;
1579 }
1580
1581 /* Tunnel header */
1582 l4 = ((uint8_t *) l3 + ip_len);
1583 udp = (struct udp_header *) l4;
1584 greh = (struct gre_base_hdr *) l4;
1585 if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
1586 &udp_src, &udp_dst, &udp_csum)) {
1587 uint32_t vx_flags, vni;
1588
1589 udp->udp_src = htons(udp_src);
1590 udp->udp_dst = htons(udp_dst);
1591 udp->udp_len = 0;
1592 udp->udp_csum = htons(udp_csum);
1593
1594 if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
1595 &vx_flags, &vni)) {
1596 struct vxlanhdr *vxh = (struct vxlanhdr *) (udp + 1);
1597
1598 put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
1599 put_16aligned_be32(&vxh->vx_vni, htonl(vni << 8));
1600 tnl_type = OVS_VPORT_TYPE_VXLAN;
1601 header_len = sizeof *eth + ip_len +
1602 sizeof *udp + sizeof *vxh;
1603 } else if (ovs_scan_len(s, &n, "geneve(")) {
1604 struct genevehdr *gnh = (struct genevehdr *) (udp + 1);
1605
1606 memset(gnh, 0, sizeof *gnh);
1607 header_len = sizeof *eth + ip_len +
1608 sizeof *udp + sizeof *gnh;
1609
1610 if (ovs_scan_len(s, &n, "oam,")) {
1611 gnh->oam = 1;
1612 }
1613 if (ovs_scan_len(s, &n, "crit,")) {
1614 gnh->critical = 1;
1615 }
1616 if (!ovs_scan_len(s, &n, "vni=%"SCNi32, &vni)) {
1617 return -EINVAL;
1618 }
1619 if (ovs_scan_len(s, &n, ",options(")) {
1620 struct geneve_scan options;
1621 int len;
1622
1623 memset(&options, 0, sizeof options);
1624 len = scan_geneve(s + n, &options, NULL);
1625 if (!len) {
1626 return -EINVAL;
1627 }
1628
1629 memcpy(gnh->options, options.d, options.len);
1630 gnh->opt_len = options.len / 4;
1631 header_len += options.len;
1632
1633 n += len;
1634 }
1635 if (!ovs_scan_len(s, &n, "))")) {
1636 return -EINVAL;
1637 }
1638
1639 gnh->proto_type = htons(ETH_TYPE_TEB);
1640 put_16aligned_be32(&gnh->vni, htonl(vni << 8));
1641 tnl_type = OVS_VPORT_TYPE_GENEVE;
1642 } else {
1643 return -EINVAL;
1644 }
1645 } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
1646 &gre_flags, &gre_proto)){
1647
1648 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1649 tnl_type = OVS_VPORT_TYPE_GRE;
1650 } else {
1651 tnl_type = OVS_VPORT_TYPE_IP6GRE;
1652 }
1653 greh->flags = htons(gre_flags);
1654 greh->protocol = htons(gre_proto);
1655 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
1656
1657 if (greh->flags & htons(GRE_CSUM)) {
1658 uint16_t csum;
1659 if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
1660 return -EINVAL;
1661 }
1662
1663 memset(options, 0, sizeof *options);
1664 *((ovs_be16 *)options) = htons(csum);
1665 options++;
1666 }
1667 if (greh->flags & htons(GRE_KEY)) {
1668 uint32_t key;
1669
1670 if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
1671 return -EINVAL;
1672 }
1673
1674 put_16aligned_be32(options, htonl(key));
1675 options++;
1676 }
1677 if (greh->flags & htons(GRE_SEQ)) {
1678 uint32_t seq;
1679
1680 if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
1681 return -EINVAL;
1682 }
1683 put_16aligned_be32(options, htonl(seq));
1684 options++;
1685 }
1686
1687 if (!ovs_scan_len(s, &n, "))")) {
1688 return -EINVAL;
1689 }
1690
1691 header_len = sizeof *eth + ip_len +
1692 ((uint8_t *) options - (uint8_t *) greh);
1693 } else if (ovs_scan_len(s, &n, "erspan(ver=1,sid=0x%"SCNx16",idx=0x%"SCNx32")",
1694 &sid, &erspan_idx)) {
1695 ersh = ERSPAN_HDR(greh);
1696 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
1697 ersh + 1);
1698
1699 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1700 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1701 } else {
1702 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1703 }
1704
1705 greh->flags = htons(GRE_SEQ);
1706 greh->protocol = htons(ETH_TYPE_ERSPAN1);
1707
1708 ersh->ver = 1;
1709 set_sid(ersh, sid);
1710 put_16aligned_be32(index, htonl(erspan_idx));
1711
1712 if (!ovs_scan_len(s, &n, ")")) {
1713 return -EINVAL;
1714 }
1715 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1716 sizeof *ersh + ERSPAN_V1_MDSIZE;
1717
1718 } else if (ovs_scan_len(s, &n, "erspan(ver=2,sid=0x%"SCNx16",dir=%"SCNu8
1719 ",hwid=0x%"SCNx8")", &sid, &dir, &hwid)) {
1720
1721 ersh = ERSPAN_HDR(greh);
1722 md2 = ALIGNED_CAST(struct erspan_md2 *, ersh + 1);
1723
1724 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1725 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1726 } else {
1727 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1728 }
1729
1730 greh->flags = htons(GRE_SEQ);
1731 greh->protocol = htons(ETH_TYPE_ERSPAN2);
1732
1733 ersh->ver = 2;
1734 set_sid(ersh, sid);
1735 set_hwid(md2, hwid);
1736 md2->dir = dir;
1737
1738 if (!ovs_scan_len(s, &n, ")")) {
1739 return -EINVAL;
1740 }
1741
1742 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1743 sizeof *ersh + ERSPAN_V2_MDSIZE;
1744
1745 } else if (ovs_scan_len(s, &n, "gtpu(flags=%"SCNi8",msgtype=%"
1746 SCNu8",teid=0x%"SCNx32"))",
1747 &gtpu_flags, &gtpu_msgtype, &teid)) {
1748 struct gtpuhdr *gtph = (struct gtpuhdr *) (udp + 1);
1749
1750 gtph->md.flags = gtpu_flags;
1751 gtph->md.msgtype = gtpu_msgtype;
1752 put_16aligned_be32(&gtph->teid, htonl(teid));
1753 tnl_type = OVS_VPORT_TYPE_GTPU;
1754 header_len = sizeof *eth + ip_len +
1755 sizeof *udp + sizeof *gtph;
1756 } else {
1757 return -EINVAL;
1758 }
1759
1760 /* Check that the scanned header matches the tunnel metadata in 'data'. */
1761 if (data->tnl_type != tnl_type) {
1762 return -EINVAL;
1763 }
1764 if (data->header_len != header_len) {
1765 return -EINVAL;
1766 }
1767
1768 /* Out port */
1769 if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
1770 return -EINVAL;
1771 }
1772
1773 return n;
1774 }
1775
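/* Example (illustrative; field values are made up): the parsing code above
 * accepts GRE sub-headers such as
 *     gre((flags=0x2000,proto=0x6558),key=0x1e241)
 * and ERSPAN v1 sub-headers such as
 *     erspan(ver=1,sid=0x1,idx=0x7)
 * inside a tnl_push header specification, followed by ",out_port(N))". */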
1776 struct ct_nat_params {
1777 bool snat;
1778 bool dnat;
1779 size_t addr_len;
1780 union {
1781 ovs_be32 ip;
1782 struct in6_addr ip6;
1783 } addr_min;
1784 union {
1785 ovs_be32 ip;
1786 struct in6_addr ip6;
1787 } addr_max;
1788 uint16_t proto_min;
1789 uint16_t proto_max;
1790 bool persistent;
1791 bool proto_hash;
1792 bool proto_random;
1793 };
1794
1795 static int
1796 scan_ct_nat_range(const char *s, int *n, struct ct_nat_params *p)
1797 {
1798 if (ovs_scan_len(s, n, "=")) {
1799 char ipv6_s[IPV6_SCAN_LEN + 1];
1800 struct in6_addr ipv6;
1801
1802 if (ovs_scan_len(s, n, IP_SCAN_FMT, IP_SCAN_ARGS(&p->addr_min.ip))) {
1803 p->addr_len = sizeof p->addr_min.ip;
1804 if (ovs_scan_len(s, n, "-")) {
1805 if (!ovs_scan_len(s, n, IP_SCAN_FMT,
1806 IP_SCAN_ARGS(&p->addr_max.ip))) {
1807 return -EINVAL;
1808 }
1809 }
1810 } else if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1811 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1812 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1813 p->addr_len = sizeof p->addr_min.ip6;
1814 p->addr_min.ip6 = ipv6;
1815 if (ovs_scan_len(s, n, "-")) {
1816 if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1817 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1818 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1819 p->addr_max.ip6 = ipv6;
1820 } else {
1821 return -EINVAL;
1822 }
1823 }
1824 } else {
1825 return -EINVAL;
1826 }
1827 if (ovs_scan_len(s, n, ":%"SCNu16, &p->proto_min)) {
1828 if (ovs_scan_len(s, n, "-")) {
1829 if (!ovs_scan_len(s, n, "%"SCNu16, &p->proto_max)) {
1830 return -EINVAL;
1831 }
1832 }
1833 }
1834 }
1835 return 0;
1836 }
1837
1838 static int
1839 scan_ct_nat(const char *s, struct ct_nat_params *p)
1840 {
1841 int n = 0;
1842
1843 if (ovs_scan_len(s, &n, "nat")) {
1844 memset(p, 0, sizeof *p);
1845
1846 if (ovs_scan_len(s, &n, "(")) {
1847 char *end;
1848 int end_n;
1849
1850 end = strchr(s + n, ')');
1851 if (!end) {
1852 return -EINVAL;
1853 }
1854 end_n = end - s;
1855
1856 while (n < end_n) {
1857 n += strspn(s + n, delimiters);
1858 if (ovs_scan_len(s, &n, "src")) {
1859 int err = scan_ct_nat_range(s, &n, p);
1860 if (err) {
1861 return err;
1862 }
1863 p->snat = true;
1864 continue;
1865 }
1866 if (ovs_scan_len(s, &n, "dst")) {
1867 int err = scan_ct_nat_range(s, &n, p);
1868 if (err) {
1869 return err;
1870 }
1871 p->dnat = true;
1872 continue;
1873 }
1874 if (ovs_scan_len(s, &n, "persistent")) {
1875 p->persistent = true;
1876 continue;
1877 }
1878 if (ovs_scan_len(s, &n, "hash")) {
1879 p->proto_hash = true;
1880 continue;
1881 }
1882 if (ovs_scan_len(s, &n, "random")) {
1883 p->proto_random = true;
1884 continue;
1885 }
1886 return -EINVAL;
1887 }
1888
1889 if (p->snat && p->dnat) {
1890 return -EINVAL;
1891 }
1892 if ((p->addr_len != 0 &&
1893 memcmp(&p->addr_max, &in6addr_any, p->addr_len) &&
1894 memcmp(&p->addr_max, &p->addr_min, p->addr_len) < 0) ||
1895 (p->proto_max && p->proto_max < p->proto_min)) {
1896 return -EINVAL;
1897 }
1898 if (p->proto_hash && p->proto_random) {
1899 return -EINVAL;
1900 }
1901 n++;
1902 }
1903 }
1904 return n;
1905 }
1906
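/* Example (illustrative; addresses and ports are made up): scan_ct_nat()
 * above accepts a bare "nat" or a parenthesized clause such as
 *     nat(src=10.0.0.240-10.0.0.254:32768-65535,persistent)
 * with optional address/port ranges and the persistent/hash/random flags. */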
1907 static void
1908 nl_msg_put_ct_nat(struct ct_nat_params *p, struct ofpbuf *actions)
1909 {
1910 size_t start = nl_msg_start_nested(actions, OVS_CT_ATTR_NAT);
1911
1912 if (p->snat) {
1913 nl_msg_put_flag(actions, OVS_NAT_ATTR_SRC);
1914 } else if (p->dnat) {
1915 nl_msg_put_flag(actions, OVS_NAT_ATTR_DST);
1916 } else {
1917 goto out;
1918 }
1919 if (p->addr_len != 0) {
1920 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MIN, &p->addr_min,
1921 p->addr_len);
1922 if (memcmp(&p->addr_max, &p->addr_min, p->addr_len) > 0) {
1923 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MAX, &p->addr_max,
1924 p->addr_len);
1925 }
1926 if (p->proto_min) {
1927 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MIN, p->proto_min);
1928 if (p->proto_max && p->proto_max > p->proto_min) {
1929 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MAX, p->proto_max);
1930 }
1931 }
1932 if (p->persistent) {
1933 nl_msg_put_flag(actions, OVS_NAT_ATTR_PERSISTENT);
1934 }
1935 if (p->proto_hash) {
1936 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_HASH);
1937 }
1938 if (p->proto_random) {
1939 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_RANDOM);
1940 }
1941 }
1942 out:
1943 nl_msg_end_nested(actions, start);
1944 }
1945
1946 static int
1947 parse_conntrack_action(const char *s_, struct ofpbuf *actions)
1948 {
1949 const char *s = s_;
1950
1951 if (ovs_scan(s, "ct")) {
1952 const char *helper = NULL, *timeout = NULL;
1953 size_t helper_len = 0, timeout_len = 0;
1954 bool commit = false;
1955 bool force_commit = false;
1956 uint16_t zone = 0;
1957 struct {
1958 uint32_t value;
1959 uint32_t mask;
1960 } ct_mark = { 0, 0 };
1961 struct {
1962 ovs_u128 value;
1963 ovs_u128 mask;
1964 } ct_label;
1965 struct ct_nat_params nat_params;
1966 bool have_nat = false;
1967 size_t start;
1968 char *end;
1969
1970 memset(&ct_label, 0, sizeof(ct_label));
1971
1972 s += 2;
1973 if (ovs_scan(s, "(")) {
1974 s++;
1975 find_end:
1976 end = strchr(s, ')');
1977 if (!end) {
1978 return -EINVAL;
1979 }
1980
1981 while (s != end) {
1982 int n;
1983
1984 s += strspn(s, delimiters);
1985 if (ovs_scan(s, "commit%n", &n)) {
1986 commit = true;
1987 s += n;
1988 continue;
1989 }
1990 if (ovs_scan(s, "force_commit%n", &n)) {
1991 force_commit = true;
1992 s += n;
1993 continue;
1994 }
1995 if (ovs_scan(s, "zone=%"SCNu16"%n", &zone, &n)) {
1996 s += n;
1997 continue;
1998 }
1999 if (ovs_scan(s, "mark=%"SCNx32"%n", &ct_mark.value, &n)) {
2000 s += n;
2001 n = -1;
2002 if (ovs_scan(s, "/%"SCNx32"%n", &ct_mark.mask, &n)) {
2003 s += n;
2004 } else {
2005 ct_mark.mask = UINT32_MAX;
2006 }
2007 continue;
2008 }
2009 if (ovs_scan(s, "label=%n", &n)) {
2010 int retval;
2011
2012 s += n;
2013 retval = scan_u128(s, &ct_label.value, &ct_label.mask);
2014 if (retval == 0) {
2015 return -EINVAL;
2016 }
2017 s += retval;
2018 continue;
2019 }
2020 if (ovs_scan(s, "helper=%n", &n)) {
2021 s += n;
2022 helper_len = strcspn(s, delimiters_end);
2023 if (!helper_len || helper_len > 15) {
2024 return -EINVAL;
2025 }
2026 helper = s;
2027 s += helper_len;
2028 continue;
2029 }
2030 if (ovs_scan(s, "timeout=%n", &n)) {
2031 s += n;
2032 timeout_len = strcspn(s, delimiters_end);
2033 if (!timeout_len || timeout_len > 31) {
2034 return -EINVAL;
2035 }
2036 timeout = s;
2037 s += timeout_len;
2038 continue;
2039 }
2040
2041 n = scan_ct_nat(s, &nat_params);
2042 if (n > 0) {
2043 s += n;
2044 have_nat = true;
2045
2046 /* 'end' points to the end of the nested nat action;
2047 * find the real end of the enclosing ct action. */
2048 goto find_end;
2049 }
2050 /* Nothing matched. */
2051 return -EINVAL;
2052 }
2053 s++;
2054 }
2055 if (commit && force_commit) {
2056 return -EINVAL;
2057 }
2058
2059 start = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CT);
2060 if (commit) {
2061 nl_msg_put_flag(actions, OVS_CT_ATTR_COMMIT);
2062 } else if (force_commit) {
2063 nl_msg_put_flag(actions, OVS_CT_ATTR_FORCE_COMMIT);
2064 }
2065 if (zone) {
2066 nl_msg_put_u16(actions, OVS_CT_ATTR_ZONE, zone);
2067 }
2068 if (ct_mark.mask) {
2069 nl_msg_put_unspec(actions, OVS_CT_ATTR_MARK, &ct_mark,
2070 sizeof(ct_mark));
2071 }
2072 if (!ovs_u128_is_zero(ct_label.mask)) {
2073 nl_msg_put_unspec(actions, OVS_CT_ATTR_LABELS, &ct_label,
2074 sizeof ct_label);
2075 }
2076 if (helper) {
2077 nl_msg_put_string__(actions, OVS_CT_ATTR_HELPER, helper,
2078 helper_len);
2079 }
2080 if (timeout) {
2081 nl_msg_put_string__(actions, OVS_CT_ATTR_TIMEOUT, timeout,
2082 timeout_len);
2083 }
2084 if (have_nat) {
2085 nl_msg_put_ct_nat(&nat_params, actions);
2086 }
2087 nl_msg_end_nested(actions, start);
2088 }
2089
2090 return s - s_;
2091 }
2092
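/* Example (illustrative; values are made up): parse_conntrack_action() above
 * handles a bare "ct" as well as strings such as
 *     ct(commit,zone=1,mark=0x1/0xff,helper=ftp,nat(src=10.0.0.1))
 * where every keyword inside the parentheses is optional. */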
2093 static void
2094 nsh_key_to_attr(struct ofpbuf *buf, const struct ovs_key_nsh *nsh,
2095 uint8_t * metadata, size_t md_size,
2096 bool is_mask)
2097 {
2098 size_t nsh_key_ofs;
2099 struct ovs_nsh_key_base base;
2100
2101 base.flags = nsh->flags;
2102 base.ttl = nsh->ttl;
2103 base.mdtype = nsh->mdtype;
2104 base.np = nsh->np;
2105 base.path_hdr = nsh->path_hdr;
2106
2107 nsh_key_ofs = nl_msg_start_nested(buf, OVS_KEY_ATTR_NSH);
2108 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_BASE, &base, sizeof base);
2109
2110 if (is_mask) {
2111 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2112 sizeof nsh->context);
2113 } else {
2114 switch (nsh->mdtype) {
2115 case NSH_M_TYPE1:
2116 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2117 sizeof nsh->context);
2118 break;
2119 case NSH_M_TYPE2:
2120 if (metadata && md_size > 0) {
2121 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD2, metadata,
2122 md_size);
2123 }
2124 break;
2125 default:
2126 /* No match support for other MD formats yet. */
2127 break;
2128 }
2129 }
2130 nl_msg_end_nested(buf, nsh_key_ofs);
2131 }
2132
2133
2134 static int
2135 parse_odp_push_nsh_action(const char *s, struct ofpbuf *actions)
2136 {
2137 int n = 0;
2138 int ret = 0;
2139 uint32_t spi = 0;
2140 uint8_t si = 255;
2141 uint32_t cd;
2142 struct ovs_key_nsh nsh;
2143 uint8_t metadata[NSH_CTX_HDRS_MAX_LEN];
2144 uint8_t md_size = 0;
2145
2146 if (!ovs_scan_len(s, &n, "push_nsh(")) {
2147 ret = -EINVAL;
2148 goto out;
2149 }
2150
2151 /* The default is NSH_M_TYPE1 */
2152 nsh.flags = 0;
2153 nsh.ttl = 63;
2154 nsh.mdtype = NSH_M_TYPE1;
2155 nsh.np = NSH_P_ETHERNET;
2156 nsh.path_hdr = nsh_spi_si_to_path_hdr(0, 255);
2157 memset(nsh.context, 0, NSH_M_TYPE1_MDLEN);
2158
2159 for (;;) {
2160 n += strspn(s + n, delimiters);
2161 if (s[n] == ')') {
2162 break;
2163 }
2164
2165 if (ovs_scan_len(s, &n, "flags=%"SCNi8, &nsh.flags)) {
2166 continue;
2167 }
2168 if (ovs_scan_len(s, &n, "ttl=%"SCNi8, &nsh.ttl)) {
2169 continue;
2170 }
2171 if (ovs_scan_len(s, &n, "mdtype=%"SCNi8, &nsh.mdtype)) {
2172 switch (nsh.mdtype) {
2173 case NSH_M_TYPE1:
2174 /* This is the default format. */;
2175 break;
2176 case NSH_M_TYPE2:
2177 /* Length will be updated later. */
2178 md_size = 0;
2179 break;
2180 default:
2181 ret = -EINVAL;
2182 goto out;
2183 }
2184 continue;
2185 }
2186 if (ovs_scan_len(s, &n, "np=%"SCNi8, &nsh.np)) {
2187 continue;
2188 }
2189 if (ovs_scan_len(s, &n, "spi=0x%"SCNx32, &spi)) {
2190 continue;
2191 }
2192 if (ovs_scan_len(s, &n, "si=%"SCNi8, &si)) {
2193 continue;
2194 }
2195 if (nsh.mdtype == NSH_M_TYPE1) {
2196 if (ovs_scan_len(s, &n, "c1=0x%"SCNx32, &cd)) {
2197 nsh.context[0] = htonl(cd);
2198 continue;
2199 }
2200 if (ovs_scan_len(s, &n, "c2=0x%"SCNx32, &cd)) {
2201 nsh.context[1] = htonl(cd);
2202 continue;
2203 }
2204 if (ovs_scan_len(s, &n, "c3=0x%"SCNx32, &cd)) {
2205 nsh.context[2] = htonl(cd);
2206 continue;
2207 }
2208 if (ovs_scan_len(s, &n, "c4=0x%"SCNx32, &cd)) {
2209 nsh.context[3] = htonl(cd);
2210 continue;
2211 }
2212 }
2213 else if (nsh.mdtype == NSH_M_TYPE2) {
2214 struct ofpbuf b;
2215 char buf[512];
2216 size_t mdlen, padding;
2217 if (ovs_scan_len(s, &n, "md2=0x%511[0-9a-fA-F]", buf)
2218 && n/2 <= sizeof metadata) {
2219 ofpbuf_use_stub(&b, metadata, sizeof metadata);
2220 ofpbuf_put_hex(&b, buf, &mdlen);
2221 /* Pad metadata to 4 bytes. */
2222 padding = PAD_SIZE(mdlen, 4);
2223 if (padding > 0) {
2224 ofpbuf_put_zeros(&b, padding);
2225 }
2226 md_size = mdlen + padding;
2227 ofpbuf_uninit(&b);
2228 continue;
2229 }
2230 }
2231
2232 ret = -EINVAL;
2233 goto out;
2234 }
2235 out:
2236 if (ret >= 0) {
2237 nsh.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
2238 size_t offset = nl_msg_start_nested(actions, OVS_ACTION_ATTR_PUSH_NSH);
2239 nsh_key_to_attr(actions, &nsh, metadata, md_size, false);
2240 nl_msg_end_nested(actions, offset);
2241 ret = n;
2242 }
2243 return ret;
2244 }
2245
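/* Example (illustrative; values are made up): parse_odp_push_nsh_action()
 * above accepts, for MD type 1,
 *     push_nsh(flags=0,ttl=63,mdtype=1,np=3,spi=0x123,si=255,c1=0x11223344)
 * or, for MD type 2, an "md2=0x<hex digits>" option in place of c1..c4. */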
2246 static int
2247 parse_action_list(struct parse_odp_context *context, const char *s,
2248 struct ofpbuf *actions)
2249 {
2250 int n = 0;
2251
2252 for (;;) {
2253 int retval;
2254
2255 n += strspn(s + n, delimiters);
2256 if (s[n] == ')') {
2257 break;
2258 }
2259 retval = parse_odp_action(context, s + n, actions);
2260 if (retval < 0) {
2261 return retval;
2262 }
2263 n += retval;
2264 }
2265
2266 if (actions->size > UINT16_MAX) {
2267 return -EFBIG;
2268 }
2269
2270 return n;
2271 }
2272
2273
2274 static int
2275 parse_odp_action(struct parse_odp_context *context, const char *s,
2276 struct ofpbuf *actions)
2277 {
2278 int retval;
2279
2280 context->depth++;
2281
2282 if (context->depth == MAX_ODP_NESTED) {
2283 retval = -EINVAL;
2284 } else {
2285 retval = parse_odp_action__(context, s, actions);
2286 }
2287
2288 context->depth--;
2289
2290 return retval;
2291 }
2292
2293
2294 static int
2295 parse_odp_action__(struct parse_odp_context *context, const char *s,
2296 struct ofpbuf *actions)
2297 {
2298 {
2299 uint32_t port;
2300 int n;
2301
2302 if (ovs_scan(s, "%"SCNi32"%n", &port, &n)) {
2303 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, port);
2304 return n;
2305 }
2306 }
2307
2308 {
2309 uint32_t max_len;
2310 int n;
2311
2312 if (ovs_scan(s, "trunc(%"SCNi32")%n", &max_len, &n)) {
2313 struct ovs_action_trunc *trunc;
2314
2315 trunc = nl_msg_put_unspec_uninit(actions,
2316 OVS_ACTION_ATTR_TRUNC, sizeof *trunc);
2317 trunc->max_len = max_len;
2318 return n;
2319 }
2320 }
2321
2322 if (context->port_names) {
2323 int len = strcspn(s, delimiters);
2324 struct simap_node *node;
2325
2326 node = simap_find_len(context->port_names, s, len);
2327 if (node) {
2328 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
2329 return len;
2330 }
2331 }
2332
2333 {
2334 uint32_t recirc_id;
2335 int n = -1;
2336
2337 if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
2338 nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
2339 return n;
2340 }
2341 }
2342
2343 if (!strncmp(s, "userspace(", 10)) {
2344 return parse_odp_userspace_action(s, actions);
2345 }
2346
2347 if (!strncmp(s, "set(", 4)) {
2348 size_t start_ofs;
2349 int retval;
2350 struct nlattr mask[1024 / sizeof(struct nlattr)];
2351 struct ofpbuf maskbuf = OFPBUF_STUB_INITIALIZER(mask);
2352 struct nlattr *nested, *key;
2353 size_t size;
2354
2355 start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
2356 retval = parse_odp_key_mask_attr(context, s + 4, actions, &maskbuf);
2357 if (retval < 0) {
2358 ofpbuf_uninit(&maskbuf);
2359 return retval;
2360 }
2361 if (s[retval + 4] != ')') {
2362 ofpbuf_uninit(&maskbuf);
2363 return -EINVAL;
2364 }
2365
2366 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2367 key = nested + 1;
2368
2369 size = nl_attr_get_size(mask);
2370 if (size == nl_attr_get_size(key)) {
2371 /* Change to masked set action if not fully masked. */
2372 if (!is_all_ones(mask + 1, size)) {
2373 /* Remove padding of earlier key payload. */
2374 actions->size -= NLA_ALIGN(key->nla_len) - key->nla_len;
2375
2376 /* Put mask payload right after key payload */
2377 key->nla_len += size;
2378 ofpbuf_put(actions, mask + 1, size);
2379
2380 /* 'actions' may have been reallocated by ofpbuf_put(). */
2381 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2382 nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
2383
2384 key = nested + 1;
2385 /* Add new padding as needed */
2386 ofpbuf_put_zeros(actions, NLA_ALIGN(key->nla_len) -
2387 key->nla_len);
2388 }
2389 }
2390 ofpbuf_uninit(&maskbuf);
2391
2392 nl_msg_end_nested(actions, start_ofs);
2393 return retval + 5;
2394 }
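/* Note (illustrative example; values are made up): an exact-match set(...)
 * stays OVS_ACTION_ATTR_SET, while a partially masked one such as
 *     set(ipv4(ttl=10/0xff))
 * is rewritten above into OVS_ACTION_ATTR_SET_MASKED with the mask payload
 * appended directly after the key payload. */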
2395
2396 {
2397 struct ovs_action_push_vlan push;
2398 int tpid = ETH_TYPE_VLAN;
2399 int vid, pcp;
2400 int cfi = 1;
2401 int n = -1;
2402
2403 if (ovs_scan(s, "push_vlan(vid=%i,pcp=%i)%n", &vid, &pcp, &n)
2404 || ovs_scan(s, "push_vlan(vid=%i,pcp=%i,cfi=%i)%n",
2405 &vid, &pcp, &cfi, &n)
2406 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i)%n",
2407 &tpid, &vid, &pcp, &n)
2408 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
2409 &tpid, &vid, &pcp, &cfi, &n)) {
2410 if ((vid & ~(VLAN_VID_MASK >> VLAN_VID_SHIFT)) != 0
2411 || (pcp & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) != 0) {
2412 return -EINVAL;
2413 }
2414 push.vlan_tpid = htons(tpid);
2415 push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
2416 | (pcp << VLAN_PCP_SHIFT)
2417 | (cfi ? VLAN_CFI : 0));
2418 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_VLAN,
2419 &push, sizeof push);
2420
2421 return n;
2422 }
2423 }
2424
2425 if (!strncmp(s, "pop_vlan", 8)) {
2426 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_VLAN);
2427 return 8;
2428 }
2429
2430 {
2431 unsigned long long int meter_id;
2432 int n = -1;
2433
2434 if (sscanf(s, "meter(%lli)%n", &meter_id, &n) > 0 && n > 0) {
2435 nl_msg_put_u32(actions, OVS_ACTION_ATTR_METER, meter_id);
2436 return n;
2437 }
2438 }
2439
2440 {
2441 double percentage;
2442 int n = -1;
2443
2444 if (ovs_scan(s, "sample(sample=%lf%%,actions(%n", &percentage, &n)
2445 && percentage >= 0. && percentage <= 100.0) {
2446 size_t sample_ofs, actions_ofs;
2447 double probability;
2448
2449 probability = floor(UINT32_MAX * (percentage / 100.0) + .5);
2450 sample_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
2451 nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY,
2452 (probability <= 0 ? 0
2453 : probability >= UINT32_MAX ? UINT32_MAX
2454 : probability));
2455
2456 actions_ofs = nl_msg_start_nested(actions,
2457 OVS_SAMPLE_ATTR_ACTIONS);
2458 int retval = parse_action_list(context, s + n, actions);
2459 if (retval < 0) {
2460 return retval;
2461 }
2462
2463
2464 n += retval;
2465 nl_msg_end_nested(actions, actions_ofs);
2466 nl_msg_end_nested(actions, sample_ofs);
2467
2468 return s[n + 1] == ')' ? n + 2 : -EINVAL;
2469 }
2470 }
2471
2472 {
2473 if (!strncmp(s, "clone(", 6)) {
2474 size_t actions_ofs;
2475 int n = 6;
2476
2477 actions_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CLONE);
2478 int retval = parse_action_list(context, s + n, actions);
2479 if (retval < 0) {
2480 return retval;
2481 }
2482 n += retval;
2483 nl_msg_end_nested(actions, actions_ofs);
2484 return n + 1;
2485 }
2486 }
2487
2488 {
2489 if (!strncmp(s, "push_nsh(", 9)) {
2490 int retval = parse_odp_push_nsh_action(s, actions);
2491 if (retval < 0) {
2492 return retval;
2493 }
2494 return retval + 1;
2495 }
2496 }
2497
2498 {
2499 int n;
2500 if (ovs_scan(s, "pop_nsh()%n", &n)) {
2501 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_NSH);
2502 return n;
2503 }
2504 }
2505
2506 {
2507 uint32_t port;
2508 int n;
2509
2510 if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
2511 nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
2512 return n;
2513 }
2514 }
2515
2516 {
2517 if (!strncmp(s, "ct_clear", 8)) {
2518 nl_msg_put_flag(actions, OVS_ACTION_ATTR_CT_CLEAR);
2519 return 8;
2520 }
2521 }
2522
2523 {
2524 uint16_t pkt_len;
2525 int n = -1;
2526 if (ovs_scan(s, "check_pkt_len(size=%"SCNi16",gt(%n", &pkt_len, &n)) {
2527 size_t cpl_ofs, actions_ofs;
2528 cpl_ofs = nl_msg_start_nested(actions,
2529 OVS_ACTION_ATTR_CHECK_PKT_LEN);
2530 nl_msg_put_u16(actions, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, pkt_len);
2531 actions_ofs = nl_msg_start_nested(
2532 actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
2533
2534 int retval;
2535 if (!strncasecmp(s + n, "drop", 4)) {
2536 n += 4;
2537 } else {
2538 retval = parse_action_list(context, s + n, actions);
2539 if (retval < 0) {
2540 return retval;
2541 }
2542
2543 n += retval;
2544 }
2545 nl_msg_end_nested(actions, actions_ofs);
2546 retval = -1;
2547 if (!ovs_scan(s + n, "),le(%n", &retval)) {
2548 return -EINVAL;
2549 }
2550 n += retval;
2551
2552 actions_ofs = nl_msg_start_nested(
2553 actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
2554 if (!strncasecmp(s + n, "drop", 4)) {
2555 n += 4;
2556 } else {
2557 retval = parse_action_list(context, s + n, actions);
2558 if (retval < 0) {
2559 return retval;
2560 }
2561 n += retval;
2562 }
2563 nl_msg_end_nested(actions, actions_ofs);
2564 nl_msg_end_nested(actions, cpl_ofs);
2565 return s[n + 1] == ')' ? n + 2 : -EINVAL;
2566 }
2567 }
2568
2569 {
2570 int retval;
2571
2572 retval = parse_conntrack_action(s, actions);
2573 if (retval) {
2574 return retval;
2575 }
2576 }
2577
2578 {
2579 struct ovs_action_push_tnl data;
2580 int n;
2581
2582 n = ovs_parse_tnl_push(s, &data);
2583 if (n > 0) {
2584 odp_put_tnl_push_action(actions, &data);
2585 return n;
2586 } else if (n < 0) {
2587 return n;
2588 }
2589 }
2590
2591 return -EINVAL;
2592 }
2593
2594 /* Parses the string representation of datapath actions, in the format output
2595 * by format_odp_action(). Returns 0 if successful, otherwise a positive errno
2596 * value. On success, the ODP actions are appended to 'actions' as a series of
2597 * Netlink attributes. On failure, no data is appended to 'actions'. Either
2598 * way, 'actions''s data might be reallocated. */
2599 int
2600 odp_actions_from_string(const char *s, const struct simap *port_names,
2601 struct ofpbuf *actions)
2602 {
2603 size_t old_size;
2604
2605 if (!strcasecmp(s, "drop")) {
2606 nl_msg_put_u32(actions, OVS_ACTION_ATTR_DROP, XLATE_OK);
2607 return 0;
2608 }
2609
2610 struct parse_odp_context context = (struct parse_odp_context) {
2611 .port_names = port_names,
2612 };
2613
2614 old_size = actions->size;
2615 for (;;) {
2616 int retval;
2617
2618 s += strspn(s, delimiters);
2619 if (!*s) {
2620 return 0;
2621 }
2622
2623 retval = parse_odp_action(&context, s, actions);
2624
2625 if (retval < 0 || !strchr(delimiters, s[retval])) {
2626 actions->size = old_size;
2627 return -retval;
2628 }
2629 s += retval;
2630 }
2631
2632 return 0;
2633 }
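/* Illustrative usage sketch (the action string and buffer handling here are
 * made-up examples, not taken from callers in the tree):
 *
 *     struct ofpbuf actions;
 *     ofpbuf_init(&actions, 0);
 *     int error = odp_actions_from_string("2,recirc(1),ct(commit)", NULL,
 *                                         &actions);
 *     if (!error) {
 *         ... actions.data / actions.size now hold Netlink-formatted
 *             OVS_ACTION_ATTR_* attributes ...
 *     }
 *     ofpbuf_uninit(&actions);
 */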
2634 \f
2635 static const struct attr_len_tbl ovs_vxlan_ext_attr_lens[OVS_VXLAN_EXT_MAX + 1] = {
2636 [OVS_VXLAN_EXT_GBP] = { .len = 4 },
2637 };
2638
2639 static const struct attr_len_tbl ovs_tun_key_attr_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
2640 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = 8 },
2641 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = 4 },
2642 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = 4 },
2643 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
2644 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
2645 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
2646 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
2647 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = 2 },
2648 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = 2 },
2649 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
2650 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = ATTR_LEN_VARIABLE },
2651 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = ATTR_LEN_NESTED,
2652 .next = ovs_vxlan_ext_attr_lens ,
2653 .next_max = OVS_VXLAN_EXT_MAX},
2654 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = 16 },
2655 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = 16 },
2656 [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = ATTR_LEN_VARIABLE },
2657 [OVS_TUNNEL_KEY_ATTR_GTPU_OPTS] = { .len = ATTR_LEN_VARIABLE },
2658 };
2659
2660 const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
2661 [OVS_KEY_ATTR_ENCAP] = { .len = ATTR_LEN_NESTED },
2662 [OVS_KEY_ATTR_PRIORITY] = { .len = 4 },
2663 [OVS_KEY_ATTR_SKB_MARK] = { .len = 4 },
2664 [OVS_KEY_ATTR_DP_HASH] = { .len = 4 },
2665 [OVS_KEY_ATTR_RECIRC_ID] = { .len = 4 },
2666 [OVS_KEY_ATTR_TUNNEL] = { .len = ATTR_LEN_NESTED,
2667 .next = ovs_tun_key_attr_lens,
2668 .next_max = OVS_TUNNEL_KEY_ATTR_MAX },
2669 [OVS_KEY_ATTR_IN_PORT] = { .len = 4 },
2670 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
2671 [OVS_KEY_ATTR_VLAN] = { .len = 2 },
2672 [OVS_KEY_ATTR_ETHERTYPE] = { .len = 2 },
2673 [OVS_KEY_ATTR_MPLS] = { .len = ATTR_LEN_VARIABLE },
2674 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
2675 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
2676 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
2677 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = 2 },
2678 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
2679 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
2680 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
2681 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
2682 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
2683 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
2684 [OVS_KEY_ATTR_ND_EXTENSIONS] = { .len = sizeof(struct ovs_key_nd_extensions) },
2685 [OVS_KEY_ATTR_CT_STATE] = { .len = 4 },
2686 [OVS_KEY_ATTR_CT_ZONE] = { .len = 2 },
2687 [OVS_KEY_ATTR_CT_MARK] = { .len = 4 },
2688 [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
2689 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = { .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
2690 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = { .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
2691 [OVS_KEY_ATTR_PACKET_TYPE] = { .len = 4 },
2692 [OVS_KEY_ATTR_NSH] = { .len = ATTR_LEN_NESTED,
2693 .next = ovs_nsh_key_attr_lens,
2694 .next_max = OVS_NSH_KEY_ATTR_MAX },
2695 };
2696
2697 /* Returns the correct length of the payload for a flow key attribute of the
2698 * specified 'type', ATTR_LEN_INVALID if 'type' is unknown, ATTR_LEN_VARIABLE
2699 * if the attribute's payload is variable length, or ATTR_LEN_NESTED if the
2700 * payload is a nested type. */
2701 static int
2702 odp_key_attr_len(const struct attr_len_tbl tbl[], int max_type, uint16_t type)
2703 {
2704 if (type > max_type) {
2705 return ATTR_LEN_INVALID;
2706 }
2707
2708 return tbl[type].len;
2709 }
2710
2711 static void
2712 format_generic_odp_key(const struct nlattr *a, struct ds *ds)
2713 {
2714 size_t len = nl_attr_get_size(a);
2715 if (len) {
2716 const uint8_t *unspec;
2717 unsigned int i;
2718
2719 unspec = nl_attr_get(a);
2720 for (i = 0; i < len; i++) {
2721 if (i) {
2722 ds_put_char(ds, ' ');
2723 }
2724 ds_put_format(ds, "%02x", unspec[i]);
2725 }
2726 }
2727 }
2728
2729 static const char *
2730 ovs_frag_type_to_string(enum ovs_frag_type type)
2731 {
2732 switch (type) {
2733 case OVS_FRAG_TYPE_NONE:
2734 return "no";
2735 case OVS_FRAG_TYPE_FIRST:
2736 return "first";
2737 case OVS_FRAG_TYPE_LATER:
2738 return "later";
2739 case __OVS_FRAG_TYPE_MAX:
2740 default:
2741 return "<error>";
2742 }
2743 }
2744
2745 enum odp_key_fitness
2746 odp_nsh_hdr_from_attr(const struct nlattr *attr,
2747 struct nsh_hdr *nsh_hdr, size_t size)
2748 {
2749 unsigned int left;
2750 const struct nlattr *a;
2751 bool unknown = false;
2752 uint8_t flags = 0;
2753 uint8_t ttl = 63;
2754 size_t mdlen = 0;
2755 bool has_md1 = false;
2756 bool has_md2 = false;
2757
2758 memset(nsh_hdr, 0, size);
2759
2760 NL_NESTED_FOR_EACH (a, left, attr) {
2761 uint16_t type = nl_attr_type(a);
2762 size_t len = nl_attr_get_size(a);
2763 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2764 OVS_NSH_KEY_ATTR_MAX, type);
2765
2766 if (len != expected_len && expected_len >= 0) {
2767 return ODP_FIT_ERROR;
2768 }
2769
2770 switch (type) {
2771 case OVS_NSH_KEY_ATTR_BASE: {
2772 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2773 nsh_hdr->next_proto = base->np;
2774 nsh_hdr->md_type = base->mdtype;
2775 put_16aligned_be32(&nsh_hdr->path_hdr, base->path_hdr);
2776 flags = base->flags;
2777 ttl = base->ttl;
2778 break;
2779 }
2780 case OVS_NSH_KEY_ATTR_MD1: {
2781 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2782 struct nsh_md1_ctx *md1_dst = &nsh_hdr->md1;
2783 has_md1 = true;
2784 mdlen = nl_attr_get_size(a);
2785 if ((mdlen + NSH_BASE_HDR_LEN != NSH_M_TYPE1_LEN) ||
2786 (mdlen + NSH_BASE_HDR_LEN > size)) {
2787 return ODP_FIT_ERROR;
2788 }
2789 memcpy(md1_dst, md1, mdlen);
2790 break;
2791 }
2792 case OVS_NSH_KEY_ATTR_MD2: {
2793 struct nsh_md2_tlv *md2_dst = &nsh_hdr->md2;
2794 const uint8_t *md2 = nl_attr_get(a);
2795 has_md2 = true;
2796 mdlen = nl_attr_get_size(a);
2797 if (mdlen + NSH_BASE_HDR_LEN > size) {
2798 return ODP_FIT_ERROR;
2799 }
2800 memcpy(md2_dst, md2, mdlen);
2801 break;
2802 }
2803 default:
2804 /* Allow this to show up as unexpected, if there are unknown
2805 * NSH attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
2806 unknown = true;
2807 break;
2808 }
2809 }
2810
2811 if (unknown) {
2812 return ODP_FIT_TOO_MUCH;
2813 }
2814
2815 if ((has_md1 && nsh_hdr->md_type != NSH_M_TYPE1)
2816 || (has_md2 && nsh_hdr->md_type != NSH_M_TYPE2)) {
2817 return ODP_FIT_ERROR;
2818 }
2819
2820 /* nsh header length = NSH_BASE_HDR_LEN + mdlen */
2821 nsh_set_flags_ttl_len(nsh_hdr, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
2822
2823 return ODP_FIT_PERFECT;
2824 }
2825
2826 /* Reports the error 'msg', which is formatted as with printf().
2827 *
2828 * If 'errorp' is nonnull, then the caller wants the error report to come
2829 * directly back to it, so the function stores the error message into '*errorp'
2830 * (after first freeing it in case there's something there already).
2831 *
2832 * Otherwise, logs the message at WARN level, rate-limited. */
2833 static void OVS_PRINTF_FORMAT(3, 4)
2834 odp_parse_error(struct vlog_rate_limit *rl, char **errorp,
2835 const char *msg, ...)
2836 {
2837 if (OVS_UNLIKELY(errorp)) {
2838 free(*errorp);
2839
2840 va_list args;
2841 va_start(args, msg);
2842 *errorp = xvasprintf(msg, args);
2843 va_end(args);
2844 } else if (!VLOG_DROP_WARN(rl)) {
2845 va_list args;
2846 va_start(args, msg);
2847 char *error = xvasprintf(msg, args);
2848 va_end(args);
2849
2850 VLOG_WARN("%s", error);
2851
2852 free(error);
2853 }
2854 }
2855
2856 /* Parses OVS_KEY_ATTR_NSH attribute 'attr' into 'nsh' and 'nsh_mask' and
2857 * returns fitness. If the attribute is a key, 'is_mask' should be false;
2858 * if it is a mask, 'is_mask' should be true. If 'errorp' is nonnull and the
2859 * function returns ODP_FIT_ERROR, stores a malloc()'d error message in
2860 * '*errorp'. */
2861 static enum odp_key_fitness
2862 odp_nsh_key_from_attr__(const struct nlattr *attr, bool is_mask,
2863 struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask,
2864 char **errorp)
2865 {
2866 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2867 if (errorp) {
2868 *errorp = NULL;
2869 }
2870
2871 unsigned int left;
2872 const struct nlattr *a;
2873 bool unknown = false;
2874 bool has_md1 = false;
2875
2876 NL_NESTED_FOR_EACH (a, left, attr) {
2877 uint16_t type = nl_attr_type(a);
2878 size_t len = nl_attr_get_size(a);
2879 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2880 OVS_NSH_KEY_ATTR_MAX, type);
2881 if (expected_len) {
2882 if (nsh_mask) {
2883 expected_len *= 2;
2884 }
2885 if (len != expected_len) {
2886 odp_parse_error(&rl, errorp, "NSH %s attribute %"PRIu16" "
2887 "should have length %d but actually has "
2888 "%"PRIuSIZE,
2889 nsh_mask ? "mask" : "key",
2890 type, expected_len, len);
2891 return ODP_FIT_ERROR;
2892 }
2893 }
2894
2895 switch (type) {
2896 case OVS_NSH_KEY_ATTR_UNSPEC:
2897 break;
2898 case OVS_NSH_KEY_ATTR_BASE: {
2899 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2900 nsh->flags = base->flags;
2901 nsh->ttl = base->ttl;
2902 nsh->mdtype = base->mdtype;
2903 nsh->np = base->np;
2904 nsh->path_hdr = base->path_hdr;
2905 if (nsh_mask && (len == 2 * sizeof(*base))) {
2906 const struct ovs_nsh_key_base *base_mask = base + 1;
2907 nsh_mask->flags = base_mask->flags;
2908 nsh_mask->ttl = base_mask->ttl;
2909 nsh_mask->mdtype = base_mask->mdtype;
2910 nsh_mask->np = base_mask->np;
2911 nsh_mask->path_hdr = base_mask->path_hdr;
2912 }
2913 break;
2914 }
2915 case OVS_NSH_KEY_ATTR_MD1: {
2916 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2917 has_md1 = true;
2918 memcpy(nsh->context, md1->context, sizeof md1->context);
2919 if (len == 2 * sizeof(*md1)) {
2920 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
2921 memcpy(nsh_mask->context, md1_mask->context,
2922 sizeof(*md1_mask));
2923 }
2924 break;
2925 }
2926 case OVS_NSH_KEY_ATTR_MD2:
2927 default:
2928 /* Allow this to show up as unexpected, if there are unknown
2929 * NSH attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
2930 unknown = true;
2931 break;
2932 }
2933 }
2934
2935 if (unknown) {
2936 return ODP_FIT_TOO_MUCH;
2937 }
2938
2939 if (!is_mask && has_md1 && nsh->mdtype != NSH_M_TYPE1 && !nsh_mask) {
2940 odp_parse_error(&rl, errorp, "OVS_NSH_KEY_ATTR_MD1 present but "
2941 "declared mdtype %"PRIu8" is not %d (NSH_M_TYPE1)",
2942 nsh->mdtype, NSH_M_TYPE1);
2943 return ODP_FIT_ERROR;
2944 }
2945
2946 return ODP_FIT_PERFECT;
2947 }
2948
2949 /* Parses OVS_KEY_ATTR_NSH attribute 'attr' into 'nsh' and 'nsh_mask' and
2950 * returns fitness. The attribute should be a key (not a mask). If 'errorp'
2951 * is nonnull and the function returns ODP_FIT_ERROR, stores a malloc()'d error
2952 * message in '*errorp'. */
2953 enum odp_key_fitness
2954 odp_nsh_key_from_attr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
2955 struct ovs_key_nsh *nsh_mask, char **errorp)
2956 {
2957 return odp_nsh_key_from_attr__(attr, false, nsh, nsh_mask, errorp);
2958 }
2959
2960 /* Parses OVS_KEY_ATTR_TUNNEL attribute 'attr' into 'tun' and returns fitness.
2961 * If the attribute is a key, 'is_mask' should be false; if it is a mask,
2962 * 'is_mask' should be true. If 'errorp' is nonnull and the function returns
2963 * ODP_FIT_ERROR, stores a malloc()'d error message in '*errorp'. */
2964 static enum odp_key_fitness
2965 odp_tun_key_from_attr__(const struct nlattr *attr, bool is_mask,
2966 struct flow_tnl *tun, char **errorp)
2967 {
2968 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2969 unsigned int left;
2970 const struct nlattr *a;
2971 bool ttl = false;
2972 bool unknown = false;
2973
2974 NL_NESTED_FOR_EACH(a, left, attr) {
2975 uint16_t type = nl_attr_type(a);
2976 size_t len = nl_attr_get_size(a);
2977 int expected_len = odp_key_attr_len(ovs_tun_key_attr_lens,
2978 OVS_TUNNEL_KEY_ATTR_MAX, type);
2979
2980 if (len != expected_len && expected_len >= 0) {
2981 odp_parse_error(&rl, errorp, "tunnel key attribute %"PRIu16" "
2982 "should have length %d but actually has %"PRIuSIZE,
2983 type, expected_len, len);
2984 return ODP_FIT_ERROR;
2985 }
2986
2987 switch (type) {
2988 case OVS_TUNNEL_KEY_ATTR_ID:
2989 tun->tun_id = nl_attr_get_be64(a);
2990 tun->flags |= FLOW_TNL_F_KEY;
2991 break;
2992 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
2993 tun->ip_src = nl_attr_get_be32(a);
2994 break;
2995 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
2996 tun->ip_dst = nl_attr_get_be32(a);
2997 break;
2998 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
2999 tun->ipv6_src = nl_attr_get_in6_addr(a);
3000 break;
3001 case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
3002 tun->ipv6_dst = nl_attr_get_in6_addr(a);
3003 break;
3004 case OVS_TUNNEL_KEY_ATTR_TOS:
3005 tun->ip_tos = nl_attr_get_u8(a);
3006 break;
3007 case OVS_TUNNEL_KEY_ATTR_TTL:
3008 tun->ip_ttl = nl_attr_get_u8(a);
3009 ttl = true;
3010 break;
3011 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3012 tun->flags |= FLOW_TNL_F_DONT_FRAGMENT;
3013 break;
3014 case OVS_TUNNEL_KEY_ATTR_CSUM:
3015 tun->flags |= FLOW_TNL_F_CSUM;
3016 break;
3017 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
3018 tun->tp_src = nl_attr_get_be16(a);
3019 break;
3020 case OVS_TUNNEL_KEY_ATTR_TP_DST:
3021 tun->tp_dst = nl_attr_get_be16(a);
3022 break;
3023 case OVS_TUNNEL_KEY_ATTR_OAM:
3024 tun->flags |= FLOW_TNL_F_OAM;
3025 break;
3026 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
3027 static const struct nl_policy vxlan_opts_policy[] = {
3028 [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
3029 };
3030 struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
3031
3032 if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
3033 odp_parse_error(&rl, errorp, "error parsing VXLAN options");
3034 return ODP_FIT_ERROR;
3035 }
3036
3037 if (ext[OVS_VXLAN_EXT_GBP]) {
3038 uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
3039
3040 tun->gbp_id = htons(gbp & 0xFFFF);
3041 tun->gbp_flags = (gbp >> 16) & 0xFF;
3042 }
3043
3044 break;
3045 }
3046 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
3047 tun_metadata_from_geneve_nlattr(a, is_mask, tun);
3048 break;
3049 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: {
3050 const struct erspan_metadata *opts = nl_attr_get(a);
3051
3052 tun->erspan_ver = opts->version;
3053 if (tun->erspan_ver == 1) {
3054 tun->erspan_idx = ntohl(opts->u.index);
3055 } else if (tun->erspan_ver == 2) {
3056 tun->erspan_dir = opts->u.md2.dir;
3057 tun->erspan_hwid = get_hwid(&opts->u.md2);
3058 } else {
3059 VLOG_WARN("%s invalid erspan version\n", __func__);
3060 }
3061 break;
3062 }
3063 case OVS_TUNNEL_KEY_ATTR_GTPU_OPTS: {
3064 const struct gtpu_metadata *opts = nl_attr_get(a);
3065
3066 tun->gtpu_flags = opts->flags;
3067 tun->gtpu_msgtype = opts->msgtype;
3068 break;
3069 }
3070
3071 default:
3072 /* Allow this to show up as unexpected, if there are unknown
3073 * tunnel attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
3074 unknown = true;
3075 break;
3076 }
3077 }
3078
3079 if (!ttl) {
3080 odp_parse_error(&rl, errorp, "tunnel options missing TTL");
3081 return ODP_FIT_ERROR;
3082 }
3083 if (unknown) {
3084 return ODP_FIT_TOO_MUCH;
3085 }
3086 return ODP_FIT_PERFECT;
3087 }
3088
3089 /* Parses OVS_KEY_ATTR_TUNNEL key attribute 'attr' into 'tun' and returns
3090 * fitness. The attribute should be a key (not a mask). If 'errorp' is
3091 * nonnull, stores NULL into '*errorp' on success, otherwise a malloc()'d error
3092 * message. */
3093 enum odp_key_fitness
3094 odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun,
3095 char **errorp)
3096 {
3097 if (errorp) {
3098 *errorp = NULL;
3099 }
3100 memset(tun, 0, sizeof *tun);
3101 return odp_tun_key_from_attr__(attr, false, tun, errorp);
3102 }
3103
3104 static void
3105 tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key,
3106 const struct flow_tnl *tun_flow_key,
3107 const struct ofpbuf *key_buf, const char *tnl_type)
3108 {
3109 size_t tun_key_ofs;
3110
3111 tun_key_ofs = nl_msg_start_nested(a, OVS_KEY_ATTR_TUNNEL);
3112
3113 /* tun_id != 0 without FLOW_TNL_F_KEY is valid if tun_key is a mask. */
3114 if (tun_key->tun_id || tun_key->flags & FLOW_TNL_F_KEY) {
3115 nl_msg_put_be64(a, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id);
3116 }
3117 if (tun_key->ip_src) {
3118 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ip_src);
3119 }
3120 if (tun_key->ip_dst) {
3121 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
3122 }
3123 if (ipv6_addr_is_set(&tun_key->ipv6_src)) {
3124 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_SRC, &tun_key->ipv6_src);
3125 }
3126 if (ipv6_addr_is_set(&tun_key->ipv6_dst)) {
3127 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_DST, &tun_key->ipv6_dst);
3128 }
3129 if (tun_key->ip_tos) {
3130 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
3131 }
3132 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ip_ttl);
3133 if (tun_key->flags & FLOW_TNL_F_DONT_FRAGMENT) {
3134 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
3135 }
3136 if (tun_key->flags & FLOW_TNL_F_CSUM) {
3137 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
3138 }
3139 if (tun_key->tp_src) {
3140 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src);
3141 }
3142 if (tun_key->tp_dst) {
3143 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst);
3144 }
3145 if (tun_key->flags & FLOW_TNL_F_OAM) {
3146 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
3147 }
3148
3149 /* If tnl_type names a particular type of output tunnel, put only
3150 * the tunnel metadata relevant to that type into the nlattr.
3151 * If tnl_type is NULL, put tunnel metadata according to what is
3152 * set in 'tun_key'.
3153 */
3154 if ((!tnl_type || !strcmp(tnl_type, "vxlan")) &&
3155 (tun_key->gbp_flags || tun_key->gbp_id)) {
3156 size_t vxlan_opts_ofs;
3157
3158 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
3159 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
3160 (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
3161 nl_msg_end_nested(a, vxlan_opts_ofs);
3162 }
3163
3164 if (!tnl_type || !strcmp(tnl_type, "geneve")) {
3165 tun_metadata_to_geneve_nlattr(tun_key, tun_flow_key, key_buf, a);
3166 }
3167
3168 if ((!tnl_type || !strcmp(tnl_type, "erspan") ||
3169 !strcmp(tnl_type, "ip6erspan")) &&
3170 (tun_key->erspan_ver == 1 || tun_key->erspan_ver == 2)) {
3171 struct erspan_metadata opts;
3172
3173 opts.version = tun_key->erspan_ver;
3174 if (opts.version == 1) {
3175 opts.u.index = htonl(tun_key->erspan_idx);
3176 } else {
3177 opts.u.md2.dir = tun_key->erspan_dir;
3178 set_hwid(&opts.u.md2, tun_key->erspan_hwid);
3179 }
3180 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
3181 &opts, sizeof(opts));
3182 }
3183
3184 if ((!tnl_type || !strcmp(tnl_type, "gtpu")) &&
3185 (tun_key->gtpu_flags && tun_key->gtpu_msgtype)) {
3186 struct gtpu_metadata opts;
3187
3188 opts.flags = tun_key->gtpu_flags;
3189 opts.msgtype = tun_key->gtpu_msgtype;
3190 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GTPU_OPTS,
3191 &opts, sizeof(opts));
3192 }
3193 nl_msg_end_nested(a, tun_key_ofs);
3194 }
3195
3196 static bool
3197 odp_mask_is_constant__(enum ovs_key_attr attr, const void *mask, size_t size,
3198 int constant)
3199 {
3200 /* Convert 'constant' to all the widths we need. C conversion rules ensure
3201 * that -1 becomes all-1-bits and 0 does not change. */
3202 ovs_be16 be16 = (OVS_FORCE ovs_be16) constant;
3203 uint32_t u32 = constant;
3204 uint8_t u8 = constant;
3205 const struct in6_addr *in6 = constant ? &in6addr_exact : &in6addr_any;
3206
3207 switch (attr) {
3208 case OVS_KEY_ATTR_UNSPEC:
3209 case OVS_KEY_ATTR_ENCAP:
3210 case __OVS_KEY_ATTR_MAX:
3211 default:
3212 return false;
3213
3214 case OVS_KEY_ATTR_PRIORITY:
3215 case OVS_KEY_ATTR_IN_PORT:
3216 case OVS_KEY_ATTR_ETHERNET:
3217 case OVS_KEY_ATTR_VLAN:
3218 case OVS_KEY_ATTR_ETHERTYPE:
3219 case OVS_KEY_ATTR_IPV4:
3220 case OVS_KEY_ATTR_TCP:
3221 case OVS_KEY_ATTR_UDP:
3222 case OVS_KEY_ATTR_ICMP:
3223 case OVS_KEY_ATTR_ICMPV6:
3224 case OVS_KEY_ATTR_ND:
3225 case OVS_KEY_ATTR_ND_EXTENSIONS:
3226 case OVS_KEY_ATTR_SKB_MARK:
3227 case OVS_KEY_ATTR_TUNNEL:
3228 case OVS_KEY_ATTR_SCTP:
3229 case OVS_KEY_ATTR_DP_HASH:
3230 case OVS_KEY_ATTR_RECIRC_ID:
3231 case OVS_KEY_ATTR_MPLS:
3232 case OVS_KEY_ATTR_CT_STATE:
3233 case OVS_KEY_ATTR_CT_ZONE:
3234 case OVS_KEY_ATTR_CT_MARK:
3235 case OVS_KEY_ATTR_CT_LABELS:
3236 case OVS_KEY_ATTR_PACKET_TYPE:
3237 case OVS_KEY_ATTR_NSH:
3238 return is_all_byte(mask, size, u8);
3239
3240 case OVS_KEY_ATTR_TCP_FLAGS:
3241 return TCP_FLAGS(*(ovs_be16 *) mask) == TCP_FLAGS(be16);
3242
3243 case OVS_KEY_ATTR_IPV6: {
3244 const struct ovs_key_ipv6 *ipv6_mask = mask;
3245 return ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
3246 == htonl(IPV6_LABEL_MASK & u32)
3247 && ipv6_mask->ipv6_proto == u8
3248 && ipv6_mask->ipv6_tclass == u8
3249 && ipv6_mask->ipv6_hlimit == u8
3250 && ipv6_mask->ipv6_frag == u8
3251 && ipv6_addr_equals(&ipv6_mask->ipv6_src, in6)
3252 && ipv6_addr_equals(&ipv6_mask->ipv6_dst, in6));
3253 }
3254
3255 case OVS_KEY_ATTR_ARP:
3256 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_arp, arp_tha), u8);
3257
3258 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
3259 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv4,
3260 ipv4_proto), u8);
3261
3262 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
3263 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv6,
3264 ipv6_proto), u8);
3265 }
3266 }
3267
3268 /* The caller must already have verified that 'ma' has a correct length.
3269 *
3270 * The main purpose of this function is formatting, to allow code to figure out
3271 * whether the mask can be omitted. It doesn't try hard for attributes that
3272 * contain sub-attributes, etc., because normally those would be broken down
3273 * further for formatting. */
3274 static bool
3275 odp_mask_attr_is_wildcard(const struct nlattr *ma)
3276 {
3277 return odp_mask_is_constant__(nl_attr_type(ma),
3278 nl_attr_get(ma), nl_attr_get_size(ma), 0);
3279 }
3280
3281 /* The caller must already have verified that 'size' is a correct length for
3282 * 'attr'.
3283 *
3284 * The main purpose of this function is formatting, to allow code to figure out
3285 * whether the mask can be omitted. It doesn't try hard for attributes that
3286 * contain sub-attributes, etc., because normally those would be broken down
3287 * further for formatting. */
3288 static bool
3289 odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
3290 {
3291 return odp_mask_is_constant__(attr, mask, size, -1);
3292 }
3293
3294 /* The caller must already have verified that 'ma' has a correct length. */
3295 static bool
3296 odp_mask_attr_is_exact(const struct nlattr *ma)
3297 {
3298 enum ovs_key_attr attr = nl_attr_type(ma);
3299 return odp_mask_is_exact(attr, nl_attr_get(ma), nl_attr_get_size(ma));
3300 }
3301
3302 void
3303 odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
3304 char *port_name)
3305 {
3306 struct odp_portno_names *odp_portno_names;
3307
3308 odp_portno_names = xmalloc(sizeof *odp_portno_names);
3309 odp_portno_names->port_no = port_no;
3310 odp_portno_names->name = xstrdup(port_name);
3311 hmap_insert(portno_names, &odp_portno_names->hmap_node,
3312 hash_odp_port(port_no));
3313 }
3314
3315 static char *
3316 odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
3317 {
3318 if (portno_names) {
3319 struct odp_portno_names *odp_portno_names;
3320
3321 HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
3322 hash_odp_port(port_no), portno_names) {
3323 if (odp_portno_names->port_no == port_no) {
3324 return odp_portno_names->name;
3325 }
3326 }
3327 }
3328 return NULL;
3329 }
3330
3331 void
3332 odp_portno_names_destroy(struct hmap *portno_names)
3333 {
3334 struct odp_portno_names *odp_portno_names;
3335
3336 HMAP_FOR_EACH_POP (odp_portno_names, hmap_node, portno_names) {
3337 free(odp_portno_names->name);
3338 free(odp_portno_names);
3339 }
3340 }
3341
3342 void
3343 odp_portno_name_format(const struct hmap *portno_names, odp_port_t port_no,
3344 struct ds *s)
3345 {
3346 const char *name = odp_portno_names_get(portno_names, port_no);
3347 if (name) {
3348 ds_put_cstr(s, name);
3349 } else {
3350 ds_put_format(s, "%"PRIu32, port_no);
3351 }
3352 }
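/* Illustrative usage sketch for the portno_names helpers above (the port
 * number and name are made up; 'ds' is assumed to be an initialized
 * struct ds):
 *
 *     struct hmap names = HMAP_INITIALIZER(&names);
 *     odp_portno_names_set(&names, u32_to_odp(1), "br0");
 *     odp_portno_name_format(&names, u32_to_odp(1), &ds);
 *     odp_portno_names_destroy(&names);
 *     hmap_destroy(&names);
 */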
3353
3354 /* Format helpers. */
3355
3356 static void
3357 format_eth(struct ds *ds, const char *name, const struct eth_addr key,
3358 const struct eth_addr *mask, bool verbose)
3359 {
3360 bool mask_empty = mask && eth_addr_is_zero(*mask);
3361
3362 if (verbose || !mask_empty) {
3363 bool mask_full = !mask || eth_mask_is_exact(*mask);
3364
3365 if (mask_full) {
3366 ds_put_format(ds, "%s="ETH_ADDR_FMT",", name, ETH_ADDR_ARGS(key));
3367 } else {
3368 ds_put_format(ds, "%s=", name);
3369 eth_format_masked(key, mask, ds);
3370 ds_put_char(ds, ',');
3371 }
3372 }
3373 }
3374
3375
3376 static void
3377 format_be64(struct ds *ds, const char *name, ovs_be64 key,
3378 const ovs_be64 *mask, bool verbose)
3379 {
3380 bool mask_empty = mask && !*mask;
3381
3382 if (verbose || !mask_empty) {
3383 bool mask_full = !mask || *mask == OVS_BE64_MAX;
3384
3385 ds_put_format(ds, "%s=0x%"PRIx64, name, ntohll(key));
3386 if (!mask_full) { /* Partially masked. */
3387 ds_put_format(ds, "/%#"PRIx64, ntohll(*mask));
3388 }
3389 ds_put_char(ds, ',');
3390 }
3391 }
3392
3393 static void
3394 format_ipv4(struct ds *ds, const char *name, ovs_be32 key,
3395 const ovs_be32 *mask, bool verbose)
3396 {
3397 bool mask_empty = mask && !*mask;
3398
3399 if (verbose || !mask_empty) {
3400 bool mask_full = !mask || *mask == OVS_BE32_MAX;
3401
3402 ds_put_format(ds, "%s="IP_FMT, name, IP_ARGS(key));
3403 if (!mask_full) { /* Partially masked. */
3404 ds_put_format(ds, "/"IP_FMT, IP_ARGS(*mask));
3405 }
3406 ds_put_char(ds, ',');
3407 }
3408 }
3409
3410 static void
3411 format_in6_addr(struct ds *ds, const char *name,
3412 const struct in6_addr *key,
3413 const struct in6_addr *mask,
3414 bool verbose)
3415 {
3416 char buf[INET6_ADDRSTRLEN];
3417 bool mask_empty = mask && ipv6_mask_is_any(mask);
3418
3419 if (verbose || !mask_empty) {
3420 bool mask_full = !mask || ipv6_mask_is_exact(mask);
3421
3422 inet_ntop(AF_INET6, key, buf, sizeof buf);
3423 ds_put_format(ds, "%s=%s", name, buf);
3424 if (!mask_full) { /* Partially masked. */
3425 inet_ntop(AF_INET6, mask, buf, sizeof buf);
3426 ds_put_format(ds, "/%s", buf);
3427 }
3428 ds_put_char(ds, ',');
3429 }
3430 }
3431
3432 static void
3433 format_ipv6_label(struct ds *ds, const char *name, ovs_be32 key,
3434 const ovs_be32 *mask, bool verbose)
3435 {
3436 bool mask_empty = mask && !*mask;
3437
3438 if (verbose || !mask_empty) {
3439 bool mask_full = !mask
3440 || (*mask & htonl(IPV6_LABEL_MASK)) == htonl(IPV6_LABEL_MASK);
3441
3442 ds_put_format(ds, "%s=%#"PRIx32, name, ntohl(key));
3443 if (!mask_full) { /* Partially masked. */
3444 ds_put_format(ds, "/%#"PRIx32, ntohl(*mask));
3445 }
3446 ds_put_char(ds, ',');
3447 }
3448 }
3449
3450 static void
3451 format_u8x(struct ds *ds, const char *name, uint8_t key,
3452 const uint8_t *mask, bool verbose)
3453 {
3454 bool mask_empty = mask && !*mask;
3455
3456 if (verbose || !mask_empty) {
3457 bool mask_full = !mask || *mask == UINT8_MAX;
3458
3459 ds_put_format(ds, "%s=%#"PRIx8, name, key);
3460 if (!mask_full) { /* Partially masked. */
3461 ds_put_format(ds, "/%#"PRIx8, *mask);
3462 }
3463 ds_put_char(ds, ',');
3464 }
3465 }
3466
3467 static void
3468 format_u8u(struct ds *ds, const char *name, uint8_t key,
3469 const uint8_t *mask, bool verbose)
3470 {
3471 bool mask_empty = mask && !*mask;
3472
3473 if (verbose || !mask_empty) {
3474 bool mask_full = !mask || *mask == UINT8_MAX;
3475
3476 ds_put_format(ds, "%s=%"PRIu8, name, key);
3477 if (!mask_full) { /* Partially masked. */
3478 ds_put_format(ds, "/%#"PRIx8, *mask);
3479 }
3480 ds_put_char(ds, ',');
3481 }
3482 }
3483
3484 static void
3485 format_be16(struct ds *ds, const char *name, ovs_be16 key,
3486 const ovs_be16 *mask, bool verbose)
3487 {
3488 bool mask_empty = mask && !*mask;
3489
3490 if (verbose || !mask_empty) {
3491 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3492
3493 ds_put_format(ds, "%s=%"PRIu16, name, ntohs(key));
3494 if (!mask_full) { /* Partially masked. */
3495 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3496 }
3497 ds_put_char(ds, ',');
3498 }
3499 }
3500
3501 static void
3502 format_be16x(struct ds *ds, const char *name, ovs_be16 key,
3503 const ovs_be16 *mask, bool verbose)
3504 {
3505 bool mask_empty = mask && !*mask;
3506
3507 if (verbose || !mask_empty) {
3508 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3509
3510 ds_put_format(ds, "%s=%#"PRIx16, name, ntohs(key));
3511 if (!mask_full) { /* Partially masked. */
3512 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3513 }
3514 ds_put_char(ds, ',');
3515 }
3516 }
3517
3518 static void
3519 format_tun_flags(struct ds *ds, const char *name, uint16_t key,
3520 const uint16_t *mask, bool verbose)
3521 {
3522 bool mask_empty = mask && !*mask;
3523
3524 if (verbose || !mask_empty) {
3525 ds_put_cstr(ds, name);
3526 ds_put_char(ds, '(');
3527 if (mask) {
3528 format_flags_masked(ds, NULL, flow_tun_flag_to_string, key,
3529 *mask & FLOW_TNL_F_MASK, FLOW_TNL_F_MASK);
3530 } else { /* Fully masked. */
3531 format_flags(ds, flow_tun_flag_to_string, key, '|');
3532 }
3533 ds_put_cstr(ds, "),");
3534 }
3535 }
3536
3537 static bool
3538 check_attr_len(struct ds *ds, const struct nlattr *a, const struct nlattr *ma,
3539 const struct attr_len_tbl tbl[], int max_type, bool need_key)
3540 {
3541 int expected_len;
3542
3543 expected_len = odp_key_attr_len(tbl, max_type, nl_attr_type(a));
3544 if (expected_len != ATTR_LEN_VARIABLE &&
3545 expected_len != ATTR_LEN_NESTED) {
3546
3547 bool bad_key_len = nl_attr_get_size(a) != expected_len;
3548 bool bad_mask_len = ma && nl_attr_get_size(ma) != expected_len;
3549
3550 if (bad_key_len || bad_mask_len) {
3551 if (need_key) {
3552 ds_put_format(ds, "key%u", nl_attr_type(a));
3553 }
3554 if (bad_key_len) {
3555 ds_put_format(ds, "(bad key length %"PRIuSIZE", expected %d)(",
3556 nl_attr_get_size(a), expected_len);
3557 }
3558 format_generic_odp_key(a, ds);
3559 if (ma) {
3560 ds_put_char(ds, '/');
3561 if (bad_mask_len) {
3562 ds_put_format(ds, "(bad mask length %"PRIuSIZE", expected %d)(",
3563 nl_attr_get_size(ma), expected_len);
3564 }
3565 format_generic_odp_key(ma, ds);
3566 }
3567 ds_put_char(ds, ')');
3568 return false;
3569 }
3570 }
3571
3572 return true;
3573 }
3574
3575 static void
3576 format_unknown_key(struct ds *ds, const struct nlattr *a,
3577 const struct nlattr *ma)
3578 {
3579 ds_put_format(ds, "key%u(", nl_attr_type(a));
3580 format_generic_odp_key(a, ds);
3581 if (ma && !odp_mask_attr_is_exact(ma)) {
3582 ds_put_char(ds, '/');
3583 format_generic_odp_key(ma, ds);
3584 }
3585 ds_put_cstr(ds, "),");
3586 }
3587
3588 static void
3589 format_odp_tun_vxlan_opt(const struct nlattr *attr,
3590 const struct nlattr *mask_attr, struct ds *ds,
3591 bool verbose)
3592 {
3593 unsigned int left;
3594 const struct nlattr *a;
3595 struct ofpbuf ofp;
3596
3597 ofpbuf_init(&ofp, 100);
3598 NL_NESTED_FOR_EACH(a, left, attr) {
3599 uint16_t type = nl_attr_type(a);
3600 const struct nlattr *ma = NULL;
3601
3602 if (mask_attr) {
3603 ma = nl_attr_find__(nl_attr_get(mask_attr),
3604 nl_attr_get_size(mask_attr), type);
3605 if (!ma) {
3606 ma = generate_all_wildcard_mask(ovs_vxlan_ext_attr_lens,
3607 OVS_VXLAN_EXT_MAX,
3608 &ofp, a);
3609 }
3610 }
3611
3612 if (!check_attr_len(ds, a, ma, ovs_vxlan_ext_attr_lens,
3613 OVS_VXLAN_EXT_MAX, true)) {
3614 continue;
3615 }
3616
3617 switch (type) {
3618 case OVS_VXLAN_EXT_GBP: {
3619 uint32_t key = nl_attr_get_u32(a);
3620 ovs_be16 id, id_mask;
3621 uint8_t flags, flags_mask = 0;
3622
3623 id = htons(key & 0xFFFF);
3624 flags = (key >> 16) & 0xFF;
3625 if (ma) {
3626 uint32_t mask = nl_attr_get_u32(ma);
3627 id_mask = htons(mask & 0xFFFF);
3628 flags_mask = (mask >> 16) & 0xFF;
3629 }
3630
3631 ds_put_cstr(ds, "gbp(");
3632 format_be16(ds, "id", id, ma ? &id_mask : NULL, verbose);
3633 format_u8x(ds, "flags", flags, ma ? &flags_mask : NULL, verbose);
3634 ds_chomp(ds, ',');
3635 ds_put_cstr(ds, "),");
3636 break;
3637 }
3638
3639 default:
3640 format_unknown_key(ds, a, ma);
3641 }
3642 ofpbuf_clear(&ofp);
3643 }
3644
3645 ds_chomp(ds, ',');
3646 ofpbuf_uninit(&ofp);
3647 }
3648
3649 static void
3650 format_odp_tun_erspan_opt(const struct nlattr *attr,
3651 const struct nlattr *mask_attr, struct ds *ds,
3652 bool verbose)
3653 {
3654 const struct erspan_metadata *opts, *mask;
3655 uint8_t ver, ver_ma, dir, dir_ma, hwid, hwid_ma;
3656
3657 opts = nl_attr_get(attr);
3658 mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
3659
3660 ver = (uint8_t)opts->version;
3661 if (mask) {
3662 ver_ma = (uint8_t)mask->version;
3663 }
3664
3665 format_u8u(ds, "ver", ver, mask ? &ver_ma : NULL, verbose);
3666
3667 if (opts->version == 1) {
3668 if (mask) {
3669 ds_put_format(ds, "idx=%#"PRIx32"/%#"PRIx32",",
3670 ntohl(opts->u.index),
3671 ntohl(mask->u.index));
3672 } else {
3673 ds_put_format(ds, "idx=%#"PRIx32",", ntohl(opts->u.index));
3674 }
3675 } else if (opts->version == 2) {
3676 dir = opts->u.md2.dir;
3677 hwid = opts->u.md2.hwid;
3678 if (mask) {
3679 dir_ma = mask->u.md2.dir;
3680 hwid_ma = mask->u.md2.hwid;
3681 }
3682
3683 format_u8u(ds, "dir", dir, mask ? &dir_ma : NULL, verbose);
3684 format_u8x(ds, "hwid", hwid, mask ? &hwid_ma : NULL, verbose);
3685 }
3686 ds_chomp(ds, ',');
3687 }
3688
3689 static void
3690 format_odp_tun_gtpu_opt(const struct nlattr *attr,
3691 const struct nlattr *mask_attr, struct ds *ds,
3692 bool verbose)
3693 {
3694 const struct gtpu_metadata *opts, *mask;
3695
3696 opts = nl_attr_get(attr);
3697 mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
3698
3699 format_u8x(ds, "flags", opts->flags, mask ? &mask->flags : NULL, verbose);
3700 format_u8u(ds, "msgtype", opts->msgtype, mask ? &mask->msgtype : NULL,
3701 verbose);
3702 ds_chomp(ds, ',');
3703 }
3704
3705 #define MASK(PTR, FIELD) PTR ? &PTR->FIELD : NULL
3706
3707 static void
3708 format_geneve_opts(const struct geneve_opt *opt,
3709 const struct geneve_opt *mask, int opts_len,
3710 struct ds *ds, bool verbose)
3711 {
3712 while (opts_len > 0) {
3713 unsigned int len;
3714 uint8_t data_len, data_len_mask;
3715
3716 if (opts_len < sizeof *opt) {
3717 ds_put_format(ds, "opt len %u less than minimum %"PRIuSIZE,
3718 opts_len, sizeof *opt);
3719 return;
3720 }
3721
3722 data_len = opt->length * 4;
3723 if (mask) {
3724 if (mask->length == 0x1f) {
3725 data_len_mask = UINT8_MAX;
3726 } else {
3727 data_len_mask = mask->length;
3728 }
3729 }
3730 len = sizeof *opt + data_len;
3731 if (len > opts_len) {
3732 ds_put_format(ds, "opt len %u greater than remaining %u",
3733 len, opts_len);
3734 return;
3735 }
3736
3737 ds_put_char(ds, '{');
3738 format_be16x(ds, "class", opt->opt_class, MASK(mask, opt_class),
3739 verbose);
3740 format_u8x(ds, "type", opt->type, MASK(mask, type), verbose);
3741 format_u8u(ds, "len", data_len, mask ? &data_len_mask : NULL, verbose);
3742 if (data_len &&
3743 (verbose || !mask || !is_all_zeros(mask + 1, data_len))) {
3744 ds_put_hex(ds, opt + 1, data_len);
3745 if (mask && !is_all_ones(mask + 1, data_len)) {
3746 ds_put_char(ds, '/');
3747 ds_put_hex(ds, mask + 1, data_len);
3748 }
3749 } else {
3750 ds_chomp(ds, ',');
3751 }
3752 ds_put_char(ds, '}');
3753
3754 opt += len / sizeof(*opt);
3755 if (mask) {
3756 mask += len / sizeof(*opt);
3757 }
3758 opts_len -= len;
3759 };
3760 }
3761
3762 static void
3763 format_odp_tun_geneve(const struct nlattr *attr,
3764 const struct nlattr *mask_attr, struct ds *ds,
3765 bool verbose)
3766 {
3767 int opts_len = nl_attr_get_size(attr);
3768 const struct geneve_opt *opt = nl_attr_get(attr);
3769 const struct geneve_opt *mask = mask_attr ?
3770 nl_attr_get(mask_attr) : NULL;
3771
3772 if (mask && nl_attr_get_size(attr) != nl_attr_get_size(mask_attr)) {
3773 ds_put_format(ds, "value len %"PRIuSIZE" different from mask len %"PRIuSIZE,
3774 nl_attr_get_size(attr), nl_attr_get_size(mask_attr));
3775 return;
3776 }
3777
3778 format_geneve_opts(opt, mask, opts_len, ds, verbose);
3779 }
3780
3781 static void
3782 format_odp_nsh_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3783 struct ds *ds)
3784 {
3785 unsigned int left;
3786 const struct nlattr *a;
3787 struct ovs_key_nsh nsh;
3788 struct ovs_key_nsh nsh_mask;
3789
3790 memset(&nsh, 0, sizeof nsh);
3791 memset(&nsh_mask, 0xff, sizeof nsh_mask);
3792
3793 NL_NESTED_FOR_EACH (a, left, attr) {
3794 enum ovs_nsh_key_attr type = nl_attr_type(a);
3795 const struct nlattr *ma = NULL;
3796
3797 if (mask_attr) {
3798 ma = nl_attr_find__(nl_attr_get(mask_attr),
3799 nl_attr_get_size(mask_attr), type);
3800 }
3801
3802 if (!check_attr_len(ds, a, ma, ovs_nsh_key_attr_lens,
3803 OVS_NSH_KEY_ATTR_MAX, true)) {
3804 continue;
3805 }
3806
3807 switch (type) {
3808 case OVS_NSH_KEY_ATTR_UNSPEC:
3809 break;
3810 case OVS_NSH_KEY_ATTR_BASE: {
3811 const struct ovs_nsh_key_base *base = nl_attr_get(a);
3812 const struct ovs_nsh_key_base *base_mask
3813 = ma ? nl_attr_get(ma) : NULL;
3814 nsh.flags = base->flags;
3815 nsh.ttl = base->ttl;
3816 nsh.mdtype = base->mdtype;
3817 nsh.np = base->np;
3818 nsh.path_hdr = base->path_hdr;
3819 if (base_mask) {
3820 nsh_mask.flags = base_mask->flags;
3821 nsh_mask.ttl = base_mask->ttl;
3822 nsh_mask.mdtype = base_mask->mdtype;
3823 nsh_mask.np = base_mask->np;
3824 nsh_mask.path_hdr = base_mask->path_hdr;
3825 }
3826 break;
3827 }
3828 case OVS_NSH_KEY_ATTR_MD1: {
3829 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
3830 const struct ovs_nsh_key_md1 *md1_mask
3831 = ma ? nl_attr_get(ma) : NULL;
3832 memcpy(nsh.context, md1->context, sizeof md1->context);
3833 if (md1_mask) {
3834 memcpy(nsh_mask.context, md1_mask->context,
3835 sizeof md1_mask->context);
3836 }
3837 break;
3838 }
3839 case OVS_NSH_KEY_ATTR_MD2:
3840 case __OVS_NSH_KEY_ATTR_MAX:
3841 default:
3842 /* No support for matching other metadata formats yet. */
3843 break;
3844 }
3845 }
3846
3847 if (mask_attr) {
3848 format_nsh_key_mask(ds, &nsh, &nsh_mask);
3849 } else {
3850 format_nsh_key(ds, &nsh);
3851 }
3852 }
3853
3854 static void
3855 format_odp_tun_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3856 struct ds *ds, bool verbose)
3857 {
3858 unsigned int left;
3859 const struct nlattr *a;
3860 uint16_t flags = 0;
3861 uint16_t mask_flags = 0;
3862 struct ofpbuf ofp;
3863
3864 ofpbuf_init(&ofp, 100);
3865 NL_NESTED_FOR_EACH(a, left, attr) {
3866 enum ovs_tunnel_key_attr type = nl_attr_type(a);
3867 const struct nlattr *ma = NULL;
3868
3869 if (mask_attr) {
3870 ma = nl_attr_find__(nl_attr_get(mask_attr),
3871 nl_attr_get_size(mask_attr), type);
3872 if (!ma) {
3873 ma = generate_all_wildcard_mask(ovs_tun_key_attr_lens,
3874 OVS_TUNNEL_KEY_ATTR_MAX,
3875 &ofp, a);
3876 }
3877 }
3878
3879 if (!check_attr_len(ds, a, ma, ovs_tun_key_attr_lens,
3880 OVS_TUNNEL_KEY_ATTR_MAX, true)) {
3881 continue;
3882 }
3883
3884 switch (type) {
3885 case OVS_TUNNEL_KEY_ATTR_ID:
3886 format_be64(ds, "tun_id", nl_attr_get_be64(a),
3887 ma ? nl_attr_get(ma) : NULL, verbose);
3888 flags |= FLOW_TNL_F_KEY;
3889 if (ma) {
3890 mask_flags |= FLOW_TNL_F_KEY;
3891 }
3892 break;
3893 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
3894 format_ipv4(ds, "src", nl_attr_get_be32(a),
3895 ma ? nl_attr_get(ma) : NULL, verbose);
3896 break;
3897 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
3898 format_ipv4(ds, "dst", nl_attr_get_be32(a),
3899 ma ? nl_attr_get(ma) : NULL, verbose);
3900 break;
3901 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
3902 struct in6_addr ipv6_src;
3903 ipv6_src = nl_attr_get_in6_addr(a);
3904 format_in6_addr(ds, "ipv6_src", &ipv6_src,
3905 ma ? nl_attr_get(ma) : NULL, verbose);
3906 break;
3907 }
3908 case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
3909 struct in6_addr ipv6_dst;
3910 ipv6_dst = nl_attr_get_in6_addr(a);
3911 format_in6_addr(ds, "ipv6_dst", &ipv6_dst,
3912 ma ? nl_attr_get(ma) : NULL, verbose);
3913 break;
3914 }
3915 case OVS_TUNNEL_KEY_ATTR_TOS:
3916 format_u8x(ds, "tos", nl_attr_get_u8(a),
3917 ma ? nl_attr_get(ma) : NULL, verbose);
3918 break;
3919 case OVS_TUNNEL_KEY_ATTR_TTL:
3920 format_u8u(ds, "ttl", nl_attr_get_u8(a),
3921 ma ? nl_attr_get(ma) : NULL, verbose);
3922 break;
3923 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3924 flags |= FLOW_TNL_F_DONT_FRAGMENT;
3925 break;
3926 case OVS_TUNNEL_KEY_ATTR_CSUM:
3927 flags |= FLOW_TNL_F_CSUM;
3928 break;
3929 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
3930 format_be16(ds, "tp_src", nl_attr_get_be16(a),
3931 ma ? nl_attr_get(ma) : NULL, verbose);
3932 break;
3933 case OVS_TUNNEL_KEY_ATTR_TP_DST:
3934 format_be16(ds, "tp_dst", nl_attr_get_be16(a),
3935 ma ? nl_attr_get(ma) : NULL, verbose);
3936 break;
3937 case OVS_TUNNEL_KEY_ATTR_OAM:
3938 flags |= FLOW_TNL_F_OAM;
3939 break;
3940 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
3941 ds_put_cstr(ds, "vxlan(");
3942 format_odp_tun_vxlan_opt(a, ma, ds, verbose);
3943 ds_put_cstr(ds, "),");
3944 break;
3945 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
3946 ds_put_cstr(ds, "geneve(");
3947 format_odp_tun_geneve(a, ma, ds, verbose);
3948 ds_put_cstr(ds, "),");
3949 break;
3950 case OVS_TUNNEL_KEY_ATTR_PAD:
3951 break;
3952 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
3953 ds_put_cstr(ds, "erspan(");
3954 format_odp_tun_erspan_opt(a, ma, ds, verbose);
3955 ds_put_cstr(ds, "),");
3956 break;
3957 case OVS_TUNNEL_KEY_ATTR_GTPU_OPTS:
3958 ds_put_cstr(ds, "gtpu(");
3959 format_odp_tun_gtpu_opt(a, ma, ds, verbose);
3960 ds_put_cstr(ds, ")");
3961 break;
3962 case __OVS_TUNNEL_KEY_ATTR_MAX:
3963 default:
3964 format_unknown_key(ds, a, ma);
3965 }
3966 ofpbuf_clear(&ofp);
3967 }
3968
3969 /* Flags can have a valid mask even if the attribute is not set, so
3970 * we need to collect these separately. */
3971 if (mask_attr) {
3972 NL_NESTED_FOR_EACH(a, left, mask_attr) {
3973 switch (nl_attr_type(a)) {
3974 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3975 mask_flags |= FLOW_TNL_F_DONT_FRAGMENT;
3976 break;
3977 case OVS_TUNNEL_KEY_ATTR_CSUM:
3978 mask_flags |= FLOW_TNL_F_CSUM;
3979 break;
3980 case OVS_TUNNEL_KEY_ATTR_OAM:
3981 mask_flags |= FLOW_TNL_F_OAM;
3982 break;
3983 }
3984 }
3985 }
3986
3987 format_tun_flags(ds, "flags", flags, mask_attr ? &mask_flags : NULL,
3988 verbose);
3989 ds_chomp(ds, ',');
3990 ofpbuf_uninit(&ofp);
3991 }
3992
3993 static const char *
3994 odp_ct_state_to_string(uint32_t flag)
3995 {
3996 switch (flag) {
3997 case OVS_CS_F_REPLY_DIR:
3998 return "rpl";
3999 case OVS_CS_F_TRACKED:
4000 return "trk";
4001 case OVS_CS_F_NEW:
4002 return "new";
4003 case OVS_CS_F_ESTABLISHED:
4004 return "est";
4005 case OVS_CS_F_RELATED:
4006 return "rel";
4007 case OVS_CS_F_INVALID:
4008 return "inv";
4009 case OVS_CS_F_SRC_NAT:
4010 return "snat";
4011 case OVS_CS_F_DST_NAT:
4012 return "dnat";
4013 default:
4014 return NULL;
4015 }
4016 }
4017
4018 static void
4019 format_frag(struct ds *ds, const char *name, uint8_t key,
4020 const uint8_t *mask, bool verbose OVS_UNUSED)
4021 {
4022 bool mask_empty = mask && !*mask;
4023 bool mask_full = !mask || *mask == UINT8_MAX;
4024
4025 /* ODP frag is an enumeration field; partial masks are not meaningful. */
4026 if (!mask_empty && !mask_full) {
4027 ds_put_format(ds, "error: partial mask not supported for frag (%#"
4028 PRIx8"),", *mask);
4029 } else if (!mask_empty) {
4030 ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
4031 }
4032 }
4033
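/* Returns true if 'ma' is null or if every byte of its payload is zero,
 * i.e. the attribute carries an all-wildcarded mask. */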
4034 static bool
4035 mask_empty(const struct nlattr *ma)
4036 {
4037 const void *mask;
4038 size_t n;
4039
4040 if (!ma) {
4041 return true;
4042 }
4043 mask = nl_attr_get(ma);
4044 n = nl_attr_get_size(ma);
4045
4046 return is_all_zeros(mask, n);
4047 }
4048
4049 /* The caller must have already verified that 'a' and 'ma' have correct
4050 * lengths. */
4051 static void
4052 format_odp_key_attr__(const struct nlattr *a, const struct nlattr *ma,
4053 const struct hmap *portno_names, struct ds *ds,
4054 bool verbose)
4055 {
4056 enum ovs_key_attr attr = nl_attr_type(a);
4057 char namebuf[OVS_KEY_ATTR_BUFSIZE];
4058 bool is_exact;
4059
4060 is_exact = ma ? odp_mask_attr_is_exact(ma) : true;
4061
4062 ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
4063
4064 ds_put_char(ds, '(');
4065 switch (attr) {
4066 case OVS_KEY_ATTR_ENCAP:
4067 if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
4068 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
4069 nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
4070 verbose);
4071 } else if (nl_attr_get_size(a)) {
4072 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
4073 ds, verbose);
4074 }
4075 break;
4076
4077 case OVS_KEY_ATTR_PRIORITY:
4078 case OVS_KEY_ATTR_SKB_MARK:
4079 case OVS_KEY_ATTR_DP_HASH:
4080 case OVS_KEY_ATTR_RECIRC_ID:
4081 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4082 if (!is_exact) {
4083 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4084 }
4085 break;
4086
4087 case OVS_KEY_ATTR_CT_MARK:
4088 if (verbose || !mask_empty(ma)) {
4089 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4090 if (!is_exact) {
4091 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4092 }
4093 }
4094 break;
4095
4096 case OVS_KEY_ATTR_CT_STATE:
4097 if (verbose) {
4098 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4099 if (!is_exact) {
4100 ds_put_format(ds, "/%#"PRIx32,
4101 mask_empty(ma) ? 0 : nl_attr_get_u32(ma));
4102 }
4103 } else if (!is_exact) {
4104 format_flags_masked(ds, NULL, odp_ct_state_to_string,
4105 nl_attr_get_u32(a),
4106 mask_empty(ma) ? 0 : nl_attr_get_u32(ma),
4107 UINT32_MAX);
4108 } else {
4109 format_flags(ds, odp_ct_state_to_string, nl_attr_get_u32(a), '|');
4110 }
4111 break;
4112
4113 case OVS_KEY_ATTR_CT_ZONE:
4114 if (verbose || !mask_empty(ma)) {
4115 ds_put_format(ds, "%#"PRIx16, nl_attr_get_u16(a));
4116 if (!is_exact) {
4117 ds_put_format(ds, "/%#"PRIx16, nl_attr_get_u16(ma));
4118 }
4119 }
4120 break;
4121
4122 case OVS_KEY_ATTR_CT_LABELS: {
4123 const ovs_32aligned_u128 *value = nl_attr_get(a);
4124 const ovs_32aligned_u128 *mask = ma ? nl_attr_get(ma) : NULL;
4125
4126 format_u128(ds, value, mask, verbose);
4127 break;
4128 }
4129
4130 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
4131 const struct ovs_key_ct_tuple_ipv4 *key = nl_attr_get(a);
4132 const struct ovs_key_ct_tuple_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
4133
4134 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
4135 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
4136 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
4137 verbose);
4138 format_be16(ds, "tp_src", key->src_port, MASK(mask, src_port),
4139 verbose);
4140 format_be16(ds, "tp_dst", key->dst_port, MASK(mask, dst_port),
4141 verbose);
4142 ds_chomp(ds, ',');
4143 break;
4144 }
4145
4146 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
4147 const struct ovs_key_ct_tuple_ipv6 *key = nl_attr_get(a);
4148 const struct ovs_key_ct_tuple_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
4149
4150 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
4151 verbose);
4152 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
4153 verbose);
4154 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
4155 verbose);
4156 format_be16(ds, "src_port", key->src_port, MASK(mask, src_port),
4157 verbose);
4158 format_be16(ds, "dst_port", key->dst_port, MASK(mask, dst_port),
4159 verbose);
4160 ds_chomp(ds, ',');
4161 break;
4162 }
4163
4164 case OVS_KEY_ATTR_TUNNEL:
4165 format_odp_tun_attr(a, ma, ds, verbose);
4166 break;
4167
4168 case OVS_KEY_ATTR_IN_PORT:
4169 if (is_exact) {
4170 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
4171 } else {
4172 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
4173 if (!is_exact) {
4174 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4175 }
4176 }
4177 break;
4178
4179 case OVS_KEY_ATTR_PACKET_TYPE: {
4180 ovs_be32 value = nl_attr_get_be32(a);
4181 ovs_be32 mask = ma ? nl_attr_get_be32(ma) : OVS_BE32_MAX;
4182
4183 ovs_be16 ns = htons(pt_ns(value));
4184 ovs_be16 ns_mask = htons(pt_ns(mask));
4185 format_be16(ds, "ns", ns, &ns_mask, verbose);
4186
4187 ovs_be16 ns_type = pt_ns_type_be(value);
4188 ovs_be16 ns_type_mask = pt_ns_type_be(mask);
4189 format_be16x(ds, "id", ns_type, &ns_type_mask, verbose);
4190
4191 ds_chomp(ds, ',');
4192 break;
4193 }
4194
4195 case OVS_KEY_ATTR_ETHERNET: {
4196 const struct ovs_key_ethernet *mask = ma ? nl_attr_get(ma) : NULL;
4197 const struct ovs_key_ethernet *key = nl_attr_get(a);
4198
4199 format_eth(ds, "src", key->eth_src, MASK(mask, eth_src), verbose);
4200 format_eth(ds, "dst", key->eth_dst, MASK(mask, eth_dst), verbose);
4201 ds_chomp(ds, ',');
4202 break;
4203 }
4204 case OVS_KEY_ATTR_VLAN:
4205 format_vlan_tci(ds, nl_attr_get_be16(a),
4206 ma ? nl_attr_get_be16(ma) : OVS_BE16_MAX, verbose);
4207 break;
4208
4209 case OVS_KEY_ATTR_MPLS: {
4210 const struct ovs_key_mpls *mpls_key = nl_attr_get(a);
4211 const struct ovs_key_mpls *mpls_mask = NULL;
4212 size_t size = nl_attr_get_size(a);
4213
4214 if (!size || size % sizeof *mpls_key) {
4215 ds_put_format(ds, "(bad key length %"PRIuSIZE")", size);
4216 return;
4217 }
4218 if (!is_exact) {
4219 mpls_mask = nl_attr_get(ma);
4220 if (size != nl_attr_get_size(ma)) {
4221 ds_put_format(ds, "(key length %"PRIuSIZE" != "
4222 "mask length %"PRIuSIZE")",
4223 size, nl_attr_get_size(ma));
4224 return;
4225 }
4226 }
4227 format_mpls(ds, mpls_key, mpls_mask, size / sizeof *mpls_key);
4228 break;
4229 }
4230 case OVS_KEY_ATTR_ETHERTYPE:
4231 ds_put_format(ds, "0x%04"PRIx16, ntohs(nl_attr_get_be16(a)));
4232 if (!is_exact) {
4233 ds_put_format(ds, "/0x%04"PRIx16, ntohs(nl_attr_get_be16(ma)));
4234 }
4235 break;
4236
4237 case OVS_KEY_ATTR_IPV4: {
4238 const struct ovs_key_ipv4 *key = nl_attr_get(a);
4239 const struct ovs_key_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
4240
4241 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
4242 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
4243 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
4244 verbose);
4245 format_u8x(ds, "tos", key->ipv4_tos, MASK(mask, ipv4_tos), verbose);
4246 format_u8u(ds, "ttl", key->ipv4_ttl, MASK(mask, ipv4_ttl), verbose);
4247 format_frag(ds, "frag", key->ipv4_frag, MASK(mask, ipv4_frag),
4248 verbose);
4249 ds_chomp(ds, ',');
4250 break;
4251 }
4252 case OVS_KEY_ATTR_IPV6: {
4253 const struct ovs_key_ipv6 *key = nl_attr_get(a);
4254 const struct ovs_key_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
4255
4256 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
4257 verbose);
4258 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
4259 verbose);
4260 format_ipv6_label(ds, "label", key->ipv6_label, MASK(mask, ipv6_label),
4261 verbose);
4262 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
4263 verbose);
4264 format_u8x(ds, "tclass", key->ipv6_tclass, MASK(mask, ipv6_tclass),
4265 verbose);
4266 format_u8u(ds, "hlimit", key->ipv6_hlimit, MASK(mask, ipv6_hlimit),
4267 verbose);
4268 format_frag(ds, "frag", key->ipv6_frag, MASK(mask, ipv6_frag),
4269 verbose);
4270 ds_chomp(ds, ',');
4271 break;
4272 }
4273 /* These have the same structure and format. */
4274 case OVS_KEY_ATTR_TCP:
4275 case OVS_KEY_ATTR_UDP:
4276 case OVS_KEY_ATTR_SCTP: {
4277 const struct ovs_key_tcp *key = nl_attr_get(a);
4278 const struct ovs_key_tcp *mask = ma ? nl_attr_get(ma) : NULL;
4279
4280 format_be16(ds, "src", key->tcp_src, MASK(mask, tcp_src), verbose);
4281 format_be16(ds, "dst", key->tcp_dst, MASK(mask, tcp_dst), verbose);
4282 ds_chomp(ds, ',');
4283 break;
4284 }
4285 case OVS_KEY_ATTR_TCP_FLAGS:
4286 if (!is_exact) {
4287 format_flags_masked(ds, NULL, packet_tcp_flag_to_string,
4288 ntohs(nl_attr_get_be16(a)),
4289 TCP_FLAGS(nl_attr_get_be16(ma)),
4290 TCP_FLAGS(OVS_BE16_MAX));
4291 } else {
4292 format_flags(ds, packet_tcp_flag_to_string,
4293 ntohs(nl_attr_get_be16(a)), '|');
4294 }
4295 break;
4296
4297 case OVS_KEY_ATTR_ICMP: {
4298 const struct ovs_key_icmp *key = nl_attr_get(a);
4299 const struct ovs_key_icmp *mask = ma ? nl_attr_get(ma) : NULL;
4300
4301 format_u8u(ds, "type", key->icmp_type, MASK(mask, icmp_type), verbose);
4302 format_u8u(ds, "code", key->icmp_code, MASK(mask, icmp_code), verbose);
4303 ds_chomp(ds, ',');
4304 break;
4305 }
4306 case OVS_KEY_ATTR_ICMPV6: {
4307 const struct ovs_key_icmpv6 *key = nl_attr_get(a);
4308 const struct ovs_key_icmpv6 *mask = ma ? nl_attr_get(ma) : NULL;
4309
4310 format_u8u(ds, "type", key->icmpv6_type, MASK(mask, icmpv6_type),
4311 verbose);
4312 format_u8u(ds, "code", key->icmpv6_code, MASK(mask, icmpv6_code),
4313 verbose);
4314 ds_chomp(ds, ',');
4315 break;
4316 }
4317 case OVS_KEY_ATTR_ARP: {
4318 const struct ovs_key_arp *mask = ma ? nl_attr_get(ma) : NULL;
4319 const struct ovs_key_arp *key = nl_attr_get(a);
4320
4321 format_ipv4(ds, "sip", key->arp_sip, MASK(mask, arp_sip), verbose);
4322 format_ipv4(ds, "tip", key->arp_tip, MASK(mask, arp_tip), verbose);
4323 format_be16(ds, "op", key->arp_op, MASK(mask, arp_op), verbose);
4324 format_eth(ds, "sha", key->arp_sha, MASK(mask, arp_sha), verbose);
4325 format_eth(ds, "tha", key->arp_tha, MASK(mask, arp_tha), verbose);
4326 ds_chomp(ds, ',');
4327 break;
4328 }
4329 case OVS_KEY_ATTR_ND: {
4330 const struct ovs_key_nd *mask = ma ? nl_attr_get(ma) : NULL;
4331 const struct ovs_key_nd *key = nl_attr_get(a);
4332
4333 format_in6_addr(ds, "target", &key->nd_target, MASK(mask, nd_target),
4334 verbose);
4335 format_eth(ds, "sll", key->nd_sll, MASK(mask, nd_sll), verbose);
4336 format_eth(ds, "tll", key->nd_tll, MASK(mask, nd_tll), verbose);
4337
4338 ds_chomp(ds, ',');
4339 break;
4340 }
4341 case OVS_KEY_ATTR_ND_EXTENSIONS: {
4342 const struct ovs_key_nd_extensions *mask = ma ? nl_attr_get(ma) : NULL;
4343 const struct ovs_key_nd_extensions *key = nl_attr_get(a);
4344
4345 bool first = true;
4346 format_be32_masked(ds, &first, "nd_reserved", key->nd_reserved,
4347 OVS_BE32_MAX);
4348 ds_put_char(ds, ',');
4349
4350 format_u8u(ds, "nd_options_type", key->nd_options_type,
4351 MASK(mask, nd_options_type), verbose);
4352
4353 ds_chomp(ds, ',');
4354 break;
4355 }
4356 case OVS_KEY_ATTR_NSH: {
4357 format_odp_nsh_attr(a, ma, ds);
4358 break;
4359 }
4360 case OVS_KEY_ATTR_UNSPEC:
4361 case __OVS_KEY_ATTR_MAX:
4362 default:
4363 format_generic_odp_key(a, ds);
4364 if (!is_exact) {
4365 ds_put_char(ds, '/');
4366 format_generic_odp_key(ma, ds);
4367 }
4368 break;
4369 }
4370 ds_put_char(ds, ')');
4371 }
4372
4373 static void
4374 format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
4375 const struct hmap *portno_names, struct ds *ds,
4376 bool verbose)
4377 {
4378 if (check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4379 OVS_KEY_ATTR_MAX, false)) {
4380 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4381 }
4382 }
4383
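/* Appends to 'ofp' an all-zeros (fully wildcarded) mask attribute with the
 * same type and length as 'key', recursing into nested attributes as
 * described by 'tbl' and 'max'.  Returns the base of 'ofp', where the caller
 * can find the generated mask. */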
4384 static struct nlattr *
4385 generate_all_wildcard_mask(const struct attr_len_tbl tbl[], int max,
4386 struct ofpbuf *ofp, const struct nlattr *key)
4387 {
4388 const struct nlattr *a;
4389 unsigned int left;
4390 int type = nl_attr_type(key);
4391 int size = nl_attr_get_size(key);
4392
4393 if (odp_key_attr_len(tbl, max, type) != ATTR_LEN_NESTED) {
4394 nl_msg_put_unspec_zero(ofp, type, size);
4395 } else {
4396 size_t nested_mask;
4397
4398 if (tbl[type].next) {
4399 const struct attr_len_tbl *entry = &tbl[type];
4400 tbl = entry->next;
4401 max = entry->next_max;
4402 }
4403
4404 nested_mask = nl_msg_start_nested(ofp, type);
4405 NL_ATTR_FOR_EACH(a, left, key, nl_attr_get_size(key)) {
4406 generate_all_wildcard_mask(tbl, max, ofp, nl_attr_get(a));
4407 }
4408 nl_msg_end_nested(ofp, nested_mask);
4409 }
4410
4411 return ofp->base;
4412 }
4413
4414 static void
4415 format_u128(struct ds *ds, const ovs_32aligned_u128 *key,
4416 const ovs_32aligned_u128 *mask, bool verbose)
4417 {
4418 if (verbose || (mask && !ovs_u128_is_zero(get_32aligned_u128(mask)))) {
4419 ovs_be128 value = hton128(get_32aligned_u128(key));
4420 ds_put_hex(ds, &value, sizeof value);
4421 if (mask && !(ovs_u128_is_ones(get_32aligned_u128(mask)))) {
4422 value = hton128(get_32aligned_u128(mask));
4423 ds_put_char(ds, '/');
4424 ds_put_hex(ds, &value, sizeof value);
4425 }
4426 }
4427 }
4428
4429 /* Read the string from 's_' as a 128-bit value. If the string contains
4430 * a "/", the rest of the string will be treated as a 128-bit mask.
4431 *
4432 * If either the value or mask is larger than 64 bits, the string must
4433 * be in hexadecimal.
4434 */
4435 static int
4436 scan_u128(const char *s_, ovs_u128 *value, ovs_u128 *mask)
4437 {
4438 char *s = CONST_CAST(char *, s_);
4439 ovs_be128 be_value;
4440 ovs_be128 be_mask;
4441
4442 if (!parse_int_string(s, (uint8_t *)&be_value, sizeof be_value, &s)) {
4443 *value = ntoh128(be_value);
4444
4445 if (mask) {
4446 int n;
4447
4448 if (ovs_scan(s, "/%n", &n)) {
4449 int error;
4450
4451 s += n;
4452 error = parse_int_string(s, (uint8_t *)&be_mask,
4453 sizeof be_mask, &s);
4454 if (error) {
4455 return 0;
4456 }
4457 *mask = ntoh128(be_mask);
4458 } else {
4459 *mask = OVS_U128_MAX;
4460 }
4461 }
4462 return s - s_;
4463 }
4464
4465 return 0;
4466 }
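/* Example (illustrative): scan_u128() accepts strings such as
 * "0x1234567890abcdef1234567890abcdef" or, with a mask,
 * "0x1/0xffffffffffffffffffffffffffffffff"; values and masks that fit in
 * 64 bits may also be given in decimal, per the comment above. */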
4467
4468 int
4469 odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
4470 {
4471 const char *s = s_;
4472
4473 if (ovs_scan(s, "ufid:")) {
4474 s += 5;
4475
4476 if (!uuid_from_string_prefix((struct uuid *)ufid, s)) {
4477 return -EINVAL;
4478 }
4479 s += UUID_LEN;
4480
4481 return s - s_;
4482 }
4483
4484 return 0;
4485 }
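/* Example (illustrative): a parsable UFID string looks like
 * "ufid:01234567-89ab-cdef-0123-456789abcdef", i.e. the "ufid:" prefix
 * followed by a standard UUID, as produced by odp_format_ufid() below. */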
4486
4487 void
4488 odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
4489 {
4490 ds_put_format(ds, "ufid:"UUID_FMT, UUID_ARGS((struct uuid *)ufid));
4491 }
4492
4493 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4494 * OVS_KEY_ATTR_* attributes in 'key'. If non-null, additionally formats the
4495 * 'mask_len' bytes of 'mask' which apply to 'key'. If 'portno_names' is
4496 * non-null, translates odp port number to its name. */
4497 void
4498 odp_flow_format(const struct nlattr *key, size_t key_len,
4499 const struct nlattr *mask, size_t mask_len,
4500 const struct hmap *portno_names, struct ds *ds, bool verbose)
4501 {
4502 if (key_len) {
4503 const struct nlattr *a;
4504 unsigned int left;
4505 bool has_ethtype_key = false;
4506 bool has_packet_type_key = false;
4507 struct ofpbuf ofp;
4508 bool first_field = true;
4509
4510 ofpbuf_init(&ofp, 100);
4511 NL_ATTR_FOR_EACH (a, left, key, key_len) {
4512 int attr_type = nl_attr_type(a);
4513 const struct nlattr *ma = (mask && mask_len
4514 ? nl_attr_find__(mask, mask_len,
4515 attr_type)
4516 : NULL);
4517 if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4518 OVS_KEY_ATTR_MAX, false)) {
4519 continue;
4520 }
4521
4522 bool is_nested_attr;
4523 bool is_wildcard = false;
4524
4525 if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
4526 has_ethtype_key = true;
4527 } else if (attr_type == OVS_KEY_ATTR_PACKET_TYPE) {
4528 has_packet_type_key = true;
4529 }
4530
4531 is_nested_attr = odp_key_attr_len(ovs_flow_key_attr_lens,
4532 OVS_KEY_ATTR_MAX, attr_type) ==
4533 ATTR_LEN_NESTED;
4534
4535 if (mask && mask_len) {
4536 ma = nl_attr_find__(mask, mask_len, nl_attr_type(a));
4537 is_wildcard = ma ? odp_mask_attr_is_wildcard(ma) : true;
4538 }
4539
4540 if (verbose || !is_wildcard || is_nested_attr) {
4541 if (is_wildcard && !ma) {
4542 ma = generate_all_wildcard_mask(ovs_flow_key_attr_lens,
4543 OVS_KEY_ATTR_MAX,
4544 &ofp, a);
4545 }
4546 if (!first_field) {
4547 ds_put_char(ds, ',');
4548 }
4549 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4550 first_field = false;
4551 } else if (attr_type == OVS_KEY_ATTR_ETHERNET
4552 && !has_packet_type_key) {
4553 /* This special case reflects differences between the kernel
4554 * and userspace datapaths regarding the root type of the
4555 * packet being matched (typically Ethernet but some tunnels
4556 * can encapsulate IPv4 etc.). The kernel datapath does not
4557 * have an explicit way to indicate packet type; instead:
4558 *
4559 * - If OVS_KEY_ATTR_ETHERNET is present, the packet is an
4560 * Ethernet packet and OVS_KEY_ATTR_ETHERTYPE is the
4561 * Ethertype encoded in the Ethernet header.
4562 *
4563 * - If OVS_KEY_ATTR_ETHERNET is absent, then the packet's
4564 * root type is that encoded in OVS_KEY_ATTR_ETHERTYPE
4565 * (i.e. if OVS_KEY_ATTR_ETHERTYPE is 0x0800 then the
4566 * packet is an IPv4 packet).
4567 *
4568 * Thus, if OVS_KEY_ATTR_ETHERNET is present, even if it is
4569 * all-wildcarded, it is important to print it.
4570 *
4571 * On the other hand, the userspace datapath supports
4572 * OVS_KEY_ATTR_PACKET_TYPE and uses it to indicate the packet
4573 * type. Thus, if OVS_KEY_ATTR_PACKET_TYPE is present, we need
4574 * not print an all-wildcarded OVS_KEY_ATTR_ETHERNET. */
4575 if (!first_field) {
4576 ds_put_char(ds, ',');
4577 }
4578 ds_put_cstr(ds, "eth()");
4579 }
4580 ofpbuf_clear(&ofp);
4581 }
4582 ofpbuf_uninit(&ofp);
4583
4584 if (left) {
4585 int i;
4586
4587 if (left == key_len) {
4588 ds_put_cstr(ds, "<empty>");
4589 }
4590 ds_put_format(ds, ",***%u leftover bytes*** (", left);
4591 for (i = 0; i < left; i++) {
4592 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
4593 }
4594 ds_put_char(ds, ')');
4595 }
4596 if (!has_ethtype_key) {
4597 const struct nlattr *ma = nl_attr_find__(mask, mask_len,
4598 OVS_KEY_ATTR_ETHERTYPE);
4599 if (ma) {
4600 ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
4601 ntohs(nl_attr_get_be16(ma)));
4602 }
4603 }
4604 } else {
4605 ds_put_cstr(ds, "<empty>");
4606 }
4607 }
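/* Example (illustrative): for a TCP-over-IPv4 flow the output of
 * odp_flow_format() typically looks like
 *   in_port(1),eth(src=50:54:00:00:00:01,dst=50:54:00:00:00:02),
 *   eth_type(0x0800),ipv4(src=10.0.0.1,dst=10.0.0.2,proto=6,tos=0,ttl=64,
 *   frag=no),tcp(src=1234,dst=80)
 * (wrapped here for readability; the actual output is a single
 * comma-separated string). */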
4608
4609 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4610 * OVS_KEY_ATTR_* attributes in 'key'. */
4611 void
4612 odp_flow_key_format(const struct nlattr *key,
4613 size_t key_len, struct ds *ds)
4614 {
4615 odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
4616 }
4617
4618 static bool
4619 ovs_frag_type_from_string(const char *s, enum ovs_frag_type *type)
4620 {
4621 if (!strcasecmp(s, "no")) {
4622 *type = OVS_FRAG_TYPE_NONE;
4623 } else if (!strcasecmp(s, "first")) {
4624 *type = OVS_FRAG_TYPE_FIRST;
4625 } else if (!strcasecmp(s, "later")) {
4626 *type = OVS_FRAG_TYPE_LATER;
4627 } else {
4628 return false;
4629 }
4630 return true;
4631 }
4632
4633 /* Parsing. */
4634
4635 static int
4636 scan_eth(const char *s, struct eth_addr *key, struct eth_addr *mask)
4637 {
4638 int n;
4639
4640 if (ovs_scan(s, ETH_ADDR_SCAN_FMT"%n",
4641 ETH_ADDR_SCAN_ARGS(*key), &n)) {
4642 int len = n;
4643
4644 if (mask) {
4645 if (ovs_scan(s + len, "/"ETH_ADDR_SCAN_FMT"%n",
4646 ETH_ADDR_SCAN_ARGS(*mask), &n)) {
4647 len += n;
4648 } else {
4649 memset(mask, 0xff, sizeof *mask);
4650 }
4651 }
4652 return len;
4653 }
4654 return 0;
4655 }
4656
4657 static int
4658 scan_ipv4(const char *s, ovs_be32 *key, ovs_be32 *mask)
4659 {
4660 int n;
4661
4662 if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(key), &n)) {
4663 int len = n;
4664
4665 if (mask) {
4666 if (ovs_scan(s + len, "/"IP_SCAN_FMT"%n",
4667 IP_SCAN_ARGS(mask), &n)) {
4668 len += n;
4669 } else {
4670 *mask = OVS_BE32_MAX;
4671 }
4672 }
4673 return len;
4674 }
4675 return 0;
4676 }
4677
4678 static int
4679 scan_in6_addr(const char *s, struct in6_addr *key, struct in6_addr *mask)
4680 {
4681 int n;
4682 char ipv6_s[IPV6_SCAN_LEN + 1];
4683
4684 if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
4685 && inet_pton(AF_INET6, ipv6_s, key) == 1) {
4686 int len = n;
4687
4688 if (mask) {
4689 if (ovs_scan(s + len, "/"IPV6_SCAN_FMT"%n", ipv6_s, &n)
4690 && inet_pton(AF_INET6, ipv6_s, mask) == 1) {
4691 len += n;
4692 } else {
4693 memset(mask, 0xff, sizeof *mask);
4694 }
4695 }
4696 return len;
4697 }
4698 return 0;
4699 }
4700
4701 static int
4702 scan_ipv6_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
4703 {
4704 int key_, mask_;
4705 int n;
4706
4707 if (ovs_scan(s, "%i%n", &key_, &n)
4708 && (key_ & ~IPV6_LABEL_MASK) == 0) {
4709 int len = n;
4710
4711 *key = htonl(key_);
4712 if (mask) {
4713 if (ovs_scan(s + len, "/%i%n", &mask_, &n)
4714 && (mask_ & ~IPV6_LABEL_MASK) == 0) {
4715 len += n;
4716 *mask = htonl(mask_);
4717 } else {
4718 *mask = htonl(IPV6_LABEL_MASK);
4719 }
4720 }
4721 return len;
4722 }
4723 return 0;
4724 }
4725
4726 static int
4727 scan_u8(const char *s, uint8_t *key, uint8_t *mask)
4728 {
4729 int n;
4730
4731 if (ovs_scan(s, "%"SCNi8"%n", key, &n)) {
4732 int len = n;
4733
4734 if (mask) {
4735 if (ovs_scan(s + len, "/%"SCNi8"%n", mask, &n)) {
4736 len += n;
4737 } else {
4738 *mask = UINT8_MAX;
4739 }
4740 }
4741 return len;
4742 }
4743 return 0;
4744 }
4745
4746 static int
4747 scan_u16(const char *s, uint16_t *key, uint16_t *mask)
4748 {
4749 int n;
4750
4751 if (ovs_scan(s, "%"SCNi16"%n", key, &n)) {
4752 int len = n;
4753
4754 if (mask) {
4755 if (ovs_scan(s + len, "/%"SCNi16"%n", mask, &n)) {
4756 len += n;
4757 } else {
4758 *mask = UINT16_MAX;
4759 }
4760 }
4761 return len;
4762 }
4763 return 0;
4764 }
4765
4766 static int
4767 scan_u32(const char *s, uint32_t *key, uint32_t *mask)
4768 {
4769 int n;
4770
4771 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4772 int len = n;
4773
4774 if (mask) {
4775 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4776 len += n;
4777 } else {
4778 *mask = UINT32_MAX;
4779 }
4780 }
4781 return len;
4782 }
4783 return 0;
4784 }
4785
4786 static int
4787 scan_be16(const char *s, ovs_be16 *key, ovs_be16 *mask)
4788 {
4789 uint16_t key_, mask_;
4790 int n;
4791
4792 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
4793 int len = n;
4794
4795 *key = htons(key_);
4796 if (mask) {
4797 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
4798 len += n;
4799 *mask = htons(mask_);
4800 } else {
4801 *mask = OVS_BE16_MAX;
4802 }
4803 }
4804 return len;
4805 }
4806 return 0;
4807 }
4808
4809 static int
4810 scan_be32(const char *s, ovs_be32 *key, ovs_be32 *mask)
4811 {
4812 uint32_t key_, mask_;
4813 int n;
4814
4815 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
4816 int len = n;
4817
4818 *key = htonl(key_);
4819 if (mask) {
4820 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
4821 len += n;
4822 *mask = htonl(mask_);
4823 } else {
4824 *mask = OVS_BE32_MAX;
4825 }
4826 }
4827 return len;
4828 }
4829 return 0;
4830 }
4831
4832 static int
4833 scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
4834 {
4835 uint64_t key_, mask_;
4836 int n;
4837
4838 if (ovs_scan(s, "%"SCNi64"%n", &key_, &n)) {
4839 int len = n;
4840
4841 *key = htonll(key_);
4842 if (mask) {
4843 if (ovs_scan(s + len, "/%"SCNi64"%n", &mask_, &n)) {
4844 len += n;
4845 *mask = htonll(mask_);
4846 } else {
4847 *mask = OVS_BE64_MAX;
4848 }
4849 }
4850 return len;
4851 }
4852 return 0;
4853 }
4854
4855 static int
4856 scan_tun_flags(const char *s, uint16_t *key, uint16_t *mask)
4857 {
4858 uint32_t flags, fmask;
4859 int n;
4860
4861 n = parse_odp_flags(s, flow_tun_flag_to_string, &flags,
4862 FLOW_TNL_F_MASK, mask ? &fmask : NULL);
4863 if (n >= 0 && s[n] == ')') {
4864 *key = flags;
4865 if (mask) {
4866 *mask = fmask;
4867 }
4868 return n + 1;
4869 }
4870 return 0;
4871 }
4872
4873 static int
4874 scan_tcp_flags(const char *s, ovs_be16 *key, ovs_be16 *mask)
4875 {
4876 uint32_t flags, fmask;
4877 int n;
4878
4879 n = parse_odp_flags(s, packet_tcp_flag_to_string, &flags,
4880 TCP_FLAGS(OVS_BE16_MAX), mask ? &fmask : NULL);
4881 if (n >= 0) {
4882 *key = htons(flags);
4883 if (mask) {
4884 *mask = htons(fmask);
4885 }
4886 return n;
4887 }
4888 return 0;
4889 }
4890
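/* Translates a userspace connection-tracking state bitmap (CS_* flags) into
 * the corresponding datapath bitmap (OVS_CS_F_* flags);
 * odp_to_ovs_ct_state() below performs the reverse translation. */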
4891 static uint32_t
4892 ovs_to_odp_ct_state(uint8_t state)
4893 {
4894 uint32_t odp = 0;
4895
4896 #define CS_STATE(ENUM, INDEX, NAME) \
4897 if (state & CS_##ENUM) { \
4898 odp |= OVS_CS_F_##ENUM; \
4899 }
4900 CS_STATES
4901 #undef CS_STATE
4902
4903 return odp;
4904 }
4905
4906 static uint8_t
4907 odp_to_ovs_ct_state(uint32_t flags)
4908 {
4909 uint32_t state = 0;
4910
4911 #define CS_STATE(ENUM, INDEX, NAME) \
4912 if (flags & OVS_CS_F_##ENUM) { \
4913 state |= CS_##ENUM; \
4914 }
4915 CS_STATES
4916 #undef CS_STATE
4917
4918 return state;
4919 }
4920
4921 static int
4922 scan_ct_state(const char *s, uint32_t *key, uint32_t *mask)
4923 {
4924 uint32_t flags, fmask;
4925 int n;
4926
4927 n = parse_flags(s, odp_ct_state_to_string, ')', NULL, NULL, &flags,
4928 ovs_to_odp_ct_state(CS_SUPPORTED_MASK),
4929 mask ? &fmask : NULL);
4930
4931 if (n >= 0) {
4932 *key = flags;
4933 if (mask) {
4934 *mask = fmask;
4935 }
4936 return n;
4937 }
4938 return 0;
4939 }
4940
4941 static int
4942 scan_frag(const char *s, uint8_t *key, uint8_t *mask)
4943 {
4944 int n;
4945 char frag[8];
4946 enum ovs_frag_type frag_type;
4947
4948 if (ovs_scan(s, "%7[a-z]%n", frag, &n)
4949 && ovs_frag_type_from_string(frag, &frag_type)) {
4950 int len = n;
4951
4952 *key = frag_type;
4953 if (mask) {
4954 *mask = UINT8_MAX;
4955 }
4956 return len;
4957 }
4958 return 0;
4959 }
4960
4961 static int
4962 scan_port(const char *s, uint32_t *key, uint32_t *mask,
4963 const struct simap *port_names)
4964 {
4965 int n;
4966
4967 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4968 int len = n;
4969
4970 if (mask) {
4971 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4972 len += n;
4973 } else {
4974 *mask = UINT32_MAX;
4975 }
4976 }
4977 return len;
4978 } else if (port_names) {
4979 const struct simap_node *node;
4980 int len;
4981
4982 len = strcspn(s, ")");
4983 node = simap_find_len(port_names, s, len);
4984 if (node) {
4985 *key = node->data;
4986
4987 if (mask) {
4988 *mask = UINT32_MAX;
4989 }
4990 return len;
4991 }
4992 }
4993 return 0;
4994 }
4995
4996 /* Helper for vlan parsing. */
4997 struct ovs_key_vlan__ {
4998 ovs_be16 tci;
4999 };
5000
5001 static bool
5002 set_be16_bf(ovs_be16 *bf, uint8_t bits, uint8_t offset, uint16_t value)
5003 {
5004 const uint16_t mask = ((1U << bits) - 1) << offset;
5005
5006 if (value >> bits) {
5007 return false;
5008 }
5009
5010 *bf = htons((ntohs(*bf) & ~mask) | (value << offset));
5011 return true;
5012 }
5013
5014 static int
5015 scan_be16_bf(const char *s, ovs_be16 *key, ovs_be16 *mask, uint8_t bits,
5016 uint8_t offset)
5017 {
5018 uint16_t key_, mask_;
5019 int n;
5020
5021 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
5022 int len = n;
5023
5024 if (set_be16_bf(key, bits, offset, key_)) {
5025 if (mask) {
5026 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
5027 len += n;
5028
5029 if (!set_be16_bf(mask, bits, offset, mask_)) {
5030 return 0;
5031 }
5032 } else {
5033 *mask |= htons(((1U << bits) - 1) << offset);
5034 }
5035 }
5036 return len;
5037 }
5038 }
5039 return 0;
5040 }
5041
5042 static int
5043 scan_vid(const char *s, ovs_be16 *key, ovs_be16 *mask)
5044 {
5045 return scan_be16_bf(s, key, mask, 12, VLAN_VID_SHIFT);
5046 }
5047
5048 static int
5049 scan_pcp(const char *s, ovs_be16 *key, ovs_be16 *mask)
5050 {
5051 return scan_be16_bf(s, key, mask, 3, VLAN_PCP_SHIFT);
5052 }
5053
5054 static int
5055 scan_cfi(const char *s, ovs_be16 *key, ovs_be16 *mask)
5056 {
5057 return scan_be16_bf(s, key, mask, 1, VLAN_CFI_SHIFT);
5058 }
5059
5060 /* For MPLS. */
5061 static bool
5062 set_be32_bf(ovs_be32 *bf, uint8_t bits, uint8_t offset, uint32_t value)
5063 {
5064 const uint32_t mask = ((1U << bits) - 1) << offset;
5065
5066 if (value >> bits) {
5067 return false;
5068 }
5069
5070 *bf = htonl((ntohl(*bf) & ~mask) | (value << offset));
5071 return true;
5072 }
5073
5074 static int
5075 scan_be32_bf(const char *s, ovs_be32 *key, ovs_be32 *mask, uint8_t bits,
5076 uint8_t offset)
5077 {
5078 uint32_t key_, mask_;
5079 int n;
5080
5081 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
5082 int len = n;
5083
5084 if (set_be32_bf(key, bits, offset, key_)) {
5085 if (mask) {
5086 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
5087 len += n;
5088
5089 if (!set_be32_bf(mask, bits, offset, mask_)) {
5090 return 0;
5091 }
5092 } else {
5093 *mask |= htonl(((1U << bits) - 1) << offset);
5094 }
5095 }
5096 return len;
5097 }
5098 }
5099 return 0;
5100 }
5101
5102 static int
5103 scan_mpls_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
5104 {
5105 return scan_be32_bf(s, key, mask, 20, MPLS_LABEL_SHIFT);
5106 }
5107
5108 static int
5109 scan_mpls_tc(const char *s, ovs_be32 *key, ovs_be32 *mask)
5110 {
5111 return scan_be32_bf(s, key, mask, 3, MPLS_TC_SHIFT);
5112 }
5113
5114 static int
5115 scan_mpls_ttl(const char *s, ovs_be32 *key, ovs_be32 *mask)
5116 {
5117 return scan_be32_bf(s, key, mask, 8, MPLS_TTL_SHIFT);
5118 }
5119
5120 static int
5121 scan_mpls_bos(const char *s, ovs_be32 *key, ovs_be32 *mask)
5122 {
5123 return scan_be32_bf(s, key, mask, 1, MPLS_BOS_SHIFT);
5124 }
5125
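/* Parses the body of a "vxlan(gbp(...))" tunnel option, e.g. (illustrative)
 * "id=10,flags=0x1))".  Note that the two closing parentheses are consumed
 * here rather than by the caller. */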
5126 static int
5127 scan_vxlan_gbp(const char *s, uint32_t *key, uint32_t *mask)
5128 {
5129 const char *s_base = s;
5130 ovs_be16 id = 0, id_mask = 0;
5131 uint8_t flags = 0, flags_mask = 0;
5132 int len;
5133
5134 if (!strncmp(s, "id=", 3)) {
5135 s += 3;
5136 len = scan_be16(s, &id, mask ? &id_mask : NULL);
5137 if (len == 0) {
5138 return 0;
5139 }
5140 s += len;
5141 }
5142
5143 if (s[0] == ',') {
5144 s++;
5145 }
5146 if (!strncmp(s, "flags=", 6)) {
5147 s += 6;
5148 len = scan_u8(s, &flags, mask ? &flags_mask : NULL);
5149 if (len == 0) {
5150 return 0;
5151 }
5152 s += len;
5153 }
5154
5155 if (!strncmp(s, "))", 2)) {
5156 s += 2;
5157
5158 *key = (flags << 16) | ntohs(id);
5159 if (mask) {
5160 *mask = (flags_mask << 16) | ntohs(id_mask);
5161 }
5162
5163 return s - s_base;
5164 }
5165
5166 return 0;
5167 }
5168
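/* Parses the body of a "gtpu(...)" tunnel option, e.g. (illustrative)
 * "flags=0x30,msgtype=255)".  The closing parenthesis is consumed here. */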
5169 static int
5170 scan_gtpu_metadata(const char *s,
5171 struct gtpu_metadata *key,
5172 struct gtpu_metadata *mask)
5173 {
5174 const char *s_base = s;
5175 uint8_t flags, flags_ma;
5176 uint8_t msgtype, msgtype_ma;
5177 int len;
5178
5179 if (!strncmp(s, "flags=", 6)) {
5180 s += 6;
5181 len = scan_u8(s, &flags, mask ? &flags_ma : NULL);
5182 if (len == 0) {
5183 return 0;
5184 }
5185 s += len;
5186 }
5187
5188 if (s[0] == ',') {
5189 s++;
5190 }
5191
5192 if (!strncmp(s, "msgtype=", 8)) {
5193 s += 8;
5194 len = scan_u8(s, &msgtype, mask ? &msgtype_ma : NULL);
5195 if (len == 0) {
5196 return 0;
5197 }
5198 s += len;
5199 }
5200
5201 if (!strncmp(s, ")", 1)) {
5202 s += 1;
5203 key->flags = flags;
5204 key->msgtype = msgtype;
5205 if (mask) {
5206 mask->flags = flags_ma;
5207 mask->msgtype = msgtype_ma;
5208 }
5209 }
5210 return s - s_base;
5211 }
5212
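/* Parses the body of an "erspan(...)" tunnel option.  Illustrative forms:
 * "ver=1,idx=0x7)" for ERSPAN version 1 and "ver=2,dir=1,hwid=0x4)" for
 * version 2.  The closing parenthesis is consumed here. */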
5213 static int
5214 scan_erspan_metadata(const char *s,
5215 struct erspan_metadata *key,
5216 struct erspan_metadata *mask)
5217 {
5218 const char *s_base = s;
5219 uint32_t idx = 0, idx_mask = 0;
5220 uint8_t ver = 0, dir = 0, hwid = 0;
5221 uint8_t ver_mask = 0, dir_mask = 0, hwid_mask = 0;
5222 int len;
5223
5224 if (!strncmp(s, "ver=", 4)) {
5225 s += 4;
5226 len = scan_u8(s, &ver, mask ? &ver_mask : NULL);
5227 if (len == 0) {
5228 return 0;
5229 }
5230 s += len;
5231 }
5232
5233 if (s[0] == ',') {
5234 s++;
5235 }
5236
5237 if (ver == 1) {
5238 if (!strncmp(s, "idx=", 4)) {
5239 s += 4;
5240 len = scan_u32(s, &idx, mask ? &idx_mask : NULL);
5241 if (len == 0) {
5242 return 0;
5243 }
5244 s += len;
5245 }
5246
5247 if (!strncmp(s, ")", 1)) {
5248 s += 1;
5249 key->version = ver;
5250 key->u.index = htonl(idx);
5251 if (mask) {
5252 mask->u.index = htonl(idx_mask);
5253 }
5254 }
5255 return s - s_base;
5256
5257 } else if (ver == 2) {
5258 if (!strncmp(s, "dir=", 4)) {
5259 s += 4;
5260 len = scan_u8(s, &dir, mask ? &dir_mask : NULL);
5261 if (len == 0) {
5262 return 0;
5263 }
5264 s += len;
5265 }
5266 if (s[0] == ',') {
5267 s++;
5268 }
5269 if (!strncmp(s, "hwid=", 5)) {
5270 s += 5;
5271 len = scan_u8(s, &hwid, mask ? &hwid_mask : NULL);
5272 if (len == 0) {
5273 return 0;
5274 }
5275 s += len;
5276 }
5277
5278 if (!strncmp(s, ")", 1)) {
5279 s += 1;
5280 key->version = ver;
5281 key->u.md2.hwid = hwid;
5282 key->u.md2.dir = dir;
5283 if (mask) {
5284 mask->u.md2.hwid = hwid_mask;
5285 mask->u.md2.dir = dir_mask;
5286 }
5287 }
5288 return s - s_base;
5289 }
5290
5291 return 0;
5292 }
5293
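/* Parses the body of a "geneve(...)" tunnel option as a sequence of option
 * groups, e.g. (illustrative)
 * "{class=0xffff,type=0x80,len=4,0xa}{class=0xffff,type=0,len=4})".
 * Option data and its optional "/"-separated mask are given in hex; the
 * closing parenthesis is consumed here. */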
5294 static int
5295 scan_geneve(const char *s, struct geneve_scan *key, struct geneve_scan *mask)
5296 {
5297 const char *s_base = s;
5298 struct geneve_opt *opt = key->d;
5299 struct geneve_opt *opt_mask = mask ? mask->d : NULL;
5300 int len_remain = sizeof key->d;
5301 int len;
5302
5303 while (s[0] == '{' && len_remain >= sizeof *opt) {
5304 int data_len = 0;
5305
5306 s++;
5307 len_remain -= sizeof *opt;
5308
5309 if (!strncmp(s, "class=", 6)) {
5310 s += 6;
5311 len = scan_be16(s, &opt->opt_class,
5312 mask ? &opt_mask->opt_class : NULL);
5313 if (len == 0) {
5314 return 0;
5315 }
5316 s += len;
5317 } else if (mask) {
5318 memset(&opt_mask->opt_class, 0, sizeof opt_mask->opt_class);
5319 }
5320
5321 if (s[0] == ',') {
5322 s++;
5323 }
5324 if (!strncmp(s, "type=", 5)) {
5325 s += 5;
5326 len = scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
5327 if (len == 0) {
5328 return 0;
5329 }
5330 s += len;
5331 } else if (mask) {
5332 memset(&opt_mask->type, 0, sizeof opt_mask->type);
5333 }
5334
5335 if (s[0] == ',') {
5336 s++;
5337 }
5338 if (!strncmp(s, "len=", 4)) {
5339 uint8_t opt_len, opt_len_mask;
5340 s += 4;
5341 len = scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
5342 if (len == 0) {
5343 return 0;
5344 }
5345 s += len;
5346
5347 if (opt_len > 124 || opt_len % 4 || opt_len > len_remain) {
5348 return 0;
5349 }
5350 opt->length = opt_len / 4;
5351 if (mask) {
5352 opt_mask->length = opt_len_mask;
5353 }
5354 data_len = opt_len;
5355 } else if (mask) {
5356 memset(&opt_mask->type, 0, sizeof opt_mask->type);
5357 }
5358
5359 if (s[0] == ',') {
5360 s++;
5361 if (parse_int_string(s, (uint8_t *)(opt + 1),
5362 data_len, (char **)&s)) {
5363 return 0;
5364 }
5365 }
5366 if (mask) {
5367 if (s[0] == '/') {
5368 s++;
5369 if (parse_int_string(s, (uint8_t *)(opt_mask + 1),
5370 data_len, (char **)&s)) {
5371 return 0;
5372 }
5373 }
5374 opt_mask->r1 = 0;
5375 opt_mask->r2 = 0;
5376 opt_mask->r3 = 0;
5377 }
5378
5379 if (s[0] == '}') {
5380 s++;
5381 opt += 1 + data_len / 4;
5382 if (mask) {
5383 opt_mask += 1 + data_len / 4;
5384 }
5385 len_remain -= data_len;
5386 } else {
5387 return 0;
5388 }
5389 }
5390
5391 if (s[0] == ')') {
5392 len = sizeof key->d - len_remain;
5393
5394 s++;
5395 key->len = len;
5396 if (mask) {
5397 mask->len = len;
5398 }
5399 return s - s_base;
5400 }
5401
5402 return 0;
5403 }
5404
5405 static void
5406 tun_flags_to_attr(struct ofpbuf *a, const void *data_)
5407 {
5408 const uint16_t *flags = data_;
5409
5410 if (*flags & FLOW_TNL_F_DONT_FRAGMENT) {
5411 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
5412 }
5413 if (*flags & FLOW_TNL_F_CSUM) {
5414 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
5415 }
5416 if (*flags & FLOW_TNL_F_OAM) {
5417 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
5418 }
5419 }
5420
5421 static void
5422 vxlan_gbp_to_attr(struct ofpbuf *a, const void *data_)
5423 {
5424 const uint32_t *gbp = data_;
5425
5426 if (*gbp) {
5427 size_t vxlan_opts_ofs;
5428
5429 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
5430 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP, *gbp);
5431 nl_msg_end_nested(a, vxlan_opts_ofs);
5432 }
5433 }
5434
5435 static void
5436 geneve_to_attr(struct ofpbuf *a, const void *data_)
5437 {
5438 const struct geneve_scan *geneve = data_;
5439
5440 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, geneve->d,
5441 geneve->len);
5442 }
5443
5444 static void
5445 erspan_to_attr(struct ofpbuf *a, const void *data_)
5446 {
5447 const struct erspan_metadata *md = data_;
5448
5449 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, md,
5450 sizeof *md);
5451 }
5452
5453 static void
5454 gtpu_to_attr(struct ofpbuf *a, const void *data_)
5455 {
5456 const struct gtpu_metadata *md = data_;
5457
5458 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GTPU_OPTS, md,
5459 sizeof *md);
5460 }
5461
5462 #define SCAN_PUT_ATTR(BUF, ATTR, DATA, FUNC) \
5463 { \
5464 unsigned long call_fn = (unsigned long)FUNC; \
5465 if (call_fn) { \
5466 typedef void (*fn)(struct ofpbuf *, const void *); \
5467 fn func = FUNC; \
5468 func(BUF, &(DATA)); \
5469 } else { \
5470 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)); \
5471 } \
5472 }
5473
5474 #define SCAN_IF(NAME) \
5475 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5476 const char *start = s; \
5477 int len; \
5478 \
5479 s += strlen(NAME)
5480
5481 /* Usually no special initialization is needed. */
5482 #define SCAN_BEGIN(NAME, TYPE) \
5483 SCAN_IF(NAME); \
5484 TYPE skey, smask; \
5485 memset(&skey, 0, sizeof skey); \
5486 memset(&smask, 0, sizeof smask); \
5487 do { \
5488 len = 0;
5489
5490 /* Init as fully masked, since the mask will not be scanned. */
5491 #define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
5492 SCAN_IF(NAME); \
5493 TYPE skey, smask; \
5494 memset(&skey, 0, sizeof skey); \
5495 memset(&smask, 0xff, sizeof smask); \
5496 do { \
5497 len = 0;
5498
5499 /* VLAN needs special initialization. */
5500 #define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
5501 SCAN_IF(NAME); \
5502 TYPE skey = KEY_INIT; \
5503 TYPE smask = MASK_INIT; \
5504 do { \
5505 len = 0;
5506
5507 /* Scan an unnamed entry as 'TYPE'. */
5508 #define SCAN_TYPE(TYPE, KEY, MASK) \
5509 len = scan_##TYPE(s, KEY, MASK); \
5510 if (len == 0) { \
5511 return -EINVAL; \
5512 } \
5513 s += len
5514
5515 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
5516 #define SCAN_FIELD(NAME, TYPE, FIELD) \
5517 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5518 s += strlen(NAME); \
5519 SCAN_TYPE(TYPE, &skey.FIELD, mask ? &smask.FIELD : NULL); \
5520 continue; \
5521 }
5522
5523 #define SCAN_FINISH() \
5524 } while (*s++ == ',' && len != 0); \
5525 if (s[-1] != ')') { \
5526 return -EINVAL; \
5527 }
5528
5529 #define SCAN_FINISH_SINGLE() \
5530 } while (false); \
5531 if (*s++ != ')') { \
5532 return -EINVAL; \
5533 }
5534
5535 /* Beginning of nested attribute. */
5536 #define SCAN_BEGIN_NESTED(NAME, ATTR) \
5537 SCAN_IF(NAME); \
5538 size_t key_offset, mask_offset = 0; \
5539 key_offset = nl_msg_start_nested(key, ATTR); \
5540 if (mask) { \
5541 mask_offset = nl_msg_start_nested(mask, ATTR); \
5542 } \
5543 do { \
5544 len = 0;
5545
5546 #define SCAN_END_NESTED() \
5547 SCAN_FINISH(); \
5548 nl_msg_end_nested(key, key_offset); \
5549 if (mask) { \
5550 nl_msg_end_nested(mask, mask_offset); \
5551 } \
5552 return s - start; \
5553 }
5554
5555 #define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \
5556 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5557 TYPE skey, smask; \
5558 memset(&skey, 0, sizeof skey); \
5559 memset(&smask, 0xff, sizeof smask); \
5560 s += strlen(NAME); \
5561 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5562 SCAN_PUT(ATTR, FUNC); \
5563 continue; \
5564 }
5565
5566 #define SCAN_FIELD_NESTED(NAME, TYPE, SCAN_AS, ATTR) \
5567 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, NULL)
5568
5569 #define SCAN_FIELD_NESTED_FUNC(NAME, TYPE, SCAN_AS, FUNC) \
5570 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, 0, FUNC)
5571
5572 #define SCAN_PUT(ATTR, FUNC) \
5573 SCAN_PUT_ATTR(key, ATTR, skey, FUNC); \
5574 if (mask) \
5575 SCAN_PUT_ATTR(mask, ATTR, smask, FUNC); \
5576
5577 #define SCAN_END(ATTR) \
5578 SCAN_FINISH(); \
5579 SCAN_PUT(ATTR, NULL); \
5580 return s - start; \
5581 }
5582
5583 #define SCAN_BEGIN_ARRAY(NAME, TYPE, CNT) \
5584 SCAN_IF(NAME); \
5585 TYPE skey[CNT], smask[CNT]; \
5586 memset(&skey, 0, sizeof skey); \
5587 memset(&smask, 0, sizeof smask); \
5588 int idx = 0, cnt = CNT; \
5589 uint64_t fields = 0; \
5590 do { \
5591 int field = 0; \
5592 len = 0;
5593
5594 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
5595 #define SCAN_FIELD_ARRAY(NAME, TYPE, FIELD) \
5596 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5597 if (fields & (1UL << field)) { \
5598 fields = 0; \
5599 if (++idx == cnt) { \
5600 break; \
5601 } \
5602 } \
5603 s += strlen(NAME); \
5604 SCAN_TYPE(TYPE, &skey[idx].FIELD, mask ? &smask[idx].FIELD : NULL); \
5605 fields |= 1UL << field; \
5606 continue; \
5607 } \
5608 field++;
5609
5610 #define SCAN_PUT_ATTR_ARRAY(BUF, ATTR, DATA, CNT) \
5611 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)[0] * (CNT)); \
5612
5613 #define SCAN_PUT_ARRAY(ATTR, CNT) \
5614 SCAN_PUT_ATTR_ARRAY(key, ATTR, skey, CNT); \
5615 if (mask) { \
5616 SCAN_PUT_ATTR_ARRAY(mask, ATTR, smask, CNT); \
5617 }
5618
5619 #define SCAN_END_ARRAY(ATTR) \
5620 SCAN_FINISH(); \
5621 if (idx == cnt) { \
5622 return -EINVAL; \
5623 } \
5624 SCAN_PUT_ARRAY(ATTR, idx + 1); \
5625 return s - start; \
5626 }
5627
5628 #define SCAN_END_SINGLE(ATTR) \
5629 SCAN_FINISH_SINGLE(); \
5630 SCAN_PUT(ATTR, NULL); \
5631 return s - start; \
5632 }
5633
5634 #define SCAN_SINGLE(NAME, TYPE, SCAN_AS, ATTR) \
5635 SCAN_BEGIN(NAME, TYPE) { \
5636 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5637 } SCAN_END_SINGLE(ATTR)
5638
5639 #define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
5640 SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
5641 SCAN_TYPE(SCAN_AS, &skey, NULL); \
5642 } SCAN_END_SINGLE(ATTR)
5643
5644 /* scan_port needs one extra argument. */
5645 #define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
5646 SCAN_BEGIN(NAME, TYPE) { \
5647 len = scan_port(s, &skey, &smask, \
5648 context->port_names); \
5649 if (len == 0) { \
5650 return -EINVAL; \
5651 } \
5652 s += len; \
5653 } SCAN_END_SINGLE(ATTR)
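/*
 * Rough sketch (not a literal expansion; intermediate macros and some details
 * omitted) of what a simple user of these macros, such as
 * SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK),
 * boils down to:
 *
 *   if (!strncmp(s, "skb_mark(", strlen("skb_mark("))) {
 *       const char *start = s;
 *       uint32_t skey, smask;
 *       int len;
 *
 *       s += strlen("skb_mark(");
 *       memset(&skey, 0, sizeof skey);
 *       memset(&smask, 0, sizeof smask);
 *       len = scan_u32(s, &skey, &smask);          // SCAN_TYPE()
 *       if (len == 0) {
 *           return -EINVAL;
 *       }
 *       s += len;
 *       if (*s++ != ')') {                         // SCAN_FINISH_SINGLE()
 *           return -EINVAL;
 *       }
 *       nl_msg_put_unspec(key, OVS_KEY_ATTR_SKB_MARK, &skey, sizeof skey);
 *       if (mask) {                                // SCAN_PUT()
 *           nl_msg_put_unspec(mask, OVS_KEY_ATTR_SKB_MARK, &smask,
 *                             sizeof smask);
 *       }
 *       return s - start;
 *   }
 */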
5654
5655 static int
5656 parse_odp_nsh_key_mask_attr(const char *s, struct ofpbuf *key,
5657 struct ofpbuf *mask)
5658 {
5659 if (strncmp(s, "nsh(", 4) == 0) {
5660 const char *start = s;
5661 int len;
5662 struct ovs_key_nsh skey, smask;
5663 uint32_t spi = 0, spi_mask = 0;
5664 uint8_t si = 0, si_mask = 0;
5665
5666 s += 4;
5667
5668 memset(&skey, 0, sizeof skey);
5669 memset(&smask, 0, sizeof smask);
5670 do {
5671 len = 0;
5672
5673 if (strncmp(s, "flags=", 6) == 0) {
5674 s += 6;
5675 len = scan_u8(s, &skey.flags, mask ? &smask.flags : NULL);
5676 if (len == 0) {
5677 return -EINVAL;
5678 }
5679 s += len;
5680 continue;
5681 }
5682
5683 if (strncmp(s, "mdtype=", 7) == 0) {
5684 s += 7;
5685 len = scan_u8(s, &skey.mdtype, mask ? &smask.mdtype : NULL);
5686 if (len == 0) {
5687 return -EINVAL;
5688 }
5689 s += len;
5690 continue;
5691 }
5692
5693 if (strncmp(s, "np=", 3) == 0) {
5694 s += 3;
5695 len = scan_u8(s, &skey.np, mask ? &smask.np : NULL);
5696 if (len == 0) {
5697 return -EINVAL;
5698 }
5699 s += len;
5700 continue;
5701 }
5702
5703 if (strncmp(s, "spi=", 4) == 0) {
5704 s += 4;
5705 len = scan_u32(s, &spi, mask ? &spi_mask : NULL);
5706 if (len == 0) {
5707 return -EINVAL;
5708 }
5709 s += len;
5710 continue;
5711 }
5712
5713 if (strncmp(s, "si=", 3) == 0) {
5714 s += 3;
5715 len = scan_u8(s, &si, mask ? &si_mask : NULL);
5716 if (len == 0) {
5717 return -EINVAL;
5718 }
5719 s += len;
5720 continue;
5721 }
5722
5723 if (strncmp(s, "c1=", 3) == 0) {
5724 s += 3;
5725 len = scan_be32(s, &skey.context[0],
5726 mask ? &smask.context[0] : NULL);
5727 if (len == 0) {
5728 return -EINVAL;
5729 }
5730 s += len;
5731 continue;
5732 }
5733
5734 if (strncmp(s, "c2=", 3) == 0) {
5735 s += 3;
5736 len = scan_be32(s, &skey.context[1],
5737 mask ? &smask.context[1] : NULL);
5738 if (len == 0) {
5739 return -EINVAL;
5740 }
5741 s += len;
5742 continue;
5743 }
5744
5745 if (strncmp(s, "c3=", 3) == 0) {
5746 s += 3;
5747 len = scan_be32(s, &skey.context[2],
5748 mask ? &smask.context[2] : NULL);
5749 if (len == 0) {
5750 return -EINVAL;
5751 }
5752 s += len;
5753 continue;
5754 }
5755
5756 if (strncmp(s, "c4=", 3) == 0) {
5757 s += 3;
5758 len = scan_be32(s, &skey.context[3],
5759 mask ? &smask.context[3] : NULL);
5760 if (len == 0) {
5761 return -EINVAL;
5762 }
5763 s += len;
5764 continue;
5765 }
5766 } while (*s++ == ',' && len != 0);
5767 if (s[-1] != ')') {
5768 return -EINVAL;
5769 }
5770
5771 skey.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
5772 smask.path_hdr = nsh_spi_si_to_path_hdr(spi_mask, si_mask);
5773
5774 nsh_key_to_attr(key, &skey, NULL, 0, false);
5775 if (mask) {
5776 nsh_key_to_attr(mask, &smask, NULL, 0, true);
5777 }
5778 return s - start;
5779 }
5780 return 0;
5781 }
5782
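/* Wrapper around parse_odp_key_mask_attr__() that bounds the recursion depth
 * at MAX_ODP_NESTED, so that deeply nested input (e.g. encap() within
 * encap()) cannot recurse without limit. */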
5783 static int
5784 parse_odp_key_mask_attr(struct parse_odp_context *context, const char *s,
5785 struct ofpbuf *key, struct ofpbuf *mask)
5786 {
5787 int retval;
5788
5789 context->depth++;
5790
5791 if (context->depth == MAX_ODP_NESTED) {
5792 retval = -EINVAL;
5793 } else {
5794 retval = parse_odp_key_mask_attr__(context, s, key, mask);
5795 }
5796
5797 context->depth--;
5798
5799 return retval;
5800 }
5801
5802 static int
5803 parse_odp_key_mask_attr__(struct parse_odp_context *context, const char *s,
5804 struct ofpbuf *key, struct ofpbuf *mask)
5805 {
5806 SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
5807 SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
5808 SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
5809 OVS_KEY_ATTR_RECIRC_ID);
5810 SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
5811
5812 SCAN_SINGLE("ct_state(", uint32_t, ct_state, OVS_KEY_ATTR_CT_STATE);
5813 SCAN_SINGLE("ct_zone(", uint16_t, u16, OVS_KEY_ATTR_CT_ZONE);
5814 SCAN_SINGLE("ct_mark(", uint32_t, u32, OVS_KEY_ATTR_CT_MARK);
5815 SCAN_SINGLE("ct_label(", ovs_u128, u128, OVS_KEY_ATTR_CT_LABELS);
5816
5817 SCAN_BEGIN("ct_tuple4(", struct ovs_key_ct_tuple_ipv4) {
5818 SCAN_FIELD("src=", ipv4, ipv4_src);
5819 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5820 SCAN_FIELD("proto=", u8, ipv4_proto);
5821 SCAN_FIELD("tp_src=", be16, src_port);
5822 SCAN_FIELD("tp_dst=", be16, dst_port);
5823 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
5824
5825 SCAN_BEGIN("ct_tuple6(", struct ovs_key_ct_tuple_ipv6) {
5826 SCAN_FIELD("src=", in6_addr, ipv6_src);
5827 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5828 SCAN_FIELD("proto=", u8, ipv6_proto);
5829 SCAN_FIELD("tp_src=", be16, src_port);
5830 SCAN_FIELD("tp_dst=", be16, dst_port);
5831 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
5832
5833 SCAN_BEGIN_NESTED("tunnel(", OVS_KEY_ATTR_TUNNEL) {
5834 SCAN_FIELD_NESTED("tun_id=", ovs_be64, be64, OVS_TUNNEL_KEY_ATTR_ID);
5835 SCAN_FIELD_NESTED("src=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_SRC);
5836 SCAN_FIELD_NESTED("dst=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_DST);
5837 SCAN_FIELD_NESTED("ipv6_src=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_SRC);
5838 SCAN_FIELD_NESTED("ipv6_dst=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_DST);
5839 SCAN_FIELD_NESTED("tos=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TOS);
5840 SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
5841 SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
5842 SCAN_FIELD_NESTED("tp_dst=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_DST);
5843 SCAN_FIELD_NESTED_FUNC("erspan(", struct erspan_metadata, erspan_metadata,
5844 erspan_to_attr);
5845 SCAN_FIELD_NESTED_FUNC("vxlan(gbp(", uint32_t, vxlan_gbp, vxlan_gbp_to_attr);
5846 SCAN_FIELD_NESTED_FUNC("geneve(", struct geneve_scan, geneve,
5847 geneve_to_attr);
5848 SCAN_FIELD_NESTED_FUNC("gtpu(", struct gtpu_metadata, gtpu_metadata,
5849 gtpu_to_attr);
5850 SCAN_FIELD_NESTED_FUNC("flags(", uint16_t, tun_flags, tun_flags_to_attr);
5851 } SCAN_END_NESTED();
5852
5853 SCAN_SINGLE_PORT("in_port(", uint32_t, OVS_KEY_ATTR_IN_PORT);
5854
5855 SCAN_BEGIN("eth(", struct ovs_key_ethernet) {
5856 SCAN_FIELD("src=", eth, eth_src);
5857 SCAN_FIELD("dst=", eth, eth_dst);
5858 } SCAN_END(OVS_KEY_ATTR_ETHERNET);
5859
5860 SCAN_BEGIN_INIT("vlan(", struct ovs_key_vlan__,
5861 { htons(VLAN_CFI) }, { htons(VLAN_CFI) }) {
5862 SCAN_FIELD("vid=", vid, tci);
5863 SCAN_FIELD("pcp=", pcp, tci);
5864 SCAN_FIELD("cfi=", cfi, tci);
5865 } SCAN_END(OVS_KEY_ATTR_VLAN);
5866
5867 SCAN_SINGLE("eth_type(", ovs_be16, be16, OVS_KEY_ATTR_ETHERTYPE);
5868
5869 SCAN_BEGIN_ARRAY("mpls(", struct ovs_key_mpls, FLOW_MAX_MPLS_LABELS) {
5870 SCAN_FIELD_ARRAY("label=", mpls_label, mpls_lse);
5871 SCAN_FIELD_ARRAY("tc=", mpls_tc, mpls_lse);
5872 SCAN_FIELD_ARRAY("ttl=", mpls_ttl, mpls_lse);
5873 SCAN_FIELD_ARRAY("bos=", mpls_bos, mpls_lse);
5874 } SCAN_END_ARRAY(OVS_KEY_ATTR_MPLS);
5875
5876 SCAN_BEGIN("ipv4(", struct ovs_key_ipv4) {
5877 SCAN_FIELD("src=", ipv4, ipv4_src);
5878 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5879 SCAN_FIELD("proto=", u8, ipv4_proto);
5880 SCAN_FIELD("tos=", u8, ipv4_tos);
5881 SCAN_FIELD("ttl=", u8, ipv4_ttl);
5882 SCAN_FIELD("frag=", frag, ipv4_frag);
5883 } SCAN_END(OVS_KEY_ATTR_IPV4);
5884
5885 SCAN_BEGIN("ipv6(", struct ovs_key_ipv6) {
5886 SCAN_FIELD("src=", in6_addr, ipv6_src);
5887 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5888 SCAN_FIELD("label=", ipv6_label, ipv6_label);
5889 SCAN_FIELD("proto=", u8, ipv6_proto);
5890 SCAN_FIELD("tclass=", u8, ipv6_tclass);
5891 SCAN_FIELD("hlimit=", u8, ipv6_hlimit);
5892 SCAN_FIELD("frag=", frag, ipv6_frag);
5893 } SCAN_END(OVS_KEY_ATTR_IPV6);
5894
5895 SCAN_BEGIN("tcp(", struct ovs_key_tcp) {
5896 SCAN_FIELD("src=", be16, tcp_src);
5897 SCAN_FIELD("dst=", be16, tcp_dst);
5898 } SCAN_END(OVS_KEY_ATTR_TCP);
5899
5900 SCAN_SINGLE("tcp_flags(", ovs_be16, tcp_flags, OVS_KEY_ATTR_TCP_FLAGS);
5901
5902 SCAN_BEGIN("udp(", struct ovs_key_udp) {
5903 SCAN_FIELD("src=", be16, udp_src);
5904 SCAN_FIELD("dst=", be16, udp_dst);
5905 } SCAN_END(OVS_KEY_ATTR_UDP);
5906
5907 SCAN_BEGIN("sctp(", struct ovs_key_sctp) {
5908 SCAN_FIELD("src=", be16, sctp_src);
5909 SCAN_FIELD("dst=", be16, sctp_dst);
5910 } SCAN_END(OVS_KEY_ATTR_SCTP);
5911
5912 SCAN_BEGIN("icmp(", struct ovs_key_icmp) {
5913 SCAN_FIELD("type=", u8, icmp_type);
5914 SCAN_FIELD("code=", u8, icmp_code);
5915 } SCAN_END(OVS_KEY_ATTR_ICMP);
5916
5917 SCAN_BEGIN("icmpv6(", struct ovs_key_icmpv6) {
5918 SCAN_FIELD("type=", u8, icmpv6_type);
5919 SCAN_FIELD("code=", u8, icmpv6_code);
5920 } SCAN_END(OVS_KEY_ATTR_ICMPV6);
5921
5922 SCAN_BEGIN("arp(", struct ovs_key_arp) {
5923 SCAN_FIELD("sip=", ipv4, arp_sip);
5924 SCAN_FIELD("tip=", ipv4, arp_tip);
5925 SCAN_FIELD("op=", be16, arp_op);
5926 SCAN_FIELD("sha=", eth, arp_sha);
5927 SCAN_FIELD("tha=", eth, arp_tha);
5928 } SCAN_END(OVS_KEY_ATTR_ARP);
5929
5930 SCAN_BEGIN("nd(", struct ovs_key_nd) {
5931 SCAN_FIELD("target=", in6_addr, nd_target);
5932 SCAN_FIELD("sll=", eth, nd_sll);
5933 SCAN_FIELD("tll=", eth, nd_tll);
5934 } SCAN_END(OVS_KEY_ATTR_ND);
5935
5936 SCAN_BEGIN("nd_ext(", struct ovs_key_nd_extensions) {
5937 SCAN_FIELD("nd_reserved=", be32, nd_reserved);
5938 SCAN_FIELD("nd_options_type=", u8, nd_options_type);
5939 } SCAN_END(OVS_KEY_ATTR_ND_EXTENSIONS);
5940
5941 struct packet_type {
5942 ovs_be16 ns;
5943 ovs_be16 id;
5944 };
5945 SCAN_BEGIN("packet_type(", struct packet_type) {
5946 SCAN_FIELD("ns=", be16, ns);
5947 SCAN_FIELD("id=", be16, id);
5948 } SCAN_END(OVS_KEY_ATTR_PACKET_TYPE);
5949
5950 /* NSH is nested, so it needs special processing. */
5951 int ret = parse_odp_nsh_key_mask_attr(s, key, mask);
5952 if (ret < 0) {
5953 return ret;
5954 } else {
5955 s += ret;
5956 }
5957
5958 /* The encap() attribute is open-coded because it nests other attributes. */
5959 if (!strncmp(s, "encap(", 6)) {
5960 const char *start = s;
5961 size_t encap, encap_mask = 0;
5962
5963 encap = nl_msg_start_nested(key, OVS_KEY_ATTR_ENCAP);
5964 if (mask) {
5965 encap_mask = nl_msg_start_nested(mask, OVS_KEY_ATTR_ENCAP);
5966 }
5967
5968 s += 6;
5969 for (;;) {
5970 int retval;
5971
5972 s += strspn(s, delimiters);
5973 if (!*s) {
5974 return -EINVAL;
5975 } else if (*s == ')') {
5976 break;
5977 }
5978
5979 retval = parse_odp_key_mask_attr(context, s, key, mask);
5980 if (retval < 0) {
5981 return retval;
5982 }
5983
5984 if (nl_attr_oversized(key->size - encap - NLA_HDRLEN)) {
5985 return -E2BIG;
5986 }
5987 s += retval;
5988 }
5989 s++;
5990
5991 nl_msg_end_nested(key, encap);
5992 if (mask) {
5993 nl_msg_end_nested(mask, encap_mask);
5994 }
5995
5996 return s - start;
5997 }
5998
5999 return -EINVAL;
6000 }
6001
6002 /* Parses the string representation of a datapath flow key, in the format
6003 * output by odp_flow_key_format(). Returns 0 if successful, otherwise a
6004 * positive errno value. On success, stores NULL into '*errorp' and the flow
6005 * key is appended to 'key' as a series of Netlink attributes. On failure,
6006 * stores a malloc()'d error message in '*errorp' without changing the data in
6007 * 'key'. Either way, 'key''s data might be reallocated.
6008 *
6009 * If 'port_names' is nonnull, it points to a simap that maps from a port name
6010 * to a port number. (Port names may be used instead of port numbers in
6011 * in_port.)
6012 *
6013 * On success, the attributes appended to 'key' are individually syntactically
6014 * valid, but they may not be valid as a sequence. 'key' might, for example,
6015 * have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
6016 int
6017 odp_flow_from_string(const char *s, const struct simap *port_names,
6018 struct ofpbuf *key, struct ofpbuf *mask,
6019 char **errorp)
6020 {
6021 if (errorp) {
6022 *errorp = NULL;
6023 }
6024
6025 const size_t old_size = key->size;
6026 struct parse_odp_context context = (struct parse_odp_context) {
6027 .port_names = port_names,
6028 };
6029 for (;;) {
6030 int retval;
6031
6032 s += strspn(s, delimiters);
6033 if (!*s) {
6034 return 0;
6035 }
6036
6037 /* Skip UFID. */
6038 ovs_u128 ufid;
6039 retval = odp_ufid_from_string(s, &ufid);
6040 if (retval < 0) {
6041 if (errorp) {
6042 *errorp = xasprintf("syntax error at %s", s);
6043 }
6044 key->size = old_size;
6045 return -retval;
6046 } else if (retval > 0) {
6047 s += retval;
6048 s += s[0] == ' ' ? 1 : 0;
6049 }
6050
6051 retval = parse_odp_key_mask_attr(&context, s, key, mask);
6052 if (retval < 0) {
6053 if (errorp) {
6054 *errorp = xasprintf("syntax error at %s", s);
6055 }
6056 key->size = old_size;
6057 return -retval;
6058 }
6059 s += retval;
6060 }
6061
6062 return 0;
6063 }
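
/* Illustrative usage sketch (not part of the build): parsing a flow string
 * into Netlink attributes with odp_flow_from_string().  The flow string and
 * the zero-sized initial buffers below are arbitrary examples, not values
 * required by the API.
 *
 *     struct ofpbuf key, mask;
 *     char *error;
 *
 *     ofpbuf_init(&key, 0);
 *     ofpbuf_init(&mask, 0);
 *     if (odp_flow_from_string("in_port(1),eth_type(0x0800)", NULL,
 *                              &key, &mask, &error)) {
 *         VLOG_WARN("parse failed: %s", error);
 *         free(error);
 *     }
 *     ofpbuf_uninit(&key);
 *     ofpbuf_uninit(&mask);
 */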
6064
6065 static uint8_t
6066 ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
6067 {
6068 if (is_mask) {
6069 /* Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration type,
6070 * not a set of flags or bitfields. Hence, if the struct flow nw_frag
6071 * mask, which is a set of bits, has the FLOW_NW_FRAG_ANY as zero, we
6072 * must use a zero mask for the netlink frag field, and all ones mask
6073 * otherwise. */
6074 return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
6075 }
6076 return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
6077 : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
6078 : OVS_FRAG_TYPE_FIRST;
6079 }
6080
6081 static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
6082 static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
6083 static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
6084 bool is_mask);
6085 static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
6086 bool is_mask);
6087 static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
6088 bool is_mask);
6089 static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
6090 bool is_mask);
6091 static void get_arp_key(const struct flow *, struct ovs_key_arp *);
6092 static void put_arp_key(const struct ovs_key_arp *, struct flow *);
6093 static void get_nd_key(const struct flow *, struct ovs_key_nd *);
6094 static void put_nd_key(const struct ovs_key_nd *, struct flow *);
6095 static void get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh,
6096 bool is_mask);
6097 static void put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
6098 bool is_mask);
6099
6100 /* These share the same layout. */
6101 union ovs_key_tp {
6102 struct ovs_key_tcp tcp;
6103 struct ovs_key_udp udp;
6104 struct ovs_key_sctp sctp;
6105 };
6106
6107 static void get_tp_key(const struct flow *, union ovs_key_tp *);
6108 static void put_tp_key(const union ovs_key_tp *, struct flow *);
6109
6110 static void
6111 odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms,
6112 bool export_mask, struct ofpbuf *buf)
6113 {
6114 /* New "struct flow" fields that are visible to the datapath (including all
6115 * data fields) should be translated into equivalent datapath flow fields
6116 * here (you will have to add an OVS_KEY_ATTR_* for them). */
6117 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
6118
6119 struct ovs_key_ethernet *eth_key;
6120 size_t encap[FLOW_MAX_VLAN_HEADERS] = {0};
6121 size_t max_vlans;
6122 const struct flow *flow = parms->flow;
6123 const struct flow *mask = parms->mask;
6124 const struct flow *data = export_mask ? mask : flow;
6125
6126 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
6127
6128 if (flow_tnl_dst_is_set(&flow->tunnel) ||
6129 flow_tnl_src_is_set(&flow->tunnel) || export_mask) {
6130 tun_key_to_attr(buf, &data->tunnel, &parms->flow->tunnel,
6131 parms->key_buf, NULL);
6132 }
6133
6134 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
6135
6136 if (parms->support.ct_state) {
6137 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
6138 ovs_to_odp_ct_state(data->ct_state));
6139 }
6140 if (parms->support.ct_zone) {
6141 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, data->ct_zone);
6142 }
6143 if (parms->support.ct_mark) {
6144 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, data->ct_mark);
6145 }
6146 if (parms->support.ct_label) {
6147 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &data->ct_label,
6148 sizeof(data->ct_label));
6149 }
6150 if (flow->ct_nw_proto) {
6151 if (parms->support.ct_orig_tuple
6152 && flow->dl_type == htons(ETH_TYPE_IP)) {
6153 struct ovs_key_ct_tuple_ipv4 *ct;
6154
6155 /* 'struct ovs_key_ct_tuple_ipv4' has padding, clear it. */
6156 ct = nl_msg_put_unspec_zero(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
6157 sizeof *ct);
6158 ct->ipv4_src = data->ct_nw_src;
6159 ct->ipv4_dst = data->ct_nw_dst;
6160 ct->src_port = data->ct_tp_src;
6161 ct->dst_port = data->ct_tp_dst;
6162 ct->ipv4_proto = data->ct_nw_proto;
6163 } else if (parms->support.ct_orig_tuple6
6164 && flow->dl_type == htons(ETH_TYPE_IPV6)) {
6165 struct ovs_key_ct_tuple_ipv6 *ct;
6166
6167 /* 'struct ovs_key_ct_tuple_ipv6' has padding, clear it. */
6168 ct = nl_msg_put_unspec_zero(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
6169 sizeof *ct);
6170 ct->ipv6_src = data->ct_ipv6_src;
6171 ct->ipv6_dst = data->ct_ipv6_dst;
6172 ct->src_port = data->ct_tp_src;
6173 ct->dst_port = data->ct_tp_dst;
6174 ct->ipv6_proto = data->ct_nw_proto;
6175 }
6176 }
6177 if (parms->support.recirc) {
6178 nl_msg_put_u32(buf, OVS_KEY_ATTR_RECIRC_ID, data->recirc_id);
6179 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, data->dp_hash);
6180 }
6181
6182 /* Add an ingress port attribute if this is a mask or 'in_port.odp_port'
6183 * is not the magical value "ODPP_NONE". */
6184 if (export_mask || flow->in_port.odp_port != ODPP_NONE) {
6185 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, data->in_port.odp_port);
6186 }
6187
6188 nl_msg_put_be32(buf, OVS_KEY_ATTR_PACKET_TYPE, data->packet_type);
6189
6190 if (OVS_UNLIKELY(parms->probe)) {
6191 max_vlans = FLOW_MAX_VLAN_HEADERS;
6192 } else {
6193 max_vlans = MIN(parms->support.max_vlan_headers, flow_vlan_limit);
6194 }
6195
6196 /* Conditionally add L2 attributes for Ethernet packets */
6197 if (flow->packet_type == htonl(PT_ETH)) {
6198 eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
6199 sizeof *eth_key);
6200 get_ethernet_key(data, eth_key);
6201
6202 for (int encaps = 0; encaps < max_vlans; encaps++) {
6203 ovs_be16 tpid = flow->vlans[encaps].tpid;
6204
6205 if (flow->vlans[encaps].tci == htons(0)) {
6206 if (eth_type_vlan(flow->dl_type)) {
6207 /* If VLAN was truncated the tpid is in dl_type */
6208 tpid = flow->dl_type;
6209 } else {
6210 break;
6211 }
6212 }
6213
6214 if (export_mask) {
6215 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
6216 } else {
6217 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, tpid);
6218 }
6219 nl_msg_put_be16(buf, OVS_KEY_ATTR_VLAN, data->vlans[encaps].tci);
6220 encap[encaps] = nl_msg_start_nested(buf, OVS_KEY_ATTR_ENCAP);
6221 if (flow->vlans[encaps].tci == htons(0)) {
6222 goto unencap;
6223 }
6224 }
6225 }
6226
6227 if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
6228 /* For backwards compatibility with kernels that don't support
6229 * wildcarding, the following convention is used to encode the
6230 * OVS_KEY_ATTR_ETHERTYPE for key and mask:
6231 *
6232 * key mask matches
6233 * -------- -------- -------
6234 * >0x5ff 0xffff Specified Ethernet II Ethertype.
6235 * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
6236 * <none> 0xffff Any non-Ethernet II frame (except valid
6237 * 802.3 SNAP packet with valid eth_type).
6238 */
6239 if (export_mask) {
6240 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
6241 }
6242 goto unencap;
6243 }
6244
6245 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
6246
6247 if (eth_type_vlan(flow->dl_type)) {
6248 goto unencap;
6249 }
6250
6251 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6252 struct ovs_key_ipv4 *ipv4_key;
6253
6254 ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
6255 sizeof *ipv4_key);
6256 get_ipv4_key(data, ipv4_key, export_mask);
6257 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
6258 struct ovs_key_ipv6 *ipv6_key;
6259
6260 ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
6261 sizeof *ipv6_key);
6262 get_ipv6_key(data, ipv6_key, export_mask);
6263 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
6264 flow->dl_type == htons(ETH_TYPE_RARP)) {
6265 struct ovs_key_arp *arp_key;
6266
6267 arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
6268 sizeof *arp_key);
6269 get_arp_key(data, arp_key);
6270 } else if (eth_type_mpls(flow->dl_type)) {
6271 struct ovs_key_mpls *mpls_key;
6272 int i, n;
6273
6274 n = flow_count_mpls_labels(flow, NULL);
6275 if (export_mask) {
6276 n = MIN(n, parms->support.max_mpls_depth);
6277 }
6278 mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
6279 n * sizeof *mpls_key);
6280 for (i = 0; i < n; i++) {
6281 mpls_key[i].mpls_lse = data->mpls_lse[i];
6282 }
6283 } else if (flow->dl_type == htons(ETH_TYPE_NSH)) {
6284 nsh_key_to_attr(buf, &data->nsh, NULL, 0, export_mask);
6285 }
6286
6287 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6288 if (flow->nw_proto == IPPROTO_TCP) {
6289 union ovs_key_tp *tcp_key;
6290
6291 tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
6292 sizeof *tcp_key);
6293 get_tp_key(data, tcp_key);
6294 if (data->tcp_flags || (mask && mask->tcp_flags)) {
6295 nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
6296 }
6297 } else if (flow->nw_proto == IPPROTO_UDP) {
6298 union ovs_key_tp *udp_key;
6299
6300 udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
6301 sizeof *udp_key);
6302 get_tp_key(data, udp_key);
6303 } else if (flow->nw_proto == IPPROTO_SCTP) {
6304 union ovs_key_tp *sctp_key;
6305
6306 sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
6307 sizeof *sctp_key);
6308 get_tp_key(data, sctp_key);
6309 } else if (flow->dl_type == htons(ETH_TYPE_IP)
6310 && flow->nw_proto == IPPROTO_ICMP) {
6311 struct ovs_key_icmp *icmp_key;
6312
6313 icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
6314 sizeof *icmp_key);
6315 icmp_key->icmp_type = ntohs(data->tp_src);
6316 icmp_key->icmp_code = ntohs(data->tp_dst);
6317 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
6318 && flow->nw_proto == IPPROTO_ICMPV6) {
6319 struct ovs_key_icmpv6 *icmpv6_key;
6320
6321 icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
6322 sizeof *icmpv6_key);
6323 icmpv6_key->icmpv6_type = ntohs(data->tp_src);
6324 icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
6325
6326 if (is_nd(flow, NULL)
6327 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, ICMP
6328 * type and code are 8 bits wide. Therefore, an exact match
6329 * looks like htons(0xff), not htons(0xffff). See
6330 * xlate_wc_finish() for details. */
6331 && (!export_mask || (data->tp_src == htons(0xff)
6332 && data->tp_dst == htons(0xff)))) {
6333 struct ovs_key_nd *nd_key;
6334 nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
6335 sizeof *nd_key);
6336 nd_key->nd_target = data->nd_target;
6337 nd_key->nd_sll = data->arp_sha;
6338 nd_key->nd_tll = data->arp_tha;
6339
6340 /* Add the ND Extensions attribute only if it is supported and the
6341 * reserved field or options type is set. */
6342 if (parms->support.nd_ext) {
6343 struct ovs_key_nd_extensions *nd_ext_key;
6344
6345 if (data->igmp_group_ip4 != 0 || data->tcp_flags != 0) {
6346 nd_ext_key = nl_msg_put_unspec_uninit(buf,
6347 OVS_KEY_ATTR_ND_EXTENSIONS,
6348 sizeof *nd_ext_key);
6349 nd_ext_key->nd_reserved = data->igmp_group_ip4;
6350 nd_ext_key->nd_options_type = ntohs(data->tcp_flags);
6351 }
6352 }
6353 }
6354 }
6355 }
6356
6357 unencap:
6358 for (int encaps = max_vlans - 1; encaps >= 0; encaps--) {
6359 if (encap[encaps]) {
6360 nl_msg_end_nested(buf, encap[encaps]);
6361 }
6362 }
6363 }
6364
6365 /* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
6366 *
6367 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
6368 * capable of being expanded to allow for that much space. */
6369 void
6370 odp_flow_key_from_flow(const struct odp_flow_key_parms *parms,
6371 struct ofpbuf *buf)
6372 {
6373 odp_flow_key_from_flow__(parms, false, buf);
6374 }
6375
6376 /* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
6377 * 'buf'.
6378 *
6379 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
6380 * capable of being expanded to allow for that much space. */
6381 void
6382 odp_flow_key_from_mask(const struct odp_flow_key_parms *parms,
6383 struct ofpbuf *buf)
6384 {
6385 odp_flow_key_from_flow__(parms, true, buf);
6386 }
6387
6388 /* Generate ODP flow key from the given packet metadata */
6389 void
6390 odp_key_from_dp_packet(struct ofpbuf *buf, const struct dp_packet *packet)
6391 {
6392 const struct pkt_metadata *md = &packet->md;
6393
6394 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
6395
6396 if (md->dp_hash) {
6397 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, md->dp_hash);
6398 }
6399
6400 if (flow_tnl_dst_is_set(&md->tunnel)) {
6401 tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL, NULL);
6402 }
6403
6404 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
6405
6406 if (md->ct_state) {
6407 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
6408 ovs_to_odp_ct_state(md->ct_state));
6409 if (md->ct_zone) {
6410 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, md->ct_zone);
6411 }
6412 if (md->ct_mark) {
6413 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, md->ct_mark);
6414 }
6415 if (!ovs_u128_is_zero(md->ct_label)) {
6416 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &md->ct_label,
6417 sizeof(md->ct_label));
6418 }
6419 if (md->ct_orig_tuple_ipv6) {
6420 if (md->ct_orig_tuple.ipv6.ipv6_proto) {
6421 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
6422 &md->ct_orig_tuple.ipv6,
6423 sizeof md->ct_orig_tuple.ipv6);
6424 }
6425 } else {
6426 if (md->ct_orig_tuple.ipv4.ipv4_proto) {
6427 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
6428 &md->ct_orig_tuple.ipv4,
6429 sizeof md->ct_orig_tuple.ipv4);
6430 }
6431 }
6432 }
6433
6434 /* Add an ingress port attribute if 'odp_in_port' is not the magical
6435 * value "ODPP_NONE". */
6436 if (md->in_port.odp_port != ODPP_NONE) {
6437 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, md->in_port.odp_port);
6438 }
6439
6440 /* Add OVS_KEY_ATTR_ETHERTYPE for non-Ethernet (L3) packets. */
6441 if (pt_ns(packet->packet_type) == OFPHTN_ETHERTYPE) {
6442 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE,
6443 pt_ns_type_be(packet->packet_type));
6444 }
6445 }
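
/* Illustrative usage sketch (assumes 'packet' is a struct dp_packet whose
 * metadata is already populated): building a metadata-only key, e.g. for an
 * upcall, with odp_key_from_dp_packet().
 *
 *     struct ofpbuf keybuf;
 *
 *     ofpbuf_init(&keybuf, ODPUTIL_FLOW_KEY_BYTES);
 *     odp_key_from_dp_packet(&keybuf, packet);
 *     ... use keybuf.data / keybuf.size as the Netlink-formatted key ...
 *     ofpbuf_uninit(&keybuf);
 */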
6446
6447 /* Generate packet metadata from the given ODP flow key. */
6448 void
6449 odp_key_to_dp_packet(const struct nlattr *key, size_t key_len,
6450 struct dp_packet *packet)
6451 {
6452 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6453 const struct nlattr *nla;
6454 struct pkt_metadata *md = &packet->md;
6455 ovs_be32 packet_type = htonl(PT_UNKNOWN);
6456 ovs_be16 ethertype = 0;
6457 size_t left;
6458
6459 pkt_metadata_init(md, ODPP_NONE);
6460
6461 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6462 enum ovs_key_attr type = nl_attr_type(nla);
6463 size_t len = nl_attr_get_size(nla);
6464 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6465 OVS_KEY_ATTR_MAX, type);
6466
6467 if (len != expected_len && expected_len >= 0) {
6468 continue;
6469 }
6470
6471 switch (type) {
6472 case OVS_KEY_ATTR_RECIRC_ID:
6473 md->recirc_id = nl_attr_get_u32(nla);
6474 break;
6475 case OVS_KEY_ATTR_DP_HASH:
6476 md->dp_hash = nl_attr_get_u32(nla);
6477 break;
6478 case OVS_KEY_ATTR_PRIORITY:
6479 md->skb_priority = nl_attr_get_u32(nla);
6480 break;
6481 case OVS_KEY_ATTR_SKB_MARK:
6482 md->pkt_mark = nl_attr_get_u32(nla);
6483 break;
6484 case OVS_KEY_ATTR_CT_STATE:
6485 md->ct_state = odp_to_ovs_ct_state(nl_attr_get_u32(nla));
6486 break;
6487 case OVS_KEY_ATTR_CT_ZONE:
6488 md->ct_zone = nl_attr_get_u16(nla);
6489 break;
6490 case OVS_KEY_ATTR_CT_MARK:
6491 md->ct_mark = nl_attr_get_u32(nla);
6492 break;
6493 case OVS_KEY_ATTR_CT_LABELS: {
6494 md->ct_label = nl_attr_get_u128(nla);
6495 break;
6496 }
6497 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
6498 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(nla);
6499 md->ct_orig_tuple.ipv4 = *ct;
6500 md->ct_orig_tuple_ipv6 = false;
6501 break;
6502 }
6503 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
6504 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(nla);
6505
6506 md->ct_orig_tuple.ipv6 = *ct;
6507 md->ct_orig_tuple_ipv6 = true;
6508 break;
6509 }
6510 case OVS_KEY_ATTR_TUNNEL: {
6511 enum odp_key_fitness res;
6512
6513 res = odp_tun_key_from_attr(nla, &md->tunnel, NULL);
6514 if (res == ODP_FIT_ERROR) {
6515 memset(&md->tunnel, 0, sizeof md->tunnel);
6516 }
6517 break;
6518 }
6519 case OVS_KEY_ATTR_IN_PORT:
6520 md->in_port.odp_port = nl_attr_get_odp_port(nla);
6521 break;
6522 case OVS_KEY_ATTR_ETHERNET:
6523 /* Presence of OVS_KEY_ATTR_ETHERNET indicates Ethernet packet. */
6524 packet_type = htonl(PT_ETH);
6525 break;
6526 case OVS_KEY_ATTR_ETHERTYPE:
6527 ethertype = nl_attr_get_be16(nla);
6528 break;
6529 case OVS_KEY_ATTR_UNSPEC:
6530 case OVS_KEY_ATTR_ENCAP:
6531 case OVS_KEY_ATTR_VLAN:
6532 case OVS_KEY_ATTR_IPV4:
6533 case OVS_KEY_ATTR_IPV6:
6534 case OVS_KEY_ATTR_TCP:
6535 case OVS_KEY_ATTR_UDP:
6536 case OVS_KEY_ATTR_ICMP:
6537 case OVS_KEY_ATTR_ICMPV6:
6538 case OVS_KEY_ATTR_ARP:
6539 case OVS_KEY_ATTR_ND:
6540 case OVS_KEY_ATTR_ND_EXTENSIONS:
6541 case OVS_KEY_ATTR_SCTP:
6542 case OVS_KEY_ATTR_TCP_FLAGS:
6543 case OVS_KEY_ATTR_MPLS:
6544 case OVS_KEY_ATTR_PACKET_TYPE:
6545 case OVS_KEY_ATTR_NSH:
6546 case __OVS_KEY_ATTR_MAX:
6547 default:
6548 break;
6549 }
6550 }
6551
6552 if (packet_type == htonl(PT_ETH)) {
6553 packet->packet_type = htonl(PT_ETH);
6554 } else if (packet_type == htonl(PT_UNKNOWN) && ethertype != 0) {
6555 packet->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
6556 ntohs(ethertype));
6557 } else {
6558 VLOG_ERR_RL(&rl, "Packet without ETHERTYPE. Unknown packet_type.");
6559 }
6560 }
6561
6562 /* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'.
6563 * The generated value has the format of a random (version 4) UUID. */
6564 void
6565 odp_flow_key_hash(const void *key, size_t key_len, ovs_u128 *hash)
6566 {
6567 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
6568 static uint32_t secret;
6569
6570 if (ovsthread_once_start(&once)) {
6571 secret = random_uint32();
6572 ovsthread_once_done(&once);
6573 }
6574 hash_bytes128(key, key_len, secret, hash);
6575 uuid_set_bits_v4((struct uuid *)hash);
6576 }
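
/* Illustrative usage sketch (assumes 'keybuf' is an ofpbuf holding a
 * Netlink-formatted flow key): deriving a UUID-shaped identifier, such as a
 * UFID, from the key bytes.
 *
 *     ovs_u128 ufid;
 *
 *     odp_flow_key_hash(keybuf.data, keybuf.size, &ufid);
 */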
6577
6578 static void
6579 log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
6580 uint64_t attrs, int out_of_range_attr,
6581 const struct nlattr *key, size_t key_len)
6582 {
6583 struct ds s;
6584 int i;
6585
6586 if (VLOG_DROP_DBG(rl)) {
6587 return;
6588 }
6589
6590 ds_init(&s);
6591 for (i = 0; i < 64; i++) {
6592 if (attrs & (UINT64_C(1) << i)) {
6593 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6594
6595 ds_put_format(&s, " %s",
6596 ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
6597 }
6598 }
6599 if (out_of_range_attr) {
6600 ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
6601 }
6602
6603 ds_put_cstr(&s, ": ");
6604 odp_flow_key_format(key, key_len, &s);
6605
6606 VLOG_DBG("%s:%s", title, ds_cstr(&s));
6607 ds_destroy(&s);
6608 }
6609
6610 static uint8_t
6611 odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
6612 {
6613 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6614
6615 if (is_mask) {
6616 return odp_frag ? FLOW_NW_FRAG_MASK : 0;
6617 }
6618
6619 if (odp_frag > OVS_FRAG_TYPE_LATER) {
6620 VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
6621 return 0xff; /* Error. */
6622 }
6623
6624 return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
6625 : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
6626 : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
6627 }
6628
6629 /* Parses the attributes in the 'key_len' bytes of 'key' into 'attrs', which
6630 * must have OVS_KEY_ATTR_MAX + 1 elements. Stores each attribute in 'key'
6631 * into the corresponding element of 'attrs'.
6632 *
6633 * Stores a bitmask of the attributes' indexes found in 'key' into
6634 * '*present_attrsp'.
6635 *
6636 * If an attribute beyond OVS_KEY_ATTR_MAX is found, stores its attribute type
6637 * (or one of them, if more than one) into '*out_of_range_attrp', otherwise 0.
6638 *
6639 * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
6640 * error message in '*errorp'. */
6641 static bool
6642 parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
6643 const struct nlattr *attrs[], uint64_t *present_attrsp,
6644 int *out_of_range_attrp, char **errorp)
6645 {
6646 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6647 const struct nlattr *nla;
6648 uint64_t present_attrs;
6649 size_t left;
6650
6651 BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
6652 present_attrs = 0;
6653 *out_of_range_attrp = 0;
6654 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6655 uint16_t type = nl_attr_type(nla);
6656 size_t len = nl_attr_get_size(nla);
6657 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6658 OVS_KEY_ATTR_MAX, type);
6659
6660 if (len != expected_len && expected_len >= 0) {
6661 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6662
6663 odp_parse_error(&rl, errorp, "attribute %s has length %"PRIuSIZE" "
6664 "but should have length %d",
6665 ovs_key_attr_to_string(type, namebuf,
6666 sizeof namebuf),
6667 len, expected_len);
6668 return false;
6669 }
6670
6671 if (type > OVS_KEY_ATTR_MAX) {
6672 *out_of_range_attrp = type;
6673 } else {
6674 if (present_attrs & (UINT64_C(1) << type)) {
6675 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6676
6677 odp_parse_error(&rl, errorp,
6678 "duplicate %s attribute in flow key",
6679 ovs_key_attr_to_string(type, namebuf,
6680 sizeof namebuf));
6681 return false;
6682 }
6683
6684 present_attrs |= UINT64_C(1) << type;
6685 attrs[type] = nla;
6686 }
6687 }
6688 if (left) {
6689 odp_parse_error(&rl, errorp, "trailing garbage in flow key");
6690 return false;
6691 }
6692
6693 *present_attrsp = present_attrs;
6694 return true;
6695 }
6696
6697 static enum odp_key_fitness
6698 check_expectations(uint64_t present_attrs, int out_of_range_attr,
6699 uint64_t expected_attrs,
6700 const struct nlattr *key, size_t key_len)
6701 {
6702 uint64_t missing_attrs;
6703 uint64_t extra_attrs;
6704
6705 missing_attrs = expected_attrs & ~present_attrs;
6706 if (missing_attrs) {
6707 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6708 log_odp_key_attributes(&rl, "expected but not present",
6709 missing_attrs, 0, key, key_len);
6710 return ODP_FIT_TOO_LITTLE;
6711 }
6712
6713 extra_attrs = present_attrs & ~expected_attrs;
6714 if (extra_attrs || out_of_range_attr) {
6715 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6716 log_odp_key_attributes(&rl, "present but not expected",
6717 extra_attrs, out_of_range_attr, key, key_len);
6718 return ODP_FIT_TOO_MUCH;
6719 }
6720
6721 return ODP_FIT_PERFECT;
6722 }
6723
6724 /* Initializes 'flow->dl_type' based on the attributes in 'attrs', in which the
6725 * attributes in the bit-mask 'present_attrs' are present. Returns true if
6726 * successful, false on failure.
6727 *
6728 * Sets 1-bits in '*expected_attrs' for the attributes in 'attrs' that were
6729 * consulted. 'flow' is assumed to be a flow key unless 'src_flow' is nonnull,
6730 * in which case 'flow' is a flow mask and 'src_flow' is its corresponding
6731 * previously parsed flow key.
6732 *
6733 * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
6734 * error message in '*errorp'. */
6735 static bool
6736 parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6737 uint64_t present_attrs, uint64_t *expected_attrs,
6738 struct flow *flow, const struct flow *src_flow,
6739 char **errorp)
6740 {
6741 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6742 bool is_mask = flow != src_flow;
6743
6744 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
6745 flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
6746 if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
6747 odp_parse_error(&rl, errorp,
6748 "invalid Ethertype %"PRIu16" in flow key",
6749 ntohs(flow->dl_type));
6750 return false;
6751 }
6752 if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
6753 flow->dl_type != htons(0xffff)) {
6754 odp_parse_error(&rl, errorp, "can't bitwise match non-Ethernet II "
6755 "\"Ethertype\" %#"PRIx16" (with mask %#"PRIx16")",
6756 ntohs(src_flow->dl_type), ntohs(flow->dl_type));
6757 return false;
6758 }
6759 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
6760 } else {
6761 if (!is_mask) {
6762 /* Default ethertype for well-known L3 packets. */
6763 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6764 flow->dl_type = htons(ETH_TYPE_IP);
6765 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6766 flow->dl_type = htons(ETH_TYPE_IPV6);
6767 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6768 flow->dl_type = htons(ETH_TYPE_MPLS);
6769 } else {
6770 flow->dl_type = htons(FLOW_DL_TYPE_NONE);
6771 }
6772 } else if (src_flow->packet_type != htonl(PT_ETH)) {
6773 /* dl_type is mandatory for non-Ethernet packets */
6774 flow->dl_type = htons(0xffff);
6775 } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
6776 /* See comments in odp_flow_key_from_flow__(). */
6777 odp_parse_error(&rl, errorp,
6778 "mask expected for non-Ethernet II frame");
6779 return false;
6780 }
6781 }
6782 return true;
6783 }
6784
6785 /* Initializes MPLS, L3, and L4 fields in 'flow' based on the attributes in
6786 * 'attrs', in which the attributes in the bit-mask 'present_attrs' are
6787 * present. The caller also indicates an out-of-range attribute
6788 * 'out_of_range_attr' if one was present when parsing (if so, the fitness
6789 * cannot be perfect).
6790 *
6791 * Sets 1-bits in '*expected_attrs' for the attributes in 'attrs' that were
6792 * consulted. 'flow' is assumed to be a flow key unless 'src_flow' is nonnull,
6793 * in which case 'flow' is a flow mask and 'src_flow' is its corresponding
6794 * previously parsed flow key.
6795 *
6796 * Returns fitness based on any discrepancies between present and expected
6797 * attributes, except that a 'need_check' of false overrides this.
6798 *
6799 * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
6800 * error message in '*errorp'. 'key' and 'key_len' are just used for error
6801 * reporting in this case. */
6802 static enum odp_key_fitness
6803 parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6804 uint64_t present_attrs, int out_of_range_attr,
6805 uint64_t *expected_attrs, struct flow *flow,
6806 const struct nlattr *key, size_t key_len,
6807 const struct flow *src_flow, bool need_check, char **errorp)
6808 {
6809 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6810 bool is_mask = src_flow != flow;
6811 const void *check_start = NULL;
6812 size_t check_len = 0;
6813 enum ovs_key_attr expected_bit = 0xff;
6814
6815 if (eth_type_mpls(src_flow->dl_type)) {
6816 if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6817 *expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
6818 }
6819 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6820 size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
6821 const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
6822 int n = size / sizeof(ovs_be32);
6823 int i;
6824
6825 if (!size || size % sizeof(ovs_be32)) {
6826 odp_parse_error(&rl, errorp,
6827 "MPLS LSEs have invalid length %"PRIuSIZE,
6828 size);
6829 return ODP_FIT_ERROR;
6830 }
6831 if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
6832 odp_parse_error(&rl, errorp,
6833 "unexpected MPLS Ethertype mask %x"PRIx16,
6834 ntohs(flow->dl_type));
6835 return ODP_FIT_ERROR;
6836 }
6837
6838 for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
6839 flow->mpls_lse[i] = mpls_lse[i];
6840 }
6841 if (n > FLOW_MAX_MPLS_LABELS) {
6842 return ODP_FIT_TOO_MUCH;
6843 }
6844
6845 if (!is_mask) {
6846 /* BOS may be set only in the innermost label. */
6847 for (i = 0; i < n - 1; i++) {
6848 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
6849 odp_parse_error(&rl, errorp,
6850 "MPLS BOS set in non-innermost label");
6851 return ODP_FIT_ERROR;
6852 }
6853 }
6854
6855 /* BOS must be set in the innermost label. */
6856 if (n < FLOW_MAX_MPLS_LABELS
6857 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
6858 return ODP_FIT_TOO_LITTLE;
6859 }
6860 }
6861 }
6862
6863 goto done;
6864 } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
6865 if (!is_mask) {
6866 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
6867 }
6868 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6869 const struct ovs_key_ipv4 *ipv4_key;
6870
6871 ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
6872 put_ipv4_key(ipv4_key, flow, is_mask);
6873 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6874 odp_parse_error(&rl, errorp, "OVS_KEY_ATTR_IPV4 has invalid "
6875 "nw_frag %#"PRIx8, flow->nw_frag);
6876 return ODP_FIT_ERROR;
6877 }
6878
6879 if (is_mask) {
6880 check_start = ipv4_key;
6881 check_len = sizeof *ipv4_key;
6882 expected_bit = OVS_KEY_ATTR_IPV4;
6883 }
6884 }
6885 } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
6886 if (!is_mask) {
6887 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
6888 }
6889 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6890 const struct ovs_key_ipv6 *ipv6_key;
6891
6892 ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
6893 put_ipv6_key(ipv6_key, flow, is_mask);
6894 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6895 odp_parse_error(&rl, errorp, "OVS_KEY_ATTR_IPV6 has invalid "
6896 "nw_frag %#"PRIx8, flow->nw_frag);
6897 return ODP_FIT_ERROR;
6898 }
6899 if (is_mask) {
6900 check_start = ipv6_key;
6901 check_len = sizeof *ipv6_key;
6902 expected_bit = OVS_KEY_ATTR_IPV6;
6903 }
6904 }
6905 } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
6906 src_flow->dl_type == htons(ETH_TYPE_RARP)) {
6907 if (!is_mask) {
6908 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
6909 }
6910 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
6911 const struct ovs_key_arp *arp_key;
6912
6913 arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
6914 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
6915 odp_parse_error(&rl, errorp,
6916 "unsupported ARP opcode %"PRIu16" in flow "
6917 "key", ntohs(arp_key->arp_op));
6918 return ODP_FIT_ERROR;
6919 }
6920 put_arp_key(arp_key, flow);
6921 if (is_mask) {
6922 check_start = arp_key;
6923 check_len = sizeof *arp_key;
6924 expected_bit = OVS_KEY_ATTR_ARP;
6925 }
6926 }
6927 } else if (src_flow->dl_type == htons(ETH_TYPE_NSH)) {
6928 if (!is_mask) {
6929 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_NSH;
6930 }
6931 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_NSH)) {
6932 if (odp_nsh_key_from_attr__(attrs[OVS_KEY_ATTR_NSH],
6933 is_mask, &flow->nsh,
6934 NULL, errorp) == ODP_FIT_ERROR) {
6935 return ODP_FIT_ERROR;
6936 }
6937 if (is_mask) {
6938 check_start = nl_attr_get(attrs[OVS_KEY_ATTR_NSH]);
6939 check_len = nl_attr_get_size(attrs[OVS_KEY_ATTR_NSH]);
6940 expected_bit = OVS_KEY_ATTR_NSH;
6941 }
6942 }
6943 } else {
6944 goto done;
6945 }
6946 if (check_len > 0) { /* Happens only when 'is_mask'. */
6947 if (!is_all_zeros(check_start, check_len) &&
6948 flow->dl_type != htons(0xffff)) {
6949 odp_parse_error(&rl, errorp, "unexpected L3 matching with "
6950 "masked Ethertype %#"PRIx16"/%#"PRIx16,
6951 ntohs(src_flow->dl_type),
6952 ntohs(flow->dl_type));
6953 return ODP_FIT_ERROR;
6954 } else {
6955 *expected_attrs |= UINT64_C(1) << expected_bit;
6956 }
6957 }
6958
6959 expected_bit = OVS_KEY_ATTR_UNSPEC;
6960 if (src_flow->nw_proto == IPPROTO_TCP
6961 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6962 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6963 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6964 if (!is_mask) {
6965 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
6966 }
6967 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
6968 const union ovs_key_tp *tcp_key;
6969
6970 tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
6971 put_tp_key(tcp_key, flow);
6972 expected_bit = OVS_KEY_ATTR_TCP;
6973 }
6974 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
6975 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
6976 flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
6977 }
6978 } else if (src_flow->nw_proto == IPPROTO_UDP
6979 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6980 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6981 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6982 if (!is_mask) {
6983 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
6984 }
6985 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
6986 const union ovs_key_tp *udp_key;
6987
6988 udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
6989 put_tp_key(udp_key, flow);
6990 expected_bit = OVS_KEY_ATTR_UDP;
6991 }
6992 } else if (src_flow->nw_proto == IPPROTO_SCTP
6993 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6994 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6995 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6996 if (!is_mask) {
6997 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
6998 }
6999 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
7000 const union ovs_key_tp *sctp_key;
7001
7002 sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
7003 put_tp_key(sctp_key, flow);
7004 expected_bit = OVS_KEY_ATTR_SCTP;
7005 }
7006 } else if (src_flow->nw_proto == IPPROTO_ICMP
7007 && src_flow->dl_type == htons(ETH_TYPE_IP)
7008 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
7009 if (!is_mask) {
7010 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
7011 }
7012 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
7013 const struct ovs_key_icmp *icmp_key;
7014
7015 icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
7016 flow->tp_src = htons(icmp_key->icmp_type);
7017 flow->tp_dst = htons(icmp_key->icmp_code);
7018 expected_bit = OVS_KEY_ATTR_ICMP;
7019 }
7020 } else if (src_flow->nw_proto == IPPROTO_ICMPV6
7021 && src_flow->dl_type == htons(ETH_TYPE_IPV6)
7022 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
7023 if (!is_mask) {
7024 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
7025 }
7026 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
7027 const struct ovs_key_icmpv6 *icmpv6_key;
7028
7029 icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
7030 flow->tp_src = htons(icmpv6_key->icmpv6_type);
7031 flow->tp_dst = htons(icmpv6_key->icmpv6_code);
7032 expected_bit = OVS_KEY_ATTR_ICMPV6;
7033 if (is_nd(src_flow, NULL)) {
7034 if (!is_mask) {
7035 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
7036 }
7037 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
7038 const struct ovs_key_nd *nd_key;
7039
7040 nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
7041 flow->nd_target = nd_key->nd_target;
7042 flow->arp_sha = nd_key->nd_sll;
7043 flow->arp_tha = nd_key->nd_tll;
7044 if (is_mask) {
7045 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
7046 * ICMP type and code are 8 bits wide. Therefore, an
7047 * exact match looks like htons(0xff), not
7048 * htons(0xffff). See xlate_wc_finish() for details.
7049 */
7050 if (!is_all_zeros(nd_key, sizeof *nd_key) &&
7051 (flow->tp_src != htons(0xff) ||
7052 flow->tp_dst != htons(0xff))) {
7053 odp_parse_error(&rl, errorp,
7054 "ICMP (src,dst) masks should be "
7055 "(0xff,0xff) but are actually "
7056 "(%#"PRIx16",%#"PRIx16")",
7057 ntohs(flow->tp_src),
7058 ntohs(flow->tp_dst));
7059 return ODP_FIT_ERROR;
7060 } else {
7061 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
7062 }
7063 }
7064 }
7065 if (present_attrs &
7066 (UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS)) {
7067 const struct ovs_key_nd_extensions *nd_ext_key;
7068 if (!is_mask) {
7069 *expected_attrs |=
7070 UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
7071 }
7072
7073 nd_ext_key =
7074 nl_attr_get(attrs[OVS_KEY_ATTR_ND_EXTENSIONS]);
7075 flow->igmp_group_ip4 = nd_ext_key->nd_reserved;
7076 flow->tcp_flags = htons(nd_ext_key->nd_options_type);
7077
7078 if (is_mask) {
7079 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
7080 * ICMP type and code are 8 bits wide. Therefore, an
7081 * exact match looks like htons(0xff), not
7082 * htons(0xffff). See xlate_wc_finish() for details.
7083 */
7084 if (!is_all_zeros(nd_ext_key, sizeof *nd_ext_key) &&
7085 (flow->tp_src != htons(0xff) ||
7086 flow->tp_dst != htons(0xff))) {
7087 return ODP_FIT_ERROR;
7088 } else {
7089 *expected_attrs |=
7090 UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
7091 }
7092 }
7093 }
7094 }
7095 }
7096 } else if (src_flow->nw_proto == IPPROTO_IGMP
7097 && src_flow->dl_type == htons(ETH_TYPE_IP)) {
7098 /* OVS userspace parses the IGMP type, code, and group, but its
7099 * datapaths do not, so there is always missing information. */
7100 return ODP_FIT_TOO_LITTLE;
7101 }
7102 if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
7103 if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
7104 odp_parse_error(&rl, errorp, "flow matches on L4 ports but does "
7105 "not define an L4 protocol");
7106 return ODP_FIT_ERROR;
7107 } else {
7108 *expected_attrs |= UINT64_C(1) << expected_bit;
7109 }
7110 }
7111
7112 done:
7113 return need_check ? check_expectations(present_attrs, out_of_range_attr,
7114 *expected_attrs, key, key_len) : ODP_FIT_PERFECT;
7115 }
7116
7117 /* Parse 802.1Q header then encapsulated L3 attributes. */
7118 static enum odp_key_fitness
7119 parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
7120 uint64_t present_attrs, int out_of_range_attr,
7121 uint64_t expected_attrs, struct flow *flow,
7122 const struct nlattr *key, size_t key_len,
7123 const struct flow *src_flow, char **errorp)
7124 {
7125 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7126 bool is_mask = src_flow != flow;
7127
7128 const struct nlattr *encap;
7129 enum odp_key_fitness encap_fitness;
7130 enum odp_key_fitness fitness = ODP_FIT_ERROR;
7131 int encaps = 0;
7132
7133 while (encaps < flow_vlan_limit &&
7134 (is_mask
7135 ? (src_flow->vlans[encaps].tci & htons(VLAN_CFI)) != 0
7136 : eth_type_vlan(flow->dl_type))) {
7137
7138 encap = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
7139 ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
7140
7141 /* Calculate fitness of outer attributes. */
7142 if (!is_mask) {
7143 expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
7144 (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
7145 } else {
7146 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
7147 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
7148 }
7149 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
7150 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
7151 }
7152 }
7153 fitness = check_expectations(present_attrs, out_of_range_attr,
7154 expected_attrs, key, key_len);
7155
7156 /* Set vlan_tci.
7157 * Remove the TPID from dl_type since it's not the real Ethertype. */
7158 flow->vlans[encaps].tpid = flow->dl_type;
7159 flow->dl_type = htons(0);
7160 flow->vlans[encaps].tci =
7161 (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
7162 ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
7163 : htons(0));
7164 if (!is_mask) {
7165 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) ||
7166 !(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
7167 return ODP_FIT_TOO_LITTLE;
7168 } else if (flow->vlans[encaps].tci == htons(0)) {
7169 /* Corner case for a truncated 802.1Q header. */
7170 if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
7171 return ODP_FIT_TOO_MUCH;
7172 }
7173 return fitness;
7174 } else if (!(flow->vlans[encaps].tci & htons(VLAN_CFI))) {
7175 odp_parse_error(
7176 &rl, errorp, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
7177 "but CFI bit is not set", ntohs(flow->vlans[encaps].tci));
7178 return ODP_FIT_ERROR;
7179 }
7180 } else {
7181 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
7182 return fitness;
7183 }
7184 }
7185
7186 /* Now parse the encapsulated attributes. */
7187 if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
7188 attrs, &present_attrs, &out_of_range_attr,
7189 errorp)) {
7190 return ODP_FIT_ERROR;
7191 }
7192 expected_attrs = 0;
7193
7194 if (!parse_ethertype(attrs, present_attrs, &expected_attrs,
7195 flow, src_flow, errorp)) {
7196 return ODP_FIT_ERROR;
7197 }
7198 encap_fitness = parse_l2_5_onward(attrs, present_attrs,
7199 out_of_range_attr,
7200 &expected_attrs,
7201 flow, key, key_len,
7202 src_flow, false, errorp);
7203 if (encap_fitness != ODP_FIT_PERFECT) {
7204 return encap_fitness;
7205 }
7206 encaps++;
7207 }
7208
7209 return check_expectations(present_attrs, out_of_range_attr,
7210 expected_attrs, key, key_len);
7211 }
7212
7213 static enum odp_key_fitness
7214 odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
7215 struct flow *flow, const struct flow *src_flow,
7216 char **errorp)
7217 {
7218 /* New "struct flow" fields that are visible to the datapath (including all
7219 * data fields) should be translated from equivalent datapath flow fields
7220 * here (you will have to add an OVS_KEY_ATTR_* for them). */
7221 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
7222
7223 enum odp_key_fitness fitness = ODP_FIT_ERROR;
7224 if (errorp) {
7225 *errorp = NULL;
7226 }
7227
7228 const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
7229 uint64_t expected_attrs;
7230 uint64_t present_attrs;
7231 int out_of_range_attr;
7232 bool is_mask = src_flow != flow;
7233
7234 memset(flow, 0, sizeof *flow);
7235
7236 /* Parse attributes. */
7237 if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
7238 &out_of_range_attr, errorp)) {
7239 goto exit;
7240 }
7241 expected_attrs = 0;
7242
7243 /* Metadata. */
7244 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
7245 flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
7246 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
7247 } else if (is_mask) {
7248 /* Always exact match recirc_id if it is not specified. */
7249 flow->recirc_id = UINT32_MAX;
7250 }
7251
7252 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
7253 flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
7254 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
7255 }
7256 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
7257 flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
7258 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
7259 }
7260
7261 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
7262 flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
7263 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
7264 }
7265
7266 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_STATE)) {
7267 uint32_t odp_state = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_STATE]);
7268
7269 flow->ct_state = odp_to_ovs_ct_state(odp_state);
7270 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_STATE;
7271 }
7272 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE)) {
7273 flow->ct_zone = nl_attr_get_u16(attrs[OVS_KEY_ATTR_CT_ZONE]);
7274 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE;
7275 }
7276 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_MARK)) {
7277 flow->ct_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_MARK]);
7278 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_MARK;
7279 }
7280 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS)) {
7281 flow->ct_label = nl_attr_get_u128(attrs[OVS_KEY_ATTR_CT_LABELS]);
7282 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS;
7283 }
7284 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
7285 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
7286 flow->ct_nw_src = ct->ipv4_src;
7287 flow->ct_nw_dst = ct->ipv4_dst;
7288 flow->ct_nw_proto = ct->ipv4_proto;
7289 flow->ct_tp_src = ct->src_port;
7290 flow->ct_tp_dst = ct->dst_port;
7291 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
7292 }
7293 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
7294 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
7295
7296 flow->ct_ipv6_src = ct->ipv6_src;
7297 flow->ct_ipv6_dst = ct->ipv6_dst;
7298 flow->ct_nw_proto = ct->ipv6_proto;
7299 flow->ct_tp_src = ct->src_port;
7300 flow->ct_tp_dst = ct->dst_port;
7301 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
7302 }
7303
7304 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
7305 enum odp_key_fitness res;
7306
7307 res = odp_tun_key_from_attr__(attrs[OVS_KEY_ATTR_TUNNEL], is_mask,
7308 &flow->tunnel, errorp);
7309 if (res == ODP_FIT_ERROR) {
7310 goto exit;
7311 } else if (res == ODP_FIT_PERFECT) {
7312 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
7313 }
7314 }
7315
7316 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
7317 flow->in_port.odp_port
7318 = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
7319 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
7320 } else if (!is_mask) {
7321 flow->in_port.odp_port = ODPP_NONE;
7322 }
7323
7324 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE)) {
7325 flow->packet_type
7326 = nl_attr_get_be32(attrs[OVS_KEY_ATTR_PACKET_TYPE]);
7327 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE;
7328 if (pt_ns(src_flow->packet_type) == OFPHTN_ETHERTYPE) {
7329 flow->dl_type = pt_ns_type_be(flow->packet_type);
7330 }
7331 } else if (!is_mask) {
7332 flow->packet_type = htonl(PT_ETH);
7333 }
7334
7335 /* Check for Ethernet header. */
7336 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
7337 const struct ovs_key_ethernet *eth_key;
7338
7339 eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
7340 put_ethernet_key(eth_key, flow);
7341 if (!is_mask) {
7342 flow->packet_type = htonl(PT_ETH);
7343 }
7344 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
7345 }
7346 else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
7347 ovs_be16 ethertype = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
7348 if (!is_mask) {
7349 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
7350 ntohs(ethertype));
7351 }
7352 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
7353 }
7354
7355 /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
7356 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
7357 src_flow, errorp)) {
7358 goto exit;
7359 }
7360
7361 if (is_mask
7362 ? (src_flow->vlans[0].tci & htons(VLAN_CFI)) != 0
7363 : eth_type_vlan(src_flow->dl_type)) {
7364 fitness = parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
7365 expected_attrs, flow, key, key_len,
7366 src_flow, errorp);
7367 } else {
7368 if (is_mask) {
7369 /* A missing VLAN mask means exact match on vlan_tci 0 (== no
7370 * VLAN). */
7371 flow->vlans[0].tpid = htons(0xffff);
7372 flow->vlans[0].tci = htons(0xffff);
7373 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
7374 flow->vlans[0].tci = nl_attr_get_be16(
7375 attrs[OVS_KEY_ATTR_VLAN]);
7376 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
7377 }
7378 }
7379 fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
7380 &expected_attrs, flow, key, key_len,
7381 src_flow, true, errorp);
7382 }
7383
7384 exit:;
7385 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7386 if (fitness == ODP_FIT_ERROR && (errorp || !VLOG_DROP_WARN(&rl))) {
7387 struct ds s = DS_EMPTY_INITIALIZER;
7388 if (is_mask) {
7389 ds_put_cstr(&s, "the flow mask in error is: ");
7390 odp_flow_key_format(key, key_len, &s);
7391 ds_put_cstr(&s, ", for the following flow key: ");
7392 flow_format(&s, src_flow, NULL);
7393 } else {
7394 ds_put_cstr(&s, "the flow key in error is: ");
7395 odp_flow_key_format(key, key_len, &s);
7396 }
7397 if (errorp) {
7398 char *old_error = *errorp;
7399 *errorp = xasprintf("%s; %s", old_error, ds_cstr(&s));
7400 free(old_error);
7401 } else {
7402 VLOG_WARN("%s", ds_cstr(&s));
7403 }
7404 ds_destroy(&s);
7405 }
7406 return fitness;
7407 }
7408
7409 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
7410 * structure in 'flow'. Returns an ODP_FIT_* value that indicates how well
7411 * 'key' fits our expectations for what a flow key should contain.
7412 *
7413 * The 'in_port' will be the datapath's understanding of the port. The
7414 * caller will need to translate with odp_port_to_ofp_port() if the
7415 * OpenFlow port is needed.
7416 *
7417 * This function doesn't take the packet itself as an argument because none of
7418 * the currently understood OVS_KEY_ATTR_* attributes require it. Currently,
7419 * it is always possible to infer which additional attribute(s) should appear
7420 * by looking at the attributes for lower-level protocols, e.g. if the network
7421 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
7422 * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
7423 * must be absent.
7424 *
7425 * If 'errorp' is nonnull, this function uses it for detailed error reports: if
7426 * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in
7427 * '*errorp', otherwise NULL. */
7428 enum odp_key_fitness
7429 odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
7430 struct flow *flow, char **errorp)
7431 {
7432 return odp_flow_key_to_flow__(key, key_len, flow, flow, errorp);
7433 }
7434
7435 /* Converts the 'mask_key_len' bytes of OVS_KEY_ATTR_* attributes in 'mask_key'
7436 * to a mask structure in 'mask'. 'flow' must be a previously translated flow
7437 * corresponding to 'mask' and similarly flow_key/flow_key_len must be the
7438 * attributes from that flow. Returns an ODP_FIT_* value that indicates how
7439 * well 'key' fits our expectations for what a flow key should contain.
7440 *
7441 * If 'errorp' is nonnull, this function uses it for detailed error reports: if
7442 * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in
7443 * '*errorp', otherwise NULL. */
7444 enum odp_key_fitness
7445 odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len,
7446 struct flow_wildcards *mask, const struct flow *src_flow,
7447 char **errorp)
7448 {
7449 if (mask_key_len) {
7450 return odp_flow_key_to_flow__(mask_key, mask_key_len,
7451 &mask->masks, src_flow, errorp);
7452 } else {
7453 if (errorp) {
7454 *errorp = NULL;
7455 }
7456
7457 /* A missing mask means that the flow should be exact matched.
7458 * Generate an appropriate exact wildcard for the flow. */
7459 flow_wildcards_init_for_packet(mask, src_flow);
7460
7461 return ODP_FIT_PERFECT;
7462 }
7463 }
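
/* Illustrative usage sketch ('key', 'key_len', 'mask', and 'mask_len' are
 * assumed to come from the datapath): converting a key/mask pair back into a
 * struct match.  This is the pattern that parse_key_and_mask_to_match() below
 * wraps with error logging.
 *
 *     struct match match;
 *
 *     if (odp_flow_key_to_flow(key, key_len, &match.flow, NULL)
 *         || odp_flow_key_to_mask(mask, mask_len, &match.wc, &match.flow,
 *                                 NULL)) {
 *         ... handle an imperfect or erroneous fit ...
 *     }
 */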
7464
7465 /* Converts the Netlink-formatted key/mask to a match.
7466 * Fails if odp_flow_key_from_flow()/from_mask() and odp_flow_key_to_flow()/
7467 * to_mask() disagree on the acceptable form of a flow. */
7468 int
7469 parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len,
7470 const struct nlattr *mask, size_t mask_len,
7471 struct match *match)
7472 {
7473 enum odp_key_fitness fitness;
7474
7475 fitness = odp_flow_key_to_flow(key, key_len, &match->flow, NULL);
7476 if (fitness) {
7477 /* This should not happen: it indicates that
7478 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
7479 * the acceptable form of a flow. Log the problem as an error,
7480 * with enough details to enable debugging. */
7481 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7482
7483 if (!VLOG_DROP_ERR(&rl)) {
7484 struct ds s;
7485
7486 ds_init(&s);
7487 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
7488 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
7489 ds_destroy(&s);
7490 }
7491
7492 return EINVAL;
7493 }
7494
7495 fitness = odp_flow_key_to_mask(mask, mask_len, &match->wc, &match->flow,
7496 NULL);
7497 if (fitness) {
7498 /* This should not happen: it indicates that
7499 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
7500 * disagree on the acceptable form of a mask. Log the problem
7501 * as an error, with enough details to enable debugging. */
7502 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7503
7504 if (!VLOG_DROP_ERR(&rl)) {
7505 struct ds s;
7506
7507 ds_init(&s);
7508 odp_flow_format(key, key_len, mask, mask_len, NULL, &s,
7509 true);
7510 VLOG_ERR("internal error parsing flow mask %s (%s)",
7511 ds_cstr(&s), odp_key_fitness_to_string(fitness));
7512 ds_destroy(&s);
7513 }
7514
7515 return EINVAL;
7516 }
7517
7518 return 0;
7519 }
7520
7521 /* Returns 'fitness' as a string, for use in debug messages. */
7522 const char *
7523 odp_key_fitness_to_string(enum odp_key_fitness fitness)
7524 {
7525 switch (fitness) {
7526 case ODP_FIT_PERFECT:
7527 return "OK";
7528 case ODP_FIT_TOO_MUCH:
7529 return "too_much";
7530 case ODP_FIT_TOO_LITTLE:
7531 return "too_little";
7532 case ODP_FIT_ERROR:
7533 return "error";
7534 default:
7535 return "<unknown>";
7536 }
7537 }
7538
7539 /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
7540 * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
7541 * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
7542 * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
7543 * null, then the return value is not meaningful.) */
7544 size_t
7545 odp_put_userspace_action(uint32_t pid,
7546 const void *userdata, size_t userdata_size,
7547 odp_port_t tunnel_out_port,
7548 bool include_actions,
7549 struct ofpbuf *odp_actions)
7550 {
7551 size_t userdata_ofs;
7552 size_t offset;
7553
7554 offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
7555 nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
7556 if (userdata) {
7557 userdata_ofs = odp_actions->size + NLA_HDRLEN;
7558
7559 /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
7560 * module before Linux 3.10 required the userdata to be exactly 8 bytes
7561 * long:
7562 *
7563 * - The kernel rejected shorter userdata with -ERANGE.
7564 *
7565 * - The kernel silently dropped userdata beyond the first 8 bytes.
7566 *
7567 * Thus, for maximum compatibility, always put at least 8 bytes. (We
7568 * separately disable features that required more than 8 bytes.) */
7569 memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
7570 MAX(8, userdata_size)),
7571 userdata, userdata_size);
7572 } else {
7573 userdata_ofs = 0;
7574 }
7575 if (tunnel_out_port != ODPP_NONE) {
7576 nl_msg_put_odp_port(odp_actions, OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,
7577 tunnel_out_port);
7578 }
7579 if (include_actions) {
7580 nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS);
7581 }
7582 nl_msg_end_nested(odp_actions, offset);
7583
7584 return userdata_ofs;
7585 }
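
/* Illustrative sketch of building a userspace action; the 'pid' and 'cookie'
 * values below are hypothetical placeholders rather than anything defined in
 * this file.  An 8-byte cookie is used because, as noted above, shorter
 * userdata is padded to 8 bytes for compatibility with old kernels.
 *
 *     struct ofpbuf actions;
 *     uint64_t cookie = 0x1234;            hypothetical userdata
 *     uint32_t pid = 42;                   hypothetical Netlink PID
 *
 *     ofpbuf_init(&actions, 0);
 *     odp_put_userspace_action(pid, &cookie, sizeof cookie,
 *                              ODPP_NONE, false, &actions);
 *     ...emit 'actions.data'/'actions.size' to the datapath...
 *     ofpbuf_uninit(&actions);
 */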
7586
7587 void
7588 odp_put_pop_eth_action(struct ofpbuf *odp_actions)
7589 {
7590 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_ETH);
7591 }
7592
7593 void
7594 odp_put_push_eth_action(struct ofpbuf *odp_actions,
7595 const struct eth_addr *eth_src,
7596 const struct eth_addr *eth_dst)
7597 {
7598 struct ovs_action_push_eth eth;
7599
7600 memset(&eth, 0, sizeof eth);
7601 if (eth_src) {
7602 eth.addresses.eth_src = *eth_src;
7603 }
7604 if (eth_dst) {
7605 eth.addresses.eth_dst = *eth_dst;
7606 }
7607
7608 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_ETH,
7609 &eth, sizeof eth);
7610 }
7611
7612 void
7613 odp_put_tunnel_action(const struct flow_tnl *tunnel,
7614 struct ofpbuf *odp_actions, const char *tnl_type)
7615 {
7616 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7617 tun_key_to_attr(odp_actions, tunnel, tunnel, NULL, tnl_type);
7618 nl_msg_end_nested(odp_actions, offset);
7619 }
7620
7621 void
7622 odp_put_tnl_push_action(struct ofpbuf *odp_actions,
7623 struct ovs_action_push_tnl *data)
7624 {
7625 int size = offsetof(struct ovs_action_push_tnl, header);
7626
7627 size += data->header_len;
7628 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
7629 }
7630
7631 \f
7632 /* The commit_odp_actions() function and its helpers. */
7633
7634 static void
7635 commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
7636 const void *key, size_t key_size)
7637 {
7638 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7639 nl_msg_put_unspec(odp_actions, key_type, key, key_size);
7640 nl_msg_end_nested(odp_actions, offset);
7641 }
7642
7643 /* Masked set actions have a mask following the data within the netlink
7644 * attribute. The unmasked bits in the data will be cleared as the data
7645 * is copied to the action. */
7646 void
7647 commit_masked_set_action(struct ofpbuf *odp_actions,
7648 enum ovs_key_attr key_type,
7649 const void *key_, const void *mask_, size_t key_size)
7650 {
7651 size_t offset = nl_msg_start_nested(odp_actions,
7652 OVS_ACTION_ATTR_SET_MASKED);
7653 char *data = nl_msg_put_unspec_uninit(odp_actions, key_type, key_size * 2);
7654 const char *key = key_, *mask = mask_;
7655
7656 memcpy(data + key_size, mask, key_size);
7657 /* Clear unmasked bits while copying. */
7658 while (key_size--) {
7659 *data++ = *key++ & *mask++;
7660 }
7661 nl_msg_end_nested(odp_actions, offset);
7662 }
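
/* Illustrative sketch: emitting a masked Ethernet set action that rewrites
 * only the destination MAC.  'new_dst' is a hypothetical struct eth_addr, not
 * something defined in this file.  The resulting attribute carries the value
 * bytes (with unmasked bits cleared) followed by the mask bytes.
 *
 *     struct ovs_key_ethernet key, mask;
 *
 *     memset(&key, 0, sizeof key);
 *     memset(&mask, 0, sizeof mask);
 *     key.eth_dst = new_dst;
 *     memset(&mask.eth_dst, 0xff, sizeof mask.eth_dst);
 *     commit_masked_set_action(odp_actions, OVS_KEY_ATTR_ETHERNET,
 *                              &key, &mask, sizeof key);
 */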
7663
7664 /* If any of the flow key data that ODP actions can modify are different in
7665 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
7666 * 'odp_actions' that changes the flow tunneling information in the key from
7667 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
7668 * same way. In other words, operates the same as commit_odp_actions(), but
7669 * only on tunneling information. */
7670 void
7671 commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
7672 struct ofpbuf *odp_actions, const char *tnl_type)
7673 {
7674 /* A valid IPV4_TUNNEL must have non-zero ip_dst; a valid IPv6 tunnel
7675 * must have non-zero ipv6_dst. */
7676 if (flow_tnl_dst_is_set(&flow->tunnel)) {
7677 if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
7678 return;
7679 }
7680 memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
7681 odp_put_tunnel_action(&base->tunnel, odp_actions, tnl_type);
7682 }
7683 }
7684
7685 struct offsetof_sizeof {
7686 int offset;
7687 int size;
7688 };
7689
7690 /* Compares each of the fields in 'key0' and 'key1'. The fields are specified
7691 * in 'offsetof_sizeof_arr', which is an array terminated by a 0-size field.
7692 * Returns true if all of the fields are equal, false if at least one differs.
7693 * As a side effect, for each field that is the same in 'key0' and 'key1',
7694 * zeros the corresponding bytes in 'mask'. */
7695 static bool
7696 keycmp_mask(const void *key0, const void *key1,
7697 struct offsetof_sizeof *offsetof_sizeof_arr, void *mask)
7698 {
7699 bool differ = false;
7700
7701 for (int field = 0 ; ; field++) {
7702 int size = offsetof_sizeof_arr[field].size;
7703 int offset = offsetof_sizeof_arr[field].offset;
7704 if (size == 0) {
7705 break;
7706 }
7707
7708 char *pkey0 = ((char *)key0) + offset;
7709 char *pkey1 = ((char *)key1) + offset;
7710 char *pmask = ((char *)mask) + offset;
7711 if (memcmp(pkey0, pkey1, size) == 0) {
7712 memset(pmask, 0, size);
7713 } else {
7714 differ = true;
7715 }
7716 }
7717
7718 return differ;
7719 }
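
/* For illustration only: a field table of the shape keycmp_mask() expects,
 * terminated by a zero-size entry.  The real per-key tables (such as
 * OVS_KEY_ETHERNET_OFFSETOF_SIZEOF_ARR) are generated by macros in
 * odp-netlink-macros.h; this hand-written sketch just shows the layout for
 * struct ovs_key_ethernet.
 *
 *     struct offsetof_sizeof eth_fields[] = {
 *         { offsetof(struct ovs_key_ethernet, eth_src),
 *           sizeof(struct eth_addr) },
 *         { offsetof(struct ovs_key_ethernet, eth_dst),
 *           sizeof(struct eth_addr) },
 *         { 0, 0 },
 *     };
 */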
7720
7721 static bool
7722 commit(enum ovs_key_attr attr, bool use_masked_set,
7723 const void *key, void *base, void *mask, size_t size,
7724 struct offsetof_sizeof *offsetof_sizeof_arr,
7725 struct ofpbuf *odp_actions)
7726 {
7727 if (keycmp_mask(key, base, offsetof_sizeof_arr, mask)) {
7728 bool fully_masked = odp_mask_is_exact(attr, mask, size);
7729
7730 if (use_masked_set && !fully_masked) {
7731 commit_masked_set_action(odp_actions, attr, key, mask, size);
7732 } else {
7733 if (!fully_masked) {
7734 memset(mask, 0xff, size);
7735 }
7736 commit_set_action(odp_actions, attr, key, size);
7737 }
7738 memcpy(base, key, size);
7739 return true;
7740 } else {
7741 /* Mask bits are set when we have either read or set the corresponding
7742 * values. Masked bits will be exact-matched, no need to set them
7743 * if the value did not actually change. */
7744 return false;
7745 }
7746 }
7747
7748 static void
7749 get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
7750 {
7751 eth->eth_src = flow->dl_src;
7752 eth->eth_dst = flow->dl_dst;
7753 }
7754
7755 static void
7756 put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
7757 {
7758 flow->dl_src = eth->eth_src;
7759 flow->dl_dst = eth->eth_dst;
7760 }
7761
7762 static void
7763 commit_set_ether_action(const struct flow *flow, struct flow *base_flow,
7764 struct ofpbuf *odp_actions,
7765 struct flow_wildcards *wc,
7766 bool use_masked)
7767 {
7768 struct ovs_key_ethernet key, base, mask;
7769 struct offsetof_sizeof ovs_key_ethernet_offsetof_sizeof_arr[] =
7770 OVS_KEY_ETHERNET_OFFSETOF_SIZEOF_ARR;
7771 if (flow->packet_type != htonl(PT_ETH)) {
7772 return;
7773 }
7774
7775 get_ethernet_key(flow, &key);
7776 get_ethernet_key(base_flow, &base);
7777 get_ethernet_key(&wc->masks, &mask);
7778
7779 if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
7780 &key, &base, &mask, sizeof key,
7781 ovs_key_ethernet_offsetof_sizeof_arr, odp_actions)) {
7782 put_ethernet_key(&base, base_flow);
7783 put_ethernet_key(&mask, &wc->masks);
7784 }
7785 }
7786
7787 static void
7788 commit_vlan_action(const struct flow* flow, struct flow *base,
7789 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7790 {
7791 int base_n = flow_count_vlan_headers(base);
7792 int flow_n = flow_count_vlan_headers(flow);
7793 flow_skip_common_vlan_headers(base, &base_n, flow, &flow_n);
7794
7795 /* Pop all mismatching VLAN headers of base, push those of flow. */
7796 for (; base_n >= 0; base_n--) {
7797 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
7798 wc->masks.vlans[base_n].qtag = OVS_BE32_MAX;
7799 }
7800
7801 for (; flow_n >= 0; flow_n--) {
7802 struct ovs_action_push_vlan vlan;
7803
7804 vlan.vlan_tpid = flow->vlans[flow_n].tpid;
7805 vlan.vlan_tci = flow->vlans[flow_n].tci;
7806 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
7807 &vlan, sizeof vlan);
7808 }
7809 memcpy(base->vlans, flow->vlans, sizeof(base->vlans));
7810 }
7811
7812 /* Wildcarding already done at action translation time. */
7813 static void
7814 commit_mpls_action(const struct flow *flow, struct flow *base,
7815 struct ofpbuf *odp_actions)
7816 {
7817 int base_n = flow_count_mpls_labels(base, NULL);
7818 int flow_n = flow_count_mpls_labels(flow, NULL);
7819 int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
7820 NULL);
7821
7822 while (base_n > common_n) {
7823 if (base_n - 1 == common_n && flow_n > common_n) {
7824 /* If base has exactly one more LSE than is common between base and
7825 * flow, and flow has at least one more LSE than is common, then the
7826 * topmost LSE of base may be updated in place using a set action
7827 * rather than a pop and a push. */
7828 struct ovs_key_mpls mpls_key;
7829
7830 mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
7831 commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
7832 &mpls_key, sizeof mpls_key);
7833 flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
7834 common_n++;
7835 } else {
7836 /* Otherwise, if there are more LSEs in base than are common between
7837 * base and flow, then pop the topmost one. */
7838 ovs_be16 dl_type;
7839 /* If all the LSEs are to be popped and this is not the outermost
7840 * LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
7841 * POP_MPLS action instead of flow->dl_type.
7842 *
7843 * This is because the POP_MPLS action requires its ethertype
7844 * argument to be an MPLS ethernet type but in this case
7845 * flow->dl_type will be a non-MPLS ethernet type.
7846 *
7847 * When the final POP_MPLS action occurs it uses flow->dl_type, and
7848 * the resulting packet will have the desired dl_type. */
7849 if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
7850 dl_type = htons(ETH_TYPE_MPLS);
7851 } else {
7852 dl_type = flow->dl_type;
7853 }
7854 nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
7855 ovs_assert(flow_pop_mpls(base, base_n, flow->dl_type, NULL));
7856 base_n--;
7857 }
7858 }
7859
7860 /* If, after the above popping and setting, there are more LSEs in flow
7861 * than base then some LSEs need to be pushed. */
7862 while (base_n < flow_n) {
7863 struct ovs_action_push_mpls *mpls;
7864
7865 mpls = nl_msg_put_unspec_zero(odp_actions,
7866 OVS_ACTION_ATTR_PUSH_MPLS,
7867 sizeof *mpls);
7868 mpls->mpls_ethertype = flow->dl_type;
7869 mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
7870 /* Update base flow's MPLS stack, but do not clear L3. We need the L3
7871 * headers if the flow is restored later due to returning from a patch
7872 * port or group bucket. */
7873 flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL, false);
7874 flow_set_mpls_lse(base, 0, mpls->mpls_lse);
7875 base_n++;
7876 }
7877 }
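
/* Worked example (illustrative): suppose base carries the MPLS stack
 * [L1, L2] (topmost first), flow carries [L3, L2], and only the topmost LSE
 * differs, so base_n == flow_n == 2 and common_n == 1.  Then base_n - 1 ==
 * common_n and flow_n > common_n, so the first loop above emits a single
 * OVS_KEY_ATTR_MPLS set action rewriting L1 to L3 instead of a pop followed
 * by a push, and the second loop has nothing left to do. */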
7878
7879 static void
7880 get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
7881 {
7882 ipv4->ipv4_src = flow->nw_src;
7883 ipv4->ipv4_dst = flow->nw_dst;
7884 ipv4->ipv4_proto = flow->nw_proto;
7885 ipv4->ipv4_tos = flow->nw_tos;
7886 ipv4->ipv4_ttl = flow->nw_ttl;
7887 ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7888 }
7889
7890 static void
7891 put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
7892 {
7893 flow->nw_src = ipv4->ipv4_src;
7894 flow->nw_dst = ipv4->ipv4_dst;
7895 flow->nw_proto = ipv4->ipv4_proto;
7896 flow->nw_tos = ipv4->ipv4_tos;
7897 flow->nw_ttl = ipv4->ipv4_ttl;
7898 flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
7899 }
7900
7901 static void
7902 commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
7903 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7904 bool use_masked)
7905 {
7906 struct ovs_key_ipv4 key, mask, base;
7907 struct offsetof_sizeof ovs_key_ipv4_offsetof_sizeof_arr[] =
7908 OVS_KEY_IPV4_OFFSETOF_SIZEOF_ARR;
7909
7910 /* Check that nw_proto and nw_frag remain unchanged. */
7911 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7912 flow->nw_frag == base_flow->nw_frag);
7913
7914 get_ipv4_key(flow, &key, false);
7915 get_ipv4_key(base_flow, &base, false);
7916 get_ipv4_key(&wc->masks, &mask, true);
7917 mask.ipv4_proto = 0; /* Not writeable. */
7918 mask.ipv4_frag = 0; /* Not writable. */
7919
7920 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7921 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7922 mask.ipv4_tos &= ~IP_ECN_MASK;
7923 }
7924
7925 if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
7926 ovs_key_ipv4_offsetof_sizeof_arr, odp_actions)) {
7927 put_ipv4_key(&base, base_flow, false);
7928 if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
7929 put_ipv4_key(&mask, &wc->masks, true);
7930 }
7931 }
7932 }
7933
7934 static void
7935 get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
7936 {
7937 ipv6->ipv6_src = flow->ipv6_src;
7938 ipv6->ipv6_dst = flow->ipv6_dst;
7939 ipv6->ipv6_label = flow->ipv6_label;
7940 ipv6->ipv6_proto = flow->nw_proto;
7941 ipv6->ipv6_tclass = flow->nw_tos;
7942 ipv6->ipv6_hlimit = flow->nw_ttl;
7943 ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7944 }
7945
7946 static void
7947 put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
7948 {
7949 flow->ipv6_src = ipv6->ipv6_src;
7950 flow->ipv6_dst = ipv6->ipv6_dst;
7951 flow->ipv6_label = ipv6->ipv6_label;
7952 flow->nw_proto = ipv6->ipv6_proto;
7953 flow->nw_tos = ipv6->ipv6_tclass;
7954 flow->nw_ttl = ipv6->ipv6_hlimit;
7955 flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
7956 }
7957
7958 static void
7959 commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
7960 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7961 bool use_masked)
7962 {
7963 struct ovs_key_ipv6 key, mask, base;
7964 struct offsetof_sizeof ovs_key_ipv6_offsetof_sizeof_arr[] =
7965 OVS_KEY_IPV6_OFFSETOF_SIZEOF_ARR;
7966
7967 /* Check that nw_proto and nw_frag remain unchanged. */
7968 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7969 flow->nw_frag == base_flow->nw_frag);
7970
7971 get_ipv6_key(flow, &key, false);
7972 get_ipv6_key(base_flow, &base, false);
7973 get_ipv6_key(&wc->masks, &mask, true);
7974 mask.ipv6_proto = 0; /* Not writeable. */
7975 mask.ipv6_frag = 0; /* Not writable. */
7976 mask.ipv6_label &= htonl(IPV6_LABEL_MASK); /* Not writable. */
7977
7978 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7979 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7980 mask.ipv6_tclass &= ~IP_ECN_MASK;
7981 }
7982
7983 if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
7984 ovs_key_ipv6_offsetof_sizeof_arr, odp_actions)) {
7985 put_ipv6_key(&base, base_flow, false);
7986 if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
7987 put_ipv6_key(&mask, &wc->masks, true);
7988 }
7989 }
7990 }
7991
7992 static void
7993 get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
7994 {
7995 /* ARP key has padding, clear it. */
7996 memset(arp, 0, sizeof *arp);
7997
7998 arp->arp_sip = flow->nw_src;
7999 arp->arp_tip = flow->nw_dst;
8000 arp->arp_op = flow->nw_proto == UINT8_MAX ?
8001 OVS_BE16_MAX : htons(flow->nw_proto);
8002 arp->arp_sha = flow->arp_sha;
8003 arp->arp_tha = flow->arp_tha;
8004 }
8005
8006 static void
8007 put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
8008 {
8009 flow->nw_src = arp->arp_sip;
8010 flow->nw_dst = arp->arp_tip;
8011 flow->nw_proto = ntohs(arp->arp_op);
8012 flow->arp_sha = arp->arp_sha;
8013 flow->arp_tha = arp->arp_tha;
8014 }
8015
8016 static enum slow_path_reason
8017 commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
8018 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
8019 {
8020 struct ovs_key_arp key, mask, base;
8021 struct offsetof_sizeof ovs_key_arp_offsetof_sizeof_arr[] =
8022 OVS_KEY_ARP_OFFSETOF_SIZEOF_ARR;
8023
8024 get_arp_key(flow, &key);
8025 get_arp_key(base_flow, &base);
8026 get_arp_key(&wc->masks, &mask);
8027
8028 if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
8029 ovs_key_arp_offsetof_sizeof_arr, odp_actions)) {
8030 put_arp_key(&base, base_flow);
8031 put_arp_key(&mask, &wc->masks);
8032 return SLOW_ACTION;
8033 }
8034 return 0;
8035 }
8036
8037 static void
8038 get_icmp_key(const struct flow *flow, struct ovs_key_icmp *icmp)
8039 {
8040 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
8041 icmp->icmp_type = ntohs(flow->tp_src);
8042 icmp->icmp_code = ntohs(flow->tp_dst);
8043 }
8044
8045 static void
8046 put_icmp_key(const struct ovs_key_icmp *icmp, struct flow *flow)
8047 {
8048 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
8049 flow->tp_src = htons(icmp->icmp_type);
8050 flow->tp_dst = htons(icmp->icmp_code);
8051 }
8052
8053 static enum slow_path_reason
8054 commit_set_icmp_action(const struct flow *flow, struct flow *base_flow,
8055 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
8056 {
8057 struct ovs_key_icmp key, mask, base;
8058 struct offsetof_sizeof ovs_key_icmp_offsetof_sizeof_arr[] =
8059 OVS_KEY_ICMP_OFFSETOF_SIZEOF_ARR;
8060 enum ovs_key_attr attr;
8061
8062 if (is_icmpv4(flow, NULL)) {
8063 attr = OVS_KEY_ATTR_ICMP;
8064 } else if (is_icmpv6(flow, NULL)) {
8065 attr = OVS_KEY_ATTR_ICMPV6;
8066 } else {
8067 return 0;
8068 }
8069
8070 get_icmp_key(flow, &key);
8071 get_icmp_key(base_flow, &base);
8072 get_icmp_key(&wc->masks, &mask);
8073
8074 if (commit(attr, false, &key, &base, &mask, sizeof key,
8075 ovs_key_icmp_offsetof_sizeof_arr, odp_actions)) {
8076 put_icmp_key(&base, base_flow);
8077 put_icmp_key(&mask, &wc->masks);
8078 return SLOW_ACTION;
8079 }
8080 return 0;
8081 }
8082
8083 static void
8084 get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
8085 {
8086 nd->nd_target = flow->nd_target;
8087 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
8088 nd->nd_sll = flow->arp_sha;
8089 nd->nd_tll = flow->arp_tha;
8090 }
8091
8092 static void
8093 put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
8094 {
8095 flow->nd_target = nd->nd_target;
8096 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
8097 flow->arp_sha = nd->nd_sll;
8098 flow->arp_tha = nd->nd_tll;
8099 }
8100
8101 static void
8102 get_nd_extensions_key(const struct flow *flow,
8103 struct ovs_key_nd_extensions *nd_ext)
8104 {
8105 /* ND Extensions key has padding, clear it. */
8106 memset(nd_ext, 0, sizeof *nd_ext);
8107 nd_ext->nd_reserved = flow->igmp_group_ip4;
8108 nd_ext->nd_options_type = ntohs(flow->tcp_flags);
8109 }
8110
8111 static void
8112 put_nd_extensions_key(const struct ovs_key_nd_extensions *nd_ext,
8113 struct flow *flow)
8114 {
8115 flow->igmp_group_ip4 = nd_ext->nd_reserved;
8116 flow->tcp_flags = htons(nd_ext->nd_options_type);
8117 }
8118
8119 static enum slow_path_reason
8120 commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
8121 struct ofpbuf *odp_actions,
8122 struct flow_wildcards *wc, bool use_masked)
8123 {
8124 struct ovs_key_nd key, mask, base;
8125 struct offsetof_sizeof ovs_key_nd_offsetof_sizeof_arr[] =
8126 OVS_KEY_ND_OFFSETOF_SIZEOF_ARR;
8127
8128 get_nd_key(flow, &key);
8129 get_nd_key(base_flow, &base);
8130 get_nd_key(&wc->masks, &mask);
8131
8132 if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
8133 ovs_key_nd_offsetof_sizeof_arr, odp_actions)) {
8134 put_nd_key(&base, base_flow);
8135 put_nd_key(&mask, &wc->masks);
8136 return SLOW_ACTION;
8137 }
8138
8139 return 0;
8140 }
8141
8142 static enum slow_path_reason
8143 commit_set_nd_extensions_action(const struct flow *flow,
8144 struct flow *base_flow,
8145 struct ofpbuf *odp_actions,
8146 struct flow_wildcards *wc, bool use_masked)
8147 {
8148 struct ovs_key_nd_extensions key, mask, base;
8149 struct offsetof_sizeof ovs_key_nd_extensions_offsetof_sizeof_arr[] =
8150 OVS_KEY_ND_EXTENSIONS_OFFSETOF_SIZEOF_ARR;
8151
8152 get_nd_extensions_key(flow, &key);
8153 get_nd_extensions_key(base_flow, &base);
8154 get_nd_extensions_key(&wc->masks, &mask);
8155
8156 if (commit(OVS_KEY_ATTR_ND_EXTENSIONS, use_masked, &key, &base, &mask,
8157 sizeof key, ovs_key_nd_extensions_offsetof_sizeof_arr,
8158 odp_actions)) {
8159 put_nd_extensions_key(&base, base_flow);
8160 put_nd_extensions_key(&mask, &wc->masks);
8161 return SLOW_ACTION;
8162 }
8163 return 0;
8164 }
8165
8166 static enum slow_path_reason
8167 commit_set_nw_action(const struct flow *flow, struct flow *base,
8168 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8169 bool use_masked)
8170 {
8171 uint32_t reason;
8172
8173 /* Check if 'flow' really has an L3 header. */
8174 if (!flow->nw_proto) {
8175 return 0;
8176 }
8177
8178 switch (ntohs(base->dl_type)) {
8179 case ETH_TYPE_IP:
8180 commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
8181 break;
8182
8183 case ETH_TYPE_IPV6:
8184 commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
8185 if (base->nw_proto == IPPROTO_ICMPV6) {
8186 /* Commit extended attrs first to make sure
8187 * correct options are added. */
8188 reason = commit_set_nd_extensions_action(flow, base,
8189 odp_actions, wc, use_masked);
8190 reason |= commit_set_nd_action(flow, base, odp_actions,
8191 wc, use_masked);
8192 return reason;
8193 }
8194 break;
8195
8196 case ETH_TYPE_ARP:
8197 return commit_set_arp_action(flow, base, odp_actions, wc);
8198 }
8199
8200 return 0;
8201 }
8202
8203 static inline void
8204 get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh, bool is_mask)
8205 {
8206 *nsh = flow->nsh;
8207 if (!is_mask) {
8208 if (nsh->mdtype != NSH_M_TYPE1) {
8209 memset(nsh->context, 0, sizeof(nsh->context));
8210 }
8211 }
8212 }
8213
8214 static inline void
8215 put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
8216 bool is_mask OVS_UNUSED)
8217 {
8218 flow->nsh = *nsh;
8219 if (flow->nsh.mdtype != NSH_M_TYPE1) {
8220 memset(flow->nsh.context, 0, sizeof(flow->nsh.context));
8221 }
8222 }
8223
8224 static bool
8225 commit_nsh(const struct ovs_key_nsh * flow_nsh, bool use_masked_set,
8226 const struct ovs_key_nsh *key, struct ovs_key_nsh *base,
8227 struct ovs_key_nsh *mask, size_t size,
8228 struct ofpbuf *odp_actions)
8229 {
8230 enum ovs_key_attr attr = OVS_KEY_ATTR_NSH;
8231
8232 if (memcmp(key, base, size) == 0) {
8233 /* Mask bits are set when we have either read or set the corresponding
8234 * values. Masked bits will be exact-matched, no need to set them
8235 * if the value did not actually change. */
8236 return false;
8237 }
8238
8239 bool fully_masked = odp_mask_is_exact(attr, mask, size);
8240
8241 if (use_masked_set && !fully_masked) {
8242 size_t nsh_key_ofs;
8243 struct ovs_nsh_key_base nsh_base;
8244 struct ovs_nsh_key_base nsh_base_mask;
8245 struct ovs_nsh_key_md1 md1;
8246 struct ovs_nsh_key_md1 md1_mask;
8247 size_t offset = nl_msg_start_nested(odp_actions,
8248 OVS_ACTION_ATTR_SET_MASKED);
8249
8250 nsh_base.flags = key->flags;
8251 nsh_base.ttl = key->ttl;
8252 nsh_base.mdtype = key->mdtype;
8253 nsh_base.np = key->np;
8254 nsh_base.path_hdr = key->path_hdr;
8255
8256 nsh_base_mask.flags = mask->flags;
8257 nsh_base_mask.ttl = mask->ttl;
8258 nsh_base_mask.mdtype = mask->mdtype;
8259 nsh_base_mask.np = mask->np;
8260 nsh_base_mask.path_hdr = mask->path_hdr;
8261
8262 /* OVS_KEY_ATTR_NSH keys */
8263 nsh_key_ofs = nl_msg_start_nested(odp_actions, OVS_KEY_ATTR_NSH);
8264
8265 /* put value and mask for OVS_NSH_KEY_ATTR_BASE */
8266 char *data = nl_msg_put_unspec_uninit(odp_actions,
8267 OVS_NSH_KEY_ATTR_BASE,
8268 2 * sizeof(nsh_base));
8269 const char *lkey = (char *)&nsh_base, *lmask = (char *)&nsh_base_mask;
8270 size_t lkey_size = sizeof(nsh_base);
8271
8272 while (lkey_size--) {
8273 *data++ = *lkey++ & *lmask++;
8274 }
8275 lmask = (char *)&nsh_base_mask;
8276 memcpy(data, lmask, sizeof(nsh_base_mask));
8277
8278 switch (key->mdtype) {
8279 case NSH_M_TYPE1:
8280 memcpy(md1.context, key->context, sizeof key->context);
8281 memcpy(md1_mask.context, mask->context, sizeof mask->context);
8282
8283 /* put value and mask for OVS_NSH_KEY_ATTR_MD1 */
8284 data = nl_msg_put_unspec_uninit(odp_actions,
8285 OVS_NSH_KEY_ATTR_MD1,
8286 2 * sizeof(md1));
8287 lkey = (char *)&md1;
8288 lmask = (char *)&md1_mask;
8289 lkey_size = sizeof(md1);
8290
8291 while (lkey_size--) {
8292 *data++ = *lkey++ & *lmask++;
8293 }
8294 lmask = (char *)&md1_mask;
8295 memcpy(data, lmask, sizeof(md1_mask));
8296 break;
8297 case NSH_M_TYPE2:
8298 default:
8299 /* No match support for other MD formats yet. */
8300 break;
8301 }
8302
8303 nl_msg_end_nested(odp_actions, nsh_key_ofs);
8304
8305 nl_msg_end_nested(odp_actions, offset);
8306 } else {
8307 if (!fully_masked) {
8308 memset(mask, 0xff, size);
8309 }
8310 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
8311 nsh_key_to_attr(odp_actions, flow_nsh, NULL, 0, false);
8312 nl_msg_end_nested(odp_actions, offset);
8313 }
8314 memcpy(base, key, size);
8315 return true;
8316 }
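
/* For reference, the masked NSH set emitted above nests its attributes as
 * follows; within each leaf attribute the value bytes (with unmasked bits
 * cleared) are stored first, immediately followed by the mask bytes:
 *
 *     OVS_ACTION_ATTR_SET_MASKED
 *         OVS_KEY_ATTR_NSH
 *             OVS_NSH_KEY_ATTR_BASE: ovs_nsh_key_base value, then mask
 *             OVS_NSH_KEY_ATTR_MD1:  ovs_nsh_key_md1 value, then mask
 *                                    (present only for NSH_M_TYPE1 metadata)
 */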
8317
8318 static void
8319 commit_set_nsh_action(const struct flow *flow, struct flow *base_flow,
8320 struct ofpbuf *odp_actions,
8321 struct flow_wildcards *wc,
8322 bool use_masked)
8323 {
8324 struct ovs_key_nsh key, mask, base;
8325
8326 if (flow->dl_type != htons(ETH_TYPE_NSH) ||
8327 !memcmp(&base_flow->nsh, &flow->nsh, sizeof base_flow->nsh)) {
8328 return;
8329 }
8330
8331 /* Check that mdtype and np remain unchanged. */
8332 ovs_assert(flow->nsh.mdtype == base_flow->nsh.mdtype &&
8333 flow->nsh.np == base_flow->nsh.np);
8334
8335 get_nsh_key(flow, &key, false);
8336 get_nsh_key(base_flow, &base, false);
8337 get_nsh_key(&wc->masks, &mask, true);
8338 mask.mdtype = 0; /* Not writable. */
8339 mask.np = 0; /* Not writable. */
8340
8341 if (commit_nsh(&base_flow->nsh, use_masked, &key, &base, &mask,
8342 sizeof key, odp_actions)) {
8343 put_nsh_key(&base, base_flow, false);
8344 if (mask.mdtype != 0) { /* Mask was changed by commit_nsh(). */
8345 put_nsh_key(&mask, &wc->masks, true);
8346 }
8347 }
8348 }
8349
8350 /* TCP, UDP, and SCTP keys have the same layout. */
8351 BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
8352 sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
8353
8354 static void
8355 get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
8356 {
8357 tp->tcp.tcp_src = flow->tp_src;
8358 tp->tcp.tcp_dst = flow->tp_dst;
8359 }
8360
8361 static void
8362 put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
8363 {
8364 flow->tp_src = tp->tcp.tcp_src;
8365 flow->tp_dst = tp->tcp.tcp_dst;
8366 }
8367
8368 static void
8369 commit_set_port_action(const struct flow *flow, struct flow *base_flow,
8370 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8371 bool use_masked)
8372 {
8373 enum ovs_key_attr key_type;
8374 union ovs_key_tp key, mask, base;
8375 struct offsetof_sizeof ovs_key_tp_offsetof_sizeof_arr[] =
8376 OVS_KEY_TCP_OFFSETOF_SIZEOF_ARR;
8377
8378 /* Check if 'flow' really has an L3 header. */
8379 if (!flow->nw_proto) {
8380 return;
8381 }
8382
8383 if (!is_ip_any(base_flow)) {
8384 return;
8385 }
8386
8387 if (flow->nw_proto == IPPROTO_TCP) {
8388 key_type = OVS_KEY_ATTR_TCP;
8389 } else if (flow->nw_proto == IPPROTO_UDP) {
8390 key_type = OVS_KEY_ATTR_UDP;
8391 } else if (flow->nw_proto == IPPROTO_SCTP) {
8392 key_type = OVS_KEY_ATTR_SCTP;
8393 } else {
8394 return;
8395 }
8396
8397 get_tp_key(flow, &key);
8398 get_tp_key(base_flow, &base);
8399 get_tp_key(&wc->masks, &mask);
8400
8401 if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
8402 ovs_key_tp_offsetof_sizeof_arr, odp_actions)) {
8403 put_tp_key(&base, base_flow);
8404 put_tp_key(&mask, &wc->masks);
8405 }
8406 }
8407
8408 static void
8409 commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
8410 struct ofpbuf *odp_actions,
8411 struct flow_wildcards *wc,
8412 bool use_masked)
8413 {
8414 uint32_t key, mask, base;
8415 struct offsetof_sizeof ovs_key_prio_offsetof_sizeof_arr[] = {
8416 {0, sizeof(uint32_t)},
8417 {0, 0}
8418 };
8419
8420 key = flow->skb_priority;
8421 base = base_flow->skb_priority;
8422 mask = wc->masks.skb_priority;
8423
8424 if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
8425 sizeof key, ovs_key_prio_offsetof_sizeof_arr, odp_actions)) {
8426 base_flow->skb_priority = base;
8427 wc->masks.skb_priority = mask;
8428 }
8429 }
8430
8431 static void
8432 commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
8433 struct ofpbuf *odp_actions,
8434 struct flow_wildcards *wc,
8435 bool use_masked)
8436 {
8437 uint32_t key, mask, base;
8438 struct offsetof_sizeof ovs_key_pkt_mark_offsetof_sizeof_arr[] = {
8439 {0, sizeof(uint32_t)},
8440 {0, 0}
8441 };
8442
8443 key = flow->pkt_mark;
8444 base = base_flow->pkt_mark;
8445 mask = wc->masks.pkt_mark;
8446
8447 if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
8448 sizeof key, ovs_key_pkt_mark_offsetof_sizeof_arr,
8449 odp_actions)) {
8450 base_flow->pkt_mark = base;
8451 wc->masks.pkt_mark = mask;
8452 }
8453 }
8454
8455 static void
8456 odp_put_pop_nsh_action(struct ofpbuf *odp_actions)
8457 {
8458 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_NSH);
8459 }
8460
8461 static void
8462 odp_put_push_nsh_action(struct ofpbuf *odp_actions,
8463 const struct flow *flow,
8464 struct ofpbuf *encap_data)
8465 {
8466 uint8_t * metadata = NULL;
8467 uint8_t md_size = 0;
8468
8469 switch (flow->nsh.mdtype) {
8470 case NSH_M_TYPE2:
8471 if (encap_data) {
8472 ovs_assert(encap_data->size < NSH_CTX_HDRS_MAX_LEN);
8473 metadata = encap_data->data;
8474 md_size = encap_data->size;
8475 } else {
8476 md_size = 0;
8477 }
8478 break;
8479 default:
8480 md_size = 0;
8481 break;
8482 }
8483 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_PUSH_NSH);
8484 nsh_key_to_attr(odp_actions, &flow->nsh, metadata, md_size, false);
8485 nl_msg_end_nested(odp_actions, offset);
8486 }
8487
8488 static void
8489 commit_encap_decap_action(const struct flow *flow,
8490 struct flow *base_flow,
8491 struct ofpbuf *odp_actions,
8492 struct flow_wildcards *wc,
8493 bool pending_encap, bool pending_decap,
8494 struct ofpbuf *encap_data)
8495 {
8496 if (pending_encap) {
8497 switch (ntohl(flow->packet_type)) {
8498 case PT_ETH: {
8499 /* push_eth */
8500 odp_put_push_eth_action(odp_actions, &flow->dl_src,
8501 &flow->dl_dst);
8502 base_flow->packet_type = flow->packet_type;
8503 base_flow->dl_src = flow->dl_src;
8504 base_flow->dl_dst = flow->dl_dst;
8505 break;
8506 }
8507 case PT_NSH:
8508 /* push_nsh */
8509 odp_put_push_nsh_action(odp_actions, flow, encap_data);
8510 base_flow->packet_type = flow->packet_type;
8511 /* Update all packet headers in base_flow. */
8512 memcpy(&base_flow->dl_dst, &flow->dl_dst,
8513 sizeof(*flow) - offsetof(struct flow, dl_dst));
8514 break;
8515 default:
8516 /* Only the above protocols are supported for encap.
8517 * The check is done at action translation. */
8518 OVS_NOT_REACHED();
8519 }
8520 } else if (pending_decap || flow->packet_type != base_flow->packet_type) {
8521 /* This is an explicit or implicit decap case. */
8522 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE &&
8523 base_flow->packet_type == htonl(PT_ETH)) {
8524 /* Generate pop_eth and continue without recirculation. */
8525 odp_put_pop_eth_action(odp_actions);
8526 base_flow->packet_type = flow->packet_type;
8527 base_flow->dl_src = eth_addr_zero;
8528 base_flow->dl_dst = eth_addr_zero;
8529 } else {
8530 /* All other decap cases require recirculation.
8531 * No need to update the base flow here. */
8532 switch (ntohl(base_flow->packet_type)) {
8533 case PT_NSH:
8534 /* pop_nsh. */
8535 odp_put_pop_nsh_action(odp_actions);
8536 break;
8537 default:
8538 /* Checks are done during translation. */
8539 OVS_NOT_REACHED();
8540 }
8541 }
8542 }
8543
8544 wc->masks.packet_type = OVS_BE32_MAX;
8545 }
8546
8547 /* If any of the flow key data that ODP actions can modify are different in
8548 * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
8549 * key from 'base' into 'flow', and then changes 'base' the same way. Does not
8550 * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
8551 * in addition to this function if needed. Sets fields in 'wc' that are
8552 * used as part of the action.
8553 *
8554 * In the common case, this function returns 0. If the flow key modification
8555 * requires the flow's packets to be forced into the userspace slow path, this
8556 * function returns SLOW_ACTION. This only happens when there is no ODP action
8557 * to modify some field that was actually modified. For example, there is no
8558 * ODP action to modify any ARP field, so such a modification triggers
8559 * SLOW_ACTION. (When this happens, packets that need such modification get
8560 * flushed to userspace and handled there, which works OK but much more slowly
8561 * than if the datapath handled it directly.) */
8562 enum slow_path_reason
8563 commit_odp_actions(const struct flow *flow, struct flow *base,
8564 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8565 bool use_masked, bool pending_encap, bool pending_decap,
8566 struct ofpbuf *encap_data)
8567 {
8568 /* If you add a field that OpenFlow actions can change, and that is visible
8569 * to the datapath (including all data fields), then you should also add
8570 * code here to commit changes to the field. */
8571 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
8572
8573 enum slow_path_reason slow1, slow2;
8574 bool mpls_done = false;
8575
8576 commit_encap_decap_action(flow, base, odp_actions, wc,
8577 pending_encap, pending_decap, encap_data);
8578 commit_set_ether_action(flow, base, odp_actions, wc, use_masked);
8579 /* Make packet a non-MPLS packet before committing L3/4 actions,
8580 * which would otherwise do nothing. */
8581 if (eth_type_mpls(base->dl_type) && !eth_type_mpls(flow->dl_type)) {
8582 commit_mpls_action(flow, base, odp_actions);
8583 mpls_done = true;
8584 }
8585 commit_set_nsh_action(flow, base, odp_actions, wc, use_masked);
8586 slow1 = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
8587 commit_set_port_action(flow, base, odp_actions, wc, use_masked);
8588 slow2 = commit_set_icmp_action(flow, base, odp_actions, wc);
8589 if (!mpls_done) {
8590 commit_mpls_action(flow, base, odp_actions);
8591 }
8592 commit_vlan_action(flow, base, odp_actions, wc);
8593 commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
8594 commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);
8595
8596 return slow1 ? slow1 : slow2;
8597 }
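
/* Illustrative sketch of a translation-layer caller; variable names are
 * hypothetical and the error, encap and recirculation handling of the real
 * caller (ofproto-dpif-xlate.c) is omitted.
 *
 *     struct flow base = *initial_flow;    copy taken before translation
 *     enum slow_path_reason slow;
 *
 *     slow = commit_odp_actions(modified_flow, &base, odp_actions, wc,
 *                               true, false, false, NULL);
 *     if (slow) {
 *         ...arrange for matching packets to take the userspace slow path...
 *     }
 */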