1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2019 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include <sys/types.h>
19 #include <netinet/in.h>
20 #include <arpa/inet.h>
21 #include "odp-util.h"
22 #include <errno.h>
23 #include <inttypes.h>
24 #include <math.h>
25 #include <netinet/icmp6.h>
26 #include <netinet/ip6.h>
27 #include <stdlib.h>
28 #include <string.h>
29
30 #include "byte-order.h"
31 #include "coverage.h"
32 #include "dpif.h"
33 #include "openvswitch/dynamic-string.h"
34 #include "flow.h"
35 #include "netlink.h"
36 #include "openvswitch/ofpbuf.h"
37 #include "packets.h"
38 #include "simap.h"
39 #include "timeval.h"
40 #include "tun-metadata.h"
41 #include "unaligned.h"
42 #include "util.h"
43 #include "uuid.h"
44 #include "openvswitch/vlog.h"
45 #include "openvswitch/match.h"
46 #include "odp-netlink-macros.h"
47 #include "csum.h"
48
49 VLOG_DEFINE_THIS_MODULE(odp_util);
50
51 /* The interface between userspace and kernel uses an "OVS_*" prefix.
52 * Since this is fairly non-specific for the OVS userspace components,
53 * "ODP_*" (Open vSwitch Datapath) is used as the prefix for
54 * interactions with the datapath.
55 */
56
57 /* The set of characters that may separate one action or one key attribute
58 * from another. */
59 static const char *delimiters = ", \t\r\n";
60 static const char *delimiters_end = ", \t\r\n)";
61
62 #define MAX_ODP_NESTED 32
63
64 struct parse_odp_context {
65 const struct simap *port_names;
66 int depth; /* Current nested depth of odp string. */
67 };
68
69 static int parse_odp_key_mask_attr(struct parse_odp_context *, const char *,
70 struct ofpbuf *, struct ofpbuf *);
71
72 static int parse_odp_key_mask_attr__(struct parse_odp_context *, const char *,
73 struct ofpbuf *, struct ofpbuf *);
74
75 static void format_odp_key_attr(const struct nlattr *a,
76 const struct nlattr *ma,
77 const struct hmap *portno_names, struct ds *ds,
78 bool verbose);
79
80 struct geneve_scan {
81 struct geneve_opt d[63];
82 int len;
83 };
84
85 static int scan_geneve(const char *s, struct geneve_scan *key,
86 struct geneve_scan *mask);
87 static void format_geneve_opts(const struct geneve_opt *opt,
88 const struct geneve_opt *mask, int opts_len,
89 struct ds *, bool verbose);
90
91 static struct nlattr *generate_all_wildcard_mask(const struct attr_len_tbl tbl[],
92 int max, struct ofpbuf *,
93 const struct nlattr *key);
94 static void format_u128(struct ds *d, const ovs_32aligned_u128 *key,
95 const ovs_32aligned_u128 *mask, bool verbose);
96 static int scan_u128(const char *s, ovs_u128 *value, ovs_u128 *mask);
97
98 static int parse_odp_action(struct parse_odp_context *context, const char *s,
99 struct ofpbuf *actions);
100
101 static int parse_odp_action__(struct parse_odp_context *context, const char *s,
102 struct ofpbuf *actions);
103
104 /* Returns one of the following for the action with the given OVS_ACTION_ATTR_*
105 * 'type':
106 *
107 * - For an action whose argument has a fixed length, returns that
108 * nonnegative length in bytes.
109 *
110 * - For an action with a variable-length argument, returns ATTR_LEN_VARIABLE.
111 *
112 * - For an invalid 'type', returns ATTR_LEN_INVALID. */
113 static int
114 odp_action_len(uint16_t type)
115 {
116 if (type > OVS_ACTION_ATTR_MAX) {
117 return -1;
118 }
119
120 switch ((enum ovs_action_attr) type) {
121 case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
122 case OVS_ACTION_ATTR_LB_OUTPUT: return sizeof(uint32_t);
123 case OVS_ACTION_ATTR_TRUNC: return sizeof(struct ovs_action_trunc);
124 case OVS_ACTION_ATTR_TUNNEL_PUSH: return ATTR_LEN_VARIABLE;
125 case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
126 case OVS_ACTION_ATTR_METER: return sizeof(uint32_t);
127 case OVS_ACTION_ATTR_USERSPACE: return ATTR_LEN_VARIABLE;
128 case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
129 case OVS_ACTION_ATTR_POP_VLAN: return 0;
130 case OVS_ACTION_ATTR_PUSH_MPLS: return sizeof(struct ovs_action_push_mpls);
131 case OVS_ACTION_ATTR_POP_MPLS: return sizeof(ovs_be16);
132 case OVS_ACTION_ATTR_RECIRC: return sizeof(uint32_t);
133 case OVS_ACTION_ATTR_HASH: return sizeof(struct ovs_action_hash);
134 case OVS_ACTION_ATTR_SET: return ATTR_LEN_VARIABLE;
135 case OVS_ACTION_ATTR_SET_MASKED: return ATTR_LEN_VARIABLE;
136 case OVS_ACTION_ATTR_SAMPLE: return ATTR_LEN_VARIABLE;
137 case OVS_ACTION_ATTR_CT: return ATTR_LEN_VARIABLE;
138 case OVS_ACTION_ATTR_CT_CLEAR: return 0;
139 case OVS_ACTION_ATTR_PUSH_ETH: return sizeof(struct ovs_action_push_eth);
140 case OVS_ACTION_ATTR_POP_ETH: return 0;
141 case OVS_ACTION_ATTR_CLONE: return ATTR_LEN_VARIABLE;
142 case OVS_ACTION_ATTR_PUSH_NSH: return ATTR_LEN_VARIABLE;
143 case OVS_ACTION_ATTR_POP_NSH: return 0;
144 case OVS_ACTION_ATTR_CHECK_PKT_LEN: return ATTR_LEN_VARIABLE;
145 case OVS_ACTION_ATTR_DROP: return sizeof(uint32_t);
146
147 case OVS_ACTION_ATTR_UNSPEC:
148 case __OVS_ACTION_ATTR_MAX:
149 return ATTR_LEN_INVALID;
150 }
151
152 return ATTR_LEN_INVALID;
153 }
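
/*
 * Editorial usage sketch (not part of the original source): a caller can use
 * the table above to validate an attribute 'a' before formatting it, roughly:
 *
 *     int expected = odp_action_len(nl_attr_type(a));
 *     if (expected != ATTR_LEN_VARIABLE
 *         && nl_attr_get_size(a) != expected) {
 *         ...treat the action as having a bad length...
 *     }
 *
 * This mirrors the check at the top of format_odp_action() below.
 */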
154
155 /* Returns a string form of 'attr'. The return value is either a statically
156 * allocated constant string or the 'bufsize'-byte buffer 'namebuf'. 'bufsize'
157 * should be at least OVS_KEY_ATTR_BUFSIZE. */
158 enum { OVS_KEY_ATTR_BUFSIZE = 3 + INT_STRLEN(unsigned int) + 1 };
159 static const char *
160 ovs_key_attr_to_string(enum ovs_key_attr attr, char *namebuf, size_t bufsize)
161 {
162 switch (attr) {
163 case OVS_KEY_ATTR_UNSPEC: return "unspec";
164 case OVS_KEY_ATTR_ENCAP: return "encap";
165 case OVS_KEY_ATTR_PRIORITY: return "skb_priority";
166 case OVS_KEY_ATTR_SKB_MARK: return "skb_mark";
167 case OVS_KEY_ATTR_CT_STATE: return "ct_state";
168 case OVS_KEY_ATTR_CT_ZONE: return "ct_zone";
169 case OVS_KEY_ATTR_CT_MARK: return "ct_mark";
170 case OVS_KEY_ATTR_CT_LABELS: return "ct_label";
171 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: return "ct_tuple4";
172 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: return "ct_tuple6";
173 case OVS_KEY_ATTR_TUNNEL: return "tunnel";
174 case OVS_KEY_ATTR_IN_PORT: return "in_port";
175 case OVS_KEY_ATTR_ETHERNET: return "eth";
176 case OVS_KEY_ATTR_VLAN: return "vlan";
177 case OVS_KEY_ATTR_ETHERTYPE: return "eth_type";
178 case OVS_KEY_ATTR_IPV4: return "ipv4";
179 case OVS_KEY_ATTR_IPV6: return "ipv6";
180 case OVS_KEY_ATTR_TCP: return "tcp";
181 case OVS_KEY_ATTR_TCP_FLAGS: return "tcp_flags";
182 case OVS_KEY_ATTR_UDP: return "udp";
183 case OVS_KEY_ATTR_SCTP: return "sctp";
184 case OVS_KEY_ATTR_ICMP: return "icmp";
185 case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
186 case OVS_KEY_ATTR_ARP: return "arp";
187 case OVS_KEY_ATTR_ND: return "nd";
188 case OVS_KEY_ATTR_ND_EXTENSIONS: return "nd_ext";
189 case OVS_KEY_ATTR_MPLS: return "mpls";
190 case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
191 case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
192 case OVS_KEY_ATTR_PACKET_TYPE: return "packet_type";
193 case OVS_KEY_ATTR_NSH: return "nsh";
194
195 case __OVS_KEY_ATTR_MAX:
196 default:
197 snprintf(namebuf, bufsize, "key%u", (unsigned int) attr);
198 return namebuf;
199 }
200 }
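
/*
 * Editorial example (added for illustration): the caller-provided buffer is
 * only used for attributes that have no fixed name, e.g.:
 *
 *     char namebuf[OVS_KEY_ATTR_BUFSIZE];
 *
 *     ovs_key_attr_to_string(OVS_KEY_ATTR_IPV4, namebuf, sizeof namebuf);
 *         => returns the constant string "ipv4"; 'namebuf' is untouched.
 *     ovs_key_attr_to_string(__OVS_KEY_ATTR_MAX, namebuf, sizeof namebuf);
 *         => returns 'namebuf', filled with the fallback "key<number>" form.
 */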
201
202 static void
203 format_generic_odp_action(struct ds *ds, const struct nlattr *a)
204 {
205 size_t len = nl_attr_get_size(a);
206
207 ds_put_format(ds, "action%d", nl_attr_type(a));
208 if (len) {
209 const uint8_t *unspec;
210 unsigned int i;
211
212 unspec = nl_attr_get(a);
213 for (i = 0; i < len; i++) {
214 ds_put_char(ds, i ? ' ': '(');
215 ds_put_format(ds, "%02x", unspec[i]);
216 }
217 ds_put_char(ds, ')');
218 }
219 }
220
221 static void
222 format_odp_sample_action(struct ds *ds, const struct nlattr *attr,
223 const struct hmap *portno_names)
224 {
225 static const struct nl_policy ovs_sample_policy[] = {
226 [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
227 [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
228 };
229 struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
230 double percentage;
231 const struct nlattr *nla_acts;
232 int len;
233
234 ds_put_cstr(ds, "sample");
235
236 if (!nl_parse_nested(attr, ovs_sample_policy, a, ARRAY_SIZE(a))) {
237 ds_put_cstr(ds, "(error)");
238 return;
239 }
240
241 percentage = (100.0 * nl_attr_get_u32(a[OVS_SAMPLE_ATTR_PROBABILITY])) /
242 UINT32_MAX;
243
244 ds_put_format(ds, "(sample=%.1f%%,", percentage);
245
246 ds_put_cstr(ds, "actions(");
247 nla_acts = nl_attr_get(a[OVS_SAMPLE_ATTR_ACTIONS]);
248 len = nl_attr_get_size(a[OVS_SAMPLE_ATTR_ACTIONS]);
249 format_odp_actions(ds, nla_acts, len, portno_names);
250 ds_put_format(ds, "))");
251 }
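
/*
 * Editorial example (not in the original source): for a sample action whose
 * OVS_SAMPLE_ATTR_PROBABILITY is UINT32_MAX / 2 and whose nested action list
 * is a single output to port 2, the function above renders approximately:
 *
 *     sample(sample=50.0%,actions(2))
 */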
252
253 static void
254 format_odp_clone_action(struct ds *ds, const struct nlattr *attr,
255 const struct hmap *portno_names)
256 {
257 const struct nlattr *nla_acts = nl_attr_get(attr);
258 int len = nl_attr_get_size(attr);
259
260 ds_put_cstr(ds, "clone");
261 ds_put_format(ds, "(");
262 format_odp_actions(ds, nla_acts, len, portno_names);
263 ds_put_format(ds, ")");
264 }
265
266 static void
267 format_nsh_key(struct ds *ds, const struct ovs_key_nsh *key)
268 {
269 ds_put_format(ds, "flags=%d", key->flags);
270 ds_put_format(ds, ",ttl=%d", key->ttl);
271 ds_put_format(ds, ",mdtype=%d", key->mdtype);
272 ds_put_format(ds, ",np=%d", key->np);
273 ds_put_format(ds, ",spi=0x%x",
274 nsh_path_hdr_to_spi_uint32(key->path_hdr));
275 ds_put_format(ds, ",si=%d",
276 nsh_path_hdr_to_si(key->path_hdr));
277
278 switch (key->mdtype) {
279 case NSH_M_TYPE1:
280 for (int i = 0; i < 4; i++) {
281 ds_put_format(ds, ",c%d=0x%x", i + 1, ntohl(key->context[i]));
282 }
283 break;
284 case NSH_M_TYPE2:
285 default:
286 /* No support for matching other metadata formats yet. */
287 break;
288 }
289 }
290
291 static void
292 format_uint8_masked(struct ds *s, bool *first, const char *name,
293 uint8_t value, uint8_t mask)
294 {
295 if (mask != 0) {
296 if (!*first) {
297 ds_put_char(s, ',');
298 }
299 ds_put_format(s, "%s=", name);
300 if (mask == UINT8_MAX) {
301 ds_put_format(s, "%"PRIu8, value);
302 } else {
303 ds_put_format(s, "0x%02"PRIx8"/0x%02"PRIx8, value, mask);
304 }
305 *first = false;
306 }
307 }
308
309 static void
310 format_be32_masked(struct ds *s, bool *first, const char *name,
311 ovs_be32 value, ovs_be32 mask)
312 {
313 if (mask != htonl(0)) {
314 if (!*first) {
315 ds_put_char(s, ',');
316 }
317 ds_put_format(s, "%s=", name);
318 if (mask == OVS_BE32_MAX) {
319 ds_put_format(s, "0x%"PRIx32, ntohl(value));
320 } else {
321 ds_put_format(s, "0x%"PRIx32"/0x%08"PRIx32,
322 ntohl(value), ntohl(mask));
323 }
324 *first = false;
325 }
326 }
327
328 static void
329 format_nsh_key_mask(struct ds *ds, const struct ovs_key_nsh *key,
330 const struct ovs_key_nsh *mask)
331 {
332 if (!mask) {
333 format_nsh_key(ds, key);
334 } else {
335 bool first = true;
336 uint32_t spi = nsh_path_hdr_to_spi_uint32(key->path_hdr);
337 uint32_t spi_mask = nsh_path_hdr_to_spi_uint32(mask->path_hdr);
338 if (spi_mask == (NSH_SPI_MASK >> NSH_SPI_SHIFT)) {
339 spi_mask = UINT32_MAX;
340 }
341 uint8_t si = nsh_path_hdr_to_si(key->path_hdr);
342 uint8_t si_mask = nsh_path_hdr_to_si(mask->path_hdr);
343
344 format_uint8_masked(ds, &first, "flags", key->flags, mask->flags);
345 format_uint8_masked(ds, &first, "ttl", key->ttl, mask->ttl);
346 format_uint8_masked(ds, &first, "mdtype", key->mdtype, mask->mdtype);
347 format_uint8_masked(ds, &first, "np", key->np, mask->np);
348 format_be32_masked(ds, &first, "spi", htonl(spi), htonl(spi_mask));
349 format_uint8_masked(ds, &first, "si", si, si_mask);
350 format_be32_masked(ds, &first, "c1", key->context[0],
351 mask->context[0]);
352 format_be32_masked(ds, &first, "c2", key->context[1],
353 mask->context[1]);
354 format_be32_masked(ds, &first, "c3", key->context[2],
355 mask->context[2]);
356 format_be32_masked(ds, &first, "c4", key->context[3],
357 mask->context[3]);
358 }
359 }
360
361 static void
362 format_odp_push_nsh_action(struct ds *ds,
363 const struct nsh_hdr *nsh_hdr)
364 {
365 size_t mdlen = nsh_hdr_len(nsh_hdr) - NSH_BASE_HDR_LEN;
366 uint32_t spi = ntohl(nsh_get_spi(nsh_hdr));
367 uint8_t si = nsh_get_si(nsh_hdr);
368 uint8_t flags = nsh_get_flags(nsh_hdr);
369 uint8_t ttl = nsh_get_ttl(nsh_hdr);
370
371 ds_put_cstr(ds, "push_nsh(");
372 ds_put_format(ds, "flags=%d", flags);
373 ds_put_format(ds, ",ttl=%d", ttl);
374 ds_put_format(ds, ",mdtype=%d", nsh_hdr->md_type);
375 ds_put_format(ds, ",np=%d", nsh_hdr->next_proto);
376 ds_put_format(ds, ",spi=0x%x", spi);
377 ds_put_format(ds, ",si=%d", si);
378 switch (nsh_hdr->md_type) {
379 case NSH_M_TYPE1: {
380 const struct nsh_md1_ctx *md1_ctx = &nsh_hdr->md1;
381 for (int i = 0; i < 4; i++) {
382 ds_put_format(ds, ",c%d=0x%x", i + 1,
383 ntohl(get_16aligned_be32(&md1_ctx->context[i])));
384 }
385 break;
386 }
387 case NSH_M_TYPE2: {
388 const struct nsh_md2_tlv *md2_ctx = &nsh_hdr->md2;
389 ds_put_cstr(ds, ",md2=");
390 ds_put_hex(ds, md2_ctx, mdlen);
391 break;
392 }
393 default:
394 OVS_NOT_REACHED();
395 }
396 ds_put_format(ds, ")");
397 }
398
399 static const char *
400 slow_path_reason_to_string(uint32_t reason)
401 {
402 switch ((enum slow_path_reason) reason) {
403 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
404 SLOW_PATH_REASONS
405 #undef SPR
406 }
407
408 return NULL;
409 }
410
411 const char *
412 slow_path_reason_to_explanation(enum slow_path_reason reason)
413 {
414 switch (reason) {
415 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
416 SLOW_PATH_REASONS
417 #undef SPR
418 }
419
420 return "<unknown>";
421 }
422
423 static int
424 parse_odp_flags(const char *s, const char *(*bit_to_string)(uint32_t),
425 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
426 {
427 return parse_flags(s, bit_to_string, ')', NULL, NULL,
428 res_flags, allowed, res_mask);
429 }
430
431 static void
432 format_odp_userspace_action(struct ds *ds, const struct nlattr *attr,
433 const struct hmap *portno_names)
434 {
435 static const struct nl_policy ovs_userspace_policy[] = {
436 [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
437 [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
438 .optional = true },
439 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = { .type = NL_A_U32,
440 .optional = true },
441 [OVS_USERSPACE_ATTR_ACTIONS] = { .type = NL_A_UNSPEC,
442 .optional = true },
443 };
444 struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
445 const struct nlattr *userdata_attr;
446 const struct nlattr *tunnel_out_port_attr;
447
448 if (!nl_parse_nested(attr, ovs_userspace_policy, a, ARRAY_SIZE(a))) {
449 ds_put_cstr(ds, "userspace(error)");
450 return;
451 }
452
453 ds_put_format(ds, "userspace(pid=%"PRIu32,
454 nl_attr_get_u32(a[OVS_USERSPACE_ATTR_PID]));
455
456 userdata_attr = a[OVS_USERSPACE_ATTR_USERDATA];
457
458 if (userdata_attr) {
459 const uint8_t *userdata = nl_attr_get(userdata_attr);
460 size_t userdata_len = nl_attr_get_size(userdata_attr);
461 bool userdata_unspec = true;
462 struct user_action_cookie cookie;
463
464 if (userdata_len == sizeof cookie) {
465 memcpy(&cookie, userdata, sizeof cookie);
466
467 userdata_unspec = false;
468
469 if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
470 ds_put_format(ds, ",sFlow("
471 "vid=%"PRIu16",pcp=%d,output=%"PRIu32")",
472 vlan_tci_to_vid(cookie.sflow.vlan_tci),
473 vlan_tci_to_pcp(cookie.sflow.vlan_tci),
474 cookie.sflow.output);
475 } else if (cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
476 ds_put_cstr(ds, ",slow_path(");
477 format_flags(ds, slow_path_reason_to_string,
478 cookie.slow_path.reason, ',');
479 ds_put_format(ds, ")");
480 } else if (cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
481 ds_put_format(ds, ",flow_sample(probability=%"PRIu16
482 ",collector_set_id=%"PRIu32
483 ",obs_domain_id=%"PRIu32
484 ",obs_point_id=%"PRIu32
485 ",output_port=",
486 cookie.flow_sample.probability,
487 cookie.flow_sample.collector_set_id,
488 cookie.flow_sample.obs_domain_id,
489 cookie.flow_sample.obs_point_id);
490 odp_portno_name_format(portno_names,
491 cookie.flow_sample.output_odp_port, ds);
492 if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_INGRESS) {
493 ds_put_cstr(ds, ",ingress");
494 } else if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_EGRESS) {
495 ds_put_cstr(ds, ",egress");
496 }
497 ds_put_char(ds, ')');
498 } else if (cookie.type == USER_ACTION_COOKIE_IPFIX) {
499 ds_put_format(ds, ",ipfix(output_port=");
500 odp_portno_name_format(portno_names,
501 cookie.ipfix.output_odp_port, ds);
502 ds_put_char(ds, ')');
503 } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
504 ds_put_format(ds, ",controller(reason=%"PRIu16
505 ",dont_send=%d"
506 ",continuation=%d"
507 ",recirc_id=%"PRIu32
508 ",rule_cookie=%#"PRIx64
509 ",controller_id=%"PRIu16
510 ",max_len=%"PRIu16,
511 cookie.controller.reason,
512 !!cookie.controller.dont_send,
513 !!cookie.controller.continuation,
514 cookie.controller.recirc_id,
515 ntohll(get_32aligned_be64(
516 &cookie.controller.rule_cookie)),
517 cookie.controller.controller_id,
518 cookie.controller.max_len);
519 ds_put_char(ds, ')');
520 } else {
521 userdata_unspec = true;
522 }
523 }
524
525 if (userdata_unspec) {
526 size_t i;
527 ds_put_format(ds, ",userdata(");
528 for (i = 0; i < userdata_len; i++) {
529 ds_put_format(ds, "%02x", userdata[i]);
530 }
531 ds_put_char(ds, ')');
532 }
533 }
534
535 if (a[OVS_USERSPACE_ATTR_ACTIONS]) {
536 ds_put_cstr(ds, ",actions");
537 }
538
539 tunnel_out_port_attr = a[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT];
540 if (tunnel_out_port_attr) {
541 ds_put_format(ds, ",tunnel_out_port=");
542 odp_portno_name_format(portno_names,
543 nl_attr_get_odp_port(tunnel_out_port_attr), ds);
544 }
545
546 ds_put_char(ds, ')');
547 }
548
549 static void
550 format_vlan_tci(struct ds *ds, ovs_be16 tci, ovs_be16 mask, bool verbose)
551 {
552 if (verbose || vlan_tci_to_vid(tci) || vlan_tci_to_vid(mask)) {
553 ds_put_format(ds, "vid=%"PRIu16, vlan_tci_to_vid(tci));
554 if (vlan_tci_to_vid(mask) != VLAN_VID_MASK) { /* Partially masked. */
555 ds_put_format(ds, "/0x%"PRIx16, vlan_tci_to_vid(mask));
556 }
557 ds_put_char(ds, ',');
558 }
559 if (verbose || vlan_tci_to_pcp(tci) || vlan_tci_to_pcp(mask)) {
560 ds_put_format(ds, "pcp=%d", vlan_tci_to_pcp(tci));
561 if (vlan_tci_to_pcp(mask) != (VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
562 ds_put_format(ds, "/0x%x", vlan_tci_to_pcp(mask));
563 }
564 ds_put_char(ds, ',');
565 }
566 if (!(tci & htons(VLAN_CFI))) {
567 ds_put_cstr(ds, "cfi=0");
568 ds_put_char(ds, ',');
569 }
570 ds_chomp(ds, ',');
571 }
572
573 static void
574 format_mpls_lse(struct ds *ds, ovs_be32 mpls_lse)
575 {
576 ds_put_format(ds, "label=%"PRIu32",tc=%d,ttl=%d,bos=%d",
577 mpls_lse_to_label(mpls_lse),
578 mpls_lse_to_tc(mpls_lse),
579 mpls_lse_to_ttl(mpls_lse),
580 mpls_lse_to_bos(mpls_lse));
581 }
582
583 static void
584 format_mpls(struct ds *ds, const struct ovs_key_mpls *mpls_key,
585 const struct ovs_key_mpls *mpls_mask, int n)
586 {
587 for (int i = 0; i < n; i++) {
588 ovs_be32 key = mpls_key[i].mpls_lse;
589
590 if (mpls_mask == NULL) {
591 format_mpls_lse(ds, key);
592 } else {
593 ovs_be32 mask = mpls_mask[i].mpls_lse;
594
595 ds_put_format(ds, "label=%"PRIu32"/0x%x,tc=%d/%x,ttl=%d/0x%x,bos=%d/%x",
596 mpls_lse_to_label(key), mpls_lse_to_label(mask),
597 mpls_lse_to_tc(key), mpls_lse_to_tc(mask),
598 mpls_lse_to_ttl(key), mpls_lse_to_ttl(mask),
599 mpls_lse_to_bos(key), mpls_lse_to_bos(mask));
600 }
601 ds_put_char(ds, ',');
602 }
603 ds_chomp(ds, ',');
604 }
605
606 static void
607 format_odp_recirc_action(struct ds *ds, uint32_t recirc_id)
608 {
609 ds_put_format(ds, "recirc(%#"PRIx32")", recirc_id);
610 }
611
612 static void
613 format_odp_hash_action(struct ds *ds, const struct ovs_action_hash *hash_act)
614 {
615 ds_put_format(ds, "hash(");
616
617 if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
618 ds_put_format(ds, "l4(%"PRIu32")", hash_act->hash_basis);
619 } else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
620 ds_put_format(ds, "sym_l4(%"PRIu32")", hash_act->hash_basis);
621 } else {
622 ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
623 hash_act->hash_alg);
624 }
625 ds_put_format(ds, ")");
626 }
627
628 static const void *
629 format_udp_tnl_push_header(struct ds *ds, const struct udp_header *udp)
630 {
631 ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16",csum=0x%"PRIx16"),",
632 ntohs(udp->udp_src), ntohs(udp->udp_dst),
633 ntohs(udp->udp_csum));
634
635 return udp + 1;
636 }
637
638 static void
639 format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
640 {
641 const struct eth_header *eth;
642 const void *l3;
643 const void *l4;
644 const struct udp_header *udp;
645
646 eth = (const struct eth_header *)data->header;
647
648 l3 = eth + 1;
649
650 /* Ethernet */
651 ds_put_format(ds, "header(size=%"PRIu32",type=%"PRIu32",eth(dst=",
652 data->header_len, data->tnl_type);
653 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
654 ds_put_format(ds, ",src=");
655 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
656 ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
657
658 if (eth->eth_type == htons(ETH_TYPE_IP)) {
659 /* IPv4 */
660 const struct ip_header *ip = l3;
661 ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
662 ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
663 IP_ARGS(get_16aligned_be32(&ip->ip_src)),
664 IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
665 ip->ip_proto, ip->ip_tos,
666 ip->ip_ttl,
667 ntohs(ip->ip_frag_off));
668 l4 = (ip + 1);
669 } else {
670 const struct ovs_16aligned_ip6_hdr *ip6 = l3;
671 struct in6_addr src, dst;
672 memcpy(&src, &ip6->ip6_src, sizeof src);
673 memcpy(&dst, &ip6->ip6_dst, sizeof dst);
674 uint32_t ipv6_flow = ntohl(get_16aligned_be32(&ip6->ip6_flow));
675
676 ds_put_format(ds, "ipv6(src=");
677 ipv6_format_addr(&src, ds);
678 ds_put_format(ds, ",dst=");
679 ipv6_format_addr(&dst, ds);
680 ds_put_format(ds, ",label=%i,proto=%"PRIu8",tclass=0x%"PRIx32
681 ",hlimit=%"PRIu8"),",
682 ipv6_flow & IPV6_LABEL_MASK, ip6->ip6_nxt,
683 (ipv6_flow >> 20) & 0xff, ip6->ip6_hlim);
684 l4 = (ip6 + 1);
685 }
686
687 udp = (const struct udp_header *) l4;
688
689 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
690 const struct vxlanhdr *vxh;
691
692 vxh = format_udp_tnl_push_header(ds, udp);
693
694 ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
695 ntohl(get_16aligned_be32(&vxh->vx_flags)),
696 ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
697 } else if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
698 const struct genevehdr *gnh;
699
700 gnh = format_udp_tnl_push_header(ds, udp);
701
702 ds_put_format(ds, "geneve(%s%svni=0x%"PRIx32,
703 gnh->oam ? "oam," : "",
704 gnh->critical ? "crit," : "",
705 ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
706
707 if (gnh->opt_len) {
708 ds_put_cstr(ds, ",options(");
709 format_geneve_opts(gnh->options, NULL, gnh->opt_len * 4,
710 ds, false);
711 ds_put_char(ds, ')');
712 }
713
714 ds_put_char(ds, ')');
715 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE ||
716 data->tnl_type == OVS_VPORT_TYPE_IP6GRE) {
717 const struct gre_base_hdr *greh;
718 ovs_16aligned_be32 *options;
719
720 greh = (const struct gre_base_hdr *) l4;
721
722 ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
723 ntohs(greh->flags), ntohs(greh->protocol));
724 options = (ovs_16aligned_be32 *)(greh + 1);
725 if (greh->flags & htons(GRE_CSUM)) {
726 ds_put_format(ds, ",csum=0x%"PRIx16, ntohs(*((ovs_be16 *)options)));
727 options++;
728 }
729 if (greh->flags & htons(GRE_KEY)) {
730 ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
731 options++;
732 }
733 if (greh->flags & htons(GRE_SEQ)) {
734 ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
735 options++;
736 }
737 ds_put_format(ds, ")");
738 } else if (data->tnl_type == OVS_VPORT_TYPE_ERSPAN ||
739 data->tnl_type == OVS_VPORT_TYPE_IP6ERSPAN) {
740 const struct gre_base_hdr *greh;
741 const struct erspan_base_hdr *ersh;
742
743 greh = (const struct gre_base_hdr *) l4;
744 ersh = ERSPAN_HDR(greh);
745
746 if (ersh->ver == 1) {
747 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
748 ersh + 1);
749 ds_put_format(ds, "erspan(ver=1,sid=0x%"PRIx16",idx=0x%"PRIx32")",
750 get_sid(ersh), ntohl(get_16aligned_be32(index)));
751 } else if (ersh->ver == 2) {
752 struct erspan_md2 *md2 = ALIGNED_CAST(struct erspan_md2 *,
753 ersh + 1);
754 ds_put_format(ds, "erspan(ver=2,sid=0x%"PRIx16
755 ",dir=%"PRIu8",hwid=0x%"PRIx8")",
756 get_sid(ersh), md2->dir, get_hwid(md2));
757 } else {
758 VLOG_WARN("%s Invalid ERSPAN version %d\n", __func__, ersh->ver);
759 }
760 } else if (data->tnl_type == OVS_VPORT_TYPE_GTPU) {
761 const struct gtpuhdr *gtph;
762
763 gtph = format_udp_tnl_push_header(ds, udp);
764
765 ds_put_format(ds, "gtpu(flags=0x%"PRIx8
766 ",msgtype=%"PRIu8",teid=0x%"PRIx32")",
767 gtph->md.flags, gtph->md.msgtype,
768 ntohl(get_16aligned_be32(&gtph->teid)));
769 }
770
771 ds_put_format(ds, ")");
772 }
773
774 static void
775 format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr,
776 const struct hmap *portno_names)
777 {
778 struct ovs_action_push_tnl *data;
779
780 data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
781
782 ds_put_cstr(ds, "tnl_push(tnl_port(");
783 odp_portno_name_format(portno_names, data->tnl_port, ds);
784 ds_put_cstr(ds, "),");
785 format_odp_tnl_push_header(ds, data);
786 ds_put_format(ds, ",out_port(");
787 odp_portno_name_format(portno_names, data->out_port, ds);
788 ds_put_cstr(ds, "))");
789 }
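
/*
 * Editorial example (hypothetical addresses and values, not from the original
 * source): a VXLAN tnl_push action is rendered by the functions above roughly
 * as the following, shown wrapped here for readability (the real output is a
 * single line):
 *
 *     tnl_push(tnl_port(4),
 *              header(size=50,type=4,
 *                     eth(dst=f8:bc:12:44:34:b6,src=f8:bc:12:46:58:e0,
 *                         dl_type=0x0800),
 *                     ipv4(src=1.1.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,
 *                          frag=0x4000),
 *                     udp(src=0,dst=4789,csum=0x0),
 *                     vxlan(flags=0x8000000,vni=0x1c7)),
 *              out_port(1))
 */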
790
791 static const struct nl_policy ovs_nat_policy[] = {
792 [OVS_NAT_ATTR_SRC] = { .type = NL_A_FLAG, .optional = true, },
793 [OVS_NAT_ATTR_DST] = { .type = NL_A_FLAG, .optional = true, },
794 [OVS_NAT_ATTR_IP_MIN] = { .type = NL_A_UNSPEC, .optional = true,
795 .min_len = sizeof(struct in_addr),
796 .max_len = sizeof(struct in6_addr)},
797 [OVS_NAT_ATTR_IP_MAX] = { .type = NL_A_UNSPEC, .optional = true,
798 .min_len = sizeof(struct in_addr),
799 .max_len = sizeof(struct in6_addr)},
800 [OVS_NAT_ATTR_PROTO_MIN] = { .type = NL_A_U16, .optional = true, },
801 [OVS_NAT_ATTR_PROTO_MAX] = { .type = NL_A_U16, .optional = true, },
802 [OVS_NAT_ATTR_PERSISTENT] = { .type = NL_A_FLAG, .optional = true, },
803 [OVS_NAT_ATTR_PROTO_HASH] = { .type = NL_A_FLAG, .optional = true, },
804 [OVS_NAT_ATTR_PROTO_RANDOM] = { .type = NL_A_FLAG, .optional = true, },
805 };
806
807 static void
808 format_odp_ct_nat(struct ds *ds, const struct nlattr *attr)
809 {
810 struct nlattr *a[ARRAY_SIZE(ovs_nat_policy)];
811 size_t addr_len;
812 ovs_be32 ip_min, ip_max;
813 struct in6_addr ip6_min, ip6_max;
814 uint16_t proto_min, proto_max;
815
816 if (!nl_parse_nested(attr, ovs_nat_policy, a, ARRAY_SIZE(a))) {
817 ds_put_cstr(ds, "nat(error: nl_parse_nested() failed.)");
818 return;
819 }
820 /* If no type is given, then no other attributes may be given either. */
821 if (!(a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST])
822 && (a[OVS_NAT_ATTR_IP_MIN] || a[OVS_NAT_ATTR_IP_MAX]
823 || a[OVS_NAT_ATTR_PROTO_MIN] || a[OVS_NAT_ATTR_PROTO_MAX]
824 || a[OVS_NAT_ATTR_PERSISTENT] || a[OVS_NAT_ATTR_PROTO_HASH]
825 || a[OVS_NAT_ATTR_PROTO_RANDOM])) {
826 ds_put_cstr(ds, "nat(error: options allowed only with \"src\" or \"dst\")");
827 return;
828 }
829 /* SNAT and DNAT may not both be specified. */
830 if (a[OVS_NAT_ATTR_SRC] && a[OVS_NAT_ATTR_DST]) {
831 ds_put_cstr(ds, "nat(error: Only one of \"src\" or \"dst\" may be present.)");
832 return;
833 }
834 /* proto may not appear without ip. */
835 if (!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_PROTO_MIN]) {
836 ds_put_cstr(ds, "nat(error: proto but no IP.)");
837 return;
838 }
839 /* MAX may not appear without MIN. */
840 if ((!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX])
841 || (!a[OVS_NAT_ATTR_PROTO_MIN] && a[OVS_NAT_ATTR_PROTO_MAX])) {
842 ds_put_cstr(ds, "nat(error: range max without min.)");
843 return;
844 }
845 /* Address sizes must match. */
846 if ((a[OVS_NAT_ATTR_IP_MIN]
847 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(ovs_be32) &&
848 nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(struct in6_addr)))
849 || (a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX]
850 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN])
851 != nl_attr_get_size(a[OVS_NAT_ATTR_IP_MAX])))) {
852 ds_put_cstr(ds, "nat(error: IP address sizes do not match)");
853 return;
854 }
855
856 addr_len = a[OVS_NAT_ATTR_IP_MIN]
857 ? nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) : 0;
858 ip_min = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MIN]
859 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MIN]) : 0;
860 ip_max = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MAX]
861 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MAX]) : 0;
862 if (addr_len == sizeof ip6_min) {
863 ip6_min = a[OVS_NAT_ATTR_IP_MIN]
864 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MIN])
865 : in6addr_any;
866 ip6_max = a[OVS_NAT_ATTR_IP_MAX]
867 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MAX])
868 : in6addr_any;
869 }
870 proto_min = a[OVS_NAT_ATTR_PROTO_MIN]
871 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MIN]) : 0;
872 proto_max = a[OVS_NAT_ATTR_PROTO_MAX]
873 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MAX]) : 0;
874
875 if ((addr_len == sizeof(ovs_be32)
876 && ip_max && ntohl(ip_min) > ntohl(ip_max))
877 || (addr_len == sizeof(struct in6_addr)
878 && !ipv6_mask_is_any(&ip6_max)
879 && memcmp(&ip6_min, &ip6_max, sizeof ip6_min) > 0)
880 || (proto_max && proto_min > proto_max)) {
881 ds_put_cstr(ds, "nat(range error)");
882 return;
883 }
884
885 ds_put_cstr(ds, "nat");
886 if (a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST]) {
887 ds_put_char(ds, '(');
888 if (a[OVS_NAT_ATTR_SRC]) {
889 ds_put_cstr(ds, "src");
890 } else if (a[OVS_NAT_ATTR_DST]) {
891 ds_put_cstr(ds, "dst");
892 }
893
894 if (addr_len > 0) {
895 ds_put_cstr(ds, "=");
896
897 if (addr_len == sizeof ip_min) {
898 ds_put_format(ds, IP_FMT, IP_ARGS(ip_min));
899
900 if (ip_max && ip_max != ip_min) {
901 ds_put_format(ds, "-"IP_FMT, IP_ARGS(ip_max));
902 }
903 } else if (addr_len == sizeof ip6_min) {
904 ipv6_format_addr_bracket(&ip6_min, ds, proto_min);
905
906 if (!ipv6_mask_is_any(&ip6_max) &&
907 memcmp(&ip6_max, &ip6_min, sizeof ip6_max) != 0) {
908 ds_put_char(ds, '-');
909 ipv6_format_addr_bracket(&ip6_max, ds, proto_min);
910 }
911 }
912 if (proto_min) {
913 ds_put_format(ds, ":%"PRIu16, proto_min);
914
915 if (proto_max && proto_max != proto_min) {
916 ds_put_format(ds, "-%"PRIu16, proto_max);
917 }
918 }
919 }
920 ds_put_char(ds, ',');
921 if (a[OVS_NAT_ATTR_PERSISTENT]) {
922 ds_put_cstr(ds, "persistent,");
923 }
924 if (a[OVS_NAT_ATTR_PROTO_HASH]) {
925 ds_put_cstr(ds, "hash,");
926 }
927 if (a[OVS_NAT_ATTR_PROTO_RANDOM]) {
928 ds_put_cstr(ds, "random,");
929 }
930 ds_chomp(ds, ',');
931 ds_put_char(ds, ')');
932 }
933 }
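
/*
 * Editorial example (not part of the original file): with OVS_NAT_ATTR_SRC,
 * an IPv4 address range, a port range, and the persistent flag, the formatter
 * above produces something like:
 *
 *     nat(src=10.0.0.240-10.0.0.254:32768-65535,persistent)
 *
 * whereas a bare OVS_NAT_ATTR_SRC flag with no range renders simply as
 * "nat(src)".
 */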
934
935 static const struct nl_policy ovs_conntrack_policy[] = {
936 [OVS_CT_ATTR_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
937 [OVS_CT_ATTR_FORCE_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
938 [OVS_CT_ATTR_ZONE] = { .type = NL_A_U16, .optional = true, },
939 [OVS_CT_ATTR_MARK] = { .type = NL_A_UNSPEC, .optional = true,
940 .min_len = sizeof(uint32_t) * 2 },
941 [OVS_CT_ATTR_LABELS] = { .type = NL_A_UNSPEC, .optional = true,
942 .min_len = sizeof(struct ovs_key_ct_labels) * 2 },
943 [OVS_CT_ATTR_HELPER] = { .type = NL_A_STRING, .optional = true,
944 .min_len = 1, .max_len = 16 },
945 [OVS_CT_ATTR_NAT] = { .type = NL_A_UNSPEC, .optional = true },
946 [OVS_CT_ATTR_TIMEOUT] = { .type = NL_A_STRING, .optional = true,
947 .min_len = 1, .max_len = 32 },
948 };
949
950 static void
951 format_odp_conntrack_action(struct ds *ds, const struct nlattr *attr)
952 {
953 struct nlattr *a[ARRAY_SIZE(ovs_conntrack_policy)];
954 const struct {
955 ovs_32aligned_u128 value;
956 ovs_32aligned_u128 mask;
957 } *label;
958 const uint32_t *mark;
959 const char *helper, *timeout;
960 uint16_t zone;
961 bool commit, force;
962 const struct nlattr *nat;
963
964 if (!nl_parse_nested(attr, ovs_conntrack_policy, a, ARRAY_SIZE(a))) {
965 ds_put_cstr(ds, "ct(error)");
966 return;
967 }
968
969 commit = a[OVS_CT_ATTR_COMMIT] ? true : false;
970 force = a[OVS_CT_ATTR_FORCE_COMMIT] ? true : false;
971 zone = a[OVS_CT_ATTR_ZONE] ? nl_attr_get_u16(a[OVS_CT_ATTR_ZONE]) : 0;
972 mark = a[OVS_CT_ATTR_MARK] ? nl_attr_get(a[OVS_CT_ATTR_MARK]) : NULL;
973 label = a[OVS_CT_ATTR_LABELS] ? nl_attr_get(a[OVS_CT_ATTR_LABELS]): NULL;
974 helper = a[OVS_CT_ATTR_HELPER] ? nl_attr_get(a[OVS_CT_ATTR_HELPER]) : NULL;
975 timeout = a[OVS_CT_ATTR_TIMEOUT] ?
976 nl_attr_get(a[OVS_CT_ATTR_TIMEOUT]) : NULL;
977 nat = a[OVS_CT_ATTR_NAT];
978
979 ds_put_format(ds, "ct");
980 if (commit || force || zone || mark || label || helper || timeout || nat) {
981 ds_put_cstr(ds, "(");
982 if (commit) {
983 ds_put_format(ds, "commit,");
984 }
985 if (force) {
986 ds_put_format(ds, "force_commit,");
987 }
988 if (zone) {
989 ds_put_format(ds, "zone=%"PRIu16",", zone);
990 }
991 if (mark) {
992 ds_put_format(ds, "mark=%#"PRIx32"/%#"PRIx32",", *mark,
993 *(mark + 1));
994 }
995 if (label) {
996 ds_put_format(ds, "label=");
997 format_u128(ds, &label->value, &label->mask, true);
998 ds_put_char(ds, ',');
999 }
1000 if (helper) {
1001 ds_put_format(ds, "helper=%s,", helper);
1002 }
1003 if (timeout) {
1004 ds_put_format(ds, "timeout=%s", timeout);
1005 }
1006 if (nat) {
1007 format_odp_ct_nat(ds, nat);
1008 }
1009 ds_chomp(ds, ',');
1010 ds_put_cstr(ds, ")");
1011 }
1012 }
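
/*
 * Editorial example (hypothetical values): a well-populated conntrack action
 * is rendered by the function above along the lines of:
 *
 *     ct(commit,zone=5,mark=0x1/0xff,label=0x1/0x1,helper=ftp,nat(src=10.0.0.1))
 *
 * while an attribute list with no options at all is printed as just "ct".
 */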
1013
1014 static const struct attr_len_tbl
1015 ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
1016 [OVS_NSH_KEY_ATTR_BASE] = { .len = 8 },
1017 [OVS_NSH_KEY_ATTR_MD1] = { .len = 16 },
1018 [OVS_NSH_KEY_ATTR_MD2] = { .len = ATTR_LEN_VARIABLE },
1019 };
1020
1021 static void
1022 format_odp_set_nsh(struct ds *ds, const struct nlattr *attr)
1023 {
1024 unsigned int left;
1025 const struct nlattr *a;
1026 struct ovs_key_nsh nsh;
1027 struct ovs_key_nsh nsh_mask;
1028
1029 memset(&nsh, 0, sizeof nsh);
1030 memset(&nsh_mask, 0xff, sizeof nsh_mask);
1031
1032 NL_NESTED_FOR_EACH (a, left, attr) {
1033 enum ovs_nsh_key_attr type = nl_attr_type(a);
1034 size_t len = nl_attr_get_size(a);
1035
1036 if (type >= OVS_NSH_KEY_ATTR_MAX) {
1037 return;
1038 }
1039
1040 int expected_len = ovs_nsh_key_attr_lens[type].len;
1041 if ((expected_len != ATTR_LEN_VARIABLE) && (len != 2 * expected_len)) {
1042 return;
1043 }
1044
1045 switch (type) {
1046 case OVS_NSH_KEY_ATTR_UNSPEC:
1047 break;
1048 case OVS_NSH_KEY_ATTR_BASE: {
1049 const struct ovs_nsh_key_base *base = nl_attr_get(a);
1050 const struct ovs_nsh_key_base *base_mask = base + 1;
1051 memcpy(&nsh, base, sizeof(*base));
1052 memcpy(&nsh_mask, base_mask, sizeof(*base_mask));
1053 break;
1054 }
1055 case OVS_NSH_KEY_ATTR_MD1: {
1056 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
1057 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
1058 memcpy(&nsh.context, &md1->context, sizeof(*md1));
1059 memcpy(&nsh_mask.context, &md1_mask->context, sizeof(*md1_mask));
1060 break;
1061 }
1062 case OVS_NSH_KEY_ATTR_MD2:
1063 case __OVS_NSH_KEY_ATTR_MAX:
1064 default:
1065 /* No support for matching other metadata formats yet. */
1066 break;
1067 }
1068 }
1069
1070 ds_put_cstr(ds, "set(nsh(");
1071 format_nsh_key_mask(ds, &nsh, &nsh_mask);
1072 ds_put_cstr(ds, "))");
1073 }
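
/*
 * Editorial note (added commentary): in the nested OVS_KEY_ATTR_NSH used by
 * masked set actions, each sub-attribute carries the key immediately followed
 * by its mask in the same payload, which is why the code above reads the mask
 * at 'base + 1' / 'md1 + 1' and why a fixed-length sub-attribute must be
 * exactly twice its nominal length. A formatted result looks roughly like:
 *
 *     set(nsh(flags=0,ttl=63,mdtype=1,np=3,spi=0x1234,si=255,c1=0x11223344))
 *
 * with fields whose mask is all-zero omitted.
 */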
1074
1075 static void
1076 format_odp_check_pkt_len_action(struct ds *ds, const struct nlattr *attr,
1077 const struct hmap *portno_names OVS_UNUSED)
1078 {
1079 static const struct nl_policy ovs_cpl_policy[] = {
1080 [OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = { .type = NL_A_U16 },
1081 [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = { .type = NL_A_NESTED },
1082 [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]
1083 = { .type = NL_A_NESTED },
1084 };
1085 struct nlattr *a[ARRAY_SIZE(ovs_cpl_policy)];
1086 ds_put_cstr(ds, "check_pkt_len");
1087 if (!nl_parse_nested(attr, ovs_cpl_policy, a, ARRAY_SIZE(a))) {
1088 ds_put_cstr(ds, "(error)");
1089 return;
1090 }
1091
1092 if (!a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] ||
1093 !a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]) {
1094 ds_put_cstr(ds, "(error)");
1095 return;
1096 }
1097
1098 uint16_t pkt_len = nl_attr_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
1099 ds_put_format(ds, "(size=%u,gt(", pkt_len);
1100 const struct nlattr *acts;
1101 acts = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
1102 format_odp_actions(ds, nl_attr_get(acts), nl_attr_get_size(acts),
1103 portno_names);
1104
1105 ds_put_cstr(ds, "),le(");
1106 acts = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
1107 format_odp_actions(ds, nl_attr_get(acts), nl_attr_get_size(acts),
1108 portno_names);
1109 ds_put_cstr(ds, "))");
1110 }
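
/*
 * Editorial example (hypothetical port numbers): the formatter above renders a
 * check_pkt_len action whose "greater" branch outputs to port 4 and whose
 * "less or equal" branch outputs to port 5 as:
 *
 *     check_pkt_len(size=200,gt(4),le(5))
 */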
1111
1112 static void
1113 format_odp_action(struct ds *ds, const struct nlattr *a,
1114 const struct hmap *portno_names)
1115 {
1116 int expected_len;
1117 enum ovs_action_attr type = nl_attr_type(a);
1118 size_t size;
1119
1120 expected_len = odp_action_len(nl_attr_type(a));
1121 if (expected_len != ATTR_LEN_VARIABLE &&
1122 nl_attr_get_size(a) != expected_len) {
1123 ds_put_format(ds, "bad length %"PRIuSIZE", expected %d for: ",
1124 nl_attr_get_size(a), expected_len);
1125 format_generic_odp_action(ds, a);
1126 return;
1127 }
1128
1129 switch (type) {
1130 case OVS_ACTION_ATTR_METER:
1131 ds_put_format(ds, "meter(%"PRIu32")", nl_attr_get_u32(a));
1132 break;
1133 case OVS_ACTION_ATTR_OUTPUT:
1134 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1135 break;
1136 case OVS_ACTION_ATTR_LB_OUTPUT:
1137 ds_put_format(ds, "lb_output(%"PRIu32")", nl_attr_get_u32(a));
1138 break;
1139 case OVS_ACTION_ATTR_TRUNC: {
1140 const struct ovs_action_trunc *trunc =
1141 nl_attr_get_unspec(a, sizeof *trunc);
1142
1143 ds_put_format(ds, "trunc(%"PRIu32")", trunc->max_len);
1144 break;
1145 }
1146 case OVS_ACTION_ATTR_TUNNEL_POP:
1147 ds_put_cstr(ds, "tnl_pop(");
1148 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1149 ds_put_char(ds, ')');
1150 break;
1151 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1152 format_odp_tnl_push_action(ds, a, portno_names);
1153 break;
1154 case OVS_ACTION_ATTR_USERSPACE:
1155 format_odp_userspace_action(ds, a, portno_names);
1156 break;
1157 case OVS_ACTION_ATTR_RECIRC:
1158 format_odp_recirc_action(ds, nl_attr_get_u32(a));
1159 break;
1160 case OVS_ACTION_ATTR_HASH:
1161 format_odp_hash_action(ds, nl_attr_get(a));
1162 break;
1163 case OVS_ACTION_ATTR_SET_MASKED:
1164 a = nl_attr_get(a);
1165 /* OVS_KEY_ATTR_NSH is a nested attribute, so it needs special processing. */
1166 if (nl_attr_type(a) == OVS_KEY_ATTR_NSH) {
1167 format_odp_set_nsh(ds, a);
1168 break;
1169 }
1170 size = nl_attr_get_size(a) / 2;
1171 ds_put_cstr(ds, "set(");
1172
1173 /* Masked set actions are not supported for the tunnel key, which is bigger. */
1174 if (size <= sizeof(struct ovs_key_ipv6)) {
1175 struct nlattr attr[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1176 sizeof(struct nlattr))];
1177 struct nlattr mask[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1178 sizeof(struct nlattr))];
1179
1180 mask->nla_type = attr->nla_type = nl_attr_type(a);
1181 mask->nla_len = attr->nla_len = NLA_HDRLEN + size;
1182 memcpy(attr + 1, (char *)(a + 1), size);
1183 memcpy(mask + 1, (char *)(a + 1) + size, size);
1184 format_odp_key_attr(attr, mask, NULL, ds, false);
1185 } else {
1186 format_odp_key_attr(a, NULL, NULL, ds, false);
1187 }
1188 ds_put_cstr(ds, ")");
1189 break;
1190 case OVS_ACTION_ATTR_SET:
1191 ds_put_cstr(ds, "set(");
1192 format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
1193 ds_put_cstr(ds, ")");
1194 break;
1195 case OVS_ACTION_ATTR_PUSH_ETH: {
1196 const struct ovs_action_push_eth *eth = nl_attr_get(a);
1197 ds_put_format(ds, "push_eth(src="ETH_ADDR_FMT",dst="ETH_ADDR_FMT")",
1198 ETH_ADDR_ARGS(eth->addresses.eth_src),
1199 ETH_ADDR_ARGS(eth->addresses.eth_dst));
1200 break;
1201 }
1202 case OVS_ACTION_ATTR_POP_ETH:
1203 ds_put_cstr(ds, "pop_eth");
1204 break;
1205 case OVS_ACTION_ATTR_PUSH_VLAN: {
1206 const struct ovs_action_push_vlan *vlan = nl_attr_get(a);
1207 ds_put_cstr(ds, "push_vlan(");
1208 if (vlan->vlan_tpid != htons(ETH_TYPE_VLAN)) {
1209 ds_put_format(ds, "tpid=0x%04"PRIx16",", ntohs(vlan->vlan_tpid));
1210 }
1211 format_vlan_tci(ds, vlan->vlan_tci, OVS_BE16_MAX, false);
1212 ds_put_char(ds, ')');
1213 break;
1214 }
1215 case OVS_ACTION_ATTR_POP_VLAN:
1216 ds_put_cstr(ds, "pop_vlan");
1217 break;
1218 case OVS_ACTION_ATTR_PUSH_MPLS: {
1219 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
1220 ds_put_cstr(ds, "push_mpls(");
1221 format_mpls_lse(ds, mpls->mpls_lse);
1222 ds_put_format(ds, ",eth_type=0x%"PRIx16")", ntohs(mpls->mpls_ethertype));
1223 break;
1224 }
1225 case OVS_ACTION_ATTR_POP_MPLS: {
1226 ovs_be16 ethertype = nl_attr_get_be16(a);
1227 ds_put_format(ds, "pop_mpls(eth_type=0x%"PRIx16")", ntohs(ethertype));
1228 break;
1229 }
1230 case OVS_ACTION_ATTR_SAMPLE:
1231 format_odp_sample_action(ds, a, portno_names);
1232 break;
1233 case OVS_ACTION_ATTR_CT:
1234 format_odp_conntrack_action(ds, a);
1235 break;
1236 case OVS_ACTION_ATTR_CT_CLEAR:
1237 ds_put_cstr(ds, "ct_clear");
1238 break;
1239 case OVS_ACTION_ATTR_CLONE:
1240 format_odp_clone_action(ds, a, portno_names);
1241 break;
1242 case OVS_ACTION_ATTR_PUSH_NSH: {
1243 uint32_t buffer[NSH_HDR_MAX_LEN / 4];
1244 struct nsh_hdr *nsh_hdr = ALIGNED_CAST(struct nsh_hdr *, buffer);
1245 nsh_reset_ver_flags_ttl_len(nsh_hdr);
1246 odp_nsh_hdr_from_attr(nl_attr_get(a), nsh_hdr, NSH_HDR_MAX_LEN);
1247 format_odp_push_nsh_action(ds, nsh_hdr);
1248 break;
1249 }
1250 case OVS_ACTION_ATTR_POP_NSH:
1251 ds_put_cstr(ds, "pop_nsh()");
1252 break;
1253 case OVS_ACTION_ATTR_CHECK_PKT_LEN:
1254 format_odp_check_pkt_len_action(ds, a, portno_names);
1255 break;
1256 case OVS_ACTION_ATTR_DROP:
1257 ds_put_cstr(ds, "drop");
1258 break;
1259 case OVS_ACTION_ATTR_UNSPEC:
1260 case __OVS_ACTION_ATTR_MAX:
1261 default:
1262 format_generic_odp_action(ds, a);
1263 break;
1264 }
1265 }
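
/*
 * Editorial note (added commentary): for OVS_ACTION_ATTR_SET_MASKED, the
 * nested key attribute's payload holds the value immediately followed by an
 * equally sized mask, so the case above splits nl_attr_get_size(a) in half
 * and rebuilds two temporary attributes before handing them to
 * format_odp_key_attr(). A masked set of the IPv4 TTL, for example, might
 * print as:
 *
 *     set(ipv4(ttl=10))
 *
 * with the unmasked fields suppressed by the mask-aware formatting.
 */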
1266
1267 void
1268 format_odp_actions(struct ds *ds, const struct nlattr *actions,
1269 size_t actions_len, const struct hmap *portno_names)
1270 {
1271 if (actions_len) {
1272 const struct nlattr *a;
1273 unsigned int left;
1274
1275 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
1276 if (a != actions) {
1277 ds_put_char(ds, ',');
1278 }
1279 format_odp_action(ds, a, portno_names);
1280 }
1281 if (left) {
1282 int i;
1283
1284 if (left == actions_len) {
1285 ds_put_cstr(ds, "<empty>");
1286 }
1287 ds_put_format(ds, ",***%u leftover bytes*** (", left);
1288 for (i = 0; i < left; i++) {
1289 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
1290 }
1291 ds_put_char(ds, ')');
1292 }
1293 } else {
1294 ds_put_cstr(ds, "drop");
1295 }
1296 }
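
/*
 * Editorial note (added commentary): an empty action list is deliberately
 * printed as "drop", since a flow with no actions drops its packets, and any
 * trailing bytes that do not form a whole netlink attribute are dumped in hex
 * after a "***N leftover bytes***" marker.
 */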
1297
1298 /* Parses the userspace() action from 's' and appends it to 'actions'. */
1299 static int
1300 parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
1301 {
1302 uint32_t pid;
1303 struct user_action_cookie cookie;
1304 struct ofpbuf buf;
1305 odp_port_t tunnel_out_port;
1306 int n = -1;
1307 void *user_data = NULL;
1308 size_t user_data_size = 0;
1309 bool include_actions = false;
1310 int res;
1311
1312 if (!ovs_scan(s, "userspace(pid=%"SCNi32"%n", &pid, &n)) {
1313 return -EINVAL;
1314 }
1315
1316 ofpbuf_init(&buf, 16);
1317 memset(&cookie, 0, sizeof cookie);
1318
1319 user_data = &cookie;
1320 user_data_size = sizeof cookie;
1321 {
1322 uint32_t output;
1323 uint32_t probability;
1324 uint32_t collector_set_id;
1325 uint32_t obs_domain_id;
1326 uint32_t obs_point_id;
1327
1328 /* USER_ACTION_COOKIE_CONTROLLER. */
1329 uint8_t dont_send;
1330 uint8_t continuation;
1331 uint16_t reason;
1332 uint32_t recirc_id;
1333 uint64_t rule_cookie;
1334 uint16_t controller_id;
1335 uint16_t max_len;
1336
1337 int vid, pcp;
1338 int n1 = -1;
1339 if (ovs_scan(&s[n], ",sFlow(vid=%i,"
1340 "pcp=%i,output=%"SCNi32")%n",
1341 &vid, &pcp, &output, &n1)) {
1342 uint16_t tci;
1343
1344 n += n1;
1345 tci = vid | (pcp << VLAN_PCP_SHIFT);
1346 if (tci) {
1347 tci |= VLAN_CFI;
1348 }
1349
1350 cookie.type = USER_ACTION_COOKIE_SFLOW;
1351 cookie.ofp_in_port = OFPP_NONE;
1352 cookie.ofproto_uuid = UUID_ZERO;
1353 cookie.sflow.vlan_tci = htons(tci);
1354 cookie.sflow.output = output;
1355 } else if (ovs_scan(&s[n], ",slow_path(%n",
1356 &n1)) {
1357 n += n1;
1358 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
1359 cookie.ofp_in_port = OFPP_NONE;
1360 cookie.ofproto_uuid = UUID_ZERO;
1361 cookie.slow_path.reason = 0;
1362
1363 res = parse_odp_flags(&s[n], slow_path_reason_to_string,
1364 &cookie.slow_path.reason,
1365 SLOW_PATH_REASON_MASK, NULL);
1366 if (res < 0 || s[n + res] != ')') {
1367 goto out;
1368 }
1369 n += res + 1;
1370 } else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
1371 "collector_set_id=%"SCNi32","
1372 "obs_domain_id=%"SCNi32","
1373 "obs_point_id=%"SCNi32","
1374 "output_port=%"SCNi32"%n",
1375 &probability, &collector_set_id,
1376 &obs_domain_id, &obs_point_id,
1377 &output, &n1)) {
1378 n += n1;
1379
1380 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
1381 cookie.ofp_in_port = OFPP_NONE;
1382 cookie.ofproto_uuid = UUID_ZERO;
1383 cookie.flow_sample.probability = probability;
1384 cookie.flow_sample.collector_set_id = collector_set_id;
1385 cookie.flow_sample.obs_domain_id = obs_domain_id;
1386 cookie.flow_sample.obs_point_id = obs_point_id;
1387 cookie.flow_sample.output_odp_port = u32_to_odp(output);
1388
1389 if (ovs_scan(&s[n], ",ingress%n", &n1)) {
1390 cookie.flow_sample.direction = NX_ACTION_SAMPLE_INGRESS;
1391 n += n1;
1392 } else if (ovs_scan(&s[n], ",egress%n", &n1)) {
1393 cookie.flow_sample.direction = NX_ACTION_SAMPLE_EGRESS;
1394 n += n1;
1395 } else {
1396 cookie.flow_sample.direction = NX_ACTION_SAMPLE_DEFAULT;
1397 }
1398 if (s[n] != ')') {
1399 res = -EINVAL;
1400 goto out;
1401 }
1402 n++;
1403 } else if (ovs_scan(&s[n], ",ipfix(output_port=%"SCNi32")%n",
1404 &output, &n1) ) {
1405 n += n1;
1406 cookie.type = USER_ACTION_COOKIE_IPFIX;
1407 cookie.ofp_in_port = OFPP_NONE;
1408 cookie.ofproto_uuid = UUID_ZERO;
1409 cookie.ipfix.output_odp_port = u32_to_odp(output);
1410 } else if (ovs_scan(&s[n], ",controller(reason=%"SCNu16
1411 ",dont_send=%"SCNu8
1412 ",continuation=%"SCNu8
1413 ",recirc_id=%"SCNu32
1414 ",rule_cookie=%"SCNx64
1415 ",controller_id=%"SCNu16
1416 ",max_len=%"SCNu16")%n",
1417 &reason, &dont_send, &continuation, &recirc_id,
1418 &rule_cookie, &controller_id, &max_len, &n1)) {
1419 n += n1;
1420 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
1421 cookie.ofp_in_port = OFPP_NONE;
1422 cookie.ofproto_uuid = UUID_ZERO;
1423 cookie.controller.dont_send = dont_send ? true : false;
1424 cookie.controller.continuation = continuation ? true : false;
1425 cookie.controller.reason = reason;
1426 cookie.controller.recirc_id = recirc_id;
1427 put_32aligned_be64(&cookie.controller.rule_cookie,
1428 htonll(rule_cookie));
1429 cookie.controller.controller_id = controller_id;
1430 cookie.controller.max_len = max_len;
1431 } else if (ovs_scan(&s[n], ",userdata(%n", &n1)) {
1432 char *end;
1433
1434 n += n1;
1435 end = ofpbuf_put_hex(&buf, &s[n], NULL);
1436 if (end[0] != ')') {
1437 res = -EINVAL;
1438 goto out;
1439 }
1440 user_data = buf.data;
1441 user_data_size = buf.size;
1442 n = (end + 1) - s;
1443 }
1444 }
1445
1446 {
1447 int n1 = -1;
1448 if (ovs_scan(&s[n], ",actions%n", &n1)) {
1449 n += n1;
1450 include_actions = true;
1451 }
1452 }
1453
1454 {
1455 int n1 = -1;
1456 if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
1457 &tunnel_out_port, &n1)) {
1458 odp_put_userspace_action(pid, user_data, user_data_size,
1459 tunnel_out_port, include_actions, actions);
1460 res = n + n1;
1461 goto out;
1462 } else if (s[n] == ')') {
1463 odp_put_userspace_action(pid, user_data, user_data_size,
1464 ODPP_NONE, include_actions, actions);
1465 res = n + 1;
1466 goto out;
1467 }
1468 }
1469
1470 {
1471 struct ovs_action_push_eth push;
1472 int eth_type = 0;
1473 int n1 = -1;
1474
1475 if (ovs_scan(&s[n], "push_eth(src="ETH_ADDR_SCAN_FMT","
1476 "dst="ETH_ADDR_SCAN_FMT",type=%i)%n",
1477 ETH_ADDR_SCAN_ARGS(push.addresses.eth_src),
1478 ETH_ADDR_SCAN_ARGS(push.addresses.eth_dst),
1479 &eth_type, &n1)) {
1480
1481 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_ETH,
1482 &push, sizeof push);
1483
1484 res = n + n1;
1485 goto out;
1486 }
1487 }
1488
1489 if (!strncmp(&s[n], "pop_eth", 7)) {
1490 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_ETH);
1491 res = 7;
1492 goto out;
1493 }
1494
1495 res = -EINVAL;
1496 out:
1497 ofpbuf_uninit(&buf);
1498 return res;
1499 }
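
/*
 * Editorial example (hypothetical values): strings accepted by the parser
 * above mirror the formatter's output, e.g.:
 *
 *     userspace(pid=1234567,sFlow(vid=1,pcp=2,output=10))
 *     userspace(pid=6633,slow_path(controller))
 *     userspace(pid=9765,userdata(0102030405))
 *
 * Each returns the number of characters consumed, or a negative errno value
 * on a parse error.
 */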
1500
1501 static int
1502 ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
1503 {
1504 struct eth_header *eth;
1505 struct ip_header *ip;
1506 struct ovs_16aligned_ip6_hdr *ip6;
1507 struct udp_header *udp;
1508 struct gre_base_hdr *greh;
1509 struct erspan_base_hdr *ersh;
1510 struct erspan_md2 *md2;
1511 uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, udp_csum, sid;
1512 ovs_be32 sip, dip;
1513 uint32_t tnl_type = 0, header_len = 0, ip_len = 0, erspan_idx = 0;
1514 void *l3, *l4;
1515 int n = 0;
1516 uint8_t hwid, dir;
1517 uint32_t teid;
1518 uint8_t gtpu_flags, gtpu_msgtype;
1519
1520 if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
1521 return -EINVAL;
1522 }
1523 eth = (struct eth_header *) data->header;
1524 l3 = (struct ip_header *) (eth + 1);
1525 ip = (struct ip_header *) l3;
1526 ip6 = (struct ovs_16aligned_ip6_hdr *) l3;
1527 if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
1528 "eth(dst="ETH_ADDR_SCAN_FMT",",
1529 &data->header_len,
1530 &data->tnl_type,
1531 ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
1532 return -EINVAL;
1533 }
1534
1535 if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
1536 ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
1537 return -EINVAL;
1538 }
1539 if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
1540 return -EINVAL;
1541 }
1542 eth->eth_type = htons(dl_type);
1543
1544 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1545 /* IPv4 */
1546 uint16_t ip_frag_off;
1547 memset(ip, 0, sizeof(*ip));
1548 if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
1549 ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
1550 IP_SCAN_ARGS(&sip),
1551 IP_SCAN_ARGS(&dip),
1552 &ip->ip_proto, &ip->ip_tos,
1553 &ip->ip_ttl, &ip_frag_off)) {
1554 return -EINVAL;
1555 }
1556 put_16aligned_be32(&ip->ip_src, sip);
1557 put_16aligned_be32(&ip->ip_dst, dip);
1558 ip->ip_frag_off = htons(ip_frag_off);
1559 ip->ip_ihl_ver = IP_IHL_VER(5, 4);
1560 ip_len = sizeof *ip;
1561 ip->ip_csum = csum(ip, ip_len);
1562 } else {
1563 char sip6_s[IPV6_SCAN_LEN + 1];
1564 char dip6_s[IPV6_SCAN_LEN + 1];
1565 struct in6_addr sip6, dip6;
1566 uint8_t tclass;
1567 uint32_t label;
1568 if (!ovs_scan_len(s, &n, "ipv6(src="IPV6_SCAN_FMT",dst="IPV6_SCAN_FMT
1569 ",label=%i,proto=%"SCNi8",tclass=0x%"SCNx8
1570 ",hlimit=%"SCNi8"),",
1571 sip6_s, dip6_s, &label, &ip6->ip6_nxt,
1572 &tclass, &ip6->ip6_hlim)
1573 || (label & ~IPV6_LABEL_MASK) != 0
1574 || inet_pton(AF_INET6, sip6_s, &sip6) != 1
1575 || inet_pton(AF_INET6, dip6_s, &dip6) != 1) {
1576 return -EINVAL;
1577 }
1578 put_16aligned_be32(&ip6->ip6_flow, htonl(6 << 28) |
1579 htonl(tclass << 20) | htonl(label));
1580 memcpy(&ip6->ip6_src, &sip6, sizeof(ip6->ip6_src));
1581 memcpy(&ip6->ip6_dst, &dip6, sizeof(ip6->ip6_dst));
1582 ip_len = sizeof *ip6;
1583 }
1584
1585 /* Tunnel header */
1586 l4 = ((uint8_t *) l3 + ip_len);
1587 udp = (struct udp_header *) l4;
1588 greh = (struct gre_base_hdr *) l4;
1589 if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
1590 &udp_src, &udp_dst, &udp_csum)) {
1591 uint32_t vx_flags, vni;
1592
1593 udp->udp_src = htons(udp_src);
1594 udp->udp_dst = htons(udp_dst);
1595 udp->udp_len = 0;
1596 udp->udp_csum = htons(udp_csum);
1597
1598 if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
1599 &vx_flags, &vni)) {
1600 struct vxlanhdr *vxh = (struct vxlanhdr *) (udp + 1);
1601
1602 put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
1603 put_16aligned_be32(&vxh->vx_vni, htonl(vni << 8));
1604 tnl_type = OVS_VPORT_TYPE_VXLAN;
1605 header_len = sizeof *eth + ip_len +
1606 sizeof *udp + sizeof *vxh;
1607 } else if (ovs_scan_len(s, &n, "geneve(")) {
1608 struct genevehdr *gnh = (struct genevehdr *) (udp + 1);
1609
1610 memset(gnh, 0, sizeof *gnh);
1611 header_len = sizeof *eth + ip_len +
1612 sizeof *udp + sizeof *gnh;
1613
1614 if (ovs_scan_len(s, &n, "oam,")) {
1615 gnh->oam = 1;
1616 }
1617 if (ovs_scan_len(s, &n, "crit,")) {
1618 gnh->critical = 1;
1619 }
1620 if (!ovs_scan_len(s, &n, "vni=%"SCNi32, &vni)) {
1621 return -EINVAL;
1622 }
1623 if (ovs_scan_len(s, &n, ",options(")) {
1624 struct geneve_scan options;
1625 int len;
1626
1627 memset(&options, 0, sizeof options);
1628 len = scan_geneve(s + n, &options, NULL);
1629 if (!len) {
1630 return -EINVAL;
1631 }
1632
1633 memcpy(gnh->options, options.d, options.len);
1634 gnh->opt_len = options.len / 4;
1635 header_len += options.len;
1636
1637 n += len;
1638 }
1639 if (!ovs_scan_len(s, &n, "))")) {
1640 return -EINVAL;
1641 }
1642
1643 gnh->proto_type = htons(ETH_TYPE_TEB);
1644 put_16aligned_be32(&gnh->vni, htonl(vni << 8));
1645 tnl_type = OVS_VPORT_TYPE_GENEVE;
1646 } else {
1647 return -EINVAL;
1648 }
1649 } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
1650 &gre_flags, &gre_proto)){
1651
1652 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1653 tnl_type = OVS_VPORT_TYPE_GRE;
1654 } else {
1655 tnl_type = OVS_VPORT_TYPE_IP6GRE;
1656 }
1657 greh->flags = htons(gre_flags);
1658 greh->protocol = htons(gre_proto);
1659 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
1660
1661 if (greh->flags & htons(GRE_CSUM)) {
1662 uint16_t csum;
1663 if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
1664 return -EINVAL;
1665 }
1666
1667 memset(options, 0, sizeof *options);
1668 *((ovs_be16 *)options) = htons(csum);
1669 options++;
1670 }
1671 if (greh->flags & htons(GRE_KEY)) {
1672 uint32_t key;
1673
1674 if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
1675 return -EINVAL;
1676 }
1677
1678 put_16aligned_be32(options, htonl(key));
1679 options++;
1680 }
1681 if (greh->flags & htons(GRE_SEQ)) {
1682 uint32_t seq;
1683
1684 if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
1685 return -EINVAL;
1686 }
1687 put_16aligned_be32(options, htonl(seq));
1688 options++;
1689 }
1690
1691 if (!ovs_scan_len(s, &n, "))")) {
1692 return -EINVAL;
1693 }
1694
1695 header_len = sizeof *eth + ip_len +
1696 ((uint8_t *) options - (uint8_t *) greh);
1697 } else if (ovs_scan_len(s, &n, "erspan(ver=1,sid="SCNx16",idx=0x"SCNx32")",
1698 &sid, &erspan_idx)) {
1699 ersh = ERSPAN_HDR(greh);
1700 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
1701 ersh + 1);
1702
1703 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1704 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1705 } else {
1706 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1707 }
1708
1709 greh->flags = htons(GRE_SEQ);
1710 greh->protocol = htons(ETH_TYPE_ERSPAN1);
1711
1712 ersh->ver = 1;
1713 set_sid(ersh, sid);
1714 put_16aligned_be32(index, htonl(erspan_idx));
1715
1716 if (!ovs_scan_len(s, &n, ")")) {
1717 return -EINVAL;
1718 }
1719 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1720 sizeof *ersh + ERSPAN_V1_MDSIZE;
1721
1722 } else if (ovs_scan_len(s, &n, "erspan(ver=2,sid="SCNx16"dir="SCNu8
1723 ",hwid=0x"SCNx8")", &sid, &dir, &hwid)) {
1724
1725 ersh = ERSPAN_HDR(greh);
1726 md2 = ALIGNED_CAST(struct erspan_md2 *, ersh + 1);
1727
1728 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1729 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1730 } else {
1731 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1732 }
1733
1734 greh->flags = htons(GRE_SEQ);
1735 greh->protocol = htons(ETH_TYPE_ERSPAN2);
1736
1737 ersh->ver = 2;
1738 set_sid(ersh, sid);
1739 set_hwid(md2, hwid);
1740 md2->dir = dir;
1741
1742 if (!ovs_scan_len(s, &n, ")")) {
1743 return -EINVAL;
1744 }
1745
1746 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1747 sizeof *ersh + ERSPAN_V2_MDSIZE;
1748
1749 } else if (ovs_scan_len(s, &n, "gtpu(flags=%"SCNi8",msgtype=%"
1750 SCNu8",teid=0x%"SCNx32"))",
1751 &gtpu_flags, &gtpu_msgtype, &teid)) {
1752 struct gtpuhdr *gtph = (struct gtpuhdr *) (udp + 1);
1753
1754 gtph->md.flags = gtpu_flags;
1755 gtph->md.msgtype = gtpu_msgtype;
1756 put_16aligned_be32(&gtph->teid, htonl(teid));
1757 tnl_type = OVS_VPORT_TYPE_GTPU;
1758 header_len = sizeof *eth + ip_len +
1759 sizeof *udp + sizeof *gtph;
1760 } else {
1761 return -EINVAL;
1762 }
1763
1764 /* Check tunnel metadata. */
1765 if (data->tnl_type != tnl_type) {
1766 return -EINVAL;
1767 }
1768 if (data->header_len != header_len) {
1769 return -EINVAL;
1770 }
1771
1772 /* Out port */
1773 if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
1774 return -EINVAL;
1775 }
1776
1777 return n;
1778 }
1779
1780 struct ct_nat_params {
1781 bool snat;
1782 bool dnat;
1783 size_t addr_len;
1784 union {
1785 ovs_be32 ip;
1786 struct in6_addr ip6;
1787 } addr_min;
1788 union {
1789 ovs_be32 ip;
1790 struct in6_addr ip6;
1791 } addr_max;
1792 uint16_t proto_min;
1793 uint16_t proto_max;
1794 bool persistent;
1795 bool proto_hash;
1796 bool proto_random;
1797 };
1798
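/*
 * Example of the nat() src/dst range syntax parsed by scan_ct_nat() and
 * scan_ct_nat_range() below (a minimal illustration; the addresses and ports
 * are arbitrary):
 *
 *     src=10.0.0.1-10.0.0.9:1024-2048      IPv4 range plus a port range.
 *     dst=[fc00::1]-[fc00::9]              IPv6 range; brackets are optional.
 *
 * The parser fills addr_min/addr_max and addr_len of 'struct ct_nat_params'
 * for the address part and proto_min/proto_max for the optional port part;
 * scan_ct_nat() then sets 'snat' or 'dnat' depending on the keyword.
 */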
1799 static int
1800 scan_ct_nat_range(const char *s, int *n, struct ct_nat_params *p)
1801 {
1802 if (ovs_scan_len(s, n, "=")) {
1803 char ipv6_s[IPV6_SCAN_LEN + 1];
1804 struct in6_addr ipv6;
1805
1806 if (ovs_scan_len(s, n, IP_SCAN_FMT, IP_SCAN_ARGS(&p->addr_min.ip))) {
1807 p->addr_len = sizeof p->addr_min.ip;
1808 if (ovs_scan_len(s, n, "-")) {
1809 if (!ovs_scan_len(s, n, IP_SCAN_FMT,
1810 IP_SCAN_ARGS(&p->addr_max.ip))) {
1811 return -EINVAL;
1812 }
1813 }
1814 } else if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1815 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1816 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1817 p->addr_len = sizeof p->addr_min.ip6;
1818 p->addr_min.ip6 = ipv6;
1819 if (ovs_scan_len(s, n, "-")) {
1820 if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1821 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1822 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1823 p->addr_max.ip6 = ipv6;
1824 } else {
1825 return -EINVAL;
1826 }
1827 }
1828 } else {
1829 return -EINVAL;
1830 }
1831 if (ovs_scan_len(s, n, ":%"SCNu16, &p->proto_min)) {
1832 if (ovs_scan_len(s, n, "-")) {
1833 if (!ovs_scan_len(s, n, "%"SCNu16, &p->proto_max)) {
1834 return -EINVAL;
1835 }
1836 }
1837 }
1838 }
1839 return 0;
1840 }
1841
1842 static int
1843 scan_ct_nat(const char *s, struct ct_nat_params *p)
1844 {
1845 int n = 0;
1846
1847 if (ovs_scan_len(s, &n, "nat")) {
1848 memset(p, 0, sizeof *p);
1849
1850 if (ovs_scan_len(s, &n, "(")) {
1851 char *end;
1852 int end_n;
1853
1854 end = strchr(s + n, ')');
1855 if (!end) {
1856 return -EINVAL;
1857 }
1858 end_n = end - s;
1859
1860 while (n < end_n) {
1861 n += strspn(s + n, delimiters);
1862 if (ovs_scan_len(s, &n, "src")) {
1863 int err = scan_ct_nat_range(s, &n, p);
1864 if (err) {
1865 return err;
1866 }
1867 p->snat = true;
1868 continue;
1869 }
1870 if (ovs_scan_len(s, &n, "dst")) {
1871 int err = scan_ct_nat_range(s, &n, p);
1872 if (err) {
1873 return err;
1874 }
1875 p->dnat = true;
1876 continue;
1877 }
1878 if (ovs_scan_len(s, &n, "persistent")) {
1879 p->persistent = true;
1880 continue;
1881 }
1882 if (ovs_scan_len(s, &n, "hash")) {
1883 p->proto_hash = true;
1884 continue;
1885 }
1886 if (ovs_scan_len(s, &n, "random")) {
1887 p->proto_random = true;
1888 continue;
1889 }
1890 return -EINVAL;
1891 }
1892
1893 if (p->snat && p->dnat) {
1894 return -EINVAL;
1895 }
1896 if ((p->addr_len != 0 &&
1897 memcmp(&p->addr_max, &in6addr_any, p->addr_len) &&
1898 memcmp(&p->addr_max, &p->addr_min, p->addr_len) < 0) ||
1899 (p->proto_max && p->proto_max < p->proto_min)) {
1900 return -EINVAL;
1901 }
1902 if (p->proto_hash && p->proto_random) {
1903 return -EINVAL;
1904 }
1905 n++;
1906 }
1907 }
1908 return n;
1909 }
1910
1911 static void
1912 nl_msg_put_ct_nat(struct ct_nat_params *p, struct ofpbuf *actions)
1913 {
1914 size_t start = nl_msg_start_nested(actions, OVS_CT_ATTR_NAT);
1915
1916 if (p->snat) {
1917 nl_msg_put_flag(actions, OVS_NAT_ATTR_SRC);
1918 } else if (p->dnat) {
1919 nl_msg_put_flag(actions, OVS_NAT_ATTR_DST);
1920 } else {
1921 goto out;
1922 }
1923 if (p->addr_len != 0) {
1924 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MIN, &p->addr_min,
1925 p->addr_len);
1926 if (memcmp(&p->addr_max, &p->addr_min, p->addr_len) > 0) {
1927 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MAX, &p->addr_max,
1928 p->addr_len);
1929 }
1930 if (p->proto_min) {
1931 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MIN, p->proto_min);
1932 if (p->proto_max && p->proto_max > p->proto_min) {
1933 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MAX, p->proto_max);
1934 }
1935 }
1936 if (p->persistent) {
1937 nl_msg_put_flag(actions, OVS_NAT_ATTR_PERSISTENT);
1938 }
1939 if (p->proto_hash) {
1940 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_HASH);
1941 }
1942 if (p->proto_random) {
1943 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_RANDOM);
1944 }
1945 }
1946 out:
1947 nl_msg_end_nested(actions, start);
1948 }
1949
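/*
 * A minimal illustration of the ct() action syntax handled by
 * parse_conntrack_action() below (the field values are arbitrary):
 *
 *     ct(commit,zone=1,mark=0x1/0xff,helper=ftp,nat(src=10.0.0.240))
 *
 * This is encoded as a nested OVS_ACTION_ATTR_CT attribute containing
 * OVS_CT_ATTR_COMMIT, OVS_CT_ATTR_ZONE, OVS_CT_ATTR_MARK, OVS_CT_ATTR_HELPER
 * and a nested OVS_CT_ATTR_NAT.
 */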
1950 static int
1951 parse_conntrack_action(const char *s_, struct ofpbuf *actions)
1952 {
1953 const char *s = s_;
1954
1955 if (ovs_scan(s, "ct")) {
1956 const char *helper = NULL, *timeout = NULL;
1957 size_t helper_len = 0, timeout_len = 0;
1958 bool commit = false;
1959 bool force_commit = false;
1960 uint16_t zone = 0;
1961 struct {
1962 uint32_t value;
1963 uint32_t mask;
1964 } ct_mark = { 0, 0 };
1965 struct {
1966 ovs_u128 value;
1967 ovs_u128 mask;
1968 } ct_label;
1969 struct ct_nat_params nat_params;
1970 bool have_nat = false;
1971 size_t start;
1972 char *end;
1973
1974 memset(&ct_label, 0, sizeof(ct_label));
1975
1976 s += 2;
1977 if (ovs_scan(s, "(")) {
1978 s++;
1979 find_end:
1980 end = strchr(s, ')');
1981 if (!end) {
1982 return -EINVAL;
1983 }
1984
1985 while (s != end) {
1986 int n;
1987
1988 s += strspn(s, delimiters);
1989 if (ovs_scan(s, "commit%n", &n)) {
1990 commit = true;
1991 s += n;
1992 continue;
1993 }
1994 if (ovs_scan(s, "force_commit%n", &n)) {
1995 force_commit = true;
1996 s += n;
1997 continue;
1998 }
1999 if (ovs_scan(s, "zone=%"SCNu16"%n", &zone, &n)) {
2000 s += n;
2001 continue;
2002 }
2003 if (ovs_scan(s, "mark=%"SCNx32"%n", &ct_mark.value, &n)) {
2004 s += n;
2005 n = -1;
2006 if (ovs_scan(s, "/%"SCNx32"%n", &ct_mark.mask, &n)) {
2007 s += n;
2008 } else {
2009 ct_mark.mask = UINT32_MAX;
2010 }
2011 continue;
2012 }
2013 if (ovs_scan(s, "label=%n", &n)) {
2014 int retval;
2015
2016 s += n;
2017 retval = scan_u128(s, &ct_label.value, &ct_label.mask);
2018 if (retval == 0) {
2019 return -EINVAL;
2020 }
2021 s += retval;
2022 continue;
2023 }
2024 if (ovs_scan(s, "helper=%n", &n)) {
2025 s += n;
2026 helper_len = strcspn(s, delimiters_end);
2027 if (!helper_len || helper_len > 15) {
2028 return -EINVAL;
2029 }
2030 helper = s;
2031 s += helper_len;
2032 continue;
2033 }
2034 if (ovs_scan(s, "timeout=%n", &n)) {
2035 s += n;
2036 timeout_len = strcspn(s, delimiters_end);
2037 if (!timeout_len || timeout_len > 31) {
2038 return -EINVAL;
2039 }
2040 timeout = s;
2041 s += timeout_len;
2042 continue;
2043 }
2044
2045 n = scan_ct_nat(s, &nat_params);
2046 if (n > 0) {
2047 s += n;
2048 have_nat = true;
2049
2050 /* 'end' points to the end of the nested nat() action;
2051 * find the real end of the enclosing ct() action. */
2052 goto find_end;
2053 }
2054 /* Nothing matched. */
2055 return -EINVAL;
2056 }
2057 s++;
2058 }
2059 if (commit && force_commit) {
2060 return -EINVAL;
2061 }
2062
2063 start = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CT);
2064 if (commit) {
2065 nl_msg_put_flag(actions, OVS_CT_ATTR_COMMIT);
2066 } else if (force_commit) {
2067 nl_msg_put_flag(actions, OVS_CT_ATTR_FORCE_COMMIT);
2068 }
2069 if (zone) {
2070 nl_msg_put_u16(actions, OVS_CT_ATTR_ZONE, zone);
2071 }
2072 if (ct_mark.mask) {
2073 nl_msg_put_unspec(actions, OVS_CT_ATTR_MARK, &ct_mark,
2074 sizeof(ct_mark));
2075 }
2076 if (!ovs_u128_is_zero(ct_label.mask)) {
2077 nl_msg_put_unspec(actions, OVS_CT_ATTR_LABELS, &ct_label,
2078 sizeof ct_label);
2079 }
2080 if (helper) {
2081 nl_msg_put_string__(actions, OVS_CT_ATTR_HELPER, helper,
2082 helper_len);
2083 }
2084 if (timeout) {
2085 nl_msg_put_string__(actions, OVS_CT_ATTR_TIMEOUT, timeout,
2086 timeout_len);
2087 }
2088 if (have_nat) {
2089 nl_msg_put_ct_nat(&nat_params, actions);
2090 }
2091 nl_msg_end_nested(actions, start);
2092 }
2093
2094 return s - s_;
2095 }
2096
2097 static void
2098 nsh_key_to_attr(struct ofpbuf *buf, const struct ovs_key_nsh *nsh,
2099 uint8_t * metadata, size_t md_size,
2100 bool is_mask)
2101 {
2102 size_t nsh_key_ofs;
2103 struct ovs_nsh_key_base base;
2104
2105 base.flags = nsh->flags;
2106 base.ttl = nsh->ttl;
2107 base.mdtype = nsh->mdtype;
2108 base.np = nsh->np;
2109 base.path_hdr = nsh->path_hdr;
2110
2111 nsh_key_ofs = nl_msg_start_nested(buf, OVS_KEY_ATTR_NSH);
2112 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_BASE, &base, sizeof base);
2113
2114 if (is_mask) {
2115 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2116 sizeof nsh->context);
2117 } else {
2118 switch (nsh->mdtype) {
2119 case NSH_M_TYPE1:
2120 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2121 sizeof nsh->context);
2122 break;
2123 case NSH_M_TYPE2:
2124 if (metadata && md_size > 0) {
2125 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD2, metadata,
2126 md_size);
2127 }
2128 break;
2129 default:
2130 /* No match support for other MD formats yet. */
2131 break;
2132 }
2133 }
2134 nl_msg_end_nested(buf, nsh_key_ofs);
2135 }
2136
2137
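/*
 * A minimal illustration of the push_nsh() syntax parsed by
 * parse_odp_push_nsh_action() below (the numeric values are arbitrary;
 * mdtype=1 is the default metadata format):
 *
 *     push_nsh(flags=0,ttl=63,mdtype=1,np=3,spi=0x100,si=255,
 *              c1=0x1,c2=0x2,c3=0x3,c4=0x4)
 *
 * For mdtype=2 the context headers are instead given as a hex blob, e.g.
 * md2=0x01020304, which is padded up to a multiple of 4 bytes.
 */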
2138 static int
2139 parse_odp_push_nsh_action(const char *s, struct ofpbuf *actions)
2140 {
2141 int n = 0;
2142 int ret = 0;
2143 uint32_t spi = 0;
2144 uint8_t si = 255;
2145 uint32_t cd;
2146 struct ovs_key_nsh nsh;
2147 uint8_t metadata[NSH_CTX_HDRS_MAX_LEN];
2148 uint8_t md_size = 0;
2149
2150 if (!ovs_scan_len(s, &n, "push_nsh(")) {
2151 ret = -EINVAL;
2152 goto out;
2153 }
2154
2155 /* The default is NSH_M_TYPE1 */
2156 nsh.flags = 0;
2157 nsh.ttl = 63;
2158 nsh.mdtype = NSH_M_TYPE1;
2159 nsh.np = NSH_P_ETHERNET;
2160 nsh.path_hdr = nsh_spi_si_to_path_hdr(0, 255);
2161 memset(nsh.context, 0, NSH_M_TYPE1_MDLEN);
2162
2163 for (;;) {
2164 n += strspn(s + n, delimiters);
2165 if (s[n] == ')') {
2166 break;
2167 }
2168
2169 if (ovs_scan_len(s, &n, "flags=%"SCNi8, &nsh.flags)) {
2170 continue;
2171 }
2172 if (ovs_scan_len(s, &n, "ttl=%"SCNi8, &nsh.ttl)) {
2173 continue;
2174 }
2175 if (ovs_scan_len(s, &n, "mdtype=%"SCNi8, &nsh.mdtype)) {
2176 switch (nsh.mdtype) {
2177 case NSH_M_TYPE1:
2178 /* This is the default format. */;
2179 break;
2180 case NSH_M_TYPE2:
2181 /* Length will be updated later. */
2182 md_size = 0;
2183 break;
2184 default:
2185 ret = -EINVAL;
2186 goto out;
2187 }
2188 continue;
2189 }
2190 if (ovs_scan_len(s, &n, "np=%"SCNi8, &nsh.np)) {
2191 continue;
2192 }
2193 if (ovs_scan_len(s, &n, "spi=0x%"SCNx32, &spi)) {
2194 continue;
2195 }
2196 if (ovs_scan_len(s, &n, "si=%"SCNi8, &si)) {
2197 continue;
2198 }
2199 if (nsh.mdtype == NSH_M_TYPE1) {
2200 if (ovs_scan_len(s, &n, "c1=0x%"SCNx32, &cd)) {
2201 nsh.context[0] = htonl(cd);
2202 continue;
2203 }
2204 if (ovs_scan_len(s, &n, "c2=0x%"SCNx32, &cd)) {
2205 nsh.context[1] = htonl(cd);
2206 continue;
2207 }
2208 if (ovs_scan_len(s, &n, "c3=0x%"SCNx32, &cd)) {
2209 nsh.context[2] = htonl(cd);
2210 continue;
2211 }
2212 if (ovs_scan_len(s, &n, "c4=0x%"SCNx32, &cd)) {
2213 nsh.context[3] = htonl(cd);
2214 continue;
2215 }
2216 }
2217 else if (nsh.mdtype == NSH_M_TYPE2) {
2218 struct ofpbuf b;
2219 char buf[512];
2220 size_t mdlen, padding;
2221 if (ovs_scan_len(s, &n, "md2=0x%511[0-9a-fA-F]", buf)
2222 && n/2 <= sizeof metadata) {
2223 ofpbuf_use_stub(&b, metadata, sizeof metadata);
2224 ofpbuf_put_hex(&b, buf, &mdlen);
2225 /* Pad metadata to 4 bytes. */
2226 padding = PAD_SIZE(mdlen, 4);
2227 if (padding > 0) {
2228 ofpbuf_put_zeros(&b, padding);
2229 }
2230 md_size = mdlen + padding;
2231 ofpbuf_uninit(&b);
2232 continue;
2233 }
2234 }
2235
2236 ret = -EINVAL;
2237 goto out;
2238 }
2239 out:
2240 if (ret >= 0) {
2241 nsh.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
2242 size_t offset = nl_msg_start_nested(actions, OVS_ACTION_ATTR_PUSH_NSH);
2243 nsh_key_to_attr(actions, &nsh, metadata, md_size, false);
2244 nl_msg_end_nested(actions, offset);
2245 ret = n;
2246 }
2247 return ret;
2248 }
2249
2250 static int
2251 parse_action_list(struct parse_odp_context *context, const char *s,
2252 struct ofpbuf *actions)
2253 {
2254 int n = 0;
2255
2256 for (;;) {
2257 int retval;
2258
2259 n += strspn(s + n, delimiters);
2260 if (s[n] == ')') {
2261 break;
2262 }
2263 retval = parse_odp_action(context, s + n, actions);
2264 if (retval < 0) {
2265 return retval;
2266 }
2267 n += retval;
2268 }
2269
2270 if (actions->size > UINT16_MAX) {
2271 return -EFBIG;
2272 }
2273
2274 return n;
2275 }
2276
2277
2278 static int
2279 parse_odp_action(struct parse_odp_context *context, const char *s,
2280 struct ofpbuf *actions)
2281 {
2282 int retval;
2283
2284 context->depth++;
2285
2286 if (context->depth == MAX_ODP_NESTED) {
2287 retval = -EINVAL;
2288 } else {
2289 retval = parse_odp_action__(context, s, actions);
2290 }
2291
2292 context->depth--;
2293
2294 return retval;
2295 }
2296
2297
2298 static int
2299 parse_odp_action__(struct parse_odp_context *context, const char *s,
2300 struct ofpbuf *actions)
2301 {
2302 {
2303 uint32_t port;
2304 int n;
2305
2306 if (ovs_scan(s, "%"SCNi32"%n", &port, &n)) {
2307 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, port);
2308 return n;
2309 }
2310 }
2311
2312 {
2313 uint32_t bond_id;
2314 int n;
2315
2316 if (ovs_scan(s, "lb_output(%"PRIu32")%n", &bond_id, &n)) {
2317 nl_msg_put_u32(actions, OVS_ACTION_ATTR_LB_OUTPUT, bond_id);
2318 return n;
2319 }
2320 }
2321
2322 {
2323 uint32_t max_len;
2324 int n;
2325
2326 if (ovs_scan(s, "trunc(%"SCNi32")%n", &max_len, &n)) {
2327 struct ovs_action_trunc *trunc;
2328
2329 trunc = nl_msg_put_unspec_uninit(actions,
2330 OVS_ACTION_ATTR_TRUNC, sizeof *trunc);
2331 trunc->max_len = max_len;
2332 return n;
2333 }
2334 }
2335
2336 if (context->port_names) {
2337 int len = strcspn(s, delimiters);
2338 struct simap_node *node;
2339
2340 node = simap_find_len(context->port_names, s, len);
2341 if (node) {
2342 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
2343 return len;
2344 }
2345 }
2346
2347 {
2348 uint32_t recirc_id;
2349 int n = -1;
2350
2351 if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
2352 nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
2353 return n;
2354 }
2355 }
2356
2357 if (!strncmp(s, "userspace(", 10)) {
2358 return parse_odp_userspace_action(s, actions);
2359 }
2360
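/* Illustrative note for the "set(" handling below: the key and its mask are
 * first parsed into attributes of equal size.  If the mask is all-ones, the
 * action stays a plain OVS_ACTION_ATTR_SET; otherwise the mask payload is
 * appended right after the key payload and the attribute is rewritten to
 * OVS_ACTION_ATTR_SET_MASKED, for instance when parsing something like
 * "set(ipv4(src=10.0.0.1/255.255.255.0))". */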
2361 if (!strncmp(s, "set(", 4)) {
2362 size_t start_ofs;
2363 int retval;
2364 struct nlattr mask[1024 / sizeof(struct nlattr)];
2365 struct ofpbuf maskbuf = OFPBUF_STUB_INITIALIZER(mask);
2366 struct nlattr *nested, *key;
2367 size_t size;
2368
2369 start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
2370 retval = parse_odp_key_mask_attr(context, s + 4, actions, &maskbuf);
2371 if (retval < 0) {
2372 ofpbuf_uninit(&maskbuf);
2373 return retval;
2374 }
2375 if (s[retval + 4] != ')') {
2376 ofpbuf_uninit(&maskbuf);
2377 return -EINVAL;
2378 }
2379
2380 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2381 key = nested + 1;
2382
2383 size = nl_attr_get_size(mask);
2384 if (size == nl_attr_get_size(key)) {
2385 /* Change to masked set action if not fully masked. */
2386 if (!is_all_ones(mask + 1, size)) {
2387 /* Remove padding of the earlier key payload. */
2388 actions->size -= NLA_ALIGN(key->nla_len) - key->nla_len;
2389
2390 /* Put mask payload right after key payload */
2391 key->nla_len += size;
2392 ofpbuf_put(actions, mask + 1, size);
2393
2394 /* 'actions' may have been reallocated by ofpbuf_put(). */
2395 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2396 nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
2397
2398 key = nested + 1;
2399 /* Add new padding as needed */
2400 ofpbuf_put_zeros(actions, NLA_ALIGN(key->nla_len) -
2401 key->nla_len);
2402 }
2403 }
2404 ofpbuf_uninit(&maskbuf);
2405
2406 nl_msg_end_nested(actions, start_ofs);
2407 return retval + 5;
2408 }
2409
2410 {
2411 struct ovs_action_push_vlan push;
2412 int tpid = ETH_TYPE_VLAN;
2413 int vid, pcp;
2414 int cfi = 1;
2415 int n = -1;
2416
2417 if (ovs_scan(s, "push_vlan(vid=%i,pcp=%i)%n", &vid, &pcp, &n)
2418 || ovs_scan(s, "push_vlan(vid=%i,pcp=%i,cfi=%i)%n",
2419 &vid, &pcp, &cfi, &n)
2420 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i)%n",
2421 &tpid, &vid, &pcp, &n)
2422 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
2423 &tpid, &vid, &pcp, &cfi, &n)) {
2424 if ((vid & ~(VLAN_VID_MASK >> VLAN_VID_SHIFT)) != 0
2425 || (pcp & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) != 0) {
2426 return -EINVAL;
2427 }
2428 push.vlan_tpid = htons(tpid);
2429 push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
2430 | (pcp << VLAN_PCP_SHIFT)
2431 | (cfi ? VLAN_CFI : 0));
2432 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_VLAN,
2433 &push, sizeof push);
2434
2435 return n;
2436 }
2437 }
2438
2439 if (!strncmp(s, "pop_vlan", 8)) {
2440 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_VLAN);
2441 return 8;
2442 }
2443
2444 {
2445 unsigned long long int meter_id;
2446 int n = -1;
2447
2448 if (sscanf(s, "meter(%lli)%n", &meter_id, &n) > 0 && n > 0) {
2449 nl_msg_put_u32(actions, OVS_ACTION_ATTR_METER, meter_id);
2450 return n;
2451 }
2452 }
2453
2454 {
2455 double percentage;
2456 int n = -1;
2457
2458 if (ovs_scan(s, "sample(sample=%lf%%,actions(%n", &percentage, &n)
2459 && percentage >= 0. && percentage <= 100.0) {
2460 size_t sample_ofs, actions_ofs;
2461 double probability;
2462
2463 probability = floor(UINT32_MAX * (percentage / 100.0) + .5);
2464 sample_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
2465 nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY,
2466 (probability <= 0 ? 0
2467 : probability >= UINT32_MAX ? UINT32_MAX
2468 : probability));
2469
2470 actions_ofs = nl_msg_start_nested(actions,
2471 OVS_SAMPLE_ATTR_ACTIONS);
2472 int retval = parse_action_list(context, s + n, actions);
2473 if (retval < 0) {
2474 return retval;
2475 }
2476
2477
2478 n += retval;
2479 nl_msg_end_nested(actions, actions_ofs);
2480 nl_msg_end_nested(actions, sample_ofs);
2481
2482 return s[n + 1] == ')' ? n + 2 : -EINVAL;
2483 }
2484 }
2485
2486 {
2487 if (!strncmp(s, "clone(", 6)) {
2488 size_t actions_ofs;
2489 int n = 6;
2490
2491 actions_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CLONE);
2492 int retval = parse_action_list(context, s + n, actions);
2493 if (retval < 0) {
2494 return retval;
2495 }
2496 n += retval;
2497 nl_msg_end_nested(actions, actions_ofs);
2498 return n + 1;
2499 }
2500 }
2501
2502 {
2503 if (!strncmp(s, "push_nsh(", 9)) {
2504 int retval = parse_odp_push_nsh_action(s, actions);
2505 if (retval < 0) {
2506 return retval;
2507 }
2508 return retval + 1;
2509 }
2510 }
2511
2512 {
2513 int n;
2514 if (ovs_scan(s, "pop_nsh()%n", &n)) {
2515 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_NSH);
2516 return n;
2517 }
2518 }
2519
2520 {
2521 uint32_t port;
2522 int n;
2523
2524 if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
2525 nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
2526 return n;
2527 }
2528 }
2529
2530 {
2531 if (!strncmp(s, "ct_clear", 8)) {
2532 nl_msg_put_flag(actions, OVS_ACTION_ATTR_CT_CLEAR);
2533 return 8;
2534 }
2535 }
2536
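/* A minimal illustration of the check_pkt_len() syntax handled below (the
 * size and port numbers are arbitrary):
 *
 *     check_pkt_len(size=200,gt(4),le(5))
 *
 * The "gt" action list applies when the packet is larger than 'size', the
 * "le" list otherwise; either list may be the literal word "drop". */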
2537 {
2538 uint16_t pkt_len;
2539 int n = -1;
2540 if (ovs_scan(s, "check_pkt_len(size=%"SCNi16",gt(%n", &pkt_len, &n)) {
2541 size_t cpl_ofs, actions_ofs;
2542 cpl_ofs = nl_msg_start_nested(actions,
2543 OVS_ACTION_ATTR_CHECK_PKT_LEN);
2544 nl_msg_put_u16(actions, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, pkt_len);
2545 actions_ofs = nl_msg_start_nested(
2546 actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
2547
2548 int retval;
2549 if (!strncasecmp(s + n, "drop", 4)) {
2550 n += 4;
2551 } else {
2552 retval = parse_action_list(context, s + n, actions);
2553 if (retval < 0) {
2554 return retval;
2555 }
2556
2557 n += retval;
2558 }
2559 nl_msg_end_nested(actions, actions_ofs);
2560 retval = -1;
2561 if (!ovs_scan(s + n, "),le(%n", &retval)) {
2562 return -EINVAL;
2563 }
2564 n += retval;
2565
2566 actions_ofs = nl_msg_start_nested(
2567 actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
2568 if (!strncasecmp(s + n, "drop", 4)) {
2569 n += 4;
2570 } else {
2571 retval = parse_action_list(context, s + n, actions);
2572 if (retval < 0) {
2573 return retval;
2574 }
2575 n += retval;
2576 }
2577 nl_msg_end_nested(actions, actions_ofs);
2578 nl_msg_end_nested(actions, cpl_ofs);
2579 return s[n + 1] == ')' ? n + 2 : -EINVAL;
2580 }
2581 }
2582
2583 {
2584 int retval;
2585
2586 retval = parse_conntrack_action(s, actions);
2587 if (retval) {
2588 return retval;
2589 }
2590 }
2591
2592 {
2593 struct ovs_action_push_tnl data;
2594 int n;
2595
2596 n = ovs_parse_tnl_push(s, &data);
2597 if (n > 0) {
2598 odp_put_tnl_push_action(actions, &data);
2599 return n;
2600 } else if (n < 0) {
2601 return n;
2602 }
2603 }
2604
2605 return -EINVAL;
2606 }
2607
2608 /* Parses the string representation of datapath actions, in the format output
2609 * by format_odp_action(). Returns 0 if successful, otherwise a positive errno
2610 * value. On success, the ODP actions are appended to 'actions' as a series of
2611 * Netlink attributes. On failure, no data is appended to 'actions'. Either
2612 * way, 'actions''s data might be reallocated. */
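/*
 * A minimal usage sketch (illustrative only; error handling elided):
 *
 *     struct ofpbuf actions;
 *     ofpbuf_init(&actions, 64);
 *     int error = odp_actions_from_string("2,recirc(1),ct(commit)",
 *                                         NULL, &actions);
 *     if (!error) {
 *         ... 'actions' now holds OVS_ACTION_ATTR_OUTPUT, _RECIRC and _CT
 *         attributes in Netlink format ...
 *     }
 *     ofpbuf_uninit(&actions);
 */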
2613 int
2614 odp_actions_from_string(const char *s, const struct simap *port_names,
2615 struct ofpbuf *actions)
2616 {
2617 size_t old_size;
2618
2619 if (!strcasecmp(s, "drop")) {
2620 nl_msg_put_u32(actions, OVS_ACTION_ATTR_DROP, XLATE_OK);
2621 return 0;
2622 }
2623
2624 struct parse_odp_context context = (struct parse_odp_context) {
2625 .port_names = port_names,
2626 };
2627
2628 old_size = actions->size;
2629 for (;;) {
2630 int retval;
2631
2632 s += strspn(s, delimiters);
2633 if (!*s) {
2634 return 0;
2635 }
2636
2637 retval = parse_odp_action(&context, s, actions);
2638
2639 if (retval < 0 || !strchr(delimiters, s[retval])) {
2640 actions->size = old_size;
2641 return -retval;
2642 }
2643 s += retval;
2644 }
2645
2646 return 0;
2647 }
2648 \f
2649 static const struct attr_len_tbl ovs_vxlan_ext_attr_lens[OVS_VXLAN_EXT_MAX + 1] = {
2650 [OVS_VXLAN_EXT_GBP] = { .len = 4 },
2651 };
2652
2653 static const struct attr_len_tbl ovs_tun_key_attr_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
2654 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = 8 },
2655 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = 4 },
2656 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = 4 },
2657 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
2658 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
2659 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
2660 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
2661 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = 2 },
2662 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = 2 },
2663 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
2664 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = ATTR_LEN_VARIABLE },
2665 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = ATTR_LEN_NESTED,
2666 .next = ovs_vxlan_ext_attr_lens ,
2667 .next_max = OVS_VXLAN_EXT_MAX},
2668 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = 16 },
2669 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = 16 },
2670 [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = ATTR_LEN_VARIABLE },
2671 [OVS_TUNNEL_KEY_ATTR_GTPU_OPTS] = { .len = ATTR_LEN_VARIABLE },
2672 };
2673
2674 const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
2675 [OVS_KEY_ATTR_ENCAP] = { .len = ATTR_LEN_NESTED },
2676 [OVS_KEY_ATTR_PRIORITY] = { .len = 4 },
2677 [OVS_KEY_ATTR_SKB_MARK] = { .len = 4 },
2678 [OVS_KEY_ATTR_DP_HASH] = { .len = 4 },
2679 [OVS_KEY_ATTR_RECIRC_ID] = { .len = 4 },
2680 [OVS_KEY_ATTR_TUNNEL] = { .len = ATTR_LEN_NESTED,
2681 .next = ovs_tun_key_attr_lens,
2682 .next_max = OVS_TUNNEL_KEY_ATTR_MAX },
2683 [OVS_KEY_ATTR_IN_PORT] = { .len = 4 },
2684 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
2685 [OVS_KEY_ATTR_VLAN] = { .len = 2 },
2686 [OVS_KEY_ATTR_ETHERTYPE] = { .len = 2 },
2687 [OVS_KEY_ATTR_MPLS] = { .len = ATTR_LEN_VARIABLE },
2688 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
2689 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
2690 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
2691 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = 2 },
2692 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
2693 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
2694 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
2695 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
2696 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
2697 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
2698 [OVS_KEY_ATTR_ND_EXTENSIONS] = { .len = sizeof(struct ovs_key_nd_extensions) },
2699 [OVS_KEY_ATTR_CT_STATE] = { .len = 4 },
2700 [OVS_KEY_ATTR_CT_ZONE] = { .len = 2 },
2701 [OVS_KEY_ATTR_CT_MARK] = { .len = 4 },
2702 [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
2703 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = { .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
2704 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = { .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
2705 [OVS_KEY_ATTR_PACKET_TYPE] = { .len = 4 },
2706 [OVS_KEY_ATTR_NSH] = { .len = ATTR_LEN_NESTED,
2707 .next = ovs_nsh_key_attr_lens,
2708 .next_max = OVS_NSH_KEY_ATTR_MAX },
2709 };
2710
2711 /* Returns the correct length of the payload for a flow key attribute of the
2712 * specified 'type', ATTR_LEN_INVALID if 'type' is unknown, ATTR_LEN_VARIABLE
2713 * if the attribute's payload is variable length, or ATTR_LEN_NESTED if the
2714 * payload is a nested type. */
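/* For example, with the tables above, odp_key_attr_len(ovs_flow_key_attr_lens,
 * OVS_KEY_ATTR_MAX, OVS_KEY_ATTR_PRIORITY) returns 4, while passing
 * OVS_KEY_ATTR_TUNNEL instead returns ATTR_LEN_NESTED. */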
2715 static int
2716 odp_key_attr_len(const struct attr_len_tbl tbl[], int max_type, uint16_t type)
2717 {
2718 if (type > max_type) {
2719 return ATTR_LEN_INVALID;
2720 }
2721
2722 return tbl[type].len;
2723 }
2724
2725 static void
2726 format_generic_odp_key(const struct nlattr *a, struct ds *ds)
2727 {
2728 size_t len = nl_attr_get_size(a);
2729 if (len) {
2730 const uint8_t *unspec;
2731 unsigned int i;
2732
2733 unspec = nl_attr_get(a);
2734 for (i = 0; i < len; i++) {
2735 if (i) {
2736 ds_put_char(ds, ' ');
2737 }
2738 ds_put_format(ds, "%02x", unspec[i]);
2739 }
2740 }
2741 }
2742
2743 static const char *
2744 ovs_frag_type_to_string(enum ovs_frag_type type)
2745 {
2746 switch (type) {
2747 case OVS_FRAG_TYPE_NONE:
2748 return "no";
2749 case OVS_FRAG_TYPE_FIRST:
2750 return "first";
2751 case OVS_FRAG_TYPE_LATER:
2752 return "later";
2753 case __OVS_FRAG_TYPE_MAX:
2754 default:
2755 return "<error>";
2756 }
2757 }
2758
2759 enum odp_key_fitness
2760 odp_nsh_hdr_from_attr(const struct nlattr *attr,
2761 struct nsh_hdr *nsh_hdr, size_t size)
2762 {
2763 unsigned int left;
2764 const struct nlattr *a;
2765 bool unknown = false;
2766 uint8_t flags = 0;
2767 uint8_t ttl = 63;
2768 size_t mdlen = 0;
2769 bool has_md1 = false;
2770 bool has_md2 = false;
2771
2772 memset(nsh_hdr, 0, size);
2773
2774 NL_NESTED_FOR_EACH (a, left, attr) {
2775 uint16_t type = nl_attr_type(a);
2776 size_t len = nl_attr_get_size(a);
2777 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2778 OVS_NSH_KEY_ATTR_MAX, type);
2779
2780 if (len != expected_len && expected_len >= 0) {
2781 return ODP_FIT_ERROR;
2782 }
2783
2784 switch (type) {
2785 case OVS_NSH_KEY_ATTR_BASE: {
2786 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2787 nsh_hdr->next_proto = base->np;
2788 nsh_hdr->md_type = base->mdtype;
2789 put_16aligned_be32(&nsh_hdr->path_hdr, base->path_hdr);
2790 flags = base->flags;
2791 ttl = base->ttl;
2792 break;
2793 }
2794 case OVS_NSH_KEY_ATTR_MD1: {
2795 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2796 struct nsh_md1_ctx *md1_dst = &nsh_hdr->md1;
2797 has_md1 = true;
2798 mdlen = nl_attr_get_size(a);
2799 if ((mdlen + NSH_BASE_HDR_LEN != NSH_M_TYPE1_LEN) ||
2800 (mdlen + NSH_BASE_HDR_LEN > size)) {
2801 return ODP_FIT_ERROR;
2802 }
2803 memcpy(md1_dst, md1, mdlen);
2804 break;
2805 }
2806 case OVS_NSH_KEY_ATTR_MD2: {
2807 struct nsh_md2_tlv *md2_dst = &nsh_hdr->md2;
2808 const uint8_t *md2 = nl_attr_get(a);
2809 has_md2 = true;
2810 mdlen = nl_attr_get_size(a);
2811 if (mdlen + NSH_BASE_HDR_LEN > size) {
2812 return ODP_FIT_ERROR;
2813 }
2814 memcpy(md2_dst, md2, mdlen);
2815 break;
2816 }
2817 default:
2818 /* Allow this to show up as unexpected, if there are unknown
2819 * NSH attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
2820 unknown = true;
2821 break;
2822 }
2823 }
2824
2825 if (unknown) {
2826 return ODP_FIT_TOO_MUCH;
2827 }
2828
2829 if ((has_md1 && nsh_hdr->md_type != NSH_M_TYPE1)
2830 || (has_md2 && nsh_hdr->md_type != NSH_M_TYPE2)) {
2831 return ODP_FIT_ERROR;
2832 }
2833
2834 /* nsh header length = NSH_BASE_HDR_LEN + mdlen */
2835 nsh_set_flags_ttl_len(nsh_hdr, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
2836
2837 return ODP_FIT_PERFECT;
2838 }
2839
2840 /* Reports the error 'msg', which is formatted as with printf().
2841 *
2842 * If 'errorp' is nonnull, then the caller wants the error report to come
2843 * directly back to it, so the function stores the error message into '*errorp'
2844 * (after first freeing it in case there's something there already).
2845 *
2846 * Otherwise, logs the message at WARN level, rate-limited. */
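/* A minimal usage sketch, mirroring the calls later in this file:
 *
 *     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 *     odp_parse_error(&rl, errorp, "attribute %"PRIu16" has length %"PRIuSIZE,
 *                     type, len);
 *
 * With a nonnull 'errorp' the formatted message ends up in '*errorp';
 * otherwise it is logged at WARN level, rate-limited by 'rl'. */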
2847 static void OVS_PRINTF_FORMAT(3, 4)
2848 odp_parse_error(struct vlog_rate_limit *rl, char **errorp,
2849 const char *msg, ...)
2850 {
2851 if (OVS_UNLIKELY(errorp)) {
2852 free(*errorp);
2853
2854 va_list args;
2855 va_start(args, msg);
2856 *errorp = xvasprintf(msg, args);
2857 va_end(args);
2858 } else if (!VLOG_DROP_WARN(rl)) {
2859 va_list args;
2860 va_start(args, msg);
2861 char *error = xvasprintf(msg, args);
2862 va_end(args);
2863
2864 VLOG_WARN("%s", error);
2865
2866 free(error);
2867 }
2868 }
2869
2870 /* Parses OVS_KEY_ATTR_NSH attribute 'attr' into 'nsh' and 'nsh_mask' and
2871 * returns fitness. If the attribute is a key, 'is_mask' should be false;
2872 * if it is a mask, 'is_mask' should be true. If 'errorp' is nonnull and the
2873 * function returns ODP_FIT_ERROR, stores a malloc()'d error message in
2874 * '*errorp'. */
2875 static enum odp_key_fitness
2876 odp_nsh_key_from_attr__(const struct nlattr *attr, bool is_mask,
2877 struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask,
2878 char **errorp)
2879 {
2880 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2881 if (errorp) {
2882 *errorp = NULL;
2883 }
2884
2885 unsigned int left;
2886 const struct nlattr *a;
2887 bool unknown = false;
2888 bool has_md1 = false;
2889
2890 NL_NESTED_FOR_EACH (a, left, attr) {
2891 uint16_t type = nl_attr_type(a);
2892 size_t len = nl_attr_get_size(a);
2893 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2894 OVS_NSH_KEY_ATTR_MAX, type);
2895 if (expected_len) {
2896 if (nsh_mask) {
2897 expected_len *= 2;
2898 }
2899 if (len != expected_len) {
2900 odp_parse_error(&rl, errorp, "NSH %s attribute %"PRIu16" "
2901 "should have length %d but actually has "
2902 "%"PRIuSIZE,
2903 nsh_mask ? "mask" : "key",
2904 type, expected_len, len);
2905 return ODP_FIT_ERROR;
2906 }
2907 }
2908
2909 switch (type) {
2910 case OVS_NSH_KEY_ATTR_UNSPEC:
2911 break;
2912 case OVS_NSH_KEY_ATTR_BASE: {
2913 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2914 nsh->flags = base->flags;
2915 nsh->ttl = base->ttl;
2916 nsh->mdtype = base->mdtype;
2917 nsh->np = base->np;
2918 nsh->path_hdr = base->path_hdr;
2919 if (nsh_mask && (len == 2 * sizeof(*base))) {
2920 const struct ovs_nsh_key_base *base_mask = base + 1;
2921 nsh_mask->flags = base_mask->flags;
2922 nsh_mask->ttl = base_mask->ttl;
2923 nsh_mask->mdtype = base_mask->mdtype;
2924 nsh_mask->np = base_mask->np;
2925 nsh_mask->path_hdr = base_mask->path_hdr;
2926 }
2927 break;
2928 }
2929 case OVS_NSH_KEY_ATTR_MD1: {
2930 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2931 has_md1 = true;
2932 memcpy(nsh->context, md1->context, sizeof md1->context);
2933 if (len == 2 * sizeof(*md1)) {
2934 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
2935 memcpy(nsh_mask->context, md1_mask->context,
2936 sizeof(*md1_mask));
2937 }
2938 break;
2939 }
2940 case OVS_NSH_KEY_ATTR_MD2:
2941 default:
2942 /* Allow this to show up as unexpected, if there are unknown
2943 * NSH attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
2944 unknown = true;
2945 break;
2946 }
2947 }
2948
2949 if (unknown) {
2950 return ODP_FIT_TOO_MUCH;
2951 }
2952
2953 if (!is_mask && has_md1 && nsh->mdtype != NSH_M_TYPE1 && !nsh_mask) {
2954 odp_parse_error(&rl, errorp, "OVS_NSH_KEY_ATTR_MD1 present but "
2955 "declared mdtype %"PRIu8" is not %d (NSH_M_TYPE1)",
2956 nsh->mdtype, NSH_M_TYPE1);
2957 return ODP_FIT_ERROR;
2958 }
2959
2960 return ODP_FIT_PERFECT;
2961 }
2962
2963 /* Parses OVS_KEY_ATTR_NSH attribute 'attr' into 'nsh' and 'nsh_mask' and
2964 * returns fitness. The attribute should be a key (not a mask). If 'errorp'
2965 * is nonnull and the function returns ODP_FIT_ERROR, stores a malloc()'d error
2966 * message in '*errorp'. */
2967 enum odp_key_fitness
2968 odp_nsh_key_from_attr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
2969 struct ovs_key_nsh *nsh_mask, char **errorp)
2970 {
2971 return odp_nsh_key_from_attr__(attr, false, nsh, nsh_mask, errorp);
2972 }
2973
2974 /* Parses OVS_KEY_ATTR_TUNNEL attribute 'attr' into 'tun' and returns fitness.
2975 * If the attribute is a key, 'is_mask' should be false; if it is a mask,
2976 * 'is_mask' should be true. If 'errorp' is nonnull and the function returns
2977 * ODP_FIT_ERROR, stores a malloc()'d error message in '*errorp'. */
2978 static enum odp_key_fitness
2979 odp_tun_key_from_attr__(const struct nlattr *attr, bool is_mask,
2980 struct flow_tnl *tun, char **errorp)
2981 {
2982 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2983 unsigned int left;
2984 const struct nlattr *a;
2985 bool ttl = false;
2986 bool unknown = false;
2987
2988 NL_NESTED_FOR_EACH(a, left, attr) {
2989 uint16_t type = nl_attr_type(a);
2990 size_t len = nl_attr_get_size(a);
2991 int expected_len = odp_key_attr_len(ovs_tun_key_attr_lens,
2992 OVS_TUNNEL_ATTR_MAX, type);
2993
2994 if (len != expected_len && expected_len >= 0) {
2995 odp_parse_error(&rl, errorp, "tunnel key attribute %"PRIu16" "
2996 "should have length %d but actually has %"PRIuSIZE,
2997 type, expected_len, len);
2998 return ODP_FIT_ERROR;
2999 }
3000
3001 switch (type) {
3002 case OVS_TUNNEL_KEY_ATTR_ID:
3003 tun->tun_id = nl_attr_get_be64(a);
3004 tun->flags |= FLOW_TNL_F_KEY;
3005 break;
3006 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
3007 tun->ip_src = nl_attr_get_be32(a);
3008 break;
3009 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
3010 tun->ip_dst = nl_attr_get_be32(a);
3011 break;
3012 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
3013 tun->ipv6_src = nl_attr_get_in6_addr(a);
3014 break;
3015 case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
3016 tun->ipv6_dst = nl_attr_get_in6_addr(a);
3017 break;
3018 case OVS_TUNNEL_KEY_ATTR_TOS:
3019 tun->ip_tos = nl_attr_get_u8(a);
3020 break;
3021 case OVS_TUNNEL_KEY_ATTR_TTL:
3022 tun->ip_ttl = nl_attr_get_u8(a);
3023 ttl = true;
3024 break;
3025 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3026 tun->flags |= FLOW_TNL_F_DONT_FRAGMENT;
3027 break;
3028 case OVS_TUNNEL_KEY_ATTR_CSUM:
3029 tun->flags |= FLOW_TNL_F_CSUM;
3030 break;
3031 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
3032 tun->tp_src = nl_attr_get_be16(a);
3033 break;
3034 case OVS_TUNNEL_KEY_ATTR_TP_DST:
3035 tun->tp_dst = nl_attr_get_be16(a);
3036 break;
3037 case OVS_TUNNEL_KEY_ATTR_OAM:
3038 tun->flags |= FLOW_TNL_F_OAM;
3039 break;
3040 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
3041 static const struct nl_policy vxlan_opts_policy[] = {
3042 [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
3043 };
3044 struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
3045
3046 if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
3047 odp_parse_error(&rl, errorp, "error parsing VXLAN options");
3048 return ODP_FIT_ERROR;
3049 }
3050
3051 if (ext[OVS_VXLAN_EXT_GBP]) {
3052 uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
3053
3054 tun->gbp_id = htons(gbp & 0xFFFF);
3055 tun->gbp_flags = (gbp >> 16) & 0xFF;
3056 }
3057
3058 break;
3059 }
3060 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
3061 tun_metadata_from_geneve_nlattr(a, is_mask, tun);
3062 break;
3063 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: {
3064 const struct erspan_metadata *opts = nl_attr_get(a);
3065
3066 tun->erspan_ver = opts->version;
3067 if (tun->erspan_ver == 1) {
3068 tun->erspan_idx = ntohl(opts->u.index);
3069 } else if (tun->erspan_ver == 2) {
3070 tun->erspan_dir = opts->u.md2.dir;
3071 tun->erspan_hwid = get_hwid(&opts->u.md2);
3072 } else {
3073 VLOG_WARN("%s invalid erspan version\n", __func__);
3074 }
3075 break;
3076 }
3077 case OVS_TUNNEL_KEY_ATTR_GTPU_OPTS: {
3078 const struct gtpu_metadata *opts = nl_attr_get(a);
3079
3080 tun->gtpu_flags = opts->flags;
3081 tun->gtpu_msgtype = opts->msgtype;
3082 break;
3083 }
3084
3085 default:
3086 /* Allow this to show up as unexpected, if there are unknown
3087 * tunnel attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
3088 unknown = true;
3089 break;
3090 }
3091 }
3092
3093 if (!ttl) {
3094 odp_parse_error(&rl, errorp, "tunnel options missing TTL");
3095 return ODP_FIT_ERROR;
3096 }
3097 if (unknown) {
3098 return ODP_FIT_TOO_MUCH;
3099 }
3100 return ODP_FIT_PERFECT;
3101 }
3102
3103 /* Parses OVS_KEY_ATTR_TUNNEL key attribute 'attr' into 'tun' and returns
3104 * fitness. The attribute should be a key (not a mask). If 'errorp' is
3105 * nonnull, stores NULL into '*errorp' on success, otherwise a malloc()'d error
3106 * message. */
3107 enum odp_key_fitness
3108 odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun,
3109 char **errorp)
3110 {
3111 if (errorp) {
3112 *errorp = NULL;
3113 }
3114 memset(tun, 0, sizeof *tun);
3115 return odp_tun_key_from_attr__(attr, false, tun, errorp);
3116 }
3117
3118 static void
3119 tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key,
3120 const struct flow_tnl *tun_flow_key,
3121 const struct ofpbuf *key_buf, const char *tnl_type)
3122 {
3123 size_t tun_key_ofs;
3124
3125 tun_key_ofs = nl_msg_start_nested(a, OVS_KEY_ATTR_TUNNEL);
3126
3127 /* tun_id != 0 without FLOW_TNL_F_KEY is valid if tun_key is a mask. */
3128 if (tun_key->tun_id || tun_key->flags & FLOW_TNL_F_KEY) {
3129 nl_msg_put_be64(a, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id);
3130 }
3131 if (tun_key->ip_src) {
3132 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ip_src);
3133 }
3134 if (tun_key->ip_dst) {
3135 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
3136 }
3137 if (ipv6_addr_is_set(&tun_key->ipv6_src)) {
3138 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_SRC, &tun_key->ipv6_src);
3139 }
3140 if (ipv6_addr_is_set(&tun_key->ipv6_dst)) {
3141 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_DST, &tun_key->ipv6_dst);
3142 }
3143 if (tun_key->ip_tos) {
3144 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
3145 }
3146 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ip_ttl);
3147 if (tun_key->flags & FLOW_TNL_F_DONT_FRAGMENT) {
3148 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
3149 }
3150 if (tun_key->flags & FLOW_TNL_F_CSUM) {
3151 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
3152 }
3153 if (tun_key->tp_src) {
3154 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src);
3155 }
3156 if (tun_key->tp_dst) {
3157 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst);
3158 }
3159 if (tun_key->flags & FLOW_TNL_F_OAM) {
3160 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
3161 }
3162
3163 /* If tnl_type names a particular type of output tunnel, put only the
3164 * tunnel metadata relevant to that type into the nlattr.
3165 * If tnl_type is NULL, put tunnel metadata according to the contents
3166 * of 'tun_key'.
3167 */
3168 if ((!tnl_type || !strcmp(tnl_type, "vxlan")) &&
3169 (tun_key->gbp_flags || tun_key->gbp_id)) {
3170 size_t vxlan_opts_ofs;
3171
3172 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
3173 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
3174 (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
3175 nl_msg_end_nested(a, vxlan_opts_ofs);
3176 }
3177
3178 if (!tnl_type || !strcmp(tnl_type, "geneve")) {
3179 tun_metadata_to_geneve_nlattr(tun_key, tun_flow_key, key_buf, a);
3180 }
3181
3182 if ((!tnl_type || !strcmp(tnl_type, "erspan") ||
3183 !strcmp(tnl_type, "ip6erspan")) &&
3184 (tun_key->erspan_ver == 1 || tun_key->erspan_ver == 2)) {
3185 struct erspan_metadata opts;
3186
3187 opts.version = tun_key->erspan_ver;
3188 if (opts.version == 1) {
3189 opts.u.index = htonl(tun_key->erspan_idx);
3190 } else {
3191 opts.u.md2.dir = tun_key->erspan_dir;
3192 set_hwid(&opts.u.md2, tun_key->erspan_hwid);
3193 }
3194 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
3195 &opts, sizeof(opts));
3196 }
3197
3198 if ((!tnl_type || !strcmp(tnl_type, "gtpu")) &&
3199 (tun_key->gtpu_flags && tun_key->gtpu_msgtype)) {
3200 struct gtpu_metadata opts;
3201
3202 opts.flags = tun_key->gtpu_flags;
3203 opts.msgtype = tun_key->gtpu_msgtype;
3204 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GTPU_OPTS,
3205 &opts, sizeof(opts));
3206 }
3207 nl_msg_end_nested(a, tun_key_ofs);
3208 }
3209
3210 static bool
3211 odp_mask_is_constant__(enum ovs_key_attr attr, const void *mask, size_t size,
3212 int constant)
3213 {
3214 /* Convert 'constant' to all the widths we need. C conversion rules ensure
3215 * that -1 becomes all-1-bits and 0 does not change. */
3216 ovs_be16 be16 = (OVS_FORCE ovs_be16) constant;
3217 uint32_t u32 = constant;
3218 uint8_t u8 = constant;
3219 const struct in6_addr *in6 = constant ? &in6addr_exact : &in6addr_any;
3220
3221 switch (attr) {
3222 case OVS_KEY_ATTR_UNSPEC:
3223 case OVS_KEY_ATTR_ENCAP:
3224 case __OVS_KEY_ATTR_MAX:
3225 default:
3226 return false;
3227
3228 case OVS_KEY_ATTR_PRIORITY:
3229 case OVS_KEY_ATTR_IN_PORT:
3230 case OVS_KEY_ATTR_ETHERNET:
3231 case OVS_KEY_ATTR_VLAN:
3232 case OVS_KEY_ATTR_ETHERTYPE:
3233 case OVS_KEY_ATTR_IPV4:
3234 case OVS_KEY_ATTR_TCP:
3235 case OVS_KEY_ATTR_UDP:
3236 case OVS_KEY_ATTR_ICMP:
3237 case OVS_KEY_ATTR_ICMPV6:
3238 case OVS_KEY_ATTR_ND:
3239 case OVS_KEY_ATTR_ND_EXTENSIONS:
3240 case OVS_KEY_ATTR_SKB_MARK:
3241 case OVS_KEY_ATTR_TUNNEL:
3242 case OVS_KEY_ATTR_SCTP:
3243 case OVS_KEY_ATTR_DP_HASH:
3244 case OVS_KEY_ATTR_RECIRC_ID:
3245 case OVS_KEY_ATTR_MPLS:
3246 case OVS_KEY_ATTR_CT_STATE:
3247 case OVS_KEY_ATTR_CT_ZONE:
3248 case OVS_KEY_ATTR_CT_MARK:
3249 case OVS_KEY_ATTR_CT_LABELS:
3250 case OVS_KEY_ATTR_PACKET_TYPE:
3251 case OVS_KEY_ATTR_NSH:
3252 return is_all_byte(mask, size, u8);
3253
3254 case OVS_KEY_ATTR_TCP_FLAGS:
3255 return TCP_FLAGS(*(ovs_be16 *) mask) == TCP_FLAGS(be16);
3256
3257 case OVS_KEY_ATTR_IPV6: {
3258 const struct ovs_key_ipv6 *ipv6_mask = mask;
3259 return ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
3260 == htonl(IPV6_LABEL_MASK & u32)
3261 && ipv6_mask->ipv6_proto == u8
3262 && ipv6_mask->ipv6_tclass == u8
3263 && ipv6_mask->ipv6_hlimit == u8
3264 && ipv6_mask->ipv6_frag == u8
3265 && ipv6_addr_equals(&ipv6_mask->ipv6_src, in6)
3266 && ipv6_addr_equals(&ipv6_mask->ipv6_dst, in6));
3267 }
3268
3269 case OVS_KEY_ATTR_ARP:
3270 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_arp, arp_tha), u8);
3271
3272 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
3273 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv4,
3274 ipv4_proto), u8);
3275
3276 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
3277 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv6,
3278 ipv6_proto), u8);
3279 }
3280 }
3281
3282 /* The caller must already have verified that 'ma' has a correct length.
3283 *
3284 * The main purpose of this function is formatting, to allow code to figure out
3285 * whether the mask can be omitted. It doesn't try hard for attributes that
3286 * contain sub-attributes, etc., because normally those would be broken down
3287 * further for formatting. */
3288 static bool
3289 odp_mask_attr_is_wildcard(const struct nlattr *ma)
3290 {
3291 return odp_mask_is_constant__(nl_attr_type(ma),
3292 nl_attr_get(ma), nl_attr_get_size(ma), 0);
3293 }
3294
3295 /* The caller must already have verified that 'size' is a correct length for
3296 * 'attr'.
3297 *
3298 * The main purpose of this function is formatting, to allow code to figure out
3299 * whether the mask can be omitted. It doesn't try hard for attributes that
3300 * contain sub-attributes, etc., because normally those would be broken down
3301 * further for formatting. */
3302 static bool
3303 odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
3304 {
3305 return odp_mask_is_constant__(attr, mask, size, -1);
3306 }
3307
3308 /* The caller must already have verified that 'ma' has a correct length. */
3309 static bool
3310 odp_mask_attr_is_exact(const struct nlattr *ma)
3311 {
3312 enum ovs_key_attr attr = nl_attr_type(ma);
3313 return odp_mask_is_exact(attr, nl_attr_get(ma), nl_attr_get_size(ma));
3314 }
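/* For example, an OVS_KEY_ATTR_ETHERNET attribute whose payload is all zero
 * bytes makes odp_mask_attr_is_wildcard() return true (so the field can be
 * omitted when formatting), while an all-0xff payload makes
 * odp_mask_attr_is_exact() return true. */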
3315
3316 void
3317 odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
3318 char *port_name)
3319 {
3320 struct odp_portno_names *odp_portno_names;
3321
3322 odp_portno_names = xmalloc(sizeof *odp_portno_names);
3323 odp_portno_names->port_no = port_no;
3324 odp_portno_names->name = xstrdup(port_name);
3325 hmap_insert(portno_names, &odp_portno_names->hmap_node,
3326 hash_odp_port(port_no));
3327 }
3328
3329 static char *
3330 odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
3331 {
3332 if (portno_names) {
3333 struct odp_portno_names *odp_portno_names;
3334
3335 HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
3336 hash_odp_port(port_no), portno_names) {
3337 if (odp_portno_names->port_no == port_no) {
3338 return odp_portno_names->name;
3339 }
3340 }
3341 }
3342 return NULL;
3343 }
3344
3345 void
3346 odp_portno_names_destroy(struct hmap *portno_names)
3347 {
3348 struct odp_portno_names *odp_portno_names;
3349
3350 HMAP_FOR_EACH_POP (odp_portno_names, hmap_node, portno_names) {
3351 free(odp_portno_names->name);
3352 free(odp_portno_names);
3353 }
3354 }
3355
3356 void
3357 odp_portno_name_format(const struct hmap *portno_names, odp_port_t port_no,
3358 struct ds *s)
3359 {
3360 const char *name = odp_portno_names_get(portno_names, port_no);
3361 if (name) {
3362 ds_put_cstr(s, name);
3363 } else {
3364 ds_put_format(s, "%"PRIu32, port_no);
3365 }
3366 }
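/* A minimal usage sketch for the port-number/name map above (illustrative;
 * assumes u32_to_odp() and HMAP_INITIALIZER() from the OVS tree and a
 * 'struct ds ds' initialized elsewhere):
 *
 *     struct hmap names = HMAP_INITIALIZER(&names);
 *     odp_portno_names_set(&names, u32_to_odp(1), "br0");
 *     odp_portno_name_format(&names, u32_to_odp(1), &ds);    appends "br0"
 *     odp_portno_name_format(&names, u32_to_odp(7), &ds);    appends "7"
 *     odp_portno_names_destroy(&names);
 *     hmap_destroy(&names);
 */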
3367
3368 /* Format helpers. */
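/* Most of the helpers below share one output convention: each appends
 * "name=value," when the field is fully masked (or no mask is given),
 * "name=value/mask," when it is partially masked, and nothing at all when the
 * mask is empty and 'verbose' is false.  For example, format_ipv4() might
 * append "src=10.0.0.1/255.255.255.0,". */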
3369
3370 static void
3371 format_eth(struct ds *ds, const char *name, const struct eth_addr key,
3372 const struct eth_addr *mask, bool verbose)
3373 {
3374 bool mask_empty = mask && eth_addr_is_zero(*mask);
3375
3376 if (verbose || !mask_empty) {
3377 bool mask_full = !mask || eth_mask_is_exact(*mask);
3378
3379 if (mask_full) {
3380 ds_put_format(ds, "%s="ETH_ADDR_FMT",", name, ETH_ADDR_ARGS(key));
3381 } else {
3382 ds_put_format(ds, "%s=", name);
3383 eth_format_masked(key, mask, ds);
3384 ds_put_char(ds, ',');
3385 }
3386 }
3387 }
3388
3389
3390 static void
3391 format_be64(struct ds *ds, const char *name, ovs_be64 key,
3392 const ovs_be64 *mask, bool verbose)
3393 {
3394 bool mask_empty = mask && !*mask;
3395
3396 if (verbose || !mask_empty) {
3397 bool mask_full = !mask || *mask == OVS_BE64_MAX;
3398
3399 ds_put_format(ds, "%s=0x%"PRIx64, name, ntohll(key));
3400 if (!mask_full) { /* Partially masked. */
3401 ds_put_format(ds, "/%#"PRIx64, ntohll(*mask));
3402 }
3403 ds_put_char(ds, ',');
3404 }
3405 }
3406
3407 static void
3408 format_ipv4(struct ds *ds, const char *name, ovs_be32 key,
3409 const ovs_be32 *mask, bool verbose)
3410 {
3411 bool mask_empty = mask && !*mask;
3412
3413 if (verbose || !mask_empty) {
3414 bool mask_full = !mask || *mask == OVS_BE32_MAX;
3415
3416 ds_put_format(ds, "%s="IP_FMT, name, IP_ARGS(key));
3417 if (!mask_full) { /* Partially masked. */
3418 ds_put_format(ds, "/"IP_FMT, IP_ARGS(*mask));
3419 }
3420 ds_put_char(ds, ',');
3421 }
3422 }
3423
3424 static void
3425 format_in6_addr(struct ds *ds, const char *name,
3426 const struct in6_addr *key,
3427 const struct in6_addr *mask,
3428 bool verbose)
3429 {
3430 char buf[INET6_ADDRSTRLEN];
3431 bool mask_empty = mask && ipv6_mask_is_any(mask);
3432
3433 if (verbose || !mask_empty) {
3434 bool mask_full = !mask || ipv6_mask_is_exact(mask);
3435
3436 inet_ntop(AF_INET6, key, buf, sizeof buf);
3437 ds_put_format(ds, "%s=%s", name, buf);
3438 if (!mask_full) { /* Partially masked. */
3439 inet_ntop(AF_INET6, mask, buf, sizeof buf);
3440 ds_put_format(ds, "/%s", buf);
3441 }
3442 ds_put_char(ds, ',');
3443 }
3444 }
3445
3446 static void
3447 format_ipv6_label(struct ds *ds, const char *name, ovs_be32 key,
3448 const ovs_be32 *mask, bool verbose)
3449 {
3450 bool mask_empty = mask && !*mask;
3451
3452 if (verbose || !mask_empty) {
3453 bool mask_full = !mask
3454 || (*mask & htonl(IPV6_LABEL_MASK)) == htonl(IPV6_LABEL_MASK);
3455
3456 ds_put_format(ds, "%s=%#"PRIx32, name, ntohl(key));
3457 if (!mask_full) { /* Partially masked. */
3458 ds_put_format(ds, "/%#"PRIx32, ntohl(*mask));
3459 }
3460 ds_put_char(ds, ',');
3461 }
3462 }
3463
3464 static void
3465 format_u8x(struct ds *ds, const char *name, uint8_t key,
3466 const uint8_t *mask, bool verbose)
3467 {
3468 bool mask_empty = mask && !*mask;
3469
3470 if (verbose || !mask_empty) {
3471 bool mask_full = !mask || *mask == UINT8_MAX;
3472
3473 ds_put_format(ds, "%s=%#"PRIx8, name, key);
3474 if (!mask_full) { /* Partially masked. */
3475 ds_put_format(ds, "/%#"PRIx8, *mask);
3476 }
3477 ds_put_char(ds, ',');
3478 }
3479 }
3480
3481 static void
3482 format_u8u(struct ds *ds, const char *name, uint8_t key,
3483 const uint8_t *mask, bool verbose)
3484 {
3485 bool mask_empty = mask && !*mask;
3486
3487 if (verbose || !mask_empty) {
3488 bool mask_full = !mask || *mask == UINT8_MAX;
3489
3490 ds_put_format(ds, "%s=%"PRIu8, name, key);
3491 if (!mask_full) { /* Partially masked. */
3492 ds_put_format(ds, "/%#"PRIx8, *mask);
3493 }
3494 ds_put_char(ds, ',');
3495 }
3496 }
3497
3498 static void
3499 format_be16(struct ds *ds, const char *name, ovs_be16 key,
3500 const ovs_be16 *mask, bool verbose)
3501 {
3502 bool mask_empty = mask && !*mask;
3503
3504 if (verbose || !mask_empty) {
3505 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3506
3507 ds_put_format(ds, "%s=%"PRIu16, name, ntohs(key));
3508 if (!mask_full) { /* Partially masked. */
3509 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3510 }
3511 ds_put_char(ds, ',');
3512 }
3513 }
3514
3515 static void
3516 format_be16x(struct ds *ds, const char *name, ovs_be16 key,
3517 const ovs_be16 *mask, bool verbose)
3518 {
3519 bool mask_empty = mask && !*mask;
3520
3521 if (verbose || !mask_empty) {
3522 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3523
3524 ds_put_format(ds, "%s=%#"PRIx16, name, ntohs(key));
3525 if (!mask_full) { /* Partially masked. */
3526 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3527 }
3528 ds_put_char(ds, ',');
3529 }
3530 }
3531
3532 static void
3533 format_tun_flags(struct ds *ds, const char *name, uint16_t key,
3534 const uint16_t *mask, bool verbose)
3535 {
3536 bool mask_empty = mask && !*mask;
3537
3538 if (verbose || !mask_empty) {
3539 ds_put_cstr(ds, name);
3540 ds_put_char(ds, '(');
3541 if (mask) {
3542 format_flags_masked(ds, NULL, flow_tun_flag_to_string, key,
3543 *mask & FLOW_TNL_F_MASK, FLOW_TNL_F_MASK);
3544 } else { /* Fully masked. */
3545 format_flags(ds, flow_tun_flag_to_string, key, '|');
3546 }
3547 ds_put_cstr(ds, "),");
3548 }
3549 }
3550
3551 static bool
3552 check_attr_len(struct ds *ds, const struct nlattr *a, const struct nlattr *ma,
3553 const struct attr_len_tbl tbl[], int max_type, bool need_key)
3554 {
3555 int expected_len;
3556
3557 expected_len = odp_key_attr_len(tbl, max_type, nl_attr_type(a));
3558 if (expected_len != ATTR_LEN_VARIABLE &&
3559 expected_len != ATTR_LEN_NESTED) {
3560
3561 bool bad_key_len = nl_attr_get_size(a) != expected_len;
3562 bool bad_mask_len = ma && nl_attr_get_size(ma) != expected_len;
3563
3564 if (bad_key_len || bad_mask_len) {
3565 if (need_key) {
3566 ds_put_format(ds, "key%u", nl_attr_type(a));
3567 }
3568 if (bad_key_len) {
3569 ds_put_format(ds, "(bad key length %"PRIuSIZE", expected %d)(",
3570 nl_attr_get_size(a), expected_len);
3571 }
3572 format_generic_odp_key(a, ds);
3573 if (ma) {
3574 ds_put_char(ds, '/');
3575 if (bad_mask_len) {
3576 ds_put_format(ds, "(bad mask length %"PRIuSIZE", expected %d)(",
3577 nl_attr_get_size(ma), expected_len);
3578 }
3579 format_generic_odp_key(ma, ds);
3580 }
3581 ds_put_char(ds, ')');
3582 return false;
3583 }
3584 }
3585
3586 return true;
3587 }
3588
3589 static void
3590 format_unknown_key(struct ds *ds, const struct nlattr *a,
3591 const struct nlattr *ma)
3592 {
3593 ds_put_format(ds, "key%u(", nl_attr_type(a));
3594 format_generic_odp_key(a, ds);
3595 if (ma && !odp_mask_attr_is_exact(ma)) {
3596 ds_put_char(ds, '/');
3597 format_generic_odp_key(ma, ds);
3598 }
3599 ds_put_cstr(ds, "),");
3600 }
3601
3602 static void
3603 format_odp_tun_vxlan_opt(const struct nlattr *attr,
3604 const struct nlattr *mask_attr, struct ds *ds,
3605 bool verbose)
3606 {
3607 unsigned int left;
3608 const struct nlattr *a;
3609 struct ofpbuf ofp;
3610
3611 ofpbuf_init(&ofp, 100);
3612 NL_NESTED_FOR_EACH(a, left, attr) {
3613 uint16_t type = nl_attr_type(a);
3614 const struct nlattr *ma = NULL;
3615
3616 if (mask_attr) {
3617 ma = nl_attr_find__(nl_attr_get(mask_attr),
3618 nl_attr_get_size(mask_attr), type);
3619 if (!ma) {
3620 ma = generate_all_wildcard_mask(ovs_vxlan_ext_attr_lens,
3621 OVS_VXLAN_EXT_MAX,
3622 &ofp, a);
3623 }
3624 }
3625
3626 if (!check_attr_len(ds, a, ma, ovs_vxlan_ext_attr_lens,
3627 OVS_VXLAN_EXT_MAX, true)) {
3628 continue;
3629 }
3630
3631 switch (type) {
3632 case OVS_VXLAN_EXT_GBP: {
3633 uint32_t key = nl_attr_get_u32(a);
3634 ovs_be16 id, id_mask;
3635 uint8_t flags, flags_mask = 0;
3636
3637 id = htons(key & 0xFFFF);
3638 flags = (key >> 16) & 0xFF;
3639 if (ma) {
3640 uint32_t mask = nl_attr_get_u32(ma);
3641 id_mask = htons(mask & 0xFFFF);
3642 flags_mask = (mask >> 16) & 0xFF;
3643 }
3644
3645 ds_put_cstr(ds, "gbp(");
3646 format_be16(ds, "id", id, ma ? &id_mask : NULL, verbose);
3647 format_u8x(ds, "flags", flags, ma ? &flags_mask : NULL, verbose);
3648 ds_chomp(ds, ',');
3649 ds_put_cstr(ds, "),");
3650 break;
3651 }
3652
3653 default:
3654 format_unknown_key(ds, a, ma);
3655 }
3656 ofpbuf_clear(&ofp);
3657 }
3658
3659 ds_chomp(ds, ',');
3660 ofpbuf_uninit(&ofp);
3661 }
3662
3663 static void
3664 format_odp_tun_erspan_opt(const struct nlattr *attr,
3665 const struct nlattr *mask_attr, struct ds *ds,
3666 bool verbose)
3667 {
3668 const struct erspan_metadata *opts, *mask;
3669 uint8_t ver, ver_ma, dir, dir_ma, hwid, hwid_ma;
3670
3671 opts = nl_attr_get(attr);
3672 mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
3673
3674 ver = (uint8_t)opts->version;
3675 if (mask) {
3676 ver_ma = (uint8_t)mask->version;
3677 }
3678
3679 format_u8u(ds, "ver", ver, mask ? &ver_ma : NULL, verbose);
3680
3681 if (opts->version == 1) {
3682 if (mask) {
3683 ds_put_format(ds, "idx=%#"PRIx32"/%#"PRIx32",",
3684 ntohl(opts->u.index),
3685 ntohl(mask->u.index));
3686 } else {
3687 ds_put_format(ds, "idx=%#"PRIx32",", ntohl(opts->u.index));
3688 }
3689 } else if (opts->version == 2) {
3690 dir = opts->u.md2.dir;
3691 hwid = opts->u.md2.hwid;
3692 if (mask) {
3693 dir_ma = mask->u.md2.dir;
3694 hwid_ma = mask->u.md2.hwid;
3695 }
3696
3697 format_u8u(ds, "dir", dir, mask ? &dir_ma : NULL, verbose);
3698 format_u8x(ds, "hwid", hwid, mask ? &hwid_ma : NULL, verbose);
3699 }
3700 ds_chomp(ds, ',');
3701 }
3702
3703 static void
3704 format_odp_tun_gtpu_opt(const struct nlattr *attr,
3705 const struct nlattr *mask_attr, struct ds *ds,
3706 bool verbose)
3707 {
3708 const struct gtpu_metadata *opts, *mask;
3709
3710 opts = nl_attr_get(attr);
3711 mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
3712
3713 format_u8x(ds, "flags", opts->flags, mask ? &mask->flags : NULL, verbose);
3714 format_u8u(ds, "msgtype", opts->msgtype, mask ? &mask->msgtype : NULL,
3715 verbose);
3716 ds_chomp(ds, ',');
3717 }
3718
3719 #define MASK(PTR, FIELD) PTR ? &PTR->FIELD : NULL
3720
3721 static void
3722 format_geneve_opts(const struct geneve_opt *opt,
3723 const struct geneve_opt *mask, int opts_len,
3724 struct ds *ds, bool verbose)
3725 {
3726 while (opts_len > 0) {
3727 unsigned int len;
3728 uint8_t data_len, data_len_mask;
3729
3730 if (opts_len < sizeof *opt) {
3731 ds_put_format(ds, "opt len %u less than minimum %"PRIuSIZE,
3732 opts_len, sizeof *opt);
3733 return;
3734 }
3735
3736 data_len = opt->length * 4;
3737 if (mask) {
3738 if (mask->length == 0x1f) {
3739 data_len_mask = UINT8_MAX;
3740 } else {
3741 data_len_mask = mask->length;
3742 }
3743 }
3744 len = sizeof *opt + data_len;
3745 if (len > opts_len) {
3746 ds_put_format(ds, "opt len %u greater than remaining %u",
3747 len, opts_len);
3748 return;
3749 }
3750
3751 ds_put_char(ds, '{');
3752 format_be16x(ds, "class", opt->opt_class, MASK(mask, opt_class),
3753 verbose);
3754 format_u8x(ds, "type", opt->type, MASK(mask, type), verbose);
3755 format_u8u(ds, "len", data_len, mask ? &data_len_mask : NULL, verbose);
3756 if (data_len &&
3757 (verbose || !mask || !is_all_zeros(mask + 1, data_len))) {
3758 ds_put_hex(ds, opt + 1, data_len);
3759 if (mask && !is_all_ones(mask + 1, data_len)) {
3760 ds_put_char(ds, '/');
3761 ds_put_hex(ds, mask + 1, data_len);
3762 }
3763 } else {
3764 ds_chomp(ds, ',');
3765 }
3766 ds_put_char(ds, '}');
3767
3768 opt += len / sizeof(*opt);
3769 if (mask) {
3770 mask += len / sizeof(*opt);
3771 }
3772 opts_len -= len;
3773     }
3774 }
3775
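/* Formats the Geneve options in OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS attribute
 * 'attr' (and 'mask_attr', if nonnull), after checking that the value and
 * mask are the same length. */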
3776 static void
3777 format_odp_tun_geneve(const struct nlattr *attr,
3778 const struct nlattr *mask_attr, struct ds *ds,
3779 bool verbose)
3780 {
3781 int opts_len = nl_attr_get_size(attr);
3782 const struct geneve_opt *opt = nl_attr_get(attr);
3783 const struct geneve_opt *mask = mask_attr ?
3784 nl_attr_get(mask_attr) : NULL;
3785
3786 if (mask && nl_attr_get_size(attr) != nl_attr_get_size(mask_attr)) {
3787 ds_put_format(ds, "value len %"PRIuSIZE" different from mask len %"PRIuSIZE,
3788 nl_attr_get_size(attr), nl_attr_get_size(mask_attr));
3789 return;
3790 }
3791
3792 format_geneve_opts(opt, mask, opts_len, ds, verbose);
3793 }
3794
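/* Collects the nested OVS_NSH_KEY_ATTR_* attributes in 'attr' (and their
 * masks from 'mask_attr', if nonnull) into a struct ovs_key_nsh and formats
 * the result into 'ds'. */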
3795 static void
3796 format_odp_nsh_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3797 struct ds *ds)
3798 {
3799 unsigned int left;
3800 const struct nlattr *a;
3801 struct ovs_key_nsh nsh;
3802 struct ovs_key_nsh nsh_mask;
3803
3804 memset(&nsh, 0, sizeof nsh);
3805 memset(&nsh_mask, 0xff, sizeof nsh_mask);
3806
3807 NL_NESTED_FOR_EACH (a, left, attr) {
3808 enum ovs_nsh_key_attr type = nl_attr_type(a);
3809 const struct nlattr *ma = NULL;
3810
3811 if (mask_attr) {
3812 ma = nl_attr_find__(nl_attr_get(mask_attr),
3813 nl_attr_get_size(mask_attr), type);
3814 }
3815
3816 if (!check_attr_len(ds, a, ma, ovs_nsh_key_attr_lens,
3817 OVS_NSH_KEY_ATTR_MAX, true)) {
3818 continue;
3819 }
3820
3821 switch (type) {
3822 case OVS_NSH_KEY_ATTR_UNSPEC:
3823 break;
3824 case OVS_NSH_KEY_ATTR_BASE: {
3825 const struct ovs_nsh_key_base *base = nl_attr_get(a);
3826 const struct ovs_nsh_key_base *base_mask
3827 = ma ? nl_attr_get(ma) : NULL;
3828 nsh.flags = base->flags;
3829 nsh.ttl = base->ttl;
3830 nsh.mdtype = base->mdtype;
3831 nsh.np = base->np;
3832 nsh.path_hdr = base->path_hdr;
3833 if (base_mask) {
3834 nsh_mask.flags = base_mask->flags;
3835 nsh_mask.ttl = base_mask->ttl;
3836 nsh_mask.mdtype = base_mask->mdtype;
3837 nsh_mask.np = base_mask->np;
3838 nsh_mask.path_hdr = base_mask->path_hdr;
3839 }
3840 break;
3841 }
3842 case OVS_NSH_KEY_ATTR_MD1: {
3843 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
3844 const struct ovs_nsh_key_md1 *md1_mask
3845 = ma ? nl_attr_get(ma) : NULL;
3846 memcpy(nsh.context, md1->context, sizeof md1->context);
3847 if (md1_mask) {
3848 memcpy(nsh_mask.context, md1_mask->context,
3849 sizeof md1_mask->context);
3850 }
3851 break;
3852 }
3853 case OVS_NSH_KEY_ATTR_MD2:
3854 case __OVS_NSH_KEY_ATTR_MAX:
3855 default:
3856 /* No support for matching other metadata formats yet. */
3857 break;
3858 }
3859 }
3860
3861 if (mask_attr) {
3862 format_nsh_key_mask(ds, &nsh, &nsh_mask);
3863 } else {
3864 format_nsh_key(ds, &nsh);
3865 }
3866 }
3867
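/* Formats the nested OVS_TUNNEL_KEY_ATTR_* attributes of an
 * OVS_KEY_ATTR_TUNNEL attribute 'attr' (and 'mask_attr', if nonnull) into
 * 'ds'.  Flag attributes are accumulated and printed as a single
 * "flags(...)" field at the end. */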
3868 static void
3869 format_odp_tun_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3870 struct ds *ds, bool verbose)
3871 {
3872 unsigned int left;
3873 const struct nlattr *a;
3874 uint16_t flags = 0;
3875 uint16_t mask_flags = 0;
3876 struct ofpbuf ofp;
3877
3878 ofpbuf_init(&ofp, 100);
3879 NL_NESTED_FOR_EACH(a, left, attr) {
3880 enum ovs_tunnel_key_attr type = nl_attr_type(a);
3881 const struct nlattr *ma = NULL;
3882
3883 if (mask_attr) {
3884 ma = nl_attr_find__(nl_attr_get(mask_attr),
3885 nl_attr_get_size(mask_attr), type);
3886 if (!ma) {
3887 ma = generate_all_wildcard_mask(ovs_tun_key_attr_lens,
3888 OVS_TUNNEL_KEY_ATTR_MAX,
3889 &ofp, a);
3890 }
3891 }
3892
3893 if (!check_attr_len(ds, a, ma, ovs_tun_key_attr_lens,
3894 OVS_TUNNEL_KEY_ATTR_MAX, true)) {
3895 continue;
3896 }
3897
3898 switch (type) {
3899 case OVS_TUNNEL_KEY_ATTR_ID:
3900 format_be64(ds, "tun_id", nl_attr_get_be64(a),
3901 ma ? nl_attr_get(ma) : NULL, verbose);
3902 flags |= FLOW_TNL_F_KEY;
3903 if (ma) {
3904 mask_flags |= FLOW_TNL_F_KEY;
3905 }
3906 break;
3907 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
3908 format_ipv4(ds, "src", nl_attr_get_be32(a),
3909 ma ? nl_attr_get(ma) : NULL, verbose);
3910 break;
3911 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
3912 format_ipv4(ds, "dst", nl_attr_get_be32(a),
3913 ma ? nl_attr_get(ma) : NULL, verbose);
3914 break;
3915 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
3916 struct in6_addr ipv6_src;
3917 ipv6_src = nl_attr_get_in6_addr(a);
3918 format_in6_addr(ds, "ipv6_src", &ipv6_src,
3919 ma ? nl_attr_get(ma) : NULL, verbose);
3920 break;
3921 }
3922 case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
3923 struct in6_addr ipv6_dst;
3924 ipv6_dst = nl_attr_get_in6_addr(a);
3925 format_in6_addr(ds, "ipv6_dst", &ipv6_dst,
3926 ma ? nl_attr_get(ma) : NULL, verbose);
3927 break;
3928 }
3929 case OVS_TUNNEL_KEY_ATTR_TOS:
3930 format_u8x(ds, "tos", nl_attr_get_u8(a),
3931 ma ? nl_attr_get(ma) : NULL, verbose);
3932 break;
3933 case OVS_TUNNEL_KEY_ATTR_TTL:
3934 format_u8u(ds, "ttl", nl_attr_get_u8(a),
3935 ma ? nl_attr_get(ma) : NULL, verbose);
3936 break;
3937 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3938 flags |= FLOW_TNL_F_DONT_FRAGMENT;
3939 break;
3940 case OVS_TUNNEL_KEY_ATTR_CSUM:
3941 flags |= FLOW_TNL_F_CSUM;
3942 break;
3943 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
3944 format_be16(ds, "tp_src", nl_attr_get_be16(a),
3945 ma ? nl_attr_get(ma) : NULL, verbose);
3946 break;
3947 case OVS_TUNNEL_KEY_ATTR_TP_DST:
3948 format_be16(ds, "tp_dst", nl_attr_get_be16(a),
3949 ma ? nl_attr_get(ma) : NULL, verbose);
3950 break;
3951 case OVS_TUNNEL_KEY_ATTR_OAM:
3952 flags |= FLOW_TNL_F_OAM;
3953 break;
3954 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
3955 ds_put_cstr(ds, "vxlan(");
3956 format_odp_tun_vxlan_opt(a, ma, ds, verbose);
3957 ds_put_cstr(ds, "),");
3958 break;
3959 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
3960 ds_put_cstr(ds, "geneve(");
3961 format_odp_tun_geneve(a, ma, ds, verbose);
3962 ds_put_cstr(ds, "),");
3963 break;
3964 case OVS_TUNNEL_KEY_ATTR_PAD:
3965 break;
3966 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
3967 ds_put_cstr(ds, "erspan(");
3968 format_odp_tun_erspan_opt(a, ma, ds, verbose);
3969 ds_put_cstr(ds, "),");
3970 break;
3971 case OVS_TUNNEL_KEY_ATTR_GTPU_OPTS:
3972 ds_put_cstr(ds, "gtpu(");
3973 format_odp_tun_gtpu_opt(a, ma, ds, verbose);
3974             ds_put_cstr(ds, "),");
3975 break;
3976 case __OVS_TUNNEL_KEY_ATTR_MAX:
3977 default:
3978 format_unknown_key(ds, a, ma);
3979 }
3980 ofpbuf_clear(&ofp);
3981 }
3982
3983 /* Flags can have a valid mask even if the attribute is not set, so
3984 * we need to collect these separately. */
3985 if (mask_attr) {
3986 NL_NESTED_FOR_EACH(a, left, mask_attr) {
3987 switch (nl_attr_type(a)) {
3988 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3989 mask_flags |= FLOW_TNL_F_DONT_FRAGMENT;
3990 break;
3991 case OVS_TUNNEL_KEY_ATTR_CSUM:
3992 mask_flags |= FLOW_TNL_F_CSUM;
3993 break;
3994 case OVS_TUNNEL_KEY_ATTR_OAM:
3995 mask_flags |= FLOW_TNL_F_OAM;
3996 break;
3997 }
3998 }
3999 }
4000
4001 format_tun_flags(ds, "flags", flags, mask_attr ? &mask_flags : NULL,
4002 verbose);
4003 ds_chomp(ds, ',');
4004 ofpbuf_uninit(&ofp);
4005 }
4006
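/* Returns the abbreviation used to format a single OVS_CS_F_* connection
 * tracking state flag, or NULL if 'flag' is not recognized. */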
4007 static const char *
4008 odp_ct_state_to_string(uint32_t flag)
4009 {
4010 switch (flag) {
4011 case OVS_CS_F_REPLY_DIR:
4012 return "rpl";
4013 case OVS_CS_F_TRACKED:
4014 return "trk";
4015 case OVS_CS_F_NEW:
4016 return "new";
4017 case OVS_CS_F_ESTABLISHED:
4018 return "est";
4019 case OVS_CS_F_RELATED:
4020 return "rel";
4021 case OVS_CS_F_INVALID:
4022 return "inv";
4023 case OVS_CS_F_SRC_NAT:
4024 return "snat";
4025 case OVS_CS_F_DST_NAT:
4026 return "dnat";
4027 default:
4028 return NULL;
4029 }
4030 }
4031
4032 static void
4033 format_frag(struct ds *ds, const char *name, uint8_t key,
4034 const uint8_t *mask, bool verbose OVS_UNUSED)
4035 {
4036 bool mask_empty = mask && !*mask;
4037 bool mask_full = !mask || *mask == UINT8_MAX;
4038
4039 /* ODP frag is an enumeration field; partial masks are not meaningful. */
4040 if (!mask_empty && !mask_full) {
4041 ds_put_format(ds, "error: partial mask not supported for frag (%#"
4042 PRIx8"),", *mask);
4043 } else if (!mask_empty) {
4044 ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
4045 }
4046 }
4047
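/* Returns true if 'ma' is null or if its payload is all zeros, i.e. it
 * wildcards every bit. */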
4048 static bool
4049 mask_empty(const struct nlattr *ma)
4050 {
4051 const void *mask;
4052 size_t n;
4053
4054 if (!ma) {
4055 return true;
4056 }
4057 mask = nl_attr_get(ma);
4058 n = nl_attr_get_size(ma);
4059
4060 return is_all_zeros(mask, n);
4061 }
4062
4063 /* The caller must have already verified that 'a' and 'ma' have correct
4064 * lengths. */
4065 static void
4066 format_odp_key_attr__(const struct nlattr *a, const struct nlattr *ma,
4067 const struct hmap *portno_names, struct ds *ds,
4068 bool verbose)
4069 {
4070 enum ovs_key_attr attr = nl_attr_type(a);
4071 char namebuf[OVS_KEY_ATTR_BUFSIZE];
4072 bool is_exact;
4073
4074 is_exact = ma ? odp_mask_attr_is_exact(ma) : true;
4075
4076 ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
4077
4078 ds_put_char(ds, '(');
4079 switch (attr) {
4080 case OVS_KEY_ATTR_ENCAP:
4081 if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
4082 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
4083 nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
4084 verbose);
4085 } else if (nl_attr_get_size(a)) {
4086 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
4087 ds, verbose);
4088 }
4089 break;
4090
4091 case OVS_KEY_ATTR_PRIORITY:
4092 case OVS_KEY_ATTR_SKB_MARK:
4093 case OVS_KEY_ATTR_DP_HASH:
4094 case OVS_KEY_ATTR_RECIRC_ID:
4095 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4096 if (!is_exact) {
4097 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4098 }
4099 break;
4100
4101 case OVS_KEY_ATTR_CT_MARK:
4102 if (verbose || !mask_empty(ma)) {
4103 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4104 if (!is_exact) {
4105 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4106 }
4107 }
4108 break;
4109
4110 case OVS_KEY_ATTR_CT_STATE:
4111 if (verbose) {
4112 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4113 if (!is_exact) {
4114 ds_put_format(ds, "/%#"PRIx32,
4115 mask_empty(ma) ? 0 : nl_attr_get_u32(ma));
4116 }
4117 } else if (!is_exact) {
4118 format_flags_masked(ds, NULL, odp_ct_state_to_string,
4119 nl_attr_get_u32(a),
4120 mask_empty(ma) ? 0 : nl_attr_get_u32(ma),
4121 UINT32_MAX);
4122 } else {
4123 format_flags(ds, odp_ct_state_to_string, nl_attr_get_u32(a), '|');
4124 }
4125 break;
4126
4127 case OVS_KEY_ATTR_CT_ZONE:
4128 if (verbose || !mask_empty(ma)) {
4129 ds_put_format(ds, "%#"PRIx16, nl_attr_get_u16(a));
4130 if (!is_exact) {
4131 ds_put_format(ds, "/%#"PRIx16, nl_attr_get_u16(ma));
4132 }
4133 }
4134 break;
4135
4136 case OVS_KEY_ATTR_CT_LABELS: {
4137 const ovs_32aligned_u128 *value = nl_attr_get(a);
4138 const ovs_32aligned_u128 *mask = ma ? nl_attr_get(ma) : NULL;
4139
4140 format_u128(ds, value, mask, verbose);
4141 break;
4142 }
4143
4144 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
4145 const struct ovs_key_ct_tuple_ipv4 *key = nl_attr_get(a);
4146 const struct ovs_key_ct_tuple_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
4147
4148 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
4149 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
4150 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
4151 verbose);
4152 format_be16(ds, "tp_src", key->src_port, MASK(mask, src_port),
4153 verbose);
4154 format_be16(ds, "tp_dst", key->dst_port, MASK(mask, dst_port),
4155 verbose);
4156 ds_chomp(ds, ',');
4157 break;
4158 }
4159
4160 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
4161 const struct ovs_key_ct_tuple_ipv6 *key = nl_attr_get(a);
4162 const struct ovs_key_ct_tuple_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
4163
4164 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
4165 verbose);
4166 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
4167 verbose);
4168 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
4169 verbose);
4170 format_be16(ds, "src_port", key->src_port, MASK(mask, src_port),
4171 verbose);
4172 format_be16(ds, "dst_port", key->dst_port, MASK(mask, dst_port),
4173 verbose);
4174 ds_chomp(ds, ',');
4175 break;
4176 }
4177
4178 case OVS_KEY_ATTR_TUNNEL:
4179 format_odp_tun_attr(a, ma, ds, verbose);
4180 break;
4181
4182 case OVS_KEY_ATTR_IN_PORT:
4183 if (is_exact) {
4184 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
4185 } else {
4186 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
4187 if (!is_exact) {
4188 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4189 }
4190 }
4191 break;
4192
4193 case OVS_KEY_ATTR_PACKET_TYPE: {
4194 ovs_be32 value = nl_attr_get_be32(a);
4195 ovs_be32 mask = ma ? nl_attr_get_be32(ma) : OVS_BE32_MAX;
4196
4197 ovs_be16 ns = htons(pt_ns(value));
4198 ovs_be16 ns_mask = htons(pt_ns(mask));
4199 format_be16(ds, "ns", ns, &ns_mask, verbose);
4200
4201 ovs_be16 ns_type = pt_ns_type_be(value);
4202 ovs_be16 ns_type_mask = pt_ns_type_be(mask);
4203 format_be16x(ds, "id", ns_type, &ns_type_mask, verbose);
4204
4205 ds_chomp(ds, ',');
4206 break;
4207 }
4208
4209 case OVS_KEY_ATTR_ETHERNET: {
4210 const struct ovs_key_ethernet *mask = ma ? nl_attr_get(ma) : NULL;
4211 const struct ovs_key_ethernet *key = nl_attr_get(a);
4212
4213 format_eth(ds, "src", key->eth_src, MASK(mask, eth_src), verbose);
4214 format_eth(ds, "dst", key->eth_dst, MASK(mask, eth_dst), verbose);
4215 ds_chomp(ds, ',');
4216 break;
4217 }
4218 case OVS_KEY_ATTR_VLAN:
4219 format_vlan_tci(ds, nl_attr_get_be16(a),
4220 ma ? nl_attr_get_be16(ma) : OVS_BE16_MAX, verbose);
4221 break;
4222
4223 case OVS_KEY_ATTR_MPLS: {
4224 const struct ovs_key_mpls *mpls_key = nl_attr_get(a);
4225 const struct ovs_key_mpls *mpls_mask = NULL;
4226 size_t size = nl_attr_get_size(a);
4227
4228 if (!size || size % sizeof *mpls_key) {
4229 ds_put_format(ds, "(bad key length %"PRIuSIZE")", size);
4230 return;
4231 }
4232 if (!is_exact) {
4233 mpls_mask = nl_attr_get(ma);
4234 if (size != nl_attr_get_size(ma)) {
4235 ds_put_format(ds, "(key length %"PRIuSIZE" != "
4236 "mask length %"PRIuSIZE")",
4237 size, nl_attr_get_size(ma));
4238 return;
4239 }
4240 }
4241 format_mpls(ds, mpls_key, mpls_mask, size / sizeof *mpls_key);
4242 break;
4243 }
4244 case OVS_KEY_ATTR_ETHERTYPE:
4245 ds_put_format(ds, "0x%04"PRIx16, ntohs(nl_attr_get_be16(a)));
4246 if (!is_exact) {
4247 ds_put_format(ds, "/0x%04"PRIx16, ntohs(nl_attr_get_be16(ma)));
4248 }
4249 break;
4250
4251 case OVS_KEY_ATTR_IPV4: {
4252 const struct ovs_key_ipv4 *key = nl_attr_get(a);
4253 const struct ovs_key_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
4254
4255 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
4256 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
4257 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
4258 verbose);
4259 format_u8x(ds, "tos", key->ipv4_tos, MASK(mask, ipv4_tos), verbose);
4260 format_u8u(ds, "ttl", key->ipv4_ttl, MASK(mask, ipv4_ttl), verbose);
4261 format_frag(ds, "frag", key->ipv4_frag, MASK(mask, ipv4_frag),
4262 verbose);
4263 ds_chomp(ds, ',');
4264 break;
4265 }
4266 case OVS_KEY_ATTR_IPV6: {
4267 const struct ovs_key_ipv6 *key = nl_attr_get(a);
4268 const struct ovs_key_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
4269
4270 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
4271 verbose);
4272 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
4273 verbose);
4274 format_ipv6_label(ds, "label", key->ipv6_label, MASK(mask, ipv6_label),
4275 verbose);
4276 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
4277 verbose);
4278 format_u8x(ds, "tclass", key->ipv6_tclass, MASK(mask, ipv6_tclass),
4279 verbose);
4280 format_u8u(ds, "hlimit", key->ipv6_hlimit, MASK(mask, ipv6_hlimit),
4281 verbose);
4282 format_frag(ds, "frag", key->ipv6_frag, MASK(mask, ipv6_frag),
4283 verbose);
4284 ds_chomp(ds, ',');
4285 break;
4286 }
4287 /* These have the same structure and format. */
4288 case OVS_KEY_ATTR_TCP:
4289 case OVS_KEY_ATTR_UDP:
4290 case OVS_KEY_ATTR_SCTP: {
4291 const struct ovs_key_tcp *key = nl_attr_get(a);
4292 const struct ovs_key_tcp *mask = ma ? nl_attr_get(ma) : NULL;
4293
4294 format_be16(ds, "src", key->tcp_src, MASK(mask, tcp_src), verbose);
4295 format_be16(ds, "dst", key->tcp_dst, MASK(mask, tcp_dst), verbose);
4296 ds_chomp(ds, ',');
4297 break;
4298 }
4299 case OVS_KEY_ATTR_TCP_FLAGS:
4300 if (!is_exact) {
4301 format_flags_masked(ds, NULL, packet_tcp_flag_to_string,
4302 ntohs(nl_attr_get_be16(a)),
4303 TCP_FLAGS(nl_attr_get_be16(ma)),
4304 TCP_FLAGS(OVS_BE16_MAX));
4305 } else {
4306 format_flags(ds, packet_tcp_flag_to_string,
4307 ntohs(nl_attr_get_be16(a)), '|');
4308 }
4309 break;
4310
4311 case OVS_KEY_ATTR_ICMP: {
4312 const struct ovs_key_icmp *key = nl_attr_get(a);
4313 const struct ovs_key_icmp *mask = ma ? nl_attr_get(ma) : NULL;
4314
4315 format_u8u(ds, "type", key->icmp_type, MASK(mask, icmp_type), verbose);
4316 format_u8u(ds, "code", key->icmp_code, MASK(mask, icmp_code), verbose);
4317 ds_chomp(ds, ',');
4318 break;
4319 }
4320 case OVS_KEY_ATTR_ICMPV6: {
4321 const struct ovs_key_icmpv6 *key = nl_attr_get(a);
4322 const struct ovs_key_icmpv6 *mask = ma ? nl_attr_get(ma) : NULL;
4323
4324 format_u8u(ds, "type", key->icmpv6_type, MASK(mask, icmpv6_type),
4325 verbose);
4326 format_u8u(ds, "code", key->icmpv6_code, MASK(mask, icmpv6_code),
4327 verbose);
4328 ds_chomp(ds, ',');
4329 break;
4330 }
4331 case OVS_KEY_ATTR_ARP: {
4332 const struct ovs_key_arp *mask = ma ? nl_attr_get(ma) : NULL;
4333 const struct ovs_key_arp *key = nl_attr_get(a);
4334
4335 format_ipv4(ds, "sip", key->arp_sip, MASK(mask, arp_sip), verbose);
4336 format_ipv4(ds, "tip", key->arp_tip, MASK(mask, arp_tip), verbose);
4337 format_be16(ds, "op", key->arp_op, MASK(mask, arp_op), verbose);
4338 format_eth(ds, "sha", key->arp_sha, MASK(mask, arp_sha), verbose);
4339 format_eth(ds, "tha", key->arp_tha, MASK(mask, arp_tha), verbose);
4340 ds_chomp(ds, ',');
4341 break;
4342 }
4343 case OVS_KEY_ATTR_ND: {
4344 const struct ovs_key_nd *mask = ma ? nl_attr_get(ma) : NULL;
4345 const struct ovs_key_nd *key = nl_attr_get(a);
4346
4347 format_in6_addr(ds, "target", &key->nd_target, MASK(mask, nd_target),
4348 verbose);
4349 format_eth(ds, "sll", key->nd_sll, MASK(mask, nd_sll), verbose);
4350 format_eth(ds, "tll", key->nd_tll, MASK(mask, nd_tll), verbose);
4351
4352 ds_chomp(ds, ',');
4353 break;
4354 }
4355 case OVS_KEY_ATTR_ND_EXTENSIONS: {
4356 const struct ovs_key_nd_extensions *mask = ma ? nl_attr_get(ma) : NULL;
4357 const struct ovs_key_nd_extensions *key = nl_attr_get(a);
4358
4359 bool first = true;
4360 format_be32_masked(ds, &first, "nd_reserved", key->nd_reserved,
4361 OVS_BE32_MAX);
4362 ds_put_char(ds, ',');
4363
4364 format_u8u(ds, "nd_options_type", key->nd_options_type,
4365 MASK(mask, nd_options_type), verbose);
4366
4367 ds_chomp(ds, ',');
4368 break;
4369 }
4370 case OVS_KEY_ATTR_NSH: {
4371 format_odp_nsh_attr(a, ma, ds);
4372 break;
4373 }
4374 case OVS_KEY_ATTR_UNSPEC:
4375 case __OVS_KEY_ATTR_MAX:
4376 default:
4377 format_generic_odp_key(a, ds);
4378 if (!is_exact) {
4379 ds_put_char(ds, '/');
4380 format_generic_odp_key(ma, ds);
4381 }
4382 break;
4383 }
4384 ds_put_char(ds, ')');
4385 }
4386
4387 static void
4388 format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
4389 const struct hmap *portno_names, struct ds *ds,
4390 bool verbose)
4391 {
4392 if (check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4393 OVS_KEY_ATTR_MAX, false)) {
4394 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4395 }
4396 }
4397
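/* Appends to 'ofp' an all-zero mask attribute with the same type and
 * structure as 'key', recursing into nested attributes, so that every field
 * covered by 'key' is fully wildcarded.  Returns the base of 'ofp'. */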
4398 static struct nlattr *
4399 generate_all_wildcard_mask(const struct attr_len_tbl tbl[], int max,
4400 struct ofpbuf *ofp, const struct nlattr *key)
4401 {
4402 const struct nlattr *a;
4403 unsigned int left;
4404 int type = nl_attr_type(key);
4405 int size = nl_attr_get_size(key);
4406
4407 if (odp_key_attr_len(tbl, max, type) != ATTR_LEN_NESTED) {
4408 nl_msg_put_unspec_zero(ofp, type, size);
4409 } else {
4410 size_t nested_mask;
4411
4412 if (tbl[type].next) {
4413 const struct attr_len_tbl *entry = &tbl[type];
4414 tbl = entry->next;
4415 max = entry->next_max;
4416 }
4417
4418 nested_mask = nl_msg_start_nested(ofp, type);
4419 NL_ATTR_FOR_EACH(a, left, key, nl_attr_get_size(key)) {
4420 generate_all_wildcard_mask(tbl, max, ofp, nl_attr_get(a));
4421 }
4422 nl_msg_end_nested(ofp, nested_mask);
4423 }
4424
4425 return ofp->base;
4426 }
4427
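/* Formats the 128-bit value 'key' in hex, followed by "/<mask>" when 'mask'
 * is neither null nor all-ones.  Nothing is printed unless 'verbose' is true
 * or the mask is nonzero. */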
4428 static void
4429 format_u128(struct ds *ds, const ovs_32aligned_u128 *key,
4430 const ovs_32aligned_u128 *mask, bool verbose)
4431 {
4432 if (verbose || (mask && !ovs_u128_is_zero(get_32aligned_u128(mask)))) {
4433 ovs_be128 value = hton128(get_32aligned_u128(key));
4434 ds_put_hex(ds, &value, sizeof value);
4435 if (mask && !(ovs_u128_is_ones(get_32aligned_u128(mask)))) {
4436 value = hton128(get_32aligned_u128(mask));
4437 ds_put_char(ds, '/');
4438 ds_put_hex(ds, &value, sizeof value);
4439 }
4440 }
4441 }
4442
4443 /* Read the string from 's_' as a 128-bit value. If the string contains
4444 * a "/", the rest of the string will be treated as a 128-bit mask.
4445 *
4446 * If either the value or mask is larger than 64 bits, the string must
4447 * be in hexadecimal.
4448 */
4449 static int
4450 scan_u128(const char *s_, ovs_u128 *value, ovs_u128 *mask)
4451 {
4452 char *s = CONST_CAST(char *, s_);
4453 ovs_be128 be_value;
4454 ovs_be128 be_mask;
4455
4456 if (!parse_int_string(s, (uint8_t *)&be_value, sizeof be_value, &s)) {
4457 *value = ntoh128(be_value);
4458
4459 if (mask) {
4460 int n;
4461
4462 if (ovs_scan(s, "/%n", &n)) {
4463 int error;
4464
4465 s += n;
4466 error = parse_int_string(s, (uint8_t *)&be_mask,
4467 sizeof be_mask, &s);
4468 if (error) {
4469 return 0;
4470 }
4471 *mask = ntoh128(be_mask);
4472 } else {
4473 *mask = OVS_U128_MAX;
4474 }
4475 }
4476 return s - s_;
4477 }
4478
4479 return 0;
4480 }
4481
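/* Parses a flow UFID of the form "ufid:<UUID>" from the beginning of 's_'
 * into 'ufid'.  Returns the number of characters parsed, 0 if 's_' does not
 * begin with "ufid:", or -EINVAL if the UUID is malformed. */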
4482 int
4483 odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
4484 {
4485 const char *s = s_;
4486
4487 if (ovs_scan(s, "ufid:")) {
4488 s += 5;
4489
4490 if (!uuid_from_string_prefix((struct uuid *)ufid, s)) {
4491 return -EINVAL;
4492 }
4493 s += UUID_LEN;
4494
4495 return s - s_;
4496 }
4497
4498 return 0;
4499 }
4500
4501 void
4502 odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
4503 {
4504 ds_put_format(ds, "ufid:"UUID_FMT, UUID_ARGS((struct uuid *)ufid));
4505 }
4506
4507 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4508 * OVS_KEY_ATTR_* attributes in 'key'. If non-null, additionally formats the
4509 * 'mask_len' bytes of 'mask' which apply to 'key'. If 'portno_names' is
4510 * non-null, translates odp port number to its name. */
4511 void
4512 odp_flow_format(const struct nlattr *key, size_t key_len,
4513 const struct nlattr *mask, size_t mask_len,
4514 const struct hmap *portno_names, struct ds *ds, bool verbose)
4515 {
4516 if (key_len) {
4517 const struct nlattr *a;
4518 unsigned int left;
4519 bool has_ethtype_key = false;
4520 bool has_packet_type_key = false;
4521 struct ofpbuf ofp;
4522 bool first_field = true;
4523
4524 ofpbuf_init(&ofp, 100);
4525 NL_ATTR_FOR_EACH (a, left, key, key_len) {
4526 int attr_type = nl_attr_type(a);
4527 const struct nlattr *ma = (mask && mask_len
4528 ? nl_attr_find__(mask, mask_len,
4529 attr_type)
4530 : NULL);
4531 if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4532 OVS_KEY_ATTR_MAX, false)) {
4533 continue;
4534 }
4535
4536 bool is_nested_attr;
4537 bool is_wildcard = false;
4538
4539 if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
4540 has_ethtype_key = true;
4541 } else if (attr_type == OVS_KEY_ATTR_PACKET_TYPE) {
4542 has_packet_type_key = true;
4543 }
4544
4545 is_nested_attr = odp_key_attr_len(ovs_flow_key_attr_lens,
4546 OVS_KEY_ATTR_MAX, attr_type) ==
4547 ATTR_LEN_NESTED;
4548
4549 if (mask && mask_len) {
4550 ma = nl_attr_find__(mask, mask_len, nl_attr_type(a));
4551 is_wildcard = ma ? odp_mask_attr_is_wildcard(ma) : true;
4552 }
4553
4554 if (verbose || !is_wildcard || is_nested_attr) {
4555 if (is_wildcard && !ma) {
4556 ma = generate_all_wildcard_mask(ovs_flow_key_attr_lens,
4557 OVS_KEY_ATTR_MAX,
4558 &ofp, a);
4559 }
4560 if (!first_field) {
4561 ds_put_char(ds, ',');
4562 }
4563 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4564 first_field = false;
4565 } else if (attr_type == OVS_KEY_ATTR_ETHERNET
4566 && !has_packet_type_key) {
4567 /* This special case reflects differences between the kernel
4568 * and userspace datapaths regarding the root type of the
4569 * packet being matched (typically Ethernet but some tunnels
4570 * can encapsulate IPv4 etc.). The kernel datapath does not
4571 * have an explicit way to indicate packet type; instead:
4572 *
4573 * - If OVS_KEY_ATTR_ETHERNET is present, the packet is an
4574 * Ethernet packet and OVS_KEY_ATTR_ETHERTYPE is the
4575 * Ethertype encoded in the Ethernet header.
4576 *
4577 * - If OVS_KEY_ATTR_ETHERNET is absent, then the packet's
4578 * root type is that encoded in OVS_KEY_ATTR_ETHERTYPE
4579 * (i.e. if OVS_KEY_ATTR_ETHERTYPE is 0x0800 then the
4580 * packet is an IPv4 packet).
4581 *
4582 * Thus, if OVS_KEY_ATTR_ETHERNET is present, even if it is
4583 * all-wildcarded, it is important to print it.
4584 *
4585 * On the other hand, the userspace datapath supports
4586 * OVS_KEY_ATTR_PACKET_TYPE and uses it to indicate the packet
4587 * type. Thus, if OVS_KEY_ATTR_PACKET_TYPE is present, we need
4588 * not print an all-wildcarded OVS_KEY_ATTR_ETHERNET. */
4589 if (!first_field) {
4590 ds_put_char(ds, ',');
4591 }
4592 ds_put_cstr(ds, "eth()");
4593 }
4594 ofpbuf_clear(&ofp);
4595 }
4596 ofpbuf_uninit(&ofp);
4597
4598 if (left) {
4599 int i;
4600
4601 if (left == key_len) {
4602 ds_put_cstr(ds, "<empty>");
4603 }
4604 ds_put_format(ds, ",***%u leftover bytes*** (", left);
4605 for (i = 0; i < left; i++) {
4606 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
4607 }
4608 ds_put_char(ds, ')');
4609 }
4610 if (!has_ethtype_key) {
4611 const struct nlattr *ma = nl_attr_find__(mask, mask_len,
4612 OVS_KEY_ATTR_ETHERTYPE);
4613 if (ma) {
4614 ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
4615 ntohs(nl_attr_get_be16(ma)));
4616 }
4617 }
4618 } else {
4619 ds_put_cstr(ds, "<empty>");
4620 }
4621 }
4622
4623 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4624 * OVS_KEY_ATTR_* attributes in 'key'. */
4625 void
4626 odp_flow_key_format(const struct nlattr *key,
4627 size_t key_len, struct ds *ds)
4628 {
4629 odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
4630 }
4631
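/* Converts the case-insensitive string "no", "first", or "later" into the
 * corresponding enum ovs_frag_type in '*type'.  Returns true on success,
 * false otherwise. */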
4632 static bool
4633 ovs_frag_type_from_string(const char *s, enum ovs_frag_type *type)
4634 {
4635 if (!strcasecmp(s, "no")) {
4636 *type = OVS_FRAG_TYPE_NONE;
4637 } else if (!strcasecmp(s, "first")) {
4638 *type = OVS_FRAG_TYPE_FIRST;
4639 } else if (!strcasecmp(s, "later")) {
4640 *type = OVS_FRAG_TYPE_LATER;
4641 } else {
4642 return false;
4643 }
4644 return true;
4645 }
4646
4647 /* Parsing. */
4648
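/* Each scan_<type>() helper below parses one value, optionally followed by a
 * "/<mask>" suffix, from the beginning of 's' into '*key' and, if 'mask' is
 * nonnull, '*mask', defaulting to an exact-match mask when the suffix is
 * omitted.  Each helper returns the number of characters consumed, or 0 if
 * 's' cannot be parsed.  For example, scan_ipv4("10.1.2.3/255.255.255.0)",
 * &key, &mask) consumes everything up to, but not including, the ')'. */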
4649 static int
4650 scan_eth(const char *s, struct eth_addr *key, struct eth_addr *mask)
4651 {
4652 int n;
4653
4654 if (ovs_scan(s, ETH_ADDR_SCAN_FMT"%n",
4655 ETH_ADDR_SCAN_ARGS(*key), &n)) {
4656 int len = n;
4657
4658 if (mask) {
4659 if (ovs_scan(s + len, "/"ETH_ADDR_SCAN_FMT"%n",
4660 ETH_ADDR_SCAN_ARGS(*mask), &n)) {
4661 len += n;
4662 } else {
4663 memset(mask, 0xff, sizeof *mask);
4664 }
4665 }
4666 return len;
4667 }
4668 return 0;
4669 }
4670
4671 static int
4672 scan_ipv4(const char *s, ovs_be32 *key, ovs_be32 *mask)
4673 {
4674 int n;
4675
4676 if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(key), &n)) {
4677 int len = n;
4678
4679 if (mask) {
4680 if (ovs_scan(s + len, "/"IP_SCAN_FMT"%n",
4681 IP_SCAN_ARGS(mask), &n)) {
4682 len += n;
4683 } else {
4684 *mask = OVS_BE32_MAX;
4685 }
4686 }
4687 return len;
4688 }
4689 return 0;
4690 }
4691
4692 static int
4693 scan_in6_addr(const char *s, struct in6_addr *key, struct in6_addr *mask)
4694 {
4695 int n;
4696 char ipv6_s[IPV6_SCAN_LEN + 1];
4697
4698 if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
4699 && inet_pton(AF_INET6, ipv6_s, key) == 1) {
4700 int len = n;
4701
4702 if (mask) {
4703 if (ovs_scan(s + len, "/"IPV6_SCAN_FMT"%n", ipv6_s, &n)
4704 && inet_pton(AF_INET6, ipv6_s, mask) == 1) {
4705 len += n;
4706 } else {
4707 memset(mask, 0xff, sizeof *mask);
4708 }
4709 }
4710 return len;
4711 }
4712 return 0;
4713 }
4714
4715 static int
4716 scan_ipv6_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
4717 {
4718 int key_, mask_;
4719 int n;
4720
4721 if (ovs_scan(s, "%i%n", &key_, &n)
4722 && (key_ & ~IPV6_LABEL_MASK) == 0) {
4723 int len = n;
4724
4725 *key = htonl(key_);
4726 if (mask) {
4727 if (ovs_scan(s + len, "/%i%n", &mask_, &n)
4728 && (mask_ & ~IPV6_LABEL_MASK) == 0) {
4729 len += n;
4730 *mask = htonl(mask_);
4731 } else {
4732 *mask = htonl(IPV6_LABEL_MASK);
4733 }
4734 }
4735 return len;
4736 }
4737 return 0;
4738 }
4739
4740 static int
4741 scan_u8(const char *s, uint8_t *key, uint8_t *mask)
4742 {
4743 int n;
4744
4745 if (ovs_scan(s, "%"SCNi8"%n", key, &n)) {
4746 int len = n;
4747
4748 if (mask) {
4749 if (ovs_scan(s + len, "/%"SCNi8"%n", mask, &n)) {
4750 len += n;
4751 } else {
4752 *mask = UINT8_MAX;
4753 }
4754 }
4755 return len;
4756 }
4757 return 0;
4758 }
4759
4760 static int
4761 scan_u16(const char *s, uint16_t *key, uint16_t *mask)
4762 {
4763 int n;
4764
4765 if (ovs_scan(s, "%"SCNi16"%n", key, &n)) {
4766 int len = n;
4767
4768 if (mask) {
4769 if (ovs_scan(s + len, "/%"SCNi16"%n", mask, &n)) {
4770 len += n;
4771 } else {
4772 *mask = UINT16_MAX;
4773 }
4774 }
4775 return len;
4776 }
4777 return 0;
4778 }
4779
4780 static int
4781 scan_u32(const char *s, uint32_t *key, uint32_t *mask)
4782 {
4783 int n;
4784
4785 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4786 int len = n;
4787
4788 if (mask) {
4789 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4790 len += n;
4791 } else {
4792 *mask = UINT32_MAX;
4793 }
4794 }
4795 return len;
4796 }
4797 return 0;
4798 }
4799
4800 static int
4801 scan_be16(const char *s, ovs_be16 *key, ovs_be16 *mask)
4802 {
4803 uint16_t key_, mask_;
4804 int n;
4805
4806 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
4807 int len = n;
4808
4809 *key = htons(key_);
4810 if (mask) {
4811 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
4812 len += n;
4813 *mask = htons(mask_);
4814 } else {
4815 *mask = OVS_BE16_MAX;
4816 }
4817 }
4818 return len;
4819 }
4820 return 0;
4821 }
4822
4823 static int
4824 scan_be32(const char *s, ovs_be32 *key, ovs_be32 *mask)
4825 {
4826 uint32_t key_, mask_;
4827 int n;
4828
4829 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
4830 int len = n;
4831
4832 *key = htonl(key_);
4833 if (mask) {
4834 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
4835 len += n;
4836 *mask = htonl(mask_);
4837 } else {
4838 *mask = OVS_BE32_MAX;
4839 }
4840 }
4841 return len;
4842 }
4843 return 0;
4844 }
4845
4846 static int
4847 scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
4848 {
4849 uint64_t key_, mask_;
4850 int n;
4851
4852 if (ovs_scan(s, "%"SCNi64"%n", &key_, &n)) {
4853 int len = n;
4854
4855 *key = htonll(key_);
4856 if (mask) {
4857 if (ovs_scan(s + len, "/%"SCNi64"%n", &mask_, &n)) {
4858 len += n;
4859 *mask = htonll(mask_);
4860 } else {
4861 *mask = OVS_BE64_MAX;
4862 }
4863 }
4864 return len;
4865 }
4866 return 0;
4867 }
4868
4869 static int
4870 scan_tun_flags(const char *s, uint16_t *key, uint16_t *mask)
4871 {
4872 uint32_t flags, fmask;
4873 int n;
4874
4875 n = parse_odp_flags(s, flow_tun_flag_to_string, &flags,
4876 FLOW_TNL_F_MASK, mask ? &fmask : NULL);
4877 if (n >= 0 && s[n] == ')') {
4878 *key = flags;
4879 if (mask) {
4880 *mask = fmask;
4881 }
4882 return n + 1;
4883 }
4884 return 0;
4885 }
4886
4887 static int
4888 scan_tcp_flags(const char *s, ovs_be16 *key, ovs_be16 *mask)
4889 {
4890 uint32_t flags, fmask;
4891 int n;
4892
4893 n = parse_odp_flags(s, packet_tcp_flag_to_string, &flags,
4894 TCP_FLAGS(OVS_BE16_MAX), mask ? &fmask : NULL);
4895 if (n >= 0) {
4896 *key = htons(flags);
4897 if (mask) {
4898 *mask = htons(fmask);
4899 }
4900 return n;
4901 }
4902 return 0;
4903 }
4904
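/* The next two functions convert between the CS_* connection tracking state
 * bits used by userspace and the OVS_CS_F_* bits used in the datapath
 * interface. */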
4905 static uint32_t
4906 ovs_to_odp_ct_state(uint8_t state)
4907 {
4908 uint32_t odp = 0;
4909
4910 #define CS_STATE(ENUM, INDEX, NAME) \
4911 if (state & CS_##ENUM) { \
4912 odp |= OVS_CS_F_##ENUM; \
4913 }
4914 CS_STATES
4915 #undef CS_STATE
4916
4917 return odp;
4918 }
4919
4920 static uint8_t
4921 odp_to_ovs_ct_state(uint32_t flags)
4922 {
4923 uint32_t state = 0;
4924
4925 #define CS_STATE(ENUM, INDEX, NAME) \
4926 if (flags & OVS_CS_F_##ENUM) { \
4927 state |= CS_##ENUM; \
4928 }
4929 CS_STATES
4930 #undef CS_STATE
4931
4932 return state;
4933 }
4934
4935 static int
4936 scan_ct_state(const char *s, uint32_t *key, uint32_t *mask)
4937 {
4938 uint32_t flags, fmask;
4939 int n;
4940
4941 n = parse_flags(s, odp_ct_state_to_string, ')', NULL, NULL, &flags,
4942 ovs_to_odp_ct_state(CS_SUPPORTED_MASK),
4943 mask ? &fmask : NULL);
4944
4945 if (n >= 0) {
4946 *key = flags;
4947 if (mask) {
4948 *mask = fmask;
4949 }
4950 return n;
4951 }
4952 return 0;
4953 }
4954
4955 static int
4956 scan_frag(const char *s, uint8_t *key, uint8_t *mask)
4957 {
4958 int n;
4959 char frag[8];
4960 enum ovs_frag_type frag_type;
4961
4962 if (ovs_scan(s, "%7[a-z]%n", frag, &n)
4963 && ovs_frag_type_from_string(frag, &frag_type)) {
4964 int len = n;
4965
4966 *key = frag_type;
4967 if (mask) {
4968 *mask = UINT8_MAX;
4969 }
4970 return len;
4971 }
4972 return 0;
4973 }
4974
4975 static int
4976 scan_port(const char *s, uint32_t *key, uint32_t *mask,
4977 const struct simap *port_names)
4978 {
4979 int n;
4980
4981 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4982 int len = n;
4983
4984 if (mask) {
4985 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4986 len += n;
4987 } else {
4988 *mask = UINT32_MAX;
4989 }
4990 }
4991 return len;
4992 } else if (port_names) {
4993 const struct simap_node *node;
4994 int len;
4995
4996 len = strcspn(s, ")");
4997 node = simap_find_len(port_names, s, len);
4998 if (node) {
4999 *key = node->data;
5000
5001 if (mask) {
5002 *mask = UINT32_MAX;
5003 }
5004 return len;
5005 }
5006 }
5007 return 0;
5008 }
5009
5010 /* Helper for VLAN parsing. */
5011 struct ovs_key_vlan__ {
5012 ovs_be16 tci;
5013 };
5014
5015 static bool
5016 set_be16_bf(ovs_be16 *bf, uint8_t bits, uint8_t offset, uint16_t value)
5017 {
5018 const uint16_t mask = ((1U << bits) - 1) << offset;
5019
5020 if (value >> bits) {
5021 return false;
5022 }
5023
5024 *bf = htons((ntohs(*bf) & ~mask) | (value << offset));
5025 return true;
5026 }
5027
5028 static int
5029 scan_be16_bf(const char *s, ovs_be16 *key, ovs_be16 *mask, uint8_t bits,
5030 uint8_t offset)
5031 {
5032 uint16_t key_, mask_;
5033 int n;
5034
5035 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
5036 int len = n;
5037
5038 if (set_be16_bf(key, bits, offset, key_)) {
5039 if (mask) {
5040 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
5041 len += n;
5042
5043 if (!set_be16_bf(mask, bits, offset, mask_)) {
5044 return 0;
5045 }
5046 } else {
5047 *mask |= htons(((1U << bits) - 1) << offset);
5048 }
5049 }
5050 return len;
5051 }
5052 }
5053 return 0;
5054 }
5055
5056 static int
5057 scan_vid(const char *s, ovs_be16 *key, ovs_be16 *mask)
5058 {
5059 return scan_be16_bf(s, key, mask, 12, VLAN_VID_SHIFT);
5060 }
5061
5062 static int
5063 scan_pcp(const char *s, ovs_be16 *key, ovs_be16 *mask)
5064 {
5065 return scan_be16_bf(s, key, mask, 3, VLAN_PCP_SHIFT);
5066 }
5067
5068 static int
5069 scan_cfi(const char *s, ovs_be16 *key, ovs_be16 *mask)
5070 {
5071 return scan_be16_bf(s, key, mask, 1, VLAN_CFI_SHIFT);
5072 }
5073
5074 /* For MPLS. */
5075 static bool
5076 set_be32_bf(ovs_be32 *bf, uint8_t bits, uint8_t offset, uint32_t value)
5077 {
5078 const uint32_t mask = ((1U << bits) - 1) << offset;
5079
5080 if (value >> bits) {
5081 return false;
5082 }
5083
5084 *bf = htonl((ntohl(*bf) & ~mask) | (value << offset));
5085 return true;
5086 }
5087
5088 static int
5089 scan_be32_bf(const char *s, ovs_be32 *key, ovs_be32 *mask, uint8_t bits,
5090 uint8_t offset)
5091 {
5092 uint32_t key_, mask_;
5093 int n;
5094
5095 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
5096 int len = n;
5097
5098 if (set_be32_bf(key, bits, offset, key_)) {
5099 if (mask) {
5100 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
5101 len += n;
5102
5103 if (!set_be32_bf(mask, bits, offset, mask_)) {
5104 return 0;
5105 }
5106 } else {
5107 *mask |= htonl(((1U << bits) - 1) << offset);
5108 }
5109 }
5110 return len;
5111 }
5112 }
5113 return 0;
5114 }
5115
5116 static int
5117 scan_mpls_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
5118 {
5119 return scan_be32_bf(s, key, mask, 20, MPLS_LABEL_SHIFT);
5120 }
5121
5122 static int
5123 scan_mpls_tc(const char *s, ovs_be32 *key, ovs_be32 *mask)
5124 {
5125 return scan_be32_bf(s, key, mask, 3, MPLS_TC_SHIFT);
5126 }
5127
5128 static int
5129 scan_mpls_ttl(const char *s, ovs_be32 *key, ovs_be32 *mask)
5130 {
5131 return scan_be32_bf(s, key, mask, 8, MPLS_TTL_SHIFT);
5132 }
5133
5134 static int
5135 scan_mpls_bos(const char *s, ovs_be32 *key, ovs_be32 *mask)
5136 {
5137 return scan_be32_bf(s, key, mask, 1, MPLS_BOS_SHIFT);
5138 }
5139
5140 static int
5141 scan_vxlan_gbp(const char *s, uint32_t *key, uint32_t *mask)
5142 {
5143 const char *s_base = s;
5144 ovs_be16 id = 0, id_mask = 0;
5145 uint8_t flags = 0, flags_mask = 0;
5146 int len;
5147
5148 if (!strncmp(s, "id=", 3)) {
5149 s += 3;
5150 len = scan_be16(s, &id, mask ? &id_mask : NULL);
5151 if (len == 0) {
5152 return 0;
5153 }
5154 s += len;
5155 }
5156
5157 if (s[0] == ',') {
5158 s++;
5159 }
5160 if (!strncmp(s, "flags=", 6)) {
5161 s += 6;
5162 len = scan_u8(s, &flags, mask ? &flags_mask : NULL);
5163 if (len == 0) {
5164 return 0;
5165 }
5166 s += len;
5167 }
5168
5169 if (!strncmp(s, "))", 2)) {
5170 s += 2;
5171
5172 *key = (flags << 16) | ntohs(id);
5173 if (mask) {
5174 *mask = (flags_mask << 16) | ntohs(id_mask);
5175 }
5176
5177 return s - s_base;
5178 }
5179
5180 return 0;
5181 }
5182
5183 static int
5184 scan_gtpu_metadata(const char *s,
5185 struct gtpu_metadata *key,
5186 struct gtpu_metadata *mask)
5187 {
5188 const char *s_base = s;
5189     uint8_t flags = 0, flags_ma = 0;
5190     uint8_t msgtype = 0, msgtype_ma = 0;
5191 int len;
5192
5193 if (!strncmp(s, "flags=", 6)) {
5194 s += 6;
5195 len = scan_u8(s, &flags, mask ? &flags_ma : NULL);
5196 if (len == 0) {
5197 return 0;
5198 }
5199 s += len;
5200 }
5201
5202 if (s[0] == ',') {
5203 s++;
5204 }
5205
5206 if (!strncmp(s, "msgtype=", 8)) {
5207 s += 8;
5208 len = scan_u8(s, &msgtype, mask ? &msgtype_ma : NULL);
5209 if (len == 0) {
5210 return 0;
5211 }
5212 s += len;
5213 }
5214
5215 if (!strncmp(s, ")", 1)) {
5216 s += 1;
5217 key->flags = flags;
5218 key->msgtype = msgtype;
5219 if (mask) {
5220 mask->flags = flags_ma;
5221 mask->msgtype = msgtype_ma;
5222 }
5223 }
5224 return s - s_base;
5225 }
5226
5227 static int
5228 scan_erspan_metadata(const char *s,
5229 struct erspan_metadata *key,
5230 struct erspan_metadata *mask)
5231 {
5232 const char *s_base = s;
5233 uint32_t idx = 0, idx_mask = 0;
5234 uint8_t ver = 0, dir = 0, hwid = 0;
5235 uint8_t ver_mask = 0, dir_mask = 0, hwid_mask = 0;
5236 int len;
5237
5238 if (!strncmp(s, "ver=", 4)) {
5239 s += 4;
5240 len = scan_u8(s, &ver, mask ? &ver_mask : NULL);
5241 if (len == 0) {
5242 return 0;
5243 }
5244 s += len;
5245 }
5246
5247 if (s[0] == ',') {
5248 s++;
5249 }
5250
5251 if (ver == 1) {
5252 if (!strncmp(s, "idx=", 4)) {
5253 s += 4;
5254 len = scan_u32(s, &idx, mask ? &idx_mask : NULL);
5255 if (len == 0) {
5256 return 0;
5257 }
5258 s += len;
5259 }
5260
5261 if (!strncmp(s, ")", 1)) {
5262 s += 1;
5263 key->version = ver;
5264 key->u.index = htonl(idx);
5265 if (mask) {
5266 mask->u.index = htonl(idx_mask);
5267 }
5268 }
5269 return s - s_base;
5270
5271 } else if (ver == 2) {
5272 if (!strncmp(s, "dir=", 4)) {
5273 s += 4;
5274 len = scan_u8(s, &dir, mask ? &dir_mask : NULL);
5275 if (len == 0) {
5276 return 0;
5277 }
5278 s += len;
5279 }
5280 if (s[0] == ',') {
5281 s++;
5282 }
5283 if (!strncmp(s, "hwid=", 5)) {
5284 s += 5;
5285 len = scan_u8(s, &hwid, mask ? &hwid_mask : NULL);
5286 if (len == 0) {
5287 return 0;
5288 }
5289 s += len;
5290 }
5291
5292 if (!strncmp(s, ")", 1)) {
5293 s += 1;
5294 key->version = ver;
5295 key->u.md2.hwid = hwid;
5296 key->u.md2.dir = dir;
5297 if (mask) {
5298 mask->u.md2.hwid = hwid_mask;
5299 mask->u.md2.dir = dir_mask;
5300 }
5301 }
5302 return s - s_base;
5303 }
5304
5305 return 0;
5306 }
5307
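/* Parses a sequence of Geneve options, each of the form
 * "{class=...,type=...,len=...,<data>}" and terminated by ')', from 's' into
 * 'key' (and 'mask', if nonnull).  Returns the number of characters
 * consumed, or 0 on a parse error. */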
5308 static int
5309 scan_geneve(const char *s, struct geneve_scan *key, struct geneve_scan *mask)
5310 {
5311 const char *s_base = s;
5312 struct geneve_opt *opt = key->d;
5313 struct geneve_opt *opt_mask = mask ? mask->d : NULL;
5314 int len_remain = sizeof key->d;
5315 int len;
5316
5317 while (s[0] == '{' && len_remain >= sizeof *opt) {
5318 int data_len = 0;
5319
5320 s++;
5321 len_remain -= sizeof *opt;
5322
5323 if (!strncmp(s, "class=", 6)) {
5324 s += 6;
5325 len = scan_be16(s, &opt->opt_class,
5326 mask ? &opt_mask->opt_class : NULL);
5327 if (len == 0) {
5328 return 0;
5329 }
5330 s += len;
5331 } else if (mask) {
5332 memset(&opt_mask->opt_class, 0, sizeof opt_mask->opt_class);
5333 }
5334
5335 if (s[0] == ',') {
5336 s++;
5337 }
5338 if (!strncmp(s, "type=", 5)) {
5339 s += 5;
5340 len = scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
5341 if (len == 0) {
5342 return 0;
5343 }
5344 s += len;
5345 } else if (mask) {
5346 memset(&opt_mask->type, 0, sizeof opt_mask->type);
5347 }
5348
5349 if (s[0] == ',') {
5350 s++;
5351 }
5352 if (!strncmp(s, "len=", 4)) {
5353 uint8_t opt_len, opt_len_mask;
5354 s += 4;
5355 len = scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
5356 if (len == 0) {
5357 return 0;
5358 }
5359 s += len;
5360
5361 if (opt_len > 124 || opt_len % 4 || opt_len > len_remain) {
5362 return 0;
5363 }
5364 opt->length = opt_len / 4;
5365 if (mask) {
5366 opt_mask->length = opt_len_mask;
5367 }
5368 data_len = opt_len;
5369 } else if (mask) {
5370             opt_mask->length = 0;
5371 }
5372
5373 if (s[0] == ',') {
5374 s++;
5375 if (parse_int_string(s, (uint8_t *)(opt + 1),
5376 data_len, (char **)&s)) {
5377 return 0;
5378 }
5379 }
5380 if (mask) {
5381 if (s[0] == '/') {
5382 s++;
5383 if (parse_int_string(s, (uint8_t *)(opt_mask + 1),
5384 data_len, (char **)&s)) {
5385 return 0;
5386 }
5387 }
5388 opt_mask->r1 = 0;
5389 opt_mask->r2 = 0;
5390 opt_mask->r3 = 0;
5391 }
5392
5393 if (s[0] == '}') {
5394 s++;
5395 opt += 1 + data_len / 4;
5396 if (mask) {
5397 opt_mask += 1 + data_len / 4;
5398 }
5399 len_remain -= data_len;
5400 } else {
5401 return 0;
5402 }
5403 }
5404
5405 if (s[0] == ')') {
5406 len = sizeof key->d - len_remain;
5407
5408 s++;
5409 key->len = len;
5410 if (mask) {
5411 mask->len = len;
5412 }
5413 return s - s_base;
5414 }
5415
5416 return 0;
5417 }
5418
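/* The *_to_attr() helpers below are used with SCAN_FIELD_NESTED_FUNC() to
 * convert a scanned tunnel option structure into its OVS_TUNNEL_KEY_ATTR_*
 * netlink form in 'a'. */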
5419 static void
5420 tun_flags_to_attr(struct ofpbuf *a, const void *data_)
5421 {
5422 const uint16_t *flags = data_;
5423
5424 if (*flags & FLOW_TNL_F_DONT_FRAGMENT) {
5425 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
5426 }
5427 if (*flags & FLOW_TNL_F_CSUM) {
5428 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
5429 }
5430 if (*flags & FLOW_TNL_F_OAM) {
5431 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
5432 }
5433 }
5434
5435 static void
5436 vxlan_gbp_to_attr(struct ofpbuf *a, const void *data_)
5437 {
5438 const uint32_t *gbp = data_;
5439
5440 if (*gbp) {
5441 size_t vxlan_opts_ofs;
5442
5443 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
5444 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP, *gbp);
5445 nl_msg_end_nested(a, vxlan_opts_ofs);
5446 }
5447 }
5448
5449 static void
5450 geneve_to_attr(struct ofpbuf *a, const void *data_)
5451 {
5452 const struct geneve_scan *geneve = data_;
5453
5454 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, geneve->d,
5455 geneve->len);
5456 }
5457
5458 static void
5459 erspan_to_attr(struct ofpbuf *a, const void *data_)
5460 {
5461 const struct erspan_metadata *md = data_;
5462
5463 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, md,
5464 sizeof *md);
5465 }
5466
5467 static void
5468 gtpu_to_attr(struct ofpbuf *a, const void *data_)
5469 {
5470 const struct gtpu_metadata *md = data_;
5471
5472 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GTPU_OPTS, md,
5473 sizeof *md);
5474 }
5475
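/* The SCAN_* macros below implement the body of parse_odp_key_mask_attr__():
 * each one matches a "name(" prefix in 's', scans the fields between the
 * parentheses into a key (and optional mask) structure, and emits the
 * corresponding OVS_KEY_ATTR_* netlink attributes into 'key' and 'mask'.
 * For example, the SCAN_BEGIN("eth(", struct ovs_key_ethernet) block below
 * handles input such as
 * "eth(src=00:11:22:33:44:55,dst=ff:ff:ff:ff:ff:ff)". */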
5476 #define SCAN_PUT_ATTR(BUF, ATTR, DATA, FUNC) \
5477 { \
5478 unsigned long call_fn = (unsigned long)FUNC; \
5479 if (call_fn) { \
5480 typedef void (*fn)(struct ofpbuf *, const void *); \
5481 fn func = FUNC; \
5482 func(BUF, &(DATA)); \
5483 } else { \
5484 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)); \
5485 } \
5486 }
5487
5488 #define SCAN_IF(NAME) \
5489 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5490 const char *start = s; \
5491 int len; \
5492 \
5493 s += strlen(NAME)
5494
5495 /* Usually no special initialization is needed. */
5496 #define SCAN_BEGIN(NAME, TYPE) \
5497 SCAN_IF(NAME); \
5498 TYPE skey, smask; \
5499 memset(&skey, 0, sizeof skey); \
5500 memset(&smask, 0, sizeof smask); \
5501 do { \
5502 len = 0;
5503
5504 /* Initialize as fully masked, since the mask will not be scanned. */
5505 #define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
5506 SCAN_IF(NAME); \
5507 TYPE skey, smask; \
5508 memset(&skey, 0, sizeof skey); \
5509 memset(&smask, 0xff, sizeof smask); \
5510 do { \
5511 len = 0;
5512
5513 /* VLAN needs special initialization. */
5514 #define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
5515 SCAN_IF(NAME); \
5516 TYPE skey = KEY_INIT; \
5517 TYPE smask = MASK_INIT; \
5518 do { \
5519 len = 0;
5520
5521 /* Scan unnamed entry as 'TYPE'. */
5522 #define SCAN_TYPE(TYPE, KEY, MASK) \
5523 len = scan_##TYPE(s, KEY, MASK); \
5524 if (len == 0) { \
5525 return -EINVAL; \
5526 } \
5527 s += len
5528
5529 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
5530 #define SCAN_FIELD(NAME, TYPE, FIELD) \
5531 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5532 s += strlen(NAME); \
5533 SCAN_TYPE(TYPE, &skey.FIELD, mask ? &smask.FIELD : NULL); \
5534 continue; \
5535 }
5536
5537 #define SCAN_FINISH() \
5538 } while (*s++ == ',' && len != 0); \
5539 if (s[-1] != ')') { \
5540 return -EINVAL; \
5541 }
5542
5543 #define SCAN_FINISH_SINGLE() \
5544 } while (false); \
5545 if (*s++ != ')') { \
5546 return -EINVAL; \
5547 }
5548
5549 /* Beginning of nested attribute. */
5550 #define SCAN_BEGIN_NESTED(NAME, ATTR) \
5551 SCAN_IF(NAME); \
5552 size_t key_offset, mask_offset = 0; \
5553 key_offset = nl_msg_start_nested(key, ATTR); \
5554 if (mask) { \
5555 mask_offset = nl_msg_start_nested(mask, ATTR); \
5556 } \
5557 do { \
5558 len = 0;
5559
5560 #define SCAN_END_NESTED() \
5561 SCAN_FINISH(); \
5562 nl_msg_end_nested(key, key_offset); \
5563 if (mask) { \
5564 nl_msg_end_nested(mask, mask_offset); \
5565 } \
5566 return s - start; \
5567 }
5568
5569 #define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \
5570 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5571 TYPE skey, smask; \
5572 memset(&skey, 0, sizeof skey); \
5573 memset(&smask, 0xff, sizeof smask); \
5574 s += strlen(NAME); \
5575 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5576 SCAN_PUT(ATTR, FUNC); \
5577 continue; \
5578 }
5579
5580 #define SCAN_FIELD_NESTED(NAME, TYPE, SCAN_AS, ATTR) \
5581 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, NULL)
5582
5583 #define SCAN_FIELD_NESTED_FUNC(NAME, TYPE, SCAN_AS, FUNC) \
5584 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, 0, FUNC)
5585
5586 #define SCAN_PUT(ATTR, FUNC) \
5587 SCAN_PUT_ATTR(key, ATTR, skey, FUNC); \
5588 if (mask) \
5589 SCAN_PUT_ATTR(mask, ATTR, smask, FUNC); \
5590
5591 #define SCAN_END(ATTR) \
5592 SCAN_FINISH(); \
5593 SCAN_PUT(ATTR, NULL); \
5594 return s - start; \
5595 }
5596
5597 #define SCAN_BEGIN_ARRAY(NAME, TYPE, CNT) \
5598 SCAN_IF(NAME); \
5599 TYPE skey[CNT], smask[CNT]; \
5600 memset(&skey, 0, sizeof skey); \
5601 memset(&smask, 0, sizeof smask); \
5602 int idx = 0, cnt = CNT; \
5603 uint64_t fields = 0; \
5604 do { \
5605 int field = 0; \
5606 len = 0;
5607
5608 /* Scan named ('NAME') array entry 'FIELD' as 'TYPE'. */
5609 #define SCAN_FIELD_ARRAY(NAME, TYPE, FIELD) \
5610 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5611 if (fields & (1UL << field)) { \
5612 fields = 0; \
5613 if (++idx == cnt) { \
5614 break; \
5615 } \
5616 } \
5617 s += strlen(NAME); \
5618 SCAN_TYPE(TYPE, &skey[idx].FIELD, mask ? &smask[idx].FIELD : NULL); \
5619 fields |= 1UL << field; \
5620 continue; \
5621 } \
5622 field++;
5623
5624 #define SCAN_PUT_ATTR_ARRAY(BUF, ATTR, DATA, CNT) \
5625 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)[0] * (CNT)); \
5626
5627 #define SCAN_PUT_ARRAY(ATTR, CNT) \
5628 SCAN_PUT_ATTR_ARRAY(key, ATTR, skey, CNT); \
5629 if (mask) { \
5630 SCAN_PUT_ATTR_ARRAY(mask, ATTR, smask, CNT); \
5631 }
5632
5633 #define SCAN_END_ARRAY(ATTR) \
5634 SCAN_FINISH(); \
5635 if (idx == cnt) { \
5636 return -EINVAL; \
5637 } \
5638 SCAN_PUT_ARRAY(ATTR, idx + 1); \
5639 return s - start; \
5640 }
5641
5642 #define SCAN_END_SINGLE(ATTR) \
5643 SCAN_FINISH_SINGLE(); \
5644 SCAN_PUT(ATTR, NULL); \
5645 return s - start; \
5646 }
5647
5648 #define SCAN_SINGLE(NAME, TYPE, SCAN_AS, ATTR) \
5649 SCAN_BEGIN(NAME, TYPE) { \
5650 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5651 } SCAN_END_SINGLE(ATTR)
5652
5653 #define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
5654 SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
5655 SCAN_TYPE(SCAN_AS, &skey, NULL); \
5656 } SCAN_END_SINGLE(ATTR)
5657
5658 /* scan_port needs one extra argument. */
5659 #define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
5660 SCAN_BEGIN(NAME, TYPE) { \
5661 len = scan_port(s, &skey, &smask, \
5662 context->port_names); \
5663 if (len == 0) { \
5664 return -EINVAL; \
5665 } \
5666 s += len; \
5667 } SCAN_END_SINGLE(ATTR)
5668
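/* Parses an "nsh(...)" key (and optional mask) from 's', combining the "spi"
 * and "si" fields into a path header, and appends the resulting
 * OVS_KEY_ATTR_NSH attributes to 'key' and 'mask'.  Returns the number of
 * characters consumed, 0 if 's' does not begin with "nsh(", or -EINVAL on a
 * parse error. */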
5669 static int
5670 parse_odp_nsh_key_mask_attr(const char *s, struct ofpbuf *key,
5671 struct ofpbuf *mask)
5672 {
5673 if (strncmp(s, "nsh(", 4) == 0) {
5674 const char *start = s;
5675 int len;
5676 struct ovs_key_nsh skey, smask;
5677 uint32_t spi = 0, spi_mask = 0;
5678 uint8_t si = 0, si_mask = 0;
5679
5680 s += 4;
5681
5682 memset(&skey, 0, sizeof skey);
5683 memset(&smask, 0, sizeof smask);
5684 do {
5685 len = 0;
5686
5687 if (strncmp(s, "flags=", 6) == 0) {
5688 s += 6;
5689 len = scan_u8(s, &skey.flags, mask ? &smask.flags : NULL);
5690 if (len == 0) {
5691 return -EINVAL;
5692 }
5693 s += len;
5694 continue;
5695 }
5696
5697 if (strncmp(s, "mdtype=", 7) == 0) {
5698 s += 7;
5699 len = scan_u8(s, &skey.mdtype, mask ? &smask.mdtype : NULL);
5700 if (len == 0) {
5701 return -EINVAL;
5702 }
5703 s += len;
5704 continue;
5705 }
5706
5707 if (strncmp(s, "np=", 3) == 0) {
5708 s += 3;
5709 len = scan_u8(s, &skey.np, mask ? &smask.np : NULL);
5710 if (len == 0) {
5711 return -EINVAL;
5712 }
5713 s += len;
5714 continue;
5715 }
5716
5717 if (strncmp(s, "spi=", 4) == 0) {
5718 s += 4;
5719 len = scan_u32(s, &spi, mask ? &spi_mask : NULL);
5720 if (len == 0) {
5721 return -EINVAL;
5722 }
5723 s += len;
5724 continue;
5725 }
5726
5727 if (strncmp(s, "si=", 3) == 0) {
5728 s += 3;
5729 len = scan_u8(s, &si, mask ? &si_mask : NULL);
5730 if (len == 0) {
5731 return -EINVAL;
5732 }
5733 s += len;
5734 continue;
5735 }
5736
5737 if (strncmp(s, "c1=", 3) == 0) {
5738 s += 3;
5739 len = scan_be32(s, &skey.context[0],
5740 mask ? &smask.context[0] : NULL);
5741 if (len == 0) {
5742 return -EINVAL;
5743 }
5744 s += len;
5745 continue;
5746 }
5747
5748 if (strncmp(s, "c2=", 3) == 0) {
5749 s += 3;
5750 len = scan_be32(s, &skey.context[1],
5751 mask ? &smask.context[1] : NULL);
5752 if (len == 0) {
5753 return -EINVAL;
5754 }
5755 s += len;
5756 continue;
5757 }
5758
5759 if (strncmp(s, "c3=", 3) == 0) {
5760 s += 3;
5761 len = scan_be32(s, &skey.context[2],
5762 mask ? &smask.context[2] : NULL);
5763 if (len == 0) {
5764 return -EINVAL;
5765 }
5766 s += len;
5767 continue;
5768 }
5769
5770 if (strncmp(s, "c4=", 3) == 0) {
5771 s += 3;
5772 len = scan_be32(s, &skey.context[3],
5773 mask ? &smask.context[3] : NULL);
5774 if (len == 0) {
5775 return -EINVAL;
5776 }
5777 s += len;
5778 continue;
5779 }
5780 } while (*s++ == ',' && len != 0);
5781 if (s[-1] != ')') {
5782 return -EINVAL;
5783 }
5784
5785 skey.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
5786 smask.path_hdr = nsh_spi_si_to_path_hdr(spi_mask, si_mask);
5787
5788 nsh_key_to_attr(key, &skey, NULL, 0, false);
5789 if (mask) {
5790 nsh_key_to_attr(mask, &smask, NULL, 0, true);
5791 }
5792 return s - start;
5793 }
5794 return 0;
5795 }
5796
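/* Wrapper around parse_odp_key_mask_attr__() that limits the nesting depth
 * to MAX_ODP_NESTED levels. */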
5797 static int
5798 parse_odp_key_mask_attr(struct parse_odp_context *context, const char *s,
5799 struct ofpbuf *key, struct ofpbuf *mask)
5800 {
5801 int retval;
5802
5803 context->depth++;
5804
5805 if (context->depth == MAX_ODP_NESTED) {
5806 retval = -EINVAL;
5807 } else {
5808 retval = parse_odp_key_mask_attr__(context, s, key, mask);
5809 }
5810
5811 context->depth--;
5812
5813 return retval;
5814 }
5815
5816 static int
5817 parse_odp_key_mask_attr__(struct parse_odp_context *context, const char *s,
5818 struct ofpbuf *key, struct ofpbuf *mask)
5819 {
5820 SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
5821 SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
5822 SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
5823 OVS_KEY_ATTR_RECIRC_ID);
5824 SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
5825
5826 SCAN_SINGLE("ct_state(", uint32_t, ct_state, OVS_KEY_ATTR_CT_STATE);
5827 SCAN_SINGLE("ct_zone(", uint16_t, u16, OVS_KEY_ATTR_CT_ZONE);
5828 SCAN_SINGLE("ct_mark(", uint32_t, u32, OVS_KEY_ATTR_CT_MARK);
5829 SCAN_SINGLE("ct_label(", ovs_u128, u128, OVS_KEY_ATTR_CT_LABELS);
5830
5831 SCAN_BEGIN("ct_tuple4(", struct ovs_key_ct_tuple_ipv4) {
5832 SCAN_FIELD("src=", ipv4, ipv4_src);
5833 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5834 SCAN_FIELD("proto=", u8, ipv4_proto);
5835 SCAN_FIELD("tp_src=", be16, src_port);
5836 SCAN_FIELD("tp_dst=", be16, dst_port);
5837 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
5838
5839 SCAN_BEGIN("ct_tuple6(", struct ovs_key_ct_tuple_ipv6) {
5840 SCAN_FIELD("src=", in6_addr, ipv6_src);
5841 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5842 SCAN_FIELD("proto=", u8, ipv6_proto);
5843 SCAN_FIELD("tp_src=", be16, src_port);
5844 SCAN_FIELD("tp_dst=", be16, dst_port);
5845 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
5846
5847 SCAN_BEGIN_NESTED("tunnel(", OVS_KEY_ATTR_TUNNEL) {
5848 SCAN_FIELD_NESTED("tun_id=", ovs_be64, be64, OVS_TUNNEL_KEY_ATTR_ID);
5849 SCAN_FIELD_NESTED("src=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_SRC);
5850 SCAN_FIELD_NESTED("dst=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_DST);
5851 SCAN_FIELD_NESTED("ipv6_src=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_SRC);
5852 SCAN_FIELD_NESTED("ipv6_dst=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_DST);
5853 SCAN_FIELD_NESTED("tos=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TOS);
5854 SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
5855 SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
5856 SCAN_FIELD_NESTED("tp_dst=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_DST);
5857 SCAN_FIELD_NESTED_FUNC("erspan(", struct erspan_metadata, erspan_metadata,
5858 erspan_to_attr);
5859 SCAN_FIELD_NESTED_FUNC("vxlan(gbp(", uint32_t, vxlan_gbp, vxlan_gbp_to_attr);
5860 SCAN_FIELD_NESTED_FUNC("geneve(", struct geneve_scan, geneve,
5861 geneve_to_attr);
5862 SCAN_FIELD_NESTED_FUNC("gtpu(", struct gtpu_metadata, gtpu_metadata,
5863 gtpu_to_attr);
5864 SCAN_FIELD_NESTED_FUNC("flags(", uint16_t, tun_flags, tun_flags_to_attr);
5865 } SCAN_END_NESTED();
5866
5867 SCAN_SINGLE_PORT("in_port(", uint32_t, OVS_KEY_ATTR_IN_PORT);
5868
5869 SCAN_BEGIN("eth(", struct ovs_key_ethernet) {
5870 SCAN_FIELD("src=", eth, eth_src);
5871 SCAN_FIELD("dst=", eth, eth_dst);
5872 } SCAN_END(OVS_KEY_ATTR_ETHERNET);
5873
5874 SCAN_BEGIN_INIT("vlan(", struct ovs_key_vlan__,
5875 { htons(VLAN_CFI) }, { htons(VLAN_CFI) }) {
5876 SCAN_FIELD("vid=", vid, tci);
5877 SCAN_FIELD("pcp=", pcp, tci);
5878 SCAN_FIELD("cfi=", cfi, tci);
5879 } SCAN_END(OVS_KEY_ATTR_VLAN);
5880
5881 SCAN_SINGLE("eth_type(", ovs_be16, be16, OVS_KEY_ATTR_ETHERTYPE);
5882
5883 SCAN_BEGIN_ARRAY("mpls(", struct ovs_key_mpls, FLOW_MAX_MPLS_LABELS) {
5884 SCAN_FIELD_ARRAY("label=", mpls_label, mpls_lse);
5885 SCAN_FIELD_ARRAY("tc=", mpls_tc, mpls_lse);
5886 SCAN_FIELD_ARRAY("ttl=", mpls_ttl, mpls_lse);
5887 SCAN_FIELD_ARRAY("bos=", mpls_bos, mpls_lse);
5888 } SCAN_END_ARRAY(OVS_KEY_ATTR_MPLS);
5889
5890 SCAN_BEGIN("ipv4(", struct ovs_key_ipv4) {
5891 SCAN_FIELD("src=", ipv4, ipv4_src);
5892 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5893 SCAN_FIELD("proto=", u8, ipv4_proto);
5894 SCAN_FIELD("tos=", u8, ipv4_tos);
5895 SCAN_FIELD("ttl=", u8, ipv4_ttl);
5896 SCAN_FIELD("frag=", frag, ipv4_frag);
5897 } SCAN_END(OVS_KEY_ATTR_IPV4);
5898
5899 SCAN_BEGIN("ipv6(", struct ovs_key_ipv6) {
5900 SCAN_FIELD("src=", in6_addr, ipv6_src);
5901 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5902 SCAN_FIELD("label=", ipv6_label, ipv6_label);
5903 SCAN_FIELD("proto=", u8, ipv6_proto);
5904 SCAN_FIELD("tclass=", u8, ipv6_tclass);
5905 SCAN_FIELD("hlimit=", u8, ipv6_hlimit);
5906 SCAN_FIELD("frag=", frag, ipv6_frag);
5907 } SCAN_END(OVS_KEY_ATTR_IPV6);
5908
5909 SCAN_BEGIN("tcp(", struct ovs_key_tcp) {
5910 SCAN_FIELD("src=", be16, tcp_src);
5911 SCAN_FIELD("dst=", be16, tcp_dst);
5912 } SCAN_END(OVS_KEY_ATTR_TCP);
5913
5914 SCAN_SINGLE("tcp_flags(", ovs_be16, tcp_flags, OVS_KEY_ATTR_TCP_FLAGS);
5915
5916 SCAN_BEGIN("udp(", struct ovs_key_udp) {
5917 SCAN_FIELD("src=", be16, udp_src);
5918 SCAN_FIELD("dst=", be16, udp_dst);
5919 } SCAN_END(OVS_KEY_ATTR_UDP);
5920
5921 SCAN_BEGIN("sctp(", struct ovs_key_sctp) {
5922 SCAN_FIELD("src=", be16, sctp_src);
5923 SCAN_FIELD("dst=", be16, sctp_dst);
5924 } SCAN_END(OVS_KEY_ATTR_SCTP);
5925
5926 SCAN_BEGIN("icmp(", struct ovs_key_icmp) {
5927 SCAN_FIELD("type=", u8, icmp_type);
5928 SCAN_FIELD("code=", u8, icmp_code);
5929 } SCAN_END(OVS_KEY_ATTR_ICMP);
5930
5931 SCAN_BEGIN("icmpv6(", struct ovs_key_icmpv6) {
5932 SCAN_FIELD("type=", u8, icmpv6_type);
5933 SCAN_FIELD("code=", u8, icmpv6_code);
5934 } SCAN_END(OVS_KEY_ATTR_ICMPV6);
5935
5936 SCAN_BEGIN("arp(", struct ovs_key_arp) {
5937 SCAN_FIELD("sip=", ipv4, arp_sip);
5938 SCAN_FIELD("tip=", ipv4, arp_tip);
5939 SCAN_FIELD("op=", be16, arp_op);
5940 SCAN_FIELD("sha=", eth, arp_sha);
5941 SCAN_FIELD("tha=", eth, arp_tha);
5942 } SCAN_END(OVS_KEY_ATTR_ARP);
5943
5944 SCAN_BEGIN("nd(", struct ovs_key_nd) {
5945 SCAN_FIELD("target=", in6_addr, nd_target);
5946 SCAN_FIELD("sll=", eth, nd_sll);
5947 SCAN_FIELD("tll=", eth, nd_tll);
5948 } SCAN_END(OVS_KEY_ATTR_ND);
5949
5950 SCAN_BEGIN("nd_ext(", struct ovs_key_nd_extensions) {
5951 SCAN_FIELD("nd_reserved=", be32, nd_reserved);
5952 SCAN_FIELD("nd_options_type=", u8, nd_options_type);
5953 } SCAN_END(OVS_KEY_ATTR_ND_EXTENSIONS);
5954
5955 struct packet_type {
5956 ovs_be16 ns;
5957 ovs_be16 id;
5958 };
5959 SCAN_BEGIN("packet_type(", struct packet_type) {
5960 SCAN_FIELD("ns=", be16, ns);
5961 SCAN_FIELD("id=", be16, id);
5962 } SCAN_END(OVS_KEY_ATTR_PACKET_TYPE);
5963
5964 /* NSH is nested, so it needs special processing. */
5965 int ret = parse_odp_nsh_key_mask_attr(s, key, mask);
5966 if (ret < 0) {
5967 return ret;
5968 } else {
5969 s += ret;
5970 }
5971
5972 /* The encap key is parsed open-coded here rather than via the SCAN macros. */
5973 if (!strncmp(s, "encap(", 6)) {
5974 const char *start = s;
5975 size_t encap, encap_mask = 0;
5976
5977 encap = nl_msg_start_nested(key, OVS_KEY_ATTR_ENCAP);
5978 if (mask) {
5979 encap_mask = nl_msg_start_nested(mask, OVS_KEY_ATTR_ENCAP);
5980 }
5981
5982 s += 6;
5983 for (;;) {
5984 int retval;
5985
5986 s += strspn(s, delimiters);
5987 if (!*s) {
5988 return -EINVAL;
5989 } else if (*s == ')') {
5990 break;
5991 }
5992
5993 retval = parse_odp_key_mask_attr(context, s, key, mask);
5994 if (retval < 0) {
5995 return retval;
5996 }
5997
5998 if (nl_attr_oversized(key->size - encap - NLA_HDRLEN)) {
5999 return -E2BIG;
6000 }
6001 s += retval;
6002 }
6003 s++;
6004
6005 nl_msg_end_nested(key, encap);
6006 if (mask) {
6007 nl_msg_end_nested(mask, encap_mask);
6008 }
6009
6010 return s - start;
6011 }
6012
6013 return -EINVAL;
6014 }
6015
6016 /* Parses the string representation of a datapath flow key, in the format
6017 * output by odp_flow_key_format(). Returns 0 if successful, otherwise a
6018 * positive errno value. On success, stores NULL into '*errorp' and the flow
6019 * key is appended to 'key' as a series of Netlink attributes. On failure,
6020 * stores a malloc()'d error message in '*errorp' without changing the data in
6021 * 'key'. Either way, 'key''s data might be reallocated.
6022 *
6023 * If 'port_names' is nonnull, it points to a simap that maps from a port name
6024 * to a port number. (Port names may be used instead of port numbers in
6025 * in_port.)
6026 *
6027 * On success, the attributes appended to 'key' are individually syntactically
6028 * valid, but they may not be valid as a sequence. 'key' might, for example,
6029 * have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
6030 int
6031 odp_flow_from_string(const char *s, const struct simap *port_names,
6032 struct ofpbuf *key, struct ofpbuf *mask,
6033 char **errorp)
6034 {
6035 if (errorp) {
6036 *errorp = NULL;
6037 }
6038
6039 const size_t old_size = key->size;
6040 struct parse_odp_context context = (struct parse_odp_context) {
6041 .port_names = port_names,
6042 };
6043 for (;;) {
6044 int retval;
6045
6046 s += strspn(s, delimiters);
6047 if (!*s) {
6048 return 0;
6049 }
6050
6051 /* Skip UFID. */
6052 ovs_u128 ufid;
6053 retval = odp_ufid_from_string(s, &ufid);
6054 if (retval < 0) {
6055 if (errorp) {
6056 *errorp = xasprintf("syntax error at %s", s);
6057 }
6058 key->size = old_size;
6059 return -retval;
6060 } else if (retval > 0) {
6061 s += retval;
6062 s += s[0] == ' ' ? 1 : 0;
6063 }
6064
6065 retval = parse_odp_key_mask_attr(&context, s, key, mask);
6066 if (retval < 0) {
6067 if (errorp) {
6068 *errorp = xasprintf("syntax error at %s", s);
6069 }
6070 key->size = old_size;
6071 return -retval;
6072 }
6073 s += retval;
6074 }
6075
6076 return 0;
6077 }
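
/* Illustrative usage sketch for odp_flow_from_string(), assuming the caller
 * supplies the flow string (the one below is just an example) and no port
 * name map:
 *
 *     struct ofpbuf key, mask;
 *     char *error = NULL;
 *
 *     ofpbuf_init(&key, 0);
 *     ofpbuf_init(&mask, 0);
 *     if (!odp_flow_from_string("in_port(1),eth_type(0x0800)",
 *                               NULL, &key, &mask, &error)) {
 *         // 'key' and 'mask' now hold OVS_KEY_ATTR_* Netlink attributes.
 *     } else {
 *         free(error);  // On failure a malloc()'d message is stored here.
 *     }
 *     ofpbuf_uninit(&key);
 *     ofpbuf_uninit(&mask);
 */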
6078
6079 static uint8_t
6080 ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
6081 {
6082 if (is_mask) {
6083 /* The Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration
6084 * type, not a set of flags or bitfields. Hence, if the 'struct flow'
6085 * nw_frag mask, which is a set of bits, has FLOW_NW_FRAG_ANY as zero, we
6086 * must use a zero mask for the Netlink frag field, and an all-ones mask
6087 * otherwise. */
6088 return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
6089 }
6090 return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
6091 : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
6092 : OVS_FRAG_TYPE_FIRST;
6093 }
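
/* Worked examples of the mapping implemented above (values follow directly
 * from the code):
 *
 *     ovs_to_odp_frag(0, false)                                     -> OVS_FRAG_TYPE_NONE
 *     ovs_to_odp_frag(FLOW_NW_FRAG_ANY, false)                      -> OVS_FRAG_TYPE_FIRST
 *     ovs_to_odp_frag(FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER, false) -> OVS_FRAG_TYPE_LATER
 *     ovs_to_odp_frag(FLOW_NW_FRAG_ANY, true)                       -> UINT8_MAX (mask)
 *     ovs_to_odp_frag(0, true)                                      -> 0         (mask)
 */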
6094
6095 static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
6096 static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
6097 static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
6098 bool is_mask);
6099 static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
6100 bool is_mask);
6101 static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
6102 bool is_mask);
6103 static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
6104 bool is_mask);
6105 static void get_arp_key(const struct flow *, struct ovs_key_arp *);
6106 static void put_arp_key(const struct ovs_key_arp *, struct flow *);
6107 static void get_nd_key(const struct flow *, struct ovs_key_nd *);
6108 static void put_nd_key(const struct ovs_key_nd *, struct flow *);
6109 static void get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh,
6110 bool is_mask);
6111 static void put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
6112 bool is_mask);
6113
6114 /* These share the same layout. */
6115 union ovs_key_tp {
6116 struct ovs_key_tcp tcp;
6117 struct ovs_key_udp udp;
6118 struct ovs_key_sctp sctp;
6119 };
6120
6121 static void get_tp_key(const struct flow *, union ovs_key_tp *);
6122 static void put_tp_key(const union ovs_key_tp *, struct flow *);
6123
6124 static void
6125 odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms,
6126 bool export_mask, struct ofpbuf *buf)
6127 {
6128 /* New "struct flow" fields that are visible to the datapath (including all
6129 * data fields) should be translated into equivalent datapath flow fields
6130 * here (you will have to add an OVS_KEY_ATTR_* for them). */
6131 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
6132
6133 struct ovs_key_ethernet *eth_key;
6134 size_t encap[FLOW_MAX_VLAN_HEADERS] = {0};
6135 size_t max_vlans;
6136 const struct flow *flow = parms->flow;
6137 const struct flow *mask = parms->mask;
6138 const struct flow *data = export_mask ? mask : flow;
6139
6140 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
6141
6142 if (flow_tnl_dst_is_set(&flow->tunnel) ||
6143 flow_tnl_src_is_set(&flow->tunnel) || export_mask) {
6144 tun_key_to_attr(buf, &data->tunnel, &parms->flow->tunnel,
6145 parms->key_buf, NULL);
6146 }
6147
6148 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
6149
6150 if (parms->support.ct_state) {
6151 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
6152 ovs_to_odp_ct_state(data->ct_state));
6153 }
6154 if (parms->support.ct_zone) {
6155 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, data->ct_zone);
6156 }
6157 if (parms->support.ct_mark) {
6158 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, data->ct_mark);
6159 }
6160 if (parms->support.ct_label) {
6161 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &data->ct_label,
6162 sizeof(data->ct_label));
6163 }
6164 if (flow->ct_nw_proto) {
6165 if (parms->support.ct_orig_tuple
6166 && flow->dl_type == htons(ETH_TYPE_IP)) {
6167 struct ovs_key_ct_tuple_ipv4 *ct;
6168
6169 /* 'struct ovs_key_ct_tuple_ipv4' has padding, clear it. */
6170 ct = nl_msg_put_unspec_zero(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
6171 sizeof *ct);
6172 ct->ipv4_src = data->ct_nw_src;
6173 ct->ipv4_dst = data->ct_nw_dst;
6174 ct->src_port = data->ct_tp_src;
6175 ct->dst_port = data->ct_tp_dst;
6176 ct->ipv4_proto = data->ct_nw_proto;
6177 } else if (parms->support.ct_orig_tuple6
6178 && flow->dl_type == htons(ETH_TYPE_IPV6)) {
6179 struct ovs_key_ct_tuple_ipv6 *ct;
6180
6181 /* 'struct ovs_key_ct_tuple_ipv6' has padding, clear it. */
6182 ct = nl_msg_put_unspec_zero(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
6183 sizeof *ct);
6184 ct->ipv6_src = data->ct_ipv6_src;
6185 ct->ipv6_dst = data->ct_ipv6_dst;
6186 ct->src_port = data->ct_tp_src;
6187 ct->dst_port = data->ct_tp_dst;
6188 ct->ipv6_proto = data->ct_nw_proto;
6189 }
6190 }
6191 if (parms->support.recirc) {
6192 nl_msg_put_u32(buf, OVS_KEY_ATTR_RECIRC_ID, data->recirc_id);
6193 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, data->dp_hash);
6194 }
6195
6196 /* Add an ingress port attribute if this is a mask or 'in_port.odp_port'
6197 * is not the magical value "ODPP_NONE". */
6198 if (export_mask || flow->in_port.odp_port != ODPP_NONE) {
6199 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, data->in_port.odp_port);
6200 }
6201
6202 nl_msg_put_be32(buf, OVS_KEY_ATTR_PACKET_TYPE, data->packet_type);
6203
6204 if (OVS_UNLIKELY(parms->probe)) {
6205 max_vlans = FLOW_MAX_VLAN_HEADERS;
6206 } else {
6207 max_vlans = MIN(parms->support.max_vlan_headers, flow_vlan_limit);
6208 }
6209
6210 /* Conditionally add L2 attributes for Ethernet packets */
6211 if (flow->packet_type == htonl(PT_ETH)) {
6212 eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
6213 sizeof *eth_key);
6214 get_ethernet_key(data, eth_key);
6215
6216 for (int encaps = 0; encaps < max_vlans; encaps++) {
6217 ovs_be16 tpid = flow->vlans[encaps].tpid;
6218
6219 if (flow->vlans[encaps].tci == htons(0)) {
6220 if (eth_type_vlan(flow->dl_type)) {
6221 /* If the VLAN header was truncated, the TPID is in dl_type. */
6222 tpid = flow->dl_type;
6223 } else {
6224 break;
6225 }
6226 }
6227
6228 if (export_mask) {
6229 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
6230 } else {
6231 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, tpid);
6232 }
6233 nl_msg_put_be16(buf, OVS_KEY_ATTR_VLAN, data->vlans[encaps].tci);
6234 encap[encaps] = nl_msg_start_nested(buf, OVS_KEY_ATTR_ENCAP);
6235 if (flow->vlans[encaps].tci == htons(0)) {
6236 goto unencap;
6237 }
6238 }
6239 }
6240
6241 if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
6242 /* For backwards compatibility with kernels that don't support
6243 * wildcarding, the following convention is used to encode the
6244 * OVS_KEY_ATTR_ETHERTYPE for key and mask:
6245 *
6246 * key mask matches
6247 * -------- -------- -------
6248 * >0x5ff 0xffff Specified Ethernet II Ethertype.
6249 * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
6250 * <none> 0xffff Any non-Ethernet II frame (except valid
6251 * 802.3 SNAP packet with valid eth_type).
6252 */
6253 if (export_mask) {
6254 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
6255 }
6256 goto unencap;
6257 }
6258
6259 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
6260
6261 if (eth_type_vlan(flow->dl_type)) {
6262 goto unencap;
6263 }
6264
6265 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6266 struct ovs_key_ipv4 *ipv4_key;
6267
6268 ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
6269 sizeof *ipv4_key);
6270 get_ipv4_key(data, ipv4_key, export_mask);
6271 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
6272 struct ovs_key_ipv6 *ipv6_key;
6273
6274 ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
6275 sizeof *ipv6_key);
6276 get_ipv6_key(data, ipv6_key, export_mask);
6277 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
6278 flow->dl_type == htons(ETH_TYPE_RARP)) {
6279 struct ovs_key_arp *arp_key;
6280
6281 arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
6282 sizeof *arp_key);
6283 get_arp_key(data, arp_key);
6284 } else if (eth_type_mpls(flow->dl_type)) {
6285 struct ovs_key_mpls *mpls_key;
6286 int i, n;
6287
6288 n = flow_count_mpls_labels(flow, NULL);
6289 if (export_mask) {
6290 n = MIN(n, parms->support.max_mpls_depth);
6291 }
6292 mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
6293 n * sizeof *mpls_key);
6294 for (i = 0; i < n; i++) {
6295 mpls_key[i].mpls_lse = data->mpls_lse[i];
6296 }
6297 } else if (flow->dl_type == htons(ETH_TYPE_NSH)) {
6298 nsh_key_to_attr(buf, &data->nsh, NULL, 0, export_mask);
6299 }
6300
6301 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6302 if (flow->nw_proto == IPPROTO_TCP) {
6303 union ovs_key_tp *tcp_key;
6304
6305 tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
6306 sizeof *tcp_key);
6307 get_tp_key(data, tcp_key);
6308 if (data->tcp_flags || (mask && mask->tcp_flags)) {
6309 nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
6310 }
6311 } else if (flow->nw_proto == IPPROTO_UDP) {
6312 union ovs_key_tp *udp_key;
6313
6314 udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
6315 sizeof *udp_key);
6316 get_tp_key(data, udp_key);
6317 } else if (flow->nw_proto == IPPROTO_SCTP) {
6318 union ovs_key_tp *sctp_key;
6319
6320 sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
6321 sizeof *sctp_key);
6322 get_tp_key(data, sctp_key);
6323 } else if (flow->dl_type == htons(ETH_TYPE_IP)
6324 && flow->nw_proto == IPPROTO_ICMP) {
6325 struct ovs_key_icmp *icmp_key;
6326
6327 icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
6328 sizeof *icmp_key);
6329 icmp_key->icmp_type = ntohs(data->tp_src);
6330 icmp_key->icmp_code = ntohs(data->tp_dst);
6331 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
6332 && flow->nw_proto == IPPROTO_ICMPV6) {
6333 struct ovs_key_icmpv6 *icmpv6_key;
6334
6335 icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
6336 sizeof *icmpv6_key);
6337 icmpv6_key->icmpv6_type = ntohs(data->tp_src);
6338 icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
6339
6340 if (is_nd(flow, NULL)
6341 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, ICMP
6342 * type and code are 8 bits wide. Therefore, an exact match
6343 * looks like htons(0xff), not htons(0xffff). See
6344 * xlate_wc_finish() for details. */
6345 && (!export_mask || (data->tp_src == htons(0xff)
6346 && data->tp_dst == htons(0xff)))) {
6347 struct ovs_key_nd *nd_key;
6348 nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
6349 sizeof *nd_key);
6350 nd_key->nd_target = data->nd_target;
6351 nd_key->nd_sll = data->arp_sha;
6352 nd_key->nd_tll = data->arp_tha;
6353
6354 /* Add the ND Extensions attribute only if it is supported and the
6355 * reserved field or the options type is set. */
6356 if (parms->support.nd_ext) {
6357 struct ovs_key_nd_extensions *nd_ext_key;
6358
6359 if (data->igmp_group_ip4 != 0 || data->tcp_flags != 0) {
6360 nd_ext_key = nl_msg_put_unspec_uninit(buf,
6361 OVS_KEY_ATTR_ND_EXTENSIONS,
6362 sizeof *nd_ext_key);
6363 nd_ext_key->nd_reserved = data->igmp_group_ip4;
6364 nd_ext_key->nd_options_type = ntohs(data->tcp_flags);
6365 }
6366 }
6367 }
6368 }
6369 }
6370
6371 unencap:
6372 for (int encaps = max_vlans - 1; encaps >= 0; encaps--) {
6373 if (encap[encaps]) {
6374 nl_msg_end_nested(buf, encap[encaps]);
6375 }
6376 }
6377 }
6378
6379 /* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
6380 *
6381 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
6382 * capable of being expanded to allow for that much space. */
6383 void
6384 odp_flow_key_from_flow(const struct odp_flow_key_parms *parms,
6385 struct ofpbuf *buf)
6386 {
6387 odp_flow_key_from_flow__(parms, false, buf);
6388 }
6389
6390 /* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
6391 * 'buf'.
6392 *
6393 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
6394 * capable of being expanded to allow for that much space. */
6395 void
6396 odp_flow_key_from_mask(const struct odp_flow_key_parms *parms,
6397 struct ofpbuf *buf)
6398 {
6399 odp_flow_key_from_flow__(parms, true, buf);
6400 }
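
/* Illustrative usage sketch for the two serializers above, assuming the
 * caller has a 'struct match' and a 'struct odp_support dp_support'
 * describing the datapath's capabilities (only parameter fields used in this
 * file are shown):
 *
 *     uint64_t key_stub[512 / 8], mask_stub[512 / 8];
 *     struct ofpbuf keybuf, maskbuf;
 *     struct odp_flow_key_parms parms = {
 *         .flow = &match.flow,
 *         .mask = &match.wc.masks,
 *         .support = dp_support,
 *     };
 *
 *     ofpbuf_use_stub(&keybuf, key_stub, sizeof key_stub);
 *     odp_flow_key_from_flow(&parms, &keybuf);
 *
 *     ofpbuf_use_stub(&maskbuf, mask_stub, sizeof mask_stub);
 *     odp_flow_key_from_mask(&parms, &maskbuf);
 */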
6401
6402 /* Generate an ODP flow key from the given packet metadata. */
6403 void
6404 odp_key_from_dp_packet(struct ofpbuf *buf, const struct dp_packet *packet)
6405 {
6406 const struct pkt_metadata *md = &packet->md;
6407
6408 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
6409
6410 if (md->dp_hash) {
6411 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, md->dp_hash);
6412 }
6413
6414 if (flow_tnl_dst_is_set(&md->tunnel)) {
6415 tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL, NULL);
6416 }
6417
6418 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
6419
6420 if (md->ct_state) {
6421 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
6422 ovs_to_odp_ct_state(md->ct_state));
6423 if (md->ct_zone) {
6424 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, md->ct_zone);
6425 }
6426 if (md->ct_mark) {
6427 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, md->ct_mark);
6428 }
6429 if (!ovs_u128_is_zero(md->ct_label)) {
6430 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &md->ct_label,
6431 sizeof(md->ct_label));
6432 }
6433 if (md->ct_orig_tuple_ipv6) {
6434 if (md->ct_orig_tuple.ipv6.ipv6_proto) {
6435 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
6436 &md->ct_orig_tuple.ipv6,
6437 sizeof md->ct_orig_tuple.ipv6);
6438 }
6439 } else {
6440 if (md->ct_orig_tuple.ipv4.ipv4_proto) {
6441 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
6442 &md->ct_orig_tuple.ipv4,
6443 sizeof md->ct_orig_tuple.ipv4);
6444 }
6445 }
6446 }
6447
6448 /* Add an ingress port attribute if 'odp_in_port' is not the magical
6449 * value "ODPP_NONE". */
6450 if (md->in_port.odp_port != ODPP_NONE) {
6451 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, md->in_port.odp_port);
6452 }
6453
6454 /* Add OVS_KEY_ATTR_ETHERTYPE for non-Ethernet (L3) packets. */
6455 if (pt_ns(packet->packet_type) == OFPHTN_ETHERTYPE) {
6456 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE,
6457 pt_ns_type_be(packet->packet_type));
6458 }
6459 }
6460
6461 /* Generate packet metadata from the given ODP flow key. */
6462 void
6463 odp_key_to_dp_packet(const struct nlattr *key, size_t key_len,
6464 struct dp_packet *packet)
6465 {
6466 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6467 const struct nlattr *nla;
6468 struct pkt_metadata *md = &packet->md;
6469 ovs_be32 packet_type = htonl(PT_UNKNOWN);
6470 ovs_be16 ethertype = 0;
6471 size_t left;
6472
6473 pkt_metadata_init(md, ODPP_NONE);
6474
6475 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6476 enum ovs_key_attr type = nl_attr_type(nla);
6477 size_t len = nl_attr_get_size(nla);
6478 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6479 OVS_KEY_ATTR_MAX, type);
6480
6481 if (len != expected_len && expected_len >= 0) {
6482 continue;
6483 }
6484
6485 switch (type) {
6486 case OVS_KEY_ATTR_RECIRC_ID:
6487 md->recirc_id = nl_attr_get_u32(nla);
6488 break;
6489 case OVS_KEY_ATTR_DP_HASH:
6490 md->dp_hash = nl_attr_get_u32(nla);
6491 break;
6492 case OVS_KEY_ATTR_PRIORITY:
6493 md->skb_priority = nl_attr_get_u32(nla);
6494 break;
6495 case OVS_KEY_ATTR_SKB_MARK:
6496 md->pkt_mark = nl_attr_get_u32(nla);
6497 break;
6498 case OVS_KEY_ATTR_CT_STATE:
6499 md->ct_state = odp_to_ovs_ct_state(nl_attr_get_u32(nla));
6500 break;
6501 case OVS_KEY_ATTR_CT_ZONE:
6502 md->ct_zone = nl_attr_get_u16(nla);
6503 break;
6504 case OVS_KEY_ATTR_CT_MARK:
6505 md->ct_mark = nl_attr_get_u32(nla);
6506 break;
6507 case OVS_KEY_ATTR_CT_LABELS: {
6508 md->ct_label = nl_attr_get_u128(nla);
6509 break;
6510 }
6511 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
6512 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(nla);
6513 md->ct_orig_tuple.ipv4 = *ct;
6514 md->ct_orig_tuple_ipv6 = false;
6515 break;
6516 }
6517 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
6518 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(nla);
6519
6520 md->ct_orig_tuple.ipv6 = *ct;
6521 md->ct_orig_tuple_ipv6 = true;
6522 break;
6523 }
6524 case OVS_KEY_ATTR_TUNNEL: {
6525 enum odp_key_fitness res;
6526
6527 res = odp_tun_key_from_attr(nla, &md->tunnel, NULL);
6528 if (res == ODP_FIT_ERROR) {
6529 memset(&md->tunnel, 0, sizeof md->tunnel);
6530 }
6531 break;
6532 }
6533 case OVS_KEY_ATTR_IN_PORT:
6534 md->in_port.odp_port = nl_attr_get_odp_port(nla);
6535 break;
6536 case OVS_KEY_ATTR_ETHERNET:
6537 /* Presence of OVS_KEY_ATTR_ETHERNET indicates Ethernet packet. */
6538 packet_type = htonl(PT_ETH);
6539 break;
6540 case OVS_KEY_ATTR_ETHERTYPE:
6541 ethertype = nl_attr_get_be16(nla);
6542 break;
6543 case OVS_KEY_ATTR_UNSPEC:
6544 case OVS_KEY_ATTR_ENCAP:
6545 case OVS_KEY_ATTR_VLAN:
6546 case OVS_KEY_ATTR_IPV4:
6547 case OVS_KEY_ATTR_IPV6:
6548 case OVS_KEY_ATTR_TCP:
6549 case OVS_KEY_ATTR_UDP:
6550 case OVS_KEY_ATTR_ICMP:
6551 case OVS_KEY_ATTR_ICMPV6:
6552 case OVS_KEY_ATTR_ARP:
6553 case OVS_KEY_ATTR_ND:
6554 case OVS_KEY_ATTR_ND_EXTENSIONS:
6555 case OVS_KEY_ATTR_SCTP:
6556 case OVS_KEY_ATTR_TCP_FLAGS:
6557 case OVS_KEY_ATTR_MPLS:
6558 case OVS_KEY_ATTR_PACKET_TYPE:
6559 case OVS_KEY_ATTR_NSH:
6560 case __OVS_KEY_ATTR_MAX:
6561 default:
6562 break;
6563 }
6564 }
6565
6566 if (packet_type == htonl(PT_ETH)) {
6567 packet->packet_type = htonl(PT_ETH);
6568 } else if (packet_type == htonl(PT_UNKNOWN) && ethertype != 0) {
6569 packet->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
6570 ntohs(ethertype));
6571 } else {
6572 VLOG_ERR_RL(&rl, "Packet without ETHERTYPE. Unknown packet_type.");
6573 }
6574 }
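
/* Illustrative usage sketch for odp_key_to_dp_packet(), assuming an upcall
 * handler that received 'upcall_key'/'upcall_key_len' together with the
 * packet from the datapath:
 *
 *     odp_key_to_dp_packet(upcall_key, upcall_key_len, &packet);
 *     // packet.md now carries the in_port, tunnel, conntrack and mark
 *     // metadata encoded in the flow key.
 */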
6575
6576 /* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'.
6577 * The generated value has the format of a random (version 4) UUID. */
6578 void
6579 odp_flow_key_hash(const void *key, size_t key_len, ovs_u128 *hash)
6580 {
6581 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
6582 static uint32_t secret;
6583
6584 if (ovsthread_once_start(&once)) {
6585 secret = random_uint32();
6586 ovsthread_once_done(&once);
6587 }
6588 hash_bytes128(key, key_len, secret, hash);
6589 uuid_set_bits_v4((struct uuid *)hash);
6590 }
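
/* Illustrative usage sketch for odp_flow_key_hash(): deriving a UFID-style
 * identifier for a serialized flow key held in an ofpbuf 'keybuf' (assumed to
 * exist in the caller):
 *
 *     ovs_u128 ufid;
 *
 *     odp_flow_key_hash(keybuf.data, keybuf.size, &ufid);
 *     // 'ufid' now looks like a version-4 UUID and is stable within this
 *     // process, since the hashing secret is drawn only once.
 */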
6591
6592 static void
6593 log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
6594 uint64_t attrs, int out_of_range_attr,
6595 const struct nlattr *key, size_t key_len)
6596 {
6597 struct ds s;
6598 int i;
6599
6600 if (VLOG_DROP_DBG(rl)) {
6601 return;
6602 }
6603
6604 ds_init(&s);
6605 for (i = 0; i < 64; i++) {
6606 if (attrs & (UINT64_C(1) << i)) {
6607 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6608
6609 ds_put_format(&s, " %s",
6610 ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
6611 }
6612 }
6613 if (out_of_range_attr) {
6614 ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
6615 }
6616
6617 ds_put_cstr(&s, ": ");
6618 odp_flow_key_format(key, key_len, &s);
6619
6620 VLOG_DBG("%s:%s", title, ds_cstr(&s));
6621 ds_destroy(&s);
6622 }
6623
6624 static uint8_t
6625 odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
6626 {
6627 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6628
6629 if (is_mask) {
6630 return odp_frag ? FLOW_NW_FRAG_MASK : 0;
6631 }
6632
6633 if (odp_frag > OVS_FRAG_TYPE_LATER) {
6634 VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
6635 return 0xff; /* Error. */
6636 }
6637
6638 return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
6639 : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
6640 : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
6641 }
6642
6643 /* Parses the attributes in the 'key_len' bytes of 'key' into 'attrs', which
6644 * must have OVS_KEY_ATTR_MAX + 1 elements. Stores each attribute in 'key'
6645 * into the corresponding element of 'attrs'.
6646 *
6647 * Stores a bitmask of the attributes' indexes found in 'key' into
6648 * '*present_attrsp'.
6649 *
6650 * If an attribute beyond OVS_KEY_ATTR_MAX is found, stores its attribute type
6651 * (or one of them, if more than one) into '*out_of_range_attrp', otherwise 0.
6652 *
6653 * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
6654 * error message in '*errorp'. */
6655 static bool
6656 parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
6657 const struct nlattr *attrs[], uint64_t *present_attrsp,
6658 int *out_of_range_attrp, char **errorp)
6659 {
6660 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6661 const struct nlattr *nla;
6662 uint64_t present_attrs;
6663 size_t left;
6664
6665 BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
6666 present_attrs = 0;
6667 *out_of_range_attrp = 0;
6668 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6669 uint16_t type = nl_attr_type(nla);
6670 size_t len = nl_attr_get_size(nla);
6671 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6672 OVS_KEY_ATTR_MAX, type);
6673
6674 if (len != expected_len && expected_len >= 0) {
6675 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6676
6677 odp_parse_error(&rl, errorp, "attribute %s has length %"PRIuSIZE" "
6678 "but should have length %d",
6679 ovs_key_attr_to_string(type, namebuf,
6680 sizeof namebuf),
6681 len, expected_len);
6682 return false;
6683 }
6684
6685 if (type > OVS_KEY_ATTR_MAX) {
6686 *out_of_range_attrp = type;
6687 } else {
6688 if (present_attrs & (UINT64_C(1) << type)) {
6689 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6690
6691 odp_parse_error(&rl, errorp,
6692 "duplicate %s attribute in flow key",
6693 ovs_key_attr_to_string(type, namebuf,
6694 sizeof namebuf));
6695 return false;
6696 }
6697
6698 present_attrs |= UINT64_C(1) << type;
6699 attrs[type] = nla;
6700 }
6701 }
6702 if (left) {
6703 odp_parse_error(&rl, errorp, "trailing garbage in flow key");
6704 return false;
6705 }
6706
6707 *present_attrsp = present_attrs;
6708 return true;
6709 }
6710
6711 static enum odp_key_fitness
6712 check_expectations(uint64_t present_attrs, int out_of_range_attr,
6713 uint64_t expected_attrs,
6714 const struct nlattr *key, size_t key_len)
6715 {
6716 uint64_t missing_attrs;
6717 uint64_t extra_attrs;
6718
6719 missing_attrs = expected_attrs & ~present_attrs;
6720 if (missing_attrs) {
6721 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6722 log_odp_key_attributes(&rl, "expected but not present",
6723 missing_attrs, 0, key, key_len);
6724 return ODP_FIT_TOO_LITTLE;
6725 }
6726
6727 extra_attrs = present_attrs & ~expected_attrs;
6728 if (extra_attrs || out_of_range_attr) {
6729 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6730 log_odp_key_attributes(&rl, "present but not expected",
6731 extra_attrs, out_of_range_attr, key, key_len);
6732 return ODP_FIT_TOO_MUCH;
6733 }
6734
6735 return ODP_FIT_PERFECT;
6736 }
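
/* Worked example for check_expectations(): if 'expected_attrs' contains
 * {IN_PORT, ETHERNET, ETHERTYPE, IPV4} but 'present_attrs' contains only
 * {IN_PORT, ETHERNET, ETHERTYPE}, then 'missing_attrs' has the IPV4 bit set
 * and the result is ODP_FIT_TOO_LITTLE.  With the two sets swapped,
 * 'extra_attrs' has the IPV4 bit set and the result is ODP_FIT_TOO_MUCH. */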
6737
6738 /* Initializes 'flow->dl_type' based on the attributes in 'attrs', in which the
6739 * attributes in the bit-mask 'present_attrs' are present. Returns true if
6740 * successful, false on failure.
6741 *
6742 * Sets 1-bits in '*expected_attrs' for the attributes in 'attrs' that were
6743 * consulted. 'flow' is assumed to be a flow key unless 'src_flow' is nonnull,
6744 * in which case 'flow' is a flow mask and 'src_flow' is its corresponding
6745 * previously parsed flow key.
6746 *
6747 * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
6748 * error message in '*errorp'. */
6749 static bool
6750 parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6751 uint64_t present_attrs, uint64_t *expected_attrs,
6752 struct flow *flow, const struct flow *src_flow,
6753 char **errorp)
6754 {
6755 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6756 bool is_mask = flow != src_flow;
6757
6758 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
6759 flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
6760 if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
6761 odp_parse_error(&rl, errorp,
6762 "invalid Ethertype %"PRIu16" in flow key",
6763 ntohs(flow->dl_type));
6764 return false;
6765 }
6766 if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
6767 flow->dl_type != htons(0xffff)) {
6768 odp_parse_error(&rl, errorp, "can't bitwise match non-Ethernet II "
6769 "\"Ethertype\" %#"PRIx16" (with mask %#"PRIx16")",
6770 ntohs(src_flow->dl_type), ntohs(flow->dl_type));
6771 return false;
6772 }
6773 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
6774 } else {
6775 if (!is_mask) {
6776 /* Default ethertype for well-known L3 packets. */
6777 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6778 flow->dl_type = htons(ETH_TYPE_IP);
6779 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6780 flow->dl_type = htons(ETH_TYPE_IPV6);
6781 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6782 flow->dl_type = htons(ETH_TYPE_MPLS);
6783 } else {
6784 flow->dl_type = htons(FLOW_DL_TYPE_NONE);
6785 }
6786 } else if (src_flow->packet_type != htonl(PT_ETH)) {
6787 /* dl_type is mandatory for non-Ethernet packets */
6788 flow->dl_type = htons(0xffff);
6789 } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
6790 /* See comments in odp_flow_key_from_flow__(). */
6791 odp_parse_error(&rl, errorp,
6792 "mask expected for non-Ethernet II frame");
6793 return false;
6794 }
6795 }
6796 return true;
6797 }
6798
6799 /* Initializes MPLS, L3, and L4 fields in 'flow' based on the attributes in
6800 * 'attrs', in which the attributes in the bit-mask 'present_attrs' are
6801 * present. The caller also indicates an out-of-range attribute
6802 * 'out_of_range_attr' if one was present when parsing (if so, the fitness
6803 * cannot be perfect).
6804 *
6805 * Sets 1-bits in '*expected_attrs' for the attributes in 'attrs' that were
6806 * consulted. 'flow' is assumed to be a flow key unless 'src_flow' is nonnull,
6807 * in which case 'flow' is a flow mask and 'src_flow' is its corresponding
6808 * previously parsed flow key.
6809 *
6810 * Returns fitness based on any discrepancies between present and expected
6811 * attributes, except that a 'need_check' of false overrides this.
6812 *
6813 * If 'errorp' is nonnull and the function returns ODP_FIT_ERROR, stores a
6814 * malloc()'d error message in '*errorp'. 'key' and 'key_len' are used only
6815 * for error reporting in this case. */
6816 static enum odp_key_fitness
6817 parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6818 uint64_t present_attrs, int out_of_range_attr,
6819 uint64_t *expected_attrs, struct flow *flow,
6820 const struct nlattr *key, size_t key_len,
6821 const struct flow *src_flow, bool need_check, char **errorp)
6822 {
6823 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6824 bool is_mask = src_flow != flow;
6825 const void *check_start = NULL;
6826 size_t check_len = 0;
6827 enum ovs_key_attr expected_bit = 0xff;
6828
6829 if (eth_type_mpls(src_flow->dl_type)) {
6830 if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6831 *expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
6832 }
6833 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6834 size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
6835 const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
6836 int n = size / sizeof(ovs_be32);
6837 int i;
6838
6839 if (!size || size % sizeof(ovs_be32)) {
6840 odp_parse_error(&rl, errorp,
6841 "MPLS LSEs have invalid length %"PRIuSIZE,
6842 size);
6843 return ODP_FIT_ERROR;
6844 }
6845 if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
6846 odp_parse_error(&rl, errorp,
6847 "unexpected MPLS Ethertype mask %x"PRIx16,
6848 ntohs(flow->dl_type));
6849 return ODP_FIT_ERROR;
6850 }
6851
6852 for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
6853 flow->mpls_lse[i] = mpls_lse[i];
6854 }
6855 if (n > FLOW_MAX_MPLS_LABELS) {
6856 return ODP_FIT_TOO_MUCH;
6857 }
6858
6859 if (!is_mask) {
6860 /* BOS may be set only in the innermost label. */
6861 for (i = 0; i < n - 1; i++) {
6862 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
6863 odp_parse_error(&rl, errorp,
6864 "MPLS BOS set in non-innermost label");
6865 return ODP_FIT_ERROR;
6866 }
6867 }
6868
6869 /* BOS must be set in the innermost label. */
6870 if (n < FLOW_MAX_MPLS_LABELS
6871 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
6872 return ODP_FIT_TOO_LITTLE;
6873 }
6874 }
6875 }
6876
6877 goto done;
6878 } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
6879 if (!is_mask) {
6880 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
6881 }
6882 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6883 const struct ovs_key_ipv4 *ipv4_key;
6884
6885 ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
6886 put_ipv4_key(ipv4_key, flow, is_mask);
6887 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6888 odp_parse_error(&rl, errorp, "OVS_KEY_ATTR_IPV4 has invalid "
6889 "nw_frag %#"PRIx8, flow->nw_frag);
6890 return ODP_FIT_ERROR;
6891 }
6892
6893 if (is_mask) {
6894 check_start = ipv4_key;
6895 check_len = sizeof *ipv4_key;
6896 expected_bit = OVS_KEY_ATTR_IPV4;
6897 }
6898 }
6899 } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
6900 if (!is_mask) {
6901 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
6902 }
6903 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6904 const struct ovs_key_ipv6 *ipv6_key;
6905
6906 ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
6907 put_ipv6_key(ipv6_key, flow, is_mask);
6908 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6909 odp_parse_error(&rl, errorp, "OVS_KEY_ATTR_IPV6 has invalid "
6910 "nw_frag %#"PRIx8, flow->nw_frag);
6911 return ODP_FIT_ERROR;
6912 }
6913 if (is_mask) {
6914 check_start = ipv6_key;
6915 check_len = sizeof *ipv6_key;
6916 expected_bit = OVS_KEY_ATTR_IPV6;
6917 }
6918 }
6919 } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
6920 src_flow->dl_type == htons(ETH_TYPE_RARP)) {
6921 if (!is_mask) {
6922 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
6923 }
6924 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
6925 const struct ovs_key_arp *arp_key;
6926
6927 arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
6928 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
6929 odp_parse_error(&rl, errorp,
6930 "unsupported ARP opcode %"PRIu16" in flow "
6931 "key", ntohs(arp_key->arp_op));
6932 return ODP_FIT_ERROR;
6933 }
6934 put_arp_key(arp_key, flow);
6935 if (is_mask) {
6936 check_start = arp_key;
6937 check_len = sizeof *arp_key;
6938 expected_bit = OVS_KEY_ATTR_ARP;
6939 }
6940 }
6941 } else if (src_flow->dl_type == htons(ETH_TYPE_NSH)) {
6942 if (!is_mask) {
6943 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_NSH;
6944 }
6945 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_NSH)) {
6946 if (odp_nsh_key_from_attr__(attrs[OVS_KEY_ATTR_NSH],
6947 is_mask, &flow->nsh,
6948 NULL, errorp) == ODP_FIT_ERROR) {
6949 return ODP_FIT_ERROR;
6950 }
6951 if (is_mask) {
6952 check_start = nl_attr_get(attrs[OVS_KEY_ATTR_NSH]);
6953 check_len = nl_attr_get_size(attrs[OVS_KEY_ATTR_NSH]);
6954 expected_bit = OVS_KEY_ATTR_NSH;
6955 }
6956 }
6957 } else {
6958 goto done;
6959 }
6960 if (check_len > 0) { /* Happens only when 'is_mask'. */
6961 if (!is_all_zeros(check_start, check_len) &&
6962 flow->dl_type != htons(0xffff)) {
6963 odp_parse_error(&rl, errorp, "unexpected L3 matching with "
6964 "masked Ethertype %#"PRIx16"/%#"PRIx16,
6965 ntohs(src_flow->dl_type),
6966 ntohs(flow->dl_type));
6967 return ODP_FIT_ERROR;
6968 } else {
6969 *expected_attrs |= UINT64_C(1) << expected_bit;
6970 }
6971 }
6972
6973 expected_bit = OVS_KEY_ATTR_UNSPEC;
6974 if (src_flow->nw_proto == IPPROTO_TCP
6975 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6976 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6977 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6978 if (!is_mask) {
6979 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
6980 }
6981 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
6982 const union ovs_key_tp *tcp_key;
6983
6984 tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
6985 put_tp_key(tcp_key, flow);
6986 expected_bit = OVS_KEY_ATTR_TCP;
6987 }
6988 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
6989 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
6990 flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
6991 }
6992 } else if (src_flow->nw_proto == IPPROTO_UDP
6993 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6994 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6995 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6996 if (!is_mask) {
6997 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
6998 }
6999 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
7000 const union ovs_key_tp *udp_key;
7001
7002 udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
7003 put_tp_key(udp_key, flow);
7004 expected_bit = OVS_KEY_ATTR_UDP;
7005 }
7006 } else if (src_flow->nw_proto == IPPROTO_SCTP
7007 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
7008 src_flow->dl_type == htons(ETH_TYPE_IPV6))
7009 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
7010 if (!is_mask) {
7011 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
7012 }
7013 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
7014 const union ovs_key_tp *sctp_key;
7015
7016 sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
7017 put_tp_key(sctp_key, flow);
7018 expected_bit = OVS_KEY_ATTR_SCTP;
7019 }
7020 } else if (src_flow->nw_proto == IPPROTO_ICMP
7021 && src_flow->dl_type == htons(ETH_TYPE_IP)
7022 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
7023 if (!is_mask) {
7024 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
7025 }
7026 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
7027 const struct ovs_key_icmp *icmp_key;
7028
7029 icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
7030 flow->tp_src = htons(icmp_key->icmp_type);
7031 flow->tp_dst = htons(icmp_key->icmp_code);
7032 expected_bit = OVS_KEY_ATTR_ICMP;
7033 }
7034 } else if (src_flow->nw_proto == IPPROTO_ICMPV6
7035 && src_flow->dl_type == htons(ETH_TYPE_IPV6)
7036 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
7037 if (!is_mask) {
7038 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
7039 }
7040 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
7041 const struct ovs_key_icmpv6 *icmpv6_key;
7042
7043 icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
7044 flow->tp_src = htons(icmpv6_key->icmpv6_type);
7045 flow->tp_dst = htons(icmpv6_key->icmpv6_code);
7046 expected_bit = OVS_KEY_ATTR_ICMPV6;
7047 if (is_nd(src_flow, NULL)) {
7048 if (!is_mask) {
7049 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
7050 }
7051 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
7052 const struct ovs_key_nd *nd_key;
7053
7054 nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
7055 flow->nd_target = nd_key->nd_target;
7056 flow->arp_sha = nd_key->nd_sll;
7057 flow->arp_tha = nd_key->nd_tll;
7058 if (is_mask) {
7059 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
7060 * ICMP type and code are 8 bits wide. Therefore, an
7061 * exact match looks like htons(0xff), not
7062 * htons(0xffff). See xlate_wc_finish() for details.
7063 */
7064 if (!is_all_zeros(nd_key, sizeof *nd_key) &&
7065 (flow->tp_src != htons(0xff) ||
7066 flow->tp_dst != htons(0xff))) {
7067 odp_parse_error(&rl, errorp,
7068 "ICMP (src,dst) masks should be "
7069 "(0xff,0xff) but are actually "
7070 "(%#"PRIx16",%#"PRIx16")",
7071 ntohs(flow->tp_src),
7072 ntohs(flow->tp_dst));
7073 return ODP_FIT_ERROR;
7074 } else {
7075 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
7076 }
7077 }
7078 }
7079 if (present_attrs &
7080 (UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS)) {
7081 const struct ovs_key_nd_extensions *nd_ext_key;
7082 if (!is_mask) {
7083 *expected_attrs |=
7084 UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
7085 }
7086
7087 nd_ext_key =
7088 nl_attr_get(attrs[OVS_KEY_ATTR_ND_EXTENSIONS]);
7089 flow->igmp_group_ip4 = nd_ext_key->nd_reserved;
7090 flow->tcp_flags = htons(nd_ext_key->nd_options_type);
7091
7092 if (is_mask) {
7093 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
7094 * ICMP type and code are 8 bits wide. Therefore, an
7095 * exact match looks like htons(0xff), not
7096 * htons(0xffff). See xlate_wc_finish() for details.
7097 */
7098 if (!is_all_zeros(nd_ext_key, sizeof *nd_ext_key) &&
7099 (flow->tp_src != htons(0xff) ||
7100 flow->tp_dst != htons(0xff))) {
7101 return ODP_FIT_ERROR;
7102 } else {
7103 *expected_attrs |=
7104 UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
7105 }
7106 }
7107 }
7108 }
7109 }
7110 } else if (src_flow->nw_proto == IPPROTO_IGMP
7111 && src_flow->dl_type == htons(ETH_TYPE_IP)) {
7112 /* OVS userspace parses the IGMP type, code, and group, but its
7113 * datapaths do not, so there is always missing information. */
7114 return ODP_FIT_TOO_LITTLE;
7115 }
7116 if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
7117 if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
7118 odp_parse_error(&rl, errorp, "flow matches on L4 ports but does "
7119 "not define an L4 protocol");
7120 return ODP_FIT_ERROR;
7121 } else {
7122 *expected_attrs |= UINT64_C(1) << expected_bit;
7123 }
7124 }
7125
7126 done:
7127 return need_check ? check_expectations(present_attrs, out_of_range_attr,
7128 *expected_attrs, key, key_len) : ODP_FIT_PERFECT;
7129 }
7130
7131 /* Parse the 802.1Q header, then the encapsulated L3 attributes. */
7132 static enum odp_key_fitness
7133 parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
7134 uint64_t present_attrs, int out_of_range_attr,
7135 uint64_t expected_attrs, struct flow *flow,
7136 const struct nlattr *key, size_t key_len,
7137 const struct flow *src_flow, char **errorp)
7138 {
7139 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7140 bool is_mask = src_flow != flow;
7141
7142 const struct nlattr *encap;
7143 enum odp_key_fitness encap_fitness;
7144 enum odp_key_fitness fitness = ODP_FIT_ERROR;
7145 int encaps = 0;
7146
7147 while (encaps < flow_vlan_limit &&
7148 (is_mask
7149 ? (src_flow->vlans[encaps].tci & htons(VLAN_CFI)) != 0
7150 : eth_type_vlan(flow->dl_type))) {
7151
7152 encap = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
7153 ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
7154
7155 /* Calculate fitness of outer attributes. */
7156 if (!is_mask) {
7157 expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
7158 (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
7159 } else {
7160 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
7161 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
7162 }
7163 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
7164 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
7165 }
7166 }
7167 fitness = check_expectations(present_attrs, out_of_range_attr,
7168 expected_attrs, key, key_len);
7169
7170 /* Set vlan_tci.
7171 * Remove the TPID from dl_type since it's not the real Ethertype. */
7172 flow->vlans[encaps].tpid = flow->dl_type;
7173 flow->dl_type = htons(0);
7174 flow->vlans[encaps].tci =
7175 (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
7176 ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
7177 : htons(0));
7178 if (!is_mask) {
7179 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) ||
7180 !(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
7181 return ODP_FIT_TOO_LITTLE;
7182 } else if (flow->vlans[encaps].tci == htons(0)) {
7183 /* Corner case for a truncated 802.1Q header. */
7184 if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
7185 return ODP_FIT_TOO_MUCH;
7186 }
7187 return fitness;
7188 } else if (!(flow->vlans[encaps].tci & htons(VLAN_CFI))) {
7189 odp_parse_error(
7190 &rl, errorp, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
7191 "but CFI bit is not set", ntohs(flow->vlans[encaps].tci));
7192 return ODP_FIT_ERROR;
7193 }
7194 } else {
7195 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
7196 return fitness;
7197 }
7198 }
7199
7200 /* Now parse the encapsulated attributes. */
7201 if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
7202 attrs, &present_attrs, &out_of_range_attr,
7203 errorp)) {
7204 return ODP_FIT_ERROR;
7205 }
7206 expected_attrs = 0;
7207
7208 if (!parse_ethertype(attrs, present_attrs, &expected_attrs,
7209 flow, src_flow, errorp)) {
7210 return ODP_FIT_ERROR;
7211 }
7212 encap_fitness = parse_l2_5_onward(attrs, present_attrs,
7213 out_of_range_attr,
7214 &expected_attrs,
7215 flow, key, key_len,
7216 src_flow, false, errorp);
7217 if (encap_fitness != ODP_FIT_PERFECT) {
7218 return encap_fitness;
7219 }
7220 encaps++;
7221 }
7222
7223 return check_expectations(present_attrs, out_of_range_attr,
7224 expected_attrs, key, key_len);
7225 }
7226
7227 static enum odp_key_fitness
7228 odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
7229 struct flow *flow, const struct flow *src_flow,
7230 char **errorp)
7231 {
7232 /* New "struct flow" fields that are visible to the datapath (including all
7233 * data fields) should be translated from equivalent datapath flow fields
7234 * here (you will have to add an OVS_KEY_ATTR_* for them). */
7235 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
7236
7237 enum odp_key_fitness fitness = ODP_FIT_ERROR;
7238 if (errorp) {
7239 *errorp = NULL;
7240 }
7241
7242 const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
7243 uint64_t expected_attrs;
7244 uint64_t present_attrs;
7245 int out_of_range_attr;
7246 bool is_mask = src_flow != flow;
7247
7248 memset(flow, 0, sizeof *flow);
7249
7250 /* Parse attributes. */
7251 if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
7252 &out_of_range_attr, errorp)) {
7253 goto exit;
7254 }
7255 expected_attrs = 0;
7256
7257 /* Metadata. */
7258 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
7259 flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
7260 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
7261 } else if (is_mask) {
7262 /* Always exact match recirc_id if it is not specified. */
7263 flow->recirc_id = UINT32_MAX;
7264 }
7265
7266 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
7267 flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
7268 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
7269 }
7270 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
7271 flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
7272 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
7273 }
7274
7275 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
7276 flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
7277 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
7278 }
7279
7280 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_STATE)) {
7281 uint32_t odp_state = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_STATE]);
7282
7283 flow->ct_state = odp_to_ovs_ct_state(odp_state);
7284 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_STATE;
7285 }
7286 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE)) {
7287 flow->ct_zone = nl_attr_get_u16(attrs[OVS_KEY_ATTR_CT_ZONE]);
7288 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE;
7289 }
7290 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_MARK)) {
7291 flow->ct_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_MARK]);
7292 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_MARK;
7293 }
7294 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS)) {
7295 flow->ct_label = nl_attr_get_u128(attrs[OVS_KEY_ATTR_CT_LABELS]);
7296 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS;
7297 }
7298 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
7299 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
7300 flow->ct_nw_src = ct->ipv4_src;
7301 flow->ct_nw_dst = ct->ipv4_dst;
7302 flow->ct_nw_proto = ct->ipv4_proto;
7303 flow->ct_tp_src = ct->src_port;
7304 flow->ct_tp_dst = ct->dst_port;
7305 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
7306 }
7307 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
7308 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
7309
7310 flow->ct_ipv6_src = ct->ipv6_src;
7311 flow->ct_ipv6_dst = ct->ipv6_dst;
7312 flow->ct_nw_proto = ct->ipv6_proto;
7313 flow->ct_tp_src = ct->src_port;
7314 flow->ct_tp_dst = ct->dst_port;
7315 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
7316 }
7317
7318 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
7319 enum odp_key_fitness res;
7320
7321 res = odp_tun_key_from_attr__(attrs[OVS_KEY_ATTR_TUNNEL], is_mask,
7322 &flow->tunnel, errorp);
7323 if (res == ODP_FIT_ERROR) {
7324 goto exit;
7325 } else if (res == ODP_FIT_PERFECT) {
7326 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
7327 }
7328 }
7329
7330 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
7331 flow->in_port.odp_port
7332 = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
7333 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
7334 } else if (!is_mask) {
7335 flow->in_port.odp_port = ODPP_NONE;
7336 }
7337
7338 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE)) {
7339 flow->packet_type
7340 = nl_attr_get_be32(attrs[OVS_KEY_ATTR_PACKET_TYPE]);
7341 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE;
7342 if (pt_ns(src_flow->packet_type) == OFPHTN_ETHERTYPE) {
7343 flow->dl_type = pt_ns_type_be(flow->packet_type);
7344 }
7345 } else if (!is_mask) {
7346 flow->packet_type = htonl(PT_ETH);
7347 }
7348
7349 /* Check for Ethernet header. */
7350 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
7351 const struct ovs_key_ethernet *eth_key;
7352
7353 eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
7354 put_ethernet_key(eth_key, flow);
7355 if (!is_mask) {
7356 flow->packet_type = htonl(PT_ETH);
7357 }
7358 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
7359 }
7360 else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
7361 ovs_be16 ethertype = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
7362 if (!is_mask) {
7363 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
7364 ntohs(ethertype));
7365 }
7366 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
7367 }
7368
7369 /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
7370 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
7371 src_flow, errorp)) {
7372 goto exit;
7373 }
7374
7375 if (is_mask
7376 ? (src_flow->vlans[0].tci & htons(VLAN_CFI)) != 0
7377 : eth_type_vlan(src_flow->dl_type)) {
7378 fitness = parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
7379 expected_attrs, flow, key, key_len,
7380 src_flow, errorp);
7381 } else {
7382 if (is_mask) {
7383 /* A missing VLAN mask means exact match on vlan_tci 0 (== no
7384 * VLAN). */
7385 flow->vlans[0].tpid = htons(0xffff);
7386 flow->vlans[0].tci = htons(0xffff);
7387 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
7388 flow->vlans[0].tci = nl_attr_get_be16(
7389 attrs[OVS_KEY_ATTR_VLAN]);
7390 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
7391 }
7392 }
7393 fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
7394 &expected_attrs, flow, key, key_len,
7395 src_flow, true, errorp);
7396 }
7397
7398 exit:;
7399 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7400 if (fitness == ODP_FIT_ERROR && (errorp || !VLOG_DROP_WARN(&rl))) {
7401 struct ds s = DS_EMPTY_INITIALIZER;
7402 if (is_mask) {
7403 ds_put_cstr(&s, "the flow mask in error is: ");
7404 odp_flow_key_format(key, key_len, &s);
7405 ds_put_cstr(&s, ", for the following flow key: ");
7406 flow_format(&s, src_flow, NULL);
7407 } else {
7408 ds_put_cstr(&s, "the flow key in error is: ");
7409 odp_flow_key_format(key, key_len, &s);
7410 }
7411 if (errorp) {
7412 char *old_error = *errorp;
7413 *errorp = xasprintf("%s; %s", old_error, ds_cstr(&s));
7414 free(old_error);
7415 } else {
7416 VLOG_WARN("%s", ds_cstr(&s));
7417 }
7418 ds_destroy(&s);
7419 }
7420 return fitness;
7421 }
7422
7423 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
7424 * structure in 'flow'. Returns an ODP_FIT_* value that indicates how well
7425 * 'key' fits our expectations for what a flow key should contain.
7426 *
7427 * The 'in_port' will be the datapath's understanding of the port. The
7428 * caller will need to translate with odp_port_to_ofp_port() if the
7429 * OpenFlow port is needed.
7430 *
7431 * This function doesn't take the packet itself as an argument because none of
7432 * the currently understood OVS_KEY_ATTR_* attributes require it. Currently,
7433 * it is always possible to infer which additional attribute(s) should appear
7434 * by looking at the attributes for lower-level protocols, e.g. if the network
7435 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
7436 * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
7437 * must be absent.
7438 *
7439 * If 'errorp' is nonnull, this function uses it for detailed error reports: if
7440 * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in
7441 * '*errorp', otherwise NULL. */
7442 enum odp_key_fitness
7443 odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
7444 struct flow *flow, char **errorp)
7445 {
7446 return odp_flow_key_to_flow__(key, key_len, flow, flow, errorp);
7447 }
7448
7449 /* Converts the 'mask_key_len' bytes of OVS_KEY_ATTR_* attributes in 'mask_key'
7450 * to a mask structure in 'mask'. 'flow' must be a previously translated flow
7451 * corresponding to 'mask' and similarly flow_key/flow_key_len must be the
7452 * attributes from that flow. Returns an ODP_FIT_* value that indicates how
7453 * well 'key' fits our expectations for what a flow key should contain.
7454 *
7455 * If 'errorp' is nonnull, this function uses it for detailed error reports: if
7456 * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in
7457 * '*errorp', otherwise NULL. */
7458 enum odp_key_fitness
7459 odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len,
7460 struct flow_wildcards *mask, const struct flow *src_flow,
7461 char **errorp)
7462 {
7463 if (mask_key_len) {
7464 return odp_flow_key_to_flow__(mask_key, mask_key_len,
7465 &mask->masks, src_flow, errorp);
7466 } else {
7467 if (errorp) {
7468 *errorp = NULL;
7469 }
7470
7471 /* A missing mask means that the flow should be exact matched.
7472 * Generate an appropriate exact wildcard for the flow. */
7473 flow_wildcards_init_for_packet(mask, src_flow);
7474
7475 return ODP_FIT_PERFECT;
7476 }
7477 }
7478
7479 /* Converts the Netlink-formatted key/mask to a match. Fails if
7480 * odp_flow_key_from_flow()/from_mask() and odp_flow_key_to_flow()/to_mask()
7481 * disagree on the acceptable form of a flow. */
7482 int
7483 parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len,
7484 const struct nlattr *mask, size_t mask_len,
7485 struct match *match)
7486 {
7487 enum odp_key_fitness fitness;
7488
7489 fitness = odp_flow_key_to_flow(key, key_len, &match->flow, NULL);
7490 if (fitness) {
7491 /* This should not happen: it indicates that
7492 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
7493 * the acceptable form of a flow. Log the problem as an error,
7494 * with enough details to enable debugging. */
7495 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7496
7497 if (!VLOG_DROP_ERR(&rl)) {
7498 struct ds s;
7499
7500 ds_init(&s);
7501 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
7502 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
7503 ds_destroy(&s);
7504 }
7505
7506 return EINVAL;
7507 }
7508
7509 fitness = odp_flow_key_to_mask(mask, mask_len, &match->wc, &match->flow,
7510 NULL);
7511 if (fitness) {
7512 /* This should not happen: it indicates that
7513 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
7514 * disagree on the acceptable form of a mask. Log the problem
7515 * as an error, with enough details to enable debugging. */
7516 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7517
7518 if (!VLOG_DROP_ERR(&rl)) {
7519 struct ds s;
7520
7521 ds_init(&s);
7522 odp_flow_format(key, key_len, mask, mask_len, NULL, &s,
7523 true);
7524 VLOG_ERR("internal error parsing flow mask %s (%s)",
7525 ds_cstr(&s), odp_key_fitness_to_string(fitness));
7526 ds_destroy(&s);
7527 }
7528
7529 return EINVAL;
7530 }
7531
7532 return 0;
7533 }
7534
7535 /* Returns 'fitness' as a string, for use in debug messages. */
7536 const char *
7537 odp_key_fitness_to_string(enum odp_key_fitness fitness)
7538 {
7539 switch (fitness) {
7540 case ODP_FIT_PERFECT:
7541 return "OK";
7542 case ODP_FIT_TOO_MUCH:
7543 return "too_much";
7544 case ODP_FIT_TOO_LITTLE:
7545 return "too_little";
7546 case ODP_FIT_ERROR:
7547 return "error";
7548 default:
7549 return "<unknown>";
7550 }
7551 }
7552
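/* Illustrative sketch (not part of the original file): reporting a
 * non-perfect fitness in a debug message, much as several callers in this
 * file do.  The function name is hypothetical. */
static void
example_log_fitness(enum odp_key_fitness fitness)
{
    if (fitness != ODP_FIT_PERFECT) {
        VLOG_DBG("flow key fitness: %s", odp_key_fitness_to_string(fitness));
    }
}
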
7553 /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
7554 * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
7555 * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
7556 * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
7557 * null, then the return value is not meaningful.) */
7558 size_t
7559 odp_put_userspace_action(uint32_t pid,
7560 const void *userdata, size_t userdata_size,
7561 odp_port_t tunnel_out_port,
7562 bool include_actions,
7563 struct ofpbuf *odp_actions)
7564 {
7565 size_t userdata_ofs;
7566 size_t offset;
7567
7568 offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
7569 nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
7570 if (userdata) {
7571 userdata_ofs = odp_actions->size + NLA_HDRLEN;
7572
7573 /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
7574 * module before Linux 3.10 required the userdata to be exactly 8 bytes
7575 * long:
7576 *
7577 * - The kernel rejected shorter userdata with -ERANGE.
7578 *
7579 * - The kernel silently dropped userdata beyond the first 8 bytes.
7580 *
7581 * Thus, for maximum compatibility, always put at least 8 bytes. (We
7582 * separately disable features that required more than 8 bytes.) */
7583 memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
7584 MAX(8, userdata_size)),
7585 userdata, userdata_size);
7586 } else {
7587 userdata_ofs = 0;
7588 }
7589 if (tunnel_out_port != ODPP_NONE) {
7590 nl_msg_put_odp_port(odp_actions, OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,
7591 tunnel_out_port);
7592 }
7593 if (include_actions) {
7594 nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS);
7595 }
7596 nl_msg_end_nested(odp_actions, offset);
7597
7598 return userdata_ofs;
7599 }
7600
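/* Illustrative sketch (not part of the original file): composing a userspace
 * action with an 8-byte cookie, in line with the compatibility note above.
 * The PID and cookie value are hypothetical. */
static void
example_put_userspace_action(struct ofpbuf *odp_actions)
{
    uint64_t cookie = 0x1122334455667788ULL;    /* Hypothetical userdata. */
    uint32_t pid = 1234;                        /* Hypothetical Netlink PID. */

    odp_put_userspace_action(pid, &cookie, sizeof cookie,
                             ODPP_NONE,         /* No egress tunnel port. */
                             false,             /* Do not include actions. */
                             odp_actions);
}
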
7601 void
7602 odp_put_pop_eth_action(struct ofpbuf *odp_actions)
7603 {
7604 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_ETH);
7605 }
7606
7607 void
7608 odp_put_push_eth_action(struct ofpbuf *odp_actions,
7609 const struct eth_addr *eth_src,
7610 const struct eth_addr *eth_dst)
7611 {
7612 struct ovs_action_push_eth eth;
7613
7614 memset(&eth, 0, sizeof eth);
7615 if (eth_src) {
7616 eth.addresses.eth_src = *eth_src;
7617 }
7618 if (eth_dst) {
7619 eth.addresses.eth_dst = *eth_dst;
7620 }
7621
7622 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_ETH,
7623 &eth, sizeof eth);
7624 }
7625
7626 void
7627 odp_put_tunnel_action(const struct flow_tnl *tunnel,
7628 struct ofpbuf *odp_actions, const char *tnl_type)
7629 {
7630 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7631 tun_key_to_attr(odp_actions, tunnel, tunnel, NULL, tnl_type);
7632 nl_msg_end_nested(odp_actions, offset);
7633 }
7634
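/* Illustrative sketch (not part of the original file): building a set(tunnel)
 * action for a hypothetical IPv4 tunnel key.  The addresses, tunnel ID and
 * the "vxlan" type string are made-up example values. */
static void
example_set_tunnel_action(struct ofpbuf *odp_actions)
{
    struct flow_tnl tnl;

    memset(&tnl, 0, sizeof tnl);
    tnl.ip_src = htonl(0xc0a80001);     /* 192.168.0.1 (example). */
    tnl.ip_dst = htonl(0xc0a80002);     /* 192.168.0.2 (example). */
    tnl.tun_id = htonll(42);            /* Example tunnel ID. */
    tnl.flags = FLOW_TNL_F_KEY;

    odp_put_tunnel_action(&tnl, odp_actions, "vxlan");
}
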
7635 void
7636 odp_put_tnl_push_action(struct ofpbuf *odp_actions,
7637 struct ovs_action_push_tnl *data)
7638 {
7639 int size = offsetof(struct ovs_action_push_tnl, header);
7640
7641 size += data->header_len;
7642 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
7643 }
7644
7645 \f
7646 /* The commit_odp_actions() function and its helpers. */
7647
7648 static void
7649 commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
7650 const void *key, size_t key_size)
7651 {
7652 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7653 nl_msg_put_unspec(odp_actions, key_type, key, key_size);
7654 nl_msg_end_nested(odp_actions, offset);
7655 }
7656
7657 /* Masked set actions have a mask following the data within the netlink
7658 * attribute. The unmasked bits in the data will be cleared as the data
7659 * is copied to the action. */
7660 void
7661 commit_masked_set_action(struct ofpbuf *odp_actions,
7662 enum ovs_key_attr key_type,
7663 const void *key_, const void *mask_, size_t key_size)
7664 {
7665 size_t offset = nl_msg_start_nested(odp_actions,
7666 OVS_ACTION_ATTR_SET_MASKED);
7667 char *data = nl_msg_put_unspec_uninit(odp_actions, key_type, key_size * 2);
7668 const char *key = key_, *mask = mask_;
7669
7670 memcpy(data + key_size, mask, key_size);
7671 /* Clear unmasked bits while copying. */
7672 while (key_size--) {
7673 *data++ = *key++ & *mask++;
7674 }
7675 nl_msg_end_nested(odp_actions, offset);
7676 }
7677
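/* Illustrative sketch (not part of the original file): a masked set that
 * rewrites only the Ethernet destination.  Bytes of 'key' outside the mask
 * are cleared by commit_masked_set_action(), as described above.  The helper
 * name is hypothetical. */
static void
example_masked_set_eth_dst(struct ofpbuf *odp_actions,
                           const struct eth_addr *new_dst)
{
    struct ovs_key_ethernet key, mask;

    memset(&key, 0, sizeof key);
    memset(&mask, 0, sizeof mask);
    key.eth_dst = *new_dst;
    memset(&mask.eth_dst, 0xff, sizeof mask.eth_dst);

    commit_masked_set_action(odp_actions, OVS_KEY_ATTR_ETHERNET,
                             &key, &mask, sizeof key);
}
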
7678 /* If any of the flow key data that ODP actions can modify are different in
7679 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
7680 * 'odp_actions' that changes the flow tunneling information in the key from
7681 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
7682 * same way. In other words, operates the same as commit_odp_actions(), but
7683 * only on tunneling information. */
7684 void
7685 commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
7686 struct ofpbuf *odp_actions, const char *tnl_type)
7687 {
7688 /* A valid IPV4_TUNNEL must have non-zero ip_dst; a valid IPv6 tunnel
7689 * must have non-zero ipv6_dst. */
7690 if (flow_tnl_dst_is_set(&flow->tunnel)) {
7691 if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
7692 return;
7693 }
7694 memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
7695 odp_put_tunnel_action(&base->tunnel, odp_actions, tnl_type);
7696 }
7697 }
7698
7699 struct offsetof_sizeof {
7700 int offset;
7701 int size;
7702 };
7703
7704
7705 /* Performs a bitwise OR over the fields of 'dst_' and 'src_' specified in the
7706 * 'offsetof_sizeof_arr' array.  The result is stored in 'dst_'. */
7707 static void
7708 or_masks(void *dst_, const void *src_,
7709 struct offsetof_sizeof *offsetof_sizeof_arr)
7710 {
7711 int field, size, offset;
7712 const uint8_t *src = src_;
7713 uint8_t *dst = dst_;
7714
7715 for (field = 0; ; field++) {
7716 size = offsetof_sizeof_arr[field].size;
7717 offset = offsetof_sizeof_arr[field].offset;
7718
7719 if (!size) {
7720 return;
7721 }
7722 or_bytes(dst + offset, src + offset, size);
7723 }
7724 }
7725
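/* Illustrative sketch (not part of the original file): or_masks() expects a
 * table of {offset, size} pairs terminated by a zero-sized entry, like the
 * OVS_KEY_*_OFFSETOF_SIZEOF_ARR macros used below.  This shows the shape of
 * such a table for a made-up two-field structure. */
struct example_fields {
    ovs_be16 a;
    ovs_be32 b;
};

static void
example_or_fields(struct example_fields *dst, const struct example_fields *src)
{
    struct offsetof_sizeof arr[] = {
        { offsetof(struct example_fields, a), sizeof(ovs_be16) },
        { offsetof(struct example_fields, b), sizeof(ovs_be32) },
        { 0, 0 },                       /* Terminator. */
    };

    or_masks(dst, src, arr);
}
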
7726 /* Compares each of the fields in 'key0' and 'key1'. The fields are specified
7727 * in 'offsetof_sizeof_arr', which is an array terminated by a 0-size field.
7728 * Returns true if all of the fields are equal, false if at least one differs.
7729 * As a side effect, for each field that is the same in 'key0' and 'key1',
7730 * zeros the corresponding bytes in 'mask'. */
7731 static bool
7732 keycmp_mask(const void *key0, const void *key1,
7733 struct offsetof_sizeof *offsetof_sizeof_arr, void *mask)
7734 {
7735 bool differ = false;
7736
7737 for (int field = 0 ; ; field++) {
7738 int size = offsetof_sizeof_arr[field].size;
7739 int offset = offsetof_sizeof_arr[field].offset;
7740 if (size == 0) {
7741 break;
7742 }
7743
7744 char *pkey0 = ((char *)key0) + offset;
7745 char *pkey1 = ((char *)key1) + offset;
7746 char *pmask = ((char *)mask) + offset;
7747 if (memcmp(pkey0, pkey1, size) == 0) {
7748 memset(pmask, 0, size);
7749 } else {
7750 differ = true;
7751 }
7752 }
7753
7754 return differ;
7755 }
7756
7757 static bool
7758 commit(enum ovs_key_attr attr, bool use_masked_set,
7759 const void *key, void *base, void *mask, size_t size,
7760 struct offsetof_sizeof *offsetof_sizeof_arr,
7761 struct ofpbuf *odp_actions)
7762 {
7763 if (keycmp_mask(key, base, offsetof_sizeof_arr, mask)) {
7764 bool fully_masked = odp_mask_is_exact(attr, mask, size);
7765
7766 if (use_masked_set && !fully_masked) {
7767 commit_masked_set_action(odp_actions, attr, key, mask, size);
7768 } else {
7769 if (!fully_masked) {
7770 memset(mask, 0xff, size);
7771 }
7772 commit_set_action(odp_actions, attr, key, size);
7773 }
7774 memcpy(base, key, size);
7775 return true;
7776 } else {
7777 /* Mask bits are set when we have either read or set the corresponding
7778 * values. Masked bits will be exact-matched, no need to set them
7779 * if the value did not actually change. */
7780 return false;
7781 }
7782 }
7783
7784 static void
7785 get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
7786 {
7787 eth->eth_src = flow->dl_src;
7788 eth->eth_dst = flow->dl_dst;
7789 }
7790
7791 static void
7792 put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
7793 {
7794 flow->dl_src = eth->eth_src;
7795 flow->dl_dst = eth->eth_dst;
7796 }
7797
7798 static void
7799 commit_set_ether_action(const struct flow *flow, struct flow *base_flow,
7800 struct ofpbuf *odp_actions,
7801 struct flow_wildcards *wc,
7802 bool use_masked)
7803 {
7804 struct ovs_key_ethernet key, base, mask, orig_mask;
7805 struct offsetof_sizeof ovs_key_ethernet_offsetof_sizeof_arr[] =
7806 OVS_KEY_ETHERNET_OFFSETOF_SIZEOF_ARR;
7807
7808 if (flow->packet_type != htonl(PT_ETH)) {
7809 return;
7810 }
7811
7812 get_ethernet_key(flow, &key);
7813 get_ethernet_key(base_flow, &base);
7814 get_ethernet_key(&wc->masks, &mask);
7815 memcpy(&orig_mask, &mask, sizeof mask);
7816
7817 if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
7818 &key, &base, &mask, sizeof key,
7819 ovs_key_ethernet_offsetof_sizeof_arr, odp_actions)) {
7820 put_ethernet_key(&base, base_flow);
7821 or_masks(&mask, &orig_mask, ovs_key_ethernet_offsetof_sizeof_arr);
7822 put_ethernet_key(&mask, &wc->masks);
7823 }
7824 }
7825
7826 static void
7827 commit_vlan_action(const struct flow* flow, struct flow *base,
7828 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7829 {
7830 int base_n = flow_count_vlan_headers(base);
7831 int flow_n = flow_count_vlan_headers(flow);
7832 flow_skip_common_vlan_headers(base, &base_n, flow, &flow_n);
7833
7834 /* Pop all mismatching VLANs of base, push those of flow. */
7835 for (; base_n >= 0; base_n--) {
7836 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
7837 wc->masks.vlans[base_n].qtag = OVS_BE32_MAX;
7838 }
7839
7840 for (; flow_n >= 0; flow_n--) {
7841 struct ovs_action_push_vlan vlan;
7842
7843 vlan.vlan_tpid = flow->vlans[flow_n].tpid;
7844 vlan.vlan_tci = flow->vlans[flow_n].tci;
7845 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
7846 &vlan, sizeof vlan);
7847 }
7848 memcpy(base->vlans, flow->vlans, sizeof(base->vlans));
7849 }
7850
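/* Illustrative sketch (not part of the original file): the per-tag encoding
 * that the push loop above emits, for a single hypothetical 802.1Q tag with
 * VLAN ID 10.  The CFI bit is set here as a "tag present" marker, matching
 * how flow->vlans[] stores TCIs. */
static void
example_push_single_vlan(struct ofpbuf *odp_actions)
{
    struct ovs_action_push_vlan vlan = {
        .vlan_tpid = htons(ETH_TYPE_VLAN_8021Q),
        .vlan_tci = htons(10 | VLAN_CFI),
    };

    nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
                      &vlan, sizeof vlan);
}
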
7851 /* Wildcarding already done at action translation time. */
7852 static void
7853 commit_mpls_action(const struct flow *flow, struct flow *base,
7854 struct ofpbuf *odp_actions)
7855 {
7856 int base_n = flow_count_mpls_labels(base, NULL);
7857 int flow_n = flow_count_mpls_labels(flow, NULL);
7858 int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
7859 NULL);
7860
7861 while (base_n > common_n) {
7862 if (base_n - 1 == common_n && flow_n > common_n) {
7863 /* If base has exactly one more LSE than it has in common with
7864 * flow, and flow has at least one more LSE than is common,
7865 * then the topmost LSE of base may be updated using a set
7866 * action. */
7867 struct ovs_key_mpls mpls_key;
7868
7869 mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
7870 commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
7871 &mpls_key, sizeof mpls_key);
7872 flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
7873 common_n++;
7874 } else {
7875 /* Otherwise, if there are more LSEs in base than are common
7876 * between base and flow, then pop the topmost one. */
7877 ovs_be16 dl_type;
7878 /* If all the LSEs are to be popped and this is not the outermost
7879 * LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
7880 * POP_MPLS action instead of flow->dl_type.
7881 *
7882 * This is because the POP_MPLS action requires its ethertype
7883 * argument to be an MPLS ethernet type but in this case
7884 * flow->dl_type will be a non-MPLS ethernet type.
7885 *
7886 * When the final POP_MPLS action occurs it uses flow->dl_type,
7887 * and the resulting packet will have the desired dl_type. */
7888 if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
7889 dl_type = htons(ETH_TYPE_MPLS);
7890 } else {
7891 dl_type = flow->dl_type;
7892 }
7893 nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
7894 ovs_assert(flow_pop_mpls(base, base_n, flow->dl_type, NULL));
7895 base_n--;
7896 }
7897 }
7898
7899 /* If, after the above popping and setting, there are more LSEs in flow
7900 * than base then some LSEs need to be pushed. */
7901 while (base_n < flow_n) {
7902 struct ovs_action_push_mpls *mpls;
7903
7904 mpls = nl_msg_put_unspec_zero(odp_actions,
7905 OVS_ACTION_ATTR_PUSH_MPLS,
7906 sizeof *mpls);
7907 mpls->mpls_ethertype = flow->dl_type;
7908 mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
7909 /* Update base flow's MPLS stack, but do not clear L3. We need the L3
7910 * headers if the flow is restored later due to returning from a patch
7911 * port or group bucket. */
7912 flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL, false);
7913 flow_set_mpls_lse(base, 0, mpls->mpls_lse);
7914 base_n++;
7915 }
7916 }
7917
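/* Illustrative sketch (not part of the original file): the ethertype rule for
 * POP_MPLS described in the comments above, in isolation.  While more than
 * one LSE remains the ethertype must stay an MPLS type; only the final pop
 * carries the real (non-MPLS) dl_type. */
static void
example_pop_one_mpls(struct ofpbuf *odp_actions, ovs_be16 final_dl_type,
                     int remaining_lses)
{
    ovs_be16 dl_type = (!eth_type_mpls(final_dl_type) && remaining_lses > 1)
                       ? htons(ETH_TYPE_MPLS) : final_dl_type;

    nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
}
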
7918 static void
7919 get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
7920 {
7921 ipv4->ipv4_src = flow->nw_src;
7922 ipv4->ipv4_dst = flow->nw_dst;
7923 ipv4->ipv4_proto = flow->nw_proto;
7924 ipv4->ipv4_tos = flow->nw_tos;
7925 ipv4->ipv4_ttl = flow->nw_ttl;
7926 ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7927 }
7928
7929 static void
7930 put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
7931 {
7932 flow->nw_src = ipv4->ipv4_src;
7933 flow->nw_dst = ipv4->ipv4_dst;
7934 flow->nw_proto = ipv4->ipv4_proto;
7935 flow->nw_tos = ipv4->ipv4_tos;
7936 flow->nw_ttl = ipv4->ipv4_ttl;
7937 flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
7938 }
7939
7940 static void
7941 commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
7942 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7943 bool use_masked)
7944 {
7945 struct ovs_key_ipv4 key, mask, orig_mask, base;
7946 struct offsetof_sizeof ovs_key_ipv4_offsetof_sizeof_arr[] =
7947 OVS_KEY_IPV4_OFFSETOF_SIZEOF_ARR;
7948
7949 /* Check that nw_proto and nw_frag remain unchanged. */
7950 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7951 flow->nw_frag == base_flow->nw_frag);
7952
7953 get_ipv4_key(flow, &key, false);
7954 get_ipv4_key(base_flow, &base, false);
7955 get_ipv4_key(&wc->masks, &mask, true);
7956 memcpy(&orig_mask, &mask, sizeof mask);
7957 mask.ipv4_proto = 0; /* Not writable. */
7958 mask.ipv4_frag = 0; /* Not writable. */
7959
7960 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7961 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7962 mask.ipv4_tos &= ~IP_ECN_MASK;
7963 }
7964
7965 if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
7966 ovs_key_ipv4_offsetof_sizeof_arr, odp_actions)) {
7967 put_ipv4_key(&base, base_flow, false);
7968 or_masks(&mask, &orig_mask, ovs_key_ipv4_offsetof_sizeof_arr);
7969 put_ipv4_key(&mask, &wc->masks, true);
7970 }
7971 }
7972
7973 static void
7974 get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
7975 {
7976 ipv6->ipv6_src = flow->ipv6_src;
7977 ipv6->ipv6_dst = flow->ipv6_dst;
7978 ipv6->ipv6_label = flow->ipv6_label;
7979 ipv6->ipv6_proto = flow->nw_proto;
7980 ipv6->ipv6_tclass = flow->nw_tos;
7981 ipv6->ipv6_hlimit = flow->nw_ttl;
7982 ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7983 }
7984
7985 static void
7986 put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
7987 {
7988 flow->ipv6_src = ipv6->ipv6_src;
7989 flow->ipv6_dst = ipv6->ipv6_dst;
7990 flow->ipv6_label = ipv6->ipv6_label;
7991 flow->nw_proto = ipv6->ipv6_proto;
7992 flow->nw_tos = ipv6->ipv6_tclass;
7993 flow->nw_ttl = ipv6->ipv6_hlimit;
7994 flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
7995 }
7996
7997 static void
7998 commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
7999 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8000 bool use_masked)
8001 {
8002 struct ovs_key_ipv6 key, mask, orig_mask, base;
8003 struct offsetof_sizeof ovs_key_ipv6_offsetof_sizeof_arr[] =
8004 OVS_KEY_IPV6_OFFSETOF_SIZEOF_ARR;
8005
8006 /* Check that nw_proto and nw_frag remain unchanged. */
8007 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
8008 flow->nw_frag == base_flow->nw_frag);
8009
8010 get_ipv6_key(flow, &key, false);
8011 get_ipv6_key(base_flow, &base, false);
8012 get_ipv6_key(&wc->masks, &mask, true);
8013 memcpy(&orig_mask, &mask, sizeof mask);
8014 mask.ipv6_proto = 0; /* Not writable. */
8015 mask.ipv6_frag = 0; /* Not writable. */
8016 mask.ipv6_label &= htonl(IPV6_LABEL_MASK); /* Not writable. */
8017
8018 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
8019 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
8020 mask.ipv6_tclass &= ~IP_ECN_MASK;
8021 }
8022
8023 if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
8024 ovs_key_ipv6_offsetof_sizeof_arr, odp_actions)) {
8025 put_ipv6_key(&base, base_flow, false);
8026 or_masks(&mask, &orig_mask, ovs_key_ipv6_offsetof_sizeof_arr);
8027 put_ipv6_key(&mask, &wc->masks, true);
8028 }
8029 }
8030
8031 static void
8032 get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
8033 {
8034 /* ARP key has padding, clear it. */
8035 memset(arp, 0, sizeof *arp);
8036
8037 arp->arp_sip = flow->nw_src;
8038 arp->arp_tip = flow->nw_dst;
8039 arp->arp_op = flow->nw_proto == UINT8_MAX ?
8040 OVS_BE16_MAX : htons(flow->nw_proto);
8041 arp->arp_sha = flow->arp_sha;
8042 arp->arp_tha = flow->arp_tha;
8043 }
8044
8045 static void
8046 put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
8047 {
8048 flow->nw_src = arp->arp_sip;
8049 flow->nw_dst = arp->arp_tip;
8050 flow->nw_proto = ntohs(arp->arp_op);
8051 flow->arp_sha = arp->arp_sha;
8052 flow->arp_tha = arp->arp_tha;
8053 }
8054
8055 static enum slow_path_reason
8056 commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
8057 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
8058 {
8059 struct ovs_key_arp key, mask, orig_mask, base;
8060 struct offsetof_sizeof ovs_key_arp_offsetof_sizeof_arr[] =
8061 OVS_KEY_ARP_OFFSETOF_SIZEOF_ARR;
8062
8063 get_arp_key(flow, &key);
8064 get_arp_key(base_flow, &base);
8065 get_arp_key(&wc->masks, &mask);
8066 memcpy(&orig_mask, &mask, sizeof mask);
8067
8068 if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
8069 ovs_key_arp_offsetof_sizeof_arr, odp_actions)) {
8070 put_arp_key(&base, base_flow);
8071 or_masks(&mask, &orig_mask, ovs_key_arp_offsetof_sizeof_arr);
8072 put_arp_key(&mask, &wc->masks);
8073 return SLOW_ACTION;
8074 }
8075 return 0;
8076 }
8077
8078 static void
8079 get_icmp_key(const struct flow *flow, struct ovs_key_icmp *icmp)
8080 {
8081 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
8082 icmp->icmp_type = ntohs(flow->tp_src);
8083 icmp->icmp_code = ntohs(flow->tp_dst);
8084 }
8085
8086 static void
8087 put_icmp_key(const struct ovs_key_icmp *icmp, struct flow *flow)
8088 {
8089 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
8090 flow->tp_src = htons(icmp->icmp_type);
8091 flow->tp_dst = htons(icmp->icmp_code);
8092 }
8093
8094 static enum slow_path_reason
8095 commit_set_icmp_action(const struct flow *flow, struct flow *base_flow,
8096 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
8097 {
8098 struct ovs_key_icmp key, mask, orig_mask, base;
8099 struct offsetof_sizeof ovs_key_icmp_offsetof_sizeof_arr[] =
8100 OVS_KEY_ICMP_OFFSETOF_SIZEOF_ARR;
8101 enum ovs_key_attr attr;
8102
8103 if (is_icmpv4(flow, NULL)) {
8104 attr = OVS_KEY_ATTR_ICMP;
8105 } else if (is_icmpv6(flow, NULL)) {
8106 attr = OVS_KEY_ATTR_ICMPV6;
8107 } else {
8108 return 0;
8109 }
8110
8111 get_icmp_key(flow, &key);
8112 get_icmp_key(base_flow, &base);
8113 get_icmp_key(&wc->masks, &mask);
8114 memcpy(&orig_mask, &mask, sizeof mask);
8115
8116 if (commit(attr, false, &key, &base, &mask, sizeof key,
8117 ovs_key_icmp_offsetof_sizeof_arr, odp_actions)) {
8118 put_icmp_key(&base, base_flow);
8119 or_masks(&mask, &orig_mask, ovs_key_icmp_offsetof_sizeof_arr);
8120 put_icmp_key(&mask, &wc->masks);
8121 return SLOW_ACTION;
8122 }
8123 return 0;
8124 }
8125
8126 static void
8127 get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
8128 {
8129 nd->nd_target = flow->nd_target;
8130 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
8131 nd->nd_sll = flow->arp_sha;
8132 nd->nd_tll = flow->arp_tha;
8133 }
8134
8135 static void
8136 put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
8137 {
8138 flow->nd_target = nd->nd_target;
8139 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
8140 flow->arp_sha = nd->nd_sll;
8141 flow->arp_tha = nd->nd_tll;
8142 }
8143
8144 static void
8145 get_nd_extensions_key(const struct flow *flow,
8146 struct ovs_key_nd_extensions *nd_ext)
8147 {
8148 /* ND Extensions key has padding, clear it. */
8149 memset(nd_ext, 0, sizeof *nd_ext);
8150 nd_ext->nd_reserved = flow->igmp_group_ip4;
8151 nd_ext->nd_options_type = ntohs(flow->tcp_flags);
8152 }
8153
8154 static void
8155 put_nd_extensions_key(const struct ovs_key_nd_extensions *nd_ext,
8156 struct flow *flow)
8157 {
8158 flow->igmp_group_ip4 = nd_ext->nd_reserved;
8159 flow->tcp_flags = htons(nd_ext->nd_options_type);
8160 }
8161
8162 static enum slow_path_reason
8163 commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
8164 struct ofpbuf *odp_actions,
8165 struct flow_wildcards *wc, bool use_masked)
8166 {
8167 struct ovs_key_nd key, mask, orig_mask, base;
8168 struct offsetof_sizeof ovs_key_nd_offsetof_sizeof_arr[] =
8169 OVS_KEY_ND_OFFSETOF_SIZEOF_ARR;
8170
8171 get_nd_key(flow, &key);
8172 get_nd_key(base_flow, &base);
8173 get_nd_key(&wc->masks, &mask);
8174 memcpy(&orig_mask, &mask, sizeof mask);
8175
8176 if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
8177 ovs_key_nd_offsetof_sizeof_arr, odp_actions)) {
8178 put_nd_key(&base, base_flow);
8179 or_masks(&mask, &orig_mask, ovs_key_nd_offsetof_sizeof_arr);
8180 put_nd_key(&mask, &wc->masks);
8181 return SLOW_ACTION;
8182 }
8183
8184 return 0;
8185 }
8186
8187 static enum slow_path_reason
8188 commit_set_nd_extensions_action(const struct flow *flow,
8189 struct flow *base_flow,
8190 struct ofpbuf *odp_actions,
8191 struct flow_wildcards *wc, bool use_masked)
8192 {
8193 struct ovs_key_nd_extensions key, mask, orig_mask, base;
8194 struct offsetof_sizeof ovs_key_nd_extensions_offsetof_sizeof_arr[] =
8195 OVS_KEY_ND_EXTENSIONS_OFFSETOF_SIZEOF_ARR;
8196
8197 get_nd_extensions_key(flow, &key);
8198 get_nd_extensions_key(base_flow, &base);
8199 get_nd_extensions_key(&wc->masks, &mask);
8200 memcpy(&orig_mask, &mask, sizeof mask);
8201
8202 if (commit(OVS_KEY_ATTR_ND_EXTENSIONS, use_masked, &key, &base, &mask,
8203 sizeof key, ovs_key_nd_extensions_offsetof_sizeof_arr,
8204 odp_actions)) {
8205 put_nd_extensions_key(&base, base_flow);
8206 or_masks(&mask, &orig_mask, ovs_key_nd_extensions_offsetof_sizeof_arr);
8207 put_nd_extensions_key(&mask, &wc->masks);
8208 return SLOW_ACTION;
8209 }
8210 return 0;
8211 }
8212
8213 static enum slow_path_reason
8214 commit_set_nw_action(const struct flow *flow, struct flow *base,
8215 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8216 bool use_masked)
8217 {
8218 uint32_t reason;
8219
8220 /* Check if 'flow' really has an L3 header. */
8221 if (!flow->nw_proto) {
8222 return 0;
8223 }
8224
8225 switch (ntohs(base->dl_type)) {
8226 case ETH_TYPE_IP:
8227 commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
8228 break;
8229
8230 case ETH_TYPE_IPV6:
8231 commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
8232 if (base->nw_proto == IPPROTO_ICMPV6) {
8233 /* Commit extended attrs first to make sure
8234 * correct options are added. */
8235 reason = commit_set_nd_extensions_action(flow, base,
8236 odp_actions, wc, use_masked);
8237 reason |= commit_set_nd_action(flow, base, odp_actions,
8238 wc, use_masked);
8239 return reason;
8240 }
8241 break;
8242
8243 case ETH_TYPE_ARP:
8244 return commit_set_arp_action(flow, base, odp_actions, wc);
8245 }
8246
8247 return 0;
8248 }
8249
8250 static inline void
8251 get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh, bool is_mask)
8252 {
8253 *nsh = flow->nsh;
8254 if (!is_mask) {
8255 if (nsh->mdtype != NSH_M_TYPE1) {
8256 memset(nsh->context, 0, sizeof(nsh->context));
8257 }
8258 }
8259 }
8260
8261 static inline void
8262 put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
8263 bool is_mask OVS_UNUSED)
8264 {
8265 flow->nsh = *nsh;
8266 if (flow->nsh.mdtype != NSH_M_TYPE1) {
8267 memset(flow->nsh.context, 0, sizeof(flow->nsh.context));
8268 }
8269 }
8270
8271 static bool
8272 commit_nsh(const struct ovs_key_nsh *flow_nsh, bool use_masked_set,
8273 const struct ovs_key_nsh *key, struct ovs_key_nsh *base,
8274 struct ovs_key_nsh *mask, size_t size,
8275 struct ofpbuf *odp_actions)
8276 {
8277 enum ovs_key_attr attr = OVS_KEY_ATTR_NSH;
8278
8279 if (memcmp(key, base, size) == 0) {
8280 /* Mask bits are set when we have either read or set the corresponding
8281 * values. Masked bits will be exact-matched, no need to set them
8282 * if the value did not actually change. */
8283 return false;
8284 }
8285
8286 bool fully_masked = odp_mask_is_exact(attr, mask, size);
8287
8288 if (use_masked_set && !fully_masked) {
8289 size_t nsh_key_ofs;
8290 struct ovs_nsh_key_base nsh_base;
8291 struct ovs_nsh_key_base nsh_base_mask;
8292 struct ovs_nsh_key_md1 md1;
8293 struct ovs_nsh_key_md1 md1_mask;
8294 size_t offset = nl_msg_start_nested(odp_actions,
8295 OVS_ACTION_ATTR_SET_MASKED);
8296
8297 nsh_base.flags = key->flags;
8298 nsh_base.ttl = key->ttl;
8299 nsh_base.mdtype = key->mdtype;
8300 nsh_base.np = key->np;
8301 nsh_base.path_hdr = key->path_hdr;
8302
8303 nsh_base_mask.flags = mask->flags;
8304 nsh_base_mask.ttl = mask->ttl;
8305 nsh_base_mask.mdtype = mask->mdtype;
8306 nsh_base_mask.np = mask->np;
8307 nsh_base_mask.path_hdr = mask->path_hdr;
8308
8309 /* OVS_KEY_ATTR_NSH keys */
8310 nsh_key_ofs = nl_msg_start_nested(odp_actions, OVS_KEY_ATTR_NSH);
8311
8312 /* put value and mask for OVS_NSH_KEY_ATTR_BASE */
8313 char *data = nl_msg_put_unspec_uninit(odp_actions,
8314 OVS_NSH_KEY_ATTR_BASE,
8315 2 * sizeof(nsh_base));
8316 const char *lkey = (char *)&nsh_base, *lmask = (char *)&nsh_base_mask;
8317 size_t lkey_size = sizeof(nsh_base);
8318
8319 while (lkey_size--) {
8320 *data++ = *lkey++ & *lmask++;
8321 }
8322 lmask = (char *)&nsh_base_mask;
8323 memcpy(data, lmask, sizeof(nsh_base_mask));
8324
8325 switch (key->mdtype) {
8326 case NSH_M_TYPE1:
8327 memcpy(md1.context, key->context, sizeof key->context);
8328 memcpy(md1_mask.context, mask->context, sizeof mask->context);
8329
8330 /* put value and mask for OVS_NSH_KEY_ATTR_MD1 */
8331 data = nl_msg_put_unspec_uninit(odp_actions,
8332 OVS_NSH_KEY_ATTR_MD1,
8333 2 * sizeof(md1));
8334 lkey = (char *)&md1;
8335 lmask = (char *)&md1_mask;
8336 lkey_size = sizeof(md1);
8337
8338 while (lkey_size--) {
8339 *data++ = *lkey++ & *lmask++;
8340 }
8341 lmask = (char *)&md1_mask;
8342 memcpy(data, lmask, sizeof(md1_mask));
8343 break;
8344 case NSH_M_TYPE2:
8345 default:
8346 /* No match support for other MD formats yet. */
8347 break;
8348 }
8349
8350 nl_msg_end_nested(odp_actions, nsh_key_ofs);
8351
8352 nl_msg_end_nested(odp_actions, offset);
8353 } else {
8354 if (!fully_masked) {
8355 memset(mask, 0xff, size);
8356 }
8357 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
8358 nsh_key_to_attr(odp_actions, flow_nsh, NULL, 0, false);
8359 nl_msg_end_nested(odp_actions, offset);
8360 }
8361 memcpy(base, key, size);
8362 return true;
8363 }
8364
8365 static void
8366 commit_set_nsh_action(const struct flow *flow, struct flow *base_flow,
8367 struct ofpbuf *odp_actions,
8368 struct flow_wildcards *wc,
8369 bool use_masked)
8370 {
8371 struct ovs_key_nsh key, mask, base;
8372
8373 if (flow->dl_type != htons(ETH_TYPE_NSH) ||
8374 !memcmp(&base_flow->nsh, &flow->nsh, sizeof base_flow->nsh)) {
8375 return;
8376 }
8377
8378 /* Check that mdtype and np remain unchanged. */
8379 ovs_assert(flow->nsh.mdtype == base_flow->nsh.mdtype &&
8380 flow->nsh.np == base_flow->nsh.np);
8381
8382 get_nsh_key(flow, &key, false);
8383 get_nsh_key(base_flow, &base, false);
8384 get_nsh_key(&wc->masks, &mask, true);
8385 mask.mdtype = 0; /* Not writable. */
8386 mask.np = 0; /* Not writable. */
8387
8388 if (commit_nsh(&base_flow->nsh, use_masked, &key, &base, &mask,
8389 sizeof key, odp_actions)) {
8390 put_nsh_key(&base, base_flow, false);
8391 if (mask.mdtype != 0) { /* Mask was changed by commit_nsh(). */
8392 put_nsh_key(&mask, &wc->masks, true);
8393 }
8394 }
8395 }
8396
8397 /* TCP, UDP, and SCTP keys have the same layout. */
8398 BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
8399 sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
8400
8401 static void
8402 get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
8403 {
8404 tp->tcp.tcp_src = flow->tp_src;
8405 tp->tcp.tcp_dst = flow->tp_dst;
8406 }
8407
8408 static void
8409 put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
8410 {
8411 flow->tp_src = tp->tcp.tcp_src;
8412 flow->tp_dst = tp->tcp.tcp_dst;
8413 }
8414
8415 static void
8416 commit_set_port_action(const struct flow *flow, struct flow *base_flow,
8417 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8418 bool use_masked)
8419 {
8420 enum ovs_key_attr key_type;
8421 union ovs_key_tp key, mask, orig_mask, base;
8422 struct offsetof_sizeof ovs_key_tp_offsetof_sizeof_arr[] =
8423 OVS_KEY_TCP_OFFSETOF_SIZEOF_ARR;
8424
8425 /* Check if 'flow' really has an L3 header. */
8426 if (!flow->nw_proto) {
8427 return;
8428 }
8429
8430 if (!is_ip_any(base_flow)) {
8431 return;
8432 }
8433
8434 if (flow->nw_proto == IPPROTO_TCP) {
8435 key_type = OVS_KEY_ATTR_TCP;
8436 } else if (flow->nw_proto == IPPROTO_UDP) {
8437 key_type = OVS_KEY_ATTR_UDP;
8438 } else if (flow->nw_proto == IPPROTO_SCTP) {
8439 key_type = OVS_KEY_ATTR_SCTP;
8440 } else {
8441 return;
8442 }
8443
8444 get_tp_key(flow, &key);
8445 get_tp_key(base_flow, &base);
8446 get_tp_key(&wc->masks, &mask);
8447 memcpy(&orig_mask, &mask, sizeof mask);
8448
8449 if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
8450 ovs_key_tp_offsetof_sizeof_arr, odp_actions)) {
8451 put_tp_key(&base, base_flow);
8452 or_masks(&mask, &orig_mask, ovs_key_tp_offsetof_sizeof_arr);
8453 put_tp_key(&mask, &wc->masks);
8454 }
8455 }
8456
8457 static void
8458 commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
8459 struct ofpbuf *odp_actions,
8460 struct flow_wildcards *wc,
8461 bool use_masked)
8462 {
8463 uint32_t key, mask, base;
8464 struct offsetof_sizeof ovs_key_prio_offsetof_sizeof_arr[] = {
8465 {0, sizeof(uint32_t)},
8466 {0, 0}
8467 };
8468
8469 key = flow->skb_priority;
8470 base = base_flow->skb_priority;
8471 mask = wc->masks.skb_priority;
8472
8473 if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
8474 sizeof key, ovs_key_prio_offsetof_sizeof_arr, odp_actions)) {
8475 base_flow->skb_priority = base;
8476 wc->masks.skb_priority |= mask;
8477 }
8478 }
8479
8480 static void
8481 commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
8482 struct ofpbuf *odp_actions,
8483 struct flow_wildcards *wc,
8484 bool use_masked)
8485 {
8486 uint32_t key, mask, base;
8487 struct offsetof_sizeof ovs_key_pkt_mark_offsetof_sizeof_arr[] = {
8488 {0, sizeof(uint32_t)},
8489 {0, 0}
8490 };
8491
8492 key = flow->pkt_mark;
8493 base = base_flow->pkt_mark;
8494 mask = wc->masks.pkt_mark;
8495
8496 if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
8497 sizeof key, ovs_key_pkt_mark_offsetof_sizeof_arr,
8498 odp_actions)) {
8499 base_flow->pkt_mark = base;
8500 wc->masks.pkt_mark |= mask;
8501 }
8502 }
8503
8504 static void
8505 odp_put_pop_nsh_action(struct ofpbuf *odp_actions)
8506 {
8507 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_NSH);
8508 }
8509
8510 static void
8511 odp_put_push_nsh_action(struct ofpbuf *odp_actions,
8512 const struct flow *flow,
8513 struct ofpbuf *encap_data)
8514 {
8515 uint8_t *metadata = NULL;
8516 uint8_t md_size = 0;
8517
8518 switch (flow->nsh.mdtype) {
8519 case NSH_M_TYPE2:
8520 if (encap_data) {
8521 ovs_assert(encap_data->size < NSH_CTX_HDRS_MAX_LEN);
8522 metadata = encap_data->data;
8523 md_size = encap_data->size;
8524 } else {
8525 md_size = 0;
8526 }
8527 break;
8528 default:
8529 md_size = 0;
8530 break;
8531 }
8532 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_PUSH_NSH);
8533 nsh_key_to_attr(odp_actions, &flow->nsh, metadata, md_size, false);
8534 nl_msg_end_nested(odp_actions, offset);
8535 }
8536
8537 static void
8538 commit_encap_decap_action(const struct flow *flow,
8539 struct flow *base_flow,
8540 struct ofpbuf *odp_actions,
8541 struct flow_wildcards *wc,
8542 bool pending_encap, bool pending_decap,
8543 struct ofpbuf *encap_data)
8544 {
8545 if (pending_encap) {
8546 switch (ntohl(flow->packet_type)) {
8547 case PT_ETH: {
8548 /* push_eth */
8549 odp_put_push_eth_action(odp_actions, &flow->dl_src,
8550 &flow->dl_dst);
8551 base_flow->packet_type = flow->packet_type;
8552 base_flow->dl_src = flow->dl_src;
8553 base_flow->dl_dst = flow->dl_dst;
8554 break;
8555 }
8556 case PT_NSH:
8557 /* push_nsh */
8558 odp_put_push_nsh_action(odp_actions, flow, encap_data);
8559 base_flow->packet_type = flow->packet_type;
8560 /* Update all packet headers in base_flow. */
8561 memcpy(&base_flow->dl_dst, &flow->dl_dst,
8562 sizeof(*flow) - offsetof(struct flow, dl_dst));
8563 break;
8564 default:
8565 /* Only the above protocols are supported for encap.
8566 * The check is done at action translation. */
8567 OVS_NOT_REACHED();
8568 }
8569 } else if (pending_decap || flow->packet_type != base_flow->packet_type) {
8570 /* This is an explicit or implicit decap case. */
8571 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE &&
8572 base_flow->packet_type == htonl(PT_ETH)) {
8573 /* Generate pop_eth and continue without recirculation. */
8574 odp_put_pop_eth_action(odp_actions);
8575 base_flow->packet_type = flow->packet_type;
8576 base_flow->dl_src = eth_addr_zero;
8577 base_flow->dl_dst = eth_addr_zero;
8578 } else {
8579 /* All other decap cases require recirculation.
8580 * No need to update the base flow here. */
8581 switch (ntohl(base_flow->packet_type)) {
8582 case PT_NSH:
8583 /* pop_nsh. */
8584 odp_put_pop_nsh_action(odp_actions);
8585 break;
8586 default:
8587 /* Checks are done during translation. */
8588 OVS_NOT_REACHED();
8589 }
8590 }
8591 }
8592
8593 wc->masks.packet_type = OVS_BE32_MAX;
8594 }
8595
8596 /* If any of the flow key data that ODP actions can modify are different in
8597 * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
8598 * key from 'base' into 'flow', and then changes 'base' the same way. Does not
8599 * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
8600 * in addition to this function if needed. Sets fields in 'wc' that are
8601 * used as part of the action.
8602 *
8603 * In the common case, this function returns 0. If the flow key modification
8604 * requires the flow's packets to be forced into the userspace slow path, this
8605 * function returns SLOW_ACTION. This only happens when there is no ODP action
8606 * to modify some field that was actually modified. For example, there is no
8607 * ODP action to modify any ARP field, so such a modification triggers
8608 * SLOW_ACTION. (When this happens, packets that need such modification get
8609 * flushed to userspace and handled there, which works OK but much more slowly
8610 * than if the datapath handled it directly.) */
8611 enum slow_path_reason
8612 commit_odp_actions(const struct flow *flow, struct flow *base,
8613 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8614 bool use_masked, bool pending_encap, bool pending_decap,
8615 struct ofpbuf *encap_data)
8616 {
8617 /* If you add a field that OpenFlow actions can change, and that is visible
8618 * to the datapath (including all data fields), then you should also add
8619 * code here to commit changes to the field. */
8620 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
8621
8622 enum slow_path_reason slow1, slow2;
8623 bool mpls_done = false;
8624
8625 commit_encap_decap_action(flow, base, odp_actions, wc,
8626 pending_encap, pending_decap, encap_data);
8627 commit_set_ether_action(flow, base, odp_actions, wc, use_masked);
8628 /* Make packet a non-MPLS packet before committing L3/4 actions,
8629 * which would otherwise do nothing. */
8630 if (eth_type_mpls(base->dl_type) && !eth_type_mpls(flow->dl_type)) {
8631 commit_mpls_action(flow, base, odp_actions);
8632 mpls_done = true;
8633 }
8634 commit_set_nsh_action(flow, base, odp_actions, wc, use_masked);
8635 slow1 = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
8636 commit_set_port_action(flow, base, odp_actions, wc, use_masked);
8637 slow2 = commit_set_icmp_action(flow, base, odp_actions, wc);
8638 if (!mpls_done) {
8639 commit_mpls_action(flow, base, odp_actions);
8640 }
8641 commit_vlan_action(flow, base, odp_actions, wc);
8642 commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
8643 commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);
8644
8645 return slow1 ? slow1 : slow2;
8646 }
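
/* Illustrative sketch (not part of the original file): a hypothetical caller
 * committing tunnel metadata and then the remaining flow changes, returning
 * the SLOW_ACTION result roughly the way the flow translation code in
 * ofproto-dpif uses these functions. */
static enum slow_path_reason
example_commit_all(const struct flow *flow, struct flow *base,
                   struct ofpbuf *odp_actions, struct flow_wildcards *wc,
                   const char *tnl_type)
{
    commit_odp_tunnel_action(flow, base, odp_actions, tnl_type);
    return commit_odp_actions(flow, base, odp_actions, wc,
                              true,     /* use_masked */
                              false,    /* pending_encap */
                              false,    /* pending_decap */
                              NULL);    /* encap_data */
}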