[mirror_ovs.git] / lib / odp-util.c (commit: userspace: Improved packet drop statistics.)
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2019 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include <sys/types.h>
19 #include <netinet/in.h>
20 #include <arpa/inet.h>
21 #include "odp-util.h"
22 #include <errno.h>
23 #include <inttypes.h>
24 #include <math.h>
25 #include <netinet/icmp6.h>
26 #include <netinet/ip6.h>
27 #include <stdlib.h>
28 #include <string.h>
29
30 #include "byte-order.h"
31 #include "coverage.h"
32 #include "dpif.h"
33 #include "openvswitch/dynamic-string.h"
34 #include "flow.h"
35 #include "netlink.h"
36 #include "openvswitch/ofpbuf.h"
37 #include "packets.h"
38 #include "simap.h"
39 #include "timeval.h"
40 #include "tun-metadata.h"
41 #include "unaligned.h"
42 #include "util.h"
43 #include "uuid.h"
44 #include "openvswitch/vlog.h"
45 #include "openvswitch/match.h"
46 #include "odp-netlink-macros.h"
47 #include "csum.h"
48
49 VLOG_DEFINE_THIS_MODULE(odp_util);
50
51 /* The interface between userspace and kernel uses an "OVS_*" prefix.
52 * Since this is fairly non-specific for the OVS userspace components,
53 * "ODP_*" (Open vSwitch Datapath) is used as the prefix for
54 * interactions with the datapath.
55 */
56
57 /* The set of characters that may separate one action or one key attribute
58 * from another. */
59 static const char *delimiters = ", \t\r\n";
60 static const char *delimiters_end = ", \t\r\n)";
61
62 #define MAX_ODP_NESTED 32
63
64 struct parse_odp_context {
65 const struct simap *port_names;
66 int depth; /* Current nested depth of odp string. */
67 };
68
69 static int parse_odp_key_mask_attr(struct parse_odp_context *, const char *,
70 struct ofpbuf *, struct ofpbuf *);
71
72 static int parse_odp_key_mask_attr__(struct parse_odp_context *, const char *,
73 struct ofpbuf *, struct ofpbuf *);
74
75 static void format_odp_key_attr(const struct nlattr *a,
76 const struct nlattr *ma,
77 const struct hmap *portno_names, struct ds *ds,
78 bool verbose);
79
80 struct geneve_scan {
81 struct geneve_opt d[63];
82 int len;
83 };
84
85 static int scan_geneve(const char *s, struct geneve_scan *key,
86 struct geneve_scan *mask);
87 static void format_geneve_opts(const struct geneve_opt *opt,
88 const struct geneve_opt *mask, int opts_len,
89 struct ds *, bool verbose);
90
91 static struct nlattr *generate_all_wildcard_mask(const struct attr_len_tbl tbl[],
92 int max, struct ofpbuf *,
93 const struct nlattr *key);
94 static void format_u128(struct ds *d, const ovs_32aligned_u128 *key,
95 const ovs_32aligned_u128 *mask, bool verbose);
96 static int scan_u128(const char *s, ovs_u128 *value, ovs_u128 *mask);
97
98 static int parse_odp_action(struct parse_odp_context *context, const char *s,
99 struct ofpbuf *actions);
100
101 static int parse_odp_action__(struct parse_odp_context *context, const char *s,
102 struct ofpbuf *actions);
103
104 /* Returns one of the following for the action with the given OVS_ACTION_ATTR_*
105 * 'type':
106 *
107 * - For an action whose argument has a fixed length, returns that
108 * nonnegative length in bytes.
109 *
110 * - For an action with a variable-length argument, returns ATTR_LEN_VARIABLE.
111 *
112 * - For an invalid 'type', returns ATTR_LEN_INVALID. */
113 static int
114 odp_action_len(uint16_t type)
115 {
116 if (type > OVS_ACTION_ATTR_MAX) {
117 return -1;
118 }
119
120 switch ((enum ovs_action_attr) type) {
121 case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
122 case OVS_ACTION_ATTR_TRUNC: return sizeof(struct ovs_action_trunc);
123 case OVS_ACTION_ATTR_TUNNEL_PUSH: return ATTR_LEN_VARIABLE;
124 case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
125 case OVS_ACTION_ATTR_METER: return sizeof(uint32_t);
126 case OVS_ACTION_ATTR_USERSPACE: return ATTR_LEN_VARIABLE;
127 case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
128 case OVS_ACTION_ATTR_POP_VLAN: return 0;
129 case OVS_ACTION_ATTR_PUSH_MPLS: return sizeof(struct ovs_action_push_mpls);
130 case OVS_ACTION_ATTR_POP_MPLS: return sizeof(ovs_be16);
131 case OVS_ACTION_ATTR_RECIRC: return sizeof(uint32_t);
132 case OVS_ACTION_ATTR_HASH: return sizeof(struct ovs_action_hash);
133 case OVS_ACTION_ATTR_SET: return ATTR_LEN_VARIABLE;
134 case OVS_ACTION_ATTR_SET_MASKED: return ATTR_LEN_VARIABLE;
135 case OVS_ACTION_ATTR_SAMPLE: return ATTR_LEN_VARIABLE;
136 case OVS_ACTION_ATTR_CT: return ATTR_LEN_VARIABLE;
137 case OVS_ACTION_ATTR_CT_CLEAR: return 0;
138 case OVS_ACTION_ATTR_PUSH_ETH: return sizeof(struct ovs_action_push_eth);
139 case OVS_ACTION_ATTR_POP_ETH: return 0;
140 case OVS_ACTION_ATTR_CLONE: return ATTR_LEN_VARIABLE;
141 case OVS_ACTION_ATTR_PUSH_NSH: return ATTR_LEN_VARIABLE;
142 case OVS_ACTION_ATTR_POP_NSH: return 0;
143 case OVS_ACTION_ATTR_CHECK_PKT_LEN: return ATTR_LEN_VARIABLE;
144 case OVS_ACTION_ATTR_DROP: return sizeof(uint32_t);
145
146 case OVS_ACTION_ATTR_UNSPEC:
147 case __OVS_ACTION_ATTR_MAX:
148 return ATTR_LEN_INVALID;
149 }
150
151 return ATTR_LEN_INVALID;
152 }
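
/* Illustrative sketch (added for exposition; not part of upstream OVS):
 * one way a caller might use odp_action_len() to validate an action
 * attribute's payload size.  format_odp_action() further below performs the
 * same check inline before formatting. */
static bool OVS_UNUSED
odp_action_len_ok__(const struct nlattr *a)
{
    int expected_len = odp_action_len(nl_attr_type(a));

    if (expected_len == ATTR_LEN_VARIABLE) {
        return true;            /* Any payload size is acceptable. */
    }
    return expected_len >= 0 && nl_attr_get_size(a) == (size_t) expected_len;
}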
153
154 /* Returns a string form of 'attr'. The return value is either a statically
155 * allocated constant string or the 'bufsize'-byte buffer 'namebuf'. 'bufsize'
156 * should be at least OVS_KEY_ATTR_BUFSIZE. */
157 enum { OVS_KEY_ATTR_BUFSIZE = 3 + INT_STRLEN(unsigned int) + 1 };
158 static const char *
159 ovs_key_attr_to_string(enum ovs_key_attr attr, char *namebuf, size_t bufsize)
160 {
161 switch (attr) {
162 case OVS_KEY_ATTR_UNSPEC: return "unspec";
163 case OVS_KEY_ATTR_ENCAP: return "encap";
164 case OVS_KEY_ATTR_PRIORITY: return "skb_priority";
165 case OVS_KEY_ATTR_SKB_MARK: return "skb_mark";
166 case OVS_KEY_ATTR_CT_STATE: return "ct_state";
167 case OVS_KEY_ATTR_CT_ZONE: return "ct_zone";
168 case OVS_KEY_ATTR_CT_MARK: return "ct_mark";
169 case OVS_KEY_ATTR_CT_LABELS: return "ct_label";
170 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: return "ct_tuple4";
171 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: return "ct_tuple6";
172 case OVS_KEY_ATTR_TUNNEL: return "tunnel";
173 case OVS_KEY_ATTR_IN_PORT: return "in_port";
174 case OVS_KEY_ATTR_ETHERNET: return "eth";
175 case OVS_KEY_ATTR_VLAN: return "vlan";
176 case OVS_KEY_ATTR_ETHERTYPE: return "eth_type";
177 case OVS_KEY_ATTR_IPV4: return "ipv4";
178 case OVS_KEY_ATTR_IPV6: return "ipv6";
179 case OVS_KEY_ATTR_TCP: return "tcp";
180 case OVS_KEY_ATTR_TCP_FLAGS: return "tcp_flags";
181 case OVS_KEY_ATTR_UDP: return "udp";
182 case OVS_KEY_ATTR_SCTP: return "sctp";
183 case OVS_KEY_ATTR_ICMP: return "icmp";
184 case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
185 case OVS_KEY_ATTR_ARP: return "arp";
186 case OVS_KEY_ATTR_ND: return "nd";
187 case OVS_KEY_ATTR_ND_EXTENSIONS: return "nd_ext";
188 case OVS_KEY_ATTR_MPLS: return "mpls";
189 case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
190 case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
191 case OVS_KEY_ATTR_PACKET_TYPE: return "packet_type";
192 case OVS_KEY_ATTR_NSH: return "nsh";
193
194 case __OVS_KEY_ATTR_MAX:
195 default:
196 snprintf(namebuf, bufsize, "key%u", (unsigned int) attr);
197 return namebuf;
198 }
199 }
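
/* Illustrative usage sketch (added for exposition; not part of upstream
 * OVS): the caller supplies a stack buffer of at least OVS_KEY_ATTR_BUFSIZE
 * bytes so that unknown attributes can still be rendered as "key<N>". */
static void OVS_UNUSED
put_key_attr_name__(struct ds *ds, enum ovs_key_attr attr)
{
    char namebuf[OVS_KEY_ATTR_BUFSIZE];

    ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
}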
200
201 static void
202 format_generic_odp_action(struct ds *ds, const struct nlattr *a)
203 {
204 size_t len = nl_attr_get_size(a);
205
206 ds_put_format(ds, "action%d", nl_attr_type(a));
207 if (len) {
208 const uint8_t *unspec;
209 unsigned int i;
210
211 unspec = nl_attr_get(a);
212 for (i = 0; i < len; i++) {
213 ds_put_char(ds, i ? ' ': '(');
214 ds_put_format(ds, "%02x", unspec[i]);
215 }
216 ds_put_char(ds, ')');
217 }
218 }
219
220 static void
221 format_odp_sample_action(struct ds *ds, const struct nlattr *attr,
222 const struct hmap *portno_names)
223 {
224 static const struct nl_policy ovs_sample_policy[] = {
225 [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
226 [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
227 };
228 struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
229 double percentage;
230 const struct nlattr *nla_acts;
231 int len;
232
233 ds_put_cstr(ds, "sample");
234
235 if (!nl_parse_nested(attr, ovs_sample_policy, a, ARRAY_SIZE(a))) {
236 ds_put_cstr(ds, "(error)");
237 return;
238 }
239
240 percentage = (100.0 * nl_attr_get_u32(a[OVS_SAMPLE_ATTR_PROBABILITY])) /
241 UINT32_MAX;
242
243 ds_put_format(ds, "(sample=%.1f%%,", percentage);
244
245 ds_put_cstr(ds, "actions(");
246 nla_acts = nl_attr_get(a[OVS_SAMPLE_ATTR_ACTIONS]);
247 len = nl_attr_get_size(a[OVS_SAMPLE_ATTR_ACTIONS]);
248 format_odp_actions(ds, nla_acts, len, portno_names);
249 ds_put_format(ds, "))");
250 }
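
/* Note (added for exposition): OVS_SAMPLE_ATTR_PROBABILITY stores the
 * sampling probability as a fraction of UINT32_MAX, so, for example, a
 * stored value of UINT32_MAX / 2 is rendered above roughly as
 * "sample(sample=50.0%,actions(...))". */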
251
252 static void
253 format_odp_clone_action(struct ds *ds, const struct nlattr *attr,
254 const struct hmap *portno_names)
255 {
256 const struct nlattr *nla_acts = nl_attr_get(attr);
257 int len = nl_attr_get_size(attr);
258
259 ds_put_cstr(ds, "clone");
260 ds_put_format(ds, "(");
261 format_odp_actions(ds, nla_acts, len, portno_names);
262 ds_put_format(ds, ")");
263 }
264
265 static void
266 format_nsh_key(struct ds *ds, const struct ovs_key_nsh *key)
267 {
268 ds_put_format(ds, "flags=%d", key->flags);
269 ds_put_format(ds, ",ttl=%d", key->ttl);
270 ds_put_format(ds, ",mdtype=%d", key->mdtype);
271 ds_put_format(ds, ",np=%d", key->np);
272 ds_put_format(ds, ",spi=0x%x",
273 nsh_path_hdr_to_spi_uint32(key->path_hdr));
274 ds_put_format(ds, ",si=%d",
275 nsh_path_hdr_to_si(key->path_hdr));
276
277 switch (key->mdtype) {
278 case NSH_M_TYPE1:
279 for (int i = 0; i < 4; i++) {
280 ds_put_format(ds, ",c%d=0x%x", i + 1, ntohl(key->context[i]));
281 }
282 break;
283 case NSH_M_TYPE2:
284 default:
285 /* No support for matching other metadata formats yet. */
286 break;
287 }
288 }
289
290 static void
291 format_uint8_masked(struct ds *s, bool *first, const char *name,
292 uint8_t value, uint8_t mask)
293 {
294 if (mask != 0) {
295 if (!*first) {
296 ds_put_char(s, ',');
297 }
298 ds_put_format(s, "%s=", name);
299 if (mask == UINT8_MAX) {
300 ds_put_format(s, "%"PRIu8, value);
301 } else {
302 ds_put_format(s, "0x%02"PRIx8"/0x%02"PRIx8, value, mask);
303 }
304 *first = false;
305 }
306 }
307
308 static void
309 format_be32_masked(struct ds *s, bool *first, const char *name,
310 ovs_be32 value, ovs_be32 mask)
311 {
312 if (mask != htonl(0)) {
313 if (!*first) {
314 ds_put_char(s, ',');
315 }
316 ds_put_format(s, "%s=", name);
317 if (mask == OVS_BE32_MAX) {
318 ds_put_format(s, "0x%"PRIx32, ntohl(value));
319 } else {
320 ds_put_format(s, "0x%"PRIx32"/0x%08"PRIx32,
321 ntohl(value), ntohl(mask));
322 }
323 *first = false;
324 }
325 }
326
327 static void
328 format_nsh_key_mask(struct ds *ds, const struct ovs_key_nsh *key,
329 const struct ovs_key_nsh *mask)
330 {
331 if (!mask) {
332 format_nsh_key(ds, key);
333 } else {
334 bool first = true;
335 uint32_t spi = nsh_path_hdr_to_spi_uint32(key->path_hdr);
336 uint32_t spi_mask = nsh_path_hdr_to_spi_uint32(mask->path_hdr);
337 if (spi_mask == (NSH_SPI_MASK >> NSH_SPI_SHIFT)) {
338 spi_mask = UINT32_MAX;
339 }
340 uint8_t si = nsh_path_hdr_to_si(key->path_hdr);
341 uint8_t si_mask = nsh_path_hdr_to_si(mask->path_hdr);
342
343 format_uint8_masked(ds, &first, "flags", key->flags, mask->flags);
344 format_uint8_masked(ds, &first, "ttl", key->ttl, mask->ttl);
345 format_uint8_masked(ds, &first, "mdtype", key->mdtype, mask->mdtype);
346 format_uint8_masked(ds, &first, "np", key->np, mask->np);
347 format_be32_masked(ds, &first, "spi", htonl(spi), htonl(spi_mask));
348 format_uint8_masked(ds, &first, "si", si, si_mask);
349 format_be32_masked(ds, &first, "c1", key->context[0],
350 mask->context[0]);
351 format_be32_masked(ds, &first, "c2", key->context[1],
352 mask->context[1]);
353 format_be32_masked(ds, &first, "c3", key->context[2],
354 mask->context[2]);
355 format_be32_masked(ds, &first, "c4", key->context[3],
356 mask->context[3]);
357 }
358 }
359
360 static void
361 format_odp_push_nsh_action(struct ds *ds,
362 const struct nsh_hdr *nsh_hdr)
363 {
364 size_t mdlen = nsh_hdr_len(nsh_hdr) - NSH_BASE_HDR_LEN;
365 uint32_t spi = ntohl(nsh_get_spi(nsh_hdr));
366 uint8_t si = nsh_get_si(nsh_hdr);
367 uint8_t flags = nsh_get_flags(nsh_hdr);
368 uint8_t ttl = nsh_get_ttl(nsh_hdr);
369
370 ds_put_cstr(ds, "push_nsh(");
371 ds_put_format(ds, "flags=%d", flags);
372 ds_put_format(ds, ",ttl=%d", ttl);
373 ds_put_format(ds, ",mdtype=%d", nsh_hdr->md_type);
374 ds_put_format(ds, ",np=%d", nsh_hdr->next_proto);
375 ds_put_format(ds, ",spi=0x%x", spi);
376 ds_put_format(ds, ",si=%d", si);
377 switch (nsh_hdr->md_type) {
378 case NSH_M_TYPE1: {
379 const struct nsh_md1_ctx *md1_ctx = &nsh_hdr->md1;
380 for (int i = 0; i < 4; i++) {
381 ds_put_format(ds, ",c%d=0x%x", i + 1,
382 ntohl(get_16aligned_be32(&md1_ctx->context[i])));
383 }
384 break;
385 }
386 case NSH_M_TYPE2: {
387 const struct nsh_md2_tlv *md2_ctx = &nsh_hdr->md2;
388 ds_put_cstr(ds, ",md2=");
389 ds_put_hex(ds, md2_ctx, mdlen);
390 break;
391 }
392 default:
393 OVS_NOT_REACHED();
394 }
395 ds_put_format(ds, ")");
396 }
397
398 static const char *
399 slow_path_reason_to_string(uint32_t reason)
400 {
401 switch ((enum slow_path_reason) reason) {
402 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
403 SLOW_PATH_REASONS
404 #undef SPR
405 }
406
407 return NULL;
408 }
409
410 const char *
411 slow_path_reason_to_explanation(enum slow_path_reason reason)
412 {
413 switch (reason) {
414 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
415 SLOW_PATH_REASONS
416 #undef SPR
417 }
418
419 return "<unknown>";
420 }
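
/* Note (added for exposition): SLOW_PATH_REASONS is an X-macro list of
 * SPR(ENUM, STRING, EXPLANATION) entries defined in a header.  Each SPR
 * definition above expands every entry into one "case" statement; for
 * example, assuming an entry such as SPR(SLOW_CFM, "cfm", ...), the switch
 * in slow_path_reason_to_string() gains:
 *
 *     case SLOW_CFM: return "cfm";
 */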
421
422 static int
423 parse_odp_flags(const char *s, const char *(*bit_to_string)(uint32_t),
424 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
425 {
426 return parse_flags(s, bit_to_string, ')', NULL, NULL,
427 res_flags, allowed, res_mask);
428 }
429
430 static void
431 format_odp_userspace_action(struct ds *ds, const struct nlattr *attr,
432 const struct hmap *portno_names)
433 {
434 static const struct nl_policy ovs_userspace_policy[] = {
435 [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
436 [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
437 .optional = true },
438 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = { .type = NL_A_U32,
439 .optional = true },
440 [OVS_USERSPACE_ATTR_ACTIONS] = { .type = NL_A_UNSPEC,
441 .optional = true },
442 };
443 struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
444 const struct nlattr *userdata_attr;
445 const struct nlattr *tunnel_out_port_attr;
446
447 if (!nl_parse_nested(attr, ovs_userspace_policy, a, ARRAY_SIZE(a))) {
448 ds_put_cstr(ds, "userspace(error)");
449 return;
450 }
451
452 ds_put_format(ds, "userspace(pid=%"PRIu32,
453 nl_attr_get_u32(a[OVS_USERSPACE_ATTR_PID]));
454
455 userdata_attr = a[OVS_USERSPACE_ATTR_USERDATA];
456
457 if (userdata_attr) {
458 const uint8_t *userdata = nl_attr_get(userdata_attr);
459 size_t userdata_len = nl_attr_get_size(userdata_attr);
460 bool userdata_unspec = true;
461 struct user_action_cookie cookie;
462
463 if (userdata_len == sizeof cookie) {
464 memcpy(&cookie, userdata, sizeof cookie);
465
466 userdata_unspec = false;
467
468 if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
469 ds_put_format(ds, ",sFlow("
470 "vid=%"PRIu16",pcp=%d,output=%"PRIu32")",
471 vlan_tci_to_vid(cookie.sflow.vlan_tci),
472 vlan_tci_to_pcp(cookie.sflow.vlan_tci),
473 cookie.sflow.output);
474 } else if (cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
475 ds_put_cstr(ds, ",slow_path(");
476 format_flags(ds, slow_path_reason_to_string,
477 cookie.slow_path.reason, ',');
478 ds_put_format(ds, ")");
479 } else if (cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
480 ds_put_format(ds, ",flow_sample(probability=%"PRIu16
481 ",collector_set_id=%"PRIu32
482 ",obs_domain_id=%"PRIu32
483 ",obs_point_id=%"PRIu32
484 ",output_port=",
485 cookie.flow_sample.probability,
486 cookie.flow_sample.collector_set_id,
487 cookie.flow_sample.obs_domain_id,
488 cookie.flow_sample.obs_point_id);
489 odp_portno_name_format(portno_names,
490 cookie.flow_sample.output_odp_port, ds);
491 if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_INGRESS) {
492 ds_put_cstr(ds, ",ingress");
493 } else if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_EGRESS) {
494 ds_put_cstr(ds, ",egress");
495 }
496 ds_put_char(ds, ')');
497 } else if (cookie.type == USER_ACTION_COOKIE_IPFIX) {
498 ds_put_format(ds, ",ipfix(output_port=");
499 odp_portno_name_format(portno_names,
500 cookie.ipfix.output_odp_port, ds);
501 ds_put_char(ds, ')');
502 } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
503 ds_put_format(ds, ",controller(reason=%"PRIu16
504 ",dont_send=%d"
505 ",continuation=%d"
506 ",recirc_id=%"PRIu32
507 ",rule_cookie=%#"PRIx64
508 ",controller_id=%"PRIu16
509 ",max_len=%"PRIu16,
510 cookie.controller.reason,
511 !!cookie.controller.dont_send,
512 !!cookie.controller.continuation,
513 cookie.controller.recirc_id,
514 ntohll(get_32aligned_be64(
515 &cookie.controller.rule_cookie)),
516 cookie.controller.controller_id,
517 cookie.controller.max_len);
518 ds_put_char(ds, ')');
519 } else {
520 userdata_unspec = true;
521 }
522 }
523
524 if (userdata_unspec) {
525 size_t i;
526 ds_put_format(ds, ",userdata(");
527 for (i = 0; i < userdata_len; i++) {
528 ds_put_format(ds, "%02x", userdata[i]);
529 }
530 ds_put_char(ds, ')');
531 }
532 }
533
534 if (a[OVS_USERSPACE_ATTR_ACTIONS]) {
535 ds_put_cstr(ds, ",actions");
536 }
537
538 tunnel_out_port_attr = a[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT];
539 if (tunnel_out_port_attr) {
540 ds_put_format(ds, ",tunnel_out_port=");
541 odp_portno_name_format(portno_names,
542 nl_attr_get_odp_port(tunnel_out_port_attr), ds);
543 }
544
545 ds_put_char(ds, ')');
546 }
547
548 static void
549 format_vlan_tci(struct ds *ds, ovs_be16 tci, ovs_be16 mask, bool verbose)
550 {
551 if (verbose || vlan_tci_to_vid(tci) || vlan_tci_to_vid(mask)) {
552 ds_put_format(ds, "vid=%"PRIu16, vlan_tci_to_vid(tci));
553 if (vlan_tci_to_vid(mask) != VLAN_VID_MASK) { /* Partially masked. */
554 ds_put_format(ds, "/0x%"PRIx16, vlan_tci_to_vid(mask));
555 }
556 ds_put_char(ds, ',');
557 }
558 if (verbose || vlan_tci_to_pcp(tci) || vlan_tci_to_pcp(mask)) {
559 ds_put_format(ds, "pcp=%d", vlan_tci_to_pcp(tci));
560 if (vlan_tci_to_pcp(mask) != (VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
561 ds_put_format(ds, "/0x%x", vlan_tci_to_pcp(mask));
562 }
563 ds_put_char(ds, ',');
564 }
565 if (!(tci & htons(VLAN_CFI))) {
566 ds_put_cstr(ds, "cfi=0");
567 ds_put_char(ds, ',');
568 }
569 ds_chomp(ds, ',');
570 }
571
572 static void
573 format_mpls_lse(struct ds *ds, ovs_be32 mpls_lse)
574 {
575 ds_put_format(ds, "label=%"PRIu32",tc=%d,ttl=%d,bos=%d",
576 mpls_lse_to_label(mpls_lse),
577 mpls_lse_to_tc(mpls_lse),
578 mpls_lse_to_ttl(mpls_lse),
579 mpls_lse_to_bos(mpls_lse));
580 }
581
582 static void
583 format_mpls(struct ds *ds, const struct ovs_key_mpls *mpls_key,
584 const struct ovs_key_mpls *mpls_mask, int n)
585 {
586 for (int i = 0; i < n; i++) {
587 ovs_be32 key = mpls_key[i].mpls_lse;
588
589 if (mpls_mask == NULL) {
590 format_mpls_lse(ds, key);
591 } else {
592 ovs_be32 mask = mpls_mask[i].mpls_lse;
593
594 ds_put_format(ds, "label=%"PRIu32"/0x%x,tc=%d/%x,ttl=%d/0x%x,bos=%d/%x",
595 mpls_lse_to_label(key), mpls_lse_to_label(mask),
596 mpls_lse_to_tc(key), mpls_lse_to_tc(mask),
597 mpls_lse_to_ttl(key), mpls_lse_to_ttl(mask),
598 mpls_lse_to_bos(key), mpls_lse_to_bos(mask));
599 }
600 ds_put_char(ds, ',');
601 }
602 ds_chomp(ds, ',');
603 }
604
605 static void
606 format_odp_recirc_action(struct ds *ds, uint32_t recirc_id)
607 {
608 ds_put_format(ds, "recirc(%#"PRIx32")", recirc_id);
609 }
610
611 static void
612 format_odp_hash_action(struct ds *ds, const struct ovs_action_hash *hash_act)
613 {
614 ds_put_format(ds, "hash(");
615
616 if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
617 ds_put_format(ds, "l4(%"PRIu32")", hash_act->hash_basis);
618 } else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
619 ds_put_format(ds, "sym_l4(%"PRIu32")", hash_act->hash_basis);
620 } else {
621 ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
622 hash_act->hash_alg);
623 }
624 ds_put_format(ds, ")");
625 }
626
627 static const void *
628 format_udp_tnl_push_header(struct ds *ds, const struct udp_header *udp)
629 {
630 ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16",csum=0x%"PRIx16"),",
631 ntohs(udp->udp_src), ntohs(udp->udp_dst),
632 ntohs(udp->udp_csum));
633
634 return udp + 1;
635 }
636
637 static void
638 format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
639 {
640 const struct eth_header *eth;
641 const void *l3;
642 const void *l4;
643 const struct udp_header *udp;
644
645 eth = (const struct eth_header *)data->header;
646
647 l3 = eth + 1;
648
649 /* Ethernet */
650 ds_put_format(ds, "header(size=%"PRIu32",type=%"PRIu32",eth(dst=",
651 data->header_len, data->tnl_type);
652 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
653 ds_put_format(ds, ",src=");
654 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
655 ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
656
657 if (eth->eth_type == htons(ETH_TYPE_IP)) {
658 /* IPv4 */
659 const struct ip_header *ip = l3;
660 ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
661 ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
662 IP_ARGS(get_16aligned_be32(&ip->ip_src)),
663 IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
664 ip->ip_proto, ip->ip_tos,
665 ip->ip_ttl,
666 ntohs(ip->ip_frag_off));
667 l4 = (ip + 1);
668 } else {
669 const struct ovs_16aligned_ip6_hdr *ip6 = l3;
670 struct in6_addr src, dst;
671 memcpy(&src, &ip6->ip6_src, sizeof src);
672 memcpy(&dst, &ip6->ip6_dst, sizeof dst);
673 uint32_t ipv6_flow = ntohl(get_16aligned_be32(&ip6->ip6_flow));
674
675 ds_put_format(ds, "ipv6(src=");
676 ipv6_format_addr(&src, ds);
677 ds_put_format(ds, ",dst=");
678 ipv6_format_addr(&dst, ds);
679 ds_put_format(ds, ",label=%i,proto=%"PRIu8",tclass=0x%"PRIx32
680 ",hlimit=%"PRIu8"),",
681 ipv6_flow & IPV6_LABEL_MASK, ip6->ip6_nxt,
682 (ipv6_flow >> 20) & 0xff, ip6->ip6_hlim);
683 l4 = (ip6 + 1);
684 }
685
686 udp = (const struct udp_header *) l4;
687
688 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
689 const struct vxlanhdr *vxh;
690
691 vxh = format_udp_tnl_push_header(ds, udp);
692
693 ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
694 ntohl(get_16aligned_be32(&vxh->vx_flags)),
695 ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
696 } else if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
697 const struct genevehdr *gnh;
698
699 gnh = format_udp_tnl_push_header(ds, udp);
700
701 ds_put_format(ds, "geneve(%s%svni=0x%"PRIx32,
702 gnh->oam ? "oam," : "",
703 gnh->critical ? "crit," : "",
704 ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
705
706 if (gnh->opt_len) {
707 ds_put_cstr(ds, ",options(");
708 format_geneve_opts(gnh->options, NULL, gnh->opt_len * 4,
709 ds, false);
710 ds_put_char(ds, ')');
711 }
712
713 ds_put_char(ds, ')');
714 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE ||
715 data->tnl_type == OVS_VPORT_TYPE_IP6GRE) {
716 const struct gre_base_hdr *greh;
717 ovs_16aligned_be32 *options;
718
719 greh = (const struct gre_base_hdr *) l4;
720
721 ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
722 ntohs(greh->flags), ntohs(greh->protocol));
723 options = (ovs_16aligned_be32 *)(greh + 1);
724 if (greh->flags & htons(GRE_CSUM)) {
725 ds_put_format(ds, ",csum=0x%"PRIx16, ntohs(*((ovs_be16 *)options)));
726 options++;
727 }
728 if (greh->flags & htons(GRE_KEY)) {
729 ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
730 options++;
731 }
732 if (greh->flags & htons(GRE_SEQ)) {
733 ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
734 options++;
735 }
736 ds_put_format(ds, ")");
737 } else if (data->tnl_type == OVS_VPORT_TYPE_ERSPAN ||
738 data->tnl_type == OVS_VPORT_TYPE_IP6ERSPAN) {
739 const struct gre_base_hdr *greh;
740 const struct erspan_base_hdr *ersh;
741
742 greh = (const struct gre_base_hdr *) l4;
743 ersh = ERSPAN_HDR(greh);
744
745 if (ersh->ver == 1) {
746 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
747 ersh + 1);
748 ds_put_format(ds, "erspan(ver=1,sid=0x%"PRIx16",idx=0x%"PRIx32")",
749 get_sid(ersh), ntohl(get_16aligned_be32(index)));
750 } else if (ersh->ver == 2) {
751 struct erspan_md2 *md2 = ALIGNED_CAST(struct erspan_md2 *,
752 ersh + 1);
753 ds_put_format(ds, "erspan(ver=2,sid=0x%"PRIx16
754 ",dir=%"PRIu8",hwid=0x%"PRIx8")",
755 get_sid(ersh), md2->dir, get_hwid(md2));
756 } else {
757 VLOG_WARN("%s Invalid ERSPAN version %d\n", __func__, ersh->ver);
758 }
759 }
760 ds_put_format(ds, ")");
761 }
762
763 static void
764 format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr,
765 const struct hmap *portno_names)
766 {
767 struct ovs_action_push_tnl *data;
768
769 data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
770
771 ds_put_cstr(ds, "tnl_push(tnl_port(");
772 odp_portno_name_format(portno_names, data->tnl_port, ds);
773 ds_put_cstr(ds, "),");
774 format_odp_tnl_push_header(ds, data);
775 ds_put_format(ds, ",out_port(");
776 odp_portno_name_format(portno_names, data->out_port, ds);
777 ds_put_cstr(ds, "))");
778 }
779
780 static const struct nl_policy ovs_nat_policy[] = {
781 [OVS_NAT_ATTR_SRC] = { .type = NL_A_FLAG, .optional = true, },
782 [OVS_NAT_ATTR_DST] = { .type = NL_A_FLAG, .optional = true, },
783 [OVS_NAT_ATTR_IP_MIN] = { .type = NL_A_UNSPEC, .optional = true,
784 .min_len = sizeof(struct in_addr),
785 .max_len = sizeof(struct in6_addr)},
786 [OVS_NAT_ATTR_IP_MAX] = { .type = NL_A_UNSPEC, .optional = true,
787 .min_len = sizeof(struct in_addr),
788 .max_len = sizeof(struct in6_addr)},
789 [OVS_NAT_ATTR_PROTO_MIN] = { .type = NL_A_U16, .optional = true, },
790 [OVS_NAT_ATTR_PROTO_MAX] = { .type = NL_A_U16, .optional = true, },
791 [OVS_NAT_ATTR_PERSISTENT] = { .type = NL_A_FLAG, .optional = true, },
792 [OVS_NAT_ATTR_PROTO_HASH] = { .type = NL_A_FLAG, .optional = true, },
793 [OVS_NAT_ATTR_PROTO_RANDOM] = { .type = NL_A_FLAG, .optional = true, },
794 };
795
796 static void
797 format_odp_ct_nat(struct ds *ds, const struct nlattr *attr)
798 {
799 struct nlattr *a[ARRAY_SIZE(ovs_nat_policy)];
800 size_t addr_len;
801 ovs_be32 ip_min, ip_max;
802 struct in6_addr ip6_min, ip6_max;
803 uint16_t proto_min, proto_max;
804
805 if (!nl_parse_nested(attr, ovs_nat_policy, a, ARRAY_SIZE(a))) {
806 ds_put_cstr(ds, "nat(error: nl_parse_nested() failed.)");
807 return;
808 }
809 /* If no NAT type is given, then no other option may be given either. */
810 if (!(a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST])
811 && (a[OVS_NAT_ATTR_IP_MIN] || a[OVS_NAT_ATTR_IP_MAX]
812 || a[OVS_NAT_ATTR_PROTO_MIN] || a[OVS_NAT_ATTR_PROTO_MAX]
813 || a[OVS_NAT_ATTR_PERSISTENT] || a[OVS_NAT_ATTR_PROTO_HASH]
814 || a[OVS_NAT_ATTR_PROTO_RANDOM])) {
815 ds_put_cstr(ds, "nat(error: options allowed only with \"src\" or \"dst\")");
816 return;
817 }
818 /* SNAT and DNAT may not both be specified. */
819 if (a[OVS_NAT_ATTR_SRC] && a[OVS_NAT_ATTR_DST]) {
820 ds_put_cstr(ds, "nat(error: Only one of \"src\" or \"dst\" may be present.)");
821 return;
822 }
823 /* proto may not appear without ip. */
824 if (!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_PROTO_MIN]) {
825 ds_put_cstr(ds, "nat(error: proto but no IP.)");
826 return;
827 }
828 /* MAX may not appear without MIN. */
829 if ((!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX])
830 || (!a[OVS_NAT_ATTR_PROTO_MIN] && a[OVS_NAT_ATTR_PROTO_MAX])) {
831 ds_put_cstr(ds, "nat(error: range max without min.)");
832 return;
833 }
834 /* Address sizes must match. */
835 if ((a[OVS_NAT_ATTR_IP_MIN]
836 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(ovs_be32) &&
837 nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(struct in6_addr)))
838 || (a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX]
839 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN])
840 != nl_attr_get_size(a[OVS_NAT_ATTR_IP_MAX])))) {
841 ds_put_cstr(ds, "nat(error: IP address sizes do not match)");
842 return;
843 }
844
845 addr_len = a[OVS_NAT_ATTR_IP_MIN]
846 ? nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) : 0;
847 ip_min = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MIN]
848 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MIN]) : 0;
849 ip_max = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MAX]
850 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MAX]) : 0;
851 if (addr_len == sizeof ip6_min) {
852 ip6_min = a[OVS_NAT_ATTR_IP_MIN]
853 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MIN])
854 : in6addr_any;
855 ip6_max = a[OVS_NAT_ATTR_IP_MAX]
856 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MAX])
857 : in6addr_any;
858 }
859 proto_min = a[OVS_NAT_ATTR_PROTO_MIN]
860 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MIN]) : 0;
861 proto_max = a[OVS_NAT_ATTR_PROTO_MAX]
862 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MAX]) : 0;
863
864 if ((addr_len == sizeof(ovs_be32)
865 && ip_max && ntohl(ip_min) > ntohl(ip_max))
866 || (addr_len == sizeof(struct in6_addr)
867 && !ipv6_mask_is_any(&ip6_max)
868 && memcmp(&ip6_min, &ip6_max, sizeof ip6_min) > 0)
869 || (proto_max && proto_min > proto_max)) {
870 ds_put_cstr(ds, "nat(range error)");
871 return;
872 }
873
874 ds_put_cstr(ds, "nat");
875 if (a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST]) {
876 ds_put_char(ds, '(');
877 if (a[OVS_NAT_ATTR_SRC]) {
878 ds_put_cstr(ds, "src");
879 } else if (a[OVS_NAT_ATTR_DST]) {
880 ds_put_cstr(ds, "dst");
881 }
882
883 if (addr_len > 0) {
884 ds_put_cstr(ds, "=");
885
886 if (addr_len == sizeof ip_min) {
887 ds_put_format(ds, IP_FMT, IP_ARGS(ip_min));
888
889 if (ip_max && ip_max != ip_min) {
890 ds_put_format(ds, "-"IP_FMT, IP_ARGS(ip_max));
891 }
892 } else if (addr_len == sizeof ip6_min) {
893 ipv6_format_addr_bracket(&ip6_min, ds, proto_min);
894
895 if (!ipv6_mask_is_any(&ip6_max) &&
896 memcmp(&ip6_max, &ip6_min, sizeof ip6_max) != 0) {
897 ds_put_char(ds, '-');
898 ipv6_format_addr_bracket(&ip6_max, ds, proto_min);
899 }
900 }
901 if (proto_min) {
902 ds_put_format(ds, ":%"PRIu16, proto_min);
903
904 if (proto_max && proto_max != proto_min) {
905 ds_put_format(ds, "-%"PRIu16, proto_max);
906 }
907 }
908 }
909 ds_put_char(ds, ',');
910 if (a[OVS_NAT_ATTR_PERSISTENT]) {
911 ds_put_cstr(ds, "persistent,");
912 }
913 if (a[OVS_NAT_ATTR_PROTO_HASH]) {
914 ds_put_cstr(ds, "hash,");
915 }
916 if (a[OVS_NAT_ATTR_PROTO_RANDOM]) {
917 ds_put_cstr(ds, "random,");
918 }
919 ds_chomp(ds, ',');
920 ds_put_char(ds, ')');
921 }
922 }
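
/* Illustrative output (added for exposition): for a source NAT with an IPv4
 * address range, a port range, and the persistent flag, format_odp_ct_nat()
 * above renders something like:
 *
 *     nat(src=10.0.0.1-10.0.0.9:100-200,persistent)
 */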
923
924 static const struct nl_policy ovs_conntrack_policy[] = {
925 [OVS_CT_ATTR_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
926 [OVS_CT_ATTR_FORCE_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
927 [OVS_CT_ATTR_ZONE] = { .type = NL_A_U16, .optional = true, },
928 [OVS_CT_ATTR_MARK] = { .type = NL_A_UNSPEC, .optional = true,
929 .min_len = sizeof(uint32_t) * 2 },
930 [OVS_CT_ATTR_LABELS] = { .type = NL_A_UNSPEC, .optional = true,
931 .min_len = sizeof(struct ovs_key_ct_labels) * 2 },
932 [OVS_CT_ATTR_HELPER] = { .type = NL_A_STRING, .optional = true,
933 .min_len = 1, .max_len = 16 },
934 [OVS_CT_ATTR_NAT] = { .type = NL_A_UNSPEC, .optional = true },
935 [OVS_CT_ATTR_TIMEOUT] = { .type = NL_A_STRING, .optional = true,
936 .min_len = 1, .max_len = 32 },
937 };
938
939 static void
940 format_odp_conntrack_action(struct ds *ds, const struct nlattr *attr)
941 {
942 struct nlattr *a[ARRAY_SIZE(ovs_conntrack_policy)];
943 const struct {
944 ovs_32aligned_u128 value;
945 ovs_32aligned_u128 mask;
946 } *label;
947 const uint32_t *mark;
948 const char *helper, *timeout;
949 uint16_t zone;
950 bool commit, force;
951 const struct nlattr *nat;
952
953 if (!nl_parse_nested(attr, ovs_conntrack_policy, a, ARRAY_SIZE(a))) {
954 ds_put_cstr(ds, "ct(error)");
955 return;
956 }
957
958 commit = a[OVS_CT_ATTR_COMMIT] ? true : false;
959 force = a[OVS_CT_ATTR_FORCE_COMMIT] ? true : false;
960 zone = a[OVS_CT_ATTR_ZONE] ? nl_attr_get_u16(a[OVS_CT_ATTR_ZONE]) : 0;
961 mark = a[OVS_CT_ATTR_MARK] ? nl_attr_get(a[OVS_CT_ATTR_MARK]) : NULL;
962 label = a[OVS_CT_ATTR_LABELS] ? nl_attr_get(a[OVS_CT_ATTR_LABELS]): NULL;
963 helper = a[OVS_CT_ATTR_HELPER] ? nl_attr_get(a[OVS_CT_ATTR_HELPER]) : NULL;
964 timeout = a[OVS_CT_ATTR_TIMEOUT] ?
965 nl_attr_get(a[OVS_CT_ATTR_TIMEOUT]) : NULL;
966 nat = a[OVS_CT_ATTR_NAT];
967
968 ds_put_format(ds, "ct");
969 if (commit || force || zone || mark || label || helper || timeout || nat) {
970 ds_put_cstr(ds, "(");
971 if (commit) {
972 ds_put_format(ds, "commit,");
973 }
974 if (force) {
975 ds_put_format(ds, "force_commit,");
976 }
977 if (zone) {
978 ds_put_format(ds, "zone=%"PRIu16",", zone);
979 }
980 if (mark) {
981 ds_put_format(ds, "mark=%#"PRIx32"/%#"PRIx32",", *mark,
982 *(mark + 1));
983 }
984 if (label) {
985 ds_put_format(ds, "label=");
986 format_u128(ds, &label->value, &label->mask, true);
987 ds_put_char(ds, ',');
988 }
989 if (helper) {
990 ds_put_format(ds, "helper=%s,", helper);
991 }
992 if (timeout) {
993 ds_put_format(ds, "timeout=%s", timeout);
994 }
995 if (nat) {
996 format_odp_ct_nat(ds, nat);
997 }
998 ds_chomp(ds, ',');
999 ds_put_cstr(ds, ")");
1000 }
1001 }
1002
1003 static const struct attr_len_tbl
1004 ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
1005 [OVS_NSH_KEY_ATTR_BASE] = { .len = 8 },
1006 [OVS_NSH_KEY_ATTR_MD1] = { .len = 16 },
1007 [OVS_NSH_KEY_ATTR_MD2] = { .len = ATTR_LEN_VARIABLE },
1008 };
1009
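/* In an OVS_ACTION_ATTR_SET_MASKED action, a nested OVS_KEY_ATTR_NSH carries
 * each sub-attribute as a key immediately followed by an equal-sized mask,
 * which is why format_odp_set_nsh() below expects every fixed-length
 * attribute to be exactly twice its nominal length and reads the mask from
 * just past the key (e.g. "base + 1"). */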
1010 static void
1011 format_odp_set_nsh(struct ds *ds, const struct nlattr *attr)
1012 {
1013 unsigned int left;
1014 const struct nlattr *a;
1015 struct ovs_key_nsh nsh;
1016 struct ovs_key_nsh nsh_mask;
1017
1018 memset(&nsh, 0, sizeof nsh);
1019 memset(&nsh_mask, 0xff, sizeof nsh_mask);
1020
1021 NL_NESTED_FOR_EACH (a, left, attr) {
1022 enum ovs_nsh_key_attr type = nl_attr_type(a);
1023 size_t len = nl_attr_get_size(a);
1024
1025 if (type >= OVS_NSH_KEY_ATTR_MAX) {
1026 return;
1027 }
1028
1029 int expected_len = ovs_nsh_key_attr_lens[type].len;
1030 if ((expected_len != ATTR_LEN_VARIABLE) && (len != 2 * expected_len)) {
1031 return;
1032 }
1033
1034 switch (type) {
1035 case OVS_NSH_KEY_ATTR_UNSPEC:
1036 break;
1037 case OVS_NSH_KEY_ATTR_BASE: {
1038 const struct ovs_nsh_key_base *base = nl_attr_get(a);
1039 const struct ovs_nsh_key_base *base_mask = base + 1;
1040 memcpy(&nsh, base, sizeof(*base));
1041 memcpy(&nsh_mask, base_mask, sizeof(*base_mask));
1042 break;
1043 }
1044 case OVS_NSH_KEY_ATTR_MD1: {
1045 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
1046 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
1047 memcpy(&nsh.context, &md1->context, sizeof(*md1));
1048 memcpy(&nsh_mask.context, &md1_mask->context, sizeof(*md1_mask));
1049 break;
1050 }
1051 case OVS_NSH_KEY_ATTR_MD2:
1052 case __OVS_NSH_KEY_ATTR_MAX:
1053 default:
1054 /* No support for matching other metadata formats yet. */
1055 break;
1056 }
1057 }
1058
1059 ds_put_cstr(ds, "set(nsh(");
1060 format_nsh_key_mask(ds, &nsh, &nsh_mask);
1061 ds_put_cstr(ds, "))");
1062 }
1063
1064 static void
1065 format_odp_check_pkt_len_action(struct ds *ds, const struct nlattr *attr,
1066 const struct hmap *portno_names OVS_UNUSED)
1067 {
1068 static const struct nl_policy ovs_cpl_policy[] = {
1069 [OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = { .type = NL_A_U16 },
1070 [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = { .type = NL_A_NESTED },
1071 [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]
1072 = { .type = NL_A_NESTED },
1073 };
1074 struct nlattr *a[ARRAY_SIZE(ovs_cpl_policy)];
1075 ds_put_cstr(ds, "check_pkt_len");
1076 if (!nl_parse_nested(attr, ovs_cpl_policy, a, ARRAY_SIZE(a))) {
1077 ds_put_cstr(ds, "(error)");
1078 return;
1079 }
1080
1081 if (!a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] ||
1082 !a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]) {
1083 ds_put_cstr(ds, "(error)");
1084 return;
1085 }
1086
1087 uint16_t pkt_len = nl_attr_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
1088 ds_put_format(ds, "(size=%u,gt(", pkt_len);
1089 const struct nlattr *acts;
1090 acts = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
1091 format_odp_actions(ds, nl_attr_get(acts), nl_attr_get_size(acts),
1092 portno_names);
1093
1094 ds_put_cstr(ds, "),le(");
1095 acts = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
1096 format_odp_actions(ds, nl_attr_get(acts), nl_attr_get_size(acts),
1097 portno_names);
1098 ds_put_cstr(ds, "))");
1099 }
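
/* Illustrative output (added for exposition): for a check_pkt_len action
 * with a 200-byte threshold whose "greater" branch outputs to port 4 and
 * whose "less or equal" branch carries no actions, the formatter above
 * produces something like:
 *
 *     check_pkt_len(size=200,gt(4),le(drop))
 */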
1100
1101 static void
1102 format_odp_action(struct ds *ds, const struct nlattr *a,
1103 const struct hmap *portno_names)
1104 {
1105 int expected_len;
1106 enum ovs_action_attr type = nl_attr_type(a);
1107 size_t size;
1108
1109 expected_len = odp_action_len(nl_attr_type(a));
1110 if (expected_len != ATTR_LEN_VARIABLE &&
1111 nl_attr_get_size(a) != expected_len) {
1112 ds_put_format(ds, "bad length %"PRIuSIZE", expected %d for: ",
1113 nl_attr_get_size(a), expected_len);
1114 format_generic_odp_action(ds, a);
1115 return;
1116 }
1117
1118 switch (type) {
1119 case OVS_ACTION_ATTR_METER:
1120 ds_put_format(ds, "meter(%"PRIu32")", nl_attr_get_u32(a));
1121 break;
1122 case OVS_ACTION_ATTR_OUTPUT:
1123 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1124 break;
1125 case OVS_ACTION_ATTR_TRUNC: {
1126 const struct ovs_action_trunc *trunc =
1127 nl_attr_get_unspec(a, sizeof *trunc);
1128
1129 ds_put_format(ds, "trunc(%"PRIu32")", trunc->max_len);
1130 break;
1131 }
1132 case OVS_ACTION_ATTR_TUNNEL_POP:
1133 ds_put_cstr(ds, "tnl_pop(");
1134 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1135 ds_put_char(ds, ')');
1136 break;
1137 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1138 format_odp_tnl_push_action(ds, a, portno_names);
1139 break;
1140 case OVS_ACTION_ATTR_USERSPACE:
1141 format_odp_userspace_action(ds, a, portno_names);
1142 break;
1143 case OVS_ACTION_ATTR_RECIRC:
1144 format_odp_recirc_action(ds, nl_attr_get_u32(a));
1145 break;
1146 case OVS_ACTION_ATTR_HASH:
1147 format_odp_hash_action(ds, nl_attr_get(a));
1148 break;
1149 case OVS_ACTION_ATTR_SET_MASKED:
1150 a = nl_attr_get(a);
1151 /* OVS_KEY_ATTR_NSH is a nested attribute, so it needs special processing. */
1152 if (nl_attr_type(a) == OVS_KEY_ATTR_NSH) {
1153 format_odp_set_nsh(ds, a);
1154 break;
1155 }
1156 size = nl_attr_get_size(a) / 2;
1157 ds_put_cstr(ds, "set(");
1158
1159 /* Masked set action not supported for tunnel key, which is bigger. */
1160 if (size <= sizeof(struct ovs_key_ipv6)) {
1161 struct nlattr attr[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1162 sizeof(struct nlattr))];
1163 struct nlattr mask[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1164 sizeof(struct nlattr))];
1165
1166 mask->nla_type = attr->nla_type = nl_attr_type(a);
1167 mask->nla_len = attr->nla_len = NLA_HDRLEN + size;
1168 memcpy(attr + 1, (char *)(a + 1), size);
1169 memcpy(mask + 1, (char *)(a + 1) + size, size);
1170 format_odp_key_attr(attr, mask, NULL, ds, false);
1171 } else {
1172 format_odp_key_attr(a, NULL, NULL, ds, false);
1173 }
1174 ds_put_cstr(ds, ")");
1175 break;
1176 case OVS_ACTION_ATTR_SET:
1177 ds_put_cstr(ds, "set(");
1178 format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
1179 ds_put_cstr(ds, ")");
1180 break;
1181 case OVS_ACTION_ATTR_PUSH_ETH: {
1182 const struct ovs_action_push_eth *eth = nl_attr_get(a);
1183 ds_put_format(ds, "push_eth(src="ETH_ADDR_FMT",dst="ETH_ADDR_FMT")",
1184 ETH_ADDR_ARGS(eth->addresses.eth_src),
1185 ETH_ADDR_ARGS(eth->addresses.eth_dst));
1186 break;
1187 }
1188 case OVS_ACTION_ATTR_POP_ETH:
1189 ds_put_cstr(ds, "pop_eth");
1190 break;
1191 case OVS_ACTION_ATTR_PUSH_VLAN: {
1192 const struct ovs_action_push_vlan *vlan = nl_attr_get(a);
1193 ds_put_cstr(ds, "push_vlan(");
1194 if (vlan->vlan_tpid != htons(ETH_TYPE_VLAN)) {
1195 ds_put_format(ds, "tpid=0x%04"PRIx16",", ntohs(vlan->vlan_tpid));
1196 }
1197 format_vlan_tci(ds, vlan->vlan_tci, OVS_BE16_MAX, false);
1198 ds_put_char(ds, ')');
1199 break;
1200 }
1201 case OVS_ACTION_ATTR_POP_VLAN:
1202 ds_put_cstr(ds, "pop_vlan");
1203 break;
1204 case OVS_ACTION_ATTR_PUSH_MPLS: {
1205 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
1206 ds_put_cstr(ds, "push_mpls(");
1207 format_mpls_lse(ds, mpls->mpls_lse);
1208 ds_put_format(ds, ",eth_type=0x%"PRIx16")", ntohs(mpls->mpls_ethertype));
1209 break;
1210 }
1211 case OVS_ACTION_ATTR_POP_MPLS: {
1212 ovs_be16 ethertype = nl_attr_get_be16(a);
1213 ds_put_format(ds, "pop_mpls(eth_type=0x%"PRIx16")", ntohs(ethertype));
1214 break;
1215 }
1216 case OVS_ACTION_ATTR_SAMPLE:
1217 format_odp_sample_action(ds, a, portno_names);
1218 break;
1219 case OVS_ACTION_ATTR_CT:
1220 format_odp_conntrack_action(ds, a);
1221 break;
1222 case OVS_ACTION_ATTR_CT_CLEAR:
1223 ds_put_cstr(ds, "ct_clear");
1224 break;
1225 case OVS_ACTION_ATTR_CLONE:
1226 format_odp_clone_action(ds, a, portno_names);
1227 break;
1228 case OVS_ACTION_ATTR_PUSH_NSH: {
1229 uint32_t buffer[NSH_HDR_MAX_LEN / 4];
1230 struct nsh_hdr *nsh_hdr = ALIGNED_CAST(struct nsh_hdr *, buffer);
1231 nsh_reset_ver_flags_ttl_len(nsh_hdr);
1232 odp_nsh_hdr_from_attr(nl_attr_get(a), nsh_hdr, NSH_HDR_MAX_LEN);
1233 format_odp_push_nsh_action(ds, nsh_hdr);
1234 break;
1235 }
1236 case OVS_ACTION_ATTR_POP_NSH:
1237 ds_put_cstr(ds, "pop_nsh()");
1238 break;
1239 case OVS_ACTION_ATTR_CHECK_PKT_LEN:
1240 format_odp_check_pkt_len_action(ds, a, portno_names);
1241 break;
1242 case OVS_ACTION_ATTR_DROP:
1243 ds_put_cstr(ds, "drop");
1244 break;
1245 case OVS_ACTION_ATTR_UNSPEC:
1246 case __OVS_ACTION_ATTR_MAX:
1247 default:
1248 format_generic_odp_action(ds, a);
1249 break;
1250 }
1251 }
1252
1253 void
1254 format_odp_actions(struct ds *ds, const struct nlattr *actions,
1255 size_t actions_len, const struct hmap *portno_names)
1256 {
1257 if (actions_len) {
1258 const struct nlattr *a;
1259 unsigned int left;
1260
1261 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
1262 if (a != actions) {
1263 ds_put_char(ds, ',');
1264 }
1265 format_odp_action(ds, a, portno_names);
1266 }
1267 if (left) {
1268 int i;
1269
1270 if (left == actions_len) {
1271 ds_put_cstr(ds, "<empty>");
1272 }
1273 ds_put_format(ds, ",***%u leftover bytes*** (", left);
1274 for (i = 0; i < left; i++) {
1275 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
1276 }
1277 ds_put_char(ds, ')');
1278 }
1279 } else {
1280 ds_put_cstr(ds, "drop");
1281 }
1282 }
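
/* Illustrative sketch (added for exposition; not part of upstream OVS):
 * formatting a Netlink-encoded action list into a human-readable string,
 * e.g. for logging.  Passing NULL for 'portno_names' makes ports print as
 * plain numbers. */
static void OVS_UNUSED
log_odp_actions__(const struct nlattr *actions, size_t actions_len)
{
    struct ds s = DS_EMPTY_INITIALIZER;

    format_odp_actions(&s, actions, actions_len, NULL);
    VLOG_INFO("odp actions: %s", ds_cstr(&s));
    ds_destroy(&s);
}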
1283
1284 /* Parsing of the userspace() action is separated out into its own function. */
1285 static int
1286 parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
1287 {
1288 uint32_t pid;
1289 struct user_action_cookie cookie;
1290 struct ofpbuf buf;
1291 odp_port_t tunnel_out_port;
1292 int n = -1;
1293 void *user_data = NULL;
1294 size_t user_data_size = 0;
1295 bool include_actions = false;
1296 int res;
1297
1298 if (!ovs_scan(s, "userspace(pid=%"SCNi32"%n", &pid, &n)) {
1299 return -EINVAL;
1300 }
1301
1302 ofpbuf_init(&buf, 16);
1303 memset(&cookie, 0, sizeof cookie);
1304
1305 user_data = &cookie;
1306 user_data_size = sizeof cookie;
1307 {
1308 uint32_t output;
1309 uint32_t probability;
1310 uint32_t collector_set_id;
1311 uint32_t obs_domain_id;
1312 uint32_t obs_point_id;
1313
1314 /* USER_ACTION_COOKIE_CONTROLLER. */
1315 uint8_t dont_send;
1316 uint8_t continuation;
1317 uint16_t reason;
1318 uint32_t recirc_id;
1319 uint64_t rule_cookie;
1320 uint16_t controller_id;
1321 uint16_t max_len;
1322
1323 int vid, pcp;
1324 int n1 = -1;
1325 if (ovs_scan(&s[n], ",sFlow(vid=%i,"
1326 "pcp=%i,output=%"SCNi32")%n",
1327 &vid, &pcp, &output, &n1)) {
1328 uint16_t tci;
1329
1330 n += n1;
1331 tci = vid | (pcp << VLAN_PCP_SHIFT);
1332 if (tci) {
1333 tci |= VLAN_CFI;
1334 }
1335
1336 cookie.type = USER_ACTION_COOKIE_SFLOW;
1337 cookie.ofp_in_port = OFPP_NONE;
1338 cookie.ofproto_uuid = UUID_ZERO;
1339 cookie.sflow.vlan_tci = htons(tci);
1340 cookie.sflow.output = output;
1341 } else if (ovs_scan(&s[n], ",slow_path(%n",
1342 &n1)) {
1343 n += n1;
1344 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
1345 cookie.ofp_in_port = OFPP_NONE;
1346 cookie.ofproto_uuid = UUID_ZERO;
1347 cookie.slow_path.reason = 0;
1348
1349 res = parse_odp_flags(&s[n], slow_path_reason_to_string,
1350 &cookie.slow_path.reason,
1351 SLOW_PATH_REASON_MASK, NULL);
1352 if (res < 0 || s[n + res] != ')') {
1353 goto out;
1354 }
1355 n += res + 1;
1356 } else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
1357 "collector_set_id=%"SCNi32","
1358 "obs_domain_id=%"SCNi32","
1359 "obs_point_id=%"SCNi32","
1360 "output_port=%"SCNi32"%n",
1361 &probability, &collector_set_id,
1362 &obs_domain_id, &obs_point_id,
1363 &output, &n1)) {
1364 n += n1;
1365
1366 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
1367 cookie.ofp_in_port = OFPP_NONE;
1368 cookie.ofproto_uuid = UUID_ZERO;
1369 cookie.flow_sample.probability = probability;
1370 cookie.flow_sample.collector_set_id = collector_set_id;
1371 cookie.flow_sample.obs_domain_id = obs_domain_id;
1372 cookie.flow_sample.obs_point_id = obs_point_id;
1373 cookie.flow_sample.output_odp_port = u32_to_odp(output);
1374
1375 if (ovs_scan(&s[n], ",ingress%n", &n1)) {
1376 cookie.flow_sample.direction = NX_ACTION_SAMPLE_INGRESS;
1377 n += n1;
1378 } else if (ovs_scan(&s[n], ",egress%n", &n1)) {
1379 cookie.flow_sample.direction = NX_ACTION_SAMPLE_EGRESS;
1380 n += n1;
1381 } else {
1382 cookie.flow_sample.direction = NX_ACTION_SAMPLE_DEFAULT;
1383 }
1384 if (s[n] != ')') {
1385 res = -EINVAL;
1386 goto out;
1387 }
1388 n++;
1389 } else if (ovs_scan(&s[n], ",ipfix(output_port=%"SCNi32")%n",
1390 &output, &n1) ) {
1391 n += n1;
1392 cookie.type = USER_ACTION_COOKIE_IPFIX;
1393 cookie.ofp_in_port = OFPP_NONE;
1394 cookie.ofproto_uuid = UUID_ZERO;
1395 cookie.ipfix.output_odp_port = u32_to_odp(output);
1396 } else if (ovs_scan(&s[n], ",controller(reason=%"SCNu16
1397 ",dont_send=%"SCNu8
1398 ",continuation=%"SCNu8
1399 ",recirc_id=%"SCNu32
1400 ",rule_cookie=%"SCNx64
1401 ",controller_id=%"SCNu16
1402 ",max_len=%"SCNu16")%n",
1403 &reason, &dont_send, &continuation, &recirc_id,
1404 &rule_cookie, &controller_id, &max_len, &n1)) {
1405 n += n1;
1406 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
1407 cookie.ofp_in_port = OFPP_NONE;
1408 cookie.ofproto_uuid = UUID_ZERO;
1409 cookie.controller.dont_send = dont_send ? true : false;
1410 cookie.controller.continuation = continuation ? true : false;
1411 cookie.controller.reason = reason;
1412 cookie.controller.recirc_id = recirc_id;
1413 put_32aligned_be64(&cookie.controller.rule_cookie,
1414 htonll(rule_cookie));
1415 cookie.controller.controller_id = controller_id;
1416 cookie.controller.max_len = max_len;
1417 } else if (ovs_scan(&s[n], ",userdata(%n", &n1)) {
1418 char *end;
1419
1420 n += n1;
1421 end = ofpbuf_put_hex(&buf, &s[n], NULL);
1422 if (end[0] != ')') {
1423 res = -EINVAL;
1424 goto out;
1425 }
1426 user_data = buf.data;
1427 user_data_size = buf.size;
1428 n = (end + 1) - s;
1429 }
1430 }
1431
1432 {
1433 int n1 = -1;
1434 if (ovs_scan(&s[n], ",actions%n", &n1)) {
1435 n += n1;
1436 include_actions = true;
1437 }
1438 }
1439
1440 {
1441 int n1 = -1;
1442 if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
1443 &tunnel_out_port, &n1)) {
1444 odp_put_userspace_action(pid, user_data, user_data_size,
1445 tunnel_out_port, include_actions, actions);
1446 res = n + n1;
1447 goto out;
1448 } else if (s[n] == ')') {
1449 odp_put_userspace_action(pid, user_data, user_data_size,
1450 ODPP_NONE, include_actions, actions);
1451 res = n + 1;
1452 goto out;
1453 }
1454 }
1455
1456 {
1457 struct ovs_action_push_eth push;
1458 int eth_type = 0;
1459 int n1 = -1;
1460
1461 if (ovs_scan(&s[n], "push_eth(src="ETH_ADDR_SCAN_FMT","
1462 "dst="ETH_ADDR_SCAN_FMT",type=%i)%n",
1463 ETH_ADDR_SCAN_ARGS(push.addresses.eth_src),
1464 ETH_ADDR_SCAN_ARGS(push.addresses.eth_dst),
1465 &eth_type, &n1)) {
1466
1467 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_ETH,
1468 &push, sizeof push);
1469
1470 res = n + n1;
1471 goto out;
1472 }
1473 }
1474
1475 if (!strncmp(&s[n], "pop_eth", 7)) {
1476 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_ETH);
1477 res = 7;
1478 goto out;
1479 }
1480
1481 res = -EINVAL;
1482 out:
1483 ofpbuf_uninit(&buf);
1484 return res;
1485 }
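
/* Example input (added for exposition) that parse_odp_userspace_action()
 * above accepts, matching the output of format_odp_userspace_action():
 *
 *     userspace(pid=6633,sFlow(vid=10,pcp=7,output=5))
 */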
1486
1487 static int
1488 ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
1489 {
1490 struct eth_header *eth;
1491 struct ip_header *ip;
1492 struct ovs_16aligned_ip6_hdr *ip6;
1493 struct udp_header *udp;
1494 struct gre_base_hdr *greh;
1495 struct erspan_base_hdr *ersh;
1496 struct erspan_md2 *md2;
1497 uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, udp_csum, sid;
1498 ovs_be32 sip, dip;
1499 uint32_t tnl_type = 0, header_len = 0, ip_len = 0, erspan_idx = 0;
1500 void *l3, *l4;
1501 int n = 0;
1502 uint8_t hwid, dir;
1503
1504 if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
1505 return -EINVAL;
1506 }
1507 eth = (struct eth_header *) data->header;
1508 l3 = (struct ip_header *) (eth + 1);
1509 ip = (struct ip_header *) l3;
1510 ip6 = (struct ovs_16aligned_ip6_hdr *) l3;
1511 if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
1512 "eth(dst="ETH_ADDR_SCAN_FMT",",
1513 &data->header_len,
1514 &data->tnl_type,
1515 ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
1516 return -EINVAL;
1517 }
1518
1519 if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
1520 ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
1521 return -EINVAL;
1522 }
1523 if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
1524 return -EINVAL;
1525 }
1526 eth->eth_type = htons(dl_type);
1527
1528 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1529 /* IPv4 */
1530 uint16_t ip_frag_off;
1531 memset(ip, 0, sizeof(*ip));
1532 if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
1533 ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
1534 IP_SCAN_ARGS(&sip),
1535 IP_SCAN_ARGS(&dip),
1536 &ip->ip_proto, &ip->ip_tos,
1537 &ip->ip_ttl, &ip_frag_off)) {
1538 return -EINVAL;
1539 }
1540 put_16aligned_be32(&ip->ip_src, sip);
1541 put_16aligned_be32(&ip->ip_dst, dip);
1542 ip->ip_frag_off = htons(ip_frag_off);
1543 ip->ip_ihl_ver = IP_IHL_VER(5, 4);
1544 ip_len = sizeof *ip;
1545 ip->ip_csum = csum(ip, ip_len);
1546 } else {
1547 char sip6_s[IPV6_SCAN_LEN + 1];
1548 char dip6_s[IPV6_SCAN_LEN + 1];
1549 struct in6_addr sip6, dip6;
1550 uint8_t tclass;
1551 uint32_t label;
1552 if (!ovs_scan_len(s, &n, "ipv6(src="IPV6_SCAN_FMT",dst="IPV6_SCAN_FMT
1553 ",label=%i,proto=%"SCNi8",tclass=0x%"SCNx8
1554 ",hlimit=%"SCNi8"),",
1555 sip6_s, dip6_s, &label, &ip6->ip6_nxt,
1556 &tclass, &ip6->ip6_hlim)
1557 || (label & ~IPV6_LABEL_MASK) != 0
1558 || inet_pton(AF_INET6, sip6_s, &sip6) != 1
1559 || inet_pton(AF_INET6, dip6_s, &dip6) != 1) {
1560 return -EINVAL;
1561 }
1562 put_16aligned_be32(&ip6->ip6_flow, htonl(6 << 28) |
1563 htonl(tclass << 20) | htonl(label));
1564 memcpy(&ip6->ip6_src, &sip6, sizeof(ip6->ip6_src));
1565 memcpy(&ip6->ip6_dst, &dip6, sizeof(ip6->ip6_dst));
1566 ip_len = sizeof *ip6;
1567 }
1568
1569 /* Tunnel header */
1570 l4 = ((uint8_t *) l3 + ip_len);
1571 udp = (struct udp_header *) l4;
1572 greh = (struct gre_base_hdr *) l4;
1573 if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
1574 &udp_src, &udp_dst, &udp_csum)) {
1575 uint32_t vx_flags, vni;
1576
1577 udp->udp_src = htons(udp_src);
1578 udp->udp_dst = htons(udp_dst);
1579 udp->udp_len = 0;
1580 udp->udp_csum = htons(udp_csum);
1581
1582 if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
1583 &vx_flags, &vni)) {
1584 struct vxlanhdr *vxh = (struct vxlanhdr *) (udp + 1);
1585
1586 put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
1587 put_16aligned_be32(&vxh->vx_vni, htonl(vni << 8));
1588 tnl_type = OVS_VPORT_TYPE_VXLAN;
1589 header_len = sizeof *eth + ip_len +
1590 sizeof *udp + sizeof *vxh;
1591 } else if (ovs_scan_len(s, &n, "geneve(")) {
1592 struct genevehdr *gnh = (struct genevehdr *) (udp + 1);
1593
1594 memset(gnh, 0, sizeof *gnh);
1595 header_len = sizeof *eth + ip_len +
1596 sizeof *udp + sizeof *gnh;
1597
1598 if (ovs_scan_len(s, &n, "oam,")) {
1599 gnh->oam = 1;
1600 }
1601 if (ovs_scan_len(s, &n, "crit,")) {
1602 gnh->critical = 1;
1603 }
1604 if (!ovs_scan_len(s, &n, "vni=%"SCNi32, &vni)) {
1605 return -EINVAL;
1606 }
1607 if (ovs_scan_len(s, &n, ",options(")) {
1608 struct geneve_scan options;
1609 int len;
1610
1611 memset(&options, 0, sizeof options);
1612 len = scan_geneve(s + n, &options, NULL);
1613 if (!len) {
1614 return -EINVAL;
1615 }
1616
1617 memcpy(gnh->options, options.d, options.len);
1618 gnh->opt_len = options.len / 4;
1619 header_len += options.len;
1620
1621 n += len;
1622 }
1623 if (!ovs_scan_len(s, &n, "))")) {
1624 return -EINVAL;
1625 }
1626
1627 gnh->proto_type = htons(ETH_TYPE_TEB);
1628 put_16aligned_be32(&gnh->vni, htonl(vni << 8));
1629 tnl_type = OVS_VPORT_TYPE_GENEVE;
1630 } else {
1631 return -EINVAL;
1632 }
1633 } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
1634 &gre_flags, &gre_proto)){
1635
1636 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1637 tnl_type = OVS_VPORT_TYPE_GRE;
1638 } else {
1639 tnl_type = OVS_VPORT_TYPE_IP6GRE;
1640 }
1641 greh->flags = htons(gre_flags);
1642 greh->protocol = htons(gre_proto);
1643 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
1644
1645 if (greh->flags & htons(GRE_CSUM)) {
1646 uint16_t csum;
1647 if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
1648 return -EINVAL;
1649 }
1650
1651 memset(options, 0, sizeof *options);
1652 *((ovs_be16 *)options) = htons(csum);
1653 options++;
1654 }
1655 if (greh->flags & htons(GRE_KEY)) {
1656 uint32_t key;
1657
1658 if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
1659 return -EINVAL;
1660 }
1661
1662 put_16aligned_be32(options, htonl(key));
1663 options++;
1664 }
1665 if (greh->flags & htons(GRE_SEQ)) {
1666 uint32_t seq;
1667
1668 if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
1669 return -EINVAL;
1670 }
1671 put_16aligned_be32(options, htonl(seq));
1672 options++;
1673 }
1674
1675 if (!ovs_scan_len(s, &n, "))")) {
1676 return -EINVAL;
1677 }
1678
1679 header_len = sizeof *eth + ip_len +
1680 ((uint8_t *) options - (uint8_t *) greh);
1681 } else if (ovs_scan_len(s, &n, "erspan(ver=1,sid=0x%"SCNx16",idx=0x%"SCNx32")",
1682 &sid, &erspan_idx)) {
1683 ersh = ERSPAN_HDR(greh);
1684 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
1685 ersh + 1);
1686
1687 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1688 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1689 } else {
1690 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1691 }
1692
1693 greh->flags = htons(GRE_SEQ);
1694 greh->protocol = htons(ETH_TYPE_ERSPAN1);
1695
1696 ersh->ver = 1;
1697 set_sid(ersh, sid);
1698 put_16aligned_be32(index, htonl(erspan_idx));
1699
1700 if (!ovs_scan_len(s, &n, ")")) {
1701 return -EINVAL;
1702 }
1703 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1704 sizeof *ersh + ERSPAN_V1_MDSIZE;
1705
1706 } else if (ovs_scan_len(s, &n, "erspan(ver=2,sid=0x%"SCNx16",dir=%"SCNu8
1707 ",hwid=0x%"SCNx8")", &sid, &dir, &hwid)) {
1708
1709 ersh = ERSPAN_HDR(greh);
1710 md2 = ALIGNED_CAST(struct erspan_md2 *, ersh + 1);
1711
1712 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1713 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1714 } else {
1715 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1716 }
1717
1718 greh->flags = htons(GRE_SEQ);
1719 greh->protocol = htons(ETH_TYPE_ERSPAN2);
1720
1721 ersh->ver = 2;
1722 set_sid(ersh, sid);
1723 set_hwid(md2, hwid);
1724 md2->dir = dir;
1725
1726 if (!ovs_scan_len(s, &n, ")")) {
1727 return -EINVAL;
1728 }
1729
1730 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1731 sizeof *ersh + ERSPAN_V2_MDSIZE;
1732 } else {
1733 return -EINVAL;
1734 }
1735
1736 /* Check that the scanned tunnel metadata matches the parsed header. */
1737 if (data->tnl_type != tnl_type) {
1738 return -EINVAL;
1739 }
1740 if (data->header_len != header_len) {
1741 return -EINVAL;
1742 }
1743
1744 /* Out port */
1745 if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
1746 return -EINVAL;
1747 }
1748
1749 return n;
1750 }
1751
1752 struct ct_nat_params {
1753 bool snat;
1754 bool dnat;
1755 size_t addr_len;
1756 union {
1757 ovs_be32 ip;
1758 struct in6_addr ip6;
1759 } addr_min;
1760 union {
1761 ovs_be32 ip;
1762 struct in6_addr ip6;
1763 } addr_max;
1764 uint16_t proto_min;
1765 uint16_t proto_max;
1766 bool persistent;
1767 bool proto_hash;
1768 bool proto_random;
1769 };
1770
1771 static int
1772 scan_ct_nat_range(const char *s, int *n, struct ct_nat_params *p)
1773 {
1774 if (ovs_scan_len(s, n, "=")) {
1775 char ipv6_s[IPV6_SCAN_LEN + 1];
1776 struct in6_addr ipv6;
1777
1778 if (ovs_scan_len(s, n, IP_SCAN_FMT, IP_SCAN_ARGS(&p->addr_min.ip))) {
1779 p->addr_len = sizeof p->addr_min.ip;
1780 if (ovs_scan_len(s, n, "-")) {
1781 if (!ovs_scan_len(s, n, IP_SCAN_FMT,
1782 IP_SCAN_ARGS(&p->addr_max.ip))) {
1783 return -EINVAL;
1784 }
1785 }
1786 } else if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1787 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1788 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1789 p->addr_len = sizeof p->addr_min.ip6;
1790 p->addr_min.ip6 = ipv6;
1791 if (ovs_scan_len(s, n, "-")) {
1792 if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1793 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1794 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1795 p->addr_max.ip6 = ipv6;
1796 } else {
1797 return -EINVAL;
1798 }
1799 }
1800 } else {
1801 return -EINVAL;
1802 }
1803 if (ovs_scan_len(s, n, ":%"SCNu16, &p->proto_min)) {
1804 if (ovs_scan_len(s, n, "-")) {
1805 if (!ovs_scan_len(s, n, "%"SCNu16, &p->proto_max)) {
1806 return -EINVAL;
1807 }
1808 }
1809 }
1810 }
1811 return 0;
1812 }
1813
1814 static int
1815 scan_ct_nat(const char *s, struct ct_nat_params *p)
1816 {
1817 int n = 0;
1818
1819 if (ovs_scan_len(s, &n, "nat")) {
1820 memset(p, 0, sizeof *p);
1821
1822 if (ovs_scan_len(s, &n, "(")) {
1823 char *end;
1824 int end_n;
1825
1826 end = strchr(s + n, ')');
1827 if (!end) {
1828 return -EINVAL;
1829 }
1830 end_n = end - s;
1831
1832 while (n < end_n) {
1833 n += strspn(s + n, delimiters);
1834 if (ovs_scan_len(s, &n, "src")) {
1835 int err = scan_ct_nat_range(s, &n, p);
1836 if (err) {
1837 return err;
1838 }
1839 p->snat = true;
1840 continue;
1841 }
1842 if (ovs_scan_len(s, &n, "dst")) {
1843 int err = scan_ct_nat_range(s, &n, p);
1844 if (err) {
1845 return err;
1846 }
1847 p->dnat = true;
1848 continue;
1849 }
1850 if (ovs_scan_len(s, &n, "persistent")) {
1851 p->persistent = true;
1852 continue;
1853 }
1854 if (ovs_scan_len(s, &n, "hash")) {
1855 p->proto_hash = true;
1856 continue;
1857 }
1858 if (ovs_scan_len(s, &n, "random")) {
1859 p->proto_random = true;
1860 continue;
1861 }
1862 return -EINVAL;
1863 }
1864
1865 if (p->snat && p->dnat) {
1866 return -EINVAL;
1867 }
1868 if ((p->addr_len != 0 &&
1869 memcmp(&p->addr_max, &in6addr_any, p->addr_len) &&
1870 memcmp(&p->addr_max, &p->addr_min, p->addr_len) < 0) ||
1871 (p->proto_max && p->proto_max < p->proto_min)) {
1872 return -EINVAL;
1873 }
1874 if (p->proto_hash && p->proto_random) {
1875 return -EINVAL;
1876 }
1877 n++;
1878 }
1879 }
1880 return n;
1881 }
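
/* Illustrative sketch only: a hypothetical, unused caller that exercises
 * scan_ct_nat() on one example nat() string.  The address and port ranges
 * below are made up. */
static void OVS_UNUSED
example_scan_ct_nat(void)
{
    struct ct_nat_params p;
    int n = scan_ct_nat("nat(src=10.0.0.1-10.0.0.9:4000-5000,persistent)", &p);

    if (n > 0) {
        /* 'n' is the number of bytes consumed; 'p' now has snat set, the
         * address range in addr_min/addr_max, and the port range in
         * proto_min/proto_max. */
        ovs_assert(p.snat && p.persistent);
    }
}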
1882
1883 static void
1884 nl_msg_put_ct_nat(struct ct_nat_params *p, struct ofpbuf *actions)
1885 {
1886 size_t start = nl_msg_start_nested(actions, OVS_CT_ATTR_NAT);
1887
1888 if (p->snat) {
1889 nl_msg_put_flag(actions, OVS_NAT_ATTR_SRC);
1890 } else if (p->dnat) {
1891 nl_msg_put_flag(actions, OVS_NAT_ATTR_DST);
1892 } else {
1893 goto out;
1894 }
1895 if (p->addr_len != 0) {
1896 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MIN, &p->addr_min,
1897 p->addr_len);
1898 if (memcmp(&p->addr_max, &p->addr_min, p->addr_len) > 0) {
1899 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MAX, &p->addr_max,
1900 p->addr_len);
1901 }
1902 if (p->proto_min) {
1903 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MIN, p->proto_min);
1904 if (p->proto_max && p->proto_max > p->proto_min) {
1905 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MAX, p->proto_max);
1906 }
1907 }
1908 if (p->persistent) {
1909 nl_msg_put_flag(actions, OVS_NAT_ATTR_PERSISTENT);
1910 }
1911 if (p->proto_hash) {
1912 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_HASH);
1913 }
1914 if (p->proto_random) {
1915 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_RANDOM);
1916 }
1917 }
1918 out:
1919 nl_msg_end_nested(actions, start);
1920 }
1921
1922 static int
1923 parse_conntrack_action(const char *s_, struct ofpbuf *actions)
1924 {
1925 const char *s = s_;
1926
1927 if (ovs_scan(s, "ct")) {
1928 const char *helper = NULL, *timeout = NULL;
1929 size_t helper_len = 0, timeout_len = 0;
1930 bool commit = false;
1931 bool force_commit = false;
1932 uint16_t zone = 0;
1933 struct {
1934 uint32_t value;
1935 uint32_t mask;
1936 } ct_mark = { 0, 0 };
1937 struct {
1938 ovs_u128 value;
1939 ovs_u128 mask;
1940 } ct_label;
1941 struct ct_nat_params nat_params;
1942 bool have_nat = false;
1943 size_t start;
1944 char *end;
1945
1946 memset(&ct_label, 0, sizeof(ct_label));
1947
1948 s += 2;
1949 if (ovs_scan(s, "(")) {
1950 s++;
1951 find_end:
1952 end = strchr(s, ')');
1953 if (!end) {
1954 return -EINVAL;
1955 }
1956
1957 while (s != end) {
1958 int n;
1959
1960 s += strspn(s, delimiters);
1961 if (ovs_scan(s, "commit%n", &n)) {
1962 commit = true;
1963 s += n;
1964 continue;
1965 }
1966 if (ovs_scan(s, "force_commit%n", &n)) {
1967 force_commit = true;
1968 s += n;
1969 continue;
1970 }
1971 if (ovs_scan(s, "zone=%"SCNu16"%n", &zone, &n)) {
1972 s += n;
1973 continue;
1974 }
1975 if (ovs_scan(s, "mark=%"SCNx32"%n", &ct_mark.value, &n)) {
1976 s += n;
1977 n = -1;
1978 if (ovs_scan(s, "/%"SCNx32"%n", &ct_mark.mask, &n)) {
1979 s += n;
1980 } else {
1981 ct_mark.mask = UINT32_MAX;
1982 }
1983 continue;
1984 }
1985 if (ovs_scan(s, "label=%n", &n)) {
1986 int retval;
1987
1988 s += n;
1989 retval = scan_u128(s, &ct_label.value, &ct_label.mask);
1990 if (retval == 0) {
1991 return -EINVAL;
1992 }
1993 s += retval;
1994 continue;
1995 }
1996 if (ovs_scan(s, "helper=%n", &n)) {
1997 s += n;
1998 helper_len = strcspn(s, delimiters_end);
1999 if (!helper_len || helper_len > 15) {
2000 return -EINVAL;
2001 }
2002 helper = s;
2003 s += helper_len;
2004 continue;
2005 }
2006 if (ovs_scan(s, "timeout=%n", &n)) {
2007 s += n;
2008 timeout_len = strcspn(s, delimiters_end);
2009 if (!timeout_len || timeout_len > 31) {
2010 return -EINVAL;
2011 }
2012 timeout = s;
2013 s += timeout_len;
2014 continue;
2015 }
2016
2017 n = scan_ct_nat(s, &nat_params);
2018 if (n > 0) {
2019 s += n;
2020 have_nat = true;
2021
2022 /* 'end' points to the end of the nested nat action;
2023 * find the real (outer) end. */
2024 goto find_end;
2025 }
2026 /* Nothing matched. */
2027 return -EINVAL;
2028 }
2029 s++;
2030 }
2031 if (commit && force_commit) {
2032 return -EINVAL;
2033 }
2034
2035 start = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CT);
2036 if (commit) {
2037 nl_msg_put_flag(actions, OVS_CT_ATTR_COMMIT);
2038 } else if (force_commit) {
2039 nl_msg_put_flag(actions, OVS_CT_ATTR_FORCE_COMMIT);
2040 }
2041 if (zone) {
2042 nl_msg_put_u16(actions, OVS_CT_ATTR_ZONE, zone);
2043 }
2044 if (ct_mark.mask) {
2045 nl_msg_put_unspec(actions, OVS_CT_ATTR_MARK, &ct_mark,
2046 sizeof(ct_mark));
2047 }
2048 if (!ovs_u128_is_zero(ct_label.mask)) {
2049 nl_msg_put_unspec(actions, OVS_CT_ATTR_LABELS, &ct_label,
2050 sizeof ct_label);
2051 }
2052 if (helper) {
2053 nl_msg_put_string__(actions, OVS_CT_ATTR_HELPER, helper,
2054 helper_len);
2055 }
2056 if (timeout) {
2057 nl_msg_put_string__(actions, OVS_CT_ATTR_TIMEOUT, timeout,
2058 timeout_len);
2059 }
2060 if (have_nat) {
2061 nl_msg_put_ct_nat(&nat_params, actions);
2062 }
2063 nl_msg_end_nested(actions, start);
2064 }
2065
2066 return s - s_;
2067 }
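
/* Illustrative sketch only: a hypothetical, unused caller showing the kind of
 * ct() string accepted by parse_conntrack_action() above.  The zone, mark,
 * and NAT address are examples. */
static void OVS_UNUSED
example_parse_conntrack_action(void)
{
    struct ofpbuf actions;
    int n;

    ofpbuf_init(&actions, 64);
    n = parse_conntrack_action("ct(commit,zone=5,mark=0x1/0xff,"
                               "nat(src=10.0.0.240))", &actions);
    if (n > 0) {
        /* 'actions' now ends with a nested OVS_ACTION_ATTR_CT attribute
         * carrying COMMIT, ZONE, MARK, and NAT sub-attributes. */
    }
    ofpbuf_uninit(&actions);
}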
2068
2069 static void
2070 nsh_key_to_attr(struct ofpbuf *buf, const struct ovs_key_nsh *nsh,
2071 uint8_t * metadata, size_t md_size,
2072 bool is_mask)
2073 {
2074 size_t nsh_key_ofs;
2075 struct ovs_nsh_key_base base;
2076
2077 base.flags = nsh->flags;
2078 base.ttl = nsh->ttl;
2079 base.mdtype = nsh->mdtype;
2080 base.np = nsh->np;
2081 base.path_hdr = nsh->path_hdr;
2082
2083 nsh_key_ofs = nl_msg_start_nested(buf, OVS_KEY_ATTR_NSH);
2084 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_BASE, &base, sizeof base);
2085
2086 if (is_mask) {
2087 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2088 sizeof nsh->context);
2089 } else {
2090 switch (nsh->mdtype) {
2091 case NSH_M_TYPE1:
2092 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2093 sizeof nsh->context);
2094 break;
2095 case NSH_M_TYPE2:
2096 if (metadata && md_size > 0) {
2097 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD2, metadata,
2098 md_size);
2099 }
2100 break;
2101 default:
2102 /* No match support for other MD formats yet. */
2103 break;
2104 }
2105 }
2106 nl_msg_end_nested(buf, nsh_key_ofs);
2107 }
2108
2109
2110 static int
2111 parse_odp_push_nsh_action(const char *s, struct ofpbuf *actions)
2112 {
2113 int n = 0;
2114 int ret = 0;
2115 uint32_t spi = 0;
2116 uint8_t si = 255;
2117 uint32_t cd;
2118 struct ovs_key_nsh nsh;
2119 uint8_t metadata[NSH_CTX_HDRS_MAX_LEN];
2120 uint8_t md_size = 0;
2121
2122 if (!ovs_scan_len(s, &n, "push_nsh(")) {
2123 ret = -EINVAL;
2124 goto out;
2125 }
2126
2127 /* The default is NSH_M_TYPE1 */
2128 nsh.flags = 0;
2129 nsh.ttl = 63;
2130 nsh.mdtype = NSH_M_TYPE1;
2131 nsh.np = NSH_P_ETHERNET;
2132 nsh.path_hdr = nsh_spi_si_to_path_hdr(0, 255);
2133 memset(nsh.context, 0, NSH_M_TYPE1_MDLEN);
2134
2135 for (;;) {
2136 n += strspn(s + n, delimiters);
2137 if (s[n] == ')') {
2138 break;
2139 }
2140
2141 if (ovs_scan_len(s, &n, "flags=%"SCNi8, &nsh.flags)) {
2142 continue;
2143 }
2144 if (ovs_scan_len(s, &n, "ttl=%"SCNi8, &nsh.ttl)) {
2145 continue;
2146 }
2147 if (ovs_scan_len(s, &n, "mdtype=%"SCNi8, &nsh.mdtype)) {
2148 switch (nsh.mdtype) {
2149 case NSH_M_TYPE1:
2150 /* This is the default format. */
2151 break;
2152 case NSH_M_TYPE2:
2153 /* Length will be updated later. */
2154 md_size = 0;
2155 break;
2156 default:
2157 ret = -EINVAL;
2158 goto out;
2159 }
2160 continue;
2161 }
2162 if (ovs_scan_len(s, &n, "np=%"SCNi8, &nsh.np)) {
2163 continue;
2164 }
2165 if (ovs_scan_len(s, &n, "spi=0x%"SCNx32, &spi)) {
2166 continue;
2167 }
2168 if (ovs_scan_len(s, &n, "si=%"SCNi8, &si)) {
2169 continue;
2170 }
2171 if (nsh.mdtype == NSH_M_TYPE1) {
2172 if (ovs_scan_len(s, &n, "c1=0x%"SCNx32, &cd)) {
2173 nsh.context[0] = htonl(cd);
2174 continue;
2175 }
2176 if (ovs_scan_len(s, &n, "c2=0x%"SCNx32, &cd)) {
2177 nsh.context[1] = htonl(cd);
2178 continue;
2179 }
2180 if (ovs_scan_len(s, &n, "c3=0x%"SCNx32, &cd)) {
2181 nsh.context[2] = htonl(cd);
2182 continue;
2183 }
2184 if (ovs_scan_len(s, &n, "c4=0x%"SCNx32, &cd)) {
2185 nsh.context[3] = htonl(cd);
2186 continue;
2187 }
2188 }
2189 else if (nsh.mdtype == NSH_M_TYPE2) {
2190 struct ofpbuf b;
2191 char buf[512];
2192 size_t mdlen, padding;
2193 if (ovs_scan_len(s, &n, "md2=0x%511[0-9a-fA-F]", buf)
2194 && n/2 <= sizeof metadata) {
2195 ofpbuf_use_stub(&b, metadata, sizeof metadata);
2196 ofpbuf_put_hex(&b, buf, &mdlen);
2197 /* Pad metadata to 4 bytes. */
2198 padding = PAD_SIZE(mdlen, 4);
2199 if (padding > 0) {
2200 ofpbuf_put_zeros(&b, padding);
2201 }
2202 md_size = mdlen + padding;
2203 ofpbuf_uninit(&b);
2204 continue;
2205 }
2206 }
2207
2208 ret = -EINVAL;
2209 goto out;
2210 }
2211 out:
2212 if (ret >= 0) {
2213 nsh.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
2214 size_t offset = nl_msg_start_nested(actions, OVS_ACTION_ATTR_PUSH_NSH);
2215 nsh_key_to_attr(actions, &nsh, metadata, md_size, false);
2216 nl_msg_end_nested(actions, offset);
2217 ret = n;
2218 }
2219 return ret;
2220 }
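
/* Illustrative sketch only: a hypothetical, unused caller with one example of
 * the push_nsh() syntax accepted above (MD type 1, two context headers). */
static void OVS_UNUSED
example_parse_push_nsh(void)
{
    struct ofpbuf actions;

    ofpbuf_init(&actions, 64);
    if (parse_odp_push_nsh_action("push_nsh(flags=0,ttl=63,mdtype=1,np=3,"
                                  "spi=0x100,si=100,c1=0x11223344,"
                                  "c2=0x55667788)", &actions) > 0) {
        /* 'actions' now holds a nested OVS_ACTION_ATTR_PUSH_NSH attribute. */
    }
    ofpbuf_uninit(&actions);
}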
2221
2222 static int
2223 parse_action_list(struct parse_odp_context *context, const char *s,
2224 struct ofpbuf *actions)
2225 {
2226 int n = 0;
2227
2228 for (;;) {
2229 int retval;
2230
2231 n += strspn(s + n, delimiters);
2232 if (s[n] == ')') {
2233 break;
2234 }
2235 retval = parse_odp_action(context, s + n, actions);
2236 if (retval < 0) {
2237 return retval;
2238 }
2239 n += retval;
2240 }
2241
2242 if (actions->size > UINT16_MAX) {
2243 return -EFBIG;
2244 }
2245
2246 return n;
2247 }
2248
2249
2250 static int
2251 parse_odp_action(struct parse_odp_context *context, const char *s,
2252 struct ofpbuf *actions)
2253 {
2254 int retval;
2255
2256 context->depth++;
2257
2258 if (context->depth == MAX_ODP_NESTED) {
2259 retval = -EINVAL;
2260 } else {
2261 retval = parse_odp_action__(context, s, actions);
2262 }
2263
2264 context->depth--;
2265
2266 return retval;
2267 }
2268
2269
2270 static int
2271 parse_odp_action__(struct parse_odp_context *context, const char *s,
2272 struct ofpbuf *actions)
2273 {
2274 {
2275 uint32_t port;
2276 int n;
2277
2278 if (ovs_scan(s, "%"SCNi32"%n", &port, &n)) {
2279 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, port);
2280 return n;
2281 }
2282 }
2283
2284 {
2285 uint32_t max_len;
2286 int n;
2287
2288 if (ovs_scan(s, "trunc(%"SCNi32")%n", &max_len, &n)) {
2289 struct ovs_action_trunc *trunc;
2290
2291 trunc = nl_msg_put_unspec_uninit(actions,
2292 OVS_ACTION_ATTR_TRUNC, sizeof *trunc);
2293 trunc->max_len = max_len;
2294 return n;
2295 }
2296 }
2297
2298 if (context->port_names) {
2299 int len = strcspn(s, delimiters);
2300 struct simap_node *node;
2301
2302 node = simap_find_len(context->port_names, s, len);
2303 if (node) {
2304 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
2305 return len;
2306 }
2307 }
2308
2309 {
2310 uint32_t recirc_id;
2311 int n = -1;
2312
2313 if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
2314 nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
2315 return n;
2316 }
2317 }
2318
2319 if (!strncmp(s, "userspace(", 10)) {
2320 return parse_odp_userspace_action(s, actions);
2321 }
2322
2323 if (!strncmp(s, "set(", 4)) {
2324 size_t start_ofs;
2325 int retval;
2326 struct nlattr mask[1024 / sizeof(struct nlattr)];
2327 struct ofpbuf maskbuf = OFPBUF_STUB_INITIALIZER(mask);
2328 struct nlattr *nested, *key;
2329 size_t size;
2330
2331 start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
2332 retval = parse_odp_key_mask_attr(context, s + 4, actions, &maskbuf);
2333 if (retval < 0) {
2334 ofpbuf_uninit(&maskbuf);
2335 return retval;
2336 }
2337 if (s[retval + 4] != ')') {
2338 ofpbuf_uninit(&maskbuf);
2339 return -EINVAL;
2340 }
2341
2342 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2343 key = nested + 1;
2344
2345 size = nl_attr_get_size(mask);
2346 if (size == nl_attr_get_size(key)) {
2347 /* Change to masked set action if not fully masked. */
2348 if (!is_all_ones(mask + 1, size)) {
2349 /* Remove padding of earlier key payload. */
2350 actions->size -= NLA_ALIGN(key->nla_len) - key->nla_len;
2351
2352 /* Put mask payload right after key payload */
2353 key->nla_len += size;
2354 ofpbuf_put(actions, mask + 1, size);
2355
2356 /* 'actions' may have been reallocated by ofpbuf_put(). */
2357 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2358 nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
2359
2360 key = nested + 1;
2361 /* Add new padding as needed */
2362 ofpbuf_put_zeros(actions, NLA_ALIGN(key->nla_len) -
2363 key->nla_len);
2364 }
2365 }
2366 ofpbuf_uninit(&maskbuf);
2367
2368 nl_msg_end_nested(actions, start_ofs);
2369 return retval + 5;
2370 }
2371
2372 {
2373 struct ovs_action_push_vlan push;
2374 int tpid = ETH_TYPE_VLAN;
2375 int vid, pcp;
2376 int cfi = 1;
2377 int n = -1;
2378
2379 if (ovs_scan(s, "push_vlan(vid=%i,pcp=%i)%n", &vid, &pcp, &n)
2380 || ovs_scan(s, "push_vlan(vid=%i,pcp=%i,cfi=%i)%n",
2381 &vid, &pcp, &cfi, &n)
2382 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i)%n",
2383 &tpid, &vid, &pcp, &n)
2384 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
2385 &tpid, &vid, &pcp, &cfi, &n)) {
2386 if ((vid & ~(VLAN_VID_MASK >> VLAN_VID_SHIFT)) != 0
2387 || (pcp & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) != 0) {
2388 return -EINVAL;
2389 }
2390 push.vlan_tpid = htons(tpid);
2391 push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
2392 | (pcp << VLAN_PCP_SHIFT)
2393 | (cfi ? VLAN_CFI : 0));
2394 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_VLAN,
2395 &push, sizeof push);
2396
2397 return n;
2398 }
2399 }
2400
2401 if (!strncmp(s, "pop_vlan", 8)) {
2402 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_VLAN);
2403 return 8;
2404 }
2405
2406 {
2407 unsigned long long int meter_id;
2408 int n = -1;
2409
2410 if (sscanf(s, "meter(%lli)%n", &meter_id, &n) > 0 && n > 0) {
2411 nl_msg_put_u32(actions, OVS_ACTION_ATTR_METER, meter_id);
2412 return n;
2413 }
2414 }
2415
2416 {
2417 double percentage;
2418 int n = -1;
2419
2420 if (ovs_scan(s, "sample(sample=%lf%%,actions(%n", &percentage, &n)
2421 && percentage >= 0. && percentage <= 100.0) {
2422 size_t sample_ofs, actions_ofs;
2423 double probability;
2424
2425 probability = floor(UINT32_MAX * (percentage / 100.0) + .5);
2426 sample_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
2427 nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY,
2428 (probability <= 0 ? 0
2429 : probability >= UINT32_MAX ? UINT32_MAX
2430 : probability));
2431
2432 actions_ofs = nl_msg_start_nested(actions,
2433 OVS_SAMPLE_ATTR_ACTIONS);
2434 int retval = parse_action_list(context, s + n, actions);
2435 if (retval < 0) {
2436 return retval;
2437 }
2438
2439
2440 n += retval;
2441 nl_msg_end_nested(actions, actions_ofs);
2442 nl_msg_end_nested(actions, sample_ofs);
2443
2444 return s[n + 1] == ')' ? n + 2 : -EINVAL;
2445 }
2446 }
2447
2448 {
2449 if (!strncmp(s, "clone(", 6)) {
2450 size_t actions_ofs;
2451 int n = 6;
2452
2453 actions_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CLONE);
2454 int retval = parse_action_list(context, s + n, actions);
2455 if (retval < 0) {
2456 return retval;
2457 }
2458 n += retval;
2459 nl_msg_end_nested(actions, actions_ofs);
2460 return n + 1;
2461 }
2462 }
2463
2464 {
2465 if (!strncmp(s, "push_nsh(", 9)) {
2466 int retval = parse_odp_push_nsh_action(s, actions);
2467 if (retval < 0) {
2468 return retval;
2469 }
2470 return retval + 1;
2471 }
2472 }
2473
2474 {
2475 int n;
2476 if (ovs_scan(s, "pop_nsh()%n", &n)) {
2477 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_NSH);
2478 return n;
2479 }
2480 }
2481
2482 {
2483 uint32_t port;
2484 int n;
2485
2486 if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
2487 nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
2488 return n;
2489 }
2490 }
2491
2492 {
2493 if (!strncmp(s, "ct_clear", 8)) {
2494 nl_msg_put_flag(actions, OVS_ACTION_ATTR_CT_CLEAR);
2495 return 8;
2496 }
2497 }
2498
2499 {
2500 uint16_t pkt_len;
2501 int n = -1;
2502 if (ovs_scan(s, "check_pkt_len(size=%"SCNi16",gt(%n", &pkt_len, &n)) {
2503 size_t cpl_ofs, actions_ofs;
2504 cpl_ofs = nl_msg_start_nested(actions,
2505 OVS_ACTION_ATTR_CHECK_PKT_LEN);
2506 nl_msg_put_u16(actions, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, pkt_len);
2507 actions_ofs = nl_msg_start_nested(
2508 actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
2509
2510 int retval;
2511 if (!strncasecmp(s + n, "drop", 4)) {
2512 n += 4;
2513 } else {
2514 retval = parse_action_list(context, s + n, actions);
2515 if (retval < 0) {
2516 return retval;
2517 }
2518
2519 n += retval;
2520 }
2521 nl_msg_end_nested(actions, actions_ofs);
2522 retval = -1;
2523 if (!ovs_scan(s + n, "),le(%n", &retval)) {
2524 return -EINVAL;
2525 }
2526 n += retval;
2527
2528 actions_ofs = nl_msg_start_nested(
2529 actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
2530 if (!strncasecmp(s + n, "drop", 4)) {
2531 n += 4;
2532 } else {
2533 retval = parse_action_list(context, s + n, actions);
2534 if (retval < 0) {
2535 return retval;
2536 }
2537 n += retval;
2538 }
2539 nl_msg_end_nested(actions, actions_ofs);
2540 nl_msg_end_nested(actions, cpl_ofs);
2541 return s[n + 1] == ')' ? n + 2 : -EINVAL;
2542 }
2543 }
2544
2545 {
2546 int retval;
2547
2548 retval = parse_conntrack_action(s, actions);
2549 if (retval) {
2550 return retval;
2551 }
2552 }
2553
2554 {
2555 struct ovs_action_push_tnl data;
2556 int n;
2557
2558 n = ovs_parse_tnl_push(s, &data);
2559 if (n > 0) {
2560 odp_put_tnl_push_action(actions, &data);
2561 return n;
2562 } else if (n < 0) {
2563 return n;
2564 }
2565 }
2566
2567 return -EINVAL;
2568 }
2569
2570 /* Parses the string representation of datapath actions, in the format output
2571 * by format_odp_action(). Returns 0 if successful, otherwise a positive errno
2572 * value. On success, the ODP actions are appended to 'actions' as a series of
2573 * Netlink attributes. On failure, no data is appended to 'actions'. Either
2574 * way, 'actions''s data might be reallocated. */
2575 int
2576 odp_actions_from_string(const char *s, const struct simap *port_names,
2577 struct ofpbuf *actions)
2578 {
2579 size_t old_size;
2580
2581 if (!strcasecmp(s, "drop")) {
2582 nl_msg_put_u32(actions, OVS_ACTION_ATTR_DROP, XLATE_OK);
2583 return 0;
2584 }
2585
2586 struct parse_odp_context context = (struct parse_odp_context) {
2587 .port_names = port_names,
2588 };
2589
2590 old_size = actions->size;
2591 for (;;) {
2592 int retval;
2593
2594 s += strspn(s, delimiters);
2595 if (!*s) {
2596 return 0;
2597 }
2598
2599 retval = parse_odp_action(&context, s, actions);
2600
2601 if (retval < 0 || !strchr(delimiters, s[retval])) {
2602 actions->size = old_size;
2603 return -retval;
2604 }
2605 s += retval;
2606 }
2607
2608 return 0;
2609 }
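
/* Illustrative sketch only: a hypothetical, unused caller of
 * odp_actions_from_string().  The action string and the initial buffer size
 * are examples; no port name map is supplied. */
static void OVS_UNUSED
example_odp_actions_from_string(void)
{
    struct ofpbuf actions;

    ofpbuf_init(&actions, 64);
    if (!odp_actions_from_string("2,ct(commit),recirc(1)", NULL, &actions)) {
        /* 'actions' now holds OVS_ACTION_ATTR_* Netlink attributes that can
         * be handed to the datapath or printed with format_odp_actions(). */
    }
    ofpbuf_uninit(&actions);
}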
2610 \f
2611 static const struct attr_len_tbl ovs_vxlan_ext_attr_lens[OVS_VXLAN_EXT_MAX + 1] = {
2612 [OVS_VXLAN_EXT_GBP] = { .len = 4 },
2613 };
2614
2615 static const struct attr_len_tbl ovs_tun_key_attr_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
2616 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = 8 },
2617 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = 4 },
2618 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = 4 },
2619 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
2620 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
2621 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
2622 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
2623 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = 2 },
2624 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = 2 },
2625 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
2626 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = ATTR_LEN_VARIABLE },
2627 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = ATTR_LEN_NESTED,
2628 .next = ovs_vxlan_ext_attr_lens ,
2629 .next_max = OVS_VXLAN_EXT_MAX},
2630 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = 16 },
2631 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = 16 },
2632 [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = ATTR_LEN_VARIABLE },
2633 };
2634
2635 const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
2636 [OVS_KEY_ATTR_ENCAP] = { .len = ATTR_LEN_NESTED },
2637 [OVS_KEY_ATTR_PRIORITY] = { .len = 4 },
2638 [OVS_KEY_ATTR_SKB_MARK] = { .len = 4 },
2639 [OVS_KEY_ATTR_DP_HASH] = { .len = 4 },
2640 [OVS_KEY_ATTR_RECIRC_ID] = { .len = 4 },
2641 [OVS_KEY_ATTR_TUNNEL] = { .len = ATTR_LEN_NESTED,
2642 .next = ovs_tun_key_attr_lens,
2643 .next_max = OVS_TUNNEL_KEY_ATTR_MAX },
2644 [OVS_KEY_ATTR_IN_PORT] = { .len = 4 },
2645 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
2646 [OVS_KEY_ATTR_VLAN] = { .len = 2 },
2647 [OVS_KEY_ATTR_ETHERTYPE] = { .len = 2 },
2648 [OVS_KEY_ATTR_MPLS] = { .len = ATTR_LEN_VARIABLE },
2649 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
2650 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
2651 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
2652 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = 2 },
2653 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
2654 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
2655 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
2656 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
2657 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
2658 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
2659 [OVS_KEY_ATTR_ND_EXTENSIONS] = { .len = sizeof(struct ovs_key_nd_extensions) },
2660 [OVS_KEY_ATTR_CT_STATE] = { .len = 4 },
2661 [OVS_KEY_ATTR_CT_ZONE] = { .len = 2 },
2662 [OVS_KEY_ATTR_CT_MARK] = { .len = 4 },
2663 [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
2664 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = { .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
2665 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = { .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
2666 [OVS_KEY_ATTR_PACKET_TYPE] = { .len = 4 },
2667 [OVS_KEY_ATTR_NSH] = { .len = ATTR_LEN_NESTED,
2668 .next = ovs_nsh_key_attr_lens,
2669 .next_max = OVS_NSH_KEY_ATTR_MAX },
2670 };
2671
2672 /* Returns the correct length of the payload for a flow key attribute of the
2673 * specified 'type', ATTR_LEN_INVALID if 'type' is unknown, ATTR_LEN_VARIABLE
2674 * if the attribute's payload is variable length, or ATTR_LEN_NESTED if the
2675 * payload is a nested type. */
2676 static int
2677 odp_key_attr_len(const struct attr_len_tbl tbl[], int max_type, uint16_t type)
2678 {
2679 if (type > max_type) {
2680 return ATTR_LEN_INVALID;
2681 }
2682
2683 return tbl[type].len;
2684 }
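
/* Minimal sketch (unused, for illustration) of a length-table lookup with the
 * helper above.  Negative results (ATTR_LEN_INVALID, ATTR_LEN_VARIABLE,
 * ATTR_LEN_NESTED) need their own handling and are simply passed over here. */
static bool OVS_UNUSED
example_flow_key_attr_len_ok(const struct nlattr *a)
{
    int expected = odp_key_attr_len(ovs_flow_key_attr_lens, OVS_KEY_ATTR_MAX,
                                    nl_attr_type(a));

    return expected < 0 || nl_attr_get_size(a) == (size_t) expected;
}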
2685
2686 static void
2687 format_generic_odp_key(const struct nlattr *a, struct ds *ds)
2688 {
2689 size_t len = nl_attr_get_size(a);
2690 if (len) {
2691 const uint8_t *unspec;
2692 unsigned int i;
2693
2694 unspec = nl_attr_get(a);
2695 for (i = 0; i < len; i++) {
2696 if (i) {
2697 ds_put_char(ds, ' ');
2698 }
2699 ds_put_format(ds, "%02x", unspec[i]);
2700 }
2701 }
2702 }
2703
2704 static const char *
2705 ovs_frag_type_to_string(enum ovs_frag_type type)
2706 {
2707 switch (type) {
2708 case OVS_FRAG_TYPE_NONE:
2709 return "no";
2710 case OVS_FRAG_TYPE_FIRST:
2711 return "first";
2712 case OVS_FRAG_TYPE_LATER:
2713 return "later";
2714 case __OVS_FRAG_TYPE_MAX:
2715 default:
2716 return "<error>";
2717 }
2718 }
2719
2720 enum odp_key_fitness
2721 odp_nsh_hdr_from_attr(const struct nlattr *attr,
2722 struct nsh_hdr *nsh_hdr, size_t size)
2723 {
2724 unsigned int left;
2725 const struct nlattr *a;
2726 bool unknown = false;
2727 uint8_t flags = 0;
2728 uint8_t ttl = 63;
2729 size_t mdlen = 0;
2730 bool has_md1 = false;
2731 bool has_md2 = false;
2732
2733 memset(nsh_hdr, 0, size);
2734
2735 NL_NESTED_FOR_EACH (a, left, attr) {
2736 uint16_t type = nl_attr_type(a);
2737 size_t len = nl_attr_get_size(a);
2738 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2739 OVS_NSH_KEY_ATTR_MAX, type);
2740
2741 if (len != expected_len && expected_len >= 0) {
2742 return ODP_FIT_ERROR;
2743 }
2744
2745 switch (type) {
2746 case OVS_NSH_KEY_ATTR_BASE: {
2747 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2748 nsh_hdr->next_proto = base->np;
2749 nsh_hdr->md_type = base->mdtype;
2750 put_16aligned_be32(&nsh_hdr->path_hdr, base->path_hdr);
2751 flags = base->flags;
2752 ttl = base->ttl;
2753 break;
2754 }
2755 case OVS_NSH_KEY_ATTR_MD1: {
2756 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2757 struct nsh_md1_ctx *md1_dst = &nsh_hdr->md1;
2758 has_md1 = true;
2759 mdlen = nl_attr_get_size(a);
2760 if ((mdlen + NSH_BASE_HDR_LEN != NSH_M_TYPE1_LEN) ||
2761 (mdlen + NSH_BASE_HDR_LEN > size)) {
2762 return ODP_FIT_ERROR;
2763 }
2764 memcpy(md1_dst, md1, mdlen);
2765 break;
2766 }
2767 case OVS_NSH_KEY_ATTR_MD2: {
2768 struct nsh_md2_tlv *md2_dst = &nsh_hdr->md2;
2769 const uint8_t *md2 = nl_attr_get(a);
2770 has_md2 = true;
2771 mdlen = nl_attr_get_size(a);
2772 if (mdlen + NSH_BASE_HDR_LEN > size) {
2773 return ODP_FIT_ERROR;
2774 }
2775 memcpy(md2_dst, md2, mdlen);
2776 break;
2777 }
2778 default:
2779 /* Allow unknown NSH attributes to show up as unexpected, eventually
2780 * resulting in ODP_FIT_TOO_MUCH. */
2781 unknown = true;
2782 break;
2783 }
2784 }
2785
2786 if (unknown) {
2787 return ODP_FIT_TOO_MUCH;
2788 }
2789
2790 if ((has_md1 && nsh_hdr->md_type != NSH_M_TYPE1)
2791 || (has_md2 && nsh_hdr->md_type != NSH_M_TYPE2)) {
2792 return ODP_FIT_ERROR;
2793 }
2794
2795 /* nsh header length = NSH_BASE_HDR_LEN + mdlen */
2796 nsh_set_flags_ttl_len(nsh_hdr, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
2797
2798 return ODP_FIT_PERFECT;
2799 }
2800
2801 /* Reports the error 'msg', which is formatted as with printf().
2802 *
2803 * If 'errorp' is nonnull, then the caller wants the error report to come
2804 * directly back to it, so the function stores the error message into '*errorp'
2805 * (after first freeing it in case there's something there already).
2806 *
2807 * Otherwise, logs the message at WARN level, rate-limited. */
2808 static void OVS_PRINTF_FORMAT(3, 4)
2809 odp_parse_error(struct vlog_rate_limit *rl, char **errorp,
2810 const char *msg, ...)
2811 {
2812 if (OVS_UNLIKELY(errorp)) {
2813 free(*errorp);
2814
2815 va_list args;
2816 va_start(args, msg);
2817 *errorp = xvasprintf(msg, args);
2818 va_end(args);
2819 } else if (!VLOG_DROP_WARN(rl)) {
2820 va_list args;
2821 va_start(args, msg);
2822 char *error = xvasprintf(msg, args);
2823 va_end(args);
2824
2825 VLOG_WARN("%s", error);
2826
2827 free(error);
2828 }
2829 }
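
/* Illustrative sketch only (unused): the two reporting modes of
 * odp_parse_error().  The rate limiter and the message text are examples. */
static void OVS_UNUSED
example_odp_parse_error(void)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    char *error = NULL;

    odp_parse_error(&rl, &error, "bad attribute %d", 42); /* Into 'error'. */
    free(error);
    odp_parse_error(&rl, NULL, "bad attribute %d", 42);   /* Rate-limited log. */
}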
2830
2831 /* Parses OVS_KEY_ATTR_NSH attribute 'attr' into 'nsh' and 'nsh_mask' and
2832 * returns fitness. If the attribute is a key, 'is_mask' should be false;
2833 * if it is a mask, 'is_mask' should be true. If 'errorp' is nonnull and the
2834 * function returns ODP_FIT_ERROR, stores a malloc()'d error message in
2835 * '*errorp'. */
2836 static enum odp_key_fitness
2837 odp_nsh_key_from_attr__(const struct nlattr *attr, bool is_mask,
2838 struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask,
2839 char **errorp)
2840 {
2841 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2842 if (errorp) {
2843 *errorp = NULL;
2844 }
2845
2846 unsigned int left;
2847 const struct nlattr *a;
2848 bool unknown = false;
2849 bool has_md1 = false;
2850
2851 NL_NESTED_FOR_EACH (a, left, attr) {
2852 uint16_t type = nl_attr_type(a);
2853 size_t len = nl_attr_get_size(a);
2854 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2855 OVS_NSH_KEY_ATTR_MAX, type);
2856 if (expected_len) {
2857 if (nsh_mask) {
2858 expected_len *= 2;
2859 }
2860 if (len != expected_len) {
2861 odp_parse_error(&rl, errorp, "NSH %s attribute %"PRIu16" "
2862 "should have length %d but actually has "
2863 "%"PRIuSIZE,
2864 nsh_mask ? "mask" : "key",
2865 type, expected_len, len);
2866 return ODP_FIT_ERROR;
2867 }
2868 }
2869
2870 switch (type) {
2871 case OVS_NSH_KEY_ATTR_UNSPEC:
2872 break;
2873 case OVS_NSH_KEY_ATTR_BASE: {
2874 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2875 nsh->flags = base->flags;
2876 nsh->ttl = base->ttl;
2877 nsh->mdtype = base->mdtype;
2878 nsh->np = base->np;
2879 nsh->path_hdr = base->path_hdr;
2880 if (nsh_mask && (len == 2 * sizeof(*base))) {
2881 const struct ovs_nsh_key_base *base_mask = base + 1;
2882 nsh_mask->flags = base_mask->flags;
2883 nsh_mask->ttl = base_mask->ttl;
2884 nsh_mask->mdtype = base_mask->mdtype;
2885 nsh_mask->np = base_mask->np;
2886 nsh_mask->path_hdr = base_mask->path_hdr;
2887 }
2888 break;
2889 }
2890 case OVS_NSH_KEY_ATTR_MD1: {
2891 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2892 has_md1 = true;
2893 memcpy(nsh->context, md1->context, sizeof md1->context);
2894 if (len == 2 * sizeof(*md1)) {
2895 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
2896 memcpy(nsh_mask->context, md1_mask->context,
2897 sizeof(*md1_mask));
2898 }
2899 break;
2900 }
2901 case OVS_NSH_KEY_ATTR_MD2:
2902 default:
2903 /* Allow unknown NSH attributes to show up as unexpected, eventually
2904 * resulting in ODP_FIT_TOO_MUCH. */
2905 unknown = true;
2906 break;
2907 }
2908 }
2909
2910 if (unknown) {
2911 return ODP_FIT_TOO_MUCH;
2912 }
2913
2914 if (!is_mask && has_md1 && nsh->mdtype != NSH_M_TYPE1 && !nsh_mask) {
2915 odp_parse_error(&rl, errorp, "OVS_NSH_KEY_ATTR_MD1 present but "
2916 "declared mdtype %"PRIu8" is not %d (NSH_M_TYPE1)",
2917 nsh->mdtype, NSH_M_TYPE1);
2918 return ODP_FIT_ERROR;
2919 }
2920
2921 return ODP_FIT_PERFECT;
2922 }
2923
2924 /* Parses OVS_KEY_ATTR_NSH attribute 'attr' into 'nsh' and 'nsh_mask' and
2925 * returns fitness. The attribute should be a key (not a mask). If 'errorp'
2926 * is nonnull and the function returns ODP_FIT_ERROR, stores a malloc()'d error
2927 * message in '*errorp'. */
2928 enum odp_key_fitness
2929 odp_nsh_key_from_attr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
2930 struct ovs_key_nsh *nsh_mask, char **errorp)
2931 {
2932 return odp_nsh_key_from_attr__(attr, false, nsh, nsh_mask, errorp);
2933 }
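
/* Illustrative round-trip sketch (unused): serialize an MD type 1 NSH key
 * with nsh_key_to_attr() and parse it back with odp_nsh_key_from_attr().
 * The SPI/SI values are examples. */
static void OVS_UNUSED
example_nsh_key_round_trip(void)
{
    struct ovs_key_nsh nsh, parsed;
    struct ofpbuf buf;
    char *error = NULL;

    memset(&nsh, 0, sizeof nsh);
    nsh.mdtype = NSH_M_TYPE1;
    nsh.np = NSH_P_ETHERNET;
    nsh.ttl = 63;
    nsh.path_hdr = nsh_spi_si_to_path_hdr(0x100, 255);

    ofpbuf_init(&buf, 0);
    nsh_key_to_attr(&buf, &nsh, NULL, 0, false);

    memset(&parsed, 0, sizeof parsed);
    if (odp_nsh_key_from_attr(buf.data, &parsed, NULL, &error)
        == ODP_FIT_PERFECT) {
        /* 'parsed' now matches 'nsh' for the fields set above. */
    }
    free(error);
    ofpbuf_uninit(&buf);
}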
2934
2935 /* Parses OVS_KEY_ATTR_TUNNEL attribute 'attr' into 'tun' and returns fitness.
2936 * If the attribute is a key, 'is_mask' should be false; if it is a mask,
2937 * 'is_mask' should be true. If 'errorp' is nonnull and the function returns
2938 * ODP_FIT_ERROR, stores a malloc()'d error message in '*errorp'. */
2939 static enum odp_key_fitness
2940 odp_tun_key_from_attr__(const struct nlattr *attr, bool is_mask,
2941 struct flow_tnl *tun, char **errorp)
2942 {
2943 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2944 unsigned int left;
2945 const struct nlattr *a;
2946 bool ttl = false;
2947 bool unknown = false;
2948
2949 NL_NESTED_FOR_EACH(a, left, attr) {
2950 uint16_t type = nl_attr_type(a);
2951 size_t len = nl_attr_get_size(a);
2952 int expected_len = odp_key_attr_len(ovs_tun_key_attr_lens,
2953 OVS_TUNNEL_ATTR_MAX, type);
2954
2955 if (len != expected_len && expected_len >= 0) {
2956 odp_parse_error(&rl, errorp, "tunnel key attribute %"PRIu16" "
2957 "should have length %d but actually has %"PRIuSIZE,
2958 type, expected_len, len);
2959 return ODP_FIT_ERROR;
2960 }
2961
2962 switch (type) {
2963 case OVS_TUNNEL_KEY_ATTR_ID:
2964 tun->tun_id = nl_attr_get_be64(a);
2965 tun->flags |= FLOW_TNL_F_KEY;
2966 break;
2967 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
2968 tun->ip_src = nl_attr_get_be32(a);
2969 break;
2970 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
2971 tun->ip_dst = nl_attr_get_be32(a);
2972 break;
2973 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
2974 tun->ipv6_src = nl_attr_get_in6_addr(a);
2975 break;
2976 case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
2977 tun->ipv6_dst = nl_attr_get_in6_addr(a);
2978 break;
2979 case OVS_TUNNEL_KEY_ATTR_TOS:
2980 tun->ip_tos = nl_attr_get_u8(a);
2981 break;
2982 case OVS_TUNNEL_KEY_ATTR_TTL:
2983 tun->ip_ttl = nl_attr_get_u8(a);
2984 ttl = true;
2985 break;
2986 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
2987 tun->flags |= FLOW_TNL_F_DONT_FRAGMENT;
2988 break;
2989 case OVS_TUNNEL_KEY_ATTR_CSUM:
2990 tun->flags |= FLOW_TNL_F_CSUM;
2991 break;
2992 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
2993 tun->tp_src = nl_attr_get_be16(a);
2994 break;
2995 case OVS_TUNNEL_KEY_ATTR_TP_DST:
2996 tun->tp_dst = nl_attr_get_be16(a);
2997 break;
2998 case OVS_TUNNEL_KEY_ATTR_OAM:
2999 tun->flags |= FLOW_TNL_F_OAM;
3000 break;
3001 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
3002 static const struct nl_policy vxlan_opts_policy[] = {
3003 [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
3004 };
3005 struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
3006
3007 if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
3008 odp_parse_error(&rl, errorp, "error parsing VXLAN options");
3009 return ODP_FIT_ERROR;
3010 }
3011
3012 if (ext[OVS_VXLAN_EXT_GBP]) {
3013 uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
3014
3015 tun->gbp_id = htons(gbp & 0xFFFF);
3016 tun->gbp_flags = (gbp >> 16) & 0xFF;
3017 }
3018
3019 break;
3020 }
3021 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
3022 tun_metadata_from_geneve_nlattr(a, is_mask, tun);
3023 break;
3024 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: {
3025 const struct erspan_metadata *opts = nl_attr_get(a);
3026
3027 tun->erspan_ver = opts->version;
3028 if (tun->erspan_ver == 1) {
3029 tun->erspan_idx = ntohl(opts->u.index);
3030 } else if (tun->erspan_ver == 2) {
3031 tun->erspan_dir = opts->u.md2.dir;
3032 tun->erspan_hwid = get_hwid(&opts->u.md2);
3033 } else {
3034 VLOG_WARN("%s invalid erspan version\n", __func__);
3035 }
3036 break;
3037 }
3038
3039 default:
3040 /* Allow unknown tunnel attributes to show up as unexpected, eventually
3041 * resulting in ODP_FIT_TOO_MUCH. */
3042 unknown = true;
3043 break;
3044 }
3045 }
3046
3047 if (!ttl) {
3048 odp_parse_error(&rl, errorp, "tunnel options missing TTL");
3049 return ODP_FIT_ERROR;
3050 }
3051 if (unknown) {
3052 return ODP_FIT_TOO_MUCH;
3053 }
3054 return ODP_FIT_PERFECT;
3055 }
3056
3057 /* Parses OVS_KEY_ATTR_TUNNEL key attribute 'attr' into 'tun' and returns
3058 * fitness. The attribute should be a key (not a mask). If 'errorp' is
3059 * nonnull, stores NULL into '*errorp' on success, otherwise a malloc()'d error
3060 * message. */
3061 enum odp_key_fitness
3062 odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun,
3063 char **errorp)
3064 {
3065 if (errorp) {
3066 *errorp = NULL;
3067 }
3068 memset(tun, 0, sizeof *tun);
3069 return odp_tun_key_from_attr__(attr, false, tun, errorp);
3070 }
3071
3072 static void
3073 tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key,
3074 const struct flow_tnl *tun_flow_key,
3075 const struct ofpbuf *key_buf, const char *tnl_type)
3076 {
3077 size_t tun_key_ofs;
3078
3079 tun_key_ofs = nl_msg_start_nested(a, OVS_KEY_ATTR_TUNNEL);
3080
3081 /* tun_id != 0 without FLOW_TNL_F_KEY is valid if tun_key is a mask. */
3082 if (tun_key->tun_id || tun_key->flags & FLOW_TNL_F_KEY) {
3083 nl_msg_put_be64(a, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id);
3084 }
3085 if (tun_key->ip_src) {
3086 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ip_src);
3087 }
3088 if (tun_key->ip_dst) {
3089 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
3090 }
3091 if (ipv6_addr_is_set(&tun_key->ipv6_src)) {
3092 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_SRC, &tun_key->ipv6_src);
3093 }
3094 if (ipv6_addr_is_set(&tun_key->ipv6_dst)) {
3095 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_DST, &tun_key->ipv6_dst);
3096 }
3097 if (tun_key->ip_tos) {
3098 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
3099 }
3100 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ip_ttl);
3101 if (tun_key->flags & FLOW_TNL_F_DONT_FRAGMENT) {
3102 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
3103 }
3104 if (tun_key->flags & FLOW_TNL_F_CSUM) {
3105 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
3106 }
3107 if (tun_key->tp_src) {
3108 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src);
3109 }
3110 if (tun_key->tp_dst) {
3111 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst);
3112 }
3113 if (tun_key->flags & FLOW_TNL_F_OAM) {
3114 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
3115 }
3116
3117 /* If 'tnl_type' names a particular type of output tunnel, put only
3118 * the tunnel metadata relevant to that type into the nlattr.
3119 * If 'tnl_type' is NULL, put tunnel metadata according to
3120 * 'tun_key'.
3121 */
3122 if ((!tnl_type || !strcmp(tnl_type, "vxlan")) &&
3123 (tun_key->gbp_flags || tun_key->gbp_id)) {
3124 size_t vxlan_opts_ofs;
3125
3126 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
3127 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
3128 (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
3129 nl_msg_end_nested(a, vxlan_opts_ofs);
3130 }
3131
3132 if (!tnl_type || !strcmp(tnl_type, "geneve")) {
3133 tun_metadata_to_geneve_nlattr(tun_key, tun_flow_key, key_buf, a);
3134 }
3135
3136 if ((!tnl_type || !strcmp(tnl_type, "erspan") ||
3137 !strcmp(tnl_type, "ip6erspan")) &&
3138 (tun_key->erspan_ver == 1 || tun_key->erspan_ver == 2)) {
3139 struct erspan_metadata opts;
3140
3141 opts.version = tun_key->erspan_ver;
3142 if (opts.version == 1) {
3143 opts.u.index = htonl(tun_key->erspan_idx);
3144 } else {
3145 opts.u.md2.dir = tun_key->erspan_dir;
3146 set_hwid(&opts.u.md2, tun_key->erspan_hwid);
3147 }
3148 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
3149 &opts, sizeof(opts));
3150 }
3151
3152 nl_msg_end_nested(a, tun_key_ofs);
3153 }
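
/* Illustrative round-trip sketch (unused): serialize a minimal tunnel key
 * with tun_key_to_attr() and parse it back with odp_tun_key_from_attr().
 * The addresses, TTL, and tunnel ID are examples; "gre" is passed as the
 * tunnel type so that no option metadata is involved. */
static void OVS_UNUSED
example_tun_key_round_trip(void)
{
    struct flow_tnl tnl, parsed;
    struct ofpbuf buf;
    char *error = NULL;

    memset(&tnl, 0, sizeof tnl);
    tnl.ip_src = htonl(0x0a000001);     /* 10.0.0.1 */
    tnl.ip_dst = htonl(0x0a000002);     /* 10.0.0.2 */
    tnl.ip_ttl = 64;
    tnl.tun_id = htonll(100);
    tnl.flags = FLOW_TNL_F_KEY;

    ofpbuf_init(&buf, 0);
    tun_key_to_attr(&buf, &tnl, &tnl, NULL, "gre");

    if (odp_tun_key_from_attr(buf.data, &parsed, &error) == ODP_FIT_PERFECT) {
        /* 'parsed' now matches 'tnl' for the fields set above. */
    }
    free(error);
    ofpbuf_uninit(&buf);
}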
3154
3155 static bool
3156 odp_mask_is_constant__(enum ovs_key_attr attr, const void *mask, size_t size,
3157 int constant)
3158 {
3159 /* Convert 'constant' to all the widths we need. C conversion rules ensure
3160 * that -1 becomes all-1-bits and 0 does not change. */
3161 ovs_be16 be16 = (OVS_FORCE ovs_be16) constant;
3162 uint32_t u32 = constant;
3163 uint8_t u8 = constant;
3164 const struct in6_addr *in6 = constant ? &in6addr_exact : &in6addr_any;
3165
3166 switch (attr) {
3167 case OVS_KEY_ATTR_UNSPEC:
3168 case OVS_KEY_ATTR_ENCAP:
3169 case __OVS_KEY_ATTR_MAX:
3170 default:
3171 return false;
3172
3173 case OVS_KEY_ATTR_PRIORITY:
3174 case OVS_KEY_ATTR_IN_PORT:
3175 case OVS_KEY_ATTR_ETHERNET:
3176 case OVS_KEY_ATTR_VLAN:
3177 case OVS_KEY_ATTR_ETHERTYPE:
3178 case OVS_KEY_ATTR_IPV4:
3179 case OVS_KEY_ATTR_TCP:
3180 case OVS_KEY_ATTR_UDP:
3181 case OVS_KEY_ATTR_ICMP:
3182 case OVS_KEY_ATTR_ICMPV6:
3183 case OVS_KEY_ATTR_ND:
3184 case OVS_KEY_ATTR_ND_EXTENSIONS:
3185 case OVS_KEY_ATTR_SKB_MARK:
3186 case OVS_KEY_ATTR_TUNNEL:
3187 case OVS_KEY_ATTR_SCTP:
3188 case OVS_KEY_ATTR_DP_HASH:
3189 case OVS_KEY_ATTR_RECIRC_ID:
3190 case OVS_KEY_ATTR_MPLS:
3191 case OVS_KEY_ATTR_CT_STATE:
3192 case OVS_KEY_ATTR_CT_ZONE:
3193 case OVS_KEY_ATTR_CT_MARK:
3194 case OVS_KEY_ATTR_CT_LABELS:
3195 case OVS_KEY_ATTR_PACKET_TYPE:
3196 case OVS_KEY_ATTR_NSH:
3197 return is_all_byte(mask, size, u8);
3198
3199 case OVS_KEY_ATTR_TCP_FLAGS:
3200 return TCP_FLAGS(*(ovs_be16 *) mask) == TCP_FLAGS(be16);
3201
3202 case OVS_KEY_ATTR_IPV6: {
3203 const struct ovs_key_ipv6 *ipv6_mask = mask;
3204 return ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
3205 == htonl(IPV6_LABEL_MASK & u32)
3206 && ipv6_mask->ipv6_proto == u8
3207 && ipv6_mask->ipv6_tclass == u8
3208 && ipv6_mask->ipv6_hlimit == u8
3209 && ipv6_mask->ipv6_frag == u8
3210 && ipv6_addr_equals(&ipv6_mask->ipv6_src, in6)
3211 && ipv6_addr_equals(&ipv6_mask->ipv6_dst, in6));
3212 }
3213
3214 case OVS_KEY_ATTR_ARP:
3215 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_arp, arp_tha), u8);
3216
3217 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
3218 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv4,
3219 ipv4_proto), u8);
3220
3221 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
3222 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv6,
3223 ipv6_proto), u8);
3224 }
3225 }
3226
3227 /* The caller must already have verified that 'ma' has a correct length.
3228 *
3229 * The main purpose of this function is formatting, to allow code to figure out
3230 * whether the mask can be omitted. It doesn't try hard for attributes that
3231 * contain sub-attributes, etc., because normally those would be broken down
3232 * further for formatting. */
3233 static bool
3234 odp_mask_attr_is_wildcard(const struct nlattr *ma)
3235 {
3236 return odp_mask_is_constant__(nl_attr_type(ma),
3237 nl_attr_get(ma), nl_attr_get_size(ma), 0);
3238 }
3239
3240 /* The caller must already have verified that 'size' is a correct length for
3241 * 'attr'.
3242 *
3243 * The main purpose of this function is formatting, to allow code to figure out
3244 * whether the mask can be omitted. It doesn't try hard for attributes that
3245 * contain sub-attributes, etc., because normally those would be broken down
3246 * further for formatting. */
3247 static bool
3248 odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
3249 {
3250 return odp_mask_is_constant__(attr, mask, size, -1);
3251 }
3252
3253 /* The caller must already have verified that 'ma' has a correct length. */
3254 static bool
3255 odp_mask_attr_is_exact(const struct nlattr *ma)
3256 {
3257 enum ovs_key_attr attr = nl_attr_type(ma);
3258 return odp_mask_is_exact(attr, nl_attr_get(ma), nl_attr_get_size(ma));
3259 }
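
/* Minimal sketch (unused, for illustration) of how the two helpers above are
 * typically combined when formatting: a mask is printed only when it is
 * neither fully wildcarded nor an exact match. */
static bool OVS_UNUSED
example_mask_needs_printing(const struct nlattr *ma)
{
    return ma && !odp_mask_attr_is_wildcard(ma) && !odp_mask_attr_is_exact(ma);
}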
3260
3261 void
3262 odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
3263 char *port_name)
3264 {
3265 struct odp_portno_names *odp_portno_names;
3266
3267 odp_portno_names = xmalloc(sizeof *odp_portno_names);
3268 odp_portno_names->port_no = port_no;
3269 odp_portno_names->name = xstrdup(port_name);
3270 hmap_insert(portno_names, &odp_portno_names->hmap_node,
3271 hash_odp_port(port_no));
3272 }
3273
3274 static char *
3275 odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
3276 {
3277 if (portno_names) {
3278 struct odp_portno_names *odp_portno_names;
3279
3280 HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
3281 hash_odp_port(port_no), portno_names) {
3282 if (odp_portno_names->port_no == port_no) {
3283 return odp_portno_names->name;
3284 }
3285 }
3286 }
3287 return NULL;
3288 }
3289
3290 void
3291 odp_portno_names_destroy(struct hmap *portno_names)
3292 {
3293 struct odp_portno_names *odp_portno_names;
3294
3295 HMAP_FOR_EACH_POP (odp_portno_names, hmap_node, portno_names) {
3296 free(odp_portno_names->name);
3297 free(odp_portno_names);
3298 }
3299 }
3300
3301 void
3302 odp_portno_name_format(const struct hmap *portno_names, odp_port_t port_no,
3303 struct ds *s)
3304 {
3305 const char *name = odp_portno_names_get(portno_names, port_no);
3306 if (name) {
3307 ds_put_cstr(s, name);
3308 } else {
3309 ds_put_format(s, "%"PRIu32, port_no);
3310 }
3311 }
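
/* Illustrative sketch (unused) tying the portno_names helpers together.  The
 * port numbers and the name are made up. */
static void OVS_UNUSED
example_portno_names(void)
{
    struct hmap names = HMAP_INITIALIZER(&names);
    struct ds s = DS_EMPTY_INITIALIZER;
    char name[] = "br-int";

    odp_portno_names_set(&names, u32_to_odp(3), name);
    odp_portno_name_format(&names, u32_to_odp(3), &s);  /* Appends "br-int". */
    odp_portno_name_format(&names, u32_to_odp(7), &s);  /* Appends "7". */

    ds_destroy(&s);
    odp_portno_names_destroy(&names);
    hmap_destroy(&names);
}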
3312
3313 /* Format helpers. */
3314
3315 static void
3316 format_eth(struct ds *ds, const char *name, const struct eth_addr key,
3317 const struct eth_addr *mask, bool verbose)
3318 {
3319 bool mask_empty = mask && eth_addr_is_zero(*mask);
3320
3321 if (verbose || !mask_empty) {
3322 bool mask_full = !mask || eth_mask_is_exact(*mask);
3323
3324 if (mask_full) {
3325 ds_put_format(ds, "%s="ETH_ADDR_FMT",", name, ETH_ADDR_ARGS(key));
3326 } else {
3327 ds_put_format(ds, "%s=", name);
3328 eth_format_masked(key, mask, ds);
3329 ds_put_char(ds, ',');
3330 }
3331 }
3332 }
3333
3334
3335 static void
3336 format_be64(struct ds *ds, const char *name, ovs_be64 key,
3337 const ovs_be64 *mask, bool verbose)
3338 {
3339 bool mask_empty = mask && !*mask;
3340
3341 if (verbose || !mask_empty) {
3342 bool mask_full = !mask || *mask == OVS_BE64_MAX;
3343
3344 ds_put_format(ds, "%s=0x%"PRIx64, name, ntohll(key));
3345 if (!mask_full) { /* Partially masked. */
3346 ds_put_format(ds, "/%#"PRIx64, ntohll(*mask));
3347 }
3348 ds_put_char(ds, ',');
3349 }
3350 }
3351
3352 static void
3353 format_ipv4(struct ds *ds, const char *name, ovs_be32 key,
3354 const ovs_be32 *mask, bool verbose)
3355 {
3356 bool mask_empty = mask && !*mask;
3357
3358 if (verbose || !mask_empty) {
3359 bool mask_full = !mask || *mask == OVS_BE32_MAX;
3360
3361 ds_put_format(ds, "%s="IP_FMT, name, IP_ARGS(key));
3362 if (!mask_full) { /* Partially masked. */
3363 ds_put_format(ds, "/"IP_FMT, IP_ARGS(*mask));
3364 }
3365 ds_put_char(ds, ',');
3366 }
3367 }
3368
3369 static void
3370 format_in6_addr(struct ds *ds, const char *name,
3371 const struct in6_addr *key,
3372 const struct in6_addr *mask,
3373 bool verbose)
3374 {
3375 char buf[INET6_ADDRSTRLEN];
3376 bool mask_empty = mask && ipv6_mask_is_any(mask);
3377
3378 if (verbose || !mask_empty) {
3379 bool mask_full = !mask || ipv6_mask_is_exact(mask);
3380
3381 inet_ntop(AF_INET6, key, buf, sizeof buf);
3382 ds_put_format(ds, "%s=%s", name, buf);
3383 if (!mask_full) { /* Partially masked. */
3384 inet_ntop(AF_INET6, mask, buf, sizeof buf);
3385 ds_put_format(ds, "/%s", buf);
3386 }
3387 ds_put_char(ds, ',');
3388 }
3389 }
3390
3391 static void
3392 format_ipv6_label(struct ds *ds, const char *name, ovs_be32 key,
3393 const ovs_be32 *mask, bool verbose)
3394 {
3395 bool mask_empty = mask && !*mask;
3396
3397 if (verbose || !mask_empty) {
3398 bool mask_full = !mask
3399 || (*mask & htonl(IPV6_LABEL_MASK)) == htonl(IPV6_LABEL_MASK);
3400
3401 ds_put_format(ds, "%s=%#"PRIx32, name, ntohl(key));
3402 if (!mask_full) { /* Partially masked. */
3403 ds_put_format(ds, "/%#"PRIx32, ntohl(*mask));
3404 }
3405 ds_put_char(ds, ',');
3406 }
3407 }
3408
3409 static void
3410 format_u8x(struct ds *ds, const char *name, uint8_t key,
3411 const uint8_t *mask, bool verbose)
3412 {
3413 bool mask_empty = mask && !*mask;
3414
3415 if (verbose || !mask_empty) {
3416 bool mask_full = !mask || *mask == UINT8_MAX;
3417
3418 ds_put_format(ds, "%s=%#"PRIx8, name, key);
3419 if (!mask_full) { /* Partially masked. */
3420 ds_put_format(ds, "/%#"PRIx8, *mask);
3421 }
3422 ds_put_char(ds, ',');
3423 }
3424 }
3425
3426 static void
3427 format_u8u(struct ds *ds, const char *name, uint8_t key,
3428 const uint8_t *mask, bool verbose)
3429 {
3430 bool mask_empty = mask && !*mask;
3431
3432 if (verbose || !mask_empty) {
3433 bool mask_full = !mask || *mask == UINT8_MAX;
3434
3435 ds_put_format(ds, "%s=%"PRIu8, name, key);
3436 if (!mask_full) { /* Partially masked. */
3437 ds_put_format(ds, "/%#"PRIx8, *mask);
3438 }
3439 ds_put_char(ds, ',');
3440 }
3441 }
3442
3443 static void
3444 format_be16(struct ds *ds, const char *name, ovs_be16 key,
3445 const ovs_be16 *mask, bool verbose)
3446 {
3447 bool mask_empty = mask && !*mask;
3448
3449 if (verbose || !mask_empty) {
3450 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3451
3452 ds_put_format(ds, "%s=%"PRIu16, name, ntohs(key));
3453 if (!mask_full) { /* Partially masked. */
3454 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3455 }
3456 ds_put_char(ds, ',');
3457 }
3458 }
3459
3460 static void
3461 format_be16x(struct ds *ds, const char *name, ovs_be16 key,
3462 const ovs_be16 *mask, bool verbose)
3463 {
3464 bool mask_empty = mask && !*mask;
3465
3466 if (verbose || !mask_empty) {
3467 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3468
3469 ds_put_format(ds, "%s=%#"PRIx16, name, ntohs(key));
3470 if (!mask_full) { /* Partially masked. */
3471 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3472 }
3473 ds_put_char(ds, ',');
3474 }
3475 }
3476
3477 static void
3478 format_tun_flags(struct ds *ds, const char *name, uint16_t key,
3479 const uint16_t *mask, bool verbose)
3480 {
3481 bool mask_empty = mask && !*mask;
3482
3483 if (verbose || !mask_empty) {
3484 ds_put_cstr(ds, name);
3485 ds_put_char(ds, '(');
3486 if (mask) {
3487 format_flags_masked(ds, NULL, flow_tun_flag_to_string, key,
3488 *mask & FLOW_TNL_F_MASK, FLOW_TNL_F_MASK);
3489 } else { /* Fully masked. */
3490 format_flags(ds, flow_tun_flag_to_string, key, '|');
3491 }
3492 ds_put_cstr(ds, "),");
3493 }
3494 }
3495
3496 static bool
3497 check_attr_len(struct ds *ds, const struct nlattr *a, const struct nlattr *ma,
3498 const struct attr_len_tbl tbl[], int max_type, bool need_key)
3499 {
3500 int expected_len;
3501
3502 expected_len = odp_key_attr_len(tbl, max_type, nl_attr_type(a));
3503 if (expected_len != ATTR_LEN_VARIABLE &&
3504 expected_len != ATTR_LEN_NESTED) {
3505
3506 bool bad_key_len = nl_attr_get_size(a) != expected_len;
3507 bool bad_mask_len = ma && nl_attr_get_size(ma) != expected_len;
3508
3509 if (bad_key_len || bad_mask_len) {
3510 if (need_key) {
3511 ds_put_format(ds, "key%u", nl_attr_type(a));
3512 }
3513 if (bad_key_len) {
3514 ds_put_format(ds, "(bad key length %"PRIuSIZE", expected %d)(",
3515 nl_attr_get_size(a), expected_len);
3516 }
3517 format_generic_odp_key(a, ds);
3518 if (ma) {
3519 ds_put_char(ds, '/');
3520 if (bad_mask_len) {
3521 ds_put_format(ds, "(bad mask length %"PRIuSIZE", expected %d)(",
3522 nl_attr_get_size(ma), expected_len);
3523 }
3524 format_generic_odp_key(ma, ds);
3525 }
3526 ds_put_char(ds, ')');
3527 return false;
3528 }
3529 }
3530
3531 return true;
3532 }
3533
3534 static void
3535 format_unknown_key(struct ds *ds, const struct nlattr *a,
3536 const struct nlattr *ma)
3537 {
3538 ds_put_format(ds, "key%u(", nl_attr_type(a));
3539 format_generic_odp_key(a, ds);
3540 if (ma && !odp_mask_attr_is_exact(ma)) {
3541 ds_put_char(ds, '/');
3542 format_generic_odp_key(ma, ds);
3543 }
3544 ds_put_cstr(ds, "),");
3545 }
3546
3547 static void
3548 format_odp_tun_vxlan_opt(const struct nlattr *attr,
3549 const struct nlattr *mask_attr, struct ds *ds,
3550 bool verbose)
3551 {
3552 unsigned int left;
3553 const struct nlattr *a;
3554 struct ofpbuf ofp;
3555
3556 ofpbuf_init(&ofp, 100);
3557 NL_NESTED_FOR_EACH(a, left, attr) {
3558 uint16_t type = nl_attr_type(a);
3559 const struct nlattr *ma = NULL;
3560
3561 if (mask_attr) {
3562 ma = nl_attr_find__(nl_attr_get(mask_attr),
3563 nl_attr_get_size(mask_attr), type);
3564 if (!ma) {
3565 ma = generate_all_wildcard_mask(ovs_vxlan_ext_attr_lens,
3566 OVS_VXLAN_EXT_MAX,
3567 &ofp, a);
3568 }
3569 }
3570
3571 if (!check_attr_len(ds, a, ma, ovs_vxlan_ext_attr_lens,
3572 OVS_VXLAN_EXT_MAX, true)) {
3573 continue;
3574 }
3575
3576 switch (type) {
3577 case OVS_VXLAN_EXT_GBP: {
3578 uint32_t key = nl_attr_get_u32(a);
3579 ovs_be16 id, id_mask;
3580 uint8_t flags, flags_mask = 0;
3581
3582 id = htons(key & 0xFFFF);
3583 flags = (key >> 16) & 0xFF;
3584 if (ma) {
3585 uint32_t mask = nl_attr_get_u32(ma);
3586 id_mask = htons(mask & 0xFFFF);
3587 flags_mask = (mask >> 16) & 0xFF;
3588 }
3589
3590 ds_put_cstr(ds, "gbp(");
3591 format_be16(ds, "id", id, ma ? &id_mask : NULL, verbose);
3592 format_u8x(ds, "flags", flags, ma ? &flags_mask : NULL, verbose);
3593 ds_chomp(ds, ',');
3594 ds_put_cstr(ds, "),");
3595 break;
3596 }
3597
3598 default:
3599 format_unknown_key(ds, a, ma);
3600 }
3601 ofpbuf_clear(&ofp);
3602 }
3603
3604 ds_chomp(ds, ',');
3605 ofpbuf_uninit(&ofp);
3606 }
3607
3608 static void
3609 format_odp_tun_erspan_opt(const struct nlattr *attr,
3610 const struct nlattr *mask_attr, struct ds *ds,
3611 bool verbose)
3612 {
3613 const struct erspan_metadata *opts, *mask;
3614 uint8_t ver, ver_ma, dir, dir_ma, hwid, hwid_ma;
3615
3616 opts = nl_attr_get(attr);
3617 mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
3618
3619 ver = (uint8_t)opts->version;
3620 if (mask) {
3621 ver_ma = (uint8_t)mask->version;
3622 }
3623
3624 format_u8u(ds, "ver", ver, mask ? &ver_ma : NULL, verbose);
3625
3626 if (opts->version == 1) {
3627 if (mask) {
3628 ds_put_format(ds, "idx=%#"PRIx32"/%#"PRIx32",",
3629 ntohl(opts->u.index),
3630 ntohl(mask->u.index));
3631 } else {
3632 ds_put_format(ds, "idx=%#"PRIx32",", ntohl(opts->u.index));
3633 }
3634 } else if (opts->version == 2) {
3635 dir = opts->u.md2.dir;
3636 hwid = opts->u.md2.hwid;
3637 if (mask) {
3638 dir_ma = mask->u.md2.dir;
3639 hwid_ma = mask->u.md2.hwid;
3640 }
3641
3642 format_u8u(ds, "dir", dir, mask ? &dir_ma : NULL, verbose);
3643 format_u8x(ds, "hwid", hwid, mask ? &hwid_ma : NULL, verbose);
3644 }
3645 ds_chomp(ds, ',');
3646 }
3647
3648 #define MASK(PTR, FIELD) PTR ? &PTR->FIELD : NULL
3649
3650 static void
3651 format_geneve_opts(const struct geneve_opt *opt,
3652 const struct geneve_opt *mask, int opts_len,
3653 struct ds *ds, bool verbose)
3654 {
3655 while (opts_len > 0) {
3656 unsigned int len;
3657 uint8_t data_len, data_len_mask;
3658
3659 if (opts_len < sizeof *opt) {
3660 ds_put_format(ds, "opt len %u less than minimum %"PRIuSIZE,
3661 opts_len, sizeof *opt);
3662 return;
3663 }
3664
3665 data_len = opt->length * 4;
3666 if (mask) {
3667 if (mask->length == 0x1f) {
3668 data_len_mask = UINT8_MAX;
3669 } else {
3670 data_len_mask = mask->length;
3671 }
3672 }
3673 len = sizeof *opt + data_len;
3674 if (len > opts_len) {
3675 ds_put_format(ds, "opt len %u greater than remaining %u",
3676 len, opts_len);
3677 return;
3678 }
3679
3680 ds_put_char(ds, '{');
3681 format_be16x(ds, "class", opt->opt_class, MASK(mask, opt_class),
3682 verbose);
3683 format_u8x(ds, "type", opt->type, MASK(mask, type), verbose);
3684 format_u8u(ds, "len", data_len, mask ? &data_len_mask : NULL, verbose);
3685 if (data_len &&
3686 (verbose || !mask || !is_all_zeros(mask + 1, data_len))) {
3687 ds_put_hex(ds, opt + 1, data_len);
3688 if (mask && !is_all_ones(mask + 1, data_len)) {
3689 ds_put_char(ds, '/');
3690 ds_put_hex(ds, mask + 1, data_len);
3691 }
3692 } else {
3693 ds_chomp(ds, ',');
3694 }
3695 ds_put_char(ds, '}');
3696
3697 opt += len / sizeof(*opt);
3698 if (mask) {
3699 mask += len / sizeof(*opt);
3700 }
3701 opts_len -= len;
3702 }
3703 }
3704
3705 static void
3706 format_odp_tun_geneve(const struct nlattr *attr,
3707 const struct nlattr *mask_attr, struct ds *ds,
3708 bool verbose)
3709 {
3710 int opts_len = nl_attr_get_size(attr);
3711 const struct geneve_opt *opt = nl_attr_get(attr);
3712 const struct geneve_opt *mask = mask_attr ?
3713 nl_attr_get(mask_attr) : NULL;
3714
3715 if (mask && nl_attr_get_size(attr) != nl_attr_get_size(mask_attr)) {
3716 ds_put_format(ds, "value len %"PRIuSIZE" different from mask len %"PRIuSIZE,
3717 nl_attr_get_size(attr), nl_attr_get_size(mask_attr));
3718 return;
3719 }
3720
3721 format_geneve_opts(opt, mask, opts_len, ds, verbose);
3722 }
3723
3724 static void
3725 format_odp_nsh_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3726 struct ds *ds)
3727 {
3728 unsigned int left;
3729 const struct nlattr *a;
3730 struct ovs_key_nsh nsh;
3731 struct ovs_key_nsh nsh_mask;
3732
3733 memset(&nsh, 0, sizeof nsh);
3734 memset(&nsh_mask, 0xff, sizeof nsh_mask);
3735
3736 NL_NESTED_FOR_EACH (a, left, attr) {
3737 enum ovs_nsh_key_attr type = nl_attr_type(a);
3738 const struct nlattr *ma = NULL;
3739
3740 if (mask_attr) {
3741 ma = nl_attr_find__(nl_attr_get(mask_attr),
3742 nl_attr_get_size(mask_attr), type);
3743 }
3744
3745 if (!check_attr_len(ds, a, ma, ovs_nsh_key_attr_lens,
3746 OVS_NSH_KEY_ATTR_MAX, true)) {
3747 continue;
3748 }
3749
3750 switch (type) {
3751 case OVS_NSH_KEY_ATTR_UNSPEC:
3752 break;
3753 case OVS_NSH_KEY_ATTR_BASE: {
3754 const struct ovs_nsh_key_base *base = nl_attr_get(a);
3755 const struct ovs_nsh_key_base *base_mask
3756 = ma ? nl_attr_get(ma) : NULL;
3757 nsh.flags = base->flags;
3758 nsh.ttl = base->ttl;
3759 nsh.mdtype = base->mdtype;
3760 nsh.np = base->np;
3761 nsh.path_hdr = base->path_hdr;
3762 if (base_mask) {
3763 nsh_mask.flags = base_mask->flags;
3764 nsh_mask.ttl = base_mask->ttl;
3765 nsh_mask.mdtype = base_mask->mdtype;
3766 nsh_mask.np = base_mask->np;
3767 nsh_mask.path_hdr = base_mask->path_hdr;
3768 }
3769 break;
3770 }
3771 case OVS_NSH_KEY_ATTR_MD1: {
3772 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
3773 const struct ovs_nsh_key_md1 *md1_mask
3774 = ma ? nl_attr_get(ma) : NULL;
3775 memcpy(nsh.context, md1->context, sizeof md1->context);
3776 if (md1_mask) {
3777 memcpy(nsh_mask.context, md1_mask->context,
3778 sizeof md1_mask->context);
3779 }
3780 break;
3781 }
3782 case OVS_NSH_KEY_ATTR_MD2:
3783 case __OVS_NSH_KEY_ATTR_MAX:
3784 default:
3785 /* No support for matching other metadata formats yet. */
3786 break;
3787 }
3788 }
3789
3790 if (mask_attr) {
3791 format_nsh_key_mask(ds, &nsh, &nsh_mask);
3792 } else {
3793 format_nsh_key(ds, &nsh);
3794 }
3795 }
3796
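/* Formats the OVS_TUNNEL_KEY_ATTR_* attributes nested inside 'attr', with
 * masks taken from 'mask_attr' if nonnull.  A key attribute that has no
 * counterpart in 'mask_attr' gets an all-wildcard mask generated on the fly.
 * Flag attributes (df, csum, oam) are collected separately and printed
 * together as "flags(...)" at the end. */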
3797 static void
3798 format_odp_tun_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3799 struct ds *ds, bool verbose)
3800 {
3801 unsigned int left;
3802 const struct nlattr *a;
3803 uint16_t flags = 0;
3804 uint16_t mask_flags = 0;
3805 struct ofpbuf ofp;
3806
3807 ofpbuf_init(&ofp, 100);
3808 NL_NESTED_FOR_EACH(a, left, attr) {
3809 enum ovs_tunnel_key_attr type = nl_attr_type(a);
3810 const struct nlattr *ma = NULL;
3811
3812 if (mask_attr) {
3813 ma = nl_attr_find__(nl_attr_get(mask_attr),
3814 nl_attr_get_size(mask_attr), type);
3815 if (!ma) {
3816 ma = generate_all_wildcard_mask(ovs_tun_key_attr_lens,
3817 OVS_TUNNEL_KEY_ATTR_MAX,
3818 &ofp, a);
3819 }
3820 }
3821
3822 if (!check_attr_len(ds, a, ma, ovs_tun_key_attr_lens,
3823 OVS_TUNNEL_KEY_ATTR_MAX, true)) {
3824 continue;
3825 }
3826
3827 switch (type) {
3828 case OVS_TUNNEL_KEY_ATTR_ID:
3829 format_be64(ds, "tun_id", nl_attr_get_be64(a),
3830 ma ? nl_attr_get(ma) : NULL, verbose);
3831 flags |= FLOW_TNL_F_KEY;
3832 if (ma) {
3833 mask_flags |= FLOW_TNL_F_KEY;
3834 }
3835 break;
3836 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
3837 format_ipv4(ds, "src", nl_attr_get_be32(a),
3838 ma ? nl_attr_get(ma) : NULL, verbose);
3839 break;
3840 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
3841 format_ipv4(ds, "dst", nl_attr_get_be32(a),
3842 ma ? nl_attr_get(ma) : NULL, verbose);
3843 break;
3844 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
3845 struct in6_addr ipv6_src;
3846 ipv6_src = nl_attr_get_in6_addr(a);
3847 format_in6_addr(ds, "ipv6_src", &ipv6_src,
3848 ma ? nl_attr_get(ma) : NULL, verbose);
3849 break;
3850 }
3851 case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
3852 struct in6_addr ipv6_dst;
3853 ipv6_dst = nl_attr_get_in6_addr(a);
3854 format_in6_addr(ds, "ipv6_dst", &ipv6_dst,
3855 ma ? nl_attr_get(ma) : NULL, verbose);
3856 break;
3857 }
3858 case OVS_TUNNEL_KEY_ATTR_TOS:
3859 format_u8x(ds, "tos", nl_attr_get_u8(a),
3860 ma ? nl_attr_get(ma) : NULL, verbose);
3861 break;
3862 case OVS_TUNNEL_KEY_ATTR_TTL:
3863 format_u8u(ds, "ttl", nl_attr_get_u8(a),
3864 ma ? nl_attr_get(ma) : NULL, verbose);
3865 break;
3866 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3867 flags |= FLOW_TNL_F_DONT_FRAGMENT;
3868 break;
3869 case OVS_TUNNEL_KEY_ATTR_CSUM:
3870 flags |= FLOW_TNL_F_CSUM;
3871 break;
3872 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
3873 format_be16(ds, "tp_src", nl_attr_get_be16(a),
3874 ma ? nl_attr_get(ma) : NULL, verbose);
3875 break;
3876 case OVS_TUNNEL_KEY_ATTR_TP_DST:
3877 format_be16(ds, "tp_dst", nl_attr_get_be16(a),
3878 ma ? nl_attr_get(ma) : NULL, verbose);
3879 break;
3880 case OVS_TUNNEL_KEY_ATTR_OAM:
3881 flags |= FLOW_TNL_F_OAM;
3882 break;
3883 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
3884 ds_put_cstr(ds, "vxlan(");
3885 format_odp_tun_vxlan_opt(a, ma, ds, verbose);
3886 ds_put_cstr(ds, "),");
3887 break;
3888 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
3889 ds_put_cstr(ds, "geneve(");
3890 format_odp_tun_geneve(a, ma, ds, verbose);
3891 ds_put_cstr(ds, "),");
3892 break;
3893 case OVS_TUNNEL_KEY_ATTR_PAD:
3894 break;
3895 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
3896 ds_put_cstr(ds, "erspan(");
3897 format_odp_tun_erspan_opt(a, ma, ds, verbose);
3898 ds_put_cstr(ds, "),");
3899 break;
3900 case __OVS_TUNNEL_KEY_ATTR_MAX:
3901 default:
3902 format_unknown_key(ds, a, ma);
3903 }
3904 ofpbuf_clear(&ofp);
3905 }
3906
3907 /* Flags can have a valid mask even if the attribute is not set, so
3908 * we need to collect these separately. */
3909 if (mask_attr) {
3910 NL_NESTED_FOR_EACH(a, left, mask_attr) {
3911 switch (nl_attr_type(a)) {
3912 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3913 mask_flags |= FLOW_TNL_F_DONT_FRAGMENT;
3914 break;
3915 case OVS_TUNNEL_KEY_ATTR_CSUM:
3916 mask_flags |= FLOW_TNL_F_CSUM;
3917 break;
3918 case OVS_TUNNEL_KEY_ATTR_OAM:
3919 mask_flags |= FLOW_TNL_F_OAM;
3920 break;
3921 }
3922 }
3923 }
3924
3925 format_tun_flags(ds, "flags", flags, mask_attr ? &mask_flags : NULL,
3926 verbose);
3927 ds_chomp(ds, ',');
3928 ofpbuf_uninit(&ofp);
3929 }
3930
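/* Returns the abbreviation used in flow dumps ("trk", "new", "est", ...) for
 * the single OVS_CS_F_* bit 'flag', or NULL if the bit is not recognized. */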
3931 static const char *
3932 odp_ct_state_to_string(uint32_t flag)
3933 {
3934 switch (flag) {
3935 case OVS_CS_F_REPLY_DIR:
3936 return "rpl";
3937 case OVS_CS_F_TRACKED:
3938 return "trk";
3939 case OVS_CS_F_NEW:
3940 return "new";
3941 case OVS_CS_F_ESTABLISHED:
3942 return "est";
3943 case OVS_CS_F_RELATED:
3944 return "rel";
3945 case OVS_CS_F_INVALID:
3946 return "inv";
3947 case OVS_CS_F_SRC_NAT:
3948 return "snat";
3949 case OVS_CS_F_DST_NAT:
3950 return "dnat";
3951 default:
3952 return NULL;
3953 }
3954 }
3955
3956 static void
3957 format_frag(struct ds *ds, const char *name, uint8_t key,
3958 const uint8_t *mask, bool verbose OVS_UNUSED)
3959 {
3960 bool mask_empty = mask && !*mask;
3961 bool mask_full = !mask || *mask == UINT8_MAX;
3962
3963 /* ODP frag is an enumeration field; partial masks are not meaningful. */
3964 if (!mask_empty && !mask_full) {
3965 ds_put_format(ds, "error: partial mask not supported for frag (%#"
3966 PRIx8"),", *mask);
3967 } else if (!mask_empty) {
3968 ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
3969 }
3970 }
3971
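/* Returns true if 'ma' is null or if its payload is all zeroes, i.e. if it
 * does not constrain any bits of the corresponding key attribute. */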
3972 static bool
3973 mask_empty(const struct nlattr *ma)
3974 {
3975 const void *mask;
3976 size_t n;
3977
3978 if (!ma) {
3979 return true;
3980 }
3981 mask = nl_attr_get(ma);
3982 n = nl_attr_get_size(ma);
3983
3984 return is_all_zeros(mask, n);
3985 }
3986
3987 /* The caller must have already verified that 'a' and 'ma' have correct
3988 * lengths. */
3989 static void
3990 format_odp_key_attr__(const struct nlattr *a, const struct nlattr *ma,
3991 const struct hmap *portno_names, struct ds *ds,
3992 bool verbose)
3993 {
3994 enum ovs_key_attr attr = nl_attr_type(a);
3995 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3996 bool is_exact;
3997
3998 is_exact = ma ? odp_mask_attr_is_exact(ma) : true;
3999
4000 ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
4001
4002 ds_put_char(ds, '(');
4003 switch (attr) {
4004 case OVS_KEY_ATTR_ENCAP:
4005 if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
4006 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
4007 nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
4008 verbose);
4009 } else if (nl_attr_get_size(a)) {
4010 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
4011 ds, verbose);
4012 }
4013 break;
4014
4015 case OVS_KEY_ATTR_PRIORITY:
4016 case OVS_KEY_ATTR_SKB_MARK:
4017 case OVS_KEY_ATTR_DP_HASH:
4018 case OVS_KEY_ATTR_RECIRC_ID:
4019 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4020 if (!is_exact) {
4021 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4022 }
4023 break;
4024
4025 case OVS_KEY_ATTR_CT_MARK:
4026 if (verbose || !mask_empty(ma)) {
4027 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4028 if (!is_exact) {
4029 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4030 }
4031 }
4032 break;
4033
4034 case OVS_KEY_ATTR_CT_STATE:
4035 if (verbose) {
4036 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
4037 if (!is_exact) {
4038 ds_put_format(ds, "/%#"PRIx32,
4039 mask_empty(ma) ? 0 : nl_attr_get_u32(ma));
4040 }
4041 } else if (!is_exact) {
4042 format_flags_masked(ds, NULL, odp_ct_state_to_string,
4043 nl_attr_get_u32(a),
4044 mask_empty(ma) ? 0 : nl_attr_get_u32(ma),
4045 UINT32_MAX);
4046 } else {
4047 format_flags(ds, odp_ct_state_to_string, nl_attr_get_u32(a), '|');
4048 }
4049 break;
4050
4051 case OVS_KEY_ATTR_CT_ZONE:
4052 if (verbose || !mask_empty(ma)) {
4053 ds_put_format(ds, "%#"PRIx16, nl_attr_get_u16(a));
4054 if (!is_exact) {
4055 ds_put_format(ds, "/%#"PRIx16, nl_attr_get_u16(ma));
4056 }
4057 }
4058 break;
4059
4060 case OVS_KEY_ATTR_CT_LABELS: {
4061 const ovs_32aligned_u128 *value = nl_attr_get(a);
4062 const ovs_32aligned_u128 *mask = ma ? nl_attr_get(ma) : NULL;
4063
4064 format_u128(ds, value, mask, verbose);
4065 break;
4066 }
4067
4068 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
4069 const struct ovs_key_ct_tuple_ipv4 *key = nl_attr_get(a);
4070 const struct ovs_key_ct_tuple_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
4071
4072 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
4073 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
4074 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
4075 verbose);
4076 format_be16(ds, "tp_src", key->src_port, MASK(mask, src_port),
4077 verbose);
4078 format_be16(ds, "tp_dst", key->dst_port, MASK(mask, dst_port),
4079 verbose);
4080 ds_chomp(ds, ',');
4081 break;
4082 }
4083
4084 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
4085 const struct ovs_key_ct_tuple_ipv6 *key = nl_attr_get(a);
4086 const struct ovs_key_ct_tuple_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
4087
4088 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
4089 verbose);
4090 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
4091 verbose);
4092 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
4093 verbose);
4094 format_be16(ds, "src_port", key->src_port, MASK(mask, src_port),
4095 verbose);
4096 format_be16(ds, "dst_port", key->dst_port, MASK(mask, dst_port),
4097 verbose);
4098 ds_chomp(ds, ',');
4099 break;
4100 }
4101
4102 case OVS_KEY_ATTR_TUNNEL:
4103 format_odp_tun_attr(a, ma, ds, verbose);
4104 break;
4105
4106 case OVS_KEY_ATTR_IN_PORT:
4107 if (is_exact) {
4108 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
4109 } else {
4110 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
4111 if (!is_exact) {
4112 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
4113 }
4114 }
4115 break;
4116
4117 case OVS_KEY_ATTR_PACKET_TYPE: {
4118 ovs_be32 value = nl_attr_get_be32(a);
4119 ovs_be32 mask = ma ? nl_attr_get_be32(ma) : OVS_BE32_MAX;
4120
4121 ovs_be16 ns = htons(pt_ns(value));
4122 ovs_be16 ns_mask = htons(pt_ns(mask));
4123 format_be16(ds, "ns", ns, &ns_mask, verbose);
4124
4125 ovs_be16 ns_type = pt_ns_type_be(value);
4126 ovs_be16 ns_type_mask = pt_ns_type_be(mask);
4127 format_be16x(ds, "id", ns_type, &ns_type_mask, verbose);
4128
4129 ds_chomp(ds, ',');
4130 break;
4131 }
4132
4133 case OVS_KEY_ATTR_ETHERNET: {
4134 const struct ovs_key_ethernet *mask = ma ? nl_attr_get(ma) : NULL;
4135 const struct ovs_key_ethernet *key = nl_attr_get(a);
4136
4137 format_eth(ds, "src", key->eth_src, MASK(mask, eth_src), verbose);
4138 format_eth(ds, "dst", key->eth_dst, MASK(mask, eth_dst), verbose);
4139 ds_chomp(ds, ',');
4140 break;
4141 }
4142 case OVS_KEY_ATTR_VLAN:
4143 format_vlan_tci(ds, nl_attr_get_be16(a),
4144 ma ? nl_attr_get_be16(ma) : OVS_BE16_MAX, verbose);
4145 break;
4146
4147 case OVS_KEY_ATTR_MPLS: {
4148 const struct ovs_key_mpls *mpls_key = nl_attr_get(a);
4149 const struct ovs_key_mpls *mpls_mask = NULL;
4150 size_t size = nl_attr_get_size(a);
4151
4152 if (!size || size % sizeof *mpls_key) {
4153 ds_put_format(ds, "(bad key length %"PRIuSIZE")", size);
4154 return;
4155 }
4156 if (!is_exact) {
4157 mpls_mask = nl_attr_get(ma);
4158 if (size != nl_attr_get_size(ma)) {
4159 ds_put_format(ds, "(key length %"PRIuSIZE" != "
4160 "mask length %"PRIuSIZE")",
4161 size, nl_attr_get_size(ma));
4162 return;
4163 }
4164 }
4165 format_mpls(ds, mpls_key, mpls_mask, size / sizeof *mpls_key);
4166 break;
4167 }
4168 case OVS_KEY_ATTR_ETHERTYPE:
4169 ds_put_format(ds, "0x%04"PRIx16, ntohs(nl_attr_get_be16(a)));
4170 if (!is_exact) {
4171 ds_put_format(ds, "/0x%04"PRIx16, ntohs(nl_attr_get_be16(ma)));
4172 }
4173 break;
4174
4175 case OVS_KEY_ATTR_IPV4: {
4176 const struct ovs_key_ipv4 *key = nl_attr_get(a);
4177 const struct ovs_key_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
4178
4179 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
4180 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
4181 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
4182 verbose);
4183 format_u8x(ds, "tos", key->ipv4_tos, MASK(mask, ipv4_tos), verbose);
4184 format_u8u(ds, "ttl", key->ipv4_ttl, MASK(mask, ipv4_ttl), verbose);
4185 format_frag(ds, "frag", key->ipv4_frag, MASK(mask, ipv4_frag),
4186 verbose);
4187 ds_chomp(ds, ',');
4188 break;
4189 }
4190 case OVS_KEY_ATTR_IPV6: {
4191 const struct ovs_key_ipv6 *key = nl_attr_get(a);
4192 const struct ovs_key_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
4193
4194 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
4195 verbose);
4196 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
4197 verbose);
4198 format_ipv6_label(ds, "label", key->ipv6_label, MASK(mask, ipv6_label),
4199 verbose);
4200 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
4201 verbose);
4202 format_u8x(ds, "tclass", key->ipv6_tclass, MASK(mask, ipv6_tclass),
4203 verbose);
4204 format_u8u(ds, "hlimit", key->ipv6_hlimit, MASK(mask, ipv6_hlimit),
4205 verbose);
4206 format_frag(ds, "frag", key->ipv6_frag, MASK(mask, ipv6_frag),
4207 verbose);
4208 ds_chomp(ds, ',');
4209 break;
4210 }
4211 /* These have the same structure and format. */
4212 case OVS_KEY_ATTR_TCP:
4213 case OVS_KEY_ATTR_UDP:
4214 case OVS_KEY_ATTR_SCTP: {
4215 const struct ovs_key_tcp *key = nl_attr_get(a);
4216 const struct ovs_key_tcp *mask = ma ? nl_attr_get(ma) : NULL;
4217
4218 format_be16(ds, "src", key->tcp_src, MASK(mask, tcp_src), verbose);
4219 format_be16(ds, "dst", key->tcp_dst, MASK(mask, tcp_dst), verbose);
4220 ds_chomp(ds, ',');
4221 break;
4222 }
4223 case OVS_KEY_ATTR_TCP_FLAGS:
4224 if (!is_exact) {
4225 format_flags_masked(ds, NULL, packet_tcp_flag_to_string,
4226 ntohs(nl_attr_get_be16(a)),
4227 TCP_FLAGS(nl_attr_get_be16(ma)),
4228 TCP_FLAGS(OVS_BE16_MAX));
4229 } else {
4230 format_flags(ds, packet_tcp_flag_to_string,
4231 ntohs(nl_attr_get_be16(a)), '|');
4232 }
4233 break;
4234
4235 case OVS_KEY_ATTR_ICMP: {
4236 const struct ovs_key_icmp *key = nl_attr_get(a);
4237 const struct ovs_key_icmp *mask = ma ? nl_attr_get(ma) : NULL;
4238
4239 format_u8u(ds, "type", key->icmp_type, MASK(mask, icmp_type), verbose);
4240 format_u8u(ds, "code", key->icmp_code, MASK(mask, icmp_code), verbose);
4241 ds_chomp(ds, ',');
4242 break;
4243 }
4244 case OVS_KEY_ATTR_ICMPV6: {
4245 const struct ovs_key_icmpv6 *key = nl_attr_get(a);
4246 const struct ovs_key_icmpv6 *mask = ma ? nl_attr_get(ma) : NULL;
4247
4248 format_u8u(ds, "type", key->icmpv6_type, MASK(mask, icmpv6_type),
4249 verbose);
4250 format_u8u(ds, "code", key->icmpv6_code, MASK(mask, icmpv6_code),
4251 verbose);
4252 ds_chomp(ds, ',');
4253 break;
4254 }
4255 case OVS_KEY_ATTR_ARP: {
4256 const struct ovs_key_arp *mask = ma ? nl_attr_get(ma) : NULL;
4257 const struct ovs_key_arp *key = nl_attr_get(a);
4258
4259 format_ipv4(ds, "sip", key->arp_sip, MASK(mask, arp_sip), verbose);
4260 format_ipv4(ds, "tip", key->arp_tip, MASK(mask, arp_tip), verbose);
4261 format_be16(ds, "op", key->arp_op, MASK(mask, arp_op), verbose);
4262 format_eth(ds, "sha", key->arp_sha, MASK(mask, arp_sha), verbose);
4263 format_eth(ds, "tha", key->arp_tha, MASK(mask, arp_tha), verbose);
4264 ds_chomp(ds, ',');
4265 break;
4266 }
4267 case OVS_KEY_ATTR_ND: {
4268 const struct ovs_key_nd *mask = ma ? nl_attr_get(ma) : NULL;
4269 const struct ovs_key_nd *key = nl_attr_get(a);
4270
4271 format_in6_addr(ds, "target", &key->nd_target, MASK(mask, nd_target),
4272 verbose);
4273 format_eth(ds, "sll", key->nd_sll, MASK(mask, nd_sll), verbose);
4274 format_eth(ds, "tll", key->nd_tll, MASK(mask, nd_tll), verbose);
4275
4276 ds_chomp(ds, ',');
4277 break;
4278 }
4279 case OVS_KEY_ATTR_ND_EXTENSIONS: {
4280 const struct ovs_key_nd_extensions *mask = ma ? nl_attr_get(ma) : NULL;
4281 const struct ovs_key_nd_extensions *key = nl_attr_get(a);
4282
4283 bool first = true;
4284 format_be32_masked(ds, &first, "nd_reserved", key->nd_reserved,
4285 OVS_BE32_MAX);
4286 ds_put_char(ds, ',');
4287
4288 format_u8u(ds, "nd_options_type", key->nd_options_type,
4289 MASK(mask, nd_options_type), verbose);
4290
4291 ds_chomp(ds, ',');
4292 break;
4293 }
4294 case OVS_KEY_ATTR_NSH: {
4295 format_odp_nsh_attr(a, ma, ds);
4296 break;
4297 }
4298 case OVS_KEY_ATTR_UNSPEC:
4299 case __OVS_KEY_ATTR_MAX:
4300 default:
4301 format_generic_odp_key(a, ds);
4302 if (!is_exact) {
4303 ds_put_char(ds, '/');
4304 format_generic_odp_key(ma, ds);
4305 }
4306 break;
4307 }
4308 ds_put_char(ds, ')');
4309 }
4310
4311 static void
4312 format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
4313 const struct hmap *portno_names, struct ds *ds,
4314 bool verbose)
4315 {
4316 if (check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4317 OVS_KEY_ATTR_MAX, false)) {
4318 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4319 }
4320 }
4321
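/* Appends to 'ofp' a mask attribute of the same type and size as 'key' that
 * wildcards it completely: an all-zeros attribute for flat attributes, or a
 * nested attribute whose members are recursively generated all-zero masks.
 * Returns the data at the start of 'ofp'. */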
4322 static struct nlattr *
4323 generate_all_wildcard_mask(const struct attr_len_tbl tbl[], int max,
4324 struct ofpbuf *ofp, const struct nlattr *key)
4325 {
4326 const struct nlattr *a;
4327 unsigned int left;
4328 int type = nl_attr_type(key);
4329 int size = nl_attr_get_size(key);
4330
4331 if (odp_key_attr_len(tbl, max, type) != ATTR_LEN_NESTED) {
4332 nl_msg_put_unspec_zero(ofp, type, size);
4333 } else {
4334 size_t nested_mask;
4335
4336 if (tbl[type].next) {
4337 const struct attr_len_tbl *entry = &tbl[type];
4338 tbl = entry->next;
4339 max = entry->next_max;
4340 }
4341
4342 nested_mask = nl_msg_start_nested(ofp, type);
4343 NL_ATTR_FOR_EACH(a, left, key, nl_attr_get_size(key)) {
4344 generate_all_wildcard_mask(tbl, max, ofp, nl_attr_get(a));
4345 }
4346 nl_msg_end_nested(ofp, nested_mask);
4347 }
4348
4349 return ofp->base;
4350 }
4351
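/* Formats 'key' into 'ds' as hex, followed by "/<mask>" if 'mask' is neither
 * null nor all-ones.  Nothing is printed unless 'verbose' is true or the mask
 * is nonzero. */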
4352 static void
4353 format_u128(struct ds *ds, const ovs_32aligned_u128 *key,
4354 const ovs_32aligned_u128 *mask, bool verbose)
4355 {
4356 if (verbose || (mask && !ovs_u128_is_zero(get_32aligned_u128(mask)))) {
4357 ovs_be128 value = hton128(get_32aligned_u128(key));
4358 ds_put_hex(ds, &value, sizeof value);
4359 if (mask && !(ovs_u128_is_ones(get_32aligned_u128(mask)))) {
4360 value = hton128(get_32aligned_u128(mask));
4361 ds_put_char(ds, '/');
4362 ds_put_hex(ds, &value, sizeof value);
4363 }
4364 }
4365 }
4366
4367 /* Read the string from 's_' as a 128-bit value. If the string contains
4368 * a "/", the rest of the string will be treated as a 128-bit mask.
4369 *
4370 * If either the value or mask is larger than 64 bits, the string must
4371 * be in hexadecimal.
4372 */
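/* For example (illustrative): "0x1f" scans as an exact value, "0x1f/0xff"
 * also scans a mask, and a full 128-bit value would be written as
 * "0x0123456789abcdef0123456789abcdef". */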
4373 static int
4374 scan_u128(const char *s_, ovs_u128 *value, ovs_u128 *mask)
4375 {
4376 char *s = CONST_CAST(char *, s_);
4377 ovs_be128 be_value;
4378 ovs_be128 be_mask;
4379
4380 if (!parse_int_string(s, (uint8_t *)&be_value, sizeof be_value, &s)) {
4381 *value = ntoh128(be_value);
4382
4383 if (mask) {
4384 int n;
4385
4386 if (ovs_scan(s, "/%n", &n)) {
4387 int error;
4388
4389 s += n;
4390 error = parse_int_string(s, (uint8_t *)&be_mask,
4391 sizeof be_mask, &s);
4392 if (error) {
4393 return 0;
4394 }
4395 *mask = ntoh128(be_mask);
4396 } else {
4397 *mask = OVS_U128_MAX;
4398 }
4399 }
4400 return s - s_;
4401 }
4402
4403 return 0;
4404 }
4405
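/* Attempts to parse a "ufid:<uuid>" prefix at the beginning of 's_' into
 * '*ufid'.  Returns the number of characters consumed on success, 0 if 's_'
 * does not begin with "ufid:", or -EINVAL if the UUID is malformed. */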
4406 int
4407 odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
4408 {
4409 const char *s = s_;
4410
4411 if (ovs_scan(s, "ufid:")) {
4412 s += 5;
4413
4414 if (!uuid_from_string_prefix((struct uuid *)ufid, s)) {
4415 return -EINVAL;
4416 }
4417 s += UUID_LEN;
4418
4419 return s - s_;
4420 }
4421
4422 return 0;
4423 }
4424
4425 void
4426 odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
4427 {
4428 ds_put_format(ds, "ufid:"UUID_FMT, UUID_ARGS((struct uuid *)ufid));
4429 }
4430
4431 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4432 * OVS_KEY_ATTR_* attributes in 'key'. If non-null, additionally formats the
4433 * 'mask_len' bytes of 'mask' which apply to 'key'. If 'portno_names' is
4434 * non-null, translates each odp port number to its name. */
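/* For example (abridged, illustrative), the output looks like
 * "in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800)". */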
4435 void
4436 odp_flow_format(const struct nlattr *key, size_t key_len,
4437 const struct nlattr *mask, size_t mask_len,
4438 const struct hmap *portno_names, struct ds *ds, bool verbose)
4439 {
4440 if (key_len) {
4441 const struct nlattr *a;
4442 unsigned int left;
4443 bool has_ethtype_key = false;
4444 bool has_packet_type_key = false;
4445 struct ofpbuf ofp;
4446 bool first_field = true;
4447
4448 ofpbuf_init(&ofp, 100);
4449 NL_ATTR_FOR_EACH (a, left, key, key_len) {
4450 int attr_type = nl_attr_type(a);
4451 const struct nlattr *ma = (mask && mask_len
4452 ? nl_attr_find__(mask, mask_len,
4453 attr_type)
4454 : NULL);
4455 if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4456 OVS_KEY_ATTR_MAX, false)) {
4457 continue;
4458 }
4459
4460 bool is_nested_attr;
4461 bool is_wildcard = false;
4462
4463 if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
4464 has_ethtype_key = true;
4465 } else if (attr_type == OVS_KEY_ATTR_PACKET_TYPE) {
4466 has_packet_type_key = true;
4467 }
4468
4469 is_nested_attr = odp_key_attr_len(ovs_flow_key_attr_lens,
4470 OVS_KEY_ATTR_MAX, attr_type) ==
4471 ATTR_LEN_NESTED;
4472
4473 if (mask && mask_len) {
4474 ma = nl_attr_find__(mask, mask_len, nl_attr_type(a));
4475 is_wildcard = ma ? odp_mask_attr_is_wildcard(ma) : true;
4476 }
4477
4478 if (verbose || !is_wildcard || is_nested_attr) {
4479 if (is_wildcard && !ma) {
4480 ma = generate_all_wildcard_mask(ovs_flow_key_attr_lens,
4481 OVS_KEY_ATTR_MAX,
4482 &ofp, a);
4483 }
4484 if (!first_field) {
4485 ds_put_char(ds, ',');
4486 }
4487 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4488 first_field = false;
4489 } else if (attr_type == OVS_KEY_ATTR_ETHERNET
4490 && !has_packet_type_key) {
4491 /* This special case reflects differences between the kernel
4492 * and userspace datapaths regarding the root type of the
4493 * packet being matched (typically Ethernet but some tunnels
4494 * can encapsulate IPv4 etc.). The kernel datapath does not
4495 * have an explicit way to indicate packet type; instead:
4496 *
4497 * - If OVS_KEY_ATTR_ETHERNET is present, the packet is an
4498 * Ethernet packet and OVS_KEY_ATTR_ETHERTYPE is the
4499 * Ethertype encoded in the Ethernet header.
4500 *
4501 * - If OVS_KEY_ATTR_ETHERNET is absent, then the packet's
4502 * root type is that encoded in OVS_KEY_ATTR_ETHERTYPE
4503 * (i.e. if OVS_KEY_ATTR_ETHERTYPE is 0x0800 then the
4504 * packet is an IPv4 packet).
4505 *
4506 * Thus, if OVS_KEY_ATTR_ETHERNET is present, even if it is
4507 * all-wildcarded, it is important to print it.
4508 *
4509 * On the other hand, the userspace datapath supports
4510 * OVS_KEY_ATTR_PACKET_TYPE and uses it to indicate the packet
4511 * type. Thus, if OVS_KEY_ATTR_PACKET_TYPE is present, we need
4512 * not print an all-wildcarded OVS_KEY_ATTR_ETHERNET. */
4513 if (!first_field) {
4514 ds_put_char(ds, ',');
4515 }
4516 ds_put_cstr(ds, "eth()");
4517 }
4518 ofpbuf_clear(&ofp);
4519 }
4520 ofpbuf_uninit(&ofp);
4521
4522 if (left) {
4523 int i;
4524
4525 if (left == key_len) {
4526 ds_put_cstr(ds, "<empty>");
4527 }
4528 ds_put_format(ds, ",***%u leftover bytes*** (", left);
4529 for (i = 0; i < left; i++) {
4530 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
4531 }
4532 ds_put_char(ds, ')');
4533 }
4534 if (!has_ethtype_key) {
4535 const struct nlattr *ma = nl_attr_find__(mask, mask_len,
4536 OVS_KEY_ATTR_ETHERTYPE);
4537 if (ma) {
4538 ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
4539 ntohs(nl_attr_get_be16(ma)));
4540 }
4541 }
4542 } else {
4543 ds_put_cstr(ds, "<empty>");
4544 }
4545 }
4546
4547 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4548 * OVS_KEY_ATTR_* attributes in 'key'. */
4549 void
4550 odp_flow_key_format(const struct nlattr *key,
4551 size_t key_len, struct ds *ds)
4552 {
4553 odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
4554 }
4555
4556 static bool
4557 ovs_frag_type_from_string(const char *s, enum ovs_frag_type *type)
4558 {
4559 if (!strcasecmp(s, "no")) {
4560 *type = OVS_FRAG_TYPE_NONE;
4561 } else if (!strcasecmp(s, "first")) {
4562 *type = OVS_FRAG_TYPE_FIRST;
4563 } else if (!strcasecmp(s, "later")) {
4564 *type = OVS_FRAG_TYPE_LATER;
4565 } else {
4566 return false;
4567 }
4568 return true;
4569 }
4570
4571 /* Parsing. */
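/* Each scan_<type>() helper below parses a value of its type (optionally
 * followed by "/<mask>" when its 'mask' argument is nonnull) at the beginning
 * of 's'.  It returns the number of characters consumed, or 0 if the input
 * does not match; when no explicit mask is given, the mask generally defaults
 * to all-ones (an exact match). */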
4572
4573 static int
4574 scan_eth(const char *s, struct eth_addr *key, struct eth_addr *mask)
4575 {
4576 int n;
4577
4578 if (ovs_scan(s, ETH_ADDR_SCAN_FMT"%n",
4579 ETH_ADDR_SCAN_ARGS(*key), &n)) {
4580 int len = n;
4581
4582 if (mask) {
4583 if (ovs_scan(s + len, "/"ETH_ADDR_SCAN_FMT"%n",
4584 ETH_ADDR_SCAN_ARGS(*mask), &n)) {
4585 len += n;
4586 } else {
4587 memset(mask, 0xff, sizeof *mask);
4588 }
4589 }
4590 return len;
4591 }
4592 return 0;
4593 }
4594
4595 static int
4596 scan_ipv4(const char *s, ovs_be32 *key, ovs_be32 *mask)
4597 {
4598 int n;
4599
4600 if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(key), &n)) {
4601 int len = n;
4602
4603 if (mask) {
4604 if (ovs_scan(s + len, "/"IP_SCAN_FMT"%n",
4605 IP_SCAN_ARGS(mask), &n)) {
4606 len += n;
4607 } else {
4608 *mask = OVS_BE32_MAX;
4609 }
4610 }
4611 return len;
4612 }
4613 return 0;
4614 }
4615
4616 static int
4617 scan_in6_addr(const char *s, struct in6_addr *key, struct in6_addr *mask)
4618 {
4619 int n;
4620 char ipv6_s[IPV6_SCAN_LEN + 1];
4621
4622 if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
4623 && inet_pton(AF_INET6, ipv6_s, key) == 1) {
4624 int len = n;
4625
4626 if (mask) {
4627 if (ovs_scan(s + len, "/"IPV6_SCAN_FMT"%n", ipv6_s, &n)
4628 && inet_pton(AF_INET6, ipv6_s, mask) == 1) {
4629 len += n;
4630 } else {
4631 memset(mask, 0xff, sizeof *mask);
4632 }
4633 }
4634 return len;
4635 }
4636 return 0;
4637 }
4638
4639 static int
4640 scan_ipv6_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
4641 {
4642 int key_, mask_;
4643 int n;
4644
4645 if (ovs_scan(s, "%i%n", &key_, &n)
4646 && (key_ & ~IPV6_LABEL_MASK) == 0) {
4647 int len = n;
4648
4649 *key = htonl(key_);
4650 if (mask) {
4651 if (ovs_scan(s + len, "/%i%n", &mask_, &n)
4652 && (mask_ & ~IPV6_LABEL_MASK) == 0) {
4653 len += n;
4654 *mask = htonl(mask_);
4655 } else {
4656 *mask = htonl(IPV6_LABEL_MASK);
4657 }
4658 }
4659 return len;
4660 }
4661 return 0;
4662 }
4663
4664 static int
4665 scan_u8(const char *s, uint8_t *key, uint8_t *mask)
4666 {
4667 int n;
4668
4669 if (ovs_scan(s, "%"SCNi8"%n", key, &n)) {
4670 int len = n;
4671
4672 if (mask) {
4673 if (ovs_scan(s + len, "/%"SCNi8"%n", mask, &n)) {
4674 len += n;
4675 } else {
4676 *mask = UINT8_MAX;
4677 }
4678 }
4679 return len;
4680 }
4681 return 0;
4682 }
4683
4684 static int
4685 scan_u16(const char *s, uint16_t *key, uint16_t *mask)
4686 {
4687 int n;
4688
4689 if (ovs_scan(s, "%"SCNi16"%n", key, &n)) {
4690 int len = n;
4691
4692 if (mask) {
4693 if (ovs_scan(s + len, "/%"SCNi16"%n", mask, &n)) {
4694 len += n;
4695 } else {
4696 *mask = UINT16_MAX;
4697 }
4698 }
4699 return len;
4700 }
4701 return 0;
4702 }
4703
4704 static int
4705 scan_u32(const char *s, uint32_t *key, uint32_t *mask)
4706 {
4707 int n;
4708
4709 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4710 int len = n;
4711
4712 if (mask) {
4713 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4714 len += n;
4715 } else {
4716 *mask = UINT32_MAX;
4717 }
4718 }
4719 return len;
4720 }
4721 return 0;
4722 }
4723
4724 static int
4725 scan_be16(const char *s, ovs_be16 *key, ovs_be16 *mask)
4726 {
4727 uint16_t key_, mask_;
4728 int n;
4729
4730 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
4731 int len = n;
4732
4733 *key = htons(key_);
4734 if (mask) {
4735 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
4736 len += n;
4737 *mask = htons(mask_);
4738 } else {
4739 *mask = OVS_BE16_MAX;
4740 }
4741 }
4742 return len;
4743 }
4744 return 0;
4745 }
4746
4747 static int
4748 scan_be32(const char *s, ovs_be32 *key, ovs_be32 *mask)
4749 {
4750 uint32_t key_, mask_;
4751 int n;
4752
4753 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
4754 int len = n;
4755
4756 *key = htonl(key_);
4757 if (mask) {
4758 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
4759 len += n;
4760 *mask = htonl(mask_);
4761 } else {
4762 *mask = OVS_BE32_MAX;
4763 }
4764 }
4765 return len;
4766 }
4767 return 0;
4768 }
4769
4770 static int
4771 scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
4772 {
4773 uint64_t key_, mask_;
4774 int n;
4775
4776 if (ovs_scan(s, "%"SCNi64"%n", &key_, &n)) {
4777 int len = n;
4778
4779 *key = htonll(key_);
4780 if (mask) {
4781 if (ovs_scan(s + len, "/%"SCNi64"%n", &mask_, &n)) {
4782 len += n;
4783 *mask = htonll(mask_);
4784 } else {
4785 *mask = OVS_BE64_MAX;
4786 }
4787 }
4788 return len;
4789 }
4790 return 0;
4791 }
4792
4793 static int
4794 scan_tun_flags(const char *s, uint16_t *key, uint16_t *mask)
4795 {
4796 uint32_t flags, fmask;
4797 int n;
4798
4799 n = parse_odp_flags(s, flow_tun_flag_to_string, &flags,
4800 FLOW_TNL_F_MASK, mask ? &fmask : NULL);
4801 if (n >= 0 && s[n] == ')') {
4802 *key = flags;
4803 if (mask) {
4804 *mask = fmask;
4805 }
4806 return n + 1;
4807 }
4808 return 0;
4809 }
4810
4811 static int
4812 scan_tcp_flags(const char *s, ovs_be16 *key, ovs_be16 *mask)
4813 {
4814 uint32_t flags, fmask;
4815 int n;
4816
4817 n = parse_odp_flags(s, packet_tcp_flag_to_string, &flags,
4818 TCP_FLAGS(OVS_BE16_MAX), mask ? &fmask : NULL);
4819 if (n >= 0) {
4820 *key = htons(flags);
4821 if (mask) {
4822 *mask = htons(fmask);
4823 }
4824 return n;
4825 }
4826 return 0;
4827 }
4828
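/* Converts the CS_* connection tracking state bits in 'state' into the
 * corresponding OVS_CS_F_* bits used by the datapath interface. */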
4829 static uint32_t
4830 ovs_to_odp_ct_state(uint8_t state)
4831 {
4832 uint32_t odp = 0;
4833
4834 #define CS_STATE(ENUM, INDEX, NAME) \
4835 if (state & CS_##ENUM) { \
4836 odp |= OVS_CS_F_##ENUM; \
4837 }
4838 CS_STATES
4839 #undef CS_STATE
4840
4841 return odp;
4842 }
4843
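/* Converts the OVS_CS_F_* datapath connection tracking state bits in 'flags'
 * into the corresponding CS_* bits. */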
4844 static uint8_t
4845 odp_to_ovs_ct_state(uint32_t flags)
4846 {
4847 uint32_t state = 0;
4848
4849 #define CS_STATE(ENUM, INDEX, NAME) \
4850 if (flags & OVS_CS_F_##ENUM) { \
4851 state |= CS_##ENUM; \
4852 }
4853 CS_STATES
4854 #undef CS_STATE
4855
4856 return state;
4857 }
4858
4859 static int
4860 scan_ct_state(const char *s, uint32_t *key, uint32_t *mask)
4861 {
4862 uint32_t flags, fmask;
4863 int n;
4864
4865 n = parse_flags(s, odp_ct_state_to_string, ')', NULL, NULL, &flags,
4866 ovs_to_odp_ct_state(CS_SUPPORTED_MASK),
4867 mask ? &fmask : NULL);
4868
4869 if (n >= 0) {
4870 *key = flags;
4871 if (mask) {
4872 *mask = fmask;
4873 }
4874 return n;
4875 }
4876 return 0;
4877 }
4878
4879 static int
4880 scan_frag(const char *s, uint8_t *key, uint8_t *mask)
4881 {
4882 int n;
4883 char frag[8];
4884 enum ovs_frag_type frag_type;
4885
4886 if (ovs_scan(s, "%7[a-z]%n", frag, &n)
4887 && ovs_frag_type_from_string(frag, &frag_type)) {
4888 int len = n;
4889
4890 *key = frag_type;
4891 if (mask) {
4892 *mask = UINT8_MAX;
4893 }
4894 return len;
4895 }
4896 return 0;
4897 }
4898
4899 static int
4900 scan_port(const char *s, uint32_t *key, uint32_t *mask,
4901 const struct simap *port_names)
4902 {
4903 int n;
4904
4905 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4906 int len = n;
4907
4908 if (mask) {
4909 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4910 len += n;
4911 } else {
4912 *mask = UINT32_MAX;
4913 }
4914 }
4915 return len;
4916 } else if (port_names) {
4917 const struct simap_node *node;
4918 int len;
4919
4920 len = strcspn(s, ")");
4921 node = simap_find_len(port_names, s, len);
4922 if (node) {
4923 *key = node->data;
4924
4925 if (mask) {
4926 *mask = UINT32_MAX;
4927 }
4928 return len;
4929 }
4930 }
4931 return 0;
4932 }
4933
4934 /* Helper for vlan parsing. */
4935 struct ovs_key_vlan__ {
4936 ovs_be16 tci;
4937 };
4938
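/* Stores 'value' into the 'bits'-wide field at bit offset 'offset' within the
 * network-byte-order word '*bf'.  Returns false, leaving '*bf' unchanged, if
 * 'value' does not fit in 'bits' bits. */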
4939 static bool
4940 set_be16_bf(ovs_be16 *bf, uint8_t bits, uint8_t offset, uint16_t value)
4941 {
4942 const uint16_t mask = ((1U << bits) - 1) << offset;
4943
4944 if (value >> bits) {
4945 return false;
4946 }
4947
4948 *bf = htons((ntohs(*bf) & ~mask) | (value << offset));
4949 return true;
4950 }
4951
4952 static int
4953 scan_be16_bf(const char *s, ovs_be16 *key, ovs_be16 *mask, uint8_t bits,
4954 uint8_t offset)
4955 {
4956 uint16_t key_, mask_;
4957 int n;
4958
4959 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
4960 int len = n;
4961
4962 if (set_be16_bf(key, bits, offset, key_)) {
4963 if (mask) {
4964 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
4965 len += n;
4966
4967 if (!set_be16_bf(mask, bits, offset, mask_)) {
4968 return 0;
4969 }
4970 } else {
4971 *mask |= htons(((1U << bits) - 1) << offset);
4972 }
4973 }
4974 return len;
4975 }
4976 }
4977 return 0;
4978 }
4979
4980 static int
4981 scan_vid(const char *s, ovs_be16 *key, ovs_be16 *mask)
4982 {
4983 return scan_be16_bf(s, key, mask, 12, VLAN_VID_SHIFT);
4984 }
4985
4986 static int
4987 scan_pcp(const char *s, ovs_be16 *key, ovs_be16 *mask)
4988 {
4989 return scan_be16_bf(s, key, mask, 3, VLAN_PCP_SHIFT);
4990 }
4991
4992 static int
4993 scan_cfi(const char *s, ovs_be16 *key, ovs_be16 *mask)
4994 {
4995 return scan_be16_bf(s, key, mask, 1, VLAN_CFI_SHIFT);
4996 }
4997
4998 /* For MPLS. */
4999 static bool
5000 set_be32_bf(ovs_be32 *bf, uint8_t bits, uint8_t offset, uint32_t value)
5001 {
5002 const uint32_t mask = ((1U << bits) - 1) << offset;
5003
5004 if (value >> bits) {
5005 return false;
5006 }
5007
5008 *bf = htonl((ntohl(*bf) & ~mask) | (value << offset));
5009 return true;
5010 }
5011
5012 static int
5013 scan_be32_bf(const char *s, ovs_be32 *key, ovs_be32 *mask, uint8_t bits,
5014 uint8_t offset)
5015 {
5016 uint32_t key_, mask_;
5017 int n;
5018
5019 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
5020 int len = n;
5021
5022 if (set_be32_bf(key, bits, offset, key_)) {
5023 if (mask) {
5024 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
5025 len += n;
5026
5027 if (!set_be32_bf(mask, bits, offset, mask_)) {
5028 return 0;
5029 }
5030 } else {
5031 *mask |= htonl(((1U << bits) - 1) << offset);
5032 }
5033 }
5034 return len;
5035 }
5036 }
5037 return 0;
5038 }
5039
5040 static int
5041 scan_mpls_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
5042 {
5043 return scan_be32_bf(s, key, mask, 20, MPLS_LABEL_SHIFT);
5044 }
5045
5046 static int
5047 scan_mpls_tc(const char *s, ovs_be32 *key, ovs_be32 *mask)
5048 {
5049 return scan_be32_bf(s, key, mask, 3, MPLS_TC_SHIFT);
5050 }
5051
5052 static int
5053 scan_mpls_ttl(const char *s, ovs_be32 *key, ovs_be32 *mask)
5054 {
5055 return scan_be32_bf(s, key, mask, 8, MPLS_TTL_SHIFT);
5056 }
5057
5058 static int
5059 scan_mpls_bos(const char *s, ovs_be32 *key, ovs_be32 *mask)
5060 {
5061 return scan_be32_bf(s, key, mask, 1, MPLS_BOS_SHIFT);
5062 }
5063
5064 static int
5065 scan_vxlan_gbp(const char *s, uint32_t *key, uint32_t *mask)
5066 {
5067 const char *s_base = s;
5068 ovs_be16 id = 0, id_mask = 0;
5069 uint8_t flags = 0, flags_mask = 0;
5070 int len;
5071
5072 if (!strncmp(s, "id=", 3)) {
5073 s += 3;
5074 len = scan_be16(s, &id, mask ? &id_mask : NULL);
5075 if (len == 0) {
5076 return 0;
5077 }
5078 s += len;
5079 }
5080
5081 if (s[0] == ',') {
5082 s++;
5083 }
5084 if (!strncmp(s, "flags=", 6)) {
5085 s += 6;
5086 len = scan_u8(s, &flags, mask ? &flags_mask : NULL);
5087 if (len == 0) {
5088 return 0;
5089 }
5090 s += len;
5091 }
5092
5093 if (!strncmp(s, "))", 2)) {
5094 s += 2;
5095
5096 *key = (flags << 16) | ntohs(id);
5097 if (mask) {
5098 *mask = (flags_mask << 16) | ntohs(id_mask);
5099 }
5100
5101 return s - s_base;
5102 }
5103
5104 return 0;
5105 }
5106
5107 static int
5108 scan_erspan_metadata(const char *s,
5109 struct erspan_metadata *key,
5110 struct erspan_metadata *mask)
5111 {
5112 const char *s_base = s;
5113 uint32_t idx = 0, idx_mask = 0;
5114 uint8_t ver = 0, dir = 0, hwid = 0;
5115 uint8_t ver_mask = 0, dir_mask = 0, hwid_mask = 0;
5116 int len;
5117
5118 if (!strncmp(s, "ver=", 4)) {
5119 s += 4;
5120 len = scan_u8(s, &ver, mask ? &ver_mask : NULL);
5121 if (len == 0) {
5122 return 0;
5123 }
5124 s += len;
5125 }
5126
5127 if (s[0] == ',') {
5128 s++;
5129 }
5130
5131 if (ver == 1) {
5132 if (!strncmp(s, "idx=", 4)) {
5133 s += 4;
5134 len = scan_u32(s, &idx, mask ? &idx_mask : NULL);
5135 if (len == 0) {
5136 return 0;
5137 }
5138 s += len;
5139 }
5140
5141 if (!strncmp(s, ")", 1)) {
5142 s += 1;
5143 key->version = ver;
5144 key->u.index = htonl(idx);
5145 if (mask) {
5146 mask->u.index = htonl(idx_mask);
5147 }
5148 }
5149 return s - s_base;
5150
5151 } else if (ver == 2) {
5152 if (!strncmp(s, "dir=", 4)) {
5153 s += 4;
5154 len = scan_u8(s, &dir, mask ? &dir_mask : NULL);
5155 if (len == 0) {
5156 return 0;
5157 }
5158 s += len;
5159 }
5160 if (s[0] == ',') {
5161 s++;
5162 }
5163 if (!strncmp(s, "hwid=", 5)) {
5164 s += 5;
5165 len = scan_u8(s, &hwid, mask ? &hwid_mask : NULL);
5166 if (len == 0) {
5167 return 0;
5168 }
5169 s += len;
5170 }
5171
5172 if (!strncmp(s, ")", 1)) {
5173 s += 1;
5174 key->version = ver;
5175 key->u.md2.hwid = hwid;
5176 key->u.md2.dir = dir;
5177 if (mask) {
5178 mask->u.md2.hwid = hwid_mask;
5179 mask->u.md2.dir = dir_mask;
5180 }
5181 }
5182 return s - s_base;
5183 }
5184
5185 return 0;
5186 }
5187
5188 static int
5189 scan_geneve(const char *s, struct geneve_scan *key, struct geneve_scan *mask)
5190 {
5191 const char *s_base = s;
5192 struct geneve_opt *opt = key->d;
5193 struct geneve_opt *opt_mask = mask ? mask->d : NULL;
5194 int len_remain = sizeof key->d;
5195 int len;
5196
5197 while (s[0] == '{' && len_remain >= sizeof *opt) {
5198 int data_len = 0;
5199
5200 s++;
5201 len_remain -= sizeof *opt;
5202
5203 if (!strncmp(s, "class=", 6)) {
5204 s += 6;
5205 len = scan_be16(s, &opt->opt_class,
5206 mask ? &opt_mask->opt_class : NULL);
5207 if (len == 0) {
5208 return 0;
5209 }
5210 s += len;
5211 } else if (mask) {
5212 memset(&opt_mask->opt_class, 0, sizeof opt_mask->opt_class);
5213 }
5214
5215 if (s[0] == ',') {
5216 s++;
5217 }
5218 if (!strncmp(s, "type=", 5)) {
5219 s += 5;
5220 len = scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
5221 if (len == 0) {
5222 return 0;
5223 }
5224 s += len;
5225 } else if (mask) {
5226 memset(&opt_mask->type, 0, sizeof opt_mask->type);
5227 }
5228
5229 if (s[0] == ',') {
5230 s++;
5231 }
5232 if (!strncmp(s, "len=", 4)) {
5233 uint8_t opt_len, opt_len_mask;
5234 s += 4;
5235 len = scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
5236 if (len == 0) {
5237 return 0;
5238 }
5239 s += len;
5240
5241 if (opt_len > 124 || opt_len % 4 || opt_len > len_remain) {
5242 return 0;
5243 }
5244 opt->length = opt_len / 4;
5245 if (mask) {
5246 opt_mask->length = opt_len_mask;
5247 }
5248 data_len = opt_len;
5249 } else if (mask) {
5250 memset(&opt_mask->type, 0, sizeof opt_mask->type);
5251 }
5252
5253 if (s[0] == ',') {
5254 s++;
5255 if (parse_int_string(s, (uint8_t *)(opt + 1),
5256 data_len, (char **)&s)) {
5257 return 0;
5258 }
5259 }
5260 if (mask) {
5261 if (s[0] == '/') {
5262 s++;
5263 if (parse_int_string(s, (uint8_t *)(opt_mask + 1),
5264 data_len, (char **)&s)) {
5265 return 0;
5266 }
5267 }
5268 opt_mask->r1 = 0;
5269 opt_mask->r2 = 0;
5270 opt_mask->r3 = 0;
5271 }
5272
5273 if (s[0] == '}') {
5274 s++;
5275 opt += 1 + data_len / 4;
5276 if (mask) {
5277 opt_mask += 1 + data_len / 4;
5278 }
5279 len_remain -= data_len;
5280 } else {
5281 return 0;
5282 }
5283 }
5284
5285 if (s[0] == ')') {
5286 len = sizeof key->d - len_remain;
5287
5288 s++;
5289 key->len = len;
5290 if (mask) {
5291 mask->len = len;
5292 }
5293 return s - s_base;
5294 }
5295
5296 return 0;
5297 }
5298
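/* The *_to_attr() helpers below convert a scanned value into its Netlink
 * attribute representation.  They are used through SCAN_PUT_ATTR() (via
 * SCAN_FIELD_NESTED_FUNC()) for fields where simply copying the raw structure
 * with nl_msg_put_unspec() would not produce the right attribute. */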
5299 static void
5300 tun_flags_to_attr(struct ofpbuf *a, const void *data_)
5301 {
5302 const uint16_t *flags = data_;
5303
5304 if (*flags & FLOW_TNL_F_DONT_FRAGMENT) {
5305 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
5306 }
5307 if (*flags & FLOW_TNL_F_CSUM) {
5308 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
5309 }
5310 if (*flags & FLOW_TNL_F_OAM) {
5311 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
5312 }
5313 }
5314
5315 static void
5316 vxlan_gbp_to_attr(struct ofpbuf *a, const void *data_)
5317 {
5318 const uint32_t *gbp = data_;
5319
5320 if (*gbp) {
5321 size_t vxlan_opts_ofs;
5322
5323 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
5324 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP, *gbp);
5325 nl_msg_end_nested(a, vxlan_opts_ofs);
5326 }
5327 }
5328
5329 static void
5330 geneve_to_attr(struct ofpbuf *a, const void *data_)
5331 {
5332 const struct geneve_scan *geneve = data_;
5333
5334 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, geneve->d,
5335 geneve->len);
5336 }
5337
5338 static void
5339 erspan_to_attr(struct ofpbuf *a, const void *data_)
5340 {
5341 const struct erspan_metadata *md = data_;
5342
5343 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, md,
5344 sizeof *md);
5345 }
5346
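/* The SCAN_* macros below form a small parsing DSL used by
 * parse_odp_key_mask_attr__().  SCAN_BEGIN()/SCAN_FIELD()/SCAN_END() handle
 * fixed-size key structures such as ipv4(), SCAN_BEGIN_NESTED() handles
 * nested attributes such as tunnel(), SCAN_BEGIN_ARRAY() handles repeated
 * elements such as mpls(), and the SCAN_SINGLE*() variants handle
 * single-value attributes.  Each expansion consumes matched text from 's' and
 * appends the scanned key (and mask, if nonnull) as Netlink attributes to
 * 'key' and 'mask'. */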
5347 #define SCAN_PUT_ATTR(BUF, ATTR, DATA, FUNC) \
5348 { \
5349 unsigned long call_fn = (unsigned long)FUNC; \
5350 if (call_fn) { \
5351 typedef void (*fn)(struct ofpbuf *, const void *); \
5352 fn func = FUNC; \
5353 func(BUF, &(DATA)); \
5354 } else { \
5355 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)); \
5356 } \
5357 }
5358
5359 #define SCAN_IF(NAME) \
5360 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5361 const char *start = s; \
5362 int len; \
5363 \
5364 s += strlen(NAME)
5365
5366 /* Usually no special initialization is needed. */
5367 #define SCAN_BEGIN(NAME, TYPE) \
5368 SCAN_IF(NAME); \
5369 TYPE skey, smask; \
5370 memset(&skey, 0, sizeof skey); \
5371 memset(&smask, 0, sizeof smask); \
5372 do { \
5373 len = 0;
5374
5375 /* Initialize as fully masked, since the mask will not be scanned. */
5376 #define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
5377 SCAN_IF(NAME); \
5378 TYPE skey, smask; \
5379 memset(&skey, 0, sizeof skey); \
5380 memset(&smask, 0xff, sizeof smask); \
5381 do { \
5382 len = 0;
5383
5384 /* VLAN needs special initialization. */
5385 #define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
5386 SCAN_IF(NAME); \
5387 TYPE skey = KEY_INIT; \
5388 TYPE smask = MASK_INIT; \
5389 do { \
5390 len = 0;
5391
5392 /* Scan an unnamed entry as 'TYPE'. */
5393 #define SCAN_TYPE(TYPE, KEY, MASK) \
5394 len = scan_##TYPE(s, KEY, MASK); \
5395 if (len == 0) { \
5396 return -EINVAL; \
5397 } \
5398 s += len
5399
5400 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
5401 #define SCAN_FIELD(NAME, TYPE, FIELD) \
5402 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5403 s += strlen(NAME); \
5404 SCAN_TYPE(TYPE, &skey.FIELD, mask ? &smask.FIELD : NULL); \
5405 continue; \
5406 }
5407
5408 #define SCAN_FINISH() \
5409 } while (*s++ == ',' && len != 0); \
5410 if (s[-1] != ')') { \
5411 return -EINVAL; \
5412 }
5413
5414 #define SCAN_FINISH_SINGLE() \
5415 } while (false); \
5416 if (*s++ != ')') { \
5417 return -EINVAL; \
5418 }
5419
5420 /* Beginning of nested attribute. */
5421 #define SCAN_BEGIN_NESTED(NAME, ATTR) \
5422 SCAN_IF(NAME); \
5423 size_t key_offset, mask_offset = 0; \
5424 key_offset = nl_msg_start_nested(key, ATTR); \
5425 if (mask) { \
5426 mask_offset = nl_msg_start_nested(mask, ATTR); \
5427 } \
5428 do { \
5429 len = 0;
5430
5431 #define SCAN_END_NESTED() \
5432 SCAN_FINISH(); \
5433 nl_msg_end_nested(key, key_offset); \
5434 if (mask) { \
5435 nl_msg_end_nested(mask, mask_offset); \
5436 } \
5437 return s - start; \
5438 }
5439
5440 #define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \
5441 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5442 TYPE skey, smask; \
5443 memset(&skey, 0, sizeof skey); \
5444 memset(&smask, 0xff, sizeof smask); \
5445 s += strlen(NAME); \
5446 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5447 SCAN_PUT(ATTR, FUNC); \
5448 continue; \
5449 }
5450
5451 #define SCAN_FIELD_NESTED(NAME, TYPE, SCAN_AS, ATTR) \
5452 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, NULL)
5453
5454 #define SCAN_FIELD_NESTED_FUNC(NAME, TYPE, SCAN_AS, FUNC) \
5455 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, 0, FUNC)
5456
5457 #define SCAN_PUT(ATTR, FUNC) \
5458 SCAN_PUT_ATTR(key, ATTR, skey, FUNC); \
5459 if (mask) \
5460 SCAN_PUT_ATTR(mask, ATTR, smask, FUNC); \
5461
5462 #define SCAN_END(ATTR) \
5463 SCAN_FINISH(); \
5464 SCAN_PUT(ATTR, NULL); \
5465 return s - start; \
5466 }
5467
5468 #define SCAN_BEGIN_ARRAY(NAME, TYPE, CNT) \
5469 SCAN_IF(NAME); \
5470 TYPE skey[CNT], smask[CNT]; \
5471 memset(&skey, 0, sizeof skey); \
5472 memset(&smask, 0, sizeof smask); \
5473 int idx = 0, cnt = CNT; \
5474 uint64_t fields = 0; \
5475 do { \
5476 int field = 0; \
5477 len = 0;
5478
5479 /* Scan named ('NAME') entry 'FIELD' of the current array element as 'TYPE'. */
5480 #define SCAN_FIELD_ARRAY(NAME, TYPE, FIELD) \
5481 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5482 if (fields & (1UL << field)) { \
5483 fields = 0; \
5484 if (++idx == cnt) { \
5485 break; \
5486 } \
5487 } \
5488 s += strlen(NAME); \
5489 SCAN_TYPE(TYPE, &skey[idx].FIELD, mask ? &smask[idx].FIELD : NULL); \
5490 fields |= 1UL << field; \
5491 continue; \
5492 } \
5493 field++;
5494
5495 #define SCAN_PUT_ATTR_ARRAY(BUF, ATTR, DATA, CNT) \
5496 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)[0] * (CNT)); \
5497
5498 #define SCAN_PUT_ARRAY(ATTR, CNT) \
5499 SCAN_PUT_ATTR_ARRAY(key, ATTR, skey, CNT); \
5500 if (mask) { \
5501 SCAN_PUT_ATTR_ARRAY(mask, ATTR, smask, CNT); \
5502 }
5503
5504 #define SCAN_END_ARRAY(ATTR) \
5505 SCAN_FINISH(); \
5506 if (idx == cnt) { \
5507 return -EINVAL; \
5508 } \
5509 SCAN_PUT_ARRAY(ATTR, idx + 1); \
5510 return s - start; \
5511 }
5512
5513 #define SCAN_END_SINGLE(ATTR) \
5514 SCAN_FINISH_SINGLE(); \
5515 SCAN_PUT(ATTR, NULL); \
5516 return s - start; \
5517 }
5518
5519 #define SCAN_SINGLE(NAME, TYPE, SCAN_AS, ATTR) \
5520 SCAN_BEGIN(NAME, TYPE) { \
5521 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5522 } SCAN_END_SINGLE(ATTR)
5523
5524 #define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
5525 SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
5526 SCAN_TYPE(SCAN_AS, &skey, NULL); \
5527 } SCAN_END_SINGLE(ATTR)
5528
5529 /* scan_port needs one extra argument. */
5530 #define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
5531 SCAN_BEGIN(NAME, TYPE) { \
5532 len = scan_port(s, &skey, &smask, \
5533 context->port_names); \
5534 if (len == 0) { \
5535 return -EINVAL; \
5536 } \
5537 s += len; \
5538 } SCAN_END_SINGLE(ATTR)
5539
5540 static int
5541 parse_odp_nsh_key_mask_attr(const char *s, struct ofpbuf *key,
5542 struct ofpbuf *mask)
5543 {
5544 if (strncmp(s, "nsh(", 4) == 0) {
5545 const char *start = s;
5546 int len;
5547 struct ovs_key_nsh skey, smask;
5548 uint32_t spi = 0, spi_mask = 0;
5549 uint8_t si = 0, si_mask = 0;
5550
5551 s += 4;
5552
5553 memset(&skey, 0, sizeof skey);
5554 memset(&smask, 0, sizeof smask);
5555 do {
5556 len = 0;
5557
5558 if (strncmp(s, "flags=", 6) == 0) {
5559 s += 6;
5560 len = scan_u8(s, &skey.flags, mask ? &smask.flags : NULL);
5561 if (len == 0) {
5562 return -EINVAL;
5563 }
5564 s += len;
5565 continue;
5566 }
5567
5568 if (strncmp(s, "mdtype=", 7) == 0) {
5569 s += 7;
5570 len = scan_u8(s, &skey.mdtype, mask ? &smask.mdtype : NULL);
5571 if (len == 0) {
5572 return -EINVAL;
5573 }
5574 s += len;
5575 continue;
5576 }
5577
5578 if (strncmp(s, "np=", 3) == 0) {
5579 s += 3;
5580 len = scan_u8(s, &skey.np, mask ? &smask.np : NULL);
5581 if (len == 0) {
5582 return -EINVAL;
5583 }
5584 s += len;
5585 continue;
5586 }
5587
5588 if (strncmp(s, "spi=", 4) == 0) {
5589 s += 4;
5590 len = scan_u32(s, &spi, mask ? &spi_mask : NULL);
5591 if (len == 0) {
5592 return -EINVAL;
5593 }
5594 s += len;
5595 continue;
5596 }
5597
5598 if (strncmp(s, "si=", 3) == 0) {
5599 s += 3;
5600 len = scan_u8(s, &si, mask ? &si_mask : NULL);
5601 if (len == 0) {
5602 return -EINVAL;
5603 }
5604 s += len;
5605 continue;
5606 }
5607
5608 if (strncmp(s, "c1=", 3) == 0) {
5609 s += 3;
5610 len = scan_be32(s, &skey.context[0],
5611 mask ? &smask.context[0] : NULL);
5612 if (len == 0) {
5613 return -EINVAL;
5614 }
5615 s += len;
5616 continue;
5617 }
5618
5619 if (strncmp(s, "c2=", 3) == 0) {
5620 s += 3;
5621 len = scan_be32(s, &skey.context[1],
5622 mask ? &smask.context[1] : NULL);
5623 if (len == 0) {
5624 return -EINVAL;
5625 }
5626 s += len;
5627 continue;
5628 }
5629
5630 if (strncmp(s, "c3=", 3) == 0) {
5631 s += 3;
5632 len = scan_be32(s, &skey.context[2],
5633 mask ? &smask.context[2] : NULL);
5634 if (len == 0) {
5635 return -EINVAL;
5636 }
5637 s += len;
5638 continue;
5639 }
5640
5641 if (strncmp(s, "c4=", 3) == 0) {
5642 s += 3;
5643 len = scan_be32(s, &skey.context[3],
5644 mask ? &smask.context[3] : NULL);
5645 if (len == 0) {
5646 return -EINVAL;
5647 }
5648 s += len;
5649 continue;
5650 }
5651 } while (*s++ == ',' && len != 0);
5652 if (s[-1] != ')') {
5653 return -EINVAL;
5654 }
5655
5656 skey.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
5657 smask.path_hdr = nsh_spi_si_to_path_hdr(spi_mask, si_mask);
5658
5659 nsh_key_to_attr(key, &skey, NULL, 0, false);
5660 if (mask) {
5661 nsh_key_to_attr(mask, &smask, NULL, 0, true);
5662 }
5663 return s - start;
5664 }
5665 return 0;
5666 }
5667
5668 static int
5669 parse_odp_key_mask_attr(struct parse_odp_context *context, const char *s,
5670 struct ofpbuf *key, struct ofpbuf *mask)
5671 {
5672 int retval;
5673
5674 context->depth++;
5675
5676 if (context->depth == MAX_ODP_NESTED) {
5677 retval = -EINVAL;
5678 } else {
5679 retval = parse_odp_key_mask_attr__(context, s, key, mask);
5680 }
5681
5682 context->depth--;
5683
5684 return retval;
5685 }
5686
5687 static int
5688 parse_odp_key_mask_attr__(struct parse_odp_context *context, const char *s,
5689 struct ofpbuf *key, struct ofpbuf *mask)
5690 {
5691 SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
5692 SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
5693 SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
5694 OVS_KEY_ATTR_RECIRC_ID);
5695 SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
5696
5697 SCAN_SINGLE("ct_state(", uint32_t, ct_state, OVS_KEY_ATTR_CT_STATE);
5698 SCAN_SINGLE("ct_zone(", uint16_t, u16, OVS_KEY_ATTR_CT_ZONE);
5699 SCAN_SINGLE("ct_mark(", uint32_t, u32, OVS_KEY_ATTR_CT_MARK);
5700 SCAN_SINGLE("ct_label(", ovs_u128, u128, OVS_KEY_ATTR_CT_LABELS);
5701
5702 SCAN_BEGIN("ct_tuple4(", struct ovs_key_ct_tuple_ipv4) {
5703 SCAN_FIELD("src=", ipv4, ipv4_src);
5704 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5705 SCAN_FIELD("proto=", u8, ipv4_proto);
5706 SCAN_FIELD("tp_src=", be16, src_port);
5707 SCAN_FIELD("tp_dst=", be16, dst_port);
5708 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
5709
5710 SCAN_BEGIN("ct_tuple6(", struct ovs_key_ct_tuple_ipv6) {
5711 SCAN_FIELD("src=", in6_addr, ipv6_src);
5712 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5713 SCAN_FIELD("proto=", u8, ipv6_proto);
5714 SCAN_FIELD("tp_src=", be16, src_port);
5715 SCAN_FIELD("tp_dst=", be16, dst_port);
5716 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
5717
5718 SCAN_BEGIN_NESTED("tunnel(", OVS_KEY_ATTR_TUNNEL) {
5719 SCAN_FIELD_NESTED("tun_id=", ovs_be64, be64, OVS_TUNNEL_KEY_ATTR_ID);
5720 SCAN_FIELD_NESTED("src=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_SRC);
5721 SCAN_FIELD_NESTED("dst=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_DST);
5722 SCAN_FIELD_NESTED("ipv6_src=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_SRC);
5723 SCAN_FIELD_NESTED("ipv6_dst=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_DST);
5724 SCAN_FIELD_NESTED("tos=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TOS);
5725 SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
5726 SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
5727 SCAN_FIELD_NESTED("tp_dst=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_DST);
5728 SCAN_FIELD_NESTED_FUNC("erspan(", struct erspan_metadata, erspan_metadata,
5729 erspan_to_attr);
5730 SCAN_FIELD_NESTED_FUNC("vxlan(gbp(", uint32_t, vxlan_gbp, vxlan_gbp_to_attr);
5731 SCAN_FIELD_NESTED_FUNC("geneve(", struct geneve_scan, geneve,
5732 geneve_to_attr);
5733 SCAN_FIELD_NESTED_FUNC("flags(", uint16_t, tun_flags, tun_flags_to_attr);
5734 } SCAN_END_NESTED();
5735
5736 SCAN_SINGLE_PORT("in_port(", uint32_t, OVS_KEY_ATTR_IN_PORT);
5737
5738 SCAN_BEGIN("eth(", struct ovs_key_ethernet) {
5739 SCAN_FIELD("src=", eth, eth_src);
5740 SCAN_FIELD("dst=", eth, eth_dst);
5741 } SCAN_END(OVS_KEY_ATTR_ETHERNET);
5742
5743 SCAN_BEGIN_INIT("vlan(", struct ovs_key_vlan__,
5744 { htons(VLAN_CFI) }, { htons(VLAN_CFI) }) {
5745 SCAN_FIELD("vid=", vid, tci);
5746 SCAN_FIELD("pcp=", pcp, tci);
5747 SCAN_FIELD("cfi=", cfi, tci);
5748 } SCAN_END(OVS_KEY_ATTR_VLAN);
5749
5750 SCAN_SINGLE("eth_type(", ovs_be16, be16, OVS_KEY_ATTR_ETHERTYPE);
5751
5752 SCAN_BEGIN_ARRAY("mpls(", struct ovs_key_mpls, FLOW_MAX_MPLS_LABELS) {
5753 SCAN_FIELD_ARRAY("label=", mpls_label, mpls_lse);
5754 SCAN_FIELD_ARRAY("tc=", mpls_tc, mpls_lse);
5755 SCAN_FIELD_ARRAY("ttl=", mpls_ttl, mpls_lse);
5756 SCAN_FIELD_ARRAY("bos=", mpls_bos, mpls_lse);
5757 } SCAN_END_ARRAY(OVS_KEY_ATTR_MPLS);
5758
5759 SCAN_BEGIN("ipv4(", struct ovs_key_ipv4) {
5760 SCAN_FIELD("src=", ipv4, ipv4_src);
5761 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5762 SCAN_FIELD("proto=", u8, ipv4_proto);
5763 SCAN_FIELD("tos=", u8, ipv4_tos);
5764 SCAN_FIELD("ttl=", u8, ipv4_ttl);
5765 SCAN_FIELD("frag=", frag, ipv4_frag);
5766 } SCAN_END(OVS_KEY_ATTR_IPV4);
5767
5768 SCAN_BEGIN("ipv6(", struct ovs_key_ipv6) {
5769 SCAN_FIELD("src=", in6_addr, ipv6_src);
5770 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5771 SCAN_FIELD("label=", ipv6_label, ipv6_label);
5772 SCAN_FIELD("proto=", u8, ipv6_proto);
5773 SCAN_FIELD("tclass=", u8, ipv6_tclass);
5774 SCAN_FIELD("hlimit=", u8, ipv6_hlimit);
5775 SCAN_FIELD("frag=", frag, ipv6_frag);
5776 } SCAN_END(OVS_KEY_ATTR_IPV6);
5777
5778 SCAN_BEGIN("tcp(", struct ovs_key_tcp) {
5779 SCAN_FIELD("src=", be16, tcp_src);
5780 SCAN_FIELD("dst=", be16, tcp_dst);
5781 } SCAN_END(OVS_KEY_ATTR_TCP);
5782
5783 SCAN_SINGLE("tcp_flags(", ovs_be16, tcp_flags, OVS_KEY_ATTR_TCP_FLAGS);
5784
5785 SCAN_BEGIN("udp(", struct ovs_key_udp) {
5786 SCAN_FIELD("src=", be16, udp_src);
5787 SCAN_FIELD("dst=", be16, udp_dst);
5788 } SCAN_END(OVS_KEY_ATTR_UDP);
5789
5790 SCAN_BEGIN("sctp(", struct ovs_key_sctp) {
5791 SCAN_FIELD("src=", be16, sctp_src);
5792 SCAN_FIELD("dst=", be16, sctp_dst);
5793 } SCAN_END(OVS_KEY_ATTR_SCTP);
5794
5795 SCAN_BEGIN("icmp(", struct ovs_key_icmp) {
5796 SCAN_FIELD("type=", u8, icmp_type);
5797 SCAN_FIELD("code=", u8, icmp_code);
5798 } SCAN_END(OVS_KEY_ATTR_ICMP);
5799
5800 SCAN_BEGIN("icmpv6(", struct ovs_key_icmpv6) {
5801 SCAN_FIELD("type=", u8, icmpv6_type);
5802 SCAN_FIELD("code=", u8, icmpv6_code);
5803 } SCAN_END(OVS_KEY_ATTR_ICMPV6);
5804
5805 SCAN_BEGIN("arp(", struct ovs_key_arp) {
5806 SCAN_FIELD("sip=", ipv4, arp_sip);
5807 SCAN_FIELD("tip=", ipv4, arp_tip);
5808 SCAN_FIELD("op=", be16, arp_op);
5809 SCAN_FIELD("sha=", eth, arp_sha);
5810 SCAN_FIELD("tha=", eth, arp_tha);
5811 } SCAN_END(OVS_KEY_ATTR_ARP);
5812
5813 SCAN_BEGIN("nd(", struct ovs_key_nd) {
5814 SCAN_FIELD("target=", in6_addr, nd_target);
5815 SCAN_FIELD("sll=", eth, nd_sll);
5816 SCAN_FIELD("tll=", eth, nd_tll);
5817 } SCAN_END(OVS_KEY_ATTR_ND);
5818
5819 SCAN_BEGIN("nd_ext(", struct ovs_key_nd_extensions) {
5820 SCAN_FIELD("nd_reserved=", be32, nd_reserved);
5821 SCAN_FIELD("nd_options_type=", u8, nd_options_type);
5822 } SCAN_END(OVS_KEY_ATTR_ND_EXTENSIONS);
5823
5824 struct packet_type {
5825 ovs_be16 ns;
5826 ovs_be16 id;
5827 };
5828 SCAN_BEGIN("packet_type(", struct packet_type) {
5829 SCAN_FIELD("ns=", be16, ns);
5830 SCAN_FIELD("id=", be16, id);
5831 } SCAN_END(OVS_KEY_ATTR_PACKET_TYPE);
5832
5833 /* NSH is nested, so it needs special processing. */
5834 int ret = parse_odp_nsh_key_mask_attr(s, key, mask);
5835 if (ret < 0) {
5836 return ret;
5837 } else {
5838 s += ret;
5839 }
5840
5841 /* Encap is open-coded because it recursively nests other key attributes. */
5842 if (!strncmp(s, "encap(", 6)) {
5843 const char *start = s;
5844 size_t encap, encap_mask = 0;
5845
5846 encap = nl_msg_start_nested(key, OVS_KEY_ATTR_ENCAP);
5847 if (mask) {
5848 encap_mask = nl_msg_start_nested(mask, OVS_KEY_ATTR_ENCAP);
5849 }
5850
5851 s += 6;
5852 for (;;) {
5853 int retval;
5854
5855 s += strspn(s, delimiters);
5856 if (!*s) {
5857 return -EINVAL;
5858 } else if (*s == ')') {
5859 break;
5860 }
5861
5862 retval = parse_odp_key_mask_attr(context, s, key, mask);
5863 if (retval < 0) {
5864 return retval;
5865 }
5866
5867 if (nl_attr_oversized(key->size - encap - NLA_HDRLEN)) {
5868 return -E2BIG;
5869 }
5870 s += retval;
5871 }
5872 s++;
5873
5874 nl_msg_end_nested(key, encap);
5875 if (mask) {
5876 nl_msg_end_nested(mask, encap_mask);
5877 }
5878
5879 return s - start;
5880 }
5881
5882 return -EINVAL;
5883 }
5884
5885 /* Parses the string representation of a datapath flow key, in the format
5886 * output by odp_flow_key_format(). Returns 0 if successful, otherwise a
5887 * positive errno value. On success, stores NULL into '*errorp' and the flow
5888 * key is appended to 'key' as a series of Netlink attributes. On failure,
5889 * stores a malloc()'d error message in '*errorp' without changing the data in
5890 * 'key'. Either way, 'key''s data might be reallocated.
5891 *
5892  * If 'port_names' is nonnull, it points to a simap that maps from a port name
5893 * to a port number. (Port names may be used instead of port numbers in
5894 * in_port.)
5895 *
5896 * On success, the attributes appended to 'key' are individually syntactically
5897 * valid, but they may not be valid as a sequence. 'key' might, for example,
5898 * have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
5899 int
5900 odp_flow_from_string(const char *s, const struct simap *port_names,
5901 struct ofpbuf *key, struct ofpbuf *mask,
5902 char **errorp)
5903 {
5904 if (errorp) {
5905 *errorp = NULL;
5906 }
5907
5908 const size_t old_size = key->size;
5909 struct parse_odp_context context = (struct parse_odp_context) {
5910 .port_names = port_names,
5911 };
5912 for (;;) {
5913 int retval;
5914
5915 s += strspn(s, delimiters);
5916 if (!*s) {
5917 return 0;
5918 }
5919
5920 /* Skip UFID. */
5921 ovs_u128 ufid;
5922 retval = odp_ufid_from_string(s, &ufid);
5923 if (retval < 0) {
5924 if (errorp) {
5925 *errorp = xasprintf("syntax error at %s", s);
5926 }
5927 key->size = old_size;
5928 return -retval;
5929 } else if (retval > 0) {
5930 s += retval;
5931 s += s[0] == ' ' ? 1 : 0;
5932 }
5933
5934 retval = parse_odp_key_mask_attr(&context, s, key, mask);
5935 if (retval < 0) {
5936 if (errorp) {
5937 *errorp = xasprintf("syntax error at %s", s);
5938 }
5939 key->size = old_size;
5940 return -retval;
5941 }
5942 s += retval;
5943 }
5944
5945 return 0;
5946 }
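
/* Illustrative sketch (not part of the build): driving odp_flow_from_string()
 * from a caller.  The flow string and the log message below are made up; any
 * string in the format produced by odp_flow_key_format() will do.
 *
 *     struct ofpbuf key, mask;
 *     char *error;
 *
 *     ofpbuf_init(&key, 0);
 *     ofpbuf_init(&mask, 0);
 *     int err = odp_flow_from_string("in_port(1),eth_type(0x0800)",
 *                                    NULL, &key, &mask, &error);
 *     if (err) {
 *         VLOG_WARN("parse failed: %s", error);
 *         free(error);
 *     }
 *     ofpbuf_uninit(&key);
 *     ofpbuf_uninit(&mask);
 */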
5947
5948 static uint8_t
5949 ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
5950 {
5951 if (is_mask) {
5952         /* The Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration
5953          * type, not a set of flags or bitfields.  Hence, if FLOW_NW_FRAG_ANY is
5954          * clear in the struct flow nw_frag mask (which is a set of bits), we
5955          * must use a zero mask for the Netlink frag field, and an all-ones
5956          * mask otherwise. */
5957 return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
5958 }
5959 return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
5960 : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
5961 : OVS_FRAG_TYPE_FIRST;
5962 }
5963
5964 static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
5965 static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
5966 static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
5967 bool is_mask);
5968 static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
5969 bool is_mask);
5970 static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
5971 bool is_mask);
5972 static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
5973 bool is_mask);
5974 static void get_arp_key(const struct flow *, struct ovs_key_arp *);
5975 static void put_arp_key(const struct ovs_key_arp *, struct flow *);
5976 static void get_nd_key(const struct flow *, struct ovs_key_nd *);
5977 static void put_nd_key(const struct ovs_key_nd *, struct flow *);
5978 static void get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh,
5979 bool is_mask);
5980 static void put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
5981 bool is_mask);
5982
5983 /* These share the same layout. */
5984 union ovs_key_tp {
5985 struct ovs_key_tcp tcp;
5986 struct ovs_key_udp udp;
5987 struct ovs_key_sctp sctp;
5988 };
5989
5990 static void get_tp_key(const struct flow *, union ovs_key_tp *);
5991 static void put_tp_key(const union ovs_key_tp *, struct flow *);
5992
5993 static void
5994 odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms,
5995 bool export_mask, struct ofpbuf *buf)
5996 {
5997 /* New "struct flow" fields that are visible to the datapath (including all
5998 * data fields) should be translated into equivalent datapath flow fields
5999      * here (you will have to add an OVS_KEY_ATTR_* for them). */
6000 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 41);
6001
6002 struct ovs_key_ethernet *eth_key;
6003 size_t encap[FLOW_MAX_VLAN_HEADERS] = {0};
6004 size_t max_vlans;
6005 const struct flow *flow = parms->flow;
6006 const struct flow *mask = parms->mask;
6007 const struct flow *data = export_mask ? mask : flow;
6008
6009 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
6010
6011 if (flow_tnl_dst_is_set(&flow->tunnel) || export_mask) {
6012 tun_key_to_attr(buf, &data->tunnel, &parms->flow->tunnel,
6013 parms->key_buf, NULL);
6014 }
6015
6016 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
6017
6018 if (parms->support.ct_state) {
6019 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
6020 ovs_to_odp_ct_state(data->ct_state));
6021 }
6022 if (parms->support.ct_zone) {
6023 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, data->ct_zone);
6024 }
6025 if (parms->support.ct_mark) {
6026 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, data->ct_mark);
6027 }
6028 if (parms->support.ct_label) {
6029 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &data->ct_label,
6030 sizeof(data->ct_label));
6031 }
6032 if (flow->ct_nw_proto) {
6033 if (parms->support.ct_orig_tuple
6034 && flow->dl_type == htons(ETH_TYPE_IP)) {
6035 struct ovs_key_ct_tuple_ipv4 ct = {
6036 data->ct_nw_src,
6037 data->ct_nw_dst,
6038 data->ct_tp_src,
6039 data->ct_tp_dst,
6040 data->ct_nw_proto,
6041 };
6042 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, &ct,
6043 sizeof ct);
6044 } else if (parms->support.ct_orig_tuple6
6045 && flow->dl_type == htons(ETH_TYPE_IPV6)) {
6046 struct ovs_key_ct_tuple_ipv6 ct = {
6047 data->ct_ipv6_src,
6048 data->ct_ipv6_dst,
6049 data->ct_tp_src,
6050 data->ct_tp_dst,
6051 data->ct_nw_proto,
6052 };
6053 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, &ct,
6054 sizeof ct);
6055 }
6056 }
6057 if (parms->support.recirc) {
6058 nl_msg_put_u32(buf, OVS_KEY_ATTR_RECIRC_ID, data->recirc_id);
6059 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, data->dp_hash);
6060 }
6061
6062 /* Add an ingress port attribute if this is a mask or 'in_port.odp_port'
6063 * is not the magical value "ODPP_NONE". */
6064 if (export_mask || flow->in_port.odp_port != ODPP_NONE) {
6065 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, data->in_port.odp_port);
6066 }
6067
6068 nl_msg_put_be32(buf, OVS_KEY_ATTR_PACKET_TYPE, data->packet_type);
6069
6070 if (OVS_UNLIKELY(parms->probe)) {
6071 max_vlans = FLOW_MAX_VLAN_HEADERS;
6072 } else {
6073 max_vlans = MIN(parms->support.max_vlan_headers, flow_vlan_limit);
6074 }
6075
6076 /* Conditionally add L2 attributes for Ethernet packets */
6077 if (flow->packet_type == htonl(PT_ETH)) {
6078 eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
6079 sizeof *eth_key);
6080 get_ethernet_key(data, eth_key);
6081
6082 for (int encaps = 0; encaps < max_vlans; encaps++) {
6083 ovs_be16 tpid = flow->vlans[encaps].tpid;
6084
6085 if (flow->vlans[encaps].tci == htons(0)) {
6086 if (eth_type_vlan(flow->dl_type)) {
6087                     /* If the VLAN header was truncated, the TPID is in dl_type. */
6088 tpid = flow->dl_type;
6089 } else {
6090 break;
6091 }
6092 }
6093
6094 if (export_mask) {
6095 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
6096 } else {
6097 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, tpid);
6098 }
6099 nl_msg_put_be16(buf, OVS_KEY_ATTR_VLAN, data->vlans[encaps].tci);
6100 encap[encaps] = nl_msg_start_nested(buf, OVS_KEY_ATTR_ENCAP);
6101 if (flow->vlans[encaps].tci == htons(0)) {
6102 goto unencap;
6103 }
6104 }
6105 }
6106
6107 if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
6108 /* For backwards compatibility with kernels that don't support
6109 * wildcarding, the following convention is used to encode the
6110 * OVS_KEY_ATTR_ETHERTYPE for key and mask:
6111 *
6112 * key mask matches
6113 * -------- -------- -------
6114 * >0x5ff 0xffff Specified Ethernet II Ethertype.
6115 * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
6116 * <none> 0xffff Any non-Ethernet II frame (except valid
6117 * 802.3 SNAP packet with valid eth_type).
6118 */
6119 if (export_mask) {
6120 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
6121 }
6122 goto unencap;
6123 }
6124
6125 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
6126
6127 if (eth_type_vlan(flow->dl_type)) {
6128 goto unencap;
6129 }
6130
6131 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6132 struct ovs_key_ipv4 *ipv4_key;
6133
6134 ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
6135 sizeof *ipv4_key);
6136 get_ipv4_key(data, ipv4_key, export_mask);
6137 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
6138 struct ovs_key_ipv6 *ipv6_key;
6139
6140 ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
6141 sizeof *ipv6_key);
6142 get_ipv6_key(data, ipv6_key, export_mask);
6143 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
6144 flow->dl_type == htons(ETH_TYPE_RARP)) {
6145 struct ovs_key_arp *arp_key;
6146
6147 arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
6148 sizeof *arp_key);
6149 get_arp_key(data, arp_key);
6150 } else if (eth_type_mpls(flow->dl_type)) {
6151 struct ovs_key_mpls *mpls_key;
6152 int i, n;
6153
6154 n = flow_count_mpls_labels(flow, NULL);
6155 if (export_mask) {
6156 n = MIN(n, parms->support.max_mpls_depth);
6157 }
6158 mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
6159 n * sizeof *mpls_key);
6160 for (i = 0; i < n; i++) {
6161 mpls_key[i].mpls_lse = data->mpls_lse[i];
6162 }
6163 } else if (flow->dl_type == htons(ETH_TYPE_NSH)) {
6164 nsh_key_to_attr(buf, &data->nsh, NULL, 0, export_mask);
6165 }
6166
6167 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6168 if (flow->nw_proto == IPPROTO_TCP) {
6169 union ovs_key_tp *tcp_key;
6170
6171 tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
6172 sizeof *tcp_key);
6173 get_tp_key(data, tcp_key);
6174 if (data->tcp_flags || (mask && mask->tcp_flags)) {
6175 nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
6176 }
6177 } else if (flow->nw_proto == IPPROTO_UDP) {
6178 union ovs_key_tp *udp_key;
6179
6180 udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
6181 sizeof *udp_key);
6182 get_tp_key(data, udp_key);
6183 } else if (flow->nw_proto == IPPROTO_SCTP) {
6184 union ovs_key_tp *sctp_key;
6185
6186 sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
6187 sizeof *sctp_key);
6188 get_tp_key(data, sctp_key);
6189 } else if (flow->dl_type == htons(ETH_TYPE_IP)
6190 && flow->nw_proto == IPPROTO_ICMP) {
6191 struct ovs_key_icmp *icmp_key;
6192
6193 icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
6194 sizeof *icmp_key);
6195 icmp_key->icmp_type = ntohs(data->tp_src);
6196 icmp_key->icmp_code = ntohs(data->tp_dst);
6197 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
6198 && flow->nw_proto == IPPROTO_ICMPV6) {
6199 struct ovs_key_icmpv6 *icmpv6_key;
6200
6201 icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
6202 sizeof *icmpv6_key);
6203 icmpv6_key->icmpv6_type = ntohs(data->tp_src);
6204 icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
6205
6206 if (is_nd(flow, NULL)
6207 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, ICMP
6208 * type and code are 8 bits wide. Therefore, an exact match
6209 * looks like htons(0xff), not htons(0xffff). See
6210 * xlate_wc_finish() for details. */
6211 && (!export_mask || (data->tp_src == htons(0xff)
6212 && data->tp_dst == htons(0xff)))) {
6213 struct ovs_key_nd *nd_key;
6214 nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
6215 sizeof *nd_key);
6216 nd_key->nd_target = data->nd_target;
6217 nd_key->nd_sll = data->arp_sha;
6218 nd_key->nd_tll = data->arp_tha;
6219
6220                 /* Add the ND extensions attribute only if it is supported
6221                  * and the reserved field or the options type is nonzero. */
6222 if (parms->support.nd_ext) {
6223 struct ovs_key_nd_extensions *nd_ext_key;
6224
6225 if (data->igmp_group_ip4 != 0 || data->tcp_flags != 0) {
6226 nd_ext_key = nl_msg_put_unspec_uninit(buf,
6227 OVS_KEY_ATTR_ND_EXTENSIONS,
6228 sizeof *nd_ext_key);
6229 nd_ext_key->nd_reserved = data->igmp_group_ip4;
6230 nd_ext_key->nd_options_type = ntohs(data->tcp_flags);
6231 }
6232 }
6233 }
6234 }
6235 }
6236
6237 unencap:
6238 for (int encaps = max_vlans - 1; encaps >= 0; encaps--) {
6239 if (encap[encaps]) {
6240 nl_msg_end_nested(buf, encap[encaps]);
6241 }
6242 }
6243 }
6244
6245 /* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
6246 *
6247 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
6248 * capable of being expanded to allow for that much space. */
6249 void
6250 odp_flow_key_from_flow(const struct odp_flow_key_parms *parms,
6251 struct ofpbuf *buf)
6252 {
6253 odp_flow_key_from_flow__(parms, false, buf);
6254 }
6255
6256 /* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
6257 * 'buf'.
6258 *
6259 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
6260 * capable of being expanded to allow for that much space. */
6261 void
6262 odp_flow_key_from_mask(const struct odp_flow_key_parms *parms,
6263 struct ofpbuf *buf)
6264 {
6265 odp_flow_key_from_flow__(parms, true, buf);
6266 }
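
/* Illustrative sketch (not part of the build): serializing a flow and its
 * wildcards with the two functions above.  'flow' and 'wc' are assumed to be
 * an already populated struct flow and struct flow_wildcards.
 *
 *     uint64_t key_stub[DIV_ROUND_UP(ODPUTIL_FLOW_KEY_BYTES, 8)];
 *     uint64_t mask_stub[DIV_ROUND_UP(ODPUTIL_FLOW_KEY_BYTES, 8)];
 *     struct odp_flow_key_parms parms = {
 *         .flow = &flow,
 *         .mask = &wc.masks,
 *     };
 *     struct ofpbuf key, mask;
 *
 *     ofpbuf_use_stack(&key, key_stub, sizeof key_stub);
 *     odp_flow_key_from_flow(&parms, &key);
 *
 *     ofpbuf_use_stack(&mask, mask_stub, sizeof mask_stub);
 *     odp_flow_key_from_mask(&parms, &mask);
 */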
6267
6268 /* Generates an ODP flow key from the given packet metadata. */
6269 void
6270 odp_key_from_dp_packet(struct ofpbuf *buf, const struct dp_packet *packet)
6271 {
6272 const struct pkt_metadata *md = &packet->md;
6273
6274 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
6275
6276 if (flow_tnl_dst_is_set(&md->tunnel)) {
6277 tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL, NULL);
6278 }
6279
6280 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
6281
6282 if (md->ct_state) {
6283 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
6284 ovs_to_odp_ct_state(md->ct_state));
6285 if (md->ct_zone) {
6286 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, md->ct_zone);
6287 }
6288 if (md->ct_mark) {
6289 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, md->ct_mark);
6290 }
6291 if (!ovs_u128_is_zero(md->ct_label)) {
6292 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &md->ct_label,
6293 sizeof(md->ct_label));
6294 }
6295 if (md->ct_orig_tuple_ipv6) {
6296 if (md->ct_orig_tuple.ipv6.ipv6_proto) {
6297 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
6298 &md->ct_orig_tuple.ipv6,
6299 sizeof md->ct_orig_tuple.ipv6);
6300 }
6301 } else {
6302 if (md->ct_orig_tuple.ipv4.ipv4_proto) {
6303 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
6304 &md->ct_orig_tuple.ipv4,
6305 sizeof md->ct_orig_tuple.ipv4);
6306 }
6307 }
6308 }
6309
6310     /* Add an ingress port attribute if 'md->in_port.odp_port' is not the
6311      * magical value "ODPP_NONE". */
6312 if (md->in_port.odp_port != ODPP_NONE) {
6313 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, md->in_port.odp_port);
6314 }
6315
6316     /* Add OVS_KEY_ATTR_ETHERTYPE for non-Ethernet (L3) packets. */
6317 if (pt_ns(packet->packet_type) == OFPHTN_ETHERTYPE) {
6318 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE,
6319 pt_ns_type_be(packet->packet_type));
6320 }
6321 }
6322
6323 /* Generates packet metadata from the given ODP flow key. */
6324 void
6325 odp_key_to_dp_packet(const struct nlattr *key, size_t key_len,
6326 struct dp_packet *packet)
6327 {
6328 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6329 const struct nlattr *nla;
6330 struct pkt_metadata *md = &packet->md;
6331 ovs_be32 packet_type = htonl(PT_UNKNOWN);
6332 ovs_be16 ethertype = 0;
6333 size_t left;
6334
6335 pkt_metadata_init(md, ODPP_NONE);
6336
6337 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6338 enum ovs_key_attr type = nl_attr_type(nla);
6339 size_t len = nl_attr_get_size(nla);
6340 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6341 OVS_KEY_ATTR_MAX, type);
6342
6343 if (len != expected_len && expected_len >= 0) {
6344 continue;
6345 }
6346
6347 switch (type) {
6348 case OVS_KEY_ATTR_RECIRC_ID:
6349 md->recirc_id = nl_attr_get_u32(nla);
6350 break;
6351 case OVS_KEY_ATTR_DP_HASH:
6352 md->dp_hash = nl_attr_get_u32(nla);
6353 break;
6354 case OVS_KEY_ATTR_PRIORITY:
6355 md->skb_priority = nl_attr_get_u32(nla);
6356 break;
6357 case OVS_KEY_ATTR_SKB_MARK:
6358 md->pkt_mark = nl_attr_get_u32(nla);
6359 break;
6360 case OVS_KEY_ATTR_CT_STATE:
6361 md->ct_state = odp_to_ovs_ct_state(nl_attr_get_u32(nla));
6362 break;
6363 case OVS_KEY_ATTR_CT_ZONE:
6364 md->ct_zone = nl_attr_get_u16(nla);
6365 break;
6366 case OVS_KEY_ATTR_CT_MARK:
6367 md->ct_mark = nl_attr_get_u32(nla);
6368 break;
6369 case OVS_KEY_ATTR_CT_LABELS: {
6370 md->ct_label = nl_attr_get_u128(nla);
6371 break;
6372 }
6373 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
6374 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(nla);
6375 md->ct_orig_tuple.ipv4 = *ct;
6376 md->ct_orig_tuple_ipv6 = false;
6377 break;
6378 }
6379 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
6380 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(nla);
6381
6382 md->ct_orig_tuple.ipv6 = *ct;
6383 md->ct_orig_tuple_ipv6 = true;
6384 break;
6385 }
6386 case OVS_KEY_ATTR_TUNNEL: {
6387 enum odp_key_fitness res;
6388
6389 res = odp_tun_key_from_attr(nla, &md->tunnel, NULL);
6390 if (res == ODP_FIT_ERROR) {
6391 memset(&md->tunnel, 0, sizeof md->tunnel);
6392 }
6393 break;
6394 }
6395 case OVS_KEY_ATTR_IN_PORT:
6396 md->in_port.odp_port = nl_attr_get_odp_port(nla);
6397 break;
6398 case OVS_KEY_ATTR_ETHERNET:
6399 /* Presence of OVS_KEY_ATTR_ETHERNET indicates Ethernet packet. */
6400 packet_type = htonl(PT_ETH);
6401 break;
6402 case OVS_KEY_ATTR_ETHERTYPE:
6403 ethertype = nl_attr_get_be16(nla);
6404 break;
6405 case OVS_KEY_ATTR_UNSPEC:
6406 case OVS_KEY_ATTR_ENCAP:
6407 case OVS_KEY_ATTR_VLAN:
6408 case OVS_KEY_ATTR_IPV4:
6409 case OVS_KEY_ATTR_IPV6:
6410 case OVS_KEY_ATTR_TCP:
6411 case OVS_KEY_ATTR_UDP:
6412 case OVS_KEY_ATTR_ICMP:
6413 case OVS_KEY_ATTR_ICMPV6:
6414 case OVS_KEY_ATTR_ARP:
6415 case OVS_KEY_ATTR_ND:
6416 case OVS_KEY_ATTR_ND_EXTENSIONS:
6417 case OVS_KEY_ATTR_SCTP:
6418 case OVS_KEY_ATTR_TCP_FLAGS:
6419 case OVS_KEY_ATTR_MPLS:
6420 case OVS_KEY_ATTR_PACKET_TYPE:
6421 case OVS_KEY_ATTR_NSH:
6422 case __OVS_KEY_ATTR_MAX:
6423 default:
6424 break;
6425 }
6426 }
6427
6428 if (packet_type == htonl(PT_ETH)) {
6429 packet->packet_type = htonl(PT_ETH);
6430 } else if (packet_type == htonl(PT_UNKNOWN) && ethertype != 0) {
6431 packet->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
6432 ntohs(ethertype));
6433 } else {
6434 VLOG_ERR_RL(&rl, "Packet without ETHERTYPE. Unknown packet_type.");
6435 }
6436 }
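
/* Illustrative sketch (not part of the build): the two functions above form a
 * metadata round trip.  'packet' and 'other_packet' are assumed to be struct
 * dp_packet pointers; the stub size simply reuses ODPUTIL_FLOW_KEY_BYTES.
 *
 *     uint64_t stub[DIV_ROUND_UP(ODPUTIL_FLOW_KEY_BYTES, 8)];
 *     struct ofpbuf md_key;
 *
 *     ofpbuf_use_stack(&md_key, stub, sizeof stub);
 *     odp_key_from_dp_packet(&md_key, packet);
 *
 *     odp_key_to_dp_packet(md_key.data, md_key.size, other_packet);
 */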
6437
6438 uint32_t
6439 odp_flow_key_hash(const struct nlattr *key, size_t key_len)
6440 {
6441 BUILD_ASSERT_DECL(!(NLA_ALIGNTO % sizeof(uint32_t)));
6442 return hash_bytes32(ALIGNED_CAST(const uint32_t *, key), key_len, 0);
6443 }
6444
6445 static void
6446 log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
6447 uint64_t attrs, int out_of_range_attr,
6448 const struct nlattr *key, size_t key_len)
6449 {
6450 struct ds s;
6451 int i;
6452
6453 if (VLOG_DROP_DBG(rl)) {
6454 return;
6455 }
6456
6457 ds_init(&s);
6458 for (i = 0; i < 64; i++) {
6459 if (attrs & (UINT64_C(1) << i)) {
6460 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6461
6462 ds_put_format(&s, " %s",
6463 ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
6464 }
6465 }
6466 if (out_of_range_attr) {
6467 ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
6468 }
6469
6470 ds_put_cstr(&s, ": ");
6471 odp_flow_key_format(key, key_len, &s);
6472
6473 VLOG_DBG("%s:%s", title, ds_cstr(&s));
6474 ds_destroy(&s);
6475 }
6476
6477 static uint8_t
6478 odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
6479 {
6480 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6481
6482 if (is_mask) {
6483 return odp_frag ? FLOW_NW_FRAG_MASK : 0;
6484 }
6485
6486 if (odp_frag > OVS_FRAG_TYPE_LATER) {
6487 VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
6488 return 0xff; /* Error. */
6489 }
6490
6491 return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
6492 : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
6493 : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
6494 }
6495
6496 /* Parses the attributes in the 'key_len' bytes of 'key' into 'attrs', which
6497 * must have OVS_KEY_ATTR_MAX + 1 elements. Stores each attribute in 'key'
6498 * into the corresponding element of 'attrs'.
6499 *
6500 * Stores a bitmask of the attributes' indexes found in 'key' into
6501 * '*present_attrsp'.
6502 *
6503 * If an attribute beyond OVS_KEY_ATTR_MAX is found, stores its attribute type
6504 * (or one of them, if more than one) into '*out_of_range_attrp', otherwise 0.
6505 *
6506 * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
6507 * error message in '*errorp'. */
6508 static bool
6509 parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
6510 const struct nlattr *attrs[], uint64_t *present_attrsp,
6511 int *out_of_range_attrp, char **errorp)
6512 {
6513 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6514 const struct nlattr *nla;
6515 uint64_t present_attrs;
6516 size_t left;
6517
6518 BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
6519 present_attrs = 0;
6520 *out_of_range_attrp = 0;
6521 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6522 uint16_t type = nl_attr_type(nla);
6523 size_t len = nl_attr_get_size(nla);
6524 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6525 OVS_KEY_ATTR_MAX, type);
6526
6527 if (len != expected_len && expected_len >= 0) {
6528 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6529
6530 odp_parse_error(&rl, errorp, "attribute %s has length %"PRIuSIZE" "
6531 "but should have length %d",
6532 ovs_key_attr_to_string(type, namebuf,
6533 sizeof namebuf),
6534 len, expected_len);
6535 return false;
6536 }
6537
6538 if (type > OVS_KEY_ATTR_MAX) {
6539 *out_of_range_attrp = type;
6540 } else {
6541 if (present_attrs & (UINT64_C(1) << type)) {
6542 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6543
6544 odp_parse_error(&rl, errorp,
6545 "duplicate %s attribute in flow key",
6546 ovs_key_attr_to_string(type, namebuf,
6547 sizeof namebuf));
6548 return false;
6549 }
6550
6551 present_attrs |= UINT64_C(1) << type;
6552 attrs[type] = nla;
6553 }
6554 }
6555 if (left) {
6556 odp_parse_error(&rl, errorp, "trailing garbage in flow key");
6557 return false;
6558 }
6559
6560 *present_attrsp = present_attrs;
6561 return true;
6562 }
6563
6564 static enum odp_key_fitness
6565 check_expectations(uint64_t present_attrs, int out_of_range_attr,
6566 uint64_t expected_attrs,
6567 const struct nlattr *key, size_t key_len)
6568 {
6569 uint64_t missing_attrs;
6570 uint64_t extra_attrs;
6571
6572 missing_attrs = expected_attrs & ~present_attrs;
6573 if (missing_attrs) {
6574 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6575 log_odp_key_attributes(&rl, "expected but not present",
6576 missing_attrs, 0, key, key_len);
6577 return ODP_FIT_TOO_LITTLE;
6578 }
6579
6580 extra_attrs = present_attrs & ~expected_attrs;
6581 if (extra_attrs || out_of_range_attr) {
6582 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6583 log_odp_key_attributes(&rl, "present but not expected",
6584 extra_attrs, out_of_range_attr, key, key_len);
6585 return ODP_FIT_TOO_MUCH;
6586 }
6587
6588 return ODP_FIT_PERFECT;
6589 }
6590
6591 /* Initializes 'flow->dl_type' based on the attributes in 'attrs', in which the
6592 * attributes in the bit-mask 'present_attrs' are present. Returns true if
6593 * successful, false on failure.
6594 *
6595 * Sets 1-bits in '*expected_attrs' for the attributes in 'attrs' that were
6596 * consulted. 'flow' is assumed to be a flow key unless 'src_flow' is nonnull,
6597 * in which case 'flow' is a flow mask and 'src_flow' is its corresponding
6598 * previously parsed flow key.
6599 *
6600 * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
6601 * error message in '*errorp'. */
6602 static bool
6603 parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6604 uint64_t present_attrs, uint64_t *expected_attrs,
6605 struct flow *flow, const struct flow *src_flow,
6606 char **errorp)
6607 {
6608 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6609 bool is_mask = flow != src_flow;
6610
6611 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
6612 flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
6613 if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
6614 odp_parse_error(&rl, errorp,
6615 "invalid Ethertype %"PRIu16" in flow key",
6616 ntohs(flow->dl_type));
6617 return false;
6618 }
6619 if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
6620 flow->dl_type != htons(0xffff)) {
6621 odp_parse_error(&rl, errorp, "can't bitwise match non-Ethernet II "
6622 "\"Ethertype\" %#"PRIx16" (with mask %#"PRIx16")",
6623 ntohs(src_flow->dl_type), ntohs(flow->dl_type));
6624 return false;
6625 }
6626 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
6627 } else {
6628 if (!is_mask) {
6629 /* Default ethertype for well-known L3 packets. */
6630 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6631 flow->dl_type = htons(ETH_TYPE_IP);
6632 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6633 flow->dl_type = htons(ETH_TYPE_IPV6);
6634 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6635 flow->dl_type = htons(ETH_TYPE_MPLS);
6636 } else {
6637 flow->dl_type = htons(FLOW_DL_TYPE_NONE);
6638 }
6639 } else if (src_flow->packet_type != htonl(PT_ETH)) {
6640 /* dl_type is mandatory for non-Ethernet packets */
6641 flow->dl_type = htons(0xffff);
6642 } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
6643 /* See comments in odp_flow_key_from_flow__(). */
6644 odp_parse_error(&rl, errorp,
6645 "mask expected for non-Ethernet II frame");
6646 return false;
6647 }
6648 }
6649 return true;
6650 }
6651
6652 /* Initializes MPLS, L3, and L4 fields in 'flow' based on the attributes in
6653 * 'attrs', in which the attributes in the bit-mask 'present_attrs' are
6654 * present. The caller also indicates an out-of-range attribute
6655 * 'out_of_range_attr' if one was present when parsing (if so, the fitness
6656 * cannot be perfect).
6657 *
6658 * Sets 1-bits in '*expected_attrs' for the attributes in 'attrs' that were
6659 * consulted. 'flow' is assumed to be a flow key unless 'src_flow' is nonnull,
6660 * in which case 'flow' is a flow mask and 'src_flow' is its corresponding
6661 * previously parsed flow key.
6662 *
6663  * Returns fitness based on any discrepancies between present and expected
6664  * attributes; if 'need_check' is false, ODP_FIT_PERFECT is returned instead.
6665 *
6666 * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
6667 * error message in '*errorp'. 'key' and 'key_len' are just used for error
6668 * reporting in this case. */
6669 static enum odp_key_fitness
6670 parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6671 uint64_t present_attrs, int out_of_range_attr,
6672 uint64_t *expected_attrs, struct flow *flow,
6673 const struct nlattr *key, size_t key_len,
6674 const struct flow *src_flow, bool need_check, char **errorp)
6675 {
6676 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6677 bool is_mask = src_flow != flow;
6678 const void *check_start = NULL;
6679 size_t check_len = 0;
6680 enum ovs_key_attr expected_bit = 0xff;
6681
6682 if (eth_type_mpls(src_flow->dl_type)) {
6683 if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6684 *expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
6685 }
6686 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6687 size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
6688 const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
6689 int n = size / sizeof(ovs_be32);
6690 int i;
6691
6692 if (!size || size % sizeof(ovs_be32)) {
6693 odp_parse_error(&rl, errorp,
6694 "MPLS LSEs have invalid length %"PRIuSIZE,
6695 size);
6696 return ODP_FIT_ERROR;
6697 }
6698 if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
6699 odp_parse_error(&rl, errorp,
6700                                 "unexpected MPLS Ethertype mask %#"PRIx16,
6701 ntohs(flow->dl_type));
6702 return ODP_FIT_ERROR;
6703 }
6704
6705 for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
6706 flow->mpls_lse[i] = mpls_lse[i];
6707 }
6708 if (n > FLOW_MAX_MPLS_LABELS) {
6709 return ODP_FIT_TOO_MUCH;
6710 }
6711
6712 if (!is_mask) {
6713 /* BOS may be set only in the innermost label. */
6714 for (i = 0; i < n - 1; i++) {
6715 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
6716 odp_parse_error(&rl, errorp,
6717 "MPLS BOS set in non-innermost label");
6718 return ODP_FIT_ERROR;
6719 }
6720 }
6721
6722 /* BOS must be set in the innermost label. */
6723 if (n < FLOW_MAX_MPLS_LABELS
6724 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
6725 return ODP_FIT_TOO_LITTLE;
6726 }
6727 }
6728 }
6729
6730 goto done;
6731 } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
6732 if (!is_mask) {
6733 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
6734 }
6735 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6736 const struct ovs_key_ipv4 *ipv4_key;
6737
6738 ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
6739 put_ipv4_key(ipv4_key, flow, is_mask);
6740 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6741 odp_parse_error(&rl, errorp, "OVS_KEY_ATTR_IPV4 has invalid "
6742 "nw_frag %#"PRIx8, flow->nw_frag);
6743 return ODP_FIT_ERROR;
6744 }
6745
6746 if (is_mask) {
6747 check_start = ipv4_key;
6748 check_len = sizeof *ipv4_key;
6749 expected_bit = OVS_KEY_ATTR_IPV4;
6750 }
6751 }
6752 } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
6753 if (!is_mask) {
6754 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
6755 }
6756 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6757 const struct ovs_key_ipv6 *ipv6_key;
6758
6759 ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
6760 put_ipv6_key(ipv6_key, flow, is_mask);
6761 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6762 odp_parse_error(&rl, errorp, "OVS_KEY_ATTR_IPV6 has invalid "
6763 "nw_frag %#"PRIx8, flow->nw_frag);
6764 return ODP_FIT_ERROR;
6765 }
6766 if (is_mask) {
6767 check_start = ipv6_key;
6768 check_len = sizeof *ipv6_key;
6769 expected_bit = OVS_KEY_ATTR_IPV6;
6770 }
6771 }
6772 } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
6773 src_flow->dl_type == htons(ETH_TYPE_RARP)) {
6774 if (!is_mask) {
6775 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
6776 }
6777 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
6778 const struct ovs_key_arp *arp_key;
6779
6780 arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
6781 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
6782 odp_parse_error(&rl, errorp,
6783 "unsupported ARP opcode %"PRIu16" in flow "
6784 "key", ntohs(arp_key->arp_op));
6785 return ODP_FIT_ERROR;
6786 }
6787 put_arp_key(arp_key, flow);
6788 if (is_mask) {
6789 check_start = arp_key;
6790 check_len = sizeof *arp_key;
6791 expected_bit = OVS_KEY_ATTR_ARP;
6792 }
6793 }
6794 } else if (src_flow->dl_type == htons(ETH_TYPE_NSH)) {
6795 if (!is_mask) {
6796 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_NSH;
6797 }
6798 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_NSH)) {
6799 if (odp_nsh_key_from_attr__(attrs[OVS_KEY_ATTR_NSH],
6800 is_mask, &flow->nsh,
6801 NULL, errorp) == ODP_FIT_ERROR) {
6802 return ODP_FIT_ERROR;
6803 }
6804 if (is_mask) {
6805 check_start = nl_attr_get(attrs[OVS_KEY_ATTR_NSH]);
6806 check_len = nl_attr_get_size(attrs[OVS_KEY_ATTR_NSH]);
6807 expected_bit = OVS_KEY_ATTR_NSH;
6808 }
6809 }
6810 } else {
6811 goto done;
6812 }
6813 if (check_len > 0) { /* Happens only when 'is_mask'. */
6814 if (!is_all_zeros(check_start, check_len) &&
6815 flow->dl_type != htons(0xffff)) {
6816 odp_parse_error(&rl, errorp, "unexpected L3 matching with "
6817 "masked Ethertype %#"PRIx16"/%#"PRIx16,
6818 ntohs(src_flow->dl_type),
6819 ntohs(flow->dl_type));
6820 return ODP_FIT_ERROR;
6821 } else {
6822 *expected_attrs |= UINT64_C(1) << expected_bit;
6823 }
6824 }
6825
6826 expected_bit = OVS_KEY_ATTR_UNSPEC;
6827 if (src_flow->nw_proto == IPPROTO_TCP
6828 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6829 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6830 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6831 if (!is_mask) {
6832 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
6833 }
6834 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
6835 const union ovs_key_tp *tcp_key;
6836
6837 tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
6838 put_tp_key(tcp_key, flow);
6839 expected_bit = OVS_KEY_ATTR_TCP;
6840 }
6841 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
6842 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
6843 flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
6844 }
6845 } else if (src_flow->nw_proto == IPPROTO_UDP
6846 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6847 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6848 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6849 if (!is_mask) {
6850 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
6851 }
6852 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
6853 const union ovs_key_tp *udp_key;
6854
6855 udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
6856 put_tp_key(udp_key, flow);
6857 expected_bit = OVS_KEY_ATTR_UDP;
6858 }
6859 } else if (src_flow->nw_proto == IPPROTO_SCTP
6860 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6861 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6862 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6863 if (!is_mask) {
6864 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
6865 }
6866 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
6867 const union ovs_key_tp *sctp_key;
6868
6869 sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
6870 put_tp_key(sctp_key, flow);
6871 expected_bit = OVS_KEY_ATTR_SCTP;
6872 }
6873 } else if (src_flow->nw_proto == IPPROTO_ICMP
6874 && src_flow->dl_type == htons(ETH_TYPE_IP)
6875 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6876 if (!is_mask) {
6877 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
6878 }
6879 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
6880 const struct ovs_key_icmp *icmp_key;
6881
6882 icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
6883 flow->tp_src = htons(icmp_key->icmp_type);
6884 flow->tp_dst = htons(icmp_key->icmp_code);
6885 expected_bit = OVS_KEY_ATTR_ICMP;
6886 }
6887 } else if (src_flow->nw_proto == IPPROTO_ICMPV6
6888 && src_flow->dl_type == htons(ETH_TYPE_IPV6)
6889 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6890 if (!is_mask) {
6891 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
6892 }
6893 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
6894 const struct ovs_key_icmpv6 *icmpv6_key;
6895
6896 icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
6897 flow->tp_src = htons(icmpv6_key->icmpv6_type);
6898 flow->tp_dst = htons(icmpv6_key->icmpv6_code);
6899 expected_bit = OVS_KEY_ATTR_ICMPV6;
6900 if (is_nd(src_flow, NULL)) {
6901 if (!is_mask) {
6902 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
6903 }
6904 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
6905 const struct ovs_key_nd *nd_key;
6906
6907 nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
6908 flow->nd_target = nd_key->nd_target;
6909 flow->arp_sha = nd_key->nd_sll;
6910 flow->arp_tha = nd_key->nd_tll;
6911 if (is_mask) {
6912 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
6913 * ICMP type and code are 8 bits wide. Therefore, an
6914 * exact match looks like htons(0xff), not
6915 * htons(0xffff). See xlate_wc_finish() for details.
6916                          */
6917 if (!is_all_zeros(nd_key, sizeof *nd_key) &&
6918 (flow->tp_src != htons(0xff) ||
6919 flow->tp_dst != htons(0xff))) {
6920 odp_parse_error(&rl, errorp,
6921 "ICMP (src,dst) masks should be "
6922 "(0xff,0xff) but are actually "
6923 "(%#"PRIx16",%#"PRIx16")",
6924 ntohs(flow->tp_src),
6925 ntohs(flow->tp_dst));
6926 return ODP_FIT_ERROR;
6927 } else {
6928 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
6929 }
6930 }
6931 }
6932 if (present_attrs &
6933 (UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS)) {
6934 const struct ovs_key_nd_extensions *nd_ext_key;
6935 if (!is_mask) {
6936 *expected_attrs |=
6937 UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
6938 }
6939
6940 nd_ext_key =
6941 nl_attr_get(attrs[OVS_KEY_ATTR_ND_EXTENSIONS]);
6942 flow->igmp_group_ip4 = nd_ext_key->nd_reserved;
6943 flow->tcp_flags = htons(nd_ext_key->nd_options_type);
6944
6945 if (is_mask) {
6946 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
6947 * ICMP type and code are 8 bits wide. Therefore, an
6948 * exact match looks like htons(0xff), not
6949 * htons(0xffff). See xlate_wc_finish() for details.
6950                          */
6951 if (!is_all_zeros(nd_ext_key, sizeof *nd_ext_key) &&
6952 (flow->tp_src != htons(0xff) ||
6953 flow->tp_dst != htons(0xff))) {
6954 return ODP_FIT_ERROR;
6955 } else {
6956 *expected_attrs |=
6957 UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
6958 }
6959 }
6960 }
6961 }
6962 }
6963 } else if (src_flow->nw_proto == IPPROTO_IGMP
6964 && src_flow->dl_type == htons(ETH_TYPE_IP)) {
6965 /* OVS userspace parses the IGMP type, code, and group, but its
6966 * datapaths do not, so there is always missing information. */
6967 return ODP_FIT_TOO_LITTLE;
6968 }
6969 if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
6970 if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
6971 odp_parse_error(&rl, errorp, "flow matches on L4 ports but does "
6972 "not define an L4 protocol");
6973 return ODP_FIT_ERROR;
6974 } else {
6975 *expected_attrs |= UINT64_C(1) << expected_bit;
6976 }
6977 }
6978
6979 done:
6980 return need_check ? check_expectations(present_attrs, out_of_range_attr,
6981 *expected_attrs, key, key_len) : ODP_FIT_PERFECT;
6982 }
6983
6984 /* Parses the 802.1Q header and then the encapsulated L3 attributes. */
6985 static enum odp_key_fitness
6986 parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6987 uint64_t present_attrs, int out_of_range_attr,
6988 uint64_t expected_attrs, struct flow *flow,
6989 const struct nlattr *key, size_t key_len,
6990 const struct flow *src_flow, char **errorp)
6991 {
6992 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6993 bool is_mask = src_flow != flow;
6994
6995 const struct nlattr *encap;
6996 enum odp_key_fitness encap_fitness;
6997 enum odp_key_fitness fitness = ODP_FIT_ERROR;
6998 int encaps = 0;
6999
7000 while (encaps < flow_vlan_limit &&
7001 (is_mask
7002 ? (src_flow->vlans[encaps].tci & htons(VLAN_CFI)) != 0
7003 : eth_type_vlan(flow->dl_type))) {
7004
7005 encap = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
7006 ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
7007
7008 /* Calculate fitness of outer attributes. */
7009 if (!is_mask) {
7010 expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
7011 (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
7012 } else {
7013 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
7014 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
7015 }
7016 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
7017 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
7018 }
7019 }
7020 fitness = check_expectations(present_attrs, out_of_range_attr,
7021 expected_attrs, key, key_len);
7022
7023 /* Set vlan_tci.
7024 * Remove the TPID from dl_type since it's not the real Ethertype. */
7025 flow->vlans[encaps].tpid = flow->dl_type;
7026 flow->dl_type = htons(0);
7027 flow->vlans[encaps].tci =
7028 (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
7029 ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
7030 : htons(0));
7031 if (!is_mask) {
7032 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) ||
7033 !(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
7034 return ODP_FIT_TOO_LITTLE;
7035 } else if (flow->vlans[encaps].tci == htons(0)) {
7036 /* Corner case for a truncated 802.1Q header. */
7037 if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
7038 return ODP_FIT_TOO_MUCH;
7039 }
7040 return fitness;
7041 } else if (!(flow->vlans[encaps].tci & htons(VLAN_CFI))) {
7042 odp_parse_error(
7043 &rl, errorp, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
7044 "but CFI bit is not set", ntohs(flow->vlans[encaps].tci));
7045 return ODP_FIT_ERROR;
7046 }
7047 } else {
7048 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
7049 return fitness;
7050 }
7051 }
7052
7053 /* Now parse the encapsulated attributes. */
7054 if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
7055 attrs, &present_attrs, &out_of_range_attr,
7056 errorp)) {
7057 return ODP_FIT_ERROR;
7058 }
7059 expected_attrs = 0;
7060
7061 if (!parse_ethertype(attrs, present_attrs, &expected_attrs,
7062 flow, src_flow, errorp)) {
7063 return ODP_FIT_ERROR;
7064 }
7065 encap_fitness = parse_l2_5_onward(attrs, present_attrs,
7066 out_of_range_attr,
7067 &expected_attrs,
7068 flow, key, key_len,
7069 src_flow, false, errorp);
7070 if (encap_fitness != ODP_FIT_PERFECT) {
7071 return encap_fitness;
7072 }
7073 encaps++;
7074 }
7075
7076 return check_expectations(present_attrs, out_of_range_attr,
7077 expected_attrs, key, key_len);
7078 }
7079
7080 static enum odp_key_fitness
7081 odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
7082 struct flow *flow, const struct flow *src_flow,
7083 char **errorp)
7084 {
7085 /* New "struct flow" fields that are visible to the datapath (including all
7086 * data fields) should be translated from equivalent datapath flow fields
7087  * here (you will have to add an OVS_KEY_ATTR_* for them). */
7088 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 41);
7089
7090 enum odp_key_fitness fitness = ODP_FIT_ERROR;
7091 if (errorp) {
7092 *errorp = NULL;
7093 }
7094
7095 const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
7096 uint64_t expected_attrs;
7097 uint64_t present_attrs;
7098 int out_of_range_attr;
7099 bool is_mask = src_flow != flow;
7100
7101 memset(flow, 0, sizeof *flow);
7102
7103 /* Parse attributes. */
7104 if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
7105 &out_of_range_attr, errorp)) {
7106 goto exit;
7107 }
7108 expected_attrs = 0;
7109
7110 /* Metadata. */
7111 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
7112 flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
7113 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
7114 } else if (is_mask) {
7115 /* Always exact match recirc_id if it is not specified. */
7116 flow->recirc_id = UINT32_MAX;
7117 }
7118
7119 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
7120 flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
7121 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
7122 }
7123 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
7124 flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
7125 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
7126 }
7127
7128 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
7129 flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
7130 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
7131 }
7132
7133 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_STATE)) {
7134 uint32_t odp_state = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_STATE]);
7135
7136 flow->ct_state = odp_to_ovs_ct_state(odp_state);
7137 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_STATE;
7138 }
7139 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE)) {
7140 flow->ct_zone = nl_attr_get_u16(attrs[OVS_KEY_ATTR_CT_ZONE]);
7141 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE;
7142 }
7143 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_MARK)) {
7144 flow->ct_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_MARK]);
7145 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_MARK;
7146 }
7147 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS)) {
7148 flow->ct_label = nl_attr_get_u128(attrs[OVS_KEY_ATTR_CT_LABELS]);
7149 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS;
7150 }
7151 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
7152 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
7153 flow->ct_nw_src = ct->ipv4_src;
7154 flow->ct_nw_dst = ct->ipv4_dst;
7155 flow->ct_nw_proto = ct->ipv4_proto;
7156 flow->ct_tp_src = ct->src_port;
7157 flow->ct_tp_dst = ct->dst_port;
7158 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
7159 }
7160 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
7161 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
7162
7163 flow->ct_ipv6_src = ct->ipv6_src;
7164 flow->ct_ipv6_dst = ct->ipv6_dst;
7165 flow->ct_nw_proto = ct->ipv6_proto;
7166 flow->ct_tp_src = ct->src_port;
7167 flow->ct_tp_dst = ct->dst_port;
7168 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
7169 }
7170
7171 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
7172 enum odp_key_fitness res;
7173
7174 res = odp_tun_key_from_attr__(attrs[OVS_KEY_ATTR_TUNNEL], is_mask,
7175 &flow->tunnel, errorp);
7176 if (res == ODP_FIT_ERROR) {
7177 goto exit;
7178 } else if (res == ODP_FIT_PERFECT) {
7179 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
7180 }
7181 }
7182
7183 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
7184 flow->in_port.odp_port
7185 = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
7186 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
7187 } else if (!is_mask) {
7188 flow->in_port.odp_port = ODPP_NONE;
7189 }
7190
7191 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE)) {
7192 flow->packet_type
7193 = nl_attr_get_be32(attrs[OVS_KEY_ATTR_PACKET_TYPE]);
7194 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE;
7195 if (pt_ns(src_flow->packet_type) == OFPHTN_ETHERTYPE) {
7196 flow->dl_type = pt_ns_type_be(flow->packet_type);
7197 }
7198 } else if (!is_mask) {
7199 flow->packet_type = htonl(PT_ETH);
7200 }
7201
7202 /* Check for Ethernet header. */
7203 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
7204 const struct ovs_key_ethernet *eth_key;
7205
7206 eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
7207 put_ethernet_key(eth_key, flow);
7208 if (!is_mask) {
7209 flow->packet_type = htonl(PT_ETH);
7210 }
7211 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
7212 }
7213 else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
7214 ovs_be16 ethertype = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
7215 if (!is_mask) {
7216 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
7217 ntohs(ethertype));
7218 }
7219 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
7220 }
7221
7222 /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
7223 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
7224 src_flow, errorp)) {
7225 goto exit;
7226 }
7227
7228 if (is_mask
7229 ? (src_flow->vlans[0].tci & htons(VLAN_CFI)) != 0
7230 : eth_type_vlan(src_flow->dl_type)) {
7231 fitness = parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
7232 expected_attrs, flow, key, key_len,
7233 src_flow, errorp);
7234 } else {
7235 if (is_mask) {
7236 /* A missing VLAN mask means exact match on vlan_tci 0 (== no
7237 * VLAN). */
7238 flow->vlans[0].tpid = htons(0xffff);
7239 flow->vlans[0].tci = htons(0xffff);
7240 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
7241 flow->vlans[0].tci = nl_attr_get_be16(
7242 attrs[OVS_KEY_ATTR_VLAN]);
7243 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
7244 }
7245 }
7246 fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
7247 &expected_attrs, flow, key, key_len,
7248 src_flow, true, errorp);
7249 }
7250
7251 exit:;
7252 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7253 if (fitness == ODP_FIT_ERROR && (errorp || !VLOG_DROP_WARN(&rl))) {
7254 struct ds s = DS_EMPTY_INITIALIZER;
7255 if (is_mask) {
7256 ds_put_cstr(&s, "the flow mask in error is: ");
7257 odp_flow_key_format(key, key_len, &s);
7258 ds_put_cstr(&s, ", for the following flow key: ");
7259 flow_format(&s, src_flow, NULL);
7260 } else {
7261 ds_put_cstr(&s, "the flow key in error is: ");
7262 odp_flow_key_format(key, key_len, &s);
7263 }
7264 if (errorp) {
7265 char *old_error = *errorp;
7266 *errorp = xasprintf("%s; %s", old_error, ds_cstr(&s));
7267 free(old_error);
7268 } else {
7269 VLOG_WARN("%s", ds_cstr(&s));
7270 }
7271 ds_destroy(&s);
7272 }
7273 return fitness;
7274 }
7275
7276 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
7277 * structure in 'flow'. Returns an ODP_FIT_* value that indicates how well
7278 * 'key' fits our expectations for what a flow key should contain.
7279 *
7280 * The 'in_port' will be the datapath's understanding of the port. The
7281 * caller will need to translate with odp_port_to_ofp_port() if the
7282 * OpenFlow port is needed.
7283 *
7284 * This function doesn't take the packet itself as an argument because none of
7285 * the currently understood OVS_KEY_ATTR_* attributes require it. Currently,
7286 * it is always possible to infer which additional attribute(s) should appear
7287 * by looking at the attributes for lower-level protocols, e.g. if the network
7288 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
7289 * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
7290 * must be absent.
7291 *
7292 * If 'errorp' is nonnull, this function uses it for detailed error reports: if
7293 * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in
7294 * '*errorp', otherwise NULL. */
7295 enum odp_key_fitness
7296 odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
7297 struct flow *flow, char **errorp)
7298 {
7299 return odp_flow_key_to_flow__(key, key_len, flow, flow, errorp);
7300 }
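
/* Illustrative sketch (not part of the build): recovering a struct flow from
 * datapath attributes and acting on the fitness.  'key' and 'key_len' are
 * assumed to hold OVS_KEY_ATTR_* attributes, e.g. from a flow dump.
 *
 *     struct flow flow;
 *     char *error;
 *     enum odp_key_fitness fitness;
 *
 *     fitness = odp_flow_key_to_flow(key, key_len, &flow, &error);
 *     if (fitness == ODP_FIT_ERROR) {
 *         VLOG_WARN("bad flow key: %s", error);
 *         free(error);
 *     } else if (fitness != ODP_FIT_PERFECT) {
 *         VLOG_DBG("flow key fitness: %s",
 *                  odp_key_fitness_to_string(fitness));
 *     }
 */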
7301
7302 /* Converts the 'mask_key_len' bytes of OVS_KEY_ATTR_* attributes in 'mask_key'
7303 * to a mask structure in 'mask'. 'flow' must be a previously translated flow
7304 * corresponding to 'mask' and similarly flow_key/flow_key_len must be the
7305 * attributes from that flow. Returns an ODP_FIT_* value that indicates how
7306 * well 'key' fits our expectations for what a flow key should contain.
7307 *
7308 * If 'errorp' is nonnull, this function uses it for detailed error reports: if
7309 * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in
7310 * '*errorp', otherwise NULL. */
7311 enum odp_key_fitness
7312 odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len,
7313 struct flow_wildcards *mask, const struct flow *src_flow,
7314 char **errorp)
7315 {
7316 if (mask_key_len) {
7317 return odp_flow_key_to_flow__(mask_key, mask_key_len,
7318 &mask->masks, src_flow, errorp);
7319 } else {
7320 if (errorp) {
7321 *errorp = NULL;
7322 }
7323
7324 /* A missing mask means that the flow should be exact matched.
7325 * Generate an appropriate exact wildcard for the flow. */
7326 flow_wildcards_init_for_packet(mask, src_flow);
7327
7328 return ODP_FIT_PERFECT;
7329 }
7330 }
7331
7332 /* Converts the Netlink-formatted key/mask pair into 'match'.  Returns EINVAL
7333  * if odp_flow_key_from_flow()/from_mask() and odp_flow_key_to_flow()/to_mask()
7334  * disagree on the acceptable form of a flow, otherwise 0. */
7335 int
7336 parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len,
7337 const struct nlattr *mask, size_t mask_len,
7338 struct match *match)
7339 {
7340 enum odp_key_fitness fitness;
7341
7342 fitness = odp_flow_key_to_flow(key, key_len, &match->flow, NULL);
7343 if (fitness) {
7344 /* This should not happen: it indicates that
7345 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
7346 * the acceptable form of a flow. Log the problem as an error,
7347 * with enough details to enable debugging. */
7348 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7349
7350 if (!VLOG_DROP_ERR(&rl)) {
7351 struct ds s;
7352
7353 ds_init(&s);
7354 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
7355 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
7356 ds_destroy(&s);
7357 }
7358
7359 return EINVAL;
7360 }
7361
7362 fitness = odp_flow_key_to_mask(mask, mask_len, &match->wc, &match->flow,
7363 NULL);
7364 if (fitness) {
7365 /* This should not happen: it indicates that
7366 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
7367 * disagree on the acceptable form of a mask. Log the problem
7368 * as an error, with enough details to enable debugging. */
7369 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7370
7371 if (!VLOG_DROP_ERR(&rl)) {
7372 struct ds s;
7373
7374 ds_init(&s);
7375 odp_flow_format(key, key_len, mask, mask_len, NULL, &s,
7376 true);
7377 VLOG_ERR("internal error parsing flow mask %s (%s)",
7378 ds_cstr(&s), odp_key_fitness_to_string(fitness));
7379 ds_destroy(&s);
7380 }
7381
7382 return EINVAL;
7383 }
7384
7385 return 0;
7386 }
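
/* Illustrative sketch (not part of the build): turning a key/mask pair
 * received from the datapath into a struct match.  'key', 'key_len', 'mask',
 * and 'mask_len' are assumed to come from a dpif flow dump or upcall.
 *
 *     struct match match;
 *
 *     if (parse_key_and_mask_to_match(key, key_len, mask, mask_len, &match)) {
 *         return;         (the failure has already been logged)
 *     }
 *     ...then use match.flow and match.wc...
 */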
7387
7388 /* Returns 'fitness' as a string, for use in debug messages. */
7389 const char *
7390 odp_key_fitness_to_string(enum odp_key_fitness fitness)
7391 {
7392 switch (fitness) {
7393 case ODP_FIT_PERFECT:
7394 return "OK";
7395 case ODP_FIT_TOO_MUCH:
7396 return "too_much";
7397 case ODP_FIT_TOO_LITTLE:
7398 return "too_little";
7399 case ODP_FIT_ERROR:
7400 return "error";
7401 default:
7402 return "<unknown>";
7403 }
7404 }
7405
7406 /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
7407 * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
7408 * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
7409 * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
7410 * null, then the return value is not meaningful.) */
7411 size_t
7412 odp_put_userspace_action(uint32_t pid,
7413 const void *userdata, size_t userdata_size,
7414 odp_port_t tunnel_out_port,
7415 bool include_actions,
7416 struct ofpbuf *odp_actions)
7417 {
7418 size_t userdata_ofs;
7419 size_t offset;
7420
7421 offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
7422 nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
7423 if (userdata) {
7424 userdata_ofs = odp_actions->size + NLA_HDRLEN;
7425
7426 /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
7427 * module before Linux 3.10 required the userdata to be exactly 8 bytes
7428 * long:
7429 *
7430 * - The kernel rejected shorter userdata with -ERANGE.
7431 *
7432 * - The kernel silently dropped userdata beyond the first 8 bytes.
7433 *
7434 * Thus, for maximum compatibility, always put at least 8 bytes. (We
7435 * separately disable features that required more than 8 bytes.) */
7436 memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
7437 MAX(8, userdata_size)),
7438 userdata, userdata_size);
7439 } else {
7440 userdata_ofs = 0;
7441 }
7442 if (tunnel_out_port != ODPP_NONE) {
7443 nl_msg_put_odp_port(odp_actions, OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,
7444 tunnel_out_port);
7445 }
7446 if (include_actions) {
7447 nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS);
7448 }
7449 nl_msg_end_nested(odp_actions, offset);
7450
7451 return userdata_ofs;
7452 }
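
/* Illustrative sketch (not part of the build): emitting a userspace action
 * that carries an 8-byte cookie.  The pid and cookie values are placeholders;
 * a real caller would obtain the pid from dpif_port_get_pid().
 *
 *     struct ofpbuf actions;
 *     uint64_t cookie = 0x1234;
 *
 *     ofpbuf_init(&actions, 0);
 *     odp_put_userspace_action(42, &cookie, sizeof cookie,
 *                              ODPP_NONE, false, &actions);
 *     ofpbuf_uninit(&actions);
 */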
7453
7454 void
7455 odp_put_pop_eth_action(struct ofpbuf *odp_actions)
7456 {
7457 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_ETH);
7458 }
7459
7460 void
7461 odp_put_push_eth_action(struct ofpbuf *odp_actions,
7462 const struct eth_addr *eth_src,
7463 const struct eth_addr *eth_dst)
7464 {
7465 struct ovs_action_push_eth eth;
7466
7467 memset(&eth, 0, sizeof eth);
7468 if (eth_src) {
7469 eth.addresses.eth_src = *eth_src;
7470 }
7471 if (eth_dst) {
7472 eth.addresses.eth_dst = *eth_dst;
7473 }
7474
7475 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_ETH,
7476 &eth, sizeof eth);
7477 }
7478
7479 void
7480 odp_put_tunnel_action(const struct flow_tnl *tunnel,
7481 struct ofpbuf *odp_actions, const char *tnl_type)
7482 {
7483 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7484 tun_key_to_attr(odp_actions, tunnel, tunnel, NULL, tnl_type);
7485 nl_msg_end_nested(odp_actions, offset);
7486 }
7487
7488 void
7489 odp_put_tnl_push_action(struct ofpbuf *odp_actions,
7490 struct ovs_action_push_tnl *data)
7491 {
7492 int size = offsetof(struct ovs_action_push_tnl, header);
7493
7494 size += data->header_len;
7495 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
7496 }
7497
7498 \f
7499 /* The commit_odp_actions() function and its helpers. */
7500
7501 static void
7502 commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
7503 const void *key, size_t key_size)
7504 {
7505 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7506 nl_msg_put_unspec(odp_actions, key_type, key, key_size);
7507 nl_msg_end_nested(odp_actions, offset);
7508 }
7509
7510 /* Masked set actions have a mask following the data within the netlink
7511 * attribute. The unmasked bits in the data will be cleared as the data
7512 * is copied to the action. */
7513 void
7514 commit_masked_set_action(struct ofpbuf *odp_actions,
7515 enum ovs_key_attr key_type,
7516 const void *key_, const void *mask_, size_t key_size)
7517 {
7518 size_t offset = nl_msg_start_nested(odp_actions,
7519 OVS_ACTION_ATTR_SET_MASKED);
7520 char *data = nl_msg_put_unspec_uninit(odp_actions, key_type, key_size * 2);
7521 const char *key = key_, *mask = mask_;
7522
7523 memcpy(data + key_size, mask, key_size);
7524 /* Clear unmasked bits while copying. */
7525 while (key_size--) {
7526 *data++ = *key++ & *mask++;
7527 }
7528 nl_msg_end_nested(odp_actions, offset);
7529 }
7530
7531 /* If any of the flow key data that ODP actions can modify are different in
7532 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
7533 * 'odp_actions' that changes the flow tunneling information in the key from
7534 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
7535 * same way. In other words, operates the same as commit_odp_actions(), but
7536 * only on tunneling information. */
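/* For example, if 'base->tunnel' is empty while 'flow->tunnel' has a tunnel
 * destination and tunnel ID set, this emits a nested OVS_ACTION_ATTR_SET
 * containing an OVS_KEY_ATTR_TUNNEL attribute with the new tunnel metadata. */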
7537 void
7538 commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
7539 struct ofpbuf *odp_actions, const char *tnl_type)
7540 {
7541 /* A valid IPv4 tunnel must have non-zero ip_dst; a valid IPv6 tunnel
7542 * must have non-zero ipv6_dst. */
7543 if (flow_tnl_dst_is_set(&flow->tunnel)) {
7544 if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
7545 return;
7546 }
7547 memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
7548 odp_put_tunnel_action(&base->tunnel, odp_actions, tnl_type);
7549 }
7550 }
7551
7552 struct offsetof_sizeof {
7553 int offset;
7554 int size;
7555 };
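/* Each key type's field list is described by an array of these, terminated by
 * a zero-size entry.  The arrays used below come from the
 * OVS_KEY_*_OFFSETOF_SIZEOF_ARR macros; an equivalent hand-written example for
 * Ethernet keys would look like:
 *
 *     struct offsetof_sizeof eth_fields[] = {
 *         { offsetof(struct ovs_key_ethernet, eth_src),
 *           sizeof(struct eth_addr) },
 *         { offsetof(struct ovs_key_ethernet, eth_dst),
 *           sizeof(struct eth_addr) },
 *         { 0, 0 },
 *     };
 */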
7556
7557 /* Compares each of the fields in 'key0' and 'key1'. The fields are specified
7558 * in 'offsetof_sizeof_arr', which is an array terminated by a 0-size field.
7559 * Returns true if at least one of the fields differs, false if they are all equal.
7560 * As a side effect, for each field that is the same in 'key0' and 'key1',
7561 * zeros the corresponding bytes in 'mask'. */
7562 static bool
7563 keycmp_mask(const void *key0, const void *key1,
7564 struct offsetof_sizeof *offsetof_sizeof_arr, void *mask)
7565 {
7566 bool differ = false;
7567
7568 for (int field = 0 ; ; field++) {
7569 int size = offsetof_sizeof_arr[field].size;
7570 int offset = offsetof_sizeof_arr[field].offset;
7571 if (size == 0) {
7572 break;
7573 }
7574
7575 char *pkey0 = ((char *)key0) + offset;
7576 char *pkey1 = ((char *)key1) + offset;
7577 char *pmask = ((char *)mask) + offset;
7578 if (memcmp(pkey0, pkey1, size) == 0) {
7579 memset(pmask, 0, size);
7580 } else {
7581 differ = true;
7582 }
7583 }
7584
7585 return differ;
7586 }
7587
7588 static bool
7589 commit(enum ovs_key_attr attr, bool use_masked_set,
7590 const void *key, void *base, void *mask, size_t size,
7591 struct offsetof_sizeof *offsetof_sizeof_arr,
7592 struct ofpbuf *odp_actions)
7593 {
7594 if (keycmp_mask(key, base, offsetof_sizeof_arr, mask)) {
7595 bool fully_masked = odp_mask_is_exact(attr, mask, size);
7596
7597 if (use_masked_set && !fully_masked) {
7598 commit_masked_set_action(odp_actions, attr, key, mask, size);
7599 } else {
7600 if (!fully_masked) {
7601 memset(mask, 0xff, size);
7602 }
7603 commit_set_action(odp_actions, attr, key, size);
7604 }
7605 memcpy(base, key, size);
7606 return true;
7607 } else {
7608 /* Mask bits are set when we have either read or set the corresponding
7609 * values. Masked bits will be exact-matched, no need to set them
7610 * if the value did not actually change. */
7611 return false;
7612 }
7613 }
7614
7615 static void
7616 get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
7617 {
7618 eth->eth_src = flow->dl_src;
7619 eth->eth_dst = flow->dl_dst;
7620 }
7621
7622 static void
7623 put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
7624 {
7625 flow->dl_src = eth->eth_src;
7626 flow->dl_dst = eth->eth_dst;
7627 }
7628
7629 static void
7630 commit_set_ether_action(const struct flow *flow, struct flow *base_flow,
7631 struct ofpbuf *odp_actions,
7632 struct flow_wildcards *wc,
7633 bool use_masked)
7634 {
7635 struct ovs_key_ethernet key, base, mask;
7636 struct offsetof_sizeof ovs_key_ethernet_offsetof_sizeof_arr[] =
7637 OVS_KEY_ETHERNET_OFFSETOF_SIZEOF_ARR;
7638 if (flow->packet_type != htonl(PT_ETH)) {
7639 return;
7640 }
7641
7642 get_ethernet_key(flow, &key);
7643 get_ethernet_key(base_flow, &base);
7644 get_ethernet_key(&wc->masks, &mask);
7645
7646 if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
7647 &key, &base, &mask, sizeof key,
7648 ovs_key_ethernet_offsetof_sizeof_arr, odp_actions)) {
7649 put_ethernet_key(&base, base_flow);
7650 put_ethernet_key(&mask, &wc->masks);
7651 }
7652 }
7653
7654 static void
7655 commit_vlan_action(const struct flow* flow, struct flow *base,
7656 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7657 {
7658 int base_n = flow_count_vlan_headers(base);
7659 int flow_n = flow_count_vlan_headers(flow);
7660 flow_skip_common_vlan_headers(base, &base_n, flow, &flow_n);
7661
7662 /* Pop all mismatching VLAN headers of base, then push those of flow. */
7663 for (; base_n >= 0; base_n--) {
7664 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
7665 wc->masks.vlans[base_n].qtag = OVS_BE32_MAX;
7666 }
7667
7668 for (; flow_n >= 0; flow_n--) {
7669 struct ovs_action_push_vlan vlan;
7670
7671 vlan.vlan_tpid = flow->vlans[flow_n].tpid;
7672 vlan.vlan_tci = flow->vlans[flow_n].tci;
7673 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
7674 &vlan, sizeof vlan);
7675 }
7676 memcpy(base->vlans, flow->vlans, sizeof(base->vlans));
7677 }
7678
7679 /* Wildcarding already done at action translation time. */
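/* For example, rewriting the only label of a single-label MPLS packet commits
 * one set(mpls(...)) action rather than a POP_MPLS followed by a PUSH_MPLS. */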
7680 static void
7681 commit_mpls_action(const struct flow *flow, struct flow *base,
7682 struct ofpbuf *odp_actions)
7683 {
7684 int base_n = flow_count_mpls_labels(base, NULL);
7685 int flow_n = flow_count_mpls_labels(flow, NULL);
7686 int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
7687 NULL);
7688
7689 while (base_n > common_n) {
7690 if (base_n - 1 == common_n && flow_n > common_n) {
7691 /* If base has exactly one more LSE than it has in common with
7692 * flow, and flow has at least one more LSE than is common,
7693 * then the topmost LSE of base may be updated with a single
7694 * set action. */
7695 struct ovs_key_mpls mpls_key;
7696
7697 mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
7698 commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
7699 &mpls_key, sizeof mpls_key);
7700 flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
7701 common_n++;
7702 } else {
7703 * Otherwise, if there are more LSEs in base than are common between
7704 * base and flow then pop the topmost one. */
7705 ovs_be16 dl_type;
7706 /* If all the LSEs are to be popped and this is not the last
7707 * remaining LSE, then use ETH_TYPE_MPLS as the ethertype parameter
7708 * of the POP_MPLS action instead of flow->dl_type.
7709 *
7710 * This is because the POP_MPLS action requires its ethertype
7711 * argument to be an MPLS ethernet type but in this case
7712 * flow->dl_type will be a non-MPLS ethernet type.
7713 *
7714 * When the final POP_MPLS action occurs, it uses flow->dl_type, and
7715 * the resulting packet will have the desired dl_type. */
7716 if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
7717 dl_type = htons(ETH_TYPE_MPLS);
7718 } else {
7719 dl_type = flow->dl_type;
7720 }
7721 nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
7722 ovs_assert(flow_pop_mpls(base, base_n, flow->dl_type, NULL));
7723 base_n--;
7724 }
7725 }
7726
7727 /* If, after the above popping and setting, there are more LSEs in flow
7728 * than base then some LSEs need to be pushed. */
7729 while (base_n < flow_n) {
7730 struct ovs_action_push_mpls *mpls;
7731
7732 mpls = nl_msg_put_unspec_zero(odp_actions,
7733 OVS_ACTION_ATTR_PUSH_MPLS,
7734 sizeof *mpls);
7735 mpls->mpls_ethertype = flow->dl_type;
7736 mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
7737 /* Update base flow's MPLS stack, but do not clear L3. We need the L3
7738 * headers if the flow is restored later due to returning from a patch
7739 * port or group bucket. */
7740 flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL, false);
7741 flow_set_mpls_lse(base, 0, mpls->mpls_lse);
7742 base_n++;
7743 }
7744 }
7745
7746 static void
7747 get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
7748 {
7749 ipv4->ipv4_src = flow->nw_src;
7750 ipv4->ipv4_dst = flow->nw_dst;
7751 ipv4->ipv4_proto = flow->nw_proto;
7752 ipv4->ipv4_tos = flow->nw_tos;
7753 ipv4->ipv4_ttl = flow->nw_ttl;
7754 ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7755 }
7756
7757 static void
7758 put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
7759 {
7760 flow->nw_src = ipv4->ipv4_src;
7761 flow->nw_dst = ipv4->ipv4_dst;
7762 flow->nw_proto = ipv4->ipv4_proto;
7763 flow->nw_tos = ipv4->ipv4_tos;
7764 flow->nw_ttl = ipv4->ipv4_ttl;
7765 flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
7766 }
7767
7768 static void
7769 commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
7770 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7771 bool use_masked)
7772 {
7773 struct ovs_key_ipv4 key, mask, base;
7774 struct offsetof_sizeof ovs_key_ipv4_offsetof_sizeof_arr[] =
7775 OVS_KEY_IPV4_OFFSETOF_SIZEOF_ARR;
7776
7777 /* Check that nw_proto and nw_frag remain unchanged. */
7778 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7779 flow->nw_frag == base_flow->nw_frag);
7780
7781 get_ipv4_key(flow, &key, false);
7782 get_ipv4_key(base_flow, &base, false);
7783 get_ipv4_key(&wc->masks, &mask, true);
7784 mask.ipv4_proto = 0; /* Not writable. */
7785 mask.ipv4_frag = 0; /* Not writable. */
7786
7787 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7788 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7789 mask.ipv4_tos &= ~IP_ECN_MASK;
7790 }
7791
7792 if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
7793 ovs_key_ipv4_offsetof_sizeof_arr, odp_actions)) {
7794 put_ipv4_key(&base, base_flow, false);
7795 if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
7796 put_ipv4_key(&mask, &wc->masks, true);
7797 }
7798 }
7799 }
7800
7801 static void
7802 get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
7803 {
7804 ipv6->ipv6_src = flow->ipv6_src;
7805 ipv6->ipv6_dst = flow->ipv6_dst;
7806 ipv6->ipv6_label = flow->ipv6_label;
7807 ipv6->ipv6_proto = flow->nw_proto;
7808 ipv6->ipv6_tclass = flow->nw_tos;
7809 ipv6->ipv6_hlimit = flow->nw_ttl;
7810 ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7811 }
7812
7813 static void
7814 put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
7815 {
7816 flow->ipv6_src = ipv6->ipv6_src;
7817 flow->ipv6_dst = ipv6->ipv6_dst;
7818 flow->ipv6_label = ipv6->ipv6_label;
7819 flow->nw_proto = ipv6->ipv6_proto;
7820 flow->nw_tos = ipv6->ipv6_tclass;
7821 flow->nw_ttl = ipv6->ipv6_hlimit;
7822 flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
7823 }
7824
7825 static void
7826 commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
7827 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7828 bool use_masked)
7829 {
7830 struct ovs_key_ipv6 key, mask, base;
7831 struct offsetof_sizeof ovs_key_ipv6_offsetof_sizeof_arr[] =
7832 OVS_KEY_IPV6_OFFSETOF_SIZEOF_ARR;
7833
7834 /* Check that nw_proto and nw_frag remain unchanged. */
7835 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7836 flow->nw_frag == base_flow->nw_frag);
7837
7838 get_ipv6_key(flow, &key, false);
7839 get_ipv6_key(base_flow, &base, false);
7840 get_ipv6_key(&wc->masks, &mask, true);
7841 mask.ipv6_proto = 0; /* Not writable. */
7842 mask.ipv6_frag = 0; /* Not writable. */
7843 mask.ipv6_label &= htonl(IPV6_LABEL_MASK); /* Upper 12 bits not writable. */
7844
7845 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7846 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7847 mask.ipv6_tclass &= ~IP_ECN_MASK;
7848 }
7849
7850 if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
7851 ovs_key_ipv6_offsetof_sizeof_arr, odp_actions)) {
7852 put_ipv6_key(&base, base_flow, false);
7853 if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
7854 put_ipv6_key(&mask, &wc->masks, true);
7855 }
7856 }
7857 }
7858
7859 static void
7860 get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
7861 {
7862 /* ARP key has padding, clear it. */
7863 memset(arp, 0, sizeof *arp);
7864
7865 arp->arp_sip = flow->nw_src;
7866 arp->arp_tip = flow->nw_dst;
7867 arp->arp_op = htons(flow->nw_proto);
7868 arp->arp_sha = flow->arp_sha;
7869 arp->arp_tha = flow->arp_tha;
7870 }
7871
7872 static void
7873 put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
7874 {
7875 flow->nw_src = arp->arp_sip;
7876 flow->nw_dst = arp->arp_tip;
7877 flow->nw_proto = ntohs(arp->arp_op);
7878 flow->arp_sha = arp->arp_sha;
7879 flow->arp_tha = arp->arp_tha;
7880 }
7881
7882 static enum slow_path_reason
7883 commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
7884 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7885 {
7886 struct ovs_key_arp key, mask, base;
7887 struct offsetof_sizeof ovs_key_arp_offsetof_sizeof_arr[] =
7888 OVS_KEY_ARP_OFFSETOF_SIZEOF_ARR;
7889
7890 get_arp_key(flow, &key);
7891 get_arp_key(base_flow, &base);
7892 get_arp_key(&wc->masks, &mask);
7893
7894 if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
7895 ovs_key_arp_offsetof_sizeof_arr, odp_actions)) {
7896 put_arp_key(&base, base_flow);
7897 put_arp_key(&mask, &wc->masks);
7898 return SLOW_ACTION;
7899 }
7900 return 0;
7901 }
7902
7903 static void
7904 get_icmp_key(const struct flow *flow, struct ovs_key_icmp *icmp)
7905 {
7906 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
7907 icmp->icmp_type = ntohs(flow->tp_src);
7908 icmp->icmp_code = ntohs(flow->tp_dst);
7909 }
7910
7911 static void
7912 put_icmp_key(const struct ovs_key_icmp *icmp, struct flow *flow)
7913 {
7914 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
7915 flow->tp_src = htons(icmp->icmp_type);
7916 flow->tp_dst = htons(icmp->icmp_code);
7917 }
7918
7919 static enum slow_path_reason
7920 commit_set_icmp_action(const struct flow *flow, struct flow *base_flow,
7921 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7922 {
7923 struct ovs_key_icmp key, mask, base;
7924 struct offsetof_sizeof ovs_key_icmp_offsetof_sizeof_arr[] =
7925 OVS_KEY_ICMP_OFFSETOF_SIZEOF_ARR;
7926 enum ovs_key_attr attr;
7927
7928 if (is_icmpv4(flow, NULL)) {
7929 attr = OVS_KEY_ATTR_ICMP;
7930 } else if (is_icmpv6(flow, NULL)) {
7931 attr = OVS_KEY_ATTR_ICMPV6;
7932 } else {
7933 return 0;
7934 }
7935
7936 get_icmp_key(flow, &key);
7937 get_icmp_key(base_flow, &base);
7938 get_icmp_key(&wc->masks, &mask);
7939
7940 if (commit(attr, false, &key, &base, &mask, sizeof key,
7941 ovs_key_icmp_offsetof_sizeof_arr, odp_actions)) {
7942 put_icmp_key(&base, base_flow);
7943 put_icmp_key(&mask, &wc->masks);
7944 return SLOW_ACTION;
7945 }
7946 return 0;
7947 }
7948
7949 static void
7950 get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
7951 {
7952 nd->nd_target = flow->nd_target;
7953 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
7954 nd->nd_sll = flow->arp_sha;
7955 nd->nd_tll = flow->arp_tha;
7956 }
7957
7958 static void
7959 put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
7960 {
7961 flow->nd_target = nd->nd_target;
7962 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
7963 flow->arp_sha = nd->nd_sll;
7964 flow->arp_tha = nd->nd_tll;
7965 }
7966
7967 static void
7968 get_nd_extensions_key(const struct flow *flow,
7969 struct ovs_key_nd_extensions *nd_ext)
7970 {
7971 /* ND Extensions key has padding, clear it. */
7972 memset(nd_ext, 0, sizeof *nd_ext);
7973 nd_ext->nd_reserved = flow->igmp_group_ip4;
7974 nd_ext->nd_options_type = ntohs(flow->tcp_flags);
7975 }
7976
7977 static void
7978 put_nd_extensions_key(const struct ovs_key_nd_extensions *nd_ext,
7979 struct flow *flow)
7980 {
7981 flow->igmp_group_ip4 = nd_ext->nd_reserved;
7982 flow->tcp_flags = htons(nd_ext->nd_options_type);
7983 }
7984
7985 static enum slow_path_reason
7986 commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
7987 struct ofpbuf *odp_actions,
7988 struct flow_wildcards *wc, bool use_masked)
7989 {
7990 struct ovs_key_nd key, mask, base;
7991 struct offsetof_sizeof ovs_key_nd_offsetof_sizeof_arr[] =
7992 OVS_KEY_ND_OFFSETOF_SIZEOF_ARR;
7993
7994 get_nd_key(flow, &key);
7995 get_nd_key(base_flow, &base);
7996 get_nd_key(&wc->masks, &mask);
7997
7998 if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
7999 ovs_key_nd_offsetof_sizeof_arr, odp_actions)) {
8000 put_nd_key(&base, base_flow);
8001 put_nd_key(&mask, &wc->masks);
8002 return SLOW_ACTION;
8003 }
8004
8005 return 0;
8006 }
8007
8008 static enum slow_path_reason
8009 commit_set_nd_extensions_action(const struct flow *flow,
8010 struct flow *base_flow,
8011 struct ofpbuf *odp_actions,
8012 struct flow_wildcards *wc, bool use_masked)
8013 {
8014 struct ovs_key_nd_extensions key, mask, base;
8015 struct offsetof_sizeof ovs_key_nd_extensions_offsetof_sizeof_arr[] =
8016 OVS_KEY_ND_EXTENSIONS_OFFSETOF_SIZEOF_ARR;
8017
8018 get_nd_extensions_key(flow, &key);
8019 get_nd_extensions_key(base_flow, &base);
8020 get_nd_extensions_key(&wc->masks, &mask);
8021
8022 if (commit(OVS_KEY_ATTR_ND_EXTENSIONS, use_masked, &key, &base, &mask,
8023 sizeof key, ovs_key_nd_extensions_offsetof_sizeof_arr,
8024 odp_actions)) {
8025 put_nd_extensions_key(&base, base_flow);
8026 put_nd_extensions_key(&mask, &wc->masks);
8027 return SLOW_ACTION;
8028 }
8029 return 0;
8030 }
8031
8032 static enum slow_path_reason
8033 commit_set_nw_action(const struct flow *flow, struct flow *base,
8034 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8035 bool use_masked)
8036 {
8037 uint32_t reason;
8038
8039 /* Check if 'flow' really has an L3 header. */
8040 if (!flow->nw_proto) {
8041 return 0;
8042 }
8043
8044 switch (ntohs(base->dl_type)) {
8045 case ETH_TYPE_IP:
8046 commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
8047 break;
8048
8049 case ETH_TYPE_IPV6:
8050 commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
8051 if (base->nw_proto == IPPROTO_ICMPV6) {
8052 /* Commit extended attrs first to make sure
8053  * correct options are added. */
8054 reason = commit_set_nd_extensions_action(flow, base,
8055 odp_actions, wc, use_masked);
8056 reason |= commit_set_nd_action(flow, base, odp_actions,
8057 wc, use_masked);
8058 return reason;
8059 }
8060 break;
8061
8062 case ETH_TYPE_ARP:
8063 return commit_set_arp_action(flow, base, odp_actions, wc);
8064 }
8065
8066 return 0;
8067 }
8068
8069 static inline void
8070 get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh, bool is_mask)
8071 {
8072 *nsh = flow->nsh;
8073 if (!is_mask) {
8074 if (nsh->mdtype != NSH_M_TYPE1) {
8075 memset(nsh->context, 0, sizeof(nsh->context));
8076 }
8077 }
8078 }
8079
8080 static inline void
8081 put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
8082 bool is_mask OVS_UNUSED)
8083 {
8084 flow->nsh = *nsh;
8085 if (flow->nsh.mdtype != NSH_M_TYPE1) {
8086 memset(flow->nsh.context, 0, sizeof(flow->nsh.context));
8087 }
8088 }
8089
8090 static bool
8091 commit_nsh(const struct ovs_key_nsh * flow_nsh, bool use_masked_set,
8092 const struct ovs_key_nsh *key, struct ovs_key_nsh *base,
8093 struct ovs_key_nsh *mask, size_t size,
8094 struct ofpbuf *odp_actions)
8095 {
8096 enum ovs_key_attr attr = OVS_KEY_ATTR_NSH;
8097
8098 if (memcmp(key, base, size) == 0) {
8099 /* Mask bits are set when we have either read or set the corresponding
8100 * values. Masked bits will be exact-matched, no need to set them
8101 * if the value did not actually change. */
8102 return false;
8103 }
8104
8105 bool fully_masked = odp_mask_is_exact(attr, mask, size);
8106
8107 if (use_masked_set && !fully_masked) {
8108 size_t nsh_key_ofs;
8109 struct ovs_nsh_key_base nsh_base;
8110 struct ovs_nsh_key_base nsh_base_mask;
8111 struct ovs_nsh_key_md1 md1;
8112 struct ovs_nsh_key_md1 md1_mask;
8113 size_t offset = nl_msg_start_nested(odp_actions,
8114 OVS_ACTION_ATTR_SET_MASKED);
8115
8116 nsh_base.flags = key->flags;
8117 nsh_base.ttl = key->ttl;
8118 nsh_base.mdtype = key->mdtype;
8119 nsh_base.np = key->np;
8120 nsh_base.path_hdr = key->path_hdr;
8121
8122 nsh_base_mask.flags = mask->flags;
8123 nsh_base_mask.ttl = mask->ttl;
8124 nsh_base_mask.mdtype = mask->mdtype;
8125 nsh_base_mask.np = mask->np;
8126 nsh_base_mask.path_hdr = mask->path_hdr;
8127
8128 /* OVS_KEY_ATTR_NSH keys */
8129 nsh_key_ofs = nl_msg_start_nested(odp_actions, OVS_KEY_ATTR_NSH);
8130
8131 /* put value and mask for OVS_NSH_KEY_ATTR_BASE */
8132 char *data = nl_msg_put_unspec_uninit(odp_actions,
8133 OVS_NSH_KEY_ATTR_BASE,
8134 2 * sizeof(nsh_base));
8135 const char *lkey = (char *)&nsh_base, *lmask = (char *)&nsh_base_mask;
8136 size_t lkey_size = sizeof(nsh_base);
8137
8138 while (lkey_size--) {
8139 *data++ = *lkey++ & *lmask++;
8140 }
8141 lmask = (char *)&nsh_base_mask;
8142 memcpy(data, lmask, sizeof(nsh_base_mask));
8143
8144 switch (key->mdtype) {
8145 case NSH_M_TYPE1:
8146 memcpy(md1.context, key->context, sizeof key->context);
8147 memcpy(md1_mask.context, mask->context, sizeof mask->context);
8148
8149 /* put value and mask for OVS_NSH_KEY_ATTR_MD1 */
8150 data = nl_msg_put_unspec_uninit(odp_actions,
8151 OVS_NSH_KEY_ATTR_MD1,
8152 2 * sizeof(md1));
8153 lkey = (char *)&md1;
8154 lmask = (char *)&md1_mask;
8155 lkey_size = sizeof(md1);
8156
8157 while (lkey_size--) {
8158 *data++ = *lkey++ & *lmask++;
8159 }
8160 lmask = (char *)&md1_mask;
8161 memcpy(data, lmask, sizeof(md1_mask));
8162 break;
8163 case NSH_M_TYPE2:
8164 default:
8165 /* No match support for other MD formats yet. */
8166 break;
8167 }
8168
8169 nl_msg_end_nested(odp_actions, nsh_key_ofs);
8170
8171 nl_msg_end_nested(odp_actions, offset);
8172 } else {
8173 if (!fully_masked) {
8174 memset(mask, 0xff, size);
8175 }
8176 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
8177 nsh_key_to_attr(odp_actions, flow_nsh, NULL, 0, false);
8178 nl_msg_end_nested(odp_actions, offset);
8179 }
8180 memcpy(base, key, size);
8181 return true;
8182 }
8183
8184 static void
8185 commit_set_nsh_action(const struct flow *flow, struct flow *base_flow,
8186 struct ofpbuf *odp_actions,
8187 struct flow_wildcards *wc,
8188 bool use_masked)
8189 {
8190 struct ovs_key_nsh key, mask, base;
8191
8192 if (flow->dl_type != htons(ETH_TYPE_NSH) ||
8193 !memcmp(&base_flow->nsh, &flow->nsh, sizeof base_flow->nsh)) {
8194 return;
8195 }
8196
8197 /* Check that mdtype and np remain unchanged. */
8198 ovs_assert(flow->nsh.mdtype == base_flow->nsh.mdtype &&
8199 flow->nsh.np == base_flow->nsh.np);
8200
8201 get_nsh_key(flow, &key, false);
8202 get_nsh_key(base_flow, &base, false);
8203 get_nsh_key(&wc->masks, &mask, true);
8204 mask.mdtype = 0; /* Not writable. */
8205 mask.np = 0; /* Not writable. */
8206
8207 if (commit_nsh(&base_flow->nsh, use_masked, &key, &base, &mask,
8208 sizeof key, odp_actions)) {
8209 put_nsh_key(&base, base_flow, false);
8210 if (mask.mdtype != 0) { /* Mask was changed by commit_nsh(). */
8211 put_nsh_key(&mask, &wc->masks, true);
8212 }
8213 }
8214 }
8215
8216 /* TCP, UDP, and SCTP keys have the same layout. */
8217 BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
8218 sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
8219
8220 static void
8221 get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
8222 {
8223 tp->tcp.tcp_src = flow->tp_src;
8224 tp->tcp.tcp_dst = flow->tp_dst;
8225 }
8226
8227 static void
8228 put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
8229 {
8230 flow->tp_src = tp->tcp.tcp_src;
8231 flow->tp_dst = tp->tcp.tcp_dst;
8232 }
8233
8234 static void
8235 commit_set_port_action(const struct flow *flow, struct flow *base_flow,
8236 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8237 bool use_masked)
8238 {
8239 enum ovs_key_attr key_type;
8240 union ovs_key_tp key, mask, base;
8241 struct offsetof_sizeof ovs_key_tp_offsetof_sizeof_arr[] =
8242 OVS_KEY_TCP_OFFSETOF_SIZEOF_ARR;
8243
8244 /* Check if 'flow' really has an L3 header. */
8245 if (!flow->nw_proto) {
8246 return;
8247 }
8248
8249 if (!is_ip_any(base_flow)) {
8250 return;
8251 }
8252
8253 if (flow->nw_proto == IPPROTO_TCP) {
8254 key_type = OVS_KEY_ATTR_TCP;
8255 } else if (flow->nw_proto == IPPROTO_UDP) {
8256 key_type = OVS_KEY_ATTR_UDP;
8257 } else if (flow->nw_proto == IPPROTO_SCTP) {
8258 key_type = OVS_KEY_ATTR_SCTP;
8259 } else {
8260 return;
8261 }
8262
8263 get_tp_key(flow, &key);
8264 get_tp_key(base_flow, &base);
8265 get_tp_key(&wc->masks, &mask);
8266
8267 if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
8268 ovs_key_tp_offsetof_sizeof_arr, odp_actions)) {
8269 put_tp_key(&base, base_flow);
8270 put_tp_key(&mask, &wc->masks);
8271 }
8272 }
8273
8274 static void
8275 commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
8276 struct ofpbuf *odp_actions,
8277 struct flow_wildcards *wc,
8278 bool use_masked)
8279 {
8280 uint32_t key, mask, base;
8281 struct offsetof_sizeof ovs_key_prio_offsetof_sizeof_arr[] = {
8282 {0, sizeof(uint32_t)},
8283 {0, 0}
8284 };
8285
8286 key = flow->skb_priority;
8287 base = base_flow->skb_priority;
8288 mask = wc->masks.skb_priority;
8289
8290 if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
8291 sizeof key, ovs_key_prio_offsetof_sizeof_arr, odp_actions)) {
8292 base_flow->skb_priority = base;
8293 wc->masks.skb_priority = mask;
8294 }
8295 }
8296
8297 static void
8298 commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
8299 struct ofpbuf *odp_actions,
8300 struct flow_wildcards *wc,
8301 bool use_masked)
8302 {
8303 uint32_t key, mask, base;
8304 struct offsetof_sizeof ovs_key_pkt_mark_offsetof_sizeof_arr[] = {
8305 {0, sizeof(uint32_t)},
8306 {0, 0}
8307 };
8308
8309 key = flow->pkt_mark;
8310 base = base_flow->pkt_mark;
8311 mask = wc->masks.pkt_mark;
8312
8313 if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
8314 sizeof key, ovs_key_pkt_mark_offsetof_sizeof_arr,
8315 odp_actions)) {
8316 base_flow->pkt_mark = base;
8317 wc->masks.pkt_mark = mask;
8318 }
8319 }
8320
8321 static void
8322 odp_put_pop_nsh_action(struct ofpbuf *odp_actions)
8323 {
8324 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_NSH);
8325 }
8326
8327 static void
8328 odp_put_push_nsh_action(struct ofpbuf *odp_actions,
8329 const struct flow *flow,
8330 struct ofpbuf *encap_data)
8331 {
8332 uint8_t * metadata = NULL;
8333 uint8_t md_size = 0;
8334
8335 switch (flow->nsh.mdtype) {
8336 case NSH_M_TYPE2:
8337 if (encap_data) {
8338 ovs_assert(encap_data->size < NSH_CTX_HDRS_MAX_LEN);
8339 metadata = encap_data->data;
8340 md_size = encap_data->size;
8341 } else {
8342 md_size = 0;
8343 }
8344 break;
8345 default:
8346 md_size = 0;
8347 break;
8348 }
8349 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_PUSH_NSH);
8350 nsh_key_to_attr(odp_actions, &flow->nsh, metadata, md_size, false);
8351 nl_msg_end_nested(odp_actions, offset);
8352 }
8353
8354 static void
8355 commit_encap_decap_action(const struct flow *flow,
8356 struct flow *base_flow,
8357 struct ofpbuf *odp_actions,
8358 struct flow_wildcards *wc,
8359 bool pending_encap, bool pending_decap,
8360 struct ofpbuf *encap_data)
8361 {
8362 if (pending_encap) {
8363 switch (ntohl(flow->packet_type)) {
8364 case PT_ETH: {
8365 /* push_eth */
8366 odp_put_push_eth_action(odp_actions, &flow->dl_src,
8367 &flow->dl_dst);
8368 base_flow->packet_type = flow->packet_type;
8369 base_flow->dl_src = flow->dl_src;
8370 base_flow->dl_dst = flow->dl_dst;
8371 break;
8372 }
8373 case PT_NSH:
8374 /* push_nsh */
8375 odp_put_push_nsh_action(odp_actions, flow, encap_data);
8376 base_flow->packet_type = flow->packet_type;
8377 /* Update all packet headers in base_flow. */
8378 memcpy(&base_flow->dl_dst, &flow->dl_dst,
8379 sizeof(*flow) - offsetof(struct flow, dl_dst));
8380 break;
8381 default:
8382 /* Only the above protocols are supported for encap.
8383 * The check is done at action translation. */
8384 OVS_NOT_REACHED();
8385 }
8386 } else if (pending_decap || flow->packet_type != base_flow->packet_type) {
8387 /* This is an explicit or implicit decap case. */
8388 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE &&
8389 base_flow->packet_type == htonl(PT_ETH)) {
8390 /* Generate pop_eth and continue without recirculation. */
8391 odp_put_pop_eth_action(odp_actions);
8392 base_flow->packet_type = flow->packet_type;
8393 base_flow->dl_src = eth_addr_zero;
8394 base_flow->dl_dst = eth_addr_zero;
8395 } else {
8396 /* All other decap cases require recirculation.
8397 * No need to update the base flow here. */
8398 switch (ntohl(base_flow->packet_type)) {
8399 case PT_NSH:
8400 /* pop_nsh. */
8401 odp_put_pop_nsh_action(odp_actions);
8402 break;
8403 default:
8404 /* Checks are done during translation. */
8405 OVS_NOT_REACHED();
8406 }
8407 }
8408 }
8409
8410 wc->masks.packet_type = OVS_BE32_MAX;
8411 }
8412
8413 /* If any of the flow key data that ODP actions can modify are different in
8414 * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
8415 * key from 'base' into 'flow', and then changes 'base' the same way. Does not
8416 * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
8417 * in addition to this function if needed. Sets fields in 'wc' that are
8418 * used as part of the action.
8419 *
8420 * In the common case, this function returns 0. If the flow key modification
8421 * requires the flow's packets to be forced into the userspace slow path, this
8422 * function returns SLOW_ACTION. This only happens when there is no ODP action
8423 * to modify some field that was actually modified. For example, there is no
8424 * ODP action to modify any ARP field, so such a modification triggers
8425 * SLOW_ACTION. (When this happens, packets that need such modification get
8426 * flushed to userspace and handled there, which works OK but much more slowly
8427 * than if the datapath handled it directly.) */
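/* For example, an OpenFlow flow that rewrites only the IPv4 source address of
 * a TCP packet typically commits a single masked set(ipv4(src=...)) action
 * here; the other helpers find no differences and emit nothing. */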
8428 enum slow_path_reason
8429 commit_odp_actions(const struct flow *flow, struct flow *base,
8430 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
8431 bool use_masked, bool pending_encap, bool pending_decap,
8432 struct ofpbuf *encap_data)
8433 {
8434 /* If you add a field that OpenFlow actions can change, and that is visible
8435 * to the datapath (including all data fields), then you should also add
8436 * code here to commit changes to the field. */
8437 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 41);
8438
8439 enum slow_path_reason slow1, slow2;
8440 bool mpls_done = false;
8441
8442 commit_encap_decap_action(flow, base, odp_actions, wc,
8443 pending_encap, pending_decap, encap_data);
8444 commit_set_ether_action(flow, base, odp_actions, wc, use_masked);
8445 /* Make packet a non-MPLS packet before committing L3/4 actions,
8446 * which would otherwise do nothing. */
8447 if (eth_type_mpls(base->dl_type) && !eth_type_mpls(flow->dl_type)) {
8448 commit_mpls_action(flow, base, odp_actions);
8449 mpls_done = true;
8450 }
8451 commit_set_nsh_action(flow, base, odp_actions, wc, use_masked);
8452 slow1 = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
8453 commit_set_port_action(flow, base, odp_actions, wc, use_masked);
8454 slow2 = commit_set_icmp_action(flow, base, odp_actions, wc);
8455 if (!mpls_done) {
8456 commit_mpls_action(flow, base, odp_actions);
8457 }
8458 commit_vlan_action(flow, base, odp_actions, wc);
8459 commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
8460 commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);
8461
8462 return slow1 ? slow1 : slow2;
8463 }