lib/odp-util.c
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include <sys/types.h>
19 #include <netinet/in.h>
20 #include <arpa/inet.h>
21 #include "odp-util.h"
22 #include <errno.h>
23 #include <inttypes.h>
24 #include <math.h>
25 #include <netinet/icmp6.h>
26 #include <netinet/ip6.h>
27 #include <stdlib.h>
28 #include <string.h>
29
30 #include "byte-order.h"
31 #include "coverage.h"
32 #include "dpif.h"
33 #include "openvswitch/dynamic-string.h"
34 #include "flow.h"
35 #include "netlink.h"
36 #include "openvswitch/ofpbuf.h"
37 #include "packets.h"
38 #include "simap.h"
39 #include "timeval.h"
40 #include "tun-metadata.h"
41 #include "unaligned.h"
42 #include "util.h"
43 #include "uuid.h"
44 #include "openvswitch/vlog.h"
45 #include "openvswitch/match.h"
46
47 VLOG_DEFINE_THIS_MODULE(odp_util);
48
49 /* The interface between userspace and kernel uses an "OVS_*" prefix.
50 * Since this is fairly non-specific for the OVS userspace components,
51 * "ODP_*" (Open vSwitch Datapath) is used as the prefix for
52 * interactions with the datapath.
53 */
54
55 /* The set of characters that may separate one action or one key attribute
56 * from another. */
57 static const char *delimiters = ", \t\r\n";
58 static const char *delimiters_end = ", \t\r\n)";
59
60 static int parse_odp_key_mask_attr(const char *, const struct simap *port_names,
61 struct ofpbuf *, struct ofpbuf *);
62 static void format_odp_key_attr(const struct nlattr *a,
63 const struct nlattr *ma,
64 const struct hmap *portno_names, struct ds *ds,
65 bool verbose);
66
67 struct geneve_scan {
68 struct geneve_opt d[63];
69 int len;
70 };
71
72 static int scan_geneve(const char *s, struct geneve_scan *key,
73 struct geneve_scan *mask);
74 static void format_geneve_opts(const struct geneve_opt *opt,
75 const struct geneve_opt *mask, int opts_len,
76 struct ds *, bool verbose);
77
78 static struct nlattr *generate_all_wildcard_mask(const struct attr_len_tbl tbl[],
79 int max, struct ofpbuf *,
80 const struct nlattr *key);
81 static void format_u128(struct ds *d, const ovs_32aligned_u128 *key,
82 const ovs_32aligned_u128 *mask, bool verbose);
83 static int scan_u128(const char *s, ovs_u128 *value, ovs_u128 *mask);
84
85 static int parse_odp_action(const char *s, const struct simap *port_names,
86 struct ofpbuf *actions);
87
88 /* Returns one of the following for the action with the given OVS_ACTION_ATTR_*
89 * 'type':
90 *
91 * - For an action whose argument has a fixed length, returns that
92 * nonnegative length in bytes.
93 *
94 * - For an action with a variable-length argument, returns ATTR_LEN_VARIABLE.
95 *
96 * - For an invalid 'type', returns ATTR_LEN_INVALID. */
97 static int
98 odp_action_len(uint16_t type)
99 {
100 if (type > OVS_ACTION_ATTR_MAX) {
101 return -1;
102 }
103
104 switch ((enum ovs_action_attr) type) {
105 case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
106 case OVS_ACTION_ATTR_TRUNC: return sizeof(struct ovs_action_trunc);
107 case OVS_ACTION_ATTR_TUNNEL_PUSH: return ATTR_LEN_VARIABLE;
108 case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
109 case OVS_ACTION_ATTR_METER: return sizeof(uint32_t);
110 case OVS_ACTION_ATTR_USERSPACE: return ATTR_LEN_VARIABLE;
111 case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
112 case OVS_ACTION_ATTR_POP_VLAN: return 0;
113 case OVS_ACTION_ATTR_PUSH_MPLS: return sizeof(struct ovs_action_push_mpls);
114 case OVS_ACTION_ATTR_POP_MPLS: return sizeof(ovs_be16);
115 case OVS_ACTION_ATTR_RECIRC: return sizeof(uint32_t);
116 case OVS_ACTION_ATTR_HASH: return sizeof(struct ovs_action_hash);
117 case OVS_ACTION_ATTR_SET: return ATTR_LEN_VARIABLE;
118 case OVS_ACTION_ATTR_SET_MASKED: return ATTR_LEN_VARIABLE;
119 case OVS_ACTION_ATTR_SAMPLE: return ATTR_LEN_VARIABLE;
120 case OVS_ACTION_ATTR_CT: return ATTR_LEN_VARIABLE;
121 case OVS_ACTION_ATTR_CT_CLEAR: return 0;
122 case OVS_ACTION_ATTR_PUSH_ETH: return sizeof(struct ovs_action_push_eth);
123 case OVS_ACTION_ATTR_POP_ETH: return 0;
124 case OVS_ACTION_ATTR_CLONE: return ATTR_LEN_VARIABLE;
125 case OVS_ACTION_ATTR_PUSH_NSH: return ATTR_LEN_VARIABLE;
126 case OVS_ACTION_ATTR_POP_NSH: return 0;
127
128 case OVS_ACTION_ATTR_UNSPEC:
129 case __OVS_ACTION_ATTR_MAX:
130 return ATTR_LEN_INVALID;
131 }
132
133 return ATTR_LEN_INVALID;
134 }
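
/* Illustrative sketch, not upstream documentation: a caller can use
 * odp_action_len() to validate a netlink-encoded action before interpreting
 * it, much as format_odp_action() further below does.  Here 'a' is assumed
 * to be a "const struct nlattr *" pointing at a single action:
 *
 *     int expected = odp_action_len(nl_attr_type(a));
 *     if (expected != ATTR_LEN_VARIABLE
 *         && nl_attr_get_size(a) != expected) {
 *         ...treat the action as malformed...
 *     }
 */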
135
136 /* Returns a string form of 'attr'. The return value is either a statically
137 * allocated constant string or the 'bufsize'-byte buffer 'namebuf'. 'bufsize'
138 * should be at least OVS_KEY_ATTR_BUFSIZE. */
139 enum { OVS_KEY_ATTR_BUFSIZE = 3 + INT_STRLEN(unsigned int) + 1 };
140 static const char *
141 ovs_key_attr_to_string(enum ovs_key_attr attr, char *namebuf, size_t bufsize)
142 {
143 switch (attr) {
144 case OVS_KEY_ATTR_UNSPEC: return "unspec";
145 case OVS_KEY_ATTR_ENCAP: return "encap";
146 case OVS_KEY_ATTR_PRIORITY: return "skb_priority";
147 case OVS_KEY_ATTR_SKB_MARK: return "skb_mark";
148 case OVS_KEY_ATTR_CT_STATE: return "ct_state";
149 case OVS_KEY_ATTR_CT_ZONE: return "ct_zone";
150 case OVS_KEY_ATTR_CT_MARK: return "ct_mark";
151 case OVS_KEY_ATTR_CT_LABELS: return "ct_label";
152 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: return "ct_tuple4";
153 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: return "ct_tuple6";
154 case OVS_KEY_ATTR_TUNNEL: return "tunnel";
155 case OVS_KEY_ATTR_IN_PORT: return "in_port";
156 case OVS_KEY_ATTR_ETHERNET: return "eth";
157 case OVS_KEY_ATTR_VLAN: return "vlan";
158 case OVS_KEY_ATTR_ETHERTYPE: return "eth_type";
159 case OVS_KEY_ATTR_IPV4: return "ipv4";
160 case OVS_KEY_ATTR_IPV6: return "ipv6";
161 case OVS_KEY_ATTR_TCP: return "tcp";
162 case OVS_KEY_ATTR_TCP_FLAGS: return "tcp_flags";
163 case OVS_KEY_ATTR_UDP: return "udp";
164 case OVS_KEY_ATTR_SCTP: return "sctp";
165 case OVS_KEY_ATTR_ICMP: return "icmp";
166 case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
167 case OVS_KEY_ATTR_ARP: return "arp";
168 case OVS_KEY_ATTR_ND: return "nd";
169 case OVS_KEY_ATTR_MPLS: return "mpls";
170 case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
171 case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
172 case OVS_KEY_ATTR_PACKET_TYPE: return "packet_type";
173 case OVS_KEY_ATTR_NSH: return "nsh";
174
175 case __OVS_KEY_ATTR_MAX:
176 default:
177 snprintf(namebuf, bufsize, "key%u", (unsigned int) attr);
178 return namebuf;
179 }
180 }
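
/* Illustrative usage sketch for ovs_key_attr_to_string(); the variable names
 * are only for illustration:
 *
 *     char namebuf[OVS_KEY_ATTR_BUFSIZE];
 *     const char *name = ovs_key_attr_to_string(attr, namebuf,
 *                                                sizeof namebuf);
 *
 * Since the result may be 'namebuf' itself, it must not be used after
 * 'namebuf' goes out of scope. */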
181
182 static void
183 format_generic_odp_action(struct ds *ds, const struct nlattr *a)
184 {
185 size_t len = nl_attr_get_size(a);
186
187 ds_put_format(ds, "action%d", nl_attr_type(a));
188 if (len) {
189 const uint8_t *unspec;
190 unsigned int i;
191
192 unspec = nl_attr_get(a);
193 for (i = 0; i < len; i++) {
194 ds_put_char(ds, i ? ' ': '(');
195 ds_put_format(ds, "%02x", unspec[i]);
196 }
197 ds_put_char(ds, ')');
198 }
199 }
200
201 static void
202 format_odp_sample_action(struct ds *ds, const struct nlattr *attr,
203 const struct hmap *portno_names)
204 {
205 static const struct nl_policy ovs_sample_policy[] = {
206 [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
207 [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
208 };
209 struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
210 double percentage;
211 const struct nlattr *nla_acts;
212 int len;
213
214 ds_put_cstr(ds, "sample");
215
216 if (!nl_parse_nested(attr, ovs_sample_policy, a, ARRAY_SIZE(a))) {
217 ds_put_cstr(ds, "(error)");
218 return;
219 }
220
221 percentage = (100.0 * nl_attr_get_u32(a[OVS_SAMPLE_ATTR_PROBABILITY])) /
222 UINT32_MAX;
223
224 ds_put_format(ds, "(sample=%.1f%%,", percentage);
225
226 ds_put_cstr(ds, "actions(");
227 nla_acts = nl_attr_get(a[OVS_SAMPLE_ATTR_ACTIONS]);
228 len = nl_attr_get_size(a[OVS_SAMPLE_ATTR_ACTIONS]);
229 format_odp_actions(ds, nla_acts, len, portno_names);
230 ds_put_format(ds, "))");
231 }
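
/* For reference only (made-up values): with a probability attribute of
 * UINT32_MAX / 2 and a single nested output action to port 2, the function
 * above renders roughly "sample(sample=50.0%,actions(2))". */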
232
233 static void
234 format_odp_clone_action(struct ds *ds, const struct nlattr *attr,
235 const struct hmap *portno_names)
236 {
237 const struct nlattr *nla_acts = nl_attr_get(attr);
238 int len = nl_attr_get_size(attr);
239
240 ds_put_cstr(ds, "clone");
241 ds_put_format(ds, "(");
242 format_odp_actions(ds, nla_acts, len, portno_names);
243 ds_put_format(ds, ")");
244 }
245
246 static void
247 format_nsh_key(struct ds *ds, const struct ovs_key_nsh *key)
248 {
249 ds_put_format(ds, "flags=%d", key->flags);
250 ds_put_format(ds, "ttl=%d", key->ttl);
251 ds_put_format(ds, ",mdtype=%d", key->mdtype);
252 ds_put_format(ds, ",np=%d", key->np);
253 ds_put_format(ds, ",spi=0x%x",
254 nsh_path_hdr_to_spi_uint32(key->path_hdr));
255 ds_put_format(ds, ",si=%d",
256 nsh_path_hdr_to_si(key->path_hdr));
257
258 switch (key->mdtype) {
259 case NSH_M_TYPE1:
260 for (int i = 0; i < 4; i++) {
261 ds_put_format(ds, ",c%d=0x%x", i + 1, ntohl(key->context[i]));
262 }
263 break;
264 case NSH_M_TYPE2:
265 default:
266 /* No support for matching other metadata formats yet. */
267 break;
268 }
269 }
270
271 static void
272 format_uint8_masked(struct ds *s, bool *first, const char *name,
273 uint8_t value, uint8_t mask)
274 {
275 if (mask != 0) {
276 if (!*first) {
277 ds_put_char(s, ',');
278 }
279 ds_put_format(s, "%s=", name);
280 if (mask == UINT8_MAX) {
281 ds_put_format(s, "%"PRIu8, value);
282 } else {
283 ds_put_format(s, "0x%02"PRIx8"/0x%02"PRIx8, value, mask);
284 }
285 *first = false;
286 }
287 }
288
289 static void
290 format_be32_masked(struct ds *s, bool *first, const char *name,
291 ovs_be32 value, ovs_be32 mask)
292 {
293 if (mask != htonl(0)) {
294 if (!*first) {
295 ds_put_char(s, ',');
296 }
297 ds_put_format(s, "%s=", name);
298 if (mask == OVS_BE32_MAX) {
299 ds_put_format(s, "0x%"PRIx32, ntohl(value));
300 } else {
301 ds_put_format(s, "0x%"PRIx32"/0x%08"PRIx32,
302 ntohl(value), ntohl(mask));
303 }
304 *first = false;
305 }
306 }
307
308 static void
309 format_nsh_key_mask(struct ds *ds, const struct ovs_key_nsh *key,
310 const struct ovs_key_nsh *mask)
311 {
312 if (!mask) {
313 format_nsh_key(ds, key);
314 } else {
315 bool first = true;
316 uint32_t spi = nsh_path_hdr_to_spi_uint32(key->path_hdr);
317 uint32_t spi_mask = nsh_path_hdr_to_spi_uint32(mask->path_hdr);
318 if (spi_mask == (NSH_SPI_MASK >> NSH_SPI_SHIFT)) {
319 spi_mask = UINT32_MAX;
320 }
321 uint8_t si = nsh_path_hdr_to_si(key->path_hdr);
322 uint8_t si_mask = nsh_path_hdr_to_si(mask->path_hdr);
323
324 format_uint8_masked(ds, &first, "flags", key->flags, mask->flags);
325 format_uint8_masked(ds, &first, "ttl", key->ttl, mask->ttl);
326 format_uint8_masked(ds, &first, "mdtype", key->mdtype, mask->mdtype);
327 format_uint8_masked(ds, &first, "np", key->np, mask->np);
328 format_be32_masked(ds, &first, "spi", htonl(spi), htonl(spi_mask));
329 format_uint8_masked(ds, &first, "si", si, si_mask);
330 format_be32_masked(ds, &first, "c1", key->context[0],
331 mask->context[0]);
332 format_be32_masked(ds, &first, "c2", key->context[1],
333 mask->context[1]);
334 format_be32_masked(ds, &first, "c3", key->context[2],
335 mask->context[2]);
336 format_be32_masked(ds, &first, "c4", key->context[3],
337 mask->context[3]);
338 }
339 }
340
341 static void
342 format_odp_push_nsh_action(struct ds *ds,
343 const struct nsh_hdr *nsh_hdr)
344 {
345 size_t mdlen = nsh_hdr_len(nsh_hdr) - NSH_BASE_HDR_LEN;
346 uint32_t spi = ntohl(nsh_get_spi(nsh_hdr));
347 uint8_t si = nsh_get_si(nsh_hdr);
348 uint8_t flags = nsh_get_flags(nsh_hdr);
349 uint8_t ttl = nsh_get_ttl(nsh_hdr);
350
351 ds_put_cstr(ds, "push_nsh(");
352 ds_put_format(ds, "flags=%d", flags);
353 ds_put_format(ds, ",ttl=%d", ttl);
354 ds_put_format(ds, ",mdtype=%d", nsh_hdr->md_type);
355 ds_put_format(ds, ",np=%d", nsh_hdr->next_proto);
356 ds_put_format(ds, ",spi=0x%x", spi);
357 ds_put_format(ds, ",si=%d", si);
358 switch (nsh_hdr->md_type) {
359 case NSH_M_TYPE1: {
360 const struct nsh_md1_ctx *md1_ctx = &nsh_hdr->md1;
361 for (int i = 0; i < 4; i++) {
362 ds_put_format(ds, ",c%d=0x%x", i + 1,
363 ntohl(get_16aligned_be32(&md1_ctx->context[i])));
364 }
365 break;
366 }
367 case NSH_M_TYPE2: {
368 const struct nsh_md2_tlv *md2_ctx = &nsh_hdr->md2;
369 ds_put_cstr(ds, ",md2=");
370 ds_put_hex(ds, md2_ctx, mdlen);
371 break;
372 }
373 default:
374 OVS_NOT_REACHED();
375 }
376 ds_put_format(ds, ")");
377 }
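
/* For reference only (made-up values): for an MD type 1 header the function
 * above produces output along the lines of
 * "push_nsh(flags=0,ttl=63,mdtype=1,np=3,spi=0x1234,si=255,c1=0x0,c2=0x0,c3=0x0,c4=0x0)". */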
378
379 static const char *
380 slow_path_reason_to_string(uint32_t reason)
381 {
382 switch ((enum slow_path_reason) reason) {
383 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
384 SLOW_PATH_REASONS
385 #undef SPR
386 }
387
388 return NULL;
389 }
390
391 const char *
392 slow_path_reason_to_explanation(enum slow_path_reason reason)
393 {
394 switch (reason) {
395 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
396 SLOW_PATH_REASONS
397 #undef SPR
398 }
399
400 return "<unknown>";
401 }
402
403 static int
404 parse_odp_flags(const char *s, const char *(*bit_to_string)(uint32_t),
405 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
406 {
407 return parse_flags(s, bit_to_string, ')', NULL, NULL,
408 res_flags, allowed, res_mask);
409 }
410
411 static void
412 format_odp_userspace_action(struct ds *ds, const struct nlattr *attr,
413 const struct hmap *portno_names)
414 {
415 static const struct nl_policy ovs_userspace_policy[] = {
416 [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
417 [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
418 .optional = true },
419 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = { .type = NL_A_U32,
420 .optional = true },
421 [OVS_USERSPACE_ATTR_ACTIONS] = { .type = NL_A_UNSPEC,
422 .optional = true },
423 };
424 struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
425 const struct nlattr *userdata_attr;
426 const struct nlattr *tunnel_out_port_attr;
427
428 if (!nl_parse_nested(attr, ovs_userspace_policy, a, ARRAY_SIZE(a))) {
429 ds_put_cstr(ds, "userspace(error)");
430 return;
431 }
432
433 ds_put_format(ds, "userspace(pid=%"PRIu32,
434 nl_attr_get_u32(a[OVS_USERSPACE_ATTR_PID]));
435
436 userdata_attr = a[OVS_USERSPACE_ATTR_USERDATA];
437
438 if (userdata_attr) {
439 const uint8_t *userdata = nl_attr_get(userdata_attr);
440 size_t userdata_len = nl_attr_get_size(userdata_attr);
441 bool userdata_unspec = true;
442 struct user_action_cookie cookie;
443
444 if (userdata_len == sizeof cookie) {
445 memcpy(&cookie, userdata, sizeof cookie);
446
447 userdata_unspec = false;
448
449 if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
450 ds_put_format(ds, ",sFlow("
451 "vid=%"PRIu16",pcp=%d,output=%"PRIu32")",
452 vlan_tci_to_vid(cookie.sflow.vlan_tci),
453 vlan_tci_to_pcp(cookie.sflow.vlan_tci),
454 cookie.sflow.output);
455 } else if (cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
456 ds_put_cstr(ds, ",slow_path(");
457 format_flags(ds, slow_path_reason_to_string,
458 cookie.slow_path.reason, ',');
459 ds_put_format(ds, ")");
460 } else if (cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
461 ds_put_format(ds, ",flow_sample(probability=%"PRIu16
462 ",collector_set_id=%"PRIu32
463 ",obs_domain_id=%"PRIu32
464 ",obs_point_id=%"PRIu32
465 ",output_port=",
466 cookie.flow_sample.probability,
467 cookie.flow_sample.collector_set_id,
468 cookie.flow_sample.obs_domain_id,
469 cookie.flow_sample.obs_point_id);
470 odp_portno_name_format(portno_names,
471 cookie.flow_sample.output_odp_port, ds);
472 if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_INGRESS) {
473 ds_put_cstr(ds, ",ingress");
474 } else if (cookie.flow_sample.direction == NX_ACTION_SAMPLE_EGRESS) {
475 ds_put_cstr(ds, ",egress");
476 }
477 ds_put_char(ds, ')');
478 } else if (cookie.type == USER_ACTION_COOKIE_IPFIX) {
479 ds_put_format(ds, ",ipfix(output_port=");
480 odp_portno_name_format(portno_names,
481 cookie.ipfix.output_odp_port, ds);
482 ds_put_char(ds, ')');
483 } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
484 ds_put_format(ds, ",controller(reason=%"PRIu16
485 ",dont_send=%d"
486 ",continuation=%d"
487 ",recirc_id=%"PRIu32
488 ",rule_cookie=%#"PRIx64
489 ",controller_id=%"PRIu16
490 ",max_len=%"PRIu16,
491 cookie.controller.reason,
492 cookie.controller.dont_send ? 1 : 0,
493 cookie.controller.continuation ? 1 : 0,
494 cookie.controller.recirc_id,
495 ntohll(get_32aligned_be64(
496 &cookie.controller.rule_cookie)),
497 cookie.controller.controller_id,
498 cookie.controller.max_len);
499 ds_put_char(ds, ')');
500 } else {
501 userdata_unspec = true;
502 }
503 }
504
505 if (userdata_unspec) {
506 size_t i;
507 ds_put_format(ds, ",userdata(");
508 for (i = 0; i < userdata_len; i++) {
509 ds_put_format(ds, "%02x", userdata[i]);
510 }
511 ds_put_char(ds, ')');
512 }
513 }
514
515 if (a[OVS_USERSPACE_ATTR_ACTIONS]) {
516 ds_put_cstr(ds, ",actions");
517 }
518
519 tunnel_out_port_attr = a[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT];
520 if (tunnel_out_port_attr) {
521 ds_put_format(ds, ",tunnel_out_port=");
522 odp_portno_name_format(portno_names,
523 nl_attr_get_odp_port(tunnel_out_port_attr), ds);
524 }
525
526 ds_put_char(ds, ')');
527 }
528
529 static void
530 format_vlan_tci(struct ds *ds, ovs_be16 tci, ovs_be16 mask, bool verbose)
531 {
532 if (verbose || vlan_tci_to_vid(tci) || vlan_tci_to_vid(mask)) {
533 ds_put_format(ds, "vid=%"PRIu16, vlan_tci_to_vid(tci));
534 if (vlan_tci_to_vid(mask) != VLAN_VID_MASK) { /* Partially masked. */
535 ds_put_format(ds, "/0x%"PRIx16, vlan_tci_to_vid(mask));
536 }
537 ds_put_char(ds, ',');
538 }
539 if (verbose || vlan_tci_to_pcp(tci) || vlan_tci_to_pcp(mask)) {
540 ds_put_format(ds, "pcp=%d", vlan_tci_to_pcp(tci));
541 if (vlan_tci_to_pcp(mask) != (VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
542 ds_put_format(ds, "/0x%x", vlan_tci_to_pcp(mask));
543 }
544 ds_put_char(ds, ',');
545 }
546 if (!(tci & htons(VLAN_CFI))) {
547 ds_put_cstr(ds, "cfi=0");
548 ds_put_char(ds, ',');
549 }
550 ds_chomp(ds, ',');
551 }
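
/* For reference only (made-up values): a fully masked TCI with VID 10,
 * PCP 7, and CFI set is rendered by the function above as "vid=10,pcp=7";
 * if the CFI bit is clear, ",cfi=0" is appended as well. */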
552
553 static void
554 format_mpls_lse(struct ds *ds, ovs_be32 mpls_lse)
555 {
556 ds_put_format(ds, "label=%"PRIu32",tc=%d,ttl=%d,bos=%d",
557 mpls_lse_to_label(mpls_lse),
558 mpls_lse_to_tc(mpls_lse),
559 mpls_lse_to_ttl(mpls_lse),
560 mpls_lse_to_bos(mpls_lse));
561 }
562
563 static void
564 format_mpls(struct ds *ds, const struct ovs_key_mpls *mpls_key,
565 const struct ovs_key_mpls *mpls_mask, int n)
566 {
567 for (int i = 0; i < n; i++) {
568 ovs_be32 key = mpls_key[i].mpls_lse;
569
570 if (mpls_mask == NULL) {
571 format_mpls_lse(ds, key);
572 } else {
573 ovs_be32 mask = mpls_mask[i].mpls_lse;
574
575 ds_put_format(ds, "label=%"PRIu32"/0x%x,tc=%d/%x,ttl=%d/0x%x,bos=%d/%x",
576 mpls_lse_to_label(key), mpls_lse_to_label(mask),
577 mpls_lse_to_tc(key), mpls_lse_to_tc(mask),
578 mpls_lse_to_ttl(key), mpls_lse_to_ttl(mask),
579 mpls_lse_to_bos(key), mpls_lse_to_bos(mask));
580 }
581 ds_put_char(ds, ',');
582 }
583 ds_chomp(ds, ',');
584 }
585
586 static void
587 format_odp_recirc_action(struct ds *ds, uint32_t recirc_id)
588 {
589 ds_put_format(ds, "recirc(%#"PRIx32")", recirc_id);
590 }
591
592 static void
593 format_odp_hash_action(struct ds *ds, const struct ovs_action_hash *hash_act)
594 {
595 ds_put_format(ds, "hash(");
596
597 if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
598 ds_put_format(ds, "hash_l4(%"PRIu32")", hash_act->hash_basis);
599 } else {
600 ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
601 hash_act->hash_alg);
602 }
603 ds_put_format(ds, ")");
604 }
605
606 static const void *
607 format_udp_tnl_push_header(struct ds *ds, const struct udp_header *udp)
608 {
609 ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16",csum=0x%"PRIx16"),",
610 ntohs(udp->udp_src), ntohs(udp->udp_dst),
611 ntohs(udp->udp_csum));
612
613 return udp + 1;
614 }
615
616 static void
617 format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
618 {
619 const struct eth_header *eth;
620 const void *l3;
621 const void *l4;
622 const struct udp_header *udp;
623
624 eth = (const struct eth_header *)data->header;
625
626 l3 = eth + 1;
627
628 /* Ethernet */
629 ds_put_format(ds, "header(size=%"PRIu32",type=%"PRIu32",eth(dst=",
630 data->header_len, data->tnl_type);
631 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
632 ds_put_format(ds, ",src=");
633 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
634 ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
635
636 if (eth->eth_type == htons(ETH_TYPE_IP)) {
637 /* IPv4 */
638 const struct ip_header *ip = l3;
639 ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
640 ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
641 IP_ARGS(get_16aligned_be32(&ip->ip_src)),
642 IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
643 ip->ip_proto, ip->ip_tos,
644 ip->ip_ttl,
645 ntohs(ip->ip_frag_off));
646 l4 = (ip + 1);
647 } else {
648 const struct ovs_16aligned_ip6_hdr *ip6 = l3;
649 struct in6_addr src, dst;
650 memcpy(&src, &ip6->ip6_src, sizeof src);
651 memcpy(&dst, &ip6->ip6_dst, sizeof dst);
652 uint32_t ipv6_flow = ntohl(get_16aligned_be32(&ip6->ip6_flow));
653
654 ds_put_format(ds, "ipv6(src=");
655 ipv6_format_addr(&src, ds);
656 ds_put_format(ds, ",dst=");
657 ipv6_format_addr(&dst, ds);
658 ds_put_format(ds, ",label=%i,proto=%"PRIu8",tclass=0x%"PRIx32
659 ",hlimit=%"PRIu8"),",
660 ipv6_flow & IPV6_LABEL_MASK, ip6->ip6_nxt,
661 (ipv6_flow >> 20) & 0xff, ip6->ip6_hlim);
662 l4 = (ip6 + 1);
663 }
664
665 udp = (const struct udp_header *) l4;
666
667 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
668 const struct vxlanhdr *vxh;
669
670 vxh = format_udp_tnl_push_header(ds, udp);
671
672 ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
673 ntohl(get_16aligned_be32(&vxh->vx_flags)),
674 ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
675 } else if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
676 const struct genevehdr *gnh;
677
678 gnh = format_udp_tnl_push_header(ds, udp);
679
680 ds_put_format(ds, "geneve(%s%svni=0x%"PRIx32,
681 gnh->oam ? "oam," : "",
682 gnh->critical ? "crit," : "",
683 ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
684
685 if (gnh->opt_len) {
686 ds_put_cstr(ds, ",options(");
687 format_geneve_opts(gnh->options, NULL, gnh->opt_len * 4,
688 ds, false);
689 ds_put_char(ds, ')');
690 }
691
692 ds_put_char(ds, ')');
693 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE ||
694 data->tnl_type == OVS_VPORT_TYPE_IP6GRE) {
695 const struct gre_base_hdr *greh;
696 ovs_16aligned_be32 *options;
697
698 greh = (const struct gre_base_hdr *) l4;
699
700 ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
701 ntohs(greh->flags), ntohs(greh->protocol));
702 options = (ovs_16aligned_be32 *)(greh + 1);
703 if (greh->flags & htons(GRE_CSUM)) {
704 ds_put_format(ds, ",csum=0x%"PRIx16, ntohs(*((ovs_be16 *)options)));
705 options++;
706 }
707 if (greh->flags & htons(GRE_KEY)) {
708 ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
709 options++;
710 }
711 if (greh->flags & htons(GRE_SEQ)) {
712 ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
713 options++;
714 }
715 ds_put_format(ds, ")");
716 } else if (data->tnl_type == OVS_VPORT_TYPE_ERSPAN ||
717 data->tnl_type == OVS_VPORT_TYPE_IP6ERSPAN) {
718 const struct gre_base_hdr *greh;
719 const struct erspan_base_hdr *ersh;
720
721 greh = (const struct gre_base_hdr *) l4;
722 ersh = ERSPAN_HDR(greh);
723
724 if (ersh->ver == 1) {
725 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
726 ersh + 1);
727 ds_put_format(ds, "erspan(ver=1,sid=0x%"PRIx16",idx=0x%"PRIx32")",
728 get_sid(ersh), ntohl(get_16aligned_be32(index)));
729 } else if (ersh->ver == 2) {
730 struct erspan_md2 *md2 = ALIGNED_CAST(struct erspan_md2 *,
731 ersh + 1);
732 ds_put_format(ds, "erspan(ver=2,sid=0x%"PRIx16
733 ",dir=%"PRIu8",hwid=0x%"PRIx8")",
734 get_sid(ersh), md2->dir, get_hwid(md2));
735 } else {
736 VLOG_WARN("%s Invalid ERSPAN version %d\n", __func__, ersh->ver);
737 }
738 }
739 ds_put_format(ds, ")");
740 }
741
742 static void
743 format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr,
744 const struct hmap *portno_names)
745 {
746 struct ovs_action_push_tnl *data;
747
748 data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
749
750 ds_put_cstr(ds, "tnl_push(tnl_port(");
751 odp_portno_name_format(portno_names, data->tnl_port, ds);
752 ds_put_cstr(ds, "),");
753 format_odp_tnl_push_header(ds, data);
754 ds_put_format(ds, ",out_port(");
755 odp_portno_name_format(portno_names, data->out_port, ds);
756 ds_put_cstr(ds, "))");
757 }
758
759 static const struct nl_policy ovs_nat_policy[] = {
760 [OVS_NAT_ATTR_SRC] = { .type = NL_A_FLAG, .optional = true, },
761 [OVS_NAT_ATTR_DST] = { .type = NL_A_FLAG, .optional = true, },
762 [OVS_NAT_ATTR_IP_MIN] = { .type = NL_A_UNSPEC, .optional = true,
763 .min_len = sizeof(struct in_addr),
764 .max_len = sizeof(struct in6_addr)},
765 [OVS_NAT_ATTR_IP_MAX] = { .type = NL_A_UNSPEC, .optional = true,
766 .min_len = sizeof(struct in_addr),
767 .max_len = sizeof(struct in6_addr)},
768 [OVS_NAT_ATTR_PROTO_MIN] = { .type = NL_A_U16, .optional = true, },
769 [OVS_NAT_ATTR_PROTO_MAX] = { .type = NL_A_U16, .optional = true, },
770 [OVS_NAT_ATTR_PERSISTENT] = { .type = NL_A_FLAG, .optional = true, },
771 [OVS_NAT_ATTR_PROTO_HASH] = { .type = NL_A_FLAG, .optional = true, },
772 [OVS_NAT_ATTR_PROTO_RANDOM] = { .type = NL_A_FLAG, .optional = true, },
773 };
774
775 static void
776 format_odp_ct_nat(struct ds *ds, const struct nlattr *attr)
777 {
778 struct nlattr *a[ARRAY_SIZE(ovs_nat_policy)];
779 size_t addr_len;
780 ovs_be32 ip_min, ip_max;
781 struct in6_addr ip6_min, ip6_max;
782 uint16_t proto_min, proto_max;
783
784 if (!nl_parse_nested(attr, ovs_nat_policy, a, ARRAY_SIZE(a))) {
785 ds_put_cstr(ds, "nat(error: nl_parse_nested() failed.)");
786 return;
787 }
788 /* If no type, then nothing else either. */
789 if (!(a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST])
790 && (a[OVS_NAT_ATTR_IP_MIN] || a[OVS_NAT_ATTR_IP_MAX]
791 || a[OVS_NAT_ATTR_PROTO_MIN] || a[OVS_NAT_ATTR_PROTO_MAX]
792 || a[OVS_NAT_ATTR_PERSISTENT] || a[OVS_NAT_ATTR_PROTO_HASH]
793 || a[OVS_NAT_ATTR_PROTO_RANDOM])) {
794 ds_put_cstr(ds, "nat(error: options allowed only with \"src\" or \"dst\")");
795 return;
796 }
797 /* SNAT and DNAT may not both be specified. */
798 if (a[OVS_NAT_ATTR_SRC] && a[OVS_NAT_ATTR_DST]) {
799 ds_put_cstr(ds, "nat(error: Only one of \"src\" or \"dst\" may be present.)");
800 return;
801 }
802 /* proto may not appear without ip. */
803 if (!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_PROTO_MIN]) {
804 ds_put_cstr(ds, "nat(error: proto but no IP.)");
805 return;
806 }
807 /* MAX may not appear without MIN. */
808 if ((!a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX])
809 || (!a[OVS_NAT_ATTR_PROTO_MIN] && a[OVS_NAT_ATTR_PROTO_MAX])) {
810 ds_put_cstr(ds, "nat(error: range max without min.)");
811 return;
812 }
813 /* Address sizes must match. */
814 if ((a[OVS_NAT_ATTR_IP_MIN]
815 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(ovs_be32) &&
816 nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) != sizeof(struct in6_addr)))
817 || (a[OVS_NAT_ATTR_IP_MIN] && a[OVS_NAT_ATTR_IP_MAX]
818 && (nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN])
819 != nl_attr_get_size(a[OVS_NAT_ATTR_IP_MAX])))) {
820 ds_put_cstr(ds, "nat(error: IP address sizes do not match)");
821 return;
822 }
823
824 addr_len = a[OVS_NAT_ATTR_IP_MIN]
825 ? nl_attr_get_size(a[OVS_NAT_ATTR_IP_MIN]) : 0;
826 ip_min = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MIN]
827 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MIN]) : 0;
828 ip_max = addr_len == sizeof(ovs_be32) && a[OVS_NAT_ATTR_IP_MAX]
829 ? nl_attr_get_be32(a[OVS_NAT_ATTR_IP_MAX]) : 0;
830 if (addr_len == sizeof ip6_min) {
831 ip6_min = a[OVS_NAT_ATTR_IP_MIN]
832 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MIN])
833 : in6addr_any;
834 ip6_max = a[OVS_NAT_ATTR_IP_MAX]
835 ? *(struct in6_addr *)nl_attr_get(a[OVS_NAT_ATTR_IP_MAX])
836 : in6addr_any;
837 }
838 proto_min = a[OVS_NAT_ATTR_PROTO_MIN]
839 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MIN]) : 0;
840 proto_max = a[OVS_NAT_ATTR_PROTO_MAX]
841 ? nl_attr_get_u16(a[OVS_NAT_ATTR_PROTO_MAX]) : 0;
842
843 if ((addr_len == sizeof(ovs_be32)
844 && ip_max && ntohl(ip_min) > ntohl(ip_max))
845 || (addr_len == sizeof(struct in6_addr)
846 && !ipv6_mask_is_any(&ip6_max)
847 && memcmp(&ip6_min, &ip6_max, sizeof ip6_min) > 0)
848 || (proto_max && proto_min > proto_max)) {
849 ds_put_cstr(ds, "nat(range error)");
850 return;
851 }
852
853 ds_put_cstr(ds, "nat");
854 if (a[OVS_NAT_ATTR_SRC] || a[OVS_NAT_ATTR_DST]) {
855 ds_put_char(ds, '(');
856 if (a[OVS_NAT_ATTR_SRC]) {
857 ds_put_cstr(ds, "src");
858 } else if (a[OVS_NAT_ATTR_DST]) {
859 ds_put_cstr(ds, "dst");
860 }
861
862 if (addr_len > 0) {
863 ds_put_cstr(ds, "=");
864
865 if (addr_len == sizeof ip_min) {
866 ds_put_format(ds, IP_FMT, IP_ARGS(ip_min));
867
868 if (ip_max && ip_max != ip_min) {
869 ds_put_format(ds, "-"IP_FMT, IP_ARGS(ip_max));
870 }
871 } else if (addr_len == sizeof ip6_min) {
872 ipv6_format_addr_bracket(&ip6_min, ds, proto_min);
873
874 if (!ipv6_mask_is_any(&ip6_max) &&
875 memcmp(&ip6_max, &ip6_min, sizeof ip6_max) != 0) {
876 ds_put_char(ds, '-');
877 ipv6_format_addr_bracket(&ip6_max, ds, proto_min);
878 }
879 }
880 if (proto_min) {
881 ds_put_format(ds, ":%"PRIu16, proto_min);
882
883 if (proto_max && proto_max != proto_min) {
884 ds_put_format(ds, "-%"PRIu16, proto_max);
885 }
886 }
887 }
888 ds_put_char(ds, ',');
889 if (a[OVS_NAT_ATTR_PERSISTENT]) {
890 ds_put_cstr(ds, "persistent,");
891 }
892 if (a[OVS_NAT_ATTR_PROTO_HASH]) {
893 ds_put_cstr(ds, "hash,");
894 }
895 if (a[OVS_NAT_ATTR_PROTO_RANDOM]) {
896 ds_put_cstr(ds, "random,");
897 }
898 ds_chomp(ds, ',');
899 ds_put_char(ds, ')');
900 }
901 }
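
/* For reference only (made-up addresses and ports): a source NAT range is
 * rendered by the function above along the lines of
 * "nat(src=10.0.0.240-10.0.0.254:32768-65535,random)". */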
902
903 static const struct nl_policy ovs_conntrack_policy[] = {
904 [OVS_CT_ATTR_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
905 [OVS_CT_ATTR_FORCE_COMMIT] = { .type = NL_A_FLAG, .optional = true, },
906 [OVS_CT_ATTR_ZONE] = { .type = NL_A_U16, .optional = true, },
907 [OVS_CT_ATTR_MARK] = { .type = NL_A_UNSPEC, .optional = true,
908 .min_len = sizeof(uint32_t) * 2 },
909 [OVS_CT_ATTR_LABELS] = { .type = NL_A_UNSPEC, .optional = true,
910 .min_len = sizeof(struct ovs_key_ct_labels) * 2 },
911 [OVS_CT_ATTR_HELPER] = { .type = NL_A_STRING, .optional = true,
912 .min_len = 1, .max_len = 16 },
913 [OVS_CT_ATTR_NAT] = { .type = NL_A_UNSPEC, .optional = true },
914 };
915
916 static void
917 format_odp_conntrack_action(struct ds *ds, const struct nlattr *attr)
918 {
919 struct nlattr *a[ARRAY_SIZE(ovs_conntrack_policy)];
920 const struct {
921 ovs_32aligned_u128 value;
922 ovs_32aligned_u128 mask;
923 } *label;
924 const uint32_t *mark;
925 const char *helper;
926 uint16_t zone;
927 bool commit, force;
928 const struct nlattr *nat;
929
930 if (!nl_parse_nested(attr, ovs_conntrack_policy, a, ARRAY_SIZE(a))) {
931 ds_put_cstr(ds, "ct(error)");
932 return;
933 }
934
935 commit = a[OVS_CT_ATTR_COMMIT] ? true : false;
936 force = a[OVS_CT_ATTR_FORCE_COMMIT] ? true : false;
937 zone = a[OVS_CT_ATTR_ZONE] ? nl_attr_get_u16(a[OVS_CT_ATTR_ZONE]) : 0;
938 mark = a[OVS_CT_ATTR_MARK] ? nl_attr_get(a[OVS_CT_ATTR_MARK]) : NULL;
939 label = a[OVS_CT_ATTR_LABELS] ? nl_attr_get(a[OVS_CT_ATTR_LABELS]): NULL;
940 helper = a[OVS_CT_ATTR_HELPER] ? nl_attr_get(a[OVS_CT_ATTR_HELPER]) : NULL;
941 nat = a[OVS_CT_ATTR_NAT];
942
943 ds_put_format(ds, "ct");
944 if (commit || force || zone || mark || label || helper || nat) {
945 ds_put_cstr(ds, "(");
946 if (commit) {
947 ds_put_format(ds, "commit,");
948 }
949 if (force) {
950 ds_put_format(ds, "force_commit,");
951 }
952 if (zone) {
953 ds_put_format(ds, "zone=%"PRIu16",", zone);
954 }
955 if (mark) {
956 ds_put_format(ds, "mark=%#"PRIx32"/%#"PRIx32",", *mark,
957 *(mark + 1));
958 }
959 if (label) {
960 ds_put_format(ds, "label=");
961 format_u128(ds, &label->value, &label->mask, true);
962 ds_put_char(ds, ',');
963 }
964 if (helper) {
965 ds_put_format(ds, "helper=%s,", helper);
966 }
967 if (nat) {
968 format_odp_ct_nat(ds, nat);
969 }
970 ds_chomp(ds, ',');
971 ds_put_cstr(ds, ")");
972 }
973 }
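
/* For reference only (made-up values): a fully populated ct action is
 * rendered by the function above along the lines of
 * "ct(commit,zone=1,mark=0x1/0xffffffff,label=0x2/0x3,helper=ftp,nat(src=10.0.0.1))". */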
974
975 static const struct attr_len_tbl
976 ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
977 [OVS_NSH_KEY_ATTR_BASE] = { .len = 8 },
978 [OVS_NSH_KEY_ATTR_MD1] = { .len = 16 },
979 [OVS_NSH_KEY_ATTR_MD2] = { .len = ATTR_LEN_VARIABLE },
980 };
981
982 static void
983 format_odp_set_nsh(struct ds *ds, const struct nlattr *attr)
984 {
985 unsigned int left;
986 const struct nlattr *a;
987 struct ovs_key_nsh nsh;
988 struct ovs_key_nsh nsh_mask;
989
990 memset(&nsh, 0, sizeof nsh);
991 memset(&nsh_mask, 0xff, sizeof nsh_mask);
992
993 NL_NESTED_FOR_EACH (a, left, attr) {
994 enum ovs_nsh_key_attr type = nl_attr_type(a);
995 size_t len = nl_attr_get_size(a);
996
997 if (type >= OVS_NSH_KEY_ATTR_MAX) {
998 return;
999 }
1000
1001 int expected_len = ovs_nsh_key_attr_lens[type].len;
1002 if ((expected_len != ATTR_LEN_VARIABLE) && (len != 2 * expected_len)) {
1003 return;
1004 }
1005
1006 switch (type) {
1007 case OVS_NSH_KEY_ATTR_UNSPEC:
1008 break;
1009 case OVS_NSH_KEY_ATTR_BASE: {
1010 const struct ovs_nsh_key_base *base = nl_attr_get(a);
1011 const struct ovs_nsh_key_base *base_mask = base + 1;
1012 memcpy(&nsh, base, sizeof(*base));
1013 memcpy(&nsh_mask, base_mask, sizeof(*base_mask));
1014 break;
1015 }
1016 case OVS_NSH_KEY_ATTR_MD1: {
1017 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
1018 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
1019 memcpy(&nsh.context, &md1->context, sizeof(*md1));
1020 memcpy(&nsh_mask.context, &md1_mask->context, sizeof(*md1_mask));
1021 break;
1022 }
1023 case OVS_NSH_KEY_ATTR_MD2:
1024 case __OVS_NSH_KEY_ATTR_MAX:
1025 default:
1026 /* No support for matching other metadata formats yet. */
1027 break;
1028 }
1029 }
1030
1031 ds_put_cstr(ds, "set(nsh(");
1032 format_nsh_key_mask(ds, &nsh, &nsh_mask);
1033 ds_put_cstr(ds, "))");
1034 }
1035
1036
1037 static void
1038 format_odp_action(struct ds *ds, const struct nlattr *a,
1039 const struct hmap *portno_names)
1040 {
1041 int expected_len;
1042 enum ovs_action_attr type = nl_attr_type(a);
1043 size_t size;
1044
1045 expected_len = odp_action_len(nl_attr_type(a));
1046 if (expected_len != ATTR_LEN_VARIABLE &&
1047 nl_attr_get_size(a) != expected_len) {
1048 ds_put_format(ds, "bad length %"PRIuSIZE", expected %d for: ",
1049 nl_attr_get_size(a), expected_len);
1050 format_generic_odp_action(ds, a);
1051 return;
1052 }
1053
1054 switch (type) {
1055 case OVS_ACTION_ATTR_METER:
1056 ds_put_format(ds, "meter(%"PRIu32")", nl_attr_get_u32(a));
1057 break;
1058 case OVS_ACTION_ATTR_OUTPUT:
1059 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1060 break;
1061 case OVS_ACTION_ATTR_TRUNC: {
1062 const struct ovs_action_trunc *trunc =
1063 nl_attr_get_unspec(a, sizeof *trunc);
1064
1065 ds_put_format(ds, "trunc(%"PRIu32")", trunc->max_len);
1066 break;
1067 }
1068 case OVS_ACTION_ATTR_TUNNEL_POP:
1069 ds_put_cstr(ds, "tnl_pop(");
1070 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
1071 ds_put_char(ds, ')');
1072 break;
1073 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1074 format_odp_tnl_push_action(ds, a, portno_names);
1075 break;
1076 case OVS_ACTION_ATTR_USERSPACE:
1077 format_odp_userspace_action(ds, a, portno_names);
1078 break;
1079 case OVS_ACTION_ATTR_RECIRC:
1080 format_odp_recirc_action(ds, nl_attr_get_u32(a));
1081 break;
1082 case OVS_ACTION_ATTR_HASH:
1083 format_odp_hash_action(ds, nl_attr_get(a));
1084 break;
1085 case OVS_ACTION_ATTR_SET_MASKED:
1086 a = nl_attr_get(a);
1087 /* OVS_KEY_ATTR_NSH is a nested attribute, so it needs special processing. */
1088 if (nl_attr_type(a) == OVS_KEY_ATTR_NSH) {
1089 format_odp_set_nsh(ds, a);
1090 break;
1091 }
1092 size = nl_attr_get_size(a) / 2;
1093 ds_put_cstr(ds, "set(");
1094
1095 /* Masked set action not supported for tunnel key, which is bigger. */
1096 if (size <= sizeof(struct ovs_key_ipv6)) {
1097 struct nlattr attr[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1098 sizeof(struct nlattr))];
1099 struct nlattr mask[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
1100 sizeof(struct nlattr))];
1101
1102 mask->nla_type = attr->nla_type = nl_attr_type(a);
1103 mask->nla_len = attr->nla_len = NLA_HDRLEN + size;
1104 memcpy(attr + 1, (char *)(a + 1), size);
1105 memcpy(mask + 1, (char *)(a + 1) + size, size);
1106 format_odp_key_attr(attr, mask, NULL, ds, false);
1107 } else {
1108 format_odp_key_attr(a, NULL, NULL, ds, false);
1109 }
1110 ds_put_cstr(ds, ")");
1111 break;
1112 case OVS_ACTION_ATTR_SET:
1113 ds_put_cstr(ds, "set(");
1114 format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
1115 ds_put_cstr(ds, ")");
1116 break;
1117 case OVS_ACTION_ATTR_PUSH_ETH: {
1118 const struct ovs_action_push_eth *eth = nl_attr_get(a);
1119 ds_put_format(ds, "push_eth(src="ETH_ADDR_FMT",dst="ETH_ADDR_FMT")",
1120 ETH_ADDR_ARGS(eth->addresses.eth_src),
1121 ETH_ADDR_ARGS(eth->addresses.eth_dst));
1122 break;
1123 }
1124 case OVS_ACTION_ATTR_POP_ETH:
1125 ds_put_cstr(ds, "pop_eth");
1126 break;
1127 case OVS_ACTION_ATTR_PUSH_VLAN: {
1128 const struct ovs_action_push_vlan *vlan = nl_attr_get(a);
1129 ds_put_cstr(ds, "push_vlan(");
1130 if (vlan->vlan_tpid != htons(ETH_TYPE_VLAN)) {
1131 ds_put_format(ds, "tpid=0x%04"PRIx16",", ntohs(vlan->vlan_tpid));
1132 }
1133 format_vlan_tci(ds, vlan->vlan_tci, OVS_BE16_MAX, false);
1134 ds_put_char(ds, ')');
1135 break;
1136 }
1137 case OVS_ACTION_ATTR_POP_VLAN:
1138 ds_put_cstr(ds, "pop_vlan");
1139 break;
1140 case OVS_ACTION_ATTR_PUSH_MPLS: {
1141 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
1142 ds_put_cstr(ds, "push_mpls(");
1143 format_mpls_lse(ds, mpls->mpls_lse);
1144 ds_put_format(ds, ",eth_type=0x%"PRIx16")", ntohs(mpls->mpls_ethertype));
1145 break;
1146 }
1147 case OVS_ACTION_ATTR_POP_MPLS: {
1148 ovs_be16 ethertype = nl_attr_get_be16(a);
1149 ds_put_format(ds, "pop_mpls(eth_type=0x%"PRIx16")", ntohs(ethertype));
1150 break;
1151 }
1152 case OVS_ACTION_ATTR_SAMPLE:
1153 format_odp_sample_action(ds, a, portno_names);
1154 break;
1155 case OVS_ACTION_ATTR_CT:
1156 format_odp_conntrack_action(ds, a);
1157 break;
1158 case OVS_ACTION_ATTR_CT_CLEAR:
1159 ds_put_cstr(ds, "ct_clear");
1160 break;
1161 case OVS_ACTION_ATTR_CLONE:
1162 format_odp_clone_action(ds, a, portno_names);
1163 break;
1164 case OVS_ACTION_ATTR_PUSH_NSH: {
1165 uint32_t buffer[NSH_HDR_MAX_LEN / 4];
1166 struct nsh_hdr *nsh_hdr = ALIGNED_CAST(struct nsh_hdr *, buffer);
1167 nsh_reset_ver_flags_ttl_len(nsh_hdr);
1168 odp_nsh_hdr_from_attr(nl_attr_get(a), nsh_hdr, NSH_HDR_MAX_LEN);
1169 format_odp_push_nsh_action(ds, nsh_hdr);
1170 break;
1171 }
1172 case OVS_ACTION_ATTR_POP_NSH:
1173 ds_put_cstr(ds, "pop_nsh()");
1174 break;
1175 case OVS_ACTION_ATTR_UNSPEC:
1176 case __OVS_ACTION_ATTR_MAX:
1177 default:
1178 format_generic_odp_action(ds, a);
1179 break;
1180 }
1181 }
1182
1183 void
1184 format_odp_actions(struct ds *ds, const struct nlattr *actions,
1185 size_t actions_len, const struct hmap *portno_names)
1186 {
1187 if (actions_len) {
1188 const struct nlattr *a;
1189 unsigned int left;
1190
1191 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
1192 if (a != actions) {
1193 ds_put_char(ds, ',');
1194 }
1195 format_odp_action(ds, a, portno_names);
1196 }
1197 if (left) {
1198 int i;
1199
1200 if (left == actions_len) {
1201 ds_put_cstr(ds, "<empty>");
1202 }
1203 ds_put_format(ds, ",***%u leftover bytes*** (", left);
1204 for (i = 0; i < left; i++) {
1205 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
1206 }
1207 ds_put_char(ds, ')');
1208 }
1209 } else {
1210 ds_put_cstr(ds, "drop");
1211 }
1212 }
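
/* Illustrative usage sketch; the surrounding variables are assumed, not taken
 * from a real caller.  Formatting a netlink-encoded action list held in
 * 'actions'/'actions_len' into a human-readable string:
 *
 *     struct ds s = DS_EMPTY_INITIALIZER;
 *     format_odp_actions(&s, actions, actions_len, NULL);
 *     VLOG_INFO("datapath actions: %s", ds_cstr(&s));
 *     ds_destroy(&s);
 *
 * Passing NULL for 'portno_names' skips the name lookup, so ports are
 * printed as plain numbers. */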
1213
1214 /* Parses a userspace() action from 's' into 'actions'; separated out of parse_odp_action(). */
1215 static int
1216 parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
1217 {
1218 uint32_t pid;
1219 struct user_action_cookie cookie;
1220 struct ofpbuf buf;
1221 odp_port_t tunnel_out_port;
1222 int n = -1;
1223 void *user_data = NULL;
1224 size_t user_data_size = 0;
1225 bool include_actions = false;
1226 int res;
1227
1228 if (!ovs_scan(s, "userspace(pid=%"SCNi32"%n", &pid, &n)) {
1229 return -EINVAL;
1230 }
1231
1232 ofpbuf_init(&buf, 16);
1233 memset(&cookie, 0, sizeof cookie);
1234
1235 user_data = &cookie;
1236 user_data_size = sizeof cookie;
1237 {
1238 uint32_t output;
1239 uint32_t probability;
1240 uint32_t collector_set_id;
1241 uint32_t obs_domain_id;
1242 uint32_t obs_point_id;
1243
1244 /* USER_ACTION_COOKIE_CONTROLLER. */
1245 uint8_t dont_send;
1246 uint8_t continuation;
1247 uint16_t reason;
1248 uint32_t recirc_id;
1249 uint64_t rule_cookie;
1250 uint16_t controller_id;
1251 uint16_t max_len;
1252
1253 int vid, pcp;
1254 int n1 = -1;
1255 if (ovs_scan(&s[n], ",sFlow(vid=%i,"
1256 "pcp=%i,output=%"SCNi32")%n",
1257 &vid, &pcp, &output, &n1)) {
1258 uint16_t tci;
1259
1260 n += n1;
1261 tci = vid | (pcp << VLAN_PCP_SHIFT);
1262 if (tci) {
1263 tci |= VLAN_CFI;
1264 }
1265
1266 cookie.type = USER_ACTION_COOKIE_SFLOW;
1267 cookie.ofp_in_port = OFPP_NONE;
1268 cookie.ofproto_uuid = UUID_ZERO;
1269 cookie.sflow.vlan_tci = htons(tci);
1270 cookie.sflow.output = output;
1271 } else if (ovs_scan(&s[n], ",slow_path(%n",
1272 &n1)) {
1273 n += n1;
1274 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
1275 cookie.ofp_in_port = OFPP_NONE;
1276 cookie.ofproto_uuid = UUID_ZERO;
1277 cookie.slow_path.reason = 0;
1278
1279 res = parse_odp_flags(&s[n], slow_path_reason_to_string,
1280 &cookie.slow_path.reason,
1281 SLOW_PATH_REASON_MASK, NULL);
1282 if (res < 0 || s[n + res] != ')') {
1283 goto out;
1284 }
1285 n += res + 1;
1286 } else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
1287 "collector_set_id=%"SCNi32","
1288 "obs_domain_id=%"SCNi32","
1289 "obs_point_id=%"SCNi32","
1290 "output_port=%"SCNi32"%n",
1291 &probability, &collector_set_id,
1292 &obs_domain_id, &obs_point_id,
1293 &output, &n1)) {
1294 n += n1;
1295
1296 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
1297 cookie.ofp_in_port = OFPP_NONE;
1298 cookie.ofproto_uuid = UUID_ZERO;
1299 cookie.flow_sample.probability = probability;
1300 cookie.flow_sample.collector_set_id = collector_set_id;
1301 cookie.flow_sample.obs_domain_id = obs_domain_id;
1302 cookie.flow_sample.obs_point_id = obs_point_id;
1303 cookie.flow_sample.output_odp_port = u32_to_odp(output);
1304
1305 if (ovs_scan(&s[n], ",ingress%n", &n1)) {
1306 cookie.flow_sample.direction = NX_ACTION_SAMPLE_INGRESS;
1307 n += n1;
1308 } else if (ovs_scan(&s[n], ",egress%n", &n1)) {
1309 cookie.flow_sample.direction = NX_ACTION_SAMPLE_EGRESS;
1310 n += n1;
1311 } else {
1312 cookie.flow_sample.direction = NX_ACTION_SAMPLE_DEFAULT;
1313 }
1314 if (s[n] != ')') {
1315 res = -EINVAL;
1316 goto out;
1317 }
1318 n++;
1319 } else if (ovs_scan(&s[n], ",ipfix(output_port=%"SCNi32")%n",
1320 &output, &n1) ) {
1321 n += n1;
1322 cookie.type = USER_ACTION_COOKIE_IPFIX;
1323 cookie.ofp_in_port = OFPP_NONE;
1324 cookie.ofproto_uuid = UUID_ZERO;
1325 cookie.ipfix.output_odp_port = u32_to_odp(output);
1326 } else if (ovs_scan(&s[n], ",controller(reason=%"SCNu16
1327 ",dont_send=%"SCNu8
1328 ",continuation=%"SCNu8
1329 ",recirc_id=%"SCNu32
1330 ",rule_cookie=%"SCNx64
1331 ",controller_id=%"SCNu16
1332 ",max_len=%"SCNu16")%n",
1333 &reason, &dont_send, &continuation, &recirc_id,
1334 &rule_cookie, &controller_id, &max_len, &n1)) {
1335 n += n1;
1336 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
1337 cookie.ofp_in_port = OFPP_NONE;
1338 cookie.ofproto_uuid = UUID_ZERO;
1339 cookie.controller.dont_send = dont_send ? true : false;
1340 cookie.controller.continuation = continuation ? true : false;
1341 cookie.controller.reason = reason;
1342 cookie.controller.recirc_id = recirc_id;
1343 put_32aligned_be64(&cookie.controller.rule_cookie,
1344 htonll(rule_cookie));
1345 cookie.controller.controller_id = controller_id;
1346 cookie.controller.max_len = max_len;
1347 } else if (ovs_scan(&s[n], ",userdata(%n", &n1)) {
1348 char *end;
1349
1350 n += n1;
1351 end = ofpbuf_put_hex(&buf, &s[n], NULL);
1352 if (end[0] != ')') {
1353 res = -EINVAL;
1354 goto out;
1355 }
1356 user_data = buf.data;
1357 user_data_size = buf.size;
1358 n = (end + 1) - s;
1359 }
1360 }
1361
1362 {
1363 int n1 = -1;
1364 if (ovs_scan(&s[n], ",actions%n", &n1)) {
1365 n += n1;
1366 include_actions = true;
1367 }
1368 }
1369
1370 {
1371 int n1 = -1;
1372 if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
1373 &tunnel_out_port, &n1)) {
1374 odp_put_userspace_action(pid, user_data, user_data_size,
1375 tunnel_out_port, include_actions, actions);
1376 res = n + n1;
1377 goto out;
1378 } else if (s[n] == ')') {
1379 odp_put_userspace_action(pid, user_data, user_data_size,
1380 ODPP_NONE, include_actions, actions);
1381 res = n + 1;
1382 goto out;
1383 }
1384 }
1385
1386 {
1387 struct ovs_action_push_eth push;
1388 int eth_type = 0;
1389 int n1 = -1;
1390
1391 if (ovs_scan(&s[n], "push_eth(src="ETH_ADDR_SCAN_FMT","
1392 "dst="ETH_ADDR_SCAN_FMT",type=%i)%n",
1393 ETH_ADDR_SCAN_ARGS(push.addresses.eth_src),
1394 ETH_ADDR_SCAN_ARGS(push.addresses.eth_dst),
1395 &eth_type, &n1)) {
1396
1397 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_ETH,
1398 &push, sizeof push);
1399
1400 res = n + n1;
1401 goto out;
1402 }
1403 }
1404
1405 if (!strncmp(&s[n], "pop_eth", 7)) {
1406 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_ETH);
1407 res = 7;
1408 goto out;
1409 }
1410
1411 res = -EINVAL;
1412 out:
1413 ofpbuf_uninit(&buf);
1414 return res;
1415 }
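
/* For reference only, a few made-up strings of the form the parser above is
 * meant to accept:
 *
 *     userspace(pid=1234567)
 *     userspace(pid=1234567,sFlow(vid=10,pcp=7,output=48))
 *     userspace(pid=1234567,userdata(0102030405),actions)
 *     userspace(pid=1234567,tunnel_out_port=10)
 */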
1416
1417 static int
1418 ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
1419 {
1420 struct eth_header *eth;
1421 struct ip_header *ip;
1422 struct ovs_16aligned_ip6_hdr *ip6;
1423 struct udp_header *udp;
1424 struct gre_base_hdr *greh;
1425 struct erspan_base_hdr *ersh;
1426 struct erspan_md2 *md2;
1427 uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, csum, sid;
1428 ovs_be32 sip, dip;
1429 uint32_t tnl_type = 0, header_len = 0, ip_len = 0, erspan_idx = 0;
1430 void *l3, *l4;
1431 int n = 0;
1432 uint8_t hwid, dir;
1433
1434 if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
1435 return -EINVAL;
1436 }
1437 eth = (struct eth_header *) data->header;
1438 l3 = (struct ip_header *) (eth + 1);
1439 ip = (struct ip_header *) l3;
1440 ip6 = (struct ovs_16aligned_ip6_hdr *) l3;
1441 if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
1442 "eth(dst="ETH_ADDR_SCAN_FMT",",
1443 &data->header_len,
1444 &data->tnl_type,
1445 ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
1446 return -EINVAL;
1447 }
1448
1449 if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
1450 ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
1451 return -EINVAL;
1452 }
1453 if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
1454 return -EINVAL;
1455 }
1456 eth->eth_type = htons(dl_type);
1457
1458 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1459 /* IPv4 */
1460 uint16_t ip_frag_off;
1461 if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
1462 ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
1463 IP_SCAN_ARGS(&sip),
1464 IP_SCAN_ARGS(&dip),
1465 &ip->ip_proto, &ip->ip_tos,
1466 &ip->ip_ttl, &ip_frag_off)) {
1467 return -EINVAL;
1468 }
1469 put_16aligned_be32(&ip->ip_src, sip);
1470 put_16aligned_be32(&ip->ip_dst, dip);
1471 ip->ip_frag_off = htons(ip_frag_off);
1472 ip_len = sizeof *ip;
1473 } else {
1474 char sip6_s[IPV6_SCAN_LEN + 1];
1475 char dip6_s[IPV6_SCAN_LEN + 1];
1476 struct in6_addr sip6, dip6;
1477 uint8_t tclass;
1478 uint32_t label;
1479 if (!ovs_scan_len(s, &n, "ipv6(src="IPV6_SCAN_FMT",dst="IPV6_SCAN_FMT
1480 ",label=%i,proto=%"SCNi8",tclass=0x%"SCNx8
1481 ",hlimit=%"SCNi8"),",
1482 sip6_s, dip6_s, &label, &ip6->ip6_nxt,
1483 &tclass, &ip6->ip6_hlim)
1484 || (label & ~IPV6_LABEL_MASK) != 0
1485 || inet_pton(AF_INET6, sip6_s, &sip6) != 1
1486 || inet_pton(AF_INET6, dip6_s, &dip6) != 1) {
1487 return -EINVAL;
1488 }
1489 put_16aligned_be32(&ip6->ip6_flow, htonl(6 << 28) |
1490 htonl(tclass << 20) | htonl(label));
1491 memcpy(&ip6->ip6_src, &sip6, sizeof(ip6->ip6_src));
1492 memcpy(&ip6->ip6_dst, &dip6, sizeof(ip6->ip6_dst));
1493 ip_len = sizeof *ip6;
1494 }
1495
1496 /* Tunnel header */
1497 l4 = ((uint8_t *) l3 + ip_len);
1498 udp = (struct udp_header *) l4;
1499 greh = (struct gre_base_hdr *) l4;
1500 if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
1501 &udp_src, &udp_dst, &csum)) {
1502 uint32_t vx_flags, vni;
1503
1504 udp->udp_src = htons(udp_src);
1505 udp->udp_dst = htons(udp_dst);
1506 udp->udp_len = 0;
1507 udp->udp_csum = htons(csum);
1508
1509 if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
1510 &vx_flags, &vni)) {
1511 struct vxlanhdr *vxh = (struct vxlanhdr *) (udp + 1);
1512
1513 put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
1514 put_16aligned_be32(&vxh->vx_vni, htonl(vni << 8));
1515 tnl_type = OVS_VPORT_TYPE_VXLAN;
1516 header_len = sizeof *eth + ip_len +
1517 sizeof *udp + sizeof *vxh;
1518 } else if (ovs_scan_len(s, &n, "geneve(")) {
1519 struct genevehdr *gnh = (struct genevehdr *) (udp + 1);
1520
1521 memset(gnh, 0, sizeof *gnh);
1522 header_len = sizeof *eth + ip_len +
1523 sizeof *udp + sizeof *gnh;
1524
1525 if (ovs_scan_len(s, &n, "oam,")) {
1526 gnh->oam = 1;
1527 }
1528 if (ovs_scan_len(s, &n, "crit,")) {
1529 gnh->critical = 1;
1530 }
1531 if (!ovs_scan_len(s, &n, "vni=%"SCNi32, &vni)) {
1532 return -EINVAL;
1533 }
1534 if (ovs_scan_len(s, &n, ",options(")) {
1535 struct geneve_scan options;
1536 int len;
1537
1538 memset(&options, 0, sizeof options);
1539 len = scan_geneve(s + n, &options, NULL);
1540 if (!len) {
1541 return -EINVAL;
1542 }
1543
1544 memcpy(gnh->options, options.d, options.len);
1545 gnh->opt_len = options.len / 4;
1546 header_len += options.len;
1547
1548 n += len;
1549 }
1550 if (!ovs_scan_len(s, &n, "))")) {
1551 return -EINVAL;
1552 }
1553
1554 gnh->proto_type = htons(ETH_TYPE_TEB);
1555 put_16aligned_be32(&gnh->vni, htonl(vni << 8));
1556 tnl_type = OVS_VPORT_TYPE_GENEVE;
1557 } else {
1558 return -EINVAL;
1559 }
1560 } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
1561 &gre_flags, &gre_proto)){
1562
1563 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1564 tnl_type = OVS_VPORT_TYPE_GRE;
1565 } else {
1566 tnl_type = OVS_VPORT_TYPE_IP6GRE;
1567 }
1568 greh->flags = htons(gre_flags);
1569 greh->protocol = htons(gre_proto);
1570 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
1571
1572 if (greh->flags & htons(GRE_CSUM)) {
1573 if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
1574 return -EINVAL;
1575 }
1576
1577 memset(options, 0, sizeof *options);
1578 *((ovs_be16 *)options) = htons(csum);
1579 options++;
1580 }
1581 if (greh->flags & htons(GRE_KEY)) {
1582 uint32_t key;
1583
1584 if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
1585 return -EINVAL;
1586 }
1587
1588 put_16aligned_be32(options, htonl(key));
1589 options++;
1590 }
1591 if (greh->flags & htons(GRE_SEQ)) {
1592 uint32_t seq;
1593
1594 if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
1595 return -EINVAL;
1596 }
1597 put_16aligned_be32(options, htonl(seq));
1598 options++;
1599 }
1600
1601 if (!ovs_scan_len(s, &n, "))")) {
1602 return -EINVAL;
1603 }
1604
1605 header_len = sizeof *eth + ip_len +
1606 ((uint8_t *) options - (uint8_t *) greh);
1607 } else if (ovs_scan_len(s, &n, "erspan(ver=1,sid="SCNx16",idx=0x"SCNx32")",
1608 &sid, &erspan_idx)) {
1609 ersh = ERSPAN_HDR(greh);
1610 ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
1611 ersh + 1);
1612
1613 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1614 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1615 } else {
1616 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1617 }
1618
1619 greh->flags = htons(GRE_SEQ);
1620 greh->protocol = htons(ETH_TYPE_ERSPAN1);
1621
1622 ersh->ver = 1;
1623 set_sid(ersh, sid);
1624 put_16aligned_be32(index, htonl(erspan_idx));
1625
1626 if (!ovs_scan_len(s, &n, ")")) {
1627 return -EINVAL;
1628 }
1629 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1630 sizeof *ersh + ERSPAN_V1_MDSIZE;
1631
1632 } else if (ovs_scan_len(s, &n, "erspan(ver=2,sid="SCNx16"dir="SCNu8
1633 ",hwid=0x"SCNx8")", &sid, &dir, &hwid)) {
1634
1635 ersh = ERSPAN_HDR(greh);
1636 md2 = ALIGNED_CAST(struct erspan_md2 *, ersh + 1);
1637
1638 if (eth->eth_type == htons(ETH_TYPE_IP)) {
1639 tnl_type = OVS_VPORT_TYPE_ERSPAN;
1640 } else {
1641 tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
1642 }
1643
1644 greh->flags = htons(GRE_SEQ);
1645 greh->protocol = htons(ETH_TYPE_ERSPAN2);
1646
1647 ersh->ver = 2;
1648 set_sid(ersh, sid);
1649 set_hwid(md2, hwid);
1650 md2->dir = dir;
1651
1652 if (!ovs_scan_len(s, &n, ")")) {
1653 return -EINVAL;
1654 }
1655
1656 header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
1657 sizeof *ersh + ERSPAN_V2_MDSIZE;
1658 } else {
1659 return -EINVAL;
1660 }
1661
1662 /* Check tunnel metadata. */
1663 if (data->tnl_type != tnl_type) {
1664 return -EINVAL;
1665 }
1666 if (data->header_len != header_len) {
1667 return -EINVAL;
1668 }
1669
1670 /* Out port */
1671 if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
1672 return -EINVAL;
1673 }
1674
1675 return n;
1676 }
1677
1678 struct ct_nat_params {
1679 bool snat;
1680 bool dnat;
1681 size_t addr_len;
1682 union {
1683 ovs_be32 ip;
1684 struct in6_addr ip6;
1685 } addr_min;
1686 union {
1687 ovs_be32 ip;
1688 struct in6_addr ip6;
1689 } addr_max;
1690 uint16_t proto_min;
1691 uint16_t proto_max;
1692 bool persistent;
1693 bool proto_hash;
1694 bool proto_random;
1695 };
1696
1697 static int
1698 scan_ct_nat_range(const char *s, int *n, struct ct_nat_params *p)
1699 {
1700 if (ovs_scan_len(s, n, "=")) {
1701 char ipv6_s[IPV6_SCAN_LEN + 1];
1702 struct in6_addr ipv6;
1703
1704 if (ovs_scan_len(s, n, IP_SCAN_FMT, IP_SCAN_ARGS(&p->addr_min.ip))) {
1705 p->addr_len = sizeof p->addr_min.ip;
1706 if (ovs_scan_len(s, n, "-")) {
1707 if (!ovs_scan_len(s, n, IP_SCAN_FMT,
1708 IP_SCAN_ARGS(&p->addr_max.ip))) {
1709 return -EINVAL;
1710 }
1711 }
1712 } else if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1713 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1714 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1715 p->addr_len = sizeof p->addr_min.ip6;
1716 p->addr_min.ip6 = ipv6;
1717 if (ovs_scan_len(s, n, "-")) {
1718 if ((ovs_scan_len(s, n, IPV6_SCAN_FMT, ipv6_s)
1719 || ovs_scan_len(s, n, "["IPV6_SCAN_FMT"]", ipv6_s))
1720 && inet_pton(AF_INET6, ipv6_s, &ipv6) == 1) {
1721 p->addr_max.ip6 = ipv6;
1722 } else {
1723 return -EINVAL;
1724 }
1725 }
1726 } else {
1727 return -EINVAL;
1728 }
1729 if (ovs_scan_len(s, n, ":%"SCNu16, &p->proto_min)) {
1730 if (ovs_scan_len(s, n, "-")) {
1731 if (!ovs_scan_len(s, n, "%"SCNu16, &p->proto_max)) {
1732 return -EINVAL;
1733 }
1734 }
1735 }
1736 }
1737 return 0;
1738 }
1739
1740 static int
1741 scan_ct_nat(const char *s, struct ct_nat_params *p)
1742 {
1743 int n = 0;
1744
1745 if (ovs_scan_len(s, &n, "nat")) {
1746 memset(p, 0, sizeof *p);
1747
1748 if (ovs_scan_len(s, &n, "(")) {
1749 char *end;
1750 int end_n;
1751
1752 end = strchr(s + n, ')');
1753 if (!end) {
1754 return -EINVAL;
1755 }
1756 end_n = end - s;
1757
1758 while (n < end_n) {
1759 n += strspn(s + n, delimiters);
1760 if (ovs_scan_len(s, &n, "src")) {
1761 int err = scan_ct_nat_range(s, &n, p);
1762 if (err) {
1763 return err;
1764 }
1765 p->snat = true;
1766 continue;
1767 }
1768 if (ovs_scan_len(s, &n, "dst")) {
1769 int err = scan_ct_nat_range(s, &n, p);
1770 if (err) {
1771 return err;
1772 }
1773 p->dnat = true;
1774 continue;
1775 }
1776 if (ovs_scan_len(s, &n, "persistent")) {
1777 p->persistent = true;
1778 continue;
1779 }
1780 if (ovs_scan_len(s, &n, "hash")) {
1781 p->proto_hash = true;
1782 continue;
1783 }
1784 if (ovs_scan_len(s, &n, "random")) {
1785 p->proto_random = true;
1786 continue;
1787 }
1788 return -EINVAL;
1789 }
1790
1791 if (p->snat && p->dnat) {
1792 return -EINVAL;
1793 }
1794 if ((p->addr_len != 0 &&
1795 memcmp(&p->addr_max, &in6addr_any, p->addr_len) &&
1796 memcmp(&p->addr_max, &p->addr_min, p->addr_len) < 0) ||
1797 (p->proto_max && p->proto_max < p->proto_min)) {
1798 return -EINVAL;
1799 }
1800 if (p->proto_hash && p->proto_random) {
1801 return -EINVAL;
1802 }
1803 n++;
1804 }
1805 }
1806 return n;
1807 }
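
/* For reference only, made-up examples of nat() specifications that the
 * scanner above is meant to accept:
 *
 *     nat
 *     nat(src=10.0.0.1)
 *     nat(src=10.0.0.1-10.0.0.254:32768-65535,random)
 *     nat(dst=[fe80::1]-[fe80::ffff],persistent)
 */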
1808
1809 static void
1810 nl_msg_put_ct_nat(struct ct_nat_params *p, struct ofpbuf *actions)
1811 {
1812 size_t start = nl_msg_start_nested(actions, OVS_CT_ATTR_NAT);
1813
1814 if (p->snat) {
1815 nl_msg_put_flag(actions, OVS_NAT_ATTR_SRC);
1816 } else if (p->dnat) {
1817 nl_msg_put_flag(actions, OVS_NAT_ATTR_DST);
1818 } else {
1819 goto out;
1820 }
1821 if (p->addr_len != 0) {
1822 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MIN, &p->addr_min,
1823 p->addr_len);
1824 if (memcmp(&p->addr_max, &p->addr_min, p->addr_len) > 0) {
1825 nl_msg_put_unspec(actions, OVS_NAT_ATTR_IP_MAX, &p->addr_max,
1826 p->addr_len);
1827 }
1828 if (p->proto_min) {
1829 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MIN, p->proto_min);
1830 if (p->proto_max && p->proto_max > p->proto_min) {
1831 nl_msg_put_u16(actions, OVS_NAT_ATTR_PROTO_MAX, p->proto_max);
1832 }
1833 }
1834 if (p->persistent) {
1835 nl_msg_put_flag(actions, OVS_NAT_ATTR_PERSISTENT);
1836 }
1837 if (p->proto_hash) {
1838 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_HASH);
1839 }
1840 if (p->proto_random) {
1841 nl_msg_put_flag(actions, OVS_NAT_ATTR_PROTO_RANDOM);
1842 }
1843 }
1844 out:
1845 nl_msg_end_nested(actions, start);
1846 }
1847
1848 static int
1849 parse_conntrack_action(const char *s_, struct ofpbuf *actions)
1850 {
1851 const char *s = s_;
1852
1853 if (ovs_scan(s, "ct")) {
1854 const char *helper = NULL;
1855 size_t helper_len = 0;
1856 bool commit = false;
1857 bool force_commit = false;
1858 uint16_t zone = 0;
1859 struct {
1860 uint32_t value;
1861 uint32_t mask;
1862 } ct_mark = { 0, 0 };
1863 struct {
1864 ovs_u128 value;
1865 ovs_u128 mask;
1866 } ct_label;
1867 struct ct_nat_params nat_params;
1868 bool have_nat = false;
1869 size_t start;
1870 char *end;
1871
1872 memset(&ct_label, 0, sizeof(ct_label));
1873
1874 s += 2;
1875 if (ovs_scan(s, "(")) {
1876 s++;
1877 find_end:
1878 end = strchr(s, ')');
1879 if (!end) {
1880 return -EINVAL;
1881 }
1882
1883 while (s != end) {
1884 int n;
1885
1886 s += strspn(s, delimiters);
1887 if (ovs_scan(s, "commit%n", &n)) {
1888 commit = true;
1889 s += n;
1890 continue;
1891 }
1892 if (ovs_scan(s, "force_commit%n", &n)) {
1893 force_commit = true;
1894 s += n;
1895 continue;
1896 }
1897 if (ovs_scan(s, "zone=%"SCNu16"%n", &zone, &n)) {
1898 s += n;
1899 continue;
1900 }
1901 if (ovs_scan(s, "mark=%"SCNx32"%n", &ct_mark.value, &n)) {
1902 s += n;
1903 n = -1;
1904 if (ovs_scan(s, "/%"SCNx32"%n", &ct_mark.mask, &n)) {
1905 s += n;
1906 } else {
1907 ct_mark.mask = UINT32_MAX;
1908 }
1909 continue;
1910 }
1911 if (ovs_scan(s, "label=%n", &n)) {
1912 int retval;
1913
1914 s += n;
1915 retval = scan_u128(s, &ct_label.value, &ct_label.mask);
1916 if (retval < 0) {
1917 return retval;
1918 }
1919 s += retval;
1920 continue;
1921 }
1922 if (ovs_scan(s, "helper=%n", &n)) {
1923 s += n;
1924 helper_len = strcspn(s, delimiters_end);
1925 if (!helper_len || helper_len > 15) {
1926 return -EINVAL;
1927 }
1928 helper = s;
1929 s += helper_len;
1930 continue;
1931 }
1932
1933 n = scan_ct_nat(s, &nat_params);
1934 if (n > 0) {
1935 s += n;
1936 have_nat = true;
1937
1938                     /* 'end' points to the ')' that closes the nested nat
1939                      * action; rescan to find the real end of ct(). */
1940 goto find_end;
1941 }
1942 /* Nothing matched. */
1943 return -EINVAL;
1944 }
1945 s++;
1946 }
1947 if (commit && force_commit) {
1948 return -EINVAL;
1949 }
1950
1951 start = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CT);
1952 if (commit) {
1953 nl_msg_put_flag(actions, OVS_CT_ATTR_COMMIT);
1954 } else if (force_commit) {
1955 nl_msg_put_flag(actions, OVS_CT_ATTR_FORCE_COMMIT);
1956 }
1957 if (zone) {
1958 nl_msg_put_u16(actions, OVS_CT_ATTR_ZONE, zone);
1959 }
1960 if (ct_mark.mask) {
1961 nl_msg_put_unspec(actions, OVS_CT_ATTR_MARK, &ct_mark,
1962 sizeof(ct_mark));
1963 }
1964 if (!ovs_u128_is_zero(ct_label.mask)) {
1965 nl_msg_put_unspec(actions, OVS_CT_ATTR_LABELS, &ct_label,
1966 sizeof ct_label);
1967 }
1968 if (helper) {
1969 nl_msg_put_string__(actions, OVS_CT_ATTR_HELPER, helper,
1970 helper_len);
1971 }
1972 if (have_nat) {
1973 nl_msg_put_ct_nat(&nat_params, actions);
1974 }
1975 nl_msg_end_nested(actions, start);
1976 }
1977
1978 return s - s_;
1979 }
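
/* Illustrative examples of the ct() syntax accepted by
 * parse_conntrack_action() above.  A sketch derived from the parsing code,
 * not an exhaustive grammar; the zone, mark, label, and helper values are
 * made up:
 *
 *     ct
 *     ct(commit)
 *     ct(commit,zone=5,mark=0xa/0xff,helper=ftp,nat(src=10.0.0.240))
 *     ct(zone=1,label=0x1/0x1)
 *
 * "commit" and "force_commit" are mutually exclusive, and a helper name is
 * limited to 15 bytes. */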
1980
1981 static void
1982 nsh_key_to_attr(struct ofpbuf *buf, const struct ovs_key_nsh *nsh,
1983 uint8_t * metadata, size_t md_size,
1984 bool is_mask)
1985 {
1986 size_t nsh_key_ofs;
1987 struct ovs_nsh_key_base base;
1988
1989 base.flags = nsh->flags;
1990 base.ttl = nsh->ttl;
1991 base.mdtype = nsh->mdtype;
1992 base.np = nsh->np;
1993 base.path_hdr = nsh->path_hdr;
1994
1995 nsh_key_ofs = nl_msg_start_nested(buf, OVS_KEY_ATTR_NSH);
1996 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_BASE, &base, sizeof base);
1997
1998 if (is_mask) {
1999 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2000 sizeof nsh->context);
2001 } else {
2002 switch (nsh->mdtype) {
2003 case NSH_M_TYPE1:
2004 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
2005 sizeof nsh->context);
2006 break;
2007 case NSH_M_TYPE2:
2008 if (metadata && md_size > 0) {
2009 nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD2, metadata,
2010 md_size);
2011 }
2012 break;
2013 default:
2014 /* No match support for other MD formats yet. */
2015 break;
2016 }
2017 }
2018 nl_msg_end_nested(buf, nsh_key_ofs);
2019 }
2020
2021
2022 static int
2023 parse_odp_push_nsh_action(const char *s, struct ofpbuf *actions)
2024 {
2025 int n = 0;
2026 int ret = 0;
2027 uint32_t spi = 0;
2028 uint8_t si = 255;
2029 uint32_t cd;
2030 struct ovs_key_nsh nsh;
2031 uint8_t metadata[NSH_CTX_HDRS_MAX_LEN];
2032 uint8_t md_size = 0;
2033
2034 if (!ovs_scan_len(s, &n, "push_nsh(")) {
2035 ret = -EINVAL;
2036 goto out;
2037 }
2038
2039     /* The default MD type is NSH_M_TYPE1. */
2040 nsh.flags = 0;
2041 nsh.ttl = 63;
2042 nsh.mdtype = NSH_M_TYPE1;
2043 nsh.np = NSH_P_ETHERNET;
2044 nsh.path_hdr = nsh_spi_si_to_path_hdr(0, 255);
2045 memset(nsh.context, 0, NSH_M_TYPE1_MDLEN);
2046
2047 for (;;) {
2048 n += strspn(s + n, delimiters);
2049 if (s[n] == ')') {
2050 break;
2051 }
2052
2053 if (ovs_scan_len(s, &n, "flags=%"SCNi8, &nsh.flags)) {
2054 continue;
2055 }
2056 if (ovs_scan_len(s, &n, "ttl=%"SCNi8, &nsh.ttl)) {
2057 continue;
2058 }
2059 if (ovs_scan_len(s, &n, "mdtype=%"SCNi8, &nsh.mdtype)) {
2060 switch (nsh.mdtype) {
2061 case NSH_M_TYPE1:
2062             /* This is the default format. */
2063 break;
2064 case NSH_M_TYPE2:
2065 /* Length will be updated later. */
2066 md_size = 0;
2067 break;
2068 default:
2069 ret = -EINVAL;
2070 goto out;
2071 }
2072 continue;
2073 }
2074 if (ovs_scan_len(s, &n, "np=%"SCNi8, &nsh.np)) {
2075 continue;
2076 }
2077 if (ovs_scan_len(s, &n, "spi=0x%"SCNx32, &spi)) {
2078 continue;
2079 }
2080 if (ovs_scan_len(s, &n, "si=%"SCNi8, &si)) {
2081 continue;
2082 }
2083 if (nsh.mdtype == NSH_M_TYPE1) {
2084 if (ovs_scan_len(s, &n, "c1=0x%"SCNx32, &cd)) {
2085 nsh.context[0] = htonl(cd);
2086 continue;
2087 }
2088 if (ovs_scan_len(s, &n, "c2=0x%"SCNx32, &cd)) {
2089 nsh.context[1] = htonl(cd);
2090 continue;
2091 }
2092 if (ovs_scan_len(s, &n, "c3=0x%"SCNx32, &cd)) {
2093 nsh.context[2] = htonl(cd);
2094 continue;
2095 }
2096 if (ovs_scan_len(s, &n, "c4=0x%"SCNx32, &cd)) {
2097 nsh.context[3] = htonl(cd);
2098 continue;
2099 }
2100         } else if (nsh.mdtype == NSH_M_TYPE2) {
2102 struct ofpbuf b;
2103 char buf[512];
2104 size_t mdlen, padding;
2105 if (ovs_scan_len(s, &n, "md2=0x%511[0-9a-fA-F]", buf)) {
2106 ofpbuf_use_stub(&b, metadata,
2107 NSH_CTX_HDRS_MAX_LEN);
2108 ofpbuf_put_hex(&b, buf, &mdlen);
2109                 /* Pad metadata to a multiple of 4 bytes. */
2110 padding = PAD_SIZE(mdlen, 4);
2111 if (padding > 0) {
2112 ofpbuf_push_zeros(&b, padding);
2113 }
2114 md_size = mdlen + padding;
2115 ofpbuf_uninit(&b);
2116 continue;
2117 }
2118 }
2119
2120 ret = -EINVAL;
2121 goto out;
2122 }
2123 out:
2124 if (ret >= 0) {
2125 nsh.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
2126 size_t offset = nl_msg_start_nested(actions, OVS_ACTION_ATTR_PUSH_NSH);
2127 nsh_key_to_attr(actions, &nsh, metadata, md_size, false);
2128 nl_msg_end_nested(actions, offset);
2129 ret = n;
2130 }
2131 return ret;
2132 }
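
/* Illustrative examples of the push_nsh() syntax accepted above.  A sketch
 * derived from the scanning logic, not an exhaustive grammar; the SPI, SI,
 * and metadata values are made up:
 *
 *     push_nsh(flags=0,ttl=63,mdtype=1,np=3,spi=0x100,si=255,c1=0x1,c2=0x2)
 *     push_nsh(mdtype=2,np=3,spi=0x100,si=254,md2=0x01020304)
 *
 * "mdtype=2" must appear before "md2=", because the scanner only accepts the
 * metadata argument that matches the MD type parsed so far (MD type 1 is the
 * default). */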
2133
2134 static int
2135 parse_action_list(const char *s, const struct simap *port_names,
2136 struct ofpbuf *actions)
2137 {
2138 int n = 0;
2139
2140 for (;;) {
2141 int retval;
2142
2143 n += strspn(s + n, delimiters);
2144 if (s[n] == ')') {
2145 break;
2146 }
2147 retval = parse_odp_action(s + n, port_names, actions);
2148 if (retval < 0) {
2149 return retval;
2150 }
2151 n += retval;
2152 }
2153
2154 return n;
2155 }
2156
2157 static int
2158 parse_odp_action(const char *s, const struct simap *port_names,
2159 struct ofpbuf *actions)
2160 {
2161 {
2162 uint32_t port;
2163 int n;
2164
2165 if (ovs_scan(s, "%"SCNi32"%n", &port, &n)) {
2166 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, port);
2167 return n;
2168 }
2169 }
2170
2171 {
2172 uint32_t max_len;
2173 int n;
2174
2175 if (ovs_scan(s, "trunc(%"SCNi32")%n", &max_len, &n)) {
2176 struct ovs_action_trunc *trunc;
2177
2178 trunc = nl_msg_put_unspec_uninit(actions,
2179 OVS_ACTION_ATTR_TRUNC, sizeof *trunc);
2180 trunc->max_len = max_len;
2181 return n;
2182 }
2183 }
2184
2185 if (port_names) {
2186 int len = strcspn(s, delimiters);
2187 struct simap_node *node;
2188
2189 node = simap_find_len(port_names, s, len);
2190 if (node) {
2191 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
2192 return len;
2193 }
2194 }
2195
2196 {
2197 uint32_t recirc_id;
2198 int n = -1;
2199
2200 if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
2201 nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
2202 return n;
2203 }
2204 }
2205
2206 if (!strncmp(s, "userspace(", 10)) {
2207 return parse_odp_userspace_action(s, actions);
2208 }
2209
2210 if (!strncmp(s, "set(", 4)) {
2211 size_t start_ofs;
2212 int retval;
2213 struct nlattr mask[1024 / sizeof(struct nlattr)];
2214 struct ofpbuf maskbuf = OFPBUF_STUB_INITIALIZER(mask);
2215 struct nlattr *nested, *key;
2216 size_t size;
2217
2218 start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
2219 retval = parse_odp_key_mask_attr(s + 4, port_names, actions, &maskbuf);
2220 if (retval < 0) {
2221 ofpbuf_uninit(&maskbuf);
2222 return retval;
2223 }
2224 if (s[retval + 4] != ')') {
2225 ofpbuf_uninit(&maskbuf);
2226 return -EINVAL;
2227 }
2228
2229 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2230 key = nested + 1;
2231
2232 size = nl_attr_get_size(mask);
2233 if (size == nl_attr_get_size(key)) {
2234 /* Change to masked set action if not fully masked. */
2235 if (!is_all_ones(mask + 1, size)) {
2236                 /* Remove padding of earlier key payload. */
2237 actions->size -= NLA_ALIGN(key->nla_len) - key->nla_len;
2238
2239 /* Put mask payload right after key payload */
2240 key->nla_len += size;
2241 ofpbuf_put(actions, mask + 1, size);
2242
2243 /* Add new padding as needed */
2244 ofpbuf_put_zeros(actions, NLA_ALIGN(key->nla_len) -
2245 key->nla_len);
2246
2247 /* 'actions' may have been reallocated by ofpbuf_put(). */
2248 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
2249 nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
2250 }
2251 }
2252 ofpbuf_uninit(&maskbuf);
2253
2254 nl_msg_end_nested(actions, start_ofs);
2255 return retval + 5;
2256 }
2257
2258 {
2259 struct ovs_action_push_vlan push;
2260 int tpid = ETH_TYPE_VLAN;
2261 int vid, pcp;
2262 int cfi = 1;
2263 int n = -1;
2264
2265 if (ovs_scan(s, "push_vlan(vid=%i,pcp=%i)%n", &vid, &pcp, &n)
2266 || ovs_scan(s, "push_vlan(vid=%i,pcp=%i,cfi=%i)%n",
2267 &vid, &pcp, &cfi, &n)
2268 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i)%n",
2269 &tpid, &vid, &pcp, &n)
2270 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
2271 &tpid, &vid, &pcp, &cfi, &n)) {
2272 push.vlan_tpid = htons(tpid);
2273 push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
2274 | (pcp << VLAN_PCP_SHIFT)
2275 | (cfi ? VLAN_CFI : 0));
2276 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_VLAN,
2277 &push, sizeof push);
2278
2279 return n;
2280 }
2281 }
2282
2283 if (!strncmp(s, "pop_vlan", 8)) {
2284 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_VLAN);
2285 return 8;
2286 }
2287
2288 {
2289 unsigned long long int meter_id;
2290 int n = -1;
2291
2292 if (sscanf(s, "meter(%lli)%n", &meter_id, &n) > 0 && n > 0) {
2293 nl_msg_put_u32(actions, OVS_ACTION_ATTR_METER, meter_id);
2294 return n;
2295 }
2296 }
2297
2298 {
2299 double percentage;
2300 int n = -1;
2301
2302 if (ovs_scan(s, "sample(sample=%lf%%,actions(%n", &percentage, &n)
2303 && percentage >= 0. && percentage <= 100.0) {
2304 size_t sample_ofs, actions_ofs;
2305 double probability;
2306
2307 probability = floor(UINT32_MAX * (percentage / 100.0) + .5);
2308 sample_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
2309 nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY,
2310 (probability <= 0 ? 0
2311 : probability >= UINT32_MAX ? UINT32_MAX
2312 : probability));
2313
2314 actions_ofs = nl_msg_start_nested(actions,
2315 OVS_SAMPLE_ATTR_ACTIONS);
2316 int retval = parse_action_list(s + n, port_names, actions);
2317 if (retval < 0)
2318 return retval;
2319
2320 n += retval;
2321 nl_msg_end_nested(actions, actions_ofs);
2322 nl_msg_end_nested(actions, sample_ofs);
2323
2324 return s[n + 1] == ')' ? n + 2 : -EINVAL;
2325 }
2326 }
2327
2328 {
2329 if (!strncmp(s, "clone(", 6)) {
2330 size_t actions_ofs;
2331 int n = 6;
2332
2333 actions_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CLONE);
2334 int retval = parse_action_list(s + n, port_names, actions);
2335 if (retval < 0) {
2336 return retval;
2337 }
2338 n += retval;
2339 nl_msg_end_nested(actions, actions_ofs);
2340 return n + 1;
2341 }
2342 }
2343
2344 {
2345 if (!strncmp(s, "push_nsh(", 9)) {
2346 int retval = parse_odp_push_nsh_action(s, actions);
2347 if (retval < 0) {
2348 return retval;
2349 }
2350 return retval + 1;
2351 }
2352 }
2353
2354 {
2355 int n;
2356 if (ovs_scan(s, "pop_nsh()%n", &n)) {
2357 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_NSH);
2358 return n;
2359 }
2360 }
2361
2362 {
2363 uint32_t port;
2364 int n;
2365
2366 if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
2367 nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
2368 return n;
2369 }
2370 }
2371
2372 {
2373 if (!strncmp(s, "ct_clear", 8)) {
2374 nl_msg_put_flag(actions, OVS_ACTION_ATTR_CT_CLEAR);
2375 return 8;
2376 }
2377 }
2378
2379 {
2380 int retval;
2381
2382 retval = parse_conntrack_action(s, actions);
2383 if (retval) {
2384 return retval;
2385 }
2386 }
2387
2388 {
2389 struct ovs_action_push_tnl data;
2390 int n;
2391
2392 n = ovs_parse_tnl_push(s, &data);
2393 if (n > 0) {
2394 odp_put_tnl_push_action(actions, &data);
2395 return n;
2396 } else if (n < 0) {
2397 return n;
2398 }
2399 }
2400 return -EINVAL;
2401 }
2402
2403 /* Parses the string representation of datapath actions, in the format output
2404 * by format_odp_action(). Returns 0 if successful, otherwise a positive errno
2405 * value. On success, the ODP actions are appended to 'actions' as a series of
2406 * Netlink attributes. On failure, no data is appended to 'actions'. Either
2407 * way, 'actions''s data might be reallocated. */
2408 int
2409 odp_actions_from_string(const char *s, const struct simap *port_names,
2410 struct ofpbuf *actions)
2411 {
2412 size_t old_size;
2413
2414 if (!strcasecmp(s, "drop")) {
2415 return 0;
2416 }
2417
2418 old_size = actions->size;
2419 for (;;) {
2420 int retval;
2421
2422 s += strspn(s, delimiters);
2423 if (!*s) {
2424 return 0;
2425 }
2426
2427 retval = parse_odp_action(s, port_names, actions);
2428 if (retval < 0 || !strchr(delimiters, s[retval])) {
2429 actions->size = old_size;
2430 return -retval;
2431 }
2432 s += retval;
2433 }
2434
2435 return 0;
2436 }
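
/* Illustrative use of odp_actions_from_string().  This is a sketch, not code
 * taken from the tree; the action string and initial buffer size are
 * arbitrary:
 *
 *     struct ofpbuf actions;
 *
 *     ofpbuf_init(&actions, 64);
 *     if (!odp_actions_from_string("trunc(100),2", NULL, &actions)) {
 *         ...hand actions.data / actions.size to the datapath...
 *     }
 *     ofpbuf_uninit(&actions);
 */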
2437 \f
2438 static const struct attr_len_tbl ovs_vxlan_ext_attr_lens[OVS_VXLAN_EXT_MAX + 1] = {
2439 [OVS_VXLAN_EXT_GBP] = { .len = 4 },
2440 };
2441
2442 static const struct attr_len_tbl ovs_tun_key_attr_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
2443 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = 8 },
2444 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = 4 },
2445 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = 4 },
2446 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
2447 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
2448 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
2449 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
2450 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = 2 },
2451 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = 2 },
2452 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
2453 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = ATTR_LEN_VARIABLE },
2454 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = ATTR_LEN_NESTED,
2455                                          .next = ovs_vxlan_ext_attr_lens,
2456 .next_max = OVS_VXLAN_EXT_MAX},
2457 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = 16 },
2458 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = 16 },
2459 [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = ATTR_LEN_VARIABLE },
2460 };
2461
2462 const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
2463 [OVS_KEY_ATTR_ENCAP] = { .len = ATTR_LEN_NESTED },
2464 [OVS_KEY_ATTR_PRIORITY] = { .len = 4 },
2465 [OVS_KEY_ATTR_SKB_MARK] = { .len = 4 },
2466 [OVS_KEY_ATTR_DP_HASH] = { .len = 4 },
2467 [OVS_KEY_ATTR_RECIRC_ID] = { .len = 4 },
2468 [OVS_KEY_ATTR_TUNNEL] = { .len = ATTR_LEN_NESTED,
2469 .next = ovs_tun_key_attr_lens,
2470 .next_max = OVS_TUNNEL_KEY_ATTR_MAX },
2471 [OVS_KEY_ATTR_IN_PORT] = { .len = 4 },
2472 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
2473 [OVS_KEY_ATTR_VLAN] = { .len = 2 },
2474 [OVS_KEY_ATTR_ETHERTYPE] = { .len = 2 },
2475 [OVS_KEY_ATTR_MPLS] = { .len = ATTR_LEN_VARIABLE },
2476 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
2477 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
2478 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
2479 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = 2 },
2480 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
2481 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
2482 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
2483 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
2484 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
2485 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
2486 [OVS_KEY_ATTR_CT_STATE] = { .len = 4 },
2487 [OVS_KEY_ATTR_CT_ZONE] = { .len = 2 },
2488 [OVS_KEY_ATTR_CT_MARK] = { .len = 4 },
2489 [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
2490 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = { .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
2491 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = { .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
2492 [OVS_KEY_ATTR_PACKET_TYPE] = { .len = 4 },
2493 [OVS_KEY_ATTR_NSH] = { .len = ATTR_LEN_NESTED,
2494 .next = ovs_nsh_key_attr_lens,
2495 .next_max = OVS_NSH_KEY_ATTR_MAX },
2496 };
2497
2498 /* Returns the correct length of the payload for a flow key attribute of the
2499 * specified 'type', ATTR_LEN_INVALID if 'type' is unknown, ATTR_LEN_VARIABLE
2500 * if the attribute's payload is variable length, or ATTR_LEN_NESTED if the
2501 * payload is a nested type. */
2502 static int
2503 odp_key_attr_len(const struct attr_len_tbl tbl[], int max_type, uint16_t type)
2504 {
2505 if (type > max_type) {
2506 return ATTR_LEN_INVALID;
2507 }
2508
2509 return tbl[type].len;
2510 }
2511
2512 static void
2513 format_generic_odp_key(const struct nlattr *a, struct ds *ds)
2514 {
2515 size_t len = nl_attr_get_size(a);
2516 if (len) {
2517 const uint8_t *unspec;
2518 unsigned int i;
2519
2520 unspec = nl_attr_get(a);
2521 for (i = 0; i < len; i++) {
2522 if (i) {
2523 ds_put_char(ds, ' ');
2524 }
2525 ds_put_format(ds, "%02x", unspec[i]);
2526 }
2527 }
2528 }
2529
2530 static const char *
2531 ovs_frag_type_to_string(enum ovs_frag_type type)
2532 {
2533 switch (type) {
2534 case OVS_FRAG_TYPE_NONE:
2535 return "no";
2536 case OVS_FRAG_TYPE_FIRST:
2537 return "first";
2538 case OVS_FRAG_TYPE_LATER:
2539 return "later";
2540 case __OVS_FRAG_TYPE_MAX:
2541 default:
2542 return "<error>";
2543 }
2544 }
2545
2546 enum odp_key_fitness
2547 odp_nsh_hdr_from_attr(const struct nlattr *attr,
2548 struct nsh_hdr *nsh_hdr, size_t size)
2549 {
2550 unsigned int left;
2551 const struct nlattr *a;
2552 bool unknown = false;
2553 uint8_t flags = 0;
2554 uint8_t ttl = 63;
2555 size_t mdlen = 0;
2556 bool has_md1 = false;
2557 bool has_md2 = false;
2558
2559 NL_NESTED_FOR_EACH (a, left, attr) {
2560 uint16_t type = nl_attr_type(a);
2561 size_t len = nl_attr_get_size(a);
2562 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2563 OVS_NSH_KEY_ATTR_MAX, type);
2564
2565 if (len != expected_len && expected_len >= 0) {
2566 return ODP_FIT_ERROR;
2567 }
2568
2569 switch (type) {
2570 case OVS_NSH_KEY_ATTR_BASE: {
2571 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2572 nsh_hdr->next_proto = base->np;
2573 nsh_hdr->md_type = base->mdtype;
2574 put_16aligned_be32(&nsh_hdr->path_hdr, base->path_hdr);
2575 flags = base->flags;
2576 ttl = base->ttl;
2577 break;
2578 }
2579 case OVS_NSH_KEY_ATTR_MD1: {
2580 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2581 struct nsh_md1_ctx *md1_dst = &nsh_hdr->md1;
2582 has_md1 = true;
2583 mdlen = nl_attr_get_size(a);
2584 if ((mdlen + NSH_BASE_HDR_LEN != NSH_M_TYPE1_LEN) ||
2585 (mdlen + NSH_BASE_HDR_LEN > size)) {
2586 return ODP_FIT_ERROR;
2587 }
2588 memcpy(md1_dst, md1, mdlen);
2589 break;
2590 }
2591 case OVS_NSH_KEY_ATTR_MD2: {
2592 struct nsh_md2_tlv *md2_dst = &nsh_hdr->md2;
2593 const uint8_t *md2 = nl_attr_get(a);
2594 has_md2 = true;
2595 mdlen = nl_attr_get_size(a);
2596 if (mdlen + NSH_BASE_HDR_LEN > size) {
2597 return ODP_FIT_ERROR;
2598 }
2599 memcpy(md2_dst, md2, mdlen);
2600 break;
2601 }
2602 default:
2603 /* Allow this to show up as unexpected, if there are unknown
2604             /* Allow this to show up as unexpected, so that unsupported or
2605              * unknown NSH attributes eventually result in
2606              * ODP_FIT_TOO_MUCH. */
2606 break;
2607 }
2608 }
2609
2610 if (unknown) {
2611 return ODP_FIT_TOO_MUCH;
2612 }
2613
2614 if ((has_md1 && nsh_hdr->md_type != NSH_M_TYPE1)
2615 || (has_md2 && nsh_hdr->md_type != NSH_M_TYPE2)) {
2616 return ODP_FIT_ERROR;
2617 }
2618
2619 /* nsh header length = NSH_BASE_HDR_LEN + mdlen */
2620 nsh_set_flags_ttl_len(nsh_hdr, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
2621
2622 return ODP_FIT_PERFECT;
2623 }
2624
2625 enum odp_key_fitness
2626 odp_nsh_key_from_attr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
2627 struct ovs_key_nsh *nsh_mask)
2628 {
2629 unsigned int left;
2630 const struct nlattr *a;
2631 bool unknown = false;
2632 bool has_md1 = false;
2633
2634 NL_NESTED_FOR_EACH (a, left, attr) {
2635 uint16_t type = nl_attr_type(a);
2636 size_t len = nl_attr_get_size(a);
2637 int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
2638 OVS_NSH_KEY_ATTR_MAX, type);
2639
2640         /* The attribute may carry a mask appended to the key, in which
2641          * case 'len' is 2 * expected_len. */
2642 if ((len != expected_len) && (len != 2 * expected_len) &&
2643 (expected_len >= 0)) {
2644 return ODP_FIT_ERROR;
2645 }
2646
2647 if ((nsh_mask && (expected_len >= 0) && (len != 2 * expected_len)) ||
2648 (!nsh_mask && (expected_len >= 0) && (len == 2 * expected_len))) {
2649 return ODP_FIT_ERROR;
2650 }
2651
2652 switch (type) {
2653 case OVS_NSH_KEY_ATTR_UNSPEC:
2654 break;
2655 case OVS_NSH_KEY_ATTR_BASE: {
2656 const struct ovs_nsh_key_base *base = nl_attr_get(a);
2657 nsh->flags = base->flags;
2658 nsh->ttl = base->ttl;
2659 nsh->mdtype = base->mdtype;
2660 nsh->np = base->np;
2661 nsh->path_hdr = base->path_hdr;
2662 if (nsh_mask && (len == 2 * sizeof(*base))) {
2663 const struct ovs_nsh_key_base *base_mask = base + 1;
2664 nsh_mask->flags = base_mask->flags;
2665 nsh_mask->ttl = base_mask->ttl;
2666 nsh_mask->mdtype = base_mask->mdtype;
2667 nsh_mask->np = base_mask->np;
2668 nsh_mask->path_hdr = base_mask->path_hdr;
2669 }
2670 break;
2671 }
2672 case OVS_NSH_KEY_ATTR_MD1: {
2673 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
2674 has_md1 = true;
2675 memcpy(nsh->context, md1->context, sizeof md1->context);
2676 if (len == 2 * sizeof(*md1)) {
2677 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
2678 memcpy(nsh_mask->context, md1_mask->context,
2679 sizeof(*md1_mask));
2680 }
2681 break;
2682 }
2683 case OVS_NSH_KEY_ATTR_MD2:
2684 default:
2685             /* Allow this to show up as unexpected, so that unsupported or
2686              * unknown NSH attributes eventually result in
2687              * ODP_FIT_TOO_MUCH. */
2687 unknown = true;
2688 break;
2689 }
2690 }
2691
2692 if (unknown) {
2693 return ODP_FIT_TOO_MUCH;
2694 }
2695
2696 if (has_md1 && nsh->mdtype != NSH_M_TYPE1) {
2697 return ODP_FIT_ERROR;
2698 }
2699
2700 return ODP_FIT_PERFECT;
2701 }
2702
2703 static enum odp_key_fitness
2704 odp_tun_key_from_attr__(const struct nlattr *attr, bool is_mask,
2705 struct flow_tnl *tun)
2706 {
2707 unsigned int left;
2708 const struct nlattr *a;
2709 bool ttl = false;
2710 bool unknown = false;
2711
2712 NL_NESTED_FOR_EACH(a, left, attr) {
2713 uint16_t type = nl_attr_type(a);
2714 size_t len = nl_attr_get_size(a);
2715 int expected_len = odp_key_attr_len(ovs_tun_key_attr_lens,
2716 OVS_TUNNEL_ATTR_MAX, type);
2717
2718 if (len != expected_len && expected_len >= 0) {
2719 return ODP_FIT_ERROR;
2720 }
2721
2722 switch (type) {
2723 case OVS_TUNNEL_KEY_ATTR_ID:
2724 tun->tun_id = nl_attr_get_be64(a);
2725 tun->flags |= FLOW_TNL_F_KEY;
2726 break;
2727 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
2728 tun->ip_src = nl_attr_get_be32(a);
2729 break;
2730 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
2731 tun->ip_dst = nl_attr_get_be32(a);
2732 break;
2733 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
2734 tun->ipv6_src = nl_attr_get_in6_addr(a);
2735 break;
2736 case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
2737 tun->ipv6_dst = nl_attr_get_in6_addr(a);
2738 break;
2739 case OVS_TUNNEL_KEY_ATTR_TOS:
2740 tun->ip_tos = nl_attr_get_u8(a);
2741 break;
2742 case OVS_TUNNEL_KEY_ATTR_TTL:
2743 tun->ip_ttl = nl_attr_get_u8(a);
2744 ttl = true;
2745 break;
2746 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
2747 tun->flags |= FLOW_TNL_F_DONT_FRAGMENT;
2748 break;
2749 case OVS_TUNNEL_KEY_ATTR_CSUM:
2750 tun->flags |= FLOW_TNL_F_CSUM;
2751 break;
2752 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
2753 tun->tp_src = nl_attr_get_be16(a);
2754 break;
2755 case OVS_TUNNEL_KEY_ATTR_TP_DST:
2756 tun->tp_dst = nl_attr_get_be16(a);
2757 break;
2758 case OVS_TUNNEL_KEY_ATTR_OAM:
2759 tun->flags |= FLOW_TNL_F_OAM;
2760 break;
2761 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
2762 static const struct nl_policy vxlan_opts_policy[] = {
2763 [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
2764 };
2765 struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
2766
2767 if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
2768 return ODP_FIT_ERROR;
2769 }
2770
2771 if (ext[OVS_VXLAN_EXT_GBP]) {
2772 uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
2773
2774 tun->gbp_id = htons(gbp & 0xFFFF);
2775 tun->gbp_flags = (gbp >> 16) & 0xFF;
2776 }
2777
2778 break;
2779 }
2780 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
2781 tun_metadata_from_geneve_nlattr(a, is_mask, tun);
2782 break;
2783 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: {
2784 int attr_len = nl_attr_get_size(a);
2785 struct erspan_metadata opts;
2786
2787 memcpy(&opts, nl_attr_get(a), attr_len);
2788
2789 tun->erspan_ver = opts.version;
2790 if (tun->erspan_ver == 1) {
2791 tun->erspan_idx = ntohl(opts.u.index);
2792 } else if (tun->erspan_ver == 2) {
2793 tun->erspan_dir = opts.u.md2.dir;
2794 tun->erspan_hwid = get_hwid(&opts.u.md2);
2795 } else {
2796 VLOG_WARN("%s invalid erspan version\n", __func__);
2797 }
2798 break;
2799 }
2800
2801 default:
2802             /* Allow this to show up as unexpected, so that unknown tunnel
2803              * attributes eventually result in ODP_FIT_TOO_MUCH. */
2804 unknown = true;
2805 break;
2806 }
2807 }
2808
2809 if (!ttl) {
2810 return ODP_FIT_ERROR;
2811 }
2812 if (unknown) {
2813 return ODP_FIT_TOO_MUCH;
2814 }
2815 return ODP_FIT_PERFECT;
2816 }
2817
2818 enum odp_key_fitness
2819 odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun)
2820 {
2821 memset(tun, 0, sizeof *tun);
2822 return odp_tun_key_from_attr__(attr, false, tun);
2823 }
2824
2825 static void
2826 tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key,
2827 const struct flow_tnl *tun_flow_key,
2828 const struct ofpbuf *key_buf, const char *tnl_type)
2829 {
2830 size_t tun_key_ofs;
2831
2832 tun_key_ofs = nl_msg_start_nested(a, OVS_KEY_ATTR_TUNNEL);
2833
2834 /* tun_id != 0 without FLOW_TNL_F_KEY is valid if tun_key is a mask. */
2835 if (tun_key->tun_id || tun_key->flags & FLOW_TNL_F_KEY) {
2836 nl_msg_put_be64(a, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id);
2837 }
2838 if (tun_key->ip_src) {
2839 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ip_src);
2840 }
2841 if (tun_key->ip_dst) {
2842 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
2843 }
2844 if (ipv6_addr_is_set(&tun_key->ipv6_src)) {
2845 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_SRC, &tun_key->ipv6_src);
2846 }
2847 if (ipv6_addr_is_set(&tun_key->ipv6_dst)) {
2848 nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_DST, &tun_key->ipv6_dst);
2849 }
2850 if (tun_key->ip_tos) {
2851 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
2852 }
2853 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ip_ttl);
2854 if (tun_key->flags & FLOW_TNL_F_DONT_FRAGMENT) {
2855 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
2856 }
2857 if (tun_key->flags & FLOW_TNL_F_CSUM) {
2858 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
2859 }
2860 if (tun_key->tp_src) {
2861 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src);
2862 }
2863 if (tun_key->tp_dst) {
2864 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst);
2865 }
2866 if (tun_key->flags & FLOW_TNL_F_OAM) {
2867 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
2868 }
2869
2870     /* If tnl_type names a particular type of output tunnel, emit only the
2871      * tunnel metadata relevant to that type into the nlattr.  If tnl_type
2872      * is NULL, emit tunnel metadata according to 'tun_key'. */
2875 if ((!tnl_type || !strcmp(tnl_type, "vxlan")) &&
2876 (tun_key->gbp_flags || tun_key->gbp_id)) {
2877 size_t vxlan_opts_ofs;
2878
2879 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
2880 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
2881 (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
2882 nl_msg_end_nested(a, vxlan_opts_ofs);
2883 }
2884
2885 if (!tnl_type || !strcmp(tnl_type, "geneve")) {
2886 tun_metadata_to_geneve_nlattr(tun_key, tun_flow_key, key_buf, a);
2887 }
2888
2889 if ((!tnl_type || !strcmp(tnl_type, "erspan") ||
2890 !strcmp(tnl_type, "ip6erspan")) &&
2891 (tun_key->erspan_ver == 1 || tun_key->erspan_ver == 2)) {
2892 struct erspan_metadata opts;
2893
2894 opts.version = tun_key->erspan_ver;
2895 if (opts.version == 1) {
2896 opts.u.index = htonl(tun_key->erspan_idx);
2897 } else {
2898 opts.u.md2.dir = tun_key->erspan_dir;
2899 set_hwid(&opts.u.md2, tun_key->erspan_hwid);
2900 }
2901 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
2902 &opts, sizeof(opts));
2903 }
2904
2905 nl_msg_end_nested(a, tun_key_ofs);
2906 }
2907
2908 static bool
2909 odp_mask_is_constant__(enum ovs_key_attr attr, const void *mask, size_t size,
2910 int constant)
2911 {
2912 /* Convert 'constant' to all the widths we need. C conversion rules ensure
2913 * that -1 becomes all-1-bits and 0 does not change. */
2914 ovs_be16 be16 = (OVS_FORCE ovs_be16) constant;
2915 uint32_t u32 = constant;
2916 uint8_t u8 = constant;
2917 const struct in6_addr *in6 = constant ? &in6addr_exact : &in6addr_any;
2918
2919 switch (attr) {
2920 case OVS_KEY_ATTR_UNSPEC:
2921 case OVS_KEY_ATTR_ENCAP:
2922 case __OVS_KEY_ATTR_MAX:
2923 default:
2924 return false;
2925
2926 case OVS_KEY_ATTR_PRIORITY:
2927 case OVS_KEY_ATTR_IN_PORT:
2928 case OVS_KEY_ATTR_ETHERNET:
2929 case OVS_KEY_ATTR_VLAN:
2930 case OVS_KEY_ATTR_ETHERTYPE:
2931 case OVS_KEY_ATTR_IPV4:
2932 case OVS_KEY_ATTR_TCP:
2933 case OVS_KEY_ATTR_UDP:
2934 case OVS_KEY_ATTR_ICMP:
2935 case OVS_KEY_ATTR_ICMPV6:
2936 case OVS_KEY_ATTR_ND:
2937 case OVS_KEY_ATTR_SKB_MARK:
2938 case OVS_KEY_ATTR_TUNNEL:
2939 case OVS_KEY_ATTR_SCTP:
2940 case OVS_KEY_ATTR_DP_HASH:
2941 case OVS_KEY_ATTR_RECIRC_ID:
2942 case OVS_KEY_ATTR_MPLS:
2943 case OVS_KEY_ATTR_CT_STATE:
2944 case OVS_KEY_ATTR_CT_ZONE:
2945 case OVS_KEY_ATTR_CT_MARK:
2946 case OVS_KEY_ATTR_CT_LABELS:
2947 case OVS_KEY_ATTR_PACKET_TYPE:
2948 case OVS_KEY_ATTR_NSH:
2949 return is_all_byte(mask, size, u8);
2950
2951 case OVS_KEY_ATTR_TCP_FLAGS:
2952 return TCP_FLAGS(*(ovs_be16 *) mask) == TCP_FLAGS(be16);
2953
2954 case OVS_KEY_ATTR_IPV6: {
2955 const struct ovs_key_ipv6 *ipv6_mask = mask;
2956 return ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
2957 == htonl(IPV6_LABEL_MASK & u32)
2958 && ipv6_mask->ipv6_proto == u8
2959 && ipv6_mask->ipv6_tclass == u8
2960 && ipv6_mask->ipv6_hlimit == u8
2961 && ipv6_mask->ipv6_frag == u8
2962 && ipv6_addr_equals(&ipv6_mask->ipv6_src, in6)
2963 && ipv6_addr_equals(&ipv6_mask->ipv6_dst, in6));
2964 }
2965
2966 case OVS_KEY_ATTR_ARP:
2967 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_arp, arp_tha), u8);
2968
2969 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
2970 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv4,
2971 ipv4_proto), u8);
2972
2973 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
2974 return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv6,
2975 ipv6_proto), u8);
2976 }
2977 }
2978
2979 /* The caller must already have verified that 'ma' has a correct length.
2980 *
2981 * The main purpose of this function is formatting, to allow code to figure out
2982 * whether the mask can be omitted. It doesn't try hard for attributes that
2983 * contain sub-attributes, etc., because normally those would be broken down
2984 * further for formatting. */
2985 static bool
2986 odp_mask_attr_is_wildcard(const struct nlattr *ma)
2987 {
2988 return odp_mask_is_constant__(nl_attr_type(ma),
2989 nl_attr_get(ma), nl_attr_get_size(ma), 0);
2990 }
2991
2992 /* The caller must already have verified that 'size' is a correct length for
2993 * 'attr'.
2994 *
2995 * The main purpose of this function is formatting, to allow code to figure out
2996 * whether the mask can be omitted. It doesn't try hard for attributes that
2997 * contain sub-attributes, etc., because normally those would be broken down
2998 * further for formatting. */
2999 static bool
3000 odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
3001 {
3002 return odp_mask_is_constant__(attr, mask, size, -1);
3003 }
3004
3005 /* The caller must already have verified that 'ma' has a correct length. */
3006 static bool
3007 odp_mask_attr_is_exact(const struct nlattr *ma)
3008 {
3009 enum ovs_key_attr attr = nl_attr_type(ma);
3010 return odp_mask_is_exact(attr, nl_attr_get(ma), nl_attr_get_size(ma));
3011 }
3012
3013 void
3014 odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
3015 char *port_name)
3016 {
3017 struct odp_portno_names *odp_portno_names;
3018
3019 odp_portno_names = xmalloc(sizeof *odp_portno_names);
3020 odp_portno_names->port_no = port_no;
3021 odp_portno_names->name = xstrdup(port_name);
3022 hmap_insert(portno_names, &odp_portno_names->hmap_node,
3023 hash_odp_port(port_no));
3024 }
3025
3026 static char *
3027 odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
3028 {
3029 if (portno_names) {
3030 struct odp_portno_names *odp_portno_names;
3031
3032 HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
3033 hash_odp_port(port_no), portno_names) {
3034 if (odp_portno_names->port_no == port_no) {
3035 return odp_portno_names->name;
3036 }
3037 }
3038 }
3039 return NULL;
3040 }
3041
3042 void
3043 odp_portno_names_destroy(struct hmap *portno_names)
3044 {
3045 struct odp_portno_names *odp_portno_names;
3046
3047 HMAP_FOR_EACH_POP (odp_portno_names, hmap_node, portno_names) {
3048 free(odp_portno_names->name);
3049 free(odp_portno_names);
3050 }
3051 }
3052
3053 void
3054 odp_portno_name_format(const struct hmap *portno_names, odp_port_t port_no,
3055 struct ds *s)
3056 {
3057 const char *name = odp_portno_names_get(portno_names, port_no);
3058 if (name) {
3059 ds_put_cstr(s, name);
3060 } else {
3061 ds_put_format(s, "%"PRIu32, port_no);
3062 }
3063 }
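
/* Illustrative use of the portno_names helpers above.  A sketch only: the
 * map is normally filled in from a datapath port dump, and 'port_no', "br0",
 * and the initialized struct ds 'ds' here are made up:
 *
 *     struct hmap portno_names = HMAP_INITIALIZER(&portno_names);
 *
 *     odp_portno_names_set(&portno_names, port_no, "br0");
 *     ...
 *     odp_portno_name_format(&portno_names, port_no, &ds);
 *     ...
 *     odp_portno_names_destroy(&portno_names);
 *     hmap_destroy(&portno_names);
 */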
3064
3065 /* Format helpers. */
3066
3067 static void
3068 format_eth(struct ds *ds, const char *name, const struct eth_addr key,
3069 const struct eth_addr *mask, bool verbose)
3070 {
3071 bool mask_empty = mask && eth_addr_is_zero(*mask);
3072
3073 if (verbose || !mask_empty) {
3074 bool mask_full = !mask || eth_mask_is_exact(*mask);
3075
3076 if (mask_full) {
3077 ds_put_format(ds, "%s="ETH_ADDR_FMT",", name, ETH_ADDR_ARGS(key));
3078 } else {
3079 ds_put_format(ds, "%s=", name);
3080 eth_format_masked(key, mask, ds);
3081 ds_put_char(ds, ',');
3082 }
3083 }
3084 }
3085
3086
3087 static void
3088 format_be64(struct ds *ds, const char *name, ovs_be64 key,
3089 const ovs_be64 *mask, bool verbose)
3090 {
3091 bool mask_empty = mask && !*mask;
3092
3093 if (verbose || !mask_empty) {
3094 bool mask_full = !mask || *mask == OVS_BE64_MAX;
3095
3096 ds_put_format(ds, "%s=0x%"PRIx64, name, ntohll(key));
3097 if (!mask_full) { /* Partially masked. */
3098 ds_put_format(ds, "/%#"PRIx64, ntohll(*mask));
3099 }
3100 ds_put_char(ds, ',');
3101 }
3102 }
3103
3104 static void
3105 format_ipv4(struct ds *ds, const char *name, ovs_be32 key,
3106 const ovs_be32 *mask, bool verbose)
3107 {
3108 bool mask_empty = mask && !*mask;
3109
3110 if (verbose || !mask_empty) {
3111 bool mask_full = !mask || *mask == OVS_BE32_MAX;
3112
3113 ds_put_format(ds, "%s="IP_FMT, name, IP_ARGS(key));
3114 if (!mask_full) { /* Partially masked. */
3115 ds_put_format(ds, "/"IP_FMT, IP_ARGS(*mask));
3116 }
3117 ds_put_char(ds, ',');
3118 }
3119 }
3120
3121 static void
3122 format_in6_addr(struct ds *ds, const char *name,
3123 const struct in6_addr *key,
3124 const struct in6_addr *mask,
3125 bool verbose)
3126 {
3127 char buf[INET6_ADDRSTRLEN];
3128 bool mask_empty = mask && ipv6_mask_is_any(mask);
3129
3130 if (verbose || !mask_empty) {
3131 bool mask_full = !mask || ipv6_mask_is_exact(mask);
3132
3133 inet_ntop(AF_INET6, key, buf, sizeof buf);
3134 ds_put_format(ds, "%s=%s", name, buf);
3135 if (!mask_full) { /* Partially masked. */
3136 inet_ntop(AF_INET6, mask, buf, sizeof buf);
3137 ds_put_format(ds, "/%s", buf);
3138 }
3139 ds_put_char(ds, ',');
3140 }
3141 }
3142
3143 static void
3144 format_ipv6_label(struct ds *ds, const char *name, ovs_be32 key,
3145 const ovs_be32 *mask, bool verbose)
3146 {
3147 bool mask_empty = mask && !*mask;
3148
3149 if (verbose || !mask_empty) {
3150 bool mask_full = !mask
3151 || (*mask & htonl(IPV6_LABEL_MASK)) == htonl(IPV6_LABEL_MASK);
3152
3153 ds_put_format(ds, "%s=%#"PRIx32, name, ntohl(key));
3154 if (!mask_full) { /* Partially masked. */
3155 ds_put_format(ds, "/%#"PRIx32, ntohl(*mask));
3156 }
3157 ds_put_char(ds, ',');
3158 }
3159 }
3160
3161 static void
3162 format_u8x(struct ds *ds, const char *name, uint8_t key,
3163 const uint8_t *mask, bool verbose)
3164 {
3165 bool mask_empty = mask && !*mask;
3166
3167 if (verbose || !mask_empty) {
3168 bool mask_full = !mask || *mask == UINT8_MAX;
3169
3170 ds_put_format(ds, "%s=%#"PRIx8, name, key);
3171 if (!mask_full) { /* Partially masked. */
3172 ds_put_format(ds, "/%#"PRIx8, *mask);
3173 }
3174 ds_put_char(ds, ',');
3175 }
3176 }
3177
3178 static void
3179 format_u8u(struct ds *ds, const char *name, uint8_t key,
3180 const uint8_t *mask, bool verbose)
3181 {
3182 bool mask_empty = mask && !*mask;
3183
3184 if (verbose || !mask_empty) {
3185 bool mask_full = !mask || *mask == UINT8_MAX;
3186
3187 ds_put_format(ds, "%s=%"PRIu8, name, key);
3188 if (!mask_full) { /* Partially masked. */
3189 ds_put_format(ds, "/%#"PRIx8, *mask);
3190 }
3191 ds_put_char(ds, ',');
3192 }
3193 }
3194
3195 static void
3196 format_be16(struct ds *ds, const char *name, ovs_be16 key,
3197 const ovs_be16 *mask, bool verbose)
3198 {
3199 bool mask_empty = mask && !*mask;
3200
3201 if (verbose || !mask_empty) {
3202 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3203
3204 ds_put_format(ds, "%s=%"PRIu16, name, ntohs(key));
3205 if (!mask_full) { /* Partially masked. */
3206 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3207 }
3208 ds_put_char(ds, ',');
3209 }
3210 }
3211
3212 static void
3213 format_be16x(struct ds *ds, const char *name, ovs_be16 key,
3214 const ovs_be16 *mask, bool verbose)
3215 {
3216 bool mask_empty = mask && !*mask;
3217
3218 if (verbose || !mask_empty) {
3219 bool mask_full = !mask || *mask == OVS_BE16_MAX;
3220
3221 ds_put_format(ds, "%s=%#"PRIx16, name, ntohs(key));
3222 if (!mask_full) { /* Partially masked. */
3223 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
3224 }
3225 ds_put_char(ds, ',');
3226 }
3227 }
3228
3229 static void
3230 format_tun_flags(struct ds *ds, const char *name, uint16_t key,
3231 const uint16_t *mask, bool verbose)
3232 {
3233 bool mask_empty = mask && !*mask;
3234
3235 if (verbose || !mask_empty) {
3236 ds_put_cstr(ds, name);
3237 ds_put_char(ds, '(');
3238 if (mask) {
3239 format_flags_masked(ds, NULL, flow_tun_flag_to_string, key,
3240 *mask & FLOW_TNL_F_MASK, FLOW_TNL_F_MASK);
3241 } else { /* Fully masked. */
3242 format_flags(ds, flow_tun_flag_to_string, key, '|');
3243 }
3244 ds_put_cstr(ds, "),");
3245 }
3246 }
3247
3248 static bool
3249 check_attr_len(struct ds *ds, const struct nlattr *a, const struct nlattr *ma,
3250 const struct attr_len_tbl tbl[], int max_type, bool need_key)
3251 {
3252 int expected_len;
3253
3254 expected_len = odp_key_attr_len(tbl, max_type, nl_attr_type(a));
3255 if (expected_len != ATTR_LEN_VARIABLE &&
3256 expected_len != ATTR_LEN_NESTED) {
3257
3258 bool bad_key_len = nl_attr_get_size(a) != expected_len;
3259 bool bad_mask_len = ma && nl_attr_get_size(ma) != expected_len;
3260
3261 if (bad_key_len || bad_mask_len) {
3262 if (need_key) {
3263 ds_put_format(ds, "key%u", nl_attr_type(a));
3264 }
3265 if (bad_key_len) {
3266 ds_put_format(ds, "(bad key length %"PRIuSIZE", expected %d)(",
3267 nl_attr_get_size(a), expected_len);
3268 }
3269 format_generic_odp_key(a, ds);
3270 if (ma) {
3271 ds_put_char(ds, '/');
3272 if (bad_mask_len) {
3273 ds_put_format(ds, "(bad mask length %"PRIuSIZE", expected %d)(",
3274 nl_attr_get_size(ma), expected_len);
3275 }
3276 format_generic_odp_key(ma, ds);
3277 }
3278 ds_put_char(ds, ')');
3279 return false;
3280 }
3281 }
3282
3283 return true;
3284 }
3285
3286 static void
3287 format_unknown_key(struct ds *ds, const struct nlattr *a,
3288 const struct nlattr *ma)
3289 {
3290 ds_put_format(ds, "key%u(", nl_attr_type(a));
3291 format_generic_odp_key(a, ds);
3292 if (ma && !odp_mask_attr_is_exact(ma)) {
3293 ds_put_char(ds, '/');
3294 format_generic_odp_key(ma, ds);
3295 }
3296 ds_put_cstr(ds, "),");
3297 }
3298
3299 static void
3300 format_odp_tun_vxlan_opt(const struct nlattr *attr,
3301 const struct nlattr *mask_attr, struct ds *ds,
3302 bool verbose)
3303 {
3304 unsigned int left;
3305 const struct nlattr *a;
3306 struct ofpbuf ofp;
3307
3308 ofpbuf_init(&ofp, 100);
3309 NL_NESTED_FOR_EACH(a, left, attr) {
3310 uint16_t type = nl_attr_type(a);
3311 const struct nlattr *ma = NULL;
3312
3313 if (mask_attr) {
3314 ma = nl_attr_find__(nl_attr_get(mask_attr),
3315 nl_attr_get_size(mask_attr), type);
3316 if (!ma) {
3317 ma = generate_all_wildcard_mask(ovs_vxlan_ext_attr_lens,
3318 OVS_VXLAN_EXT_MAX,
3319 &ofp, a);
3320 }
3321 }
3322
3323 if (!check_attr_len(ds, a, ma, ovs_vxlan_ext_attr_lens,
3324 OVS_VXLAN_EXT_MAX, true)) {
3325 continue;
3326 }
3327
3328 switch (type) {
3329 case OVS_VXLAN_EXT_GBP: {
3330 uint32_t key = nl_attr_get_u32(a);
3331 ovs_be16 id, id_mask;
3332 uint8_t flags, flags_mask = 0;
3333
3334 id = htons(key & 0xFFFF);
3335 flags = (key >> 16) & 0xFF;
3336 if (ma) {
3337 uint32_t mask = nl_attr_get_u32(ma);
3338 id_mask = htons(mask & 0xFFFF);
3339 flags_mask = (mask >> 16) & 0xFF;
3340 }
3341
3342 ds_put_cstr(ds, "gbp(");
3343 format_be16(ds, "id", id, ma ? &id_mask : NULL, verbose);
3344 format_u8x(ds, "flags", flags, ma ? &flags_mask : NULL, verbose);
3345 ds_chomp(ds, ',');
3346 ds_put_cstr(ds, "),");
3347 break;
3348 }
3349
3350 default:
3351 format_unknown_key(ds, a, ma);
3352 }
3353 ofpbuf_clear(&ofp);
3354 }
3355
3356 ds_chomp(ds, ',');
3357 ofpbuf_uninit(&ofp);
3358 }
3359
3360 static void
3361 format_odp_tun_erspan_opt(const struct nlattr *attr,
3362 const struct nlattr *mask_attr, struct ds *ds,
3363 bool verbose)
3364 {
3365 const struct erspan_metadata *opts, *mask;
3366 uint8_t ver, ver_ma, dir, dir_ma, hwid, hwid_ma;
3367
3368 opts = nl_attr_get(attr);
3369 mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
3370
3371 ver = (uint8_t)opts->version;
3372 if (mask) {
3373 ver_ma = (uint8_t)mask->version;
3374 }
3375
3376 format_u8u(ds, "ver", ver, mask ? &ver_ma : NULL, verbose);
3377
3378 if (opts->version == 1) {
3379 if (mask) {
3380 ds_put_format(ds, "idx=%#"PRIx32"/%#"PRIx32",",
3381 ntohl(opts->u.index),
3382 ntohl(mask->u.index));
3383 } else {
3384 ds_put_format(ds, "idx=%#"PRIx32",", ntohl(opts->u.index));
3385 }
3386 } else if (opts->version == 2) {
3387 dir = opts->u.md2.dir;
3388 hwid = opts->u.md2.hwid;
3389 if (mask) {
3390 dir_ma = mask->u.md2.dir;
3391 hwid_ma = mask->u.md2.hwid;
3392 }
3393
3394 format_u8u(ds, "dir", dir, mask ? &dir_ma : NULL, verbose);
3395 format_u8x(ds, "hwid", hwid, mask ? &hwid_ma : NULL, verbose);
3396 }
3397 ds_chomp(ds, ',');
3398 }
3399
3400 #define MASK(PTR, FIELD) PTR ? &PTR->FIELD : NULL
3401
3402 static void
3403 format_geneve_opts(const struct geneve_opt *opt,
3404 const struct geneve_opt *mask, int opts_len,
3405 struct ds *ds, bool verbose)
3406 {
3407 while (opts_len > 0) {
3408 unsigned int len;
3409 uint8_t data_len, data_len_mask;
3410
3411 if (opts_len < sizeof *opt) {
3412 ds_put_format(ds, "opt len %u less than minimum %"PRIuSIZE,
3413 opts_len, sizeof *opt);
3414 return;
3415 }
3416
3417 data_len = opt->length * 4;
3418 if (mask) {
3419 if (mask->length == 0x1f) {
3420 data_len_mask = UINT8_MAX;
3421 } else {
3422 data_len_mask = mask->length;
3423 }
3424 }
3425 len = sizeof *opt + data_len;
3426 if (len > opts_len) {
3427 ds_put_format(ds, "opt len %u greater than remaining %u",
3428 len, opts_len);
3429 return;
3430 }
3431
3432 ds_put_char(ds, '{');
3433 format_be16x(ds, "class", opt->opt_class, MASK(mask, opt_class),
3434 verbose);
3435 format_u8x(ds, "type", opt->type, MASK(mask, type), verbose);
3436 format_u8u(ds, "len", data_len, mask ? &data_len_mask : NULL, verbose);
3437 if (data_len &&
3438 (verbose || !mask || !is_all_zeros(mask + 1, data_len))) {
3439 ds_put_hex(ds, opt + 1, data_len);
3440 if (mask && !is_all_ones(mask + 1, data_len)) {
3441 ds_put_char(ds, '/');
3442 ds_put_hex(ds, mask + 1, data_len);
3443 }
3444 } else {
3445 ds_chomp(ds, ',');
3446 }
3447 ds_put_char(ds, '}');
3448
3449 opt += len / sizeof(*opt);
3450 if (mask) {
3451 mask += len / sizeof(*opt);
3452 }
3453 opts_len -= len;
3454     }
3455 }
3456
3457 static void
3458 format_odp_tun_geneve(const struct nlattr *attr,
3459 const struct nlattr *mask_attr, struct ds *ds,
3460 bool verbose)
3461 {
3462 int opts_len = nl_attr_get_size(attr);
3463 const struct geneve_opt *opt = nl_attr_get(attr);
3464 const struct geneve_opt *mask = mask_attr ?
3465 nl_attr_get(mask_attr) : NULL;
3466
3467 if (mask && nl_attr_get_size(attr) != nl_attr_get_size(mask_attr)) {
3468 ds_put_format(ds, "value len %"PRIuSIZE" different from mask len %"PRIuSIZE,
3469 nl_attr_get_size(attr), nl_attr_get_size(mask_attr));
3470 return;
3471 }
3472
3473 format_geneve_opts(opt, mask, opts_len, ds, verbose);
3474 }
3475
3476 static void
3477 format_odp_nsh_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3478 struct ds *ds)
3479 {
3480 unsigned int left;
3481 const struct nlattr *a;
3482 struct ovs_key_nsh nsh;
3483 struct ovs_key_nsh nsh_mask;
3484
3485 memset(&nsh, 0, sizeof nsh);
3486 memset(&nsh_mask, 0xff, sizeof nsh_mask);
3487
3488 NL_NESTED_FOR_EACH (a, left, attr) {
3489 enum ovs_nsh_key_attr type = nl_attr_type(a);
3490 const struct nlattr *ma = NULL;
3491
3492 if (mask_attr) {
3493 ma = nl_attr_find__(nl_attr_get(mask_attr),
3494 nl_attr_get_size(mask_attr), type);
3495 }
3496
3497 if (!check_attr_len(ds, a, ma, ovs_nsh_key_attr_lens,
3498 OVS_NSH_KEY_ATTR_MAX, true)) {
3499 continue;
3500 }
3501
3502 switch (type) {
3503 case OVS_NSH_KEY_ATTR_UNSPEC:
3504 break;
3505 case OVS_NSH_KEY_ATTR_BASE: {
3506 const struct ovs_nsh_key_base *base = nl_attr_get(a);
3507 const struct ovs_nsh_key_base *base_mask
3508 = ma ? nl_attr_get(ma) : NULL;
3509 nsh.flags = base->flags;
3510 nsh.ttl = base->ttl;
3511 nsh.mdtype = base->mdtype;
3512 nsh.np = base->np;
3513 nsh.path_hdr = base->path_hdr;
3514 if (base_mask) {
3515 nsh_mask.flags = base_mask->flags;
3516 nsh_mask.ttl = base_mask->ttl;
3517 nsh_mask.mdtype = base_mask->mdtype;
3518 nsh_mask.np = base_mask->np;
3519 nsh_mask.path_hdr = base_mask->path_hdr;
3520 }
3521 break;
3522 }
3523 case OVS_NSH_KEY_ATTR_MD1: {
3524 const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
3525 const struct ovs_nsh_key_md1 *md1_mask
3526 = ma ? nl_attr_get(ma) : NULL;
3527 memcpy(nsh.context, md1->context, sizeof md1->context);
3528 if (md1_mask) {
3529 memcpy(nsh_mask.context, md1_mask->context,
3530 sizeof md1_mask->context);
3531 }
3532 break;
3533 }
3534 case OVS_NSH_KEY_ATTR_MD2:
3535 case __OVS_NSH_KEY_ATTR_MAX:
3536 default:
3537 /* No support for matching other metadata formats yet. */
3538 break;
3539 }
3540 }
3541
3542 if (mask_attr) {
3543 format_nsh_key_mask(ds, &nsh, &nsh_mask);
3544 } else {
3545 format_nsh_key(ds, &nsh);
3546 }
3547 }
3548
3549 static void
3550 format_odp_tun_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
3551 struct ds *ds, bool verbose)
3552 {
3553 unsigned int left;
3554 const struct nlattr *a;
3555 uint16_t flags = 0;
3556 uint16_t mask_flags = 0;
3557 struct ofpbuf ofp;
3558
3559 ofpbuf_init(&ofp, 100);
3560 NL_NESTED_FOR_EACH(a, left, attr) {
3561 enum ovs_tunnel_key_attr type = nl_attr_type(a);
3562 const struct nlattr *ma = NULL;
3563
3564 if (mask_attr) {
3565 ma = nl_attr_find__(nl_attr_get(mask_attr),
3566 nl_attr_get_size(mask_attr), type);
3567 if (!ma) {
3568 ma = generate_all_wildcard_mask(ovs_tun_key_attr_lens,
3569 OVS_TUNNEL_KEY_ATTR_MAX,
3570 &ofp, a);
3571 }
3572 }
3573
3574 if (!check_attr_len(ds, a, ma, ovs_tun_key_attr_lens,
3575 OVS_TUNNEL_KEY_ATTR_MAX, true)) {
3576 continue;
3577 }
3578
3579 switch (type) {
3580 case OVS_TUNNEL_KEY_ATTR_ID:
3581 format_be64(ds, "tun_id", nl_attr_get_be64(a),
3582 ma ? nl_attr_get(ma) : NULL, verbose);
3583 flags |= FLOW_TNL_F_KEY;
3584 if (ma) {
3585 mask_flags |= FLOW_TNL_F_KEY;
3586 }
3587 break;
3588 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
3589 format_ipv4(ds, "src", nl_attr_get_be32(a),
3590 ma ? nl_attr_get(ma) : NULL, verbose);
3591 break;
3592 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
3593 format_ipv4(ds, "dst", nl_attr_get_be32(a),
3594 ma ? nl_attr_get(ma) : NULL, verbose);
3595 break;
3596 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
3597 struct in6_addr ipv6_src;
3598 ipv6_src = nl_attr_get_in6_addr(a);
3599 format_in6_addr(ds, "ipv6_src", &ipv6_src,
3600 ma ? nl_attr_get(ma) : NULL, verbose);
3601 break;
3602 }
3603 case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
3604 struct in6_addr ipv6_dst;
3605 ipv6_dst = nl_attr_get_in6_addr(a);
3606 format_in6_addr(ds, "ipv6_dst", &ipv6_dst,
3607 ma ? nl_attr_get(ma) : NULL, verbose);
3608 break;
3609 }
3610 case OVS_TUNNEL_KEY_ATTR_TOS:
3611 format_u8x(ds, "tos", nl_attr_get_u8(a),
3612 ma ? nl_attr_get(ma) : NULL, verbose);
3613 break;
3614 case OVS_TUNNEL_KEY_ATTR_TTL:
3615 format_u8u(ds, "ttl", nl_attr_get_u8(a),
3616 ma ? nl_attr_get(ma) : NULL, verbose);
3617 break;
3618 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3619 flags |= FLOW_TNL_F_DONT_FRAGMENT;
3620 break;
3621 case OVS_TUNNEL_KEY_ATTR_CSUM:
3622 flags |= FLOW_TNL_F_CSUM;
3623 break;
3624 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
3625 format_be16(ds, "tp_src", nl_attr_get_be16(a),
3626 ma ? nl_attr_get(ma) : NULL, verbose);
3627 break;
3628 case OVS_TUNNEL_KEY_ATTR_TP_DST:
3629 format_be16(ds, "tp_dst", nl_attr_get_be16(a),
3630 ma ? nl_attr_get(ma) : NULL, verbose);
3631 break;
3632 case OVS_TUNNEL_KEY_ATTR_OAM:
3633 flags |= FLOW_TNL_F_OAM;
3634 break;
3635 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
3636 ds_put_cstr(ds, "vxlan(");
3637 format_odp_tun_vxlan_opt(a, ma, ds, verbose);
3638 ds_put_cstr(ds, "),");
3639 break;
3640 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
3641 ds_put_cstr(ds, "geneve(");
3642 format_odp_tun_geneve(a, ma, ds, verbose);
3643 ds_put_cstr(ds, "),");
3644 break;
3645 case OVS_TUNNEL_KEY_ATTR_PAD:
3646 break;
3647 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
3648 ds_put_cstr(ds, "erspan(");
3649 format_odp_tun_erspan_opt(a, ma, ds, verbose);
3650 ds_put_cstr(ds, "),");
3651 break;
3652 case __OVS_TUNNEL_KEY_ATTR_MAX:
3653 default:
3654 format_unknown_key(ds, a, ma);
3655 }
3656 ofpbuf_clear(&ofp);
3657 }
3658
3659 /* Flags can have a valid mask even if the attribute is not set, so
3660 * we need to collect these separately. */
3661 if (mask_attr) {
3662 NL_NESTED_FOR_EACH(a, left, mask_attr) {
3663 switch (nl_attr_type(a)) {
3664 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
3665 mask_flags |= FLOW_TNL_F_DONT_FRAGMENT;
3666 break;
3667 case OVS_TUNNEL_KEY_ATTR_CSUM:
3668 mask_flags |= FLOW_TNL_F_CSUM;
3669 break;
3670 case OVS_TUNNEL_KEY_ATTR_OAM:
3671 mask_flags |= FLOW_TNL_F_OAM;
3672 break;
3673 }
3674 }
3675 }
3676
3677 format_tun_flags(ds, "flags", flags, mask_attr ? &mask_flags : NULL,
3678 verbose);
3679 ds_chomp(ds, ',');
3680 ofpbuf_uninit(&ofp);
3681 }
3682
3683 static const char *
3684 odp_ct_state_to_string(uint32_t flag)
3685 {
3686 switch (flag) {
3687 case OVS_CS_F_REPLY_DIR:
3688 return "rpl";
3689 case OVS_CS_F_TRACKED:
3690 return "trk";
3691 case OVS_CS_F_NEW:
3692 return "new";
3693 case OVS_CS_F_ESTABLISHED:
3694 return "est";
3695 case OVS_CS_F_RELATED:
3696 return "rel";
3697 case OVS_CS_F_INVALID:
3698 return "inv";
3699 case OVS_CS_F_SRC_NAT:
3700 return "snat";
3701 case OVS_CS_F_DST_NAT:
3702 return "dnat";
3703 default:
3704 return NULL;
3705 }
3706 }
3707
3708 static void
3709 format_frag(struct ds *ds, const char *name, uint8_t key,
3710 const uint8_t *mask, bool verbose OVS_UNUSED)
3711 {
3712 bool mask_empty = mask && !*mask;
3713 bool mask_full = !mask || *mask == UINT8_MAX;
3714
3715 /* ODP frag is an enumeration field; partial masks are not meaningful. */
3716 if (!mask_empty && !mask_full) {
3717 ds_put_format(ds, "error: partial mask not supported for frag (%#"
3718 PRIx8"),", *mask);
3719 } else if (!mask_empty) {
3720 ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
3721 }
3722 }
3723
3724 static bool
3725 mask_empty(const struct nlattr *ma)
3726 {
3727 const void *mask;
3728 size_t n;
3729
3730 if (!ma) {
3731 return true;
3732 }
3733 mask = nl_attr_get(ma);
3734 n = nl_attr_get_size(ma);
3735
3736 return is_all_zeros(mask, n);
3737 }
3738
3739 /* The caller must have already verified that 'a' and 'ma' have correct
3740 * lengths. */
3741 static void
3742 format_odp_key_attr__(const struct nlattr *a, const struct nlattr *ma,
3743 const struct hmap *portno_names, struct ds *ds,
3744 bool verbose)
3745 {
3746 enum ovs_key_attr attr = nl_attr_type(a);
3747 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3748 bool is_exact;
3749
3750 is_exact = ma ? odp_mask_attr_is_exact(ma) : true;
3751
3752 ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
3753
3754 ds_put_char(ds, '(');
3755 switch (attr) {
3756 case OVS_KEY_ATTR_ENCAP:
3757 if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
3758 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
3759 nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
3760 verbose);
3761 } else if (nl_attr_get_size(a)) {
3762 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
3763 ds, verbose);
3764 }
3765 break;
3766
3767 case OVS_KEY_ATTR_PRIORITY:
3768 case OVS_KEY_ATTR_SKB_MARK:
3769 case OVS_KEY_ATTR_DP_HASH:
3770 case OVS_KEY_ATTR_RECIRC_ID:
3771 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
3772 if (!is_exact) {
3773 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
3774 }
3775 break;
3776
3777 case OVS_KEY_ATTR_CT_MARK:
3778 if (verbose || !mask_empty(ma)) {
3779 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
3780 if (!is_exact) {
3781 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
3782 }
3783 }
3784 break;
3785
3786 case OVS_KEY_ATTR_CT_STATE:
3787 if (verbose) {
3788 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
3789 if (!is_exact) {
3790 ds_put_format(ds, "/%#"PRIx32,
3791 mask_empty(ma) ? 0 : nl_attr_get_u32(ma));
3792 }
3793 } else if (!is_exact) {
3794 format_flags_masked(ds, NULL, odp_ct_state_to_string,
3795 nl_attr_get_u32(a),
3796 mask_empty(ma) ? 0 : nl_attr_get_u32(ma),
3797 UINT32_MAX);
3798 } else {
3799 format_flags(ds, odp_ct_state_to_string, nl_attr_get_u32(a), '|');
3800 }
3801 break;
3802
3803 case OVS_KEY_ATTR_CT_ZONE:
3804 if (verbose || !mask_empty(ma)) {
3805 ds_put_format(ds, "%#"PRIx16, nl_attr_get_u16(a));
3806 if (!is_exact) {
3807 ds_put_format(ds, "/%#"PRIx16, nl_attr_get_u16(ma));
3808 }
3809 }
3810 break;
3811
3812 case OVS_KEY_ATTR_CT_LABELS: {
3813 const ovs_32aligned_u128 *value = nl_attr_get(a);
3814 const ovs_32aligned_u128 *mask = ma ? nl_attr_get(ma) : NULL;
3815
3816 format_u128(ds, value, mask, verbose);
3817 break;
3818 }
3819
3820 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
3821 const struct ovs_key_ct_tuple_ipv4 *key = nl_attr_get(a);
3822 const struct ovs_key_ct_tuple_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
3823
3824 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
3825 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
3826 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
3827 verbose);
3828 format_be16(ds, "tp_src", key->src_port, MASK(mask, src_port),
3829 verbose);
3830 format_be16(ds, "tp_dst", key->dst_port, MASK(mask, dst_port),
3831 verbose);
3832 ds_chomp(ds, ',');
3833 break;
3834 }
3835
3836 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
3837 const struct ovs_key_ct_tuple_ipv6 *key = nl_attr_get(a);
3838 const struct ovs_key_ct_tuple_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
3839
3840 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
3841 verbose);
3842 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
3843 verbose);
3844 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
3845 verbose);
3846 format_be16(ds, "src_port", key->src_port, MASK(mask, src_port),
3847 verbose);
3848 format_be16(ds, "dst_port", key->dst_port, MASK(mask, dst_port),
3849 verbose);
3850 ds_chomp(ds, ',');
3851 break;
3852 }
3853
3854 case OVS_KEY_ATTR_TUNNEL:
3855 format_odp_tun_attr(a, ma, ds, verbose);
3856 break;
3857
3858 case OVS_KEY_ATTR_IN_PORT:
3859 if (is_exact) {
3860 odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
3861 } else {
3862 /* 'ma' is nonnull here: a null mask would have made 'is_exact' true. */
3863 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
3864 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
3865 }
3866 break;
3868
3869 case OVS_KEY_ATTR_PACKET_TYPE: {
3870 ovs_be32 value = nl_attr_get_be32(a);
3871 ovs_be32 mask = ma ? nl_attr_get_be32(ma) : OVS_BE32_MAX;
3872
3873 ovs_be16 ns = htons(pt_ns(value));
3874 ovs_be16 ns_mask = htons(pt_ns(mask));
3875 format_be16(ds, "ns", ns, &ns_mask, verbose);
3876
3877 ovs_be16 ns_type = pt_ns_type_be(value);
3878 ovs_be16 ns_type_mask = pt_ns_type_be(mask);
3879 format_be16x(ds, "id", ns_type, &ns_type_mask, verbose);
3880
3881 ds_chomp(ds, ',');
3882 break;
3883 }
3884
3885 case OVS_KEY_ATTR_ETHERNET: {
3886 const struct ovs_key_ethernet *mask = ma ? nl_attr_get(ma) : NULL;
3887 const struct ovs_key_ethernet *key = nl_attr_get(a);
3888
3889 format_eth(ds, "src", key->eth_src, MASK(mask, eth_src), verbose);
3890 format_eth(ds, "dst", key->eth_dst, MASK(mask, eth_dst), verbose);
3891 ds_chomp(ds, ',');
3892 break;
3893 }
3894 case OVS_KEY_ATTR_VLAN:
3895 format_vlan_tci(ds, nl_attr_get_be16(a),
3896 ma ? nl_attr_get_be16(ma) : OVS_BE16_MAX, verbose);
3897 break;
3898
3899 case OVS_KEY_ATTR_MPLS: {
3900 const struct ovs_key_mpls *mpls_key = nl_attr_get(a);
3901 const struct ovs_key_mpls *mpls_mask = NULL;
3902 size_t size = nl_attr_get_size(a);
3903
3904 if (!size || size % sizeof *mpls_key) {
3905 ds_put_format(ds, "(bad key length %"PRIuSIZE")", size);
3906 return;
3907 }
3908 if (!is_exact) {
3909 mpls_mask = nl_attr_get(ma);
3910 if (size != nl_attr_get_size(ma)) {
3911 ds_put_format(ds, "(key length %"PRIuSIZE" != "
3912 "mask length %"PRIuSIZE")",
3913 size, nl_attr_get_size(ma));
3914 return;
3915 }
3916 }
3917 format_mpls(ds, mpls_key, mpls_mask, size / sizeof *mpls_key);
3918 break;
3919 }
3920 case OVS_KEY_ATTR_ETHERTYPE:
3921 ds_put_format(ds, "0x%04"PRIx16, ntohs(nl_attr_get_be16(a)));
3922 if (!is_exact) {
3923 ds_put_format(ds, "/0x%04"PRIx16, ntohs(nl_attr_get_be16(ma)));
3924 }
3925 break;
3926
3927 case OVS_KEY_ATTR_IPV4: {
3928 const struct ovs_key_ipv4 *key = nl_attr_get(a);
3929 const struct ovs_key_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
3930
3931 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
3932 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
3933 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
3934 verbose);
3935 format_u8x(ds, "tos", key->ipv4_tos, MASK(mask, ipv4_tos), verbose);
3936 format_u8u(ds, "ttl", key->ipv4_ttl, MASK(mask, ipv4_ttl), verbose);
3937 format_frag(ds, "frag", key->ipv4_frag, MASK(mask, ipv4_frag),
3938 verbose);
3939 ds_chomp(ds, ',');
3940 break;
3941 }
3942 case OVS_KEY_ATTR_IPV6: {
3943 const struct ovs_key_ipv6 *key = nl_attr_get(a);
3944 const struct ovs_key_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
3945
3946 format_in6_addr(ds, "src", &key->ipv6_src, MASK(mask, ipv6_src),
3947 verbose);
3948 format_in6_addr(ds, "dst", &key->ipv6_dst, MASK(mask, ipv6_dst),
3949 verbose);
3950 format_ipv6_label(ds, "label", key->ipv6_label, MASK(mask, ipv6_label),
3951 verbose);
3952 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
3953 verbose);
3954 format_u8x(ds, "tclass", key->ipv6_tclass, MASK(mask, ipv6_tclass),
3955 verbose);
3956 format_u8u(ds, "hlimit", key->ipv6_hlimit, MASK(mask, ipv6_hlimit),
3957 verbose);
3958 format_frag(ds, "frag", key->ipv6_frag, MASK(mask, ipv6_frag),
3959 verbose);
3960 ds_chomp(ds, ',');
3961 break;
3962 }
3963 /* These have the same structure and format. */
3964 case OVS_KEY_ATTR_TCP:
3965 case OVS_KEY_ATTR_UDP:
3966 case OVS_KEY_ATTR_SCTP: {
3967 const struct ovs_key_tcp *key = nl_attr_get(a);
3968 const struct ovs_key_tcp *mask = ma ? nl_attr_get(ma) : NULL;
3969
3970 format_be16(ds, "src", key->tcp_src, MASK(mask, tcp_src), verbose);
3971 format_be16(ds, "dst", key->tcp_dst, MASK(mask, tcp_dst), verbose);
3972 ds_chomp(ds, ',');
3973 break;
3974 }
3975 case OVS_KEY_ATTR_TCP_FLAGS:
3976 if (!is_exact) {
3977 format_flags_masked(ds, NULL, packet_tcp_flag_to_string,
3978 ntohs(nl_attr_get_be16(a)),
3979 TCP_FLAGS(nl_attr_get_be16(ma)),
3980 TCP_FLAGS(OVS_BE16_MAX));
3981 } else {
3982 format_flags(ds, packet_tcp_flag_to_string,
3983 ntohs(nl_attr_get_be16(a)), '|');
3984 }
3985 break;
3986
3987 case OVS_KEY_ATTR_ICMP: {
3988 const struct ovs_key_icmp *key = nl_attr_get(a);
3989 const struct ovs_key_icmp *mask = ma ? nl_attr_get(ma) : NULL;
3990
3991 format_u8u(ds, "type", key->icmp_type, MASK(mask, icmp_type), verbose);
3992 format_u8u(ds, "code", key->icmp_code, MASK(mask, icmp_code), verbose);
3993 ds_chomp(ds, ',');
3994 break;
3995 }
3996 case OVS_KEY_ATTR_ICMPV6: {
3997 const struct ovs_key_icmpv6 *key = nl_attr_get(a);
3998 const struct ovs_key_icmpv6 *mask = ma ? nl_attr_get(ma) : NULL;
3999
4000 format_u8u(ds, "type", key->icmpv6_type, MASK(mask, icmpv6_type),
4001 verbose);
4002 format_u8u(ds, "code", key->icmpv6_code, MASK(mask, icmpv6_code),
4003 verbose);
4004 ds_chomp(ds, ',');
4005 break;
4006 }
4007 case OVS_KEY_ATTR_ARP: {
4008 const struct ovs_key_arp *mask = ma ? nl_attr_get(ma) : NULL;
4009 const struct ovs_key_arp *key = nl_attr_get(a);
4010
4011 format_ipv4(ds, "sip", key->arp_sip, MASK(mask, arp_sip), verbose);
4012 format_ipv4(ds, "tip", key->arp_tip, MASK(mask, arp_tip), verbose);
4013 format_be16(ds, "op", key->arp_op, MASK(mask, arp_op), verbose);
4014 format_eth(ds, "sha", key->arp_sha, MASK(mask, arp_sha), verbose);
4015 format_eth(ds, "tha", key->arp_tha, MASK(mask, arp_tha), verbose);
4016 ds_chomp(ds, ',');
4017 break;
4018 }
4019 case OVS_KEY_ATTR_ND: {
4020 const struct ovs_key_nd *mask = ma ? nl_attr_get(ma) : NULL;
4021 const struct ovs_key_nd *key = nl_attr_get(a);
4022
4023 format_in6_addr(ds, "target", &key->nd_target, MASK(mask, nd_target),
4024 verbose);
4025 format_eth(ds, "sll", key->nd_sll, MASK(mask, nd_sll), verbose);
4026 format_eth(ds, "tll", key->nd_tll, MASK(mask, nd_tll), verbose);
4027
4028 ds_chomp(ds, ',');
4029 break;
4030 }
4031 case OVS_KEY_ATTR_NSH: {
4032 format_odp_nsh_attr(a, ma, ds);
4033 break;
4034 }
4035 case OVS_KEY_ATTR_UNSPEC:
4036 case __OVS_KEY_ATTR_MAX:
4037 default:
4038 format_generic_odp_key(a, ds);
4039 if (!is_exact) {
4040 ds_put_char(ds, '/');
4041 format_generic_odp_key(ma, ds);
4042 }
4043 break;
4044 }
4045 ds_put_char(ds, ')');
4046 }
4047
4048 static void
4049 format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
4050 const struct hmap *portno_names, struct ds *ds,
4051 bool verbose)
4052 {
4053 if (check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4054 OVS_KEY_ATTR_MAX, false)) {
4055 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4056 }
4057 }
4058
4059 static struct nlattr *
4060 generate_all_wildcard_mask(const struct attr_len_tbl tbl[], int max,
4061 struct ofpbuf *ofp, const struct nlattr *key)
4062 {
4063 const struct nlattr *a;
4064 unsigned int left;
4065 int type = nl_attr_type(key);
4066 int size = nl_attr_get_size(key);
4067
4068 if (odp_key_attr_len(tbl, max, type) != ATTR_LEN_NESTED) {
4069 nl_msg_put_unspec_zero(ofp, type, size);
4070 } else {
4071 size_t nested_mask;
4072
4073 if (tbl[type].next) {
4074 const struct attr_len_tbl *entry = &tbl[type];
4075 tbl = entry->next;
4076 max = entry->next_max;
4077 }
4078
4079 nested_mask = nl_msg_start_nested(ofp, type);
4080 NL_ATTR_FOR_EACH(a, left, key, nl_attr_get_size(key)) {
4081 generate_all_wildcard_mask(tbl, max, ofp, nl_attr_get(a));
4082 }
4083 nl_msg_end_nested(ofp, nested_mask);
4084 }
4085
4086 return ofp->base;
4087 }
4088
4089 static void
4090 format_u128(struct ds *ds, const ovs_32aligned_u128 *key,
4091 const ovs_32aligned_u128 *mask, bool verbose)
4092 {
4093 if (verbose || (mask && !ovs_u128_is_zero(get_32aligned_u128(mask)))) {
4094 ovs_be128 value = hton128(get_32aligned_u128(key));
4095 ds_put_hex(ds, &value, sizeof value);
4096 if (mask && !(ovs_u128_is_ones(get_32aligned_u128(mask)))) {
4097 value = hton128(get_32aligned_u128(mask));
4098 ds_put_char(ds, '/');
4099 ds_put_hex(ds, &value, sizeof value);
4100 }
4101 }
4102 }
4103
4104 /* Read the string from 's_' as a 128-bit value. If the string contains
4105 * a "/", the rest of the string will be treated as a 128-bit mask.
4106 *
4107 * If either the value or mask is larger than 64 bits, the string must
4108 * be in hexadecimal.
4109 */
4110 static int
4111 scan_u128(const char *s_, ovs_u128 *value, ovs_u128 *mask)
4112 {
4113 char *s = CONST_CAST(char *, s_);
4114 ovs_be128 be_value;
4115 ovs_be128 be_mask;
4116
4117 if (!parse_int_string(s, (uint8_t *)&be_value, sizeof be_value, &s)) {
4118 *value = ntoh128(be_value);
4119
4120 if (mask) {
4121 int n;
4122
4123 if (ovs_scan(s, "/%n", &n)) {
4124 int error;
4125
4126 s += n;
4127 error = parse_int_string(s, (uint8_t *)&be_mask,
4128 sizeof be_mask, &s);
4129 if (error) {
4130 return 0;
4131 }
4132 *mask = ntoh128(be_mask);
4133 } else {
4134 *mask = OVS_U128_MAX;
4135 }
4136 }
4137 return s - s_;
4138 }
4139
4140 return 0;
4141 }
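/* For example, scan_u128("0x1234/0xffff", &value, &mask) sets 'value' to
 * 0x1234 and 'mask' to 0xffff and returns the number of characters consumed;
 * with no "/..." suffix the mask defaults to all-ones (OVS_U128_MAX).  A
 * return value of 0 means the string could not be parsed. */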
4142
4143 int
4144 odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
4145 {
4146 const char *s = s_;
4147
4148 if (ovs_scan(s, "ufid:")) {
4149 s += 5;
4150
4151 if (!uuid_from_string_prefix((struct uuid *)ufid, s)) {
4152 return -EINVAL;
4153 }
4154 s += UUID_LEN;
4155
4156 return s - s_;
4157 }
4158
4159 return 0;
4160 }
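/* Example: for an input such as "ufid:c5f8b280-97da-4c8c-b4fb-0ed4ba6d4a91",
 * odp_ufid_from_string() returns 5 + UUID_LEN, the number of bytes consumed.
 * It returns 0 if the "ufid:" prefix is absent and -EINVAL if the UUID that
 * follows the prefix is malformed. */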
4161
4162 void
4163 odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
4164 {
4165 ds_put_format(ds, "ufid:"UUID_FMT, UUID_ARGS((struct uuid *)ufid));
4166 }
4167
4168 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4169 * OVS_KEY_ATTR_* attributes in 'key'. If non-null, additionally formats the
4170 * 'mask_len' bytes of 'mask' which apply to 'key'. If 'portno_names' is
4171 * non-null, translates ODP port numbers to their names. */
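/* A formatted flow looks roughly like the example below (masks follow a '/'
 * when they are not exact, and fully wildcarded attributes are skipped
 * unless 'verbose'):
 *
 *   in_port(2),eth(src=00:11:22:33:44:55,dst=ff:ff:ff:ff:ff:ff),
 *   eth_type(0x0800),ipv4(src=10.0.0.1/255.255.255.0,dst=10.0.0.2,
 *   proto=6,tos=0,ttl=64,frag=no),tcp(src=80,dst=8080)
 */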
4172 void
4173 odp_flow_format(const struct nlattr *key, size_t key_len,
4174 const struct nlattr *mask, size_t mask_len,
4175 const struct hmap *portno_names, struct ds *ds, bool verbose)
4176 {
4177 if (key_len) {
4178 const struct nlattr *a;
4179 unsigned int left;
4180 bool has_ethtype_key = false;
4181 bool has_packet_type_key = false;
4182 struct ofpbuf ofp;
4183 bool first_field = true;
4184
4185 ofpbuf_init(&ofp, 100);
4186 NL_ATTR_FOR_EACH (a, left, key, key_len) {
4187 int attr_type = nl_attr_type(a);
4188 const struct nlattr *ma = (mask && mask_len
4189 ? nl_attr_find__(mask, mask_len,
4190 attr_type)
4191 : NULL);
4192 if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
4193 OVS_KEY_ATTR_MAX, false)) {
4194 continue;
4195 }
4196
4197 bool is_nested_attr;
4198 bool is_wildcard = false;
4199
4200 if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
4201 has_ethtype_key = true;
4202 } else if (attr_type == OVS_KEY_ATTR_PACKET_TYPE) {
4203 has_packet_type_key = true;
4204 }
4205
4206 is_nested_attr = odp_key_attr_len(ovs_flow_key_attr_lens,
4207 OVS_KEY_ATTR_MAX, attr_type) ==
4208 ATTR_LEN_NESTED;
4209
4210 if (mask && mask_len) {
4211 /* 'ma' was already looked up above for this attribute type. */
4212 is_wildcard = ma ? odp_mask_attr_is_wildcard(ma) : true;
4213 }
4214
4215 if (verbose || !is_wildcard || is_nested_attr) {
4216 if (is_wildcard && !ma) {
4217 ma = generate_all_wildcard_mask(ovs_flow_key_attr_lens,
4218 OVS_KEY_ATTR_MAX,
4219 &ofp, a);
4220 }
4221 if (!first_field) {
4222 ds_put_char(ds, ',');
4223 }
4224 format_odp_key_attr__(a, ma, portno_names, ds, verbose);
4225 first_field = false;
4226 } else if (attr_type == OVS_KEY_ATTR_ETHERNET
4227 && !has_packet_type_key) {
4228 /* This special case reflects differences between the kernel
4229 * and userspace datapaths regarding the root type of the
4230 * packet being matched (typically Ethernet but some tunnels
4231 * can encapsulate IPv4 etc.). The kernel datapath does not
4232 * have an explicit way to indicate packet type; instead:
4233 *
4234 * - If OVS_KEY_ATTR_ETHERNET is present, the packet is an
4235 * Ethernet packet and OVS_KEY_ATTR_ETHERTYPE is the
4236 * Ethertype encoded in the Ethernet header.
4237 *
4238 * - If OVS_KEY_ATTR_ETHERNET is absent, then the packet's
4239 * root type is that encoded in OVS_KEY_ATTR_ETHERTYPE
4240 * (i.e. if OVS_KEY_ATTR_ETHERTYPE is 0x0800 then the
4241 * packet is an IPv4 packet).
4242 *
4243 * Thus, if OVS_KEY_ATTR_ETHERNET is present, even if it is
4244 * all-wildcarded, it is important to print it.
4245 *
4246 * On the other hand, the userspace datapath supports
4247 * OVS_KEY_ATTR_PACKET_TYPE and uses it to indicate the packet
4248 * type. Thus, if OVS_KEY_ATTR_PACKET_TYPE is present, we need
4249 * not print an all-wildcarded OVS_KEY_ATTR_ETHERNET. */
4250 if (!first_field) {
4251 ds_put_char(ds, ',');
4252 }
4253 ds_put_cstr(ds, "eth()");
4254 }
4255 ofpbuf_clear(&ofp);
4256 }
4257 ofpbuf_uninit(&ofp);
4258
4259 if (left) {
4260 int i;
4261
4262 if (left == key_len) {
4263 ds_put_cstr(ds, "<empty>");
4264 }
4265 ds_put_format(ds, ",***%u leftover bytes*** (", left);
4266 for (i = 0; i < left; i++) {
4267 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
4268 }
4269 ds_put_char(ds, ')');
4270 }
4271 if (!has_ethtype_key) {
4272 const struct nlattr *ma = nl_attr_find__(mask, mask_len,
4273 OVS_KEY_ATTR_ETHERTYPE);
4274 if (ma) {
4275 ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
4276 ntohs(nl_attr_get_be16(ma)));
4277 }
4278 }
4279 } else {
4280 ds_put_cstr(ds, "<empty>");
4281 }
4282 }
4283
4284 /* Appends to 'ds' a string representation of the 'key_len' bytes of
4285 * OVS_KEY_ATTR_* attributes in 'key'. */
4286 void
4287 odp_flow_key_format(const struct nlattr *key,
4288 size_t key_len, struct ds *ds)
4289 {
4290 odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
4291 }
4292
4293 static bool
4294 ovs_frag_type_from_string(const char *s, enum ovs_frag_type *type)
4295 {
4296 if (!strcasecmp(s, "no")) {
4297 *type = OVS_FRAG_TYPE_NONE;
4298 } else if (!strcasecmp(s, "first")) {
4299 *type = OVS_FRAG_TYPE_FIRST;
4300 } else if (!strcasecmp(s, "later")) {
4301 *type = OVS_FRAG_TYPE_LATER;
4302 } else {
4303 return false;
4304 }
4305 return true;
4306 }
4307
4308 /* Parsing. */
4309
4310 static int
4311 scan_eth(const char *s, struct eth_addr *key, struct eth_addr *mask)
4312 {
4313 int n;
4314
4315 if (ovs_scan(s, ETH_ADDR_SCAN_FMT"%n",
4316 ETH_ADDR_SCAN_ARGS(*key), &n)) {
4317 int len = n;
4318
4319 if (mask) {
4320 if (ovs_scan(s + len, "/"ETH_ADDR_SCAN_FMT"%n",
4321 ETH_ADDR_SCAN_ARGS(*mask), &n)) {
4322 len += n;
4323 } else {
4324 memset(mask, 0xff, sizeof *mask);
4325 }
4326 }
4327 return len;
4328 }
4329 return 0;
4330 }
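/* Each scan_*() helper below follows the same convention: it consumes either
 * "VALUE" or "VALUE/MASK" from 's', stores the result in '*key' (and '*mask',
 * if nonnull), and returns the number of characters consumed, or 0 on error.
 * When no "/MASK" is present, the mask defaults to an exact match, e.g.
 * scan_eth("00:11:22:33:44:55", &k, &m) leaves 'm' as all-ones. */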
4331
4332 static int
4333 scan_ipv4(const char *s, ovs_be32 *key, ovs_be32 *mask)
4334 {
4335 int n;
4336
4337 if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(key), &n)) {
4338 int len = n;
4339
4340 if (mask) {
4341 if (ovs_scan(s + len, "/"IP_SCAN_FMT"%n",
4342 IP_SCAN_ARGS(mask), &n)) {
4343 len += n;
4344 } else {
4345 *mask = OVS_BE32_MAX;
4346 }
4347 }
4348 return len;
4349 }
4350 return 0;
4351 }
4352
4353 static int
4354 scan_in6_addr(const char *s, struct in6_addr *key, struct in6_addr *mask)
4355 {
4356 int n;
4357 char ipv6_s[IPV6_SCAN_LEN + 1];
4358
4359 if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
4360 && inet_pton(AF_INET6, ipv6_s, key) == 1) {
4361 int len = n;
4362
4363 if (mask) {
4364 if (ovs_scan(s + len, "/"IPV6_SCAN_FMT"%n", ipv6_s, &n)
4365 && inet_pton(AF_INET6, ipv6_s, mask) == 1) {
4366 len += n;
4367 } else {
4368 memset(mask, 0xff, sizeof *mask);
4369 }
4370 }
4371 return len;
4372 }
4373 return 0;
4374 }
4375
4376 static int
4377 scan_ipv6_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
4378 {
4379 int key_, mask_;
4380 int n;
4381
4382 if (ovs_scan(s, "%i%n", &key_, &n)
4383 && (key_ & ~IPV6_LABEL_MASK) == 0) {
4384 int len = n;
4385
4386 *key = htonl(key_);
4387 if (mask) {
4388 if (ovs_scan(s + len, "/%i%n", &mask_, &n)
4389 && (mask_ & ~IPV6_LABEL_MASK) == 0) {
4390 len += n;
4391 *mask = htonl(mask_);
4392 } else {
4393 *mask = htonl(IPV6_LABEL_MASK);
4394 }
4395 }
4396 return len;
4397 }
4398 return 0;
4399 }
4400
4401 static int
4402 scan_u8(const char *s, uint8_t *key, uint8_t *mask)
4403 {
4404 int n;
4405
4406 if (ovs_scan(s, "%"SCNi8"%n", key, &n)) {
4407 int len = n;
4408
4409 if (mask) {
4410 if (ovs_scan(s + len, "/%"SCNi8"%n", mask, &n)) {
4411 len += n;
4412 } else {
4413 *mask = UINT8_MAX;
4414 }
4415 }
4416 return len;
4417 }
4418 return 0;
4419 }
4420
4421 static int
4422 scan_u16(const char *s, uint16_t *key, uint16_t *mask)
4423 {
4424 int n;
4425
4426 if (ovs_scan(s, "%"SCNi16"%n", key, &n)) {
4427 int len = n;
4428
4429 if (mask) {
4430 if (ovs_scan(s + len, "/%"SCNi16"%n", mask, &n)) {
4431 len += n;
4432 } else {
4433 *mask = UINT16_MAX;
4434 }
4435 }
4436 return len;
4437 }
4438 return 0;
4439 }
4440
4441 static int
4442 scan_u32(const char *s, uint32_t *key, uint32_t *mask)
4443 {
4444 int n;
4445
4446 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4447 int len = n;
4448
4449 if (mask) {
4450 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4451 len += n;
4452 } else {
4453 *mask = UINT32_MAX;
4454 }
4455 }
4456 return len;
4457 }
4458 return 0;
4459 }
4460
4461 static int
4462 scan_be16(const char *s, ovs_be16 *key, ovs_be16 *mask)
4463 {
4464 uint16_t key_, mask_;
4465 int n;
4466
4467 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
4468 int len = n;
4469
4470 *key = htons(key_);
4471 if (mask) {
4472 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
4473 len += n;
4474 *mask = htons(mask_);
4475 } else {
4476 *mask = OVS_BE16_MAX;
4477 }
4478 }
4479 return len;
4480 }
4481 return 0;
4482 }
4483
4484 static int
4485 scan_be32(const char *s, ovs_be32 *key, ovs_be32 *mask)
4486 {
4487 uint32_t key_, mask_;
4488 int n;
4489
4490 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
4491 int len = n;
4492
4493 *key = htonl(key_);
4494 if (mask) {
4495 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
4496 len += n;
4497 *mask = htonl(mask_);
4498 } else {
4499 *mask = OVS_BE32_MAX;
4500 }
4501 }
4502 return len;
4503 }
4504 return 0;
4505 }
4506
4507 static int
4508 scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
4509 {
4510 uint64_t key_, mask_;
4511 int n;
4512
4513 if (ovs_scan(s, "%"SCNi64"%n", &key_, &n)) {
4514 int len = n;
4515
4516 *key = htonll(key_);
4517 if (mask) {
4518 if (ovs_scan(s + len, "/%"SCNi64"%n", &mask_, &n)) {
4519 len += n;
4520 *mask = htonll(mask_);
4521 } else {
4522 *mask = OVS_BE64_MAX;
4523 }
4524 }
4525 return len;
4526 }
4527 return 0;
4528 }
4529
4530 static int
4531 scan_tun_flags(const char *s, uint16_t *key, uint16_t *mask)
4532 {
4533 uint32_t flags, fmask;
4534 int n;
4535
4536 n = parse_odp_flags(s, flow_tun_flag_to_string, &flags,
4537 FLOW_TNL_F_MASK, mask ? &fmask : NULL);
4538 if (n >= 0 && s[n] == ')') {
4539 *key = flags;
4540 if (mask) {
4541 *mask = fmask;
4542 }
4543 return n + 1;
4544 }
4545 return 0;
4546 }
4547
4548 static int
4549 scan_tcp_flags(const char *s, ovs_be16 *key, ovs_be16 *mask)
4550 {
4551 uint32_t flags, fmask;
4552 int n;
4553
4554 n = parse_odp_flags(s, packet_tcp_flag_to_string, &flags,
4555 TCP_FLAGS(OVS_BE16_MAX), mask ? &fmask : NULL);
4556 if (n >= 0) {
4557 *key = htons(flags);
4558 if (mask) {
4559 *mask = htons(fmask);
4560 }
4561 return n;
4562 }
4563 return 0;
4564 }
4565
4566 static uint32_t
4567 ovs_to_odp_ct_state(uint8_t state)
4568 {
4569 uint32_t odp = 0;
4570
4571 #define CS_STATE(ENUM, INDEX, NAME) \
4572 if (state & CS_##ENUM) { \
4573 odp |= OVS_CS_F_##ENUM; \
4574 }
4575 CS_STATES
4576 #undef CS_STATE
4577
4578 return odp;
4579 }
4580
4581 static uint8_t
4582 odp_to_ovs_ct_state(uint32_t flags)
4583 {
4584 uint32_t state = 0;
4585
4586 #define CS_STATE(ENUM, INDEX, NAME) \
4587 if (flags & OVS_CS_F_##ENUM) { \
4588 state |= CS_##ENUM; \
4589 }
4590 CS_STATES
4591 #undef CS_STATE
4592
4593 return state;
4594 }
4595
4596 static int
4597 scan_ct_state(const char *s, uint32_t *key, uint32_t *mask)
4598 {
4599 uint32_t flags, fmask;
4600 int n;
4601
4602 n = parse_flags(s, odp_ct_state_to_string, ')', NULL, NULL, &flags,
4603 ovs_to_odp_ct_state(CS_SUPPORTED_MASK),
4604 mask ? &fmask : NULL);
4605
4606 if (n >= 0) {
4607 *key = flags;
4608 if (mask) {
4609 *mask = fmask;
4610 }
4611 return n;
4612 }
4613 return 0;
4614 }
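/* scan_ct_state() accepts the connection-tracking flag names used by the
 * formatter above ("new", "est", "rel", "rpl", "inv", "trk", "snat", "dnat"),
 * combined in the same syntax that the formatter produces (e.g. "+trk+new" or
 * "trk|new"), up to the closing ')'.  Only flags within CS_SUPPORTED_MASK are
 * accepted. */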
4615
4616 static int
4617 scan_frag(const char *s, uint8_t *key, uint8_t *mask)
4618 {
4619 int n;
4620 char frag[8];
4621 enum ovs_frag_type frag_type;
4622
4623 if (ovs_scan(s, "%7[a-z]%n", frag, &n)
4624 && ovs_frag_type_from_string(frag, &frag_type)) {
4625 int len = n;
4626
4627 *key = frag_type;
4628 if (mask) {
4629 *mask = UINT8_MAX;
4630 }
4631 return len;
4632 }
4633 return 0;
4634 }
4635
4636 static int
4637 scan_port(const char *s, uint32_t *key, uint32_t *mask,
4638 const struct simap *port_names)
4639 {
4640 int n;
4641
4642 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
4643 int len = n;
4644
4645 if (mask) {
4646 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
4647 len += n;
4648 } else {
4649 *mask = UINT32_MAX;
4650 }
4651 }
4652 return len;
4653 } else if (port_names) {
4654 const struct simap_node *node;
4655 int len;
4656
4657 len = strcspn(s, ")");
4658 node = simap_find_len(port_names, s, len);
4659 if (node) {
4660 *key = node->data;
4661
4662 if (mask) {
4663 *mask = UINT32_MAX;
4664 }
4665 return len;
4666 }
4667 }
4668 return 0;
4669 }
4670
4671 /* Helper for vlan parsing. */
4672 struct ovs_key_vlan__ {
4673 ovs_be16 tci;
4674 };
4675
4676 static bool
4677 set_be16_bf(ovs_be16 *bf, uint8_t bits, uint8_t offset, uint16_t value)
4678 {
4679 const uint16_t mask = ((1U << bits) - 1) << offset;
4680
4681 if (value >> bits) {
4682 return false;
4683 }
4684
4685 *bf = htons((ntohs(*bf) & ~mask) | (value << offset));
4686 return true;
4687 }
4688
4689 static int
4690 scan_be16_bf(const char *s, ovs_be16 *key, ovs_be16 *mask, uint8_t bits,
4691 uint8_t offset)
4692 {
4693 uint16_t key_, mask_;
4694 int n;
4695
4696 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
4697 int len = n;
4698
4699 if (set_be16_bf(key, bits, offset, key_)) {
4700 if (mask) {
4701 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
4702 len += n;
4703
4704 if (!set_be16_bf(mask, bits, offset, mask_)) {
4705 return 0;
4706 }
4707 } else {
4708 *mask |= htons(((1U << bits) - 1) << offset);
4709 }
4710 }
4711 return len;
4712 }
4713 }
4714 return 0;
4715 }
4716
4717 static int
4718 scan_vid(const char *s, ovs_be16 *key, ovs_be16 *mask)
4719 {
4720 return scan_be16_bf(s, key, mask, 12, VLAN_VID_SHIFT);
4721 }
4722
4723 static int
4724 scan_pcp(const char *s, ovs_be16 *key, ovs_be16 *mask)
4725 {
4726 return scan_be16_bf(s, key, mask, 3, VLAN_PCP_SHIFT);
4727 }
4728
4729 static int
4730 scan_cfi(const char *s, ovs_be16 *key, ovs_be16 *mask)
4731 {
4732 return scan_be16_bf(s, key, mask, 1, VLAN_CFI_SHIFT);
4733 }
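/* The three helpers above fill slices of the same TCI field.  For example,
 * "vid=100,pcp=3,cfi=1" yields a host-order TCI of (3 << VLAN_PCP_SHIFT) |
 * (1 << VLAN_CFI_SHIFT) | 100 = 0x7064, which is then stored big-endian. */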
4734
4735 /* For MPLS. */
4736 static bool
4737 set_be32_bf(ovs_be32 *bf, uint8_t bits, uint8_t offset, uint32_t value)
4738 {
4739 const uint32_t mask = ((1U << bits) - 1) << offset;
4740
4741 if (value >> bits) {
4742 return false;
4743 }
4744
4745 *bf = htonl((ntohl(*bf) & ~mask) | (value << offset));
4746 return true;
4747 }
4748
4749 static int
4750 scan_be32_bf(const char *s, ovs_be32 *key, ovs_be32 *mask, uint8_t bits,
4751 uint8_t offset)
4752 {
4753 uint32_t key_, mask_;
4754 int n;
4755
4756 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
4757 int len = n;
4758
4759 if (set_be32_bf(key, bits, offset, key_)) {
4760 if (mask) {
4761 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
4762 len += n;
4763
4764 if (!set_be32_bf(mask, bits, offset, mask_)) {
4765 return 0;
4766 }
4767 } else {
4768 *mask |= htonl(((1U << bits) - 1) << offset);
4769 }
4770 }
4771 return len;
4772 }
4773 }
4774 return 0;
4775 }
4776
4777 static int
4778 scan_mpls_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
4779 {
4780 return scan_be32_bf(s, key, mask, 20, MPLS_LABEL_SHIFT);
4781 }
4782
4783 static int
4784 scan_mpls_tc(const char *s, ovs_be32 *key, ovs_be32 *mask)
4785 {
4786 return scan_be32_bf(s, key, mask, 3, MPLS_TC_SHIFT);
4787 }
4788
4789 static int
4790 scan_mpls_ttl(const char *s, ovs_be32 *key, ovs_be32 *mask)
4791 {
4792 return scan_be32_bf(s, key, mask, 8, MPLS_TTL_SHIFT);
4793 }
4794
4795 static int
4796 scan_mpls_bos(const char *s, ovs_be32 *key, ovs_be32 *mask)
4797 {
4798 return scan_be32_bf(s, key, mask, 1, MPLS_BOS_SHIFT);
4799 }
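/* Together these helpers assemble a 32-bit MPLS label stack entry: the label
 * occupies bits 12-31, TC bits 9-11, BoS bit 8 and TTL bits 0-7.  For
 * example, label=16, tc=0, ttl=64, bos=1 gives an LSE of
 * (16 << 12) | (1 << 8) | 64 = 0x10140 (stored big-endian). */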
4800
4801 static int
4802 scan_vxlan_gbp(const char *s, uint32_t *key, uint32_t *mask)
4803 {
4804 const char *s_base = s;
4805 ovs_be16 id = 0, id_mask = 0;
4806 uint8_t flags = 0, flags_mask = 0;
4807
4808 if (!strncmp(s, "id=", 3)) {
4809 s += 3;
4810 s += scan_be16(s, &id, mask ? &id_mask : NULL);
4811 }
4812
4813 if (s[0] == ',') {
4814 s++;
4815 }
4816 if (!strncmp(s, "flags=", 6)) {
4817 s += 6;
4818 s += scan_u8(s, &flags, mask ? &flags_mask : NULL);
4819 }
4820
4821 if (!strncmp(s, "))", 2)) {
4822 s += 2;
4823
4824 *key = (flags << 16) | ntohs(id);
4825 if (mask) {
4826 *mask = (flags_mask << 16) | ntohs(id_mask);
4827 }
4828
4829 return s - s_base;
4830 }
4831
4832 return 0;
4833 }
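/* Example: "vxlan(gbp(id=100,flags=0x80))" packs the group policy data as
 * (flags << 16) | id, so *key becomes 0x800064 here; omitted fields are left
 * as zero. */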
4834
4835 static int
4836 scan_erspan_metadata(const char *s,
4837 struct erspan_metadata *key,
4838 struct erspan_metadata *mask)
4839 {
4840 const char *s_base = s;
4841 uint32_t idx = 0, idx_mask = 0;
4842 uint8_t ver = 0, dir = 0, hwid = 0;
4843 uint8_t ver_mask = 0, dir_mask = 0, hwid_mask = 0;
4844
4845 if (!strncmp(s, "ver=", 4)) {
4846 s += 4;
4847 s += scan_u8(s, &ver, mask ? &ver_mask : NULL);
4848 }
4849
4850 if (s[0] == ',') {
4851 s++;
4852 }
4853
4854 if (ver == 1) {
4855 if (!strncmp(s, "idx=", 4)) {
4856 s += 4;
4857 s += scan_u32(s, &idx, mask ? &idx_mask : NULL);
4858 }
4859
4860 if (!strncmp(s, ")", 1)) {
4861 s += 1;
4862 key->version = ver;
4863 key->u.index = htonl(idx);
4864 if (mask) {
4865 mask->u.index = htonl(idx_mask);
4866 }
4867 }
4868 return s - s_base;
4869
4870 } else if (ver == 2) {
4871 if (!strncmp(s, "dir=", 4)) {
4872 s += 4;
4873 s += scan_u8(s, &dir, mask ? &dir_mask : NULL);
4874 }
4875 if (s[0] == ',') {
4876 s++;
4877 }
4878 if (!strncmp(s, "hwid=", 5)) {
4879 s += 5;
4880 s += scan_u8(s, &hwid, mask ? &hwid_mask : NULL);
4881 }
4882
4883 if (!strncmp(s, ")", 1)) {
4884 s += 1;
4885 key->version = ver;
4886 key->u.md2.hwid = hwid;
4887 key->u.md2.dir = dir;
4888 if (mask) {
4889 mask->u.md2.hwid = hwid_mask;
4890 mask->u.md2.dir = dir_mask;
4891 }
4892 }
4893 return s - s_base;
4894 }
4895
4896 return 0;
4897 }
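/* Examples: "erspan(ver=1,idx=0x7)" fills the version 1 index, while
 * "erspan(ver=2,dir=1,hwid=0x4)" fills the version 2 direction and hardware
 * ID fields; in both cases the closing ')' must be present for the values to
 * be stored. */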
4898
4899 static int
4900 scan_geneve(const char *s, struct geneve_scan *key, struct geneve_scan *mask)
4901 {
4902 const char *s_base = s;
4903 struct geneve_opt *opt = key->d;
4904 struct geneve_opt *opt_mask = mask ? mask->d : NULL;
4905 int len_remain = sizeof key->d;
4906
4907 while (s[0] == '{' && len_remain >= sizeof *opt) {
4908 int data_len = 0;
4909
4910 s++;
4911 len_remain -= sizeof *opt;
4912
4913 if (!strncmp(s, "class=", 6)) {
4914 s += 6;
4915 s += scan_be16(s, &opt->opt_class,
4916 mask ? &opt_mask->opt_class : NULL);
4917 } else if (mask) {
4918 memset(&opt_mask->opt_class, 0, sizeof opt_mask->opt_class);
4919 }
4920
4921 if (s[0] == ',') {
4922 s++;
4923 }
4924 if (!strncmp(s, "type=", 5)) {
4925 s += 5;
4926 s += scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
4927 } else if (mask) {
4928 memset(&opt_mask->type, 0, sizeof opt_mask->type);
4929 }
4930
4931 if (s[0] == ',') {
4932 s++;
4933 }
4934 if (!strncmp(s, "len=", 4)) {
4935 uint8_t opt_len, opt_len_mask;
4936 s += 4;
4937 s += scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
4938
4939 if (opt_len > 124 || opt_len % 4 || opt_len > len_remain) {
4940 return 0;
4941 }
4942 opt->length = opt_len / 4;
4943 if (mask) {
4944 opt_mask->length = opt_len_mask;
4945 }
4946 data_len = opt_len;
4947 } else if (mask) {
4948 opt_mask->length = 0; /* No "len=" given: leave the length wildcarded. */
4949 }
4950
4951 if (s[0] == ',') {
4952 s++;
4953 }
4954 if (parse_int_string(s, (uint8_t *)(opt + 1), data_len, (char **)&s)) {
4955 return 0;
4956 }
4957
4958 if (mask) {
4959 if (s[0] == '/') {
4960 s++;
4961 if (parse_int_string(s, (uint8_t *)(opt_mask + 1),
4962 data_len, (char **)&s)) {
4963 return 0;
4964 }
4965 }
4966 opt_mask->r1 = 0;
4967 opt_mask->r2 = 0;
4968 opt_mask->r3 = 0;
4969 }
4970
4971 if (s[0] == '}') {
4972 s++;
4973 opt += 1 + data_len / 4;
4974 if (mask) {
4975 opt_mask += 1 + data_len / 4;
4976 }
4977 len_remain -= data_len;
4978 }
4979 }
4980
4981 if (s[0] == ')') {
4982 int len = sizeof key->d - len_remain;
4983
4984 s++;
4985 key->len = len;
4986 if (mask) {
4987 mask->len = len;
4988 }
4989 return s - s_base;
4990 }
4991
4992 return 0;
4993 }
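/* Example: "geneve({class=0xffff,type=0x80,len=4,0x12345678})" scans one
 * option carrying four bytes of data; further "{...}" groups add more
 * options, and the list must end with ')'.  A "/..." suffix after the data
 * bytes supplies a per-option data mask. */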
4994
4995 static void
4996 tun_flags_to_attr(struct ofpbuf *a, const void *data_)
4997 {
4998 const uint16_t *flags = data_;
4999
5000 if (*flags & FLOW_TNL_F_DONT_FRAGMENT) {
5001 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
5002 }
5003 if (*flags & FLOW_TNL_F_CSUM) {
5004 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
5005 }
5006 if (*flags & FLOW_TNL_F_OAM) {
5007 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
5008 }
5009 }
5010
5011 static void
5012 vxlan_gbp_to_attr(struct ofpbuf *a, const void *data_)
5013 {
5014 const uint32_t *gbp = data_;
5015
5016 if (*gbp) {
5017 size_t vxlan_opts_ofs;
5018
5019 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
5020 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP, *gbp);
5021 nl_msg_end_nested(a, vxlan_opts_ofs);
5022 }
5023 }
5024
5025 static void
5026 geneve_to_attr(struct ofpbuf *a, const void *data_)
5027 {
5028 const struct geneve_scan *geneve = data_;
5029
5030 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, geneve->d,
5031 geneve->len);
5032 }
5033
5034 static void
5035 erspan_to_attr(struct ofpbuf *a, const void *data_)
5036 {
5037 const struct erspan_metadata *md = data_;
5038
5039 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, md,
5040 sizeof *md);
5041 }
5042
5043 #define SCAN_PUT_ATTR(BUF, ATTR, DATA, FUNC) \
5044 { \
5045 unsigned long call_fn = (unsigned long)FUNC; \
5046 if (call_fn) { \
5047 typedef void (*fn)(struct ofpbuf *, const void *); \
5048 fn func = FUNC; \
5049 func(BUF, &(DATA)); \
5050 } else { \
5051 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)); \
5052 } \
5053 }
5054
5055 #define SCAN_IF(NAME) \
5056 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5057 const char *start = s; \
5058 int len; \
5059 \
5060 s += strlen(NAME)
5061
5062 /* Usually no special initialization is needed. */
5063 #define SCAN_BEGIN(NAME, TYPE) \
5064 SCAN_IF(NAME); \
5065 TYPE skey, smask; \
5066 memset(&skey, 0, sizeof skey); \
5067 memset(&smask, 0, sizeof smask); \
5068 do { \
5069 len = 0;
5070
5071 /* Initialize as fully masked, since the mask will not be scanned. */
5072 #define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
5073 SCAN_IF(NAME); \
5074 TYPE skey, smask; \
5075 memset(&skey, 0, sizeof skey); \
5076 memset(&smask, 0xff, sizeof smask); \
5077 do { \
5078 len = 0;
5079
5080 /* VLAN needs special initialization. */
5081 #define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
5082 SCAN_IF(NAME); \
5083 TYPE skey = KEY_INIT; \
5084 TYPE smask = MASK_INIT; \
5085 do { \
5086 len = 0;
5087
5088 /* Scan unnamed entry as 'TYPE' */
5089 #define SCAN_TYPE(TYPE, KEY, MASK) \
5090 len = scan_##TYPE(s, KEY, MASK); \
5091 if (len == 0) { \
5092 return -EINVAL; \
5093 } \
5094 s += len
5095
5096 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
5097 #define SCAN_FIELD(NAME, TYPE, FIELD) \
5098 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5099 s += strlen(NAME); \
5100 SCAN_TYPE(TYPE, &skey.FIELD, mask ? &smask.FIELD : NULL); \
5101 continue; \
5102 }
5103
5104 #define SCAN_FINISH() \
5105 } while (*s++ == ',' && len != 0); \
5106 if (s[-1] != ')') { \
5107 return -EINVAL; \
5108 }
5109
5110 #define SCAN_FINISH_SINGLE() \
5111 } while (false); \
5112 if (*s++ != ')') { \
5113 return -EINVAL; \
5114 }
5115
5116 /* Beginning of nested attribute. */
5117 #define SCAN_BEGIN_NESTED(NAME, ATTR) \
5118 SCAN_IF(NAME); \
5119 size_t key_offset, mask_offset; \
5120 key_offset = nl_msg_start_nested(key, ATTR); \
5121 if (mask) { \
5122 mask_offset = nl_msg_start_nested(mask, ATTR); \
5123 } \
5124 do { \
5125 len = 0;
5126
5127 #define SCAN_END_NESTED() \
5128 SCAN_FINISH(); \
5129 nl_msg_end_nested(key, key_offset); \
5130 if (mask) { \
5131 nl_msg_end_nested(mask, mask_offset); \
5132 } \
5133 return s - start; \
5134 }
5135
5136 #define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \
5137 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5138 TYPE skey, smask; \
5139 memset(&skey, 0, sizeof skey); \
5140 memset(&smask, 0xff, sizeof smask); \
5141 s += strlen(NAME); \
5142 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5143 SCAN_PUT(ATTR, FUNC); \
5144 continue; \
5145 }
5146
5147 #define SCAN_FIELD_NESTED(NAME, TYPE, SCAN_AS, ATTR) \
5148 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, NULL)
5149
5150 #define SCAN_FIELD_NESTED_FUNC(NAME, TYPE, SCAN_AS, FUNC) \
5151 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, 0, FUNC)
5152
5153 #define SCAN_PUT(ATTR, FUNC) \
5154 SCAN_PUT_ATTR(key, ATTR, skey, FUNC); \
5155 if (mask) \
5156 SCAN_PUT_ATTR(mask, ATTR, smask, FUNC); \
5157
5158 #define SCAN_END(ATTR) \
5159 SCAN_FINISH(); \
5160 SCAN_PUT(ATTR, NULL); \
5161 return s - start; \
5162 }
5163
5164 #define SCAN_BEGIN_ARRAY(NAME, TYPE, CNT) \
5165 SCAN_IF(NAME); \
5166 TYPE skey[CNT], smask[CNT]; \
5167 memset(&skey, 0, sizeof skey); \
5168 memset(&smask, 0, sizeof smask); \
5169 int idx = 0, cnt = CNT; \
5170 uint64_t fields = 0; \
5171 do { \
5172 int field = 0; \
5173 len = 0;
5174
5175 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
5176 #define SCAN_FIELD_ARRAY(NAME, TYPE, FIELD) \
5177 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
5178 if (fields & (1UL << field)) { \
5179 fields = 0; \
5180 if (++idx == cnt) { \
5181 break; \
5182 } \
5183 } \
5184 s += strlen(NAME); \
5185 SCAN_TYPE(TYPE, &skey[idx].FIELD, mask ? &smask[idx].FIELD : NULL); \
5186 fields |= 1UL << field; \
5187 continue; \
5188 } \
5189 field++;
5190
5191 #define SCAN_PUT_ATTR_ARRAY(BUF, ATTR, DATA, CNT) \
5192 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)[0] * (CNT)); \
5193
5194 #define SCAN_PUT_ARRAY(ATTR, CNT) \
5195 SCAN_PUT_ATTR_ARRAY(key, ATTR, skey, CNT); \
5196 if (mask) { \
5197 SCAN_PUT_ATTR_ARRAY(mask, ATTR, smask, CNT); \
5198 }
5199
5200 #define SCAN_END_ARRAY(ATTR) \
5201 SCAN_FINISH(); \
5202 if (idx == cnt) { \
5203 return -EINVAL; \
5204 } \
5205 SCAN_PUT_ARRAY(ATTR, idx + 1); \
5206 return s - start; \
5207 }
5208
5209 #define SCAN_END_SINGLE(ATTR) \
5210 SCAN_FINISH_SINGLE(); \
5211 SCAN_PUT(ATTR, NULL); \
5212 return s - start; \
5213 }
5214
5215 #define SCAN_SINGLE(NAME, TYPE, SCAN_AS, ATTR) \
5216 SCAN_BEGIN(NAME, TYPE) { \
5217 SCAN_TYPE(SCAN_AS, &skey, &smask); \
5218 } SCAN_END_SINGLE(ATTR)
5219
5220 #define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
5221 SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
5222 SCAN_TYPE(SCAN_AS, &skey, NULL); \
5223 } SCAN_END_SINGLE(ATTR)
5224
5225 /* scan_port needs one extra argument. */
5226 #define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
5227 SCAN_BEGIN(NAME, TYPE) { \
5228 len = scan_port(s, &skey, &smask, port_names); \
5229 if (len == 0) { \
5230 return -EINVAL; \
5231 } \
5232 s += len; \
5233 } SCAN_END_SINGLE(ATTR)
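/* Roughly, each SCAN_BEGIN...SCAN_END use in parse_odp_key_mask_attr() below
 * expands to: match the literal prefix (e.g. "ipv4("), repeatedly try each
 * SCAN_FIELD on "name=value[/mask]" tokens separated by commas, require the
 * closing ')', and finally emit the scanned key (and mask, if requested) as a
 * Netlink attribute via SCAN_PUT.  The *_NESTED variants wrap the fields in a
 * nested attribute instead. */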
5234
5235 static int
5236 parse_odp_nsh_key_mask_attr(const char *s, struct ofpbuf *key,
5237 struct ofpbuf *mask)
5238 {
5239 if (strncmp(s, "nsh(", 4) == 0) {
5240 const char *start = s;
5241 int len;
5242 struct ovs_key_nsh skey, smask;
5243 uint32_t spi = 0, spi_mask = 0;
5244 uint8_t si = 0, si_mask = 0;
5245
5246 s += 4;
5247
5248 memset(&skey, 0, sizeof skey);
5249 memset(&smask, 0, sizeof smask);
5250 do {
5251 len = 0;
5252
5253 if (strncmp(s, "flags=", 6) == 0) {
5254 s += 6;
5255 len = scan_u8(s, &skey.flags, mask ? &smask.flags : NULL);
5256 if (len == 0) {
5257 return -EINVAL;
5258 }
5259 s += len;
5260 continue;
5261 }
5262
5263 if (strncmp(s, "mdtype=", 7) == 0) {
5264 s += 7;
5265 len = scan_u8(s, &skey.mdtype, mask ? &smask.mdtype : NULL);
5266 if (len == 0) {
5267 return -EINVAL;
5268 }
5269 s += len;
5270 continue;
5271 }
5272
5273 if (strncmp(s, "np=", 3) == 0) {
5274 s += 3;
5275 len = scan_u8(s, &skey.np, mask ? &smask.np : NULL);
5276 if (len == 0) {
5277 return -EINVAL;
5278 }
5279 s += len;
5280 continue;
5281 }
5282
5283 if (strncmp(s, "spi=", 4) == 0) {
5284 s += 4;
5285 len = scan_u32(s, &spi, mask ? &spi_mask : NULL);
5286 if (len == 0) {
5287 return -EINVAL;
5288 }
5289 s += len;
5290 continue;
5291 }
5292
5293 if (strncmp(s, "si=", 3) == 0) {
5294 s += 3;
5295 len = scan_u8(s, &si, mask ? &si_mask : NULL);
5296 if (len == 0) {
5297 return -EINVAL;
5298 }
5299 s += len;
5300 continue;
5301 }
5302
5303 if (strncmp(s, "c1=", 3) == 0) {
5304 s += 3;
5305 len = scan_be32(s, &skey.context[0],
5306 mask ? &smask.context[0] : NULL);
5307 if (len == 0) {
5308 return -EINVAL;
5309 }
5310 s += len;
5311 continue;
5312 }
5313
5314 if (strncmp(s, "c2=", 3) == 0) {
5315 s += 3;
5316 len = scan_be32(s, &skey.context[1],
5317 mask ? &smask.context[1] : NULL);
5318 if (len == 0) {
5319 return -EINVAL;
5320 }
5321 s += len;
5322 continue;
5323 }
5324
5325 if (strncmp(s, "c3=", 3) == 0) {
5326 s += 3;
5327 len = scan_be32(s, &skey.context[2],
5328 mask ? &smask.context[2] : NULL);
5329 if (len == 0) {
5330 return -EINVAL;
5331 }
5332 s += len;
5333 continue;
5334 }
5335
5336 if (strncmp(s, "c4=", 3) == 0) {
5337 s += 3;
5338 len = scan_be32(s, &skey.context[3],
5339 mask ? &smask.context[3] : NULL);
5340 if (len == 0) {
5341 return -EINVAL;
5342 }
5343 s += len;
5344 continue;
5345 }
5346 } while (*s++ == ',' && len != 0);
5347 if (s[-1] != ')') {
5348 return -EINVAL;
5349 }
5350
5351 skey.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
5352 smask.path_hdr = nsh_spi_si_to_path_hdr(spi_mask, si_mask);
5353
5354 nsh_key_to_attr(key, &skey, NULL, 0, false);
5355 if (mask) {
5356 nsh_key_to_attr(mask, &smask, NULL, 0, true);
5357 }
5358 return s - start;
5359 }
5360 return 0;
5361 }
5362
5363 static int
5364 parse_odp_key_mask_attr(const char *s, const struct simap *port_names,
5365 struct ofpbuf *key, struct ofpbuf *mask)
5366 {
5367 /* Skip UFID. */
5368 ovs_u128 ufid;
5369 int ufid_len = odp_ufid_from_string(s, &ufid);
5370 if (ufid_len) {
5371 return ufid_len;
5372 }
5373
5374 SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
5375 SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
5376 SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
5377 OVS_KEY_ATTR_RECIRC_ID);
5378 SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
5379
5380 SCAN_SINGLE("ct_state(", uint32_t, ct_state, OVS_KEY_ATTR_CT_STATE);
5381 SCAN_SINGLE("ct_zone(", uint16_t, u16, OVS_KEY_ATTR_CT_ZONE);
5382 SCAN_SINGLE("ct_mark(", uint32_t, u32, OVS_KEY_ATTR_CT_MARK);
5383 SCAN_SINGLE("ct_label(", ovs_u128, u128, OVS_KEY_ATTR_CT_LABELS);
5384
5385 SCAN_BEGIN("ct_tuple4(", struct ovs_key_ct_tuple_ipv4) {
5386 SCAN_FIELD("src=", ipv4, ipv4_src);
5387 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5388 SCAN_FIELD("proto=", u8, ipv4_proto);
5389 SCAN_FIELD("tp_src=", be16, src_port);
5390 SCAN_FIELD("tp_dst=", be16, dst_port);
5391 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
5392
5393 SCAN_BEGIN("ct_tuple6(", struct ovs_key_ct_tuple_ipv6) {
5394 SCAN_FIELD("src=", in6_addr, ipv6_src);
5395 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5396 SCAN_FIELD("proto=", u8, ipv6_proto);
5397 SCAN_FIELD("tp_src=", be16, src_port);
5398 SCAN_FIELD("tp_dst=", be16, dst_port);
5399 } SCAN_END(OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
5400
5401 SCAN_BEGIN_NESTED("tunnel(", OVS_KEY_ATTR_TUNNEL) {
5402 SCAN_FIELD_NESTED("tun_id=", ovs_be64, be64, OVS_TUNNEL_KEY_ATTR_ID);
5403 SCAN_FIELD_NESTED("src=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_SRC);
5404 SCAN_FIELD_NESTED("dst=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_DST);
5405 SCAN_FIELD_NESTED("ipv6_src=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_SRC);
5406 SCAN_FIELD_NESTED("ipv6_dst=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_DST);
5407 SCAN_FIELD_NESTED("tos=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TOS);
5408 SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
5409 SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
5410 SCAN_FIELD_NESTED("tp_dst=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_DST);
5411 SCAN_FIELD_NESTED_FUNC("erspan(", struct erspan_metadata, erspan_metadata,
5412 erspan_to_attr);
5413 SCAN_FIELD_NESTED_FUNC("vxlan(gbp(", uint32_t, vxlan_gbp, vxlan_gbp_to_attr);
5414 SCAN_FIELD_NESTED_FUNC("geneve(", struct geneve_scan, geneve,
5415 geneve_to_attr);
5416 SCAN_FIELD_NESTED_FUNC("flags(", uint16_t, tun_flags, tun_flags_to_attr);
5417 } SCAN_END_NESTED();
5418
5419 SCAN_SINGLE_PORT("in_port(", uint32_t, OVS_KEY_ATTR_IN_PORT);
5420
5421 SCAN_BEGIN("eth(", struct ovs_key_ethernet) {
5422 SCAN_FIELD("src=", eth, eth_src);
5423 SCAN_FIELD("dst=", eth, eth_dst);
5424 } SCAN_END(OVS_KEY_ATTR_ETHERNET);
5425
5426 SCAN_BEGIN_INIT("vlan(", struct ovs_key_vlan__,
5427 { htons(VLAN_CFI) }, { htons(VLAN_CFI) }) {
5428 SCAN_FIELD("vid=", vid, tci);
5429 SCAN_FIELD("pcp=", pcp, tci);
5430 SCAN_FIELD("cfi=", cfi, tci);
5431 } SCAN_END(OVS_KEY_ATTR_VLAN);
5432
5433 SCAN_SINGLE("eth_type(", ovs_be16, be16, OVS_KEY_ATTR_ETHERTYPE);
5434
5435 SCAN_BEGIN_ARRAY("mpls(", struct ovs_key_mpls, FLOW_MAX_MPLS_LABELS) {
5436 SCAN_FIELD_ARRAY("label=", mpls_label, mpls_lse);
5437 SCAN_FIELD_ARRAY("tc=", mpls_tc, mpls_lse);
5438 SCAN_FIELD_ARRAY("ttl=", mpls_ttl, mpls_lse);
5439 SCAN_FIELD_ARRAY("bos=", mpls_bos, mpls_lse);
5440 } SCAN_END_ARRAY(OVS_KEY_ATTR_MPLS);
5441
5442 SCAN_BEGIN("ipv4(", struct ovs_key_ipv4) {
5443 SCAN_FIELD("src=", ipv4, ipv4_src);
5444 SCAN_FIELD("dst=", ipv4, ipv4_dst);
5445 SCAN_FIELD("proto=", u8, ipv4_proto);
5446 SCAN_FIELD("tos=", u8, ipv4_tos);
5447 SCAN_FIELD("ttl=", u8, ipv4_ttl);
5448 SCAN_FIELD("frag=", frag, ipv4_frag);
5449 } SCAN_END(OVS_KEY_ATTR_IPV4);
5450
5451 SCAN_BEGIN("ipv6(", struct ovs_key_ipv6) {
5452 SCAN_FIELD("src=", in6_addr, ipv6_src);
5453 SCAN_FIELD("dst=", in6_addr, ipv6_dst);
5454 SCAN_FIELD("label=", ipv6_label, ipv6_label);
5455 SCAN_FIELD("proto=", u8, ipv6_proto);
5456 SCAN_FIELD("tclass=", u8, ipv6_tclass);
5457 SCAN_FIELD("hlimit=", u8, ipv6_hlimit);
5458 SCAN_FIELD("frag=", frag, ipv6_frag);
5459 } SCAN_END(OVS_KEY_ATTR_IPV6);
5460
5461 SCAN_BEGIN("tcp(", struct ovs_key_tcp) {
5462 SCAN_FIELD("src=", be16, tcp_src);
5463 SCAN_FIELD("dst=", be16, tcp_dst);
5464 } SCAN_END(OVS_KEY_ATTR_TCP);
5465
5466 SCAN_SINGLE("tcp_flags(", ovs_be16, tcp_flags, OVS_KEY_ATTR_TCP_FLAGS);
5467
5468 SCAN_BEGIN("udp(", struct ovs_key_udp) {
5469 SCAN_FIELD("src=", be16, udp_src);
5470 SCAN_FIELD("dst=", be16, udp_dst);
5471 } SCAN_END(OVS_KEY_ATTR_UDP);
5472
5473 SCAN_BEGIN("sctp(", struct ovs_key_sctp) {
5474 SCAN_FIELD("src=", be16, sctp_src);
5475 SCAN_FIELD("dst=", be16, sctp_dst);
5476 } SCAN_END(OVS_KEY_ATTR_SCTP);
5477
5478 SCAN_BEGIN("icmp(", struct ovs_key_icmp) {
5479 SCAN_FIELD("type=", u8, icmp_type);
5480 SCAN_FIELD("code=", u8, icmp_code);
5481 } SCAN_END(OVS_KEY_ATTR_ICMP);
5482
5483 SCAN_BEGIN("icmpv6(", struct ovs_key_icmpv6) {
5484 SCAN_FIELD("type=", u8, icmpv6_type);
5485 SCAN_FIELD("code=", u8, icmpv6_code);
5486 } SCAN_END(OVS_KEY_ATTR_ICMPV6);
5487
5488 SCAN_BEGIN("arp(", struct ovs_key_arp) {
5489 SCAN_FIELD("sip=", ipv4, arp_sip);
5490 SCAN_FIELD("tip=", ipv4, arp_tip);
5491 SCAN_FIELD("op=", be16, arp_op);
5492 SCAN_FIELD("sha=", eth, arp_sha);
5493 SCAN_FIELD("tha=", eth, arp_tha);
5494 } SCAN_END(OVS_KEY_ATTR_ARP);
5495
5496 SCAN_BEGIN("nd(", struct ovs_key_nd) {
5497 SCAN_FIELD("target=", in6_addr, nd_target);
5498 SCAN_FIELD("sll=", eth, nd_sll);
5499 SCAN_FIELD("tll=", eth, nd_tll);
5500 } SCAN_END(OVS_KEY_ATTR_ND);
5501
5502 struct packet_type {
5503 ovs_be16 ns;
5504 ovs_be16 id;
5505 };
5506 SCAN_BEGIN("packet_type(", struct packet_type) {
5507 SCAN_FIELD("ns=", be16, ns);
5508 SCAN_FIELD("id=", be16, id);
5509 } SCAN_END(OVS_KEY_ATTR_PACKET_TYPE);
5510
5511 /* NSH is a nested attribute, so it needs special handling.  On success,
5512 * parse_odp_nsh_key_mask_attr() returns the number of bytes it consumed,
5513 * which is passed back to the caller just like the SCAN_END macros do;
5514 * 0 means the input is not an nsh() attribute at all. */
5515 int ret = parse_odp_nsh_key_mask_attr(s, key, mask);
5516 if (ret) {
5517 return ret;
5518 }
5518
5519 /* The encap() attribute is open-coded: it is nested and recurses into
5520 * this parser for the inner attributes. */
5520 if (!strncmp(s, "encap(", 6)) {
5521 const char *start = s;
5522 size_t encap, encap_mask = 0;
5523
5524 encap = nl_msg_start_nested(key, OVS_KEY_ATTR_ENCAP);
5525 if (mask) {
5526 encap_mask = nl_msg_start_nested(mask, OVS_KEY_ATTR_ENCAP);
5527 }
5528
5529 s += 6;
5530 for (;;) {
5531 int retval;
5532
5533 s += strspn(s, delimiters);
5534 if (!*s) {
5535 return -EINVAL;
5536 } else if (*s == ')') {
5537 break;
5538 }
5539
5540 retval = parse_odp_key_mask_attr(s, port_names, key, mask);
5541 if (retval < 0) {
5542 return retval;
5543 }
5544 s += retval;
5545 }
5546 s++;
5547
5548 nl_msg_end_nested(key, encap);
5549 if (mask) {
5550 nl_msg_end_nested(mask, encap_mask);
5551 }
5552
5553 return s - start;
5554 }
5555
5556 return -EINVAL;
5557 }
5558
5559 /* Parses the string representation of a datapath flow key, in the
5560 * format output by odp_flow_key_format(). Returns 0 if successful,
5561 * otherwise a positive errno value. On success, the flow key is
5562 * appended to 'key' as a series of Netlink attributes. On failure, no
5563 * data is appended to 'key'. Either way, 'key''s data might be
5564 * reallocated.
5565 *
5566 * If 'port_names' is nonnull, it points to a simap that maps from a port name
5567 * to a port number. (Port names may be used instead of port numbers in
5568 * in_port.)
5569 *
5570 * On success, the attributes appended to 'key' are individually syntactically
5571 * valid, but they may not be valid as a sequence. 'key' might, for example,
5572 * have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
5573 int
5574 odp_flow_from_string(const char *s, const struct simap *port_names,
5575 struct ofpbuf *key, struct ofpbuf *mask)
5576 {
5577 const size_t old_size = key->size;
5578 for (;;) {
5579 int retval;
5580
5581 s += strspn(s, delimiters);
5582 if (!*s) {
5583 return 0;
5584 }
5585
5586 retval = parse_odp_key_mask_attr(s, port_names, key, mask);
5587 if (retval < 0) {
5588 key->size = old_size;
5589 return -retval;
5590 }
5591 s += retval;
5592 }
5593
5594 return 0;
5595 }
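/* Illustrative sketch of the round trip between odp_flow_from_string() and
 * odp_flow_format().  The flow string is just an example of the accepted
 * syntax, and the helper itself is hypothetical and unused. */
static void OVS_UNUSED
odp_flow_round_trip_example(void)
{
    struct ofpbuf key, mask;
    struct ds s = DS_EMPTY_INITIALIZER;

    ofpbuf_init(&key, 0);
    ofpbuf_init(&mask, 0);
    if (!odp_flow_from_string("in_port(1),eth_type(0x0800),"
                              "ipv4(src=10.0.0.1,dst=10.0.0.2,proto=6,"
                              "tos=0,ttl=64,frag=no)",
                              NULL, &key, &mask)) {
        /* Formats the parsed attributes back into textual form. */
        odp_flow_format(key.data, key.size, mask.data, mask.size,
                        NULL, &s, false);
    }
    ds_destroy(&s);
    ofpbuf_uninit(&mask);
    ofpbuf_uninit(&key);
}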
5596
5597 static uint8_t
5598 ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
5599 {
5600 if (is_mask) {
5601 /* Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration type,
5602 * not a set of flags or bitfields. Hence, if the struct flow nw_frag
5603 * mask, which is a set of bits, has the FLOW_NW_FRAG_ANY as zero, we
5604 * must use a zero mask for the netlink frag field, and all ones mask
5605 * otherwise. */
5606 return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
5607 }
5608 return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
5609 : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
5610 : OVS_FRAG_TYPE_FIRST;
5611 }
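/* Examples of the mapping above: nw_frag == 0 maps to OVS_FRAG_TYPE_NONE,
 * FLOW_NW_FRAG_ANY alone maps to OVS_FRAG_TYPE_FIRST, and
 * FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER maps to OVS_FRAG_TYPE_LATER.  As a
 * mask, any value with FLOW_NW_FRAG_ANY set becomes UINT8_MAX. */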
5612
5613 static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
5614 static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
5615 static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
5616 bool is_mask);
5617 static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
5618 bool is_mask);
5619 static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
5620 bool is_mask);
5621 static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
5622 bool is_mask);
5623 static void get_arp_key(const struct flow *, struct ovs_key_arp *);
5624 static void put_arp_key(const struct ovs_key_arp *, struct flow *);
5625 static void get_nd_key(const struct flow *, struct ovs_key_nd *);
5626 static void put_nd_key(const struct ovs_key_nd *, struct flow *);
5627 static void get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh,
5628 bool is_mask);
5629 static void put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
5630 bool is_mask);
5631
5632 /* These share the same layout. */
5633 union ovs_key_tp {
5634 struct ovs_key_tcp tcp;
5635 struct ovs_key_udp udp;
5636 struct ovs_key_sctp sctp;
5637 };
5638
5639 static void get_tp_key(const struct flow *, union ovs_key_tp *);
5640 static void put_tp_key(const union ovs_key_tp *, struct flow *);
5641
5642 static void
5643 odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms,
5644 bool export_mask, struct ofpbuf *buf)
5645 {
5646 struct ovs_key_ethernet *eth_key;
5647 size_t encap[FLOW_MAX_VLAN_HEADERS] = {0};
5648 size_t max_vlans;
5649 const struct flow *flow = parms->flow;
5650 const struct flow *mask = parms->mask;
5651 const struct flow *data = export_mask ? mask : flow;
5652
5653 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
5654
5655 if (flow_tnl_dst_is_set(&flow->tunnel) || export_mask) {
5656 tun_key_to_attr(buf, &data->tunnel, &parms->flow->tunnel,
5657 parms->key_buf, NULL);
5658 }
5659
5660 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
5661
5662 if (parms->support.ct_state) {
5663 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
5664 ovs_to_odp_ct_state(data->ct_state));
5665 }
5666 if (parms->support.ct_zone) {
5667 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, data->ct_zone);
5668 }
5669 if (parms->support.ct_mark) {
5670 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, data->ct_mark);
5671 }
5672 if (parms->support.ct_label) {
5673 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &data->ct_label,
5674 sizeof(data->ct_label));
5675 }
5676 if (flow->ct_nw_proto) {
5677 if (parms->support.ct_orig_tuple
5678 && flow->dl_type == htons(ETH_TYPE_IP)) {
5679 struct ovs_key_ct_tuple_ipv4 ct = {
5680 data->ct_nw_src,
5681 data->ct_nw_dst,
5682 data->ct_tp_src,
5683 data->ct_tp_dst,
5684 data->ct_nw_proto,
5685 };
5686 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, &ct,
5687 sizeof ct);
5688 } else if (parms->support.ct_orig_tuple6
5689 && flow->dl_type == htons(ETH_TYPE_IPV6)) {
5690 struct ovs_key_ct_tuple_ipv6 ct = {
5691 data->ct_ipv6_src,
5692 data->ct_ipv6_dst,
5693 data->ct_tp_src,
5694 data->ct_tp_dst,
5695 data->ct_nw_proto,
5696 };
5697 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, &ct,
5698 sizeof ct);
5699 }
5700 }
5701 if (parms->support.recirc) {
5702 nl_msg_put_u32(buf, OVS_KEY_ATTR_RECIRC_ID, data->recirc_id);
5703 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, data->dp_hash);
5704 }
5705
5706 /* Add an ingress port attribute if this is a mask or 'in_port.odp_port'
5707 * is not the magical value "ODPP_NONE". */
5708 if (export_mask || flow->in_port.odp_port != ODPP_NONE) {
5709 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, data->in_port.odp_port);
5710 }
5711
5712 nl_msg_put_be32(buf, OVS_KEY_ATTR_PACKET_TYPE, data->packet_type);
5713
5714 if (OVS_UNLIKELY(parms->probe)) {
5715 max_vlans = FLOW_MAX_VLAN_HEADERS;
5716 } else {
5717 max_vlans = MIN(parms->support.max_vlan_headers, flow_vlan_limit);
5718 }
5719
5720 /* Conditionally add L2 attributes for Ethernet packets */
5721 if (flow->packet_type == htonl(PT_ETH)) {
5722 eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
5723 sizeof *eth_key);
5724 get_ethernet_key(data, eth_key);
5725
5726 for (int encaps = 0; encaps < max_vlans; encaps++) {
5727 ovs_be16 tpid = flow->vlans[encaps].tpid;
5728
5729 if (flow->vlans[encaps].tci == htons(0)) {
5730 if (eth_type_vlan(flow->dl_type)) {
5731 /* If the VLAN header was truncated, the TPID is in dl_type. */
5732 tpid = flow->dl_type;
5733 } else {
5734 break;
5735 }
5736 }
5737
5738 if (export_mask) {
5739 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
5740 } else {
5741 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, tpid);
5742 }
5743 nl_msg_put_be16(buf, OVS_KEY_ATTR_VLAN, data->vlans[encaps].tci);
5744 encap[encaps] = nl_msg_start_nested(buf, OVS_KEY_ATTR_ENCAP);
5745 if (flow->vlans[encaps].tci == htons(0)) {
5746 goto unencap;
5747 }
5748 }
5749 }
5750
5751 if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
5752 /* For backwards compatibility with kernels that don't support
5753 * wildcarding, the following convention is used to encode the
5754 * OVS_KEY_ATTR_ETHERTYPE for key and mask:
5755 *
5756 * key mask matches
5757 * -------- -------- -------
5758 * >0x5ff 0xffff Specified Ethernet II Ethertype.
5759 * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
5760 * <none> 0xffff Any non-Ethernet II frame (except valid
5761 * 802.3 SNAP packet with valid eth_type).
5762 */
5763 if (export_mask) {
5764 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
5765 }
5766 goto unencap;
5767 }
5768
5769 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
5770
5771 if (eth_type_vlan(flow->dl_type)) {
5772 goto unencap;
5773 }
5774
5775 if (flow->dl_type == htons(ETH_TYPE_IP)) {
5776 struct ovs_key_ipv4 *ipv4_key;
5777
5778 ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
5779 sizeof *ipv4_key);
5780 get_ipv4_key(data, ipv4_key, export_mask);
5781 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
5782 struct ovs_key_ipv6 *ipv6_key;
5783
5784 ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
5785 sizeof *ipv6_key);
5786 get_ipv6_key(data, ipv6_key, export_mask);
5787 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
5788 flow->dl_type == htons(ETH_TYPE_RARP)) {
5789 struct ovs_key_arp *arp_key;
5790
5791 arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
5792 sizeof *arp_key);
5793 get_arp_key(data, arp_key);
5794 } else if (eth_type_mpls(flow->dl_type)) {
5795 struct ovs_key_mpls *mpls_key;
5796 int i, n;
5797
5798 n = flow_count_mpls_labels(flow, NULL);
5799 if (export_mask) {
5800 n = MIN(n, parms->support.max_mpls_depth);
5801 }
5802 mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
5803 n * sizeof *mpls_key);
5804 for (i = 0; i < n; i++) {
5805 mpls_key[i].mpls_lse = data->mpls_lse[i];
5806 }
5807 } else if (flow->dl_type == htons(ETH_TYPE_NSH)) {
5808 nsh_key_to_attr(buf, &data->nsh, NULL, 0, export_mask);
5809 }
5810
5811 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
5812 if (flow->nw_proto == IPPROTO_TCP) {
5813 union ovs_key_tp *tcp_key;
5814
5815 tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
5816 sizeof *tcp_key);
5817 get_tp_key(data, tcp_key);
5818 if (data->tcp_flags || (mask && mask->tcp_flags)) {
5819 nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
5820 }
5821 } else if (flow->nw_proto == IPPROTO_UDP) {
5822 union ovs_key_tp *udp_key;
5823
5824 udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
5825 sizeof *udp_key);
5826 get_tp_key(data, udp_key);
5827 } else if (flow->nw_proto == IPPROTO_SCTP) {
5828 union ovs_key_tp *sctp_key;
5829
5830 sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
5831 sizeof *sctp_key);
5832 get_tp_key(data, sctp_key);
5833 } else if (flow->dl_type == htons(ETH_TYPE_IP)
5834 && flow->nw_proto == IPPROTO_ICMP) {
5835 struct ovs_key_icmp *icmp_key;
5836
5837 icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
5838 sizeof *icmp_key);
5839 icmp_key->icmp_type = ntohs(data->tp_src);
5840 icmp_key->icmp_code = ntohs(data->tp_dst);
5841 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
5842 && flow->nw_proto == IPPROTO_ICMPV6) {
5843 struct ovs_key_icmpv6 *icmpv6_key;
5844
5845 icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
5846 sizeof *icmpv6_key);
5847 icmpv6_key->icmpv6_type = ntohs(data->tp_src);
5848 icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
5849
5850 if (is_nd(flow, NULL)
5851 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, ICMP
5852 * type and code are 8 bits wide. Therefore, an exact match
5853 * looks like htons(0xff), not htons(0xffff). See
5854 * xlate_wc_finish() for details. */
5855 && (!export_mask || (data->tp_src == htons(0xff)
5856 && data->tp_dst == htons(0xff)))) {
5857
5858 struct ovs_key_nd *nd_key;
5859
5860 nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
5861 sizeof *nd_key);
5862 nd_key->nd_target = data->nd_target;
5863 nd_key->nd_sll = data->arp_sha;
5864 nd_key->nd_tll = data->arp_tha;
5865 }
5866 }
5867 }
5868
5869 unencap:
5870 for (int encaps = max_vlans - 1; encaps >= 0; encaps--) {
5871 if (encap[encaps]) {
5872 nl_msg_end_nested(buf, encap[encaps]);
5873 }
5874 }
5875 }
5876
5877 /* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
5878 *
5879 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
5880 * capable of being expanded to allow for that much space. */
5881 void
5882 odp_flow_key_from_flow(const struct odp_flow_key_parms *parms,
5883 struct ofpbuf *buf)
5884 {
5885 odp_flow_key_from_flow__(parms, false, buf);
5886 }
5887
5888 /* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
5889 * 'buf'.
5890 *
5891 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
5892 * capable of being expanded to allow for that much space. */
5893 void
5894 odp_flow_key_from_mask(const struct odp_flow_key_parms *parms,
5895 struct ofpbuf *buf)
5896 {
5897 odp_flow_key_from_flow__(parms, true, buf);
5898 }
5899
5900 /* Generate ODP flow key from the given packet metadata. */
5901 void
5902 odp_key_from_dp_packet(struct ofpbuf *buf, const struct dp_packet *packet)
5903 {
5904 const struct pkt_metadata *md = &packet->md;
5905
5906 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
5907
5908 if (flow_tnl_dst_is_set(&md->tunnel)) {
5909 tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL, NULL);
5910 }
5911
5912 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
5913
5914 if (md->ct_state) {
5915 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_STATE,
5916 ovs_to_odp_ct_state(md->ct_state));
5917 if (md->ct_zone) {
5918 nl_msg_put_u16(buf, OVS_KEY_ATTR_CT_ZONE, md->ct_zone);
5919 }
5920 if (md->ct_mark) {
5921 nl_msg_put_u32(buf, OVS_KEY_ATTR_CT_MARK, md->ct_mark);
5922 }
5923 if (!ovs_u128_is_zero(md->ct_label)) {
5924 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &md->ct_label,
5925 sizeof(md->ct_label));
5926 }
5927 if (md->ct_orig_tuple_ipv6) {
5928 if (md->ct_orig_tuple.ipv6.ipv6_proto) {
5929 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
5930 &md->ct_orig_tuple.ipv6,
5931 sizeof md->ct_orig_tuple.ipv6);
5932 }
5933 } else {
5934 if (md->ct_orig_tuple.ipv4.ipv4_proto) {
5935 nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
5936 &md->ct_orig_tuple.ipv4,
5937 sizeof md->ct_orig_tuple.ipv4);
5938 }
5939 }
5940 }
5941
5942 /* Add an ingress port attribute if 'md->in_port.odp_port' is not the
5943 * magical value "ODPP_NONE". */
5944 if (md->in_port.odp_port != ODPP_NONE) {
5945 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, md->in_port.odp_port);
5946 }
5947
5948 /* Add OVS_KEY_ATTR_ETHERTYPE for non-Ethernet (L3) packets. */
5949 if (pt_ns(packet->packet_type) == OFPHTN_ETHERTYPE) {
5950 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE,
5951 pt_ns_type_be(packet->packet_type));
5952 }
5953 }
5954
5955 /* Generate packet metadata from the given ODP flow key. */
5956 void
5957 odp_key_to_dp_packet(const struct nlattr *key, size_t key_len,
5958 struct dp_packet *packet)
5959 {
5960 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
5961 const struct nlattr *nla;
5962 struct pkt_metadata *md = &packet->md;
5963 ovs_be32 packet_type = htonl(PT_UNKNOWN);
5964 ovs_be16 ethertype = 0;
5965 size_t left;
5966
5967 pkt_metadata_init(md, ODPP_NONE);
5968
5969 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
5970 enum ovs_key_attr type = nl_attr_type(nla);
5971 size_t len = nl_attr_get_size(nla);
5972 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
5973 OVS_KEY_ATTR_MAX, type);
5974
5975 if (len != expected_len && expected_len >= 0) {
5976 continue;
5977 }
5978
5979 switch (type) {
5980 case OVS_KEY_ATTR_RECIRC_ID:
5981 md->recirc_id = nl_attr_get_u32(nla);
5982 break;
5983 case OVS_KEY_ATTR_DP_HASH:
5984 md->dp_hash = nl_attr_get_u32(nla);
5985 break;
5986 case OVS_KEY_ATTR_PRIORITY:
5987 md->skb_priority = nl_attr_get_u32(nla);
5988 break;
5989 case OVS_KEY_ATTR_SKB_MARK:
5990 md->pkt_mark = nl_attr_get_u32(nla);
5991 break;
5992 case OVS_KEY_ATTR_CT_STATE:
5993 md->ct_state = odp_to_ovs_ct_state(nl_attr_get_u32(nla));
5994 break;
5995 case OVS_KEY_ATTR_CT_ZONE:
5996 md->ct_zone = nl_attr_get_u16(nla);
5997 break;
5998 case OVS_KEY_ATTR_CT_MARK:
5999 md->ct_mark = nl_attr_get_u32(nla);
6000 break;
6001 case OVS_KEY_ATTR_CT_LABELS: {
6002 md->ct_label = nl_attr_get_u128(nla);
6003 break;
6004 }
6005 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
6006 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(nla);
6007 md->ct_orig_tuple.ipv4 = *ct;
6008 md->ct_orig_tuple_ipv6 = false;
6009 break;
6010 }
6011 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
6012 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(nla);
6013
6014 md->ct_orig_tuple.ipv6 = *ct;
6015 md->ct_orig_tuple_ipv6 = true;
6016 break;
6017 }
6018 case OVS_KEY_ATTR_TUNNEL: {
6019 enum odp_key_fitness res;
6020
6021 res = odp_tun_key_from_attr(nla, &md->tunnel);
6022 if (res == ODP_FIT_ERROR) {
6023 memset(&md->tunnel, 0, sizeof md->tunnel);
6024 }
6025 break;
6026 }
6027 case OVS_KEY_ATTR_IN_PORT:
6028 md->in_port.odp_port = nl_attr_get_odp_port(nla);
6029 break;
6030 case OVS_KEY_ATTR_ETHERNET:
6031 /* Presence of OVS_KEY_ATTR_ETHERNET indicates Ethernet packet. */
6032 packet_type = htonl(PT_ETH);
6033 break;
6034 case OVS_KEY_ATTR_ETHERTYPE:
6035 ethertype = nl_attr_get_be16(nla);
6036 break;
6037 case OVS_KEY_ATTR_UNSPEC:
6038 case OVS_KEY_ATTR_ENCAP:
6039 case OVS_KEY_ATTR_VLAN:
6040 case OVS_KEY_ATTR_IPV4:
6041 case OVS_KEY_ATTR_IPV6:
6042 case OVS_KEY_ATTR_TCP:
6043 case OVS_KEY_ATTR_UDP:
6044 case OVS_KEY_ATTR_ICMP:
6045 case OVS_KEY_ATTR_ICMPV6:
6046 case OVS_KEY_ATTR_ARP:
6047 case OVS_KEY_ATTR_ND:
6048 case OVS_KEY_ATTR_SCTP:
6049 case OVS_KEY_ATTR_TCP_FLAGS:
6050 case OVS_KEY_ATTR_MPLS:
6051 case OVS_KEY_ATTR_PACKET_TYPE:
6052 case OVS_KEY_ATTR_NSH:
6053 case __OVS_KEY_ATTR_MAX:
6054 default:
6055 break;
6056 }
6057 }
6058
6059 if (packet_type == htonl(PT_ETH)) {
6060 packet->packet_type = htonl(PT_ETH);
6061 } else if (packet_type == htonl(PT_UNKNOWN) && ethertype != 0) {
6062 packet->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
6063 ntohs(ethertype));
6064 } else {
6065 VLOG_ERR_RL(&rl, "Packet without ETHERTYPE. Unknown packet_type.");
6066 }
6067 }
6068
6069 uint32_t
6070 odp_flow_key_hash(const struct nlattr *key, size_t key_len)
6071 {
6072 BUILD_ASSERT_DECL(!(NLA_ALIGNTO % sizeof(uint32_t)));
6073 return hash_bytes32(ALIGNED_CAST(const uint32_t *, key), key_len, 0);
6074 }
6075
6076 static void
6077 log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
6078 uint64_t attrs, int out_of_range_attr,
6079 const struct nlattr *key, size_t key_len)
6080 {
6081 struct ds s;
6082 int i;
6083
6084 if (VLOG_DROP_DBG(rl)) {
6085 return;
6086 }
6087
6088 ds_init(&s);
6089 for (i = 0; i < 64; i++) {
6090 if (attrs & (UINT64_C(1) << i)) {
6091 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6092
6093 ds_put_format(&s, " %s",
6094 ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
6095 }
6096 }
6097 if (out_of_range_attr) {
6098 ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
6099 }
6100
6101 ds_put_cstr(&s, ": ");
6102 odp_flow_key_format(key, key_len, &s);
6103
6104 VLOG_DBG("%s:%s", title, ds_cstr(&s));
6105 ds_destroy(&s);
6106 }
6107
6108 static uint8_t
6109 odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
6110 {
6111 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6112
6113 if (is_mask) {
6114 return odp_frag ? FLOW_NW_FRAG_MASK : 0;
6115 }
6116
6117 if (odp_frag > OVS_FRAG_TYPE_LATER) {
6118 VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
6119 return 0xff; /* Error. */
6120 }
6121
6122 return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
6123 : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
6124 : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
6125 }
6126
6127 static bool
6128 parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
6129 const struct nlattr *attrs[], uint64_t *present_attrsp,
6130 int *out_of_range_attrp)
6131 {
6132 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6133 const struct nlattr *nla;
6134 uint64_t present_attrs;
6135 size_t left;
6136
6137 BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
6138 present_attrs = 0;
6139 *out_of_range_attrp = 0;
6140 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
6141 uint16_t type = nl_attr_type(nla);
6142 size_t len = nl_attr_get_size(nla);
6143 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
6144 OVS_KEY_ATTR_MAX, type);
6145
6146 if (len != expected_len && expected_len >= 0) {
6147 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6148
6149 VLOG_ERR_RL(&rl, "attribute %s has length %"PRIuSIZE" but should have "
6150 "length %d", ovs_key_attr_to_string(type, namebuf,
6151 sizeof namebuf),
6152 len, expected_len);
6153 return false;
6154 }
6155
6156 if (type > OVS_KEY_ATTR_MAX) {
6157 *out_of_range_attrp = type;
6158 } else {
6159 if (present_attrs & (UINT64_C(1) << type)) {
6160 char namebuf[OVS_KEY_ATTR_BUFSIZE];
6161
6162 VLOG_ERR_RL(&rl, "duplicate %s attribute in flow key",
6163 ovs_key_attr_to_string(type,
6164 namebuf, sizeof namebuf));
6165 return false;
6166 }
6167
6168 present_attrs |= UINT64_C(1) << type;
6169 attrs[type] = nla;
6170 }
6171 }
6172 if (left) {
6173 VLOG_ERR_RL(&rl, "trailing garbage in flow key");
6174 return false;
6175 }
6176
6177 *present_attrsp = present_attrs;
6178 return true;
6179 }
6180
6181 static enum odp_key_fitness
6182 check_expectations(uint64_t present_attrs, int out_of_range_attr,
6183 uint64_t expected_attrs,
6184 const struct nlattr *key, size_t key_len)
6185 {
6186 uint64_t missing_attrs;
6187 uint64_t extra_attrs;
6188
6189 missing_attrs = expected_attrs & ~present_attrs;
6190 if (missing_attrs) {
6191 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6192 log_odp_key_attributes(&rl, "expected but not present",
6193 missing_attrs, 0, key, key_len);
6194 return ODP_FIT_TOO_LITTLE;
6195 }
6196
6197 extra_attrs = present_attrs & ~expected_attrs;
6198 if (extra_attrs || out_of_range_attr) {
6199 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
6200 log_odp_key_attributes(&rl, "present but not expected",
6201 extra_attrs, out_of_range_attr, key, key_len);
6202 return ODP_FIT_TOO_MUCH;
6203 }
6204
6205 return ODP_FIT_PERFECT;
6206 }
6207
6208 static bool
6209 parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6210 uint64_t present_attrs, uint64_t *expected_attrs,
6211 struct flow *flow, const struct flow *src_flow)
6212 {
6213 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6214 bool is_mask = flow != src_flow;
6215
6216 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
6217 flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
6218 if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
6219 VLOG_ERR_RL(&rl, "invalid Ethertype %"PRIu16" in flow key",
6220 ntohs(flow->dl_type));
6221 return false;
6222 }
6223 if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
6224 flow->dl_type != htons(0xffff)) {
6225 return false;
6226 }
6227 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
6228 } else {
6229 if (!is_mask) {
6230 /* Default ethertype for well-known L3 packets. */
6231 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6232 flow->dl_type = htons(ETH_TYPE_IP);
6233 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6234 flow->dl_type = htons(ETH_TYPE_IPV6);
6235 } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6236 flow->dl_type = htons(ETH_TYPE_MPLS);
6237 } else {
6238 flow->dl_type = htons(FLOW_DL_TYPE_NONE);
6239 }
6240 } else if (src_flow->packet_type != htonl(PT_ETH)) {
6241 /* dl_type is mandatory for non-Ethernet packets */
6242 flow->dl_type = htons(0xffff);
6243 } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
6244 /* See comments in odp_flow_key_from_flow__(). */
6245 VLOG_ERR_RL(&rl, "mask expected for non-Ethernet II frame");
6246 return false;
6247 }
6248 }
6249 return true;
6250 }
6251
6252 static enum odp_key_fitness
6253 parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6254 uint64_t present_attrs, int out_of_range_attr,
6255 uint64_t expected_attrs, struct flow *flow,
6256 const struct nlattr *key, size_t key_len,
6257 const struct flow *src_flow)
6258 {
6259 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6260 bool is_mask = src_flow != flow;
6261 const void *check_start = NULL;
6262 size_t check_len = 0;
6263 enum ovs_key_attr expected_bit = 0xff;
6264
6265 if (eth_type_mpls(src_flow->dl_type)) {
6266 if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6267 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
6268 }
6269 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
6270 size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
6271 const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
6272 int n = size / sizeof(ovs_be32);
6273 int i;
6274
6275 if (!size || size % sizeof(ovs_be32)) {
6276 return ODP_FIT_ERROR;
6277 }
6278 if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
6279 return ODP_FIT_ERROR;
6280 }
6281
6282 for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
6283 flow->mpls_lse[i] = mpls_lse[i];
6284 }
6285 if (n > FLOW_MAX_MPLS_LABELS) {
6286 return ODP_FIT_TOO_MUCH;
6287 }
6288
6289 if (!is_mask) {
6290 /* BOS may be set only in the innermost label. */
6291 for (i = 0; i < n - 1; i++) {
6292 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
6293 return ODP_FIT_ERROR;
6294 }
6295 }
6296
6297 /* BOS must be set in the innermost label. */
6298 if (n < FLOW_MAX_MPLS_LABELS
6299 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
6300 return ODP_FIT_TOO_LITTLE;
6301 }
6302 }
6303 }
6304
6305 goto done;
6306 } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
6307 if (!is_mask) {
6308 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
6309 }
6310 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
6311 const struct ovs_key_ipv4 *ipv4_key;
6312
6313 ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
6314 put_ipv4_key(ipv4_key, flow, is_mask);
6315 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6316 return ODP_FIT_ERROR;
6317 }
6318 if (is_mask) {
6319 check_start = ipv4_key;
6320 check_len = sizeof *ipv4_key;
6321 expected_bit = OVS_KEY_ATTR_IPV4;
6322 }
6323 }
6324 } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
6325 if (!is_mask) {
6326 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
6327 }
6328 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
6329 const struct ovs_key_ipv6 *ipv6_key;
6330
6331 ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
6332 put_ipv6_key(ipv6_key, flow, is_mask);
6333 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
6334 return ODP_FIT_ERROR;
6335 }
6336 if (is_mask) {
6337 check_start = ipv6_key;
6338 check_len = sizeof *ipv6_key;
6339 expected_bit = OVS_KEY_ATTR_IPV6;
6340 }
6341 }
6342 } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
6343 src_flow->dl_type == htons(ETH_TYPE_RARP)) {
6344 if (!is_mask) {
6345 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
6346 }
6347 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
6348 const struct ovs_key_arp *arp_key;
6349
6350 arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
6351 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
6352 VLOG_ERR_RL(&rl, "unsupported ARP opcode %"PRIu16" in flow "
6353 "key", ntohs(arp_key->arp_op));
6354 return ODP_FIT_ERROR;
6355 }
6356 put_arp_key(arp_key, flow);
6357 if (is_mask) {
6358 check_start = arp_key;
6359 check_len = sizeof *arp_key;
6360 expected_bit = OVS_KEY_ATTR_ARP;
6361 }
6362 }
6363 } else if (src_flow->dl_type == htons(ETH_TYPE_NSH)) {
6364 if (!is_mask) {
6365 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_NSH;
6366 }
6367 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_NSH)) {
6368 odp_nsh_key_from_attr(attrs[OVS_KEY_ATTR_NSH], &flow->nsh, NULL);
6369 if (is_mask) {
6370 check_start = nl_attr_get(attrs[OVS_KEY_ATTR_NSH]);
6371 check_len = nl_attr_get_size(attrs[OVS_KEY_ATTR_NSH]);
6372 expected_bit = OVS_KEY_ATTR_NSH;
6373 }
6374 }
6375 } else {
6376 goto done;
6377 }
6378 if (check_len > 0) { /* Happens only when 'is_mask'. */
6379 if (!is_all_zeros(check_start, check_len) &&
6380 flow->dl_type != htons(0xffff)) {
6381 return ODP_FIT_ERROR;
6382 } else {
6383 expected_attrs |= UINT64_C(1) << expected_bit;
6384 }
6385 }
6386
6387 expected_bit = OVS_KEY_ATTR_UNSPEC;
6388 if (src_flow->nw_proto == IPPROTO_TCP
6389 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6390 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6391 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6392 if (!is_mask) {
6393 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
6394 }
6395 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
6396 const union ovs_key_tp *tcp_key;
6397
6398 tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
6399 put_tp_key(tcp_key, flow);
6400 expected_bit = OVS_KEY_ATTR_TCP;
6401 }
6402 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
6403 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
6404 flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
6405 }
6406 } else if (src_flow->nw_proto == IPPROTO_UDP
6407 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6408 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6409 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6410 if (!is_mask) {
6411 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
6412 }
6413 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
6414 const union ovs_key_tp *udp_key;
6415
6416 udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
6417 put_tp_key(udp_key, flow);
6418 expected_bit = OVS_KEY_ATTR_UDP;
6419 }
6420 } else if (src_flow->nw_proto == IPPROTO_SCTP
6421 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
6422 src_flow->dl_type == htons(ETH_TYPE_IPV6))
6423 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6424 if (!is_mask) {
6425 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
6426 }
6427 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
6428 const union ovs_key_tp *sctp_key;
6429
6430 sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
6431 put_tp_key(sctp_key, flow);
6432 expected_bit = OVS_KEY_ATTR_SCTP;
6433 }
6434 } else if (src_flow->nw_proto == IPPROTO_ICMP
6435 && src_flow->dl_type == htons(ETH_TYPE_IP)
6436 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6437 if (!is_mask) {
6438 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
6439 }
6440 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
6441 const struct ovs_key_icmp *icmp_key;
6442
6443 icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
6444 flow->tp_src = htons(icmp_key->icmp_type);
6445 flow->tp_dst = htons(icmp_key->icmp_code);
6446 expected_bit = OVS_KEY_ATTR_ICMP;
6447 }
6448 } else if (src_flow->nw_proto == IPPROTO_ICMPV6
6449 && src_flow->dl_type == htons(ETH_TYPE_IPV6)
6450 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6451 if (!is_mask) {
6452 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
6453 }
6454 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
6455 const struct ovs_key_icmpv6 *icmpv6_key;
6456
6457 icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
6458 flow->tp_src = htons(icmpv6_key->icmpv6_type);
6459 flow->tp_dst = htons(icmpv6_key->icmpv6_code);
6460 expected_bit = OVS_KEY_ATTR_ICMPV6;
6461 if (is_nd(src_flow, NULL)) {
6462 if (!is_mask) {
6463 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
6464 }
6465 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
6466 const struct ovs_key_nd *nd_key;
6467
6468 nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
6469 flow->nd_target = nd_key->nd_target;
6470 flow->arp_sha = nd_key->nd_sll;
6471 flow->arp_tha = nd_key->nd_tll;
6472 if (is_mask) {
6473 /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
6474 * ICMP type and code are 8 bits wide. Therefore, an
6475 * exact match looks like htons(0xff), not
6476 * htons(0xffff). See xlate_wc_finish() for details.
6477 */
6478 if (!is_all_zeros(nd_key, sizeof *nd_key) &&
6479 (flow->tp_src != htons(0xff) ||
6480 flow->tp_dst != htons(0xff))) {
6481 return ODP_FIT_ERROR;
6482 } else {
6483 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
6484 }
6485 }
6486 }
6487 }
6488 }
6489 } else if (src_flow->nw_proto == IPPROTO_IGMP
6490 && src_flow->dl_type == htons(ETH_TYPE_IP)) {
6491 /* OVS userspace parses the IGMP type, code, and group, but its
6492 * datapaths do not, so there is always missing information. */
6493 return ODP_FIT_TOO_LITTLE;
6494 }
6495 if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
6496 if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
6497 return ODP_FIT_ERROR;
6498 } else {
6499 expected_attrs |= UINT64_C(1) << expected_bit;
6500 }
6501 }
6502
6503 done:
6504 return check_expectations(present_attrs, out_of_range_attr, expected_attrs,
6505 key, key_len);
6506 }
6507
6508 /* Parse 802.1Q header then encapsulated L3 attributes. */
6509 static enum odp_key_fitness
6510 parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
6511 uint64_t present_attrs, int out_of_range_attr,
6512 uint64_t expected_attrs, struct flow *flow,
6513 const struct nlattr *key, size_t key_len,
6514 const struct flow *src_flow)
6515 {
6516 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6517 bool is_mask = src_flow != flow;
6518
6519 const struct nlattr *encap;
6520 enum odp_key_fitness encap_fitness;
6521 enum odp_key_fitness fitness = ODP_FIT_ERROR;
6522 int encaps = 0;
6523
6524 while (encaps < flow_vlan_limit &&
6525 (is_mask
6526 ? (src_flow->vlans[encaps].tci & htons(VLAN_CFI)) != 0
6527 : eth_type_vlan(flow->dl_type))) {
6528
6529 encap = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
6530 ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
6531
6532 /* Calculate fitness of outer attributes. */
6533 if (!is_mask) {
6534 expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
6535 (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
6536 } else {
6537 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
6538 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
6539 }
6540 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
6541 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
6542 }
6543 }
6544 fitness = check_expectations(present_attrs, out_of_range_attr,
6545 expected_attrs, key, key_len);
6546
6547 /* Set vlan_tci.
6548 * Remove the TPID from dl_type since it's not the real Ethertype. */
6549 flow->vlans[encaps].tpid = flow->dl_type;
6550 flow->dl_type = htons(0);
6551 flow->vlans[encaps].tci =
6552 (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
6553 ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
6554 : htons(0));
6555 if (!is_mask) {
6556 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) ||
6557 !(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
6558 return ODP_FIT_TOO_LITTLE;
6559 } else if (flow->vlans[encaps].tci == htons(0)) {
6560 /* Corner case for a truncated 802.1Q header. */
6561 if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
6562 return ODP_FIT_TOO_MUCH;
6563 }
6564 return fitness;
6565 } else if (!(flow->vlans[encaps].tci & htons(VLAN_CFI))) {
6566 VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
6567 "but CFI bit is not set",
6568 ntohs(flow->vlans[encaps].tci));
6569 return ODP_FIT_ERROR;
6570 }
6571 } else {
6572 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
6573 return fitness;
6574 }
6575 }
6576
6577 /* Now parse the encapsulated attributes. */
6578 if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
6579 attrs, &present_attrs, &out_of_range_attr)) {
6580 return ODP_FIT_ERROR;
6581 }
6582 expected_attrs = 0;
6583
6584 if (!parse_ethertype(attrs, present_attrs, &expected_attrs,
6585 flow, src_flow)) {
6586 return ODP_FIT_ERROR;
6587 }
6588
6589 encaps++;
6590 }
6591
6592 encap_fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
6593 expected_attrs, flow, key, key_len,
6594 src_flow);
6595
6596 /* The overall fitness is the worse of the outer and inner attributes. */
6597 return MAX(fitness, encap_fitness);
6598 }
6599
6600 static enum odp_key_fitness
6601 odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
6602 struct flow *flow, const struct flow *src_flow)
6603 {
6604 const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
6605 uint64_t expected_attrs;
6606 uint64_t present_attrs;
6607 int out_of_range_attr;
6608 bool is_mask = src_flow != flow;
6609
6610 memset(flow, 0, sizeof *flow);
6611
6612 /* Parse attributes. */
6613 if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
6614 &out_of_range_attr)) {
6615 return ODP_FIT_ERROR;
6616 }
6617 expected_attrs = 0;
6618
6619 /* Metadata. */
6620 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
6621 flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
6622 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
6623 } else if (is_mask) {
6624 /* Always exact match recirc_id if it is not specified. */
6625 flow->recirc_id = UINT32_MAX;
6626 }
6627
6628 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
6629 flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
6630 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
6631 }
6632 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
6633 flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
6634 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
6635 }
6636
6637 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
6638 flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
6639 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
6640 }
6641
6642 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_STATE)) {
6643 uint32_t odp_state = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_STATE]);
6644
6645 flow->ct_state = odp_to_ovs_ct_state(odp_state);
6646 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_STATE;
6647 }
6648 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE)) {
6649 flow->ct_zone = nl_attr_get_u16(attrs[OVS_KEY_ATTR_CT_ZONE]);
6650 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ZONE;
6651 }
6652 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_MARK)) {
6653 flow->ct_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_CT_MARK]);
6654 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_MARK;
6655 }
6656 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS)) {
6657 flow->ct_label = nl_attr_get_u128(attrs[OVS_KEY_ATTR_CT_LABELS]);
6658 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_LABELS;
6659 }
6660 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
6661 const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
6662 flow->ct_nw_src = ct->ipv4_src;
6663 flow->ct_nw_dst = ct->ipv4_dst;
6664 flow->ct_nw_proto = ct->ipv4_proto;
6665 flow->ct_tp_src = ct->src_port;
6666 flow->ct_tp_dst = ct->dst_port;
6667 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
6668 }
6669 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
6670 const struct ovs_key_ct_tuple_ipv6 *ct = nl_attr_get(attrs[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
6671
6672 flow->ct_ipv6_src = ct->ipv6_src;
6673 flow->ct_ipv6_dst = ct->ipv6_dst;
6674 flow->ct_nw_proto = ct->ipv6_proto;
6675 flow->ct_tp_src = ct->src_port;
6676 flow->ct_tp_dst = ct->dst_port;
6677 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
6678 }
6679
6680 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
6681 enum odp_key_fitness res;
6682
6683 res = odp_tun_key_from_attr__(attrs[OVS_KEY_ATTR_TUNNEL], is_mask,
6684 &flow->tunnel);
6685 if (res == ODP_FIT_ERROR) {
6686 return ODP_FIT_ERROR;
6687 } else if (res == ODP_FIT_PERFECT) {
6688 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
6689 }
6690 }
6691
6692 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
6693 flow->in_port.odp_port
6694 = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
6695 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
6696 } else if (!is_mask) {
6697 flow->in_port.odp_port = ODPP_NONE;
6698 }
6699
6700 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE)) {
6701 flow->packet_type
6702 = nl_attr_get_be32(attrs[OVS_KEY_ATTR_PACKET_TYPE]);
6703 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE;
6704 } else if (!is_mask) {
6705 flow->packet_type = htonl(PT_ETH);
6706 }
6707
6708 /* Check for Ethernet header. */
6709 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
6710 const struct ovs_key_ethernet *eth_key;
6711
6712 eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
6713 put_ethernet_key(eth_key, flow);
6714 if (!is_mask) {
6715 flow->packet_type = htonl(PT_ETH);
6716 }
6717 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
6718 }
6719 else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
6720 ovs_be16 ethertype = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
6721 if (!is_mask) {
6722 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
6723 ntohs(ethertype));
6724 }
6725 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
6726 }
6727
6728 /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
6729 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
6730 src_flow)) {
6731 return ODP_FIT_ERROR;
6732 }
6733
6734 if (is_mask
6735 ? (src_flow->vlans[0].tci & htons(VLAN_CFI)) != 0
6736 : eth_type_vlan(src_flow->dl_type)) {
6737 return parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
6738 expected_attrs, flow, key, key_len, src_flow);
6739 }
6740 if (is_mask) {
6741 /* A missing VLAN mask means exact match on vlan_tci 0 (== no VLAN). */
6742 flow->vlans[0].tpid = htons(0xffff);
6743 flow->vlans[0].tci = htons(0xffff);
6744 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
6745 flow->vlans[0].tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
6746 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
6747 }
6748 }
6749 return parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
6750 expected_attrs, flow, key, key_len, src_flow);
6751 }
6752
6753 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
6754 * structure in 'flow'. Returns an ODP_FIT_* value that indicates how well
6755 * 'key' fits our expectations for what a flow key should contain.
6756 *
6757 * The 'in_port' will be the datapath's understanding of the port. The
6758 * caller will need to translate with odp_port_to_ofp_port() if the
6759 * OpenFlow port is needed.
6760 *
6761 * This function doesn't take the packet itself as an argument because none of
6762 * the currently understood OVS_KEY_ATTR_* attributes require it. Currently,
6763 * it is always possible to infer which additional attribute(s) should appear
6764 * by looking at the attributes for lower-level protocols, e.g. if the network
6765 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
6766 * know that an OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
6767 * must be absent. */
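/* A minimal usage sketch; the caller's upcall-handling context, including
 * 'key' and 'key_len', is assumed here rather than taken from this file:
 *
 *     struct flow flow;
 *     enum odp_key_fitness fit = odp_flow_key_to_flow(key, key_len, &flow);
 *     if (fit == ODP_FIT_ERROR) {
 *         ...reject the flow key...
 *     }
 */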
6768 enum odp_key_fitness
6769 odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
6770 struct flow *flow)
6771 {
6772 return odp_flow_key_to_flow__(key, key_len, flow, flow);
6773 }
6774
6775 /* Converts the 'mask_key_len' bytes of OVS_KEY_ATTR_* attributes in 'mask_key'
6776 * to a mask structure in 'mask'. 'src_flow' must be the previously translated
6777 * flow corresponding to 'mask_key'. Returns an ODP_FIT_* value that indicates
6778 * how well 'mask_key' fits our expectations for what a flow mask should
6779 * contain. */
6780 enum odp_key_fitness
6781 odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len,
6782 struct flow_wildcards *mask, const struct flow *src_flow)
6783 {
6784 if (mask_key_len) {
6785 return odp_flow_key_to_flow__(mask_key, mask_key_len,
6786 &mask->masks, src_flow);
6787
6788 } else {
6789 /* A missing mask means that the flow should be exact matched.
6790 * Generate an appropriate exact wildcard for the flow. */
6791 flow_wildcards_init_for_packet(mask, src_flow);
6792
6793 return ODP_FIT_PERFECT;
6794 }
6795 }
6796
6797 /* Converts the netlink-formatted key/mask to a match. Fails if
6798 * odp_flow_key_from_flow()/odp_flow_key_from_mask() and
6799 * odp_flow_key_to_flow()/odp_flow_key_to_mask() disagree on the acceptable form of a flow. */
6800 int
6801 parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len,
6802 const struct nlattr *mask, size_t mask_len,
6803 struct match *match)
6804 {
6805 enum odp_key_fitness fitness;
6806
6807 fitness = odp_flow_key_to_flow(key, key_len, &match->flow);
6808 if (fitness) {
6809 /* This should not happen: it indicates that
6810 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
6811 * the acceptable form of a flow. Log the problem as an error,
6812 * with enough details to enable debugging. */
6813 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6814
6815 if (!VLOG_DROP_ERR(&rl)) {
6816 struct ds s;
6817
6818 ds_init(&s);
6819 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
6820 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
6821 ds_destroy(&s);
6822 }
6823
6824 return EINVAL;
6825 }
6826
6827 fitness = odp_flow_key_to_mask(mask, mask_len, &match->wc, &match->flow);
6828 if (fitness) {
6829 /* This should not happen: it indicates that
6830 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
6831 * disagree on the acceptable form of a mask. Log the problem
6832 * as an error, with enough details to enable debugging. */
6833 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6834
6835 if (!VLOG_DROP_ERR(&rl)) {
6836 struct ds s;
6837
6838 ds_init(&s);
6839 odp_flow_format(key, key_len, mask, mask_len, NULL, &s,
6840 true);
6841 VLOG_ERR("internal error parsing flow mask %s (%s)",
6842 ds_cstr(&s), odp_key_fitness_to_string(fitness));
6843 ds_destroy(&s);
6844 }
6845
6846 return EINVAL;
6847 }
6848
6849 return 0;
6850 }
6851
6852 /* Returns 'fitness' as a string, for use in debug messages. */
6853 const char *
6854 odp_key_fitness_to_string(enum odp_key_fitness fitness)
6855 {
6856 switch (fitness) {
6857 case ODP_FIT_PERFECT:
6858 return "OK";
6859 case ODP_FIT_TOO_MUCH:
6860 return "too_much";
6861 case ODP_FIT_TOO_LITTLE:
6862 return "too_little";
6863 case ODP_FIT_ERROR:
6864 return "error";
6865 default:
6866 return "<unknown>";
6867 }
6868 }
6869
6870 /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
6871 * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
6872 * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
6873 * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
6874 * null, then the return value is not meaningful.) */
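/* Sketch of a typical call; 'pid', 'cookie', and 'odp_actions' stand in for
 * caller-supplied values and are not defined here:
 *
 *     size_t ofs = odp_put_userspace_action(pid, &cookie, sizeof cookie,
 *                                           ODPP_NONE, false, odp_actions);
 *
 * Passing ODPP_NONE omits OVS_USERSPACE_ATTR_EGRESS_TUN_PORT and passing
 * 'false' omits OVS_USERSPACE_ATTR_ACTIONS, as the code below shows. */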
6875 size_t
6876 odp_put_userspace_action(uint32_t pid,
6877 const void *userdata, size_t userdata_size,
6878 odp_port_t tunnel_out_port,
6879 bool include_actions,
6880 struct ofpbuf *odp_actions)
6881 {
6882 size_t userdata_ofs;
6883 size_t offset;
6884
6885 offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
6886 nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
6887 if (userdata) {
6888 userdata_ofs = odp_actions->size + NLA_HDRLEN;
6889
6890 /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
6891 * module before Linux 3.10 required the userdata to be exactly 8 bytes
6892 * long:
6893 *
6894 * - The kernel rejected shorter userdata with -ERANGE.
6895 *
6896 * - The kernel silently dropped userdata beyond the first 8 bytes.
6897 *
6898 * Thus, for maximum compatibility, always put at least 8 bytes. (We
6899 * separately disable features that required more than 8 bytes.) */
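/* For example, a 4-byte cookie is copied into the first 4 bytes of an
 * 8-byte zeroed attribute; the remaining 4 bytes stay zero. */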
6900 memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
6901 MAX(8, userdata_size)),
6902 userdata, userdata_size);
6903 } else {
6904 userdata_ofs = 0;
6905 }
6906 if (tunnel_out_port != ODPP_NONE) {
6907 nl_msg_put_odp_port(odp_actions, OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,
6908 tunnel_out_port);
6909 }
6910 if (include_actions) {
6911 nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS);
6912 }
6913 nl_msg_end_nested(odp_actions, offset);
6914
6915 return userdata_ofs;
6916 }
6917
6918 void
6919 odp_put_pop_eth_action(struct ofpbuf *odp_actions)
6920 {
6921 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_ETH);
6922 }
6923
6924 void
6925 odp_put_push_eth_action(struct ofpbuf *odp_actions,
6926 const struct eth_addr *eth_src,
6927 const struct eth_addr *eth_dst)
6928 {
6929 struct ovs_action_push_eth eth;
6930
6931 memset(&eth, 0, sizeof eth);
6932 if (eth_src) {
6933 eth.addresses.eth_src = *eth_src;
6934 }
6935 if (eth_dst) {
6936 eth.addresses.eth_dst = *eth_dst;
6937 }
6938
6939 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_ETH,
6940 &eth, sizeof eth);
6941 }
6942
6943 void
6944 odp_put_tunnel_action(const struct flow_tnl *tunnel,
6945 struct ofpbuf *odp_actions, const char *tnl_type)
6946 {
6947 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
6948 tun_key_to_attr(odp_actions, tunnel, tunnel, NULL, tnl_type);
6949 nl_msg_end_nested(odp_actions, offset);
6950 }
6951
6952 void
6953 odp_put_tnl_push_action(struct ofpbuf *odp_actions,
6954 struct ovs_action_push_tnl *data)
6955 {
6956 int size = offsetof(struct ovs_action_push_tnl, header);
6957
6958 size += data->header_len;
6959 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
6960 }
6961
6962 \f
6963 /* The commit_odp_actions() function and its helpers. */
6964
6965 static void
6966 commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
6967 const void *key, size_t key_size)
6968 {
6969 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
6970 nl_msg_put_unspec(odp_actions, key_type, key, key_size);
6971 nl_msg_end_nested(odp_actions, offset);
6972 }
6973
6974 /* Masked set actions have a mask following the data within the netlink
6975 * attribute. The unmasked bits in the data will be cleared as the data
6976 * is copied to the action. */
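/* For example, with key_size == 2, key bytes {0xab, 0xcd} and mask bytes
 * {0x0f, 0xff} are emitted as {0x0b, 0xcd, 0x0f, 0xff}: the masked data
 * first, immediately followed by the mask. */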
6977 void
6978 commit_masked_set_action(struct ofpbuf *odp_actions,
6979 enum ovs_key_attr key_type,
6980 const void *key_, const void *mask_, size_t key_size)
6981 {
6982 size_t offset = nl_msg_start_nested(odp_actions,
6983 OVS_ACTION_ATTR_SET_MASKED);
6984 char *data = nl_msg_put_unspec_uninit(odp_actions, key_type, key_size * 2);
6985 const char *key = key_, *mask = mask_;
6986
6987 memcpy(data + key_size, mask, key_size);
6988 /* Clear unmasked bits while copying. */
6989 while (key_size--) {
6990 *data++ = *key++ & *mask++;
6991 }
6992 nl_msg_end_nested(odp_actions, offset);
6993 }
6994
6995 /* If any of the flow key data that ODP actions can modify are different in
6996 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
6997 * 'odp_actions' that changes the flow tunneling information in the key from
6998 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
6999 * same way. In other words, operates the same as commit_odp_actions(), but
7000 * only on tunneling information. */
7001 void
7002 commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
7003 struct ofpbuf *odp_actions, const char *tnl_type)
7004 {
7005 /* A valid IPV4_TUNNEL must have non-zero ip_dst; a valid IPv6 tunnel
7006 * must have non-zero ipv6_dst. */
7007 if (flow_tnl_dst_is_set(&flow->tunnel)) {
7008 if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
7009 return;
7010 }
7011 memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
7012 odp_put_tunnel_action(&base->tunnel, odp_actions, tnl_type);
7013 }
7014 }
7015
7016 static bool
7017 commit(enum ovs_key_attr attr, bool use_masked_set,
7018 const void *key, void *base, void *mask, size_t size,
7019 struct ofpbuf *odp_actions)
7020 {
7021 if (memcmp(key, base, size)) {
7022 bool fully_masked = odp_mask_is_exact(attr, mask, size);
7023
7024 if (use_masked_set && !fully_masked) {
7025 commit_masked_set_action(odp_actions, attr, key, mask, size);
7026 } else {
7027 if (!fully_masked) {
7028 memset(mask, 0xff, size);
7029 }
7030 commit_set_action(odp_actions, attr, key, size);
7031 }
7032 memcpy(base, key, size);
7033 return true;
7034 } else {
7035 /* Mask bits are set when we have either read or set the corresponding
7036 * values. Masked bits will be exact-matched, no need to set them
7037 * if the value did not actually change. */
7038 return false;
7039 }
7040 }
7041
7042 static void
7043 get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
7044 {
7045 eth->eth_src = flow->dl_src;
7046 eth->eth_dst = flow->dl_dst;
7047 }
7048
7049 static void
7050 put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
7051 {
7052 flow->dl_src = eth->eth_src;
7053 flow->dl_dst = eth->eth_dst;
7054 }
7055
7056 static void
7057 commit_set_ether_action(const struct flow *flow, struct flow *base_flow,
7058 struct ofpbuf *odp_actions,
7059 struct flow_wildcards *wc,
7060 bool use_masked)
7061 {
7062 struct ovs_key_ethernet key, base, mask;
7063
7064 if (flow->packet_type != htonl(PT_ETH)) {
7065 return;
7066 }
7067
7068 get_ethernet_key(flow, &key);
7069 get_ethernet_key(base_flow, &base);
7070 get_ethernet_key(&wc->masks, &mask);
7071
7072 if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
7073 &key, &base, &mask, sizeof key, odp_actions)) {
7074 put_ethernet_key(&base, base_flow);
7075 put_ethernet_key(&mask, &wc->masks);
7076 }
7077 }
7078
7079 static void
7080 commit_vlan_action(const struct flow* flow, struct flow *base,
7081 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7082 {
7083 int base_n = flow_count_vlan_headers(base);
7084 int flow_n = flow_count_vlan_headers(flow);
7085 flow_skip_common_vlan_headers(base, &base_n, flow, &flow_n);
7086
7087 /* Pop all mismatching VLAN headers of base, then push those of flow. */
7088 for (; base_n >= 0; base_n--) {
7089 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
7090 wc->masks.vlans[base_n].qtag = OVS_BE32_MAX;
7091 }
7092
7093 for (; flow_n >= 0; flow_n--) {
7094 struct ovs_action_push_vlan vlan;
7095
7096 vlan.vlan_tpid = flow->vlans[flow_n].tpid;
7097 vlan.vlan_tci = flow->vlans[flow_n].tci;
7098 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
7099 &vlan, sizeof vlan);
7100 }
7101 memcpy(base->vlans, flow->vlans, sizeof(base->vlans));
7102 }
7103
7104 /* Wildcarding already done at action translation time. */
7105 static void
7106 commit_mpls_action(const struct flow *flow, struct flow *base,
7107 struct ofpbuf *odp_actions)
7108 {
7109 int base_n = flow_count_mpls_labels(base, NULL);
7110 int flow_n = flow_count_mpls_labels(flow, NULL);
7111 int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
7112 NULL);
7113
7114 while (base_n > common_n) {
7115 if (base_n - 1 == common_n && flow_n > common_n) {
7116 /* If base has exactly one more LSE than it has in common with
7117 * flow, and flow has at least one more LSE than is common, then
7118 * the topmost LSE of base may be updated in place using a set
7119 * action. */
7120 struct ovs_key_mpls mpls_key;
7121
7122 mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
7123 commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
7124 &mpls_key, sizeof mpls_key);
7125 flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
7126 common_n++;
7127 } else {
7128 /* Otherwise, if there more LSEs in base than are common between
7129 * base and flow then pop the topmost one. */
7130 ovs_be16 dl_type;
7131 /* If all the LSEs are to be popped and this is not the innermost
7132 * LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
7133 * POP_MPLS action instead of flow->dl_type.
7134 *
7135 * This is because the POP_MPLS action requires its ethertype
7136 * argument to be an MPLS ethernet type but in this case
7137 * flow->dl_type will be a non-MPLS ethernet type.
7138 *
7139 * When the final POP_MPLS action occurs it uses flow->dl_type, and
7140 * the resulting packet will have the desired dl_type. */
7141 if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
7142 dl_type = htons(ETH_TYPE_MPLS);
7143 } else {
7144 dl_type = flow->dl_type;
7145 }
7146 nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
7147 ovs_assert(flow_pop_mpls(base, base_n, flow->dl_type, NULL));
7148 base_n--;
7149 }
7150 }
7151
7152 /* If, after the above popping and setting, there are more LSEs in flow
7153 * than base then some LSEs need to be pushed. */
7154 while (base_n < flow_n) {
7155 struct ovs_action_push_mpls *mpls;
7156
7157 mpls = nl_msg_put_unspec_zero(odp_actions,
7158 OVS_ACTION_ATTR_PUSH_MPLS,
7159 sizeof *mpls);
7160 mpls->mpls_ethertype = flow->dl_type;
7161 mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
7162 /* Update base flow's MPLS stack, but do not clear L3. We need the L3
7163 * headers if the flow is restored later due to returning from a patch
7164 * port or group bucket. */
7165 flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL, false);
7166 flow_set_mpls_lse(base, 0, mpls->mpls_lse);
7167 base_n++;
7168 }
7169 }
7170
7171 static void
7172 get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
7173 {
7174 ipv4->ipv4_src = flow->nw_src;
7175 ipv4->ipv4_dst = flow->nw_dst;
7176 ipv4->ipv4_proto = flow->nw_proto;
7177 ipv4->ipv4_tos = flow->nw_tos;
7178 ipv4->ipv4_ttl = flow->nw_ttl;
7179 ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7180 }
7181
7182 static void
7183 put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
7184 {
7185 flow->nw_src = ipv4->ipv4_src;
7186 flow->nw_dst = ipv4->ipv4_dst;
7187 flow->nw_proto = ipv4->ipv4_proto;
7188 flow->nw_tos = ipv4->ipv4_tos;
7189 flow->nw_ttl = ipv4->ipv4_ttl;
7190 flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
7191 }
7192
7193 static void
7194 commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
7195 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7196 bool use_masked)
7197 {
7198 struct ovs_key_ipv4 key, mask, base;
7199
7200 /* Check that nw_proto and nw_frag remain unchanged. */
7201 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7202 flow->nw_frag == base_flow->nw_frag);
7203
7204 get_ipv4_key(flow, &key, false);
7205 get_ipv4_key(base_flow, &base, false);
7206 get_ipv4_key(&wc->masks, &mask, true);
7207 mask.ipv4_proto = 0; /* Not writable. */
7208 mask.ipv4_frag = 0; /* Not writable. */
7209
7210 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7211 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7212 mask.ipv4_tos &= ~IP_ECN_MASK;
7213 }
7214
7215 if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
7216 odp_actions)) {
7217 put_ipv4_key(&base, base_flow, false);
7218 if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
7219 put_ipv4_key(&mask, &wc->masks, true);
7220 }
7221 }
7222 }
7223
7224 static void
7225 get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
7226 {
7227 ipv6->ipv6_src = flow->ipv6_src;
7228 ipv6->ipv6_dst = flow->ipv6_dst;
7229 ipv6->ipv6_label = flow->ipv6_label;
7230 ipv6->ipv6_proto = flow->nw_proto;
7231 ipv6->ipv6_tclass = flow->nw_tos;
7232 ipv6->ipv6_hlimit = flow->nw_ttl;
7233 ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
7234 }
7235
7236 static void
7237 put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
7238 {
7239 flow->ipv6_src = ipv6->ipv6_src;
7240 flow->ipv6_dst = ipv6->ipv6_dst;
7241 flow->ipv6_label = ipv6->ipv6_label;
7242 flow->nw_proto = ipv6->ipv6_proto;
7243 flow->nw_tos = ipv6->ipv6_tclass;
7244 flow->nw_ttl = ipv6->ipv6_hlimit;
7245 flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
7246 }
7247
7248 static void
7249 commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
7250 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7251 bool use_masked)
7252 {
7253 struct ovs_key_ipv6 key, mask, base;
7254
7255 /* Check that nw_proto and nw_frag remain unchanged. */
7256 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
7257 flow->nw_frag == base_flow->nw_frag);
7258
7259 get_ipv6_key(flow, &key, false);
7260 get_ipv6_key(base_flow, &base, false);
7261 get_ipv6_key(&wc->masks, &mask, true);
7262 mask.ipv6_proto = 0; /* Not writable. */
7263 mask.ipv6_frag = 0; /* Not writable. */
7264
7265 if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
7266 ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
7267 mask.ipv6_tclass &= ~IP_ECN_MASK;
7268 }
7269
7270 if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
7271 odp_actions)) {
7272 put_ipv6_key(&base, base_flow, false);
7273 if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
7274 put_ipv6_key(&mask, &wc->masks, true);
7275 }
7276 }
7277 }
7278
7279 static void
7280 get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
7281 {
7282 /* ARP key has padding, clear it. */
7283 memset(arp, 0, sizeof *arp);
7284
7285 arp->arp_sip = flow->nw_src;
7286 arp->arp_tip = flow->nw_dst;
7287 arp->arp_op = htons(flow->nw_proto);
7288 arp->arp_sha = flow->arp_sha;
7289 arp->arp_tha = flow->arp_tha;
7290 }
7291
7292 static void
7293 put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
7294 {
7295 flow->nw_src = arp->arp_sip;
7296 flow->nw_dst = arp->arp_tip;
7297 flow->nw_proto = ntohs(arp->arp_op);
7298 flow->arp_sha = arp->arp_sha;
7299 flow->arp_tha = arp->arp_tha;
7300 }
7301
7302 static enum slow_path_reason
7303 commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
7304 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7305 {
7306 struct ovs_key_arp key, mask, base;
7307
7308 get_arp_key(flow, &key);
7309 get_arp_key(base_flow, &base);
7310 get_arp_key(&wc->masks, &mask);
7311
7312 if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
7313 odp_actions)) {
7314 put_arp_key(&base, base_flow);
7315 put_arp_key(&mask, &wc->masks);
7316 return SLOW_ACTION;
7317 }
7318 return 0;
7319 }
7320
7321 static void
7322 get_icmp_key(const struct flow *flow, struct ovs_key_icmp *icmp)
7323 {
7324 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
7325 icmp->icmp_type = ntohs(flow->tp_src);
7326 icmp->icmp_code = ntohs(flow->tp_dst);
7327 }
7328
7329 static void
7330 put_icmp_key(const struct ovs_key_icmp *icmp, struct flow *flow)
7331 {
7332 /* icmp_type and icmp_code are stored in tp_src and tp_dst, respectively */
7333 flow->tp_src = htons(icmp->icmp_type);
7334 flow->tp_dst = htons(icmp->icmp_code);
7335 }
7336
7337 static enum slow_path_reason
7338 commit_set_icmp_action(const struct flow *flow, struct flow *base_flow,
7339 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
7340 {
7341 struct ovs_key_icmp key, mask, base;
7342 enum ovs_key_attr attr;
7343
7344 if (is_icmpv4(flow, NULL)) {
7345 attr = OVS_KEY_ATTR_ICMP;
7346 } else if (is_icmpv6(flow, NULL)) {
7347 attr = OVS_KEY_ATTR_ICMPV6;
7348 } else {
7349 return 0;
7350 }
7351
7352 get_icmp_key(flow, &key);
7353 get_icmp_key(base_flow, &base);
7354 get_icmp_key(&wc->masks, &mask);
7355
7356 if (commit(attr, false, &key, &base, &mask, sizeof key, odp_actions)) {
7357 put_icmp_key(&base, base_flow);
7358 put_icmp_key(&mask, &wc->masks);
7359 return SLOW_ACTION;
7360 }
7361 return 0;
7362 }
7363
7364 static void
7365 get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
7366 {
7367 nd->nd_target = flow->nd_target;
7368 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
7369 nd->nd_sll = flow->arp_sha;
7370 nd->nd_tll = flow->arp_tha;
7371 }
7372
7373 static void
7374 put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
7375 {
7376 flow->nd_target = nd->nd_target;
7377 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
7378 flow->arp_sha = nd->nd_sll;
7379 flow->arp_tha = nd->nd_tll;
7380 }
7381
7382 static enum slow_path_reason
7383 commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
7384 struct ofpbuf *odp_actions,
7385 struct flow_wildcards *wc, bool use_masked)
7386 {
7387 struct ovs_key_nd key, mask, base;
7388
7389 get_nd_key(flow, &key);
7390 get_nd_key(base_flow, &base);
7391 get_nd_key(&wc->masks, &mask);
7392
7393 if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
7394 odp_actions)) {
7395 put_nd_key(&base, base_flow);
7396 put_nd_key(&mask, &wc->masks);
7397 return SLOW_ACTION;
7398 }
7399
7400 return 0;
7401 }
7402
7403 static enum slow_path_reason
7404 commit_set_nw_action(const struct flow *flow, struct flow *base,
7405 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7406 bool use_masked)
7407 {
7408 /* Check if 'flow' really has an L3 header. */
7409 if (!flow->nw_proto) {
7410 return 0;
7411 }
7412
7413 switch (ntohs(base->dl_type)) {
7414 case ETH_TYPE_IP:
7415 commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
7416 break;
7417
7418 case ETH_TYPE_IPV6:
7419 commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
7420 return commit_set_nd_action(flow, base, odp_actions, wc, use_masked);
7421
7422 case ETH_TYPE_ARP:
7423 return commit_set_arp_action(flow, base, odp_actions, wc);
7424 }
7425
7426 return 0;
7427 }
7428
7429 static inline void
7430 get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh, bool is_mask)
7431 {
7432 *nsh = flow->nsh;
7433 if (!is_mask) {
7434 if (nsh->mdtype != NSH_M_TYPE1) {
7435 memset(nsh->context, 0, sizeof(nsh->context));
7436 }
7437 }
7438 }
7439
7440 static inline void
7441 put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
7442 bool is_mask OVS_UNUSED)
7443 {
7444 flow->nsh = *nsh;
7445 if (flow->nsh.mdtype != NSH_M_TYPE1) {
7446 memset(flow->nsh.context, 0, sizeof(flow->nsh.context));
7447 }
7448 }
7449
7450 static bool
7451 commit_nsh(const struct ovs_key_nsh * flow_nsh, bool use_masked_set,
7452 const struct ovs_key_nsh *key, struct ovs_key_nsh *base,
7453 struct ovs_key_nsh *mask, size_t size,
7454 struct ofpbuf *odp_actions)
7455 {
7456 enum ovs_key_attr attr = OVS_KEY_ATTR_NSH;
7457
7458 if (memcmp(key, base, size) == 0) {
7459 /* Mask bits are set when we have either read or set the corresponding
7460 * values. Masked bits will be exact-matched, no need to set them
7461 * if the value did not actually change. */
7462 return false;
7463 }
7464
7465 bool fully_masked = odp_mask_is_exact(attr, mask, size);
7466
7467 if (use_masked_set && !fully_masked) {
7468 size_t nsh_key_ofs;
7469 struct ovs_nsh_key_base nsh_base;
7470 struct ovs_nsh_key_base nsh_base_mask;
7471 struct ovs_nsh_key_md1 md1;
7472 struct ovs_nsh_key_md1 md1_mask;
7473 size_t offset = nl_msg_start_nested(odp_actions,
7474 OVS_ACTION_ATTR_SET_MASKED);
7475
7476 nsh_base.flags = key->flags;
7477 nsh_base.ttl = key->ttl;
7478 nsh_base.mdtype = key->mdtype;
7479 nsh_base.np = key->np;
7480 nsh_base.path_hdr = key->path_hdr;
7481
7482 nsh_base_mask.flags = mask->flags;
7483 nsh_base_mask.ttl = mask->ttl;
7484 nsh_base_mask.mdtype = mask->mdtype;
7485 nsh_base_mask.np = mask->np;
7486 nsh_base_mask.path_hdr = mask->path_hdr;
7487
7488 /* OVS_KEY_ATTR_NSH keys */
7489 nsh_key_ofs = nl_msg_start_nested(odp_actions, OVS_KEY_ATTR_NSH);
7490
7491 /* put value and mask for OVS_NSH_KEY_ATTR_BASE */
7492 char *data = nl_msg_put_unspec_uninit(odp_actions,
7493 OVS_NSH_KEY_ATTR_BASE,
7494 2 * sizeof(nsh_base));
7495 const char *lkey = (char *)&nsh_base, *lmask = (char *)&nsh_base_mask;
7496 size_t lkey_size = sizeof(nsh_base);
7497
7498 while (lkey_size--) {
7499 *data++ = *lkey++ & *lmask++;
7500 }
7501 lmask = (char *)&nsh_base_mask;
7502 memcpy(data, lmask, sizeof(nsh_base_mask));
7503
7504 switch (key->mdtype) {
7505 case NSH_M_TYPE1:
7506 memcpy(md1.context, key->context, sizeof key->context);
7507 memcpy(md1_mask.context, mask->context, sizeof mask->context);
7508
7509 /* put value and mask for OVS_NSH_KEY_ATTR_MD1 */
7510 data = nl_msg_put_unspec_uninit(odp_actions,
7511 OVS_NSH_KEY_ATTR_MD1,
7512 2 * sizeof(md1));
7513 lkey = (char *)&md1;
7514 lmask = (char *)&md1_mask;
7515 lkey_size = sizeof(md1);
7516
7517 while (lkey_size--) {
7518 *data++ = *lkey++ & *lmask++;
7519 }
7520 lmask = (char *)&md1_mask;
7521 memcpy(data, lmask, sizeof(md1_mask));
7522 break;
7523 case NSH_M_TYPE2:
7524 default:
7525 /* No match support for other MD formats yet. */
7526 break;
7527 }
7528
7529 nl_msg_end_nested(odp_actions, nsh_key_ofs);
7530
7531 nl_msg_end_nested(odp_actions, offset);
7532 } else {
7533 if (!fully_masked) {
7534 memset(mask, 0xff, size);
7535 }
7536 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
7537 nsh_key_to_attr(odp_actions, flow_nsh, NULL, 0, false);
7538 nl_msg_end_nested(odp_actions, offset);
7539 }
7540 memcpy(base, key, size);
7541 return true;
7542 }
7543
7544 static void
7545 commit_set_nsh_action(const struct flow *flow, struct flow *base_flow,
7546 struct ofpbuf *odp_actions,
7547 struct flow_wildcards *wc,
7548 bool use_masked)
7549 {
7550 struct ovs_key_nsh key, mask, base;
7551
7552 if (flow->dl_type != htons(ETH_TYPE_NSH) ||
7553 !memcmp(&base_flow->nsh, &flow->nsh, sizeof base_flow->nsh)) {
7554 return;
7555 }
7556
7557 /* Check that mdtype and np remain unchanged. */
7558 ovs_assert(flow->nsh.mdtype == base_flow->nsh.mdtype &&
7559 flow->nsh.np == base_flow->nsh.np);
7560
7561 get_nsh_key(flow, &key, false);
7562 get_nsh_key(base_flow, &base, false);
7563 get_nsh_key(&wc->masks, &mask, true);
7564 mask.mdtype = 0; /* Not writable. */
7565 mask.np = 0; /* Not writable. */
7566
7567 if (commit_nsh(&base_flow->nsh, use_masked, &key, &base, &mask,
7568 sizeof key, odp_actions)) {
7569 put_nsh_key(&base, base_flow, false);
7570 if (mask.mdtype != 0) { /* Mask was changed by commit_nsh(). */
7571 put_nsh_key(&mask, &wc->masks, true);
7572 }
7573 }
7574 }
7575
7576 /* TCP, UDP, and SCTP keys have the same layout. */
7577 BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
7578 sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
7579
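/* Loads the transport ports from 'flow' into 'tp'.  The TCP member is used
 * for TCP, UDP, and SCTP alike; the BUILD_ASSERT_DECL above guarantees the
 * three key structs share the same layout. */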
7580 static void
7581 get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
7582 {
7583 tp->tcp.tcp_src = flow->tp_src;
7584 tp->tcp.tcp_dst = flow->tp_dst;
7585 }
7586
7587 static void
7588 put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
7589 {
7590 flow->tp_src = tp->tcp.tcp_src;
7591 flow->tp_dst = tp->tcp.tcp_dst;
7592 }
7593
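/* Appends a set action for the transport ports when they differ between
 * 'flow' and 'base_flow'.  Flows without an IP protocol, or with a protocol
 * other than TCP, UDP, or SCTP, are left untouched. */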
7594 static void
7595 commit_set_port_action(const struct flow *flow, struct flow *base_flow,
7596 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7597 bool use_masked)
7598 {
7599 enum ovs_key_attr key_type;
7600 union ovs_key_tp key, mask, base;
7601
7602 /* Check if 'flow' really has an L3 header. */
7603 if (!flow->nw_proto) {
7604 return;
7605 }
7606
7607 if (!is_ip_any(base_flow)) {
7608 return;
7609 }
7610
7611 if (flow->nw_proto == IPPROTO_TCP) {
7612 key_type = OVS_KEY_ATTR_TCP;
7613 } else if (flow->nw_proto == IPPROTO_UDP) {
7614 key_type = OVS_KEY_ATTR_UDP;
7615 } else if (flow->nw_proto == IPPROTO_SCTP) {
7616 key_type = OVS_KEY_ATTR_SCTP;
7617 } else {
7618 return;
7619 }
7620
7621 get_tp_key(flow, &key);
7622 get_tp_key(base_flow, &base);
7623 get_tp_key(&wc->masks, &mask);
7624
7625 if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
7626 odp_actions)) {
7627 put_tp_key(&base, base_flow);
7628 put_tp_key(&mask, &wc->masks);
7629 }
7630 }
7631
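/* Appends a set action for skb_priority when it differs between 'flow' and
 * 'base_flow', updating 'base_flow' and 'wc' on success.
 * commit_set_pkt_mark_action() below does the same for the packet mark. */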
7632 static void
7633 commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
7634 struct ofpbuf *odp_actions,
7635 struct flow_wildcards *wc,
7636 bool use_masked)
7637 {
7638 uint32_t key, mask, base;
7639
7640 key = flow->skb_priority;
7641 base = base_flow->skb_priority;
7642 mask = wc->masks.skb_priority;
7643
7644 if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
7645 sizeof key, odp_actions)) {
7646 base_flow->skb_priority = base;
7647 wc->masks.skb_priority = mask;
7648 }
7649 }
7650
7651 static void
7652 commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
7653 struct ofpbuf *odp_actions,
7654 struct flow_wildcards *wc,
7655 bool use_masked)
7656 {
7657 uint32_t key, mask, base;
7658
7659 key = flow->pkt_mark;
7660 base = base_flow->pkt_mark;
7661 mask = wc->masks.pkt_mark;
7662
7663 if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
7664 sizeof key, odp_actions)) {
7665 base_flow->pkt_mark = base;
7666 wc->masks.pkt_mark = mask;
7667 }
7668 }
7669
7670 static void
7671 odp_put_pop_nsh_action(struct ofpbuf *odp_actions)
7672 {
7673 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_NSH);
7674 }
7675
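/* Appends an OVS_ACTION_ATTR_PUSH_NSH action built from 'flow'.  For MD
 * type 2 the TLV metadata, if any, is taken from 'encap_data'; for MD type
 * 1 the context headers come from 'flow->nsh' itself. */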
7676 static void
7677 odp_put_push_nsh_action(struct ofpbuf *odp_actions,
7678 const struct flow *flow,
7679 struct ofpbuf *encap_data)
7680 {
7681 uint8_t *metadata = NULL;
7682 uint8_t md_size = 0;
7683
7684 switch (flow->nsh.mdtype) {
7685 case NSH_M_TYPE2:
7686 if (encap_data) {
7687 ovs_assert(encap_data->size < NSH_CTX_HDRS_MAX_LEN);
7688 metadata = encap_data->data;
7689 md_size = encap_data->size;
7690 } else {
7691 md_size = 0;
7692 }
7693 break;
7694 default:
7695 md_size = 0;
7696 break;
7697 }
7698 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_PUSH_NSH);
7699 nsh_key_to_attr(odp_actions, &flow->nsh, metadata, md_size, false);
7700 nl_msg_end_nested(odp_actions, offset);
7701 }
7702
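/* Emits push_eth/push_nsh or pop_eth/pop_nsh actions as needed when an
 * encap or decap is pending, or when 'flow' and 'base_flow' disagree on the
 * packet type, and updates 'base_flow' to match.  Decap cases other than
 * pop_eth require recirculation, so 'base_flow' is left unchanged for
 * those. */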
7703 static void
7704 commit_encap_decap_action(const struct flow *flow,
7705 struct flow *base_flow,
7706 struct ofpbuf *odp_actions,
7707 struct flow_wildcards *wc,
7708 bool pending_encap, bool pending_decap,
7709 struct ofpbuf *encap_data)
7710 {
7711 if (pending_encap) {
7712 switch (ntohl(flow->packet_type)) {
7713 case PT_ETH: {
7714 /* push_eth */
7715 odp_put_push_eth_action(odp_actions, &flow->dl_src,
7716 &flow->dl_dst);
7717 base_flow->packet_type = flow->packet_type;
7718 base_flow->dl_src = flow->dl_src;
7719 base_flow->dl_dst = flow->dl_dst;
7720 break;
7721 }
7722 case PT_NSH:
7723 /* push_nsh */
7724 odp_put_push_nsh_action(odp_actions, flow, encap_data);
7725 base_flow->packet_type = flow->packet_type;
7726 /* Update all packet headers in base_flow. */
7727 memcpy(&base_flow->dl_dst, &flow->dl_dst,
7728 sizeof(*flow) - offsetof(struct flow, dl_dst));
7729 break;
7730 default:
7731 /* Only the above protocols are supported for encap.
7732 * The check is done during action translation. */
7733 OVS_NOT_REACHED();
7734 }
7735 } else if (pending_decap || flow->packet_type != base_flow->packet_type) {
7736 /* This is an explicit or implicit decap case. */
7737 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE &&
7738 base_flow->packet_type == htonl(PT_ETH)) {
7739 /* Generate pop_eth and continue without recirculation. */
7740 odp_put_pop_eth_action(odp_actions);
7741 base_flow->packet_type = flow->packet_type;
7742 base_flow->dl_src = eth_addr_zero;
7743 base_flow->dl_dst = eth_addr_zero;
7744 } else {
7745 /* All other decap cases require recirculation.
7746 * No need to update the base flow here. */
7747 switch (ntohl(base_flow->packet_type)) {
7748 case PT_NSH:
7749 /* pop_nsh. */
7750 odp_put_pop_nsh_action(odp_actions);
7751 break;
7752 default:
7753 /* Checks are done during translation. */
7754 OVS_NOT_REACHED();
7755 }
7756 }
7757 }
7758
7759 wc->masks.packet_type = OVS_BE32_MAX;
7760 }
7761
7762 /* If any of the flow key data that ODP actions can modify are different in
7763 * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
7764 * key from 'base' into 'flow', and then changes 'base' the same way. Does not
7765 * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
7766 * in addition to this function if needed. Sets fields in 'wc' that are
7767 * used as part of the action.
7768 *
7769 * Returns a reason to force processing the flow's packets into the userspace
7770 * slow path, if there is one, otherwise 0. */
7771 enum slow_path_reason
7772 commit_odp_actions(const struct flow *flow, struct flow *base,
7773 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
7774 bool use_masked, bool pending_encap, bool pending_decap,
7775 struct ofpbuf *encap_data)
7776 {
7777 enum slow_path_reason slow1, slow2;
7778 bool mpls_done = false;
7779
7780 commit_encap_decap_action(flow, base, odp_actions, wc,
7781 pending_encap, pending_decap, encap_data);
7782 commit_set_ether_action(flow, base, odp_actions, wc, use_masked);
7783 /* Make the packet a non-MPLS packet before committing L3/L4 actions,
7784 * which would otherwise do nothing. */
7785 if (eth_type_mpls(base->dl_type) && !eth_type_mpls(flow->dl_type)) {
7786 commit_mpls_action(flow, base, odp_actions);
7787 mpls_done = true;
7788 }
7789 commit_set_nsh_action(flow, base, odp_actions, wc, use_masked);
7790 slow1 = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
7791 commit_set_port_action(flow, base, odp_actions, wc, use_masked);
7792 slow2 = commit_set_icmp_action(flow, base, odp_actions, wc);
7793 if (!mpls_done) {
7794 commit_mpls_action(flow, base, odp_actions);
7795 }
7796 commit_vlan_action(flow, base, odp_actions, wc);
7797 commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
7798 commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);
7799
7800 return slow1 ? slow1 : slow2;
7801 }
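/* A minimal usage sketch for the function above (a hypothetical caller;
 * the surrounding setup is illustrative, not part of this file):
 *
 *   struct ofpbuf odp_actions;
 *   ofpbuf_init(&odp_actions, 64);
 *   enum slow_path_reason slow =
 *       commit_odp_actions(&flow, &base, &odp_actions, &wc,
 *                          true, false, false, NULL);
 *   if (slow) {
 *       ...handle slow-path processing...
 *   }
 *   ofpbuf_uninit(&odp_actions);
 */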