lib/nx-match.c
1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "nx-match.h"
20
21 #include <netinet/icmp6.h>
22
23 #include "classifier.h"
24 #include "colors.h"
25 #include "openvswitch/hmap.h"
26 #include "openflow/nicira-ext.h"
27 #include "openvswitch/dynamic-string.h"
28 #include "openvswitch/meta-flow.h"
29 #include "openvswitch/ofp-actions.h"
30 #include "openvswitch/ofp-errors.h"
31 #include "openvswitch/ofp-match.h"
32 #include "openvswitch/ofp-port.h"
33 #include "openvswitch/ofpbuf.h"
34 #include "openvswitch/vlog.h"
35 #include "packets.h"
36 #include "openvswitch/shash.h"
37 #include "tun-metadata.h"
38 #include "unaligned.h"
39 #include "util.h"
40 #include "vl-mff-map.h"
41
42 VLOG_DEFINE_THIS_MODULE(nx_match);
43
44 /* OXM headers.
45 *
46 *
47 * Standard OXM/NXM
48 * ================
49 *
50 * The header is 32 bits long. It looks like this:
51 *
52 * |31                              16 15            9| 8 7                0
53 * +----------------------------------+---------------+--+------------------+
54 * |            oxm_class             |   oxm_field   |hm|    oxm_length    |
55 * +----------------------------------+---------------+--+------------------+
56 *
57 * where hm stands for oxm_hasmask. It is followed by oxm_length bytes of
58 * payload. When oxm_hasmask is 0, the payload is the value of the field
59 * identified by the header; when oxm_hasmask is 1, the payload is a value for
60 * the field followed by a mask of equal length.
61 *
62 * Internally, we represent a standard OXM header as a 64-bit integer with the
63 * above information in the most-significant bits.
64 *
65 *
66 * Experimenter OXM
67 * ================
68 *
69 * The header is 64 bits long. It looks like the diagram above except that a
70 * 32-bit experimenter ID, which we call oxm_vendor and which identifies a
71 * vendor, is inserted just before the payload. Experimenter OXMs are
72 * identified by an all-1-bits oxm_class (OFPXMC12_EXPERIMENTER). The
73 * oxm_length value *includes* the experimenter ID, so that the real payload is
74 * only oxm_length - 4 bytes long.
75 *
76 * Internally, we represent an experimenter OXM header as a 64-bit integer with
77 * the standard header in the upper 32 bits and the experimenter ID in the
78 * lower 32 bits. (It would be more convenient to swap the positions of the
79 * two 32-bit words, but this would be more error-prone because experimenter
80 * OXMs are very rarely used, so accidentally passing one through a 32-bit type
81 * somewhere in the OVS code would be hard to find.)
82 */
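
/* Worked example (illustrative; it assumes the standard OpenFlow basic-class
 * numbering in which IPV4_SRC is field 11): OXM_OF_IPV4_SRC has
 * oxm_class=0x8000, oxm_field=11, oxm_hasmask=0 and oxm_length=4, so its
 * 32-bit wire header is (0x8000 << 16) | (11 << 9) | (0 << 8) | 4
 * = 0x80001604, represented internally as 0x8000160400000000.  The masked
 * form sets the hasmask bit and doubles oxm_length to 8, giving 0x80001708. */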
83
84 /*
85 * OXM Class IDs.
86 * The high order bit differentiates reserved classes from member classes.
87 * Classes 0x0000 to 0x7FFF are member classes, allocated by ONF.
88 * Classes 0x8000 to 0xFFFE are reserved classes, reserved for standardisation.
89 */
90 enum ofp12_oxm_class {
91 OFPXMC12_NXM_0 = 0x0000, /* Backward compatibility with NXM */
92 OFPXMC12_NXM_1 = 0x0001, /* Backward compatibility with NXM */
93 OFPXMC12_OPENFLOW_BASIC = 0x8000, /* Basic class for OpenFlow */
94 OFPXMC15_PACKET_REGS = 0x8001, /* Packet registers (pipeline fields). */
95 OFPXMC12_EXPERIMENTER = 0xffff, /* Experimenter class */
96 };
97
98 /* Functions for extracting raw field values from OXM/NXM headers. */
99 static uint32_t nxm_vendor(uint64_t header) { return header; }
100 static int nxm_class(uint64_t header) { return header >> 48; }
101 static int nxm_field(uint64_t header) { return (header >> 41) & 0x7f; }
102 static bool nxm_hasmask(uint64_t header) { return (header >> 40) & 1; }
103 static int nxm_length(uint64_t header) { return (header >> 32) & 0xff; }
104 static uint64_t nxm_no_len(uint64_t header) { return header & 0xffffff80ffffffffULL; }
105
106 static bool
107 is_experimenter_oxm(uint64_t header)
108 {
109 return nxm_class(header) == OFPXMC12_EXPERIMENTER;
110 }
111
112 /* The OXM header "length" field is somewhat tricky:
113 *
114 * - For a standard OXM header, the length is the number of bytes of the
115 * payload, and the payload consists of just the value (and mask, if
116 * present).
117 *
118 * - For an experimenter OXM header, the length is the number of bytes in
119 * the payload plus 4 (the length of the experimenter ID). That is, the
120 * experimenter ID is included in oxm_length.
121 *
122 * This function returns the length of the experimenter ID field in 'header'.
123 * That is, for an experimenter OXM (when an experimenter ID is present), it
124 * returns 4, and for a standard OXM (when no experimenter ID is present), it
125 * returns 0. */
126 static int
127 nxm_experimenter_len(uint64_t header)
128 {
129 return is_experimenter_oxm(header) ? 4 : 0;
130 }
131
132 /* Returns the number of bytes that follow the header for an NXM/OXM entry
133 * with the given 'header'. */
134 static int
135 nxm_payload_len(uint64_t header)
136 {
137 return nxm_length(header) - nxm_experimenter_len(header);
138 }
139
140 /* Returns the number of bytes in the header for an NXM/OXM entry with the
141 * given 'header'. */
142 static int
143 nxm_header_len(uint64_t header)
144 {
145 return 4 + nxm_experimenter_len(header);
146 }
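
/* Example of the length accounting (illustrative): a masked experimenter OXM
 * carrying a 6-byte value has oxm_length = 4 (experimenter ID) + 6 (value)
 * + 6 (mask) = 16.  For that header, nxm_payload_len() returns 12,
 * nxm_header_len() returns 8, and nxm_field_bytes() (further below) returns
 * 6.  The unmasked standard-OXM equivalent has oxm_length = 6 and a 4-byte
 * header. */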
147
148 #define NXM_HEADER(VENDOR, CLASS, FIELD, HASMASK, LENGTH) \
149 (((uint64_t) (CLASS) << 48) | \
150 ((uint64_t) (FIELD) << 41) | \
151 ((uint64_t) (HASMASK) << 40) | \
152 ((uint64_t) (LENGTH) << 32) | \
153 (VENDOR))
154
155 #define NXM_HEADER_FMT "%#"PRIx32":%d:%d:%d:%d"
156 #define NXM_HEADER_ARGS(HEADER) \
157 nxm_vendor(HEADER), nxm_class(HEADER), nxm_field(HEADER), \
158 nxm_hasmask(HEADER), nxm_length(HEADER)
159
160 /* Functions for turning the "hasmask" bit on or off. (This also requires
161 * adjusting the length.) */
162 static uint64_t
163 nxm_make_exact_header(uint64_t header)
164 {
165 int new_len = nxm_payload_len(header) / 2 + nxm_experimenter_len(header);
166 return NXM_HEADER(nxm_vendor(header), nxm_class(header),
167 nxm_field(header), 0, new_len);
168 }
169 static uint64_t
170 nxm_make_wild_header(uint64_t header)
171 {
172 int new_len = nxm_payload_len(header) * 2 + nxm_experimenter_len(header);
173 return NXM_HEADER(nxm_vendor(header), nxm_class(header),
174 nxm_field(header), 1, new_len);
175 }
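
/* For instance (illustrative), a standard 4-byte exact-match header has
 * oxm_length = 4; nxm_make_wild_header() sets the hasmask bit and doubles the
 * payload, yielding oxm_length = 8 (value plus mask), and
 * nxm_make_exact_header() reverses the transformation.  For experimenter OXMs
 * only the payload part is doubled or halved; the 4-byte experimenter ID is
 * counted once either way. */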
176
177 /* Flow cookie.
178 *
179 * This may be used to gain the OpenFlow 1.1-like ability to restrict
180 * certain NXM-based Flow Mod and Flow Stats Request messages to flows
181 * with specific cookies. See the "nx_flow_mod" and "nx_flow_stats_request"
182 * structure definitions for more details. This match is otherwise not
183 * allowed. */
184 #define NXM_NX_COOKIE NXM_HEADER (0, 0x0001, 30, 0, 8)
185 #define NXM_NX_COOKIE_W nxm_make_wild_header(NXM_NX_COOKIE)
186
187 struct nxm_field {
188 uint64_t header;
189 enum ofp_version version;
190 const char *name; /* e.g. "NXM_OF_IN_PORT". */
191
192 enum mf_field_id id;
193 };
194
195 static const struct nxm_field *nxm_field_by_header(uint64_t header);
196 static const struct nxm_field *nxm_field_by_name(const char *name, size_t len);
197 static const struct nxm_field *nxm_field_by_mf_id(enum mf_field_id,
198 enum ofp_version);
199
200 static void nx_put_header__(struct ofpbuf *, uint64_t header, bool masked);
201 static void nx_put_header_len(struct ofpbuf *, enum mf_field_id field,
202 enum ofp_version version, bool masked,
203 size_t n_bytes);
204
205 /* Rate limit for nx_match parse errors. These always indicate a bug in the
206 * peer and so there's not much point in showing a lot of them. */
207 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
208
209 static const struct nxm_field *
210 mf_parse_subfield_name(const char *name, int name_len, bool *wild);
211
212 /* Returns the preferred OXM header to use for field 'id' in OpenFlow version
213 * 'version'. Specify 0 for 'version' if an NXM legacy header should be
214 * preferred over any standardized OXM header. Returns 0 if field 'id' cannot
215 * be expressed in NXM or OXM. */
216 static uint64_t
217 mf_oxm_header(enum mf_field_id id, enum ofp_version version)
218 {
219 const struct nxm_field *f = nxm_field_by_mf_id(id, version);
220 return f ? f->header : 0;
221 }
222
223 /* Returns the 32-bit OXM or NXM header to use for field 'id', preferring an
224 * NXM legacy header over any standardized OXM header. Returns 0 if field 'id'
225 * cannot be expressed with a 32-bit NXM or OXM header.
226 *
227 * Whenever possible, use nx_pull_header() instead of this function, because
228 * this function cannot support 64-bit experimenter OXM headers. */
229 uint32_t
230 mf_nxm_header(enum mf_field_id id)
231 {
232 uint64_t oxm = mf_oxm_header(id, 0);
233 return is_experimenter_oxm(oxm) ? 0 : oxm >> 32;
234 }
235
236 /* Returns the 32-bit OXM or NXM header to use for field 'mff'. If 'mff' is
237 * a mapped variable length mf_field, update the header with the configured
238 * length of 'mff'. Returns 0 if 'mff' cannot be expressed with a 32-bit NXM
239 * or OXM header. */
240 uint32_t
241 nxm_header_from_mff(const struct mf_field *mff)
242 {
243 uint64_t oxm = mf_oxm_header(mff->id, 0);
244
245 if (mff->mapped) {
246 oxm = nxm_no_len(oxm) | ((uint64_t) mff->n_bytes << 32);
247 }
248
249 return is_experimenter_oxm(oxm) ? 0 : oxm >> 32;
250 }
251
252 static const struct mf_field *
253 mf_from_oxm_header(uint64_t header, const struct vl_mff_map *vl_mff_map)
254 {
255 const struct nxm_field *f = nxm_field_by_header(header);
256
257 if (f) {
258 const struct mf_field *mff = mf_from_id(f->id);
259 const struct mf_field *vl_mff = mf_get_vl_mff(mff, vl_mff_map);
260 return vl_mff ? vl_mff : mff;
261 } else {
262 return NULL;
263 }
264 }
265
266 /* Returns the "struct mf_field" that corresponds to NXM or OXM header
267 * 'header', or NULL if 'header' doesn't correspond to any known field. */
268 const struct mf_field *
269 mf_from_nxm_header(uint32_t header, const struct vl_mff_map *vl_mff_map)
270 {
271 return mf_from_oxm_header((uint64_t) header << 32, vl_mff_map);
272 }
273
274 /* Returns the width of the data for a field with the given 'header', in
275 * bytes. */
276 static int
277 nxm_field_bytes(uint64_t header)
278 {
279 unsigned int length = nxm_payload_len(header);
280 return nxm_hasmask(header) ? length / 2 : length;
281 }
282 \f
283 /* nx_pull_match() and helpers. */
284
285 /* Given NXM/OXM value 'value' and mask 'mask' associated with 'header', checks
286 * for any 1-bit in the value where there is a 0-bit in the mask. Returns false
287 * (after logging a rate-limited warning) if any such bit is found, otherwise true. */
288 static bool
289 is_mask_consistent(uint64_t header, const uint8_t *value, const uint8_t *mask)
290 {
291 unsigned int width = nxm_field_bytes(header);
292 unsigned int i;
293
294 for (i = 0; i < width; i++) {
295 if (value[i] & ~mask[i]) {
296 if (!VLOG_DROP_WARN(&rl)) {
297 VLOG_WARN_RL(&rl, "Rejecting NXM/OXM entry "NXM_HEADER_FMT " "
298 "with 1-bits in value for bits wildcarded by the "
299 "mask.", NXM_HEADER_ARGS(header));
300 }
301 return false;
302 }
303 }
304 return true;
305 }
306
307 static bool
308 is_cookie_pseudoheader(uint64_t header)
309 {
310 return header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W;
311 }
312
313 static enum ofperr
314 nx_pull_header__(struct ofpbuf *b, bool allow_cookie,
315 const struct vl_mff_map *vl_mff_map, uint64_t *header,
316 const struct mf_field **field)
317 {
318 if (b->size < 4) {
319 goto bad_len;
320 }
321
322 *header = ((uint64_t) ntohl(get_unaligned_be32(b->data))) << 32;
323 if (is_experimenter_oxm(*header)) {
324 if (b->size < 8) {
325 goto bad_len;
326 }
327 *header = ntohll(get_unaligned_be64(b->data));
328 }
329 if (nxm_length(*header) < nxm_experimenter_len(*header)) {
330 VLOG_WARN_RL(&rl, "OXM header "NXM_HEADER_FMT" has invalid length %d "
331 "(minimum is %d)",
332 NXM_HEADER_ARGS(*header), nxm_length(*header),
333 nxm_header_len(*header));
334 goto error;
335 }
336 ofpbuf_pull(b, nxm_header_len(*header));
337
338 if (field) {
339 *field = mf_from_oxm_header(*header, vl_mff_map);
340 if (!*field && !(allow_cookie && is_cookie_pseudoheader(*header))) {
341 VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" is unknown",
342 NXM_HEADER_ARGS(*header));
343 return OFPERR_OFPBMC_BAD_FIELD;
344 } else if (mf_vl_mff_invalid(*field, vl_mff_map)) {
345 return OFPERR_NXFMFC_INVALID_TLV_FIELD;
346 }
347 }
348
349 return 0;
350
351 bad_len:
352 VLOG_DBG_RL(&rl, "encountered partial (%"PRIu32"-byte) OXM entry",
353 b->size);
354 error:
355 *header = 0;
356 if (field) {
357 *field = NULL;
358 }
359 return OFPERR_OFPBMC_BAD_LEN;
360 }
361
362 static void
363 copy_entry_value(const struct mf_field *field, union mf_value *value,
364 const uint8_t *payload, int width)
365 {
366 int copy_len;
367 void *copy_dst;
368
369 copy_dst = value;
370 copy_len = MIN(width, field ? field->n_bytes : sizeof *value);
371
372 if (field && field->variable_len) {
373 memset(value, 0, field->n_bytes);
374 copy_dst = &value->u8 + field->n_bytes - copy_len;
375 }
376
377 memcpy(copy_dst, payload, copy_len);
378 }
379
380 static enum ofperr
381 nx_pull_entry__(struct ofpbuf *b, bool allow_cookie,
382 const struct vl_mff_map *vl_mff_map, uint64_t *header,
383 const struct mf_field **field_,
384 union mf_value *value, union mf_value *mask)
385 {
386 const struct mf_field *field;
387 enum ofperr header_error;
388 unsigned int payload_len;
389 const uint8_t *payload;
390 int width;
391
392 header_error = nx_pull_header__(b, allow_cookie, vl_mff_map, header,
393 &field);
394 if (header_error && header_error != OFPERR_OFPBMC_BAD_FIELD) {
395 return header_error;
396 }
397
398 payload_len = nxm_payload_len(*header);
399 payload = ofpbuf_try_pull(b, payload_len);
400 if (!payload) {
401 VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" calls for %u-byte "
402 "payload but only %"PRIu32" bytes follow OXM header",
403 NXM_HEADER_ARGS(*header), payload_len, b->size);
404 return OFPERR_OFPBMC_BAD_LEN;
405 }
406
407 width = nxm_field_bytes(*header);
408 if (nxm_hasmask(*header)
409 && !is_mask_consistent(*header, payload, payload + width)) {
410 return OFPERR_OFPBMC_BAD_WILDCARDS;
411 }
412
413 copy_entry_value(field, value, payload, width);
414
415 if (mask) {
416 if (nxm_hasmask(*header)) {
417 copy_entry_value(field, mask, payload + width, width);
418 } else {
419 memset(mask, 0xff, sizeof *mask);
420 }
421 } else if (nxm_hasmask(*header)) {
422 VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" includes mask but "
423 "masked OXMs are not allowed here",
424 NXM_HEADER_ARGS(*header));
425 return OFPERR_OFPBMC_BAD_MASK;
426 }
427
428 if (field_) {
429 *field_ = field;
430 return header_error;
431 }
432
433 return 0;
434 }
435
436 /* Attempts to pull an NXM or OXM header, value, and mask (if present) from the
437 * beginning of 'b'. If successful, stores a pointer to the "struct mf_field"
438 * corresponding to the pulled header in '*field', the value into '*value',
439 * and the mask into '*mask', and returns 0. On error, returns an OpenFlow
440 * error; in this case, some bytes might have been pulled off 'b' anyhow, and
441 * the output parameters might have been modified.
442 *
443 * If a NULL 'mask' is supplied, masked OXM or NXM entries are treated as
444 * errors (with OFPERR_OFPBMC_BAD_MASK).
445 */
446 enum ofperr
447 nx_pull_entry(struct ofpbuf *b, const struct vl_mff_map *vl_mff_map,
448 const struct mf_field **field, union mf_value *value,
449 union mf_value *mask)
450 {
451 uint64_t header;
452
453 return nx_pull_entry__(b, false, vl_mff_map, &header, field, value, mask);
454 }
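
/* Usage sketch (illustrative only; 'data' and 'len' are hypothetical, and this
 * mirrors what nx_pull_raw() further below does with fuller error handling):
 *
 *     struct ofpbuf b = ofpbuf_const_initializer(data, len);
 *     while (b.size) {
 *         const struct mf_field *field;
 *         union mf_value value, mask;
 *
 *         enum ofperr error = nx_pull_entry(&b, NULL, &field, &value, &mask);
 *         if (error) {
 *             break;
 *         }
 *         ... use 'field', 'value', and 'mask' ...
 *     }
 *
 * Passing NULL for 'vl_mff_map' makes variable-length fields fall back to
 * their default, maximum-length definitions. */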
455
456 /* Attempts to pull an NXM or OXM header from the beginning of 'b'. If
457 * successful, stores a pointer to the "struct mf_field" corresponding to the
458 * pulled header in '*field', stores the header's hasmask bit in '*masked'
459 * (true if hasmask=1, false if hasmask=0), and returns 0. On error, returns
460 * an OpenFlow error; in this case, some bytes might have been pulled off 'b'
461 * anyhow, and the output parameters might have been modified.
462 *
463 * If NULL 'masked' is supplied, masked OXM or NXM headers are treated as
464 * errors (with OFPERR_OFPBMC_BAD_MASK).
465 */
466 enum ofperr
467 nx_pull_header(struct ofpbuf *b, const struct vl_mff_map *vl_mff_map,
468 const struct mf_field **field, bool *masked)
469 {
470 enum ofperr error;
471 uint64_t header;
472
473 error = nx_pull_header__(b, false, vl_mff_map, &header, field);
474 if (masked) {
475 *masked = !error && nxm_hasmask(header);
476 } else if (!error && nxm_hasmask(header)) {
477 error = OFPERR_OFPBMC_BAD_MASK;
478 }
479 return error;
480 }
481
482 static enum ofperr
483 nx_pull_match_entry(struct ofpbuf *b, bool allow_cookie,
484 const struct vl_mff_map *vl_mff_map,
485 const struct mf_field **field,
486 union mf_value *value, union mf_value *mask)
487 {
488 enum ofperr error;
489 uint64_t header;
490
491 error = nx_pull_entry__(b, allow_cookie, vl_mff_map, &header, field, value,
492 mask);
493 if (error) {
494 return error;
495 }
496 if (field && *field) {
497 if (!mf_is_mask_valid(*field, mask)) {
498 VLOG_DBG_RL(&rl, "bad mask for field %s", (*field)->name);
499 return OFPERR_OFPBMC_BAD_MASK;
500 }
501 if (!mf_is_value_valid(*field, value)) {
502 VLOG_DBG_RL(&rl, "bad value for field %s", (*field)->name);
503 return OFPERR_OFPBMC_BAD_VALUE;
504 }
505 }
506 return 0;
507 }
508
509 /* Prerequisites will only be checked when 'strict' is 'true'. This allows
510 * the conntrack original direction 5-tuple IP addresses to be decoded without
511 * the ethertype being present, e.g. when only metadata is being decoded. */
512 static enum ofperr
513 nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
514 bool pipeline_fields_only, struct match *match, ovs_be64 *cookie,
515 ovs_be64 *cookie_mask, const struct tun_table *tun_table,
516 const struct vl_mff_map *vl_mff_map)
517 {
518 ovs_assert((cookie != NULL) == (cookie_mask != NULL));
519
520 match_init_catchall(match);
521 match->flow.tunnel.metadata.tab = tun_table;
522 if (cookie) {
523 *cookie = *cookie_mask = htonll(0);
524 }
525
526 struct ofpbuf b = ofpbuf_const_initializer(p, match_len);
527 while (b.size) {
528 const uint8_t *pos = b.data;
529 const struct mf_field *field;
530 union mf_value value;
531 union mf_value mask;
532 enum ofperr error;
533
534 error = nx_pull_match_entry(&b, cookie != NULL, vl_mff_map, &field,
535 &value, &mask);
536 if (error) {
537 if (error == OFPERR_OFPBMC_BAD_FIELD && !strict) {
538 continue;
539 }
540 } else if (!field) {
541 if (!cookie) {
542 error = OFPERR_OFPBMC_BAD_FIELD;
543 } else if (*cookie_mask) {
544 error = OFPERR_OFPBMC_DUP_FIELD;
545 } else {
546 *cookie = value.be64;
547 *cookie_mask = mask.be64;
548 }
549 } else if (strict && !mf_are_match_prereqs_ok(field, match)) {
550 error = OFPERR_OFPBMC_BAD_PREREQ;
551 } else if (!mf_is_all_wild(field, &match->wc)) {
552 error = OFPERR_OFPBMC_DUP_FIELD;
553 } else if (pipeline_fields_only && !mf_is_pipeline_field(field)) {
554 error = OFPERR_OFPBRC_PIPELINE_FIELDS_ONLY;
555 } else {
556 char *err_str;
557
558 mf_set(field, &value, &mask, match, &err_str);
559 if (err_str) {
560 VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
561 "within match (%s)", pos - p, err_str);
562 free(err_str);
563 return OFPERR_OFPBMC_BAD_VALUE;
564 }
565
566 match_add_ethernet_prereq(match, field);
567 }
568
569 if (error) {
570 VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
571 "within match (%s)", pos -
572 p, ofperr_to_string(error));
573 return error;
574 }
575 }
576
577 match->flow.tunnel.metadata.tab = NULL;
578 return 0;
579 }
580
581 static enum ofperr
582 nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
583 bool pipeline_fields_only, struct match *match,
584 ovs_be64 *cookie, ovs_be64 *cookie_mask,
585 const struct tun_table *tun_table,
586 const struct vl_mff_map *vl_mff_map)
587 {
588 uint8_t *p = NULL;
589
590 if (match_len) {
591 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
592 if (!p) {
593 VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
594 "multiple of 8, is longer than space in message (max "
595 "length %"PRIu32")", match_len, b->size);
596 return OFPERR_OFPBMC_BAD_LEN;
597 }
598 }
599
600 return nx_pull_raw(p, match_len, strict, pipeline_fields_only, match,
601 cookie, cookie_mask, tun_table, vl_mff_map);
602 }
603
604 /* Parses the nx_match formatted match description in 'b' with length
605 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
606 * are valid pointers, then stores the cookie and mask in them if 'b' contains
607 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
608 * If 'pipeline_fields_only' is true, this function returns
609 * OFPERR_OFPBRC_PIPELINE_FIELDS_ONLY if there are any non-pipeline fields
610 * in 'b'.
611 *
612 * 'vl_mff_map' is an optional parameter that is used to validate the length
613 * of variable length mf_fields in 'match'. If it is not provided, the
614 * default mf_fields with maximum length will be used.
615 *
616 * Fails with an error upon encountering an unknown NXM header.
617 *
618 * Returns 0 if successful, otherwise an OpenFlow error code. */
619 enum ofperr
620 nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
621 ovs_be64 *cookie, ovs_be64 *cookie_mask,
622 bool pipeline_fields_only, const struct tun_table *tun_table,
623 const struct vl_mff_map *vl_mff_map)
624 {
625 return nx_pull_match__(b, match_len, true, pipeline_fields_only, match,
626 cookie, cookie_mask, tun_table, vl_mff_map);
627 }
628
629 /* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
630 * instead of failing with an error, and does not check for field
631 * prerequisites. */
632 enum ofperr
633 nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
634 struct match *match, ovs_be64 *cookie,
635 ovs_be64 *cookie_mask, bool pipeline_fields_only,
636 const struct tun_table *tun_table)
637 {
638 return nx_pull_match__(b, match_len, false, pipeline_fields_only, match,
639 cookie, cookie_mask, tun_table, NULL);
640 }
641
642 static enum ofperr
643 oxm_pull_match__(struct ofpbuf *b, bool strict, bool pipeline_fields_only,
644 const struct tun_table *tun_table,
645 const struct vl_mff_map *vl_mff_map, struct match *match)
646 {
647 struct ofp11_match_header *omh = b->data;
648 uint8_t *p;
649 uint16_t match_len;
650
651 if (b->size < sizeof *omh) {
652 return OFPERR_OFPBMC_BAD_LEN;
653 }
654
655 match_len = ntohs(omh->length);
656 if (match_len < sizeof *omh) {
657 return OFPERR_OFPBMC_BAD_LEN;
658 }
659
660 if (omh->type != htons(OFPMT_OXM)) {
661 return OFPERR_OFPBMC_BAD_TYPE;
662 }
663
664 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
665 if (!p) {
666 VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
667 "multiple of 8, is longer than space in message (max "
668 "length %"PRIu32")", match_len, b->size);
669 return OFPERR_OFPBMC_BAD_LEN;
670 }
671
672 return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
673 strict, pipeline_fields_only, match, NULL, NULL,
674 tun_table, vl_mff_map);
675 }
676
677 /* Parses the oxm formatted match description preceded by a struct
678 * ofp11_match_header in 'b'. Stores the result in 'match'.
679 * If 'pipeline_fields_only' is true, this function returns
680 * OFPERR_OFPBRC_PIPELINE_FIELDS_ONLY if there are any non-pipeline fields
681 * in 'b'.
682 *
683 * 'vl_mff_map' is an optional parameter that is used to validate the length
684 * of variable length mf_fields in 'match'. If it is not provided, the
685 * default mf_fields with maximum length will be used.
686 *
687 * Fails with an error when encountering unknown OXM headers.
688 *
689 * Returns 0 if successful, otherwise an OpenFlow error code. */
690 enum ofperr
691 oxm_pull_match(struct ofpbuf *b, bool pipeline_fields_only,
692 const struct tun_table *tun_table,
693 const struct vl_mff_map *vl_mff_map, struct match *match)
694 {
695 return oxm_pull_match__(b, true, pipeline_fields_only, tun_table,
696 vl_mff_map, match);
697 }
698
699 /* Behaves the same as oxm_pull_match() with two exceptions. Skips over
700 * unknown OXM headers instead of failing with an error when they are
701 * encountered, and does not check for field prerequisites. */
702 enum ofperr
703 oxm_pull_match_loose(struct ofpbuf *b, bool pipeline_fields_only,
704 const struct tun_table *tun_table, struct match *match)
705 {
706 return oxm_pull_match__(b, false, pipeline_fields_only, tun_table, NULL,
707 match);
708 }
709
710 /* Parses the OXM match description in the 'oxm_len' bytes in 'oxm'. Stores
711 * the result in 'match'.
712 *
713 * Returns 0 if successful, otherwise an OpenFlow error code.
714 *
715 * If 'loose' is true, unknown OXM headers and missing field prerequisites
716 * are not treated as error conditions.
717 */
718 enum ofperr
719 oxm_decode_match(const void *oxm, size_t oxm_len, bool loose,
720 const struct tun_table *tun_table,
721 const struct vl_mff_map *vl_mff_map, struct match *match)
722 {
723 return nx_pull_raw(oxm, oxm_len, !loose, false, match, NULL, NULL,
724 tun_table, vl_mff_map);
725 }
726
727 /* Verifies an array of OXM TLVs, treating the value of each TLV as a mask,
728 * disallowing masks within the TLVs themselves and ignoring prerequisites. */
729 enum ofperr
730 oxm_pull_field_array(const void *fields_data, size_t fields_len,
731 struct field_array *fa)
732 {
733 struct ofpbuf b = ofpbuf_const_initializer(fields_data, fields_len);
734 while (b.size) {
735 const uint8_t *pos = b.data;
736 const struct mf_field *field;
737 union mf_value value;
738 enum ofperr error;
739 uint64_t header;
740
741 error = nx_pull_entry__(&b, false, NULL, &header, &field, &value,
742 NULL);
743 if (error) {
744 VLOG_DBG_RL(&rl, "error pulling field array field");
745 return error;
746 } else if (!field) {
747 VLOG_DBG_RL(&rl, "unknown field array field");
748 error = OFPERR_OFPBMC_BAD_FIELD;
749 } else if (bitmap_is_set(fa->used.bm, field->id)) {
750 VLOG_DBG_RL(&rl, "duplicate field array field '%s'", field->name);
751 error = OFPERR_OFPBMC_DUP_FIELD;
752 } else if (!mf_is_mask_valid(field, &value)) {
753 VLOG_DBG_RL(&rl, "bad mask in field array field '%s'", field->name);
754 return OFPERR_OFPBMC_BAD_MASK;
755 } else {
756 field_array_set(field->id, &value, fa);
757 }
758
759 if (error) {
760 const uint8_t *start = fields_data;
761
762 VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
763 "within field array (%s)", pos - start,
764 ofperr_to_string(error));
765 return error;
766 }
767 }
768
769 return 0;
770 }
771 \f
772 /* nx_put_match() and helpers.
773 *
774 * 'put' functions whose names end in 'w' add a wildcarded field.
775 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
776 * Other 'put' functions add exact-match fields.
777 */
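
/* For example (see nxm_put() below): nxm_put_32() always emits an exact-match
 * entry, whereas nxm_put_32m() consults the mask first, emitting nothing when
 * the mask is all-zeros, an exact-match entry when it is all-ones, and a
 * masked entry otherwise. */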
778
779 struct nxm_put_ctx {
780 struct ofpbuf *output;
781 bool implied_ethernet;
782 };
783
784 void
785 nxm_put_entry_raw(struct ofpbuf *b,
786 enum mf_field_id field, enum ofp_version version,
787 const void *value, const void *mask, size_t n_bytes)
788 {
789 nx_put_header_len(b, field, version, !!mask, n_bytes);
790 ofpbuf_put(b, value, n_bytes);
791 if (mask) {
792 ofpbuf_put(b, mask, n_bytes);
793 }
794 }
795
796 static void
797 nxm_put__(struct nxm_put_ctx *ctx,
798 enum mf_field_id field, enum ofp_version version,
799 const void *value, const void *mask, size_t n_bytes)
800 {
801 nxm_put_entry_raw(ctx->output, field, version, value, mask, n_bytes);
802 if (!ctx->implied_ethernet && mf_from_id(field)->prereqs != MFP_NONE) {
803 ctx->implied_ethernet = true;
804 }
805 }
806
807 static void
808 nxm_put(struct nxm_put_ctx *ctx,
809 enum mf_field_id field, enum ofp_version version,
810 const void *value, const void *mask, size_t n_bytes)
811 {
812 if (!is_all_zeros(mask, n_bytes)) {
813 bool masked = !is_all_ones(mask, n_bytes);
814 nxm_put__(ctx, field, version, value, masked ? mask : NULL, n_bytes);
815 }
816 }
817
818 static void
819 nxm_put_8m(struct nxm_put_ctx *ctx,
820 enum mf_field_id field, enum ofp_version version,
821 uint8_t value, uint8_t mask)
822 {
823 nxm_put(ctx, field, version, &value, &mask, sizeof value);
824 }
825
826 static void
827 nxm_put_8(struct nxm_put_ctx *ctx,
828 enum mf_field_id field, enum ofp_version version, uint8_t value)
829 {
830 nxm_put__(ctx, field, version, &value, NULL, sizeof value);
831 }
832
833 static void
834 nxm_put_16m(struct nxm_put_ctx *ctx,
835 enum mf_field_id field, enum ofp_version version,
836 ovs_be16 value, ovs_be16 mask)
837 {
838 nxm_put(ctx, field, version, &value, &mask, sizeof value);
839 }
840
841 static void
842 nxm_put_16(struct nxm_put_ctx *ctx,
843 enum mf_field_id field, enum ofp_version version, ovs_be16 value)
844 {
845 nxm_put__(ctx, field, version, &value, NULL, sizeof value);
846 }
847
848 static void
849 nxm_put_32m(struct nxm_put_ctx *ctx,
850 enum mf_field_id field, enum ofp_version version,
851 ovs_be32 value, ovs_be32 mask)
852 {
853 nxm_put(ctx, field, version, &value, &mask, sizeof value);
854 }
855
856 static void
857 nxm_put_32(struct nxm_put_ctx *ctx,
858 enum mf_field_id field, enum ofp_version version, ovs_be32 value)
859 {
860 nxm_put__(ctx, field, version, &value, NULL, sizeof value);
861 }
862
863 static void
864 nxm_put_64m(struct nxm_put_ctx *ctx,
865 enum mf_field_id field, enum ofp_version version,
866 ovs_be64 value, ovs_be64 mask)
867 {
868 nxm_put(ctx, field, version, &value, &mask, sizeof value);
869 }
870
871 static void
872 nxm_put_128m(struct nxm_put_ctx *ctx,
873 enum mf_field_id field, enum ofp_version version,
874 const ovs_be128 value, const ovs_be128 mask)
875 {
876 nxm_put(ctx, field, version, &value, &mask, sizeof(value));
877 }
878
879 static void
880 nxm_put_eth_masked(struct nxm_put_ctx *ctx,
881 enum mf_field_id field, enum ofp_version version,
882 const struct eth_addr value, const struct eth_addr mask)
883 {
884 nxm_put(ctx, field, version, value.ea, mask.ea, ETH_ADDR_LEN);
885 }
886
887 static void
888 nxm_put_ipv6(struct nxm_put_ctx *ctx,
889 enum mf_field_id field, enum ofp_version version,
890 const struct in6_addr *value, const struct in6_addr *mask)
891 {
892 nxm_put(ctx, field, version, value->s6_addr, mask->s6_addr,
893 sizeof value->s6_addr);
894 }
895
896 static void
897 nxm_put_frag(struct nxm_put_ctx *ctx, const struct match *match,
898 enum ofp_version version)
899 {
900 uint8_t nw_frag = match->flow.nw_frag & FLOW_NW_FRAG_MASK;
901 uint8_t nw_frag_mask = match->wc.masks.nw_frag & FLOW_NW_FRAG_MASK;
902
903 nxm_put_8m(ctx, MFF_IP_FRAG, version, nw_frag,
904 nw_frag_mask == FLOW_NW_FRAG_MASK ? UINT8_MAX : nw_frag_mask);
905 }
906
907 /* Appends to 'ctx->output' a set of OXM or NXM matches for the IPv4 or IPv6
908 * fields in 'match'. */
909 static void
910 nxm_put_ip(struct nxm_put_ctx *ctx,
911 const struct match *match, enum ofp_version oxm)
912 {
913 const struct flow *flow = &match->flow;
914 ovs_be16 dl_type = get_dl_type(flow);
915
916 if (dl_type == htons(ETH_TYPE_IP)) {
917 nxm_put_32m(ctx, MFF_IPV4_SRC, oxm,
918 flow->nw_src, match->wc.masks.nw_src);
919 nxm_put_32m(ctx, MFF_IPV4_DST, oxm,
920 flow->nw_dst, match->wc.masks.nw_dst);
921 } else {
922 nxm_put_ipv6(ctx, MFF_IPV6_SRC, oxm,
923 &flow->ipv6_src, &match->wc.masks.ipv6_src);
924 nxm_put_ipv6(ctx, MFF_IPV6_DST, oxm,
925 &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
926 }
927
928 nxm_put_frag(ctx, match, oxm);
929
930 if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
931 if (oxm) {
932 nxm_put_8(ctx, MFF_IP_DSCP_SHIFTED, oxm,
933 flow->nw_tos >> 2);
934 } else {
935 nxm_put_8(ctx, MFF_IP_DSCP, oxm,
936 flow->nw_tos & IP_DSCP_MASK);
937 }
938 }
939
940 if (match->wc.masks.nw_tos & IP_ECN_MASK) {
941 nxm_put_8(ctx, MFF_IP_ECN, oxm,
942 flow->nw_tos & IP_ECN_MASK);
943 }
944
945 if (match->wc.masks.nw_ttl) {
946 nxm_put_8(ctx, MFF_IP_TTL, oxm, flow->nw_ttl);
947 }
948
949 nxm_put_32m(ctx, MFF_IPV6_LABEL, oxm,
950 flow->ipv6_label, match->wc.masks.ipv6_label);
951
952 if (match->wc.masks.nw_proto) {
953 nxm_put_8(ctx, MFF_IP_PROTO, oxm, flow->nw_proto);
954
955 if (flow->nw_proto == IPPROTO_TCP) {
956 nxm_put_16m(ctx, MFF_TCP_SRC, oxm,
957 flow->tp_src, match->wc.masks.tp_src);
958 nxm_put_16m(ctx, MFF_TCP_DST, oxm,
959 flow->tp_dst, match->wc.masks.tp_dst);
960 nxm_put_16m(ctx, MFF_TCP_FLAGS, oxm,
961 flow->tcp_flags, match->wc.masks.tcp_flags);
962 } else if (flow->nw_proto == IPPROTO_UDP) {
963 nxm_put_16m(ctx, MFF_UDP_SRC, oxm,
964 flow->tp_src, match->wc.masks.tp_src);
965 nxm_put_16m(ctx, MFF_UDP_DST, oxm,
966 flow->tp_dst, match->wc.masks.tp_dst);
967 } else if (flow->nw_proto == IPPROTO_SCTP) {
968 nxm_put_16m(ctx, MFF_SCTP_SRC, oxm, flow->tp_src,
969 match->wc.masks.tp_src);
970 nxm_put_16m(ctx, MFF_SCTP_DST, oxm, flow->tp_dst,
971 match->wc.masks.tp_dst);
972 } else if (is_icmpv4(flow, NULL)) {
973 if (match->wc.masks.tp_src) {
974 nxm_put_8(ctx, MFF_ICMPV4_TYPE, oxm,
975 ntohs(flow->tp_src));
976 }
977 if (match->wc.masks.tp_dst) {
978 nxm_put_8(ctx, MFF_ICMPV4_CODE, oxm,
979 ntohs(flow->tp_dst));
980 }
981 } else if (is_icmpv6(flow, NULL)) {
982 if (match->wc.masks.tp_src) {
983 nxm_put_8(ctx, MFF_ICMPV6_TYPE, oxm,
984 ntohs(flow->tp_src));
985 }
986 if (match->wc.masks.tp_dst) {
987 nxm_put_8(ctx, MFF_ICMPV6_CODE, oxm,
988 ntohs(flow->tp_dst));
989 }
990 if (is_nd(flow, NULL)) {
991 nxm_put_ipv6(ctx, MFF_ND_TARGET, oxm,
992 &flow->nd_target, &match->wc.masks.nd_target);
993 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
994 nxm_put_eth_masked(ctx, MFF_ND_SLL, oxm,
995 flow->arp_sha, match->wc.masks.arp_sha);
996 }
997 if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
998 nxm_put_eth_masked(ctx, MFF_ND_TLL, oxm,
999 flow->arp_tha, match->wc.masks.arp_tha);
1000 }
1001 }
1002 }
1003 }
1004 }
1005
1006 /* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
1007 * Flow Stats Request messages, a 'cookie' and 'cookie_mask' may be supplied.
1008 * Otherwise, 'cookie_mask' should be zero.
1009 *
1010 * Specify 'oxm' as 0 to express the match in NXM format; otherwise, specify
1011 * 'oxm' as the OpenFlow version number for the OXM format to use.
1012 *
1013 * This function can cause 'b''s data to be reallocated.
1014 *
1015 * Returns the number of bytes appended to 'b', excluding padding.
1016 *
1017 * If 'match' is a catch-all rule that matches every packet, then this function
1018 * appends nothing to 'b' and returns 0. */
1019 static int
1020 nx_put_raw(struct ofpbuf *b, enum ofp_version oxm, const struct match *match,
1021 ovs_be64 cookie, ovs_be64 cookie_mask)
1022 {
1023 const struct flow *flow = &match->flow;
1024 const size_t start_len = b->size;
1025 ovs_be16 dl_type = get_dl_type(flow);
1026 ovs_be32 spi_mask;
1027 int match_len;
1028
1029 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 41);
1030
1031 struct nxm_put_ctx ctx = { .output = b, .implied_ethernet = false };
1032
1033 /* OpenFlow Packet Type. Must be first. */
1034 if (match->wc.masks.packet_type && !match_has_default_packet_type(match)) {
1035 nxm_put_32m(&ctx, MFF_PACKET_TYPE, oxm, flow->packet_type,
1036 match->wc.masks.packet_type);
1037 }
1038
1039 /* Metadata. */
1040 if (match->wc.masks.dp_hash) {
1041 nxm_put_32m(&ctx, MFF_DP_HASH, oxm,
1042 htonl(flow->dp_hash), htonl(match->wc.masks.dp_hash));
1043 }
1044
1045 if (match->wc.masks.recirc_id) {
1046 nxm_put_32(&ctx, MFF_RECIRC_ID, oxm, htonl(flow->recirc_id));
1047 }
1048
1049 if (match->wc.masks.conj_id) {
1050 nxm_put_32(&ctx, MFF_CONJ_ID, oxm, htonl(flow->conj_id));
1051 }
1052
1053 if (match->wc.masks.in_port.ofp_port) {
1054 ofp_port_t in_port = flow->in_port.ofp_port;
1055 if (oxm) {
1056 nxm_put_32(&ctx, MFF_IN_PORT_OXM, oxm,
1057 ofputil_port_to_ofp11(in_port));
1058 } else {
1059 nxm_put_16(&ctx, MFF_IN_PORT, oxm,
1060 htons(ofp_to_u16(in_port)));
1061 }
1062 }
1063 if (match->wc.masks.actset_output) {
1064 nxm_put_32(&ctx, MFF_ACTSET_OUTPUT, oxm,
1065 ofputil_port_to_ofp11(flow->actset_output));
1066 }
1067
1068 /* Ethernet. */
1069 nxm_put_eth_masked(&ctx, MFF_ETH_SRC, oxm,
1070 flow->dl_src, match->wc.masks.dl_src);
1071 nxm_put_eth_masked(&ctx, MFF_ETH_DST, oxm,
1072 flow->dl_dst, match->wc.masks.dl_dst);
1073 nxm_put_16m(&ctx, MFF_ETH_TYPE, oxm,
1074 ofputil_dl_type_to_openflow(flow->dl_type),
1075 match->wc.masks.dl_type);
1076
1077 /* 802.1Q. */
1078 if (oxm) {
1079 ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
1080 ovs_be16 vid = flow->vlans[0].tci & VID_CFI_MASK;
1081 ovs_be16 mask = match->wc.masks.vlans[0].tci & VID_CFI_MASK;
1082
1083 if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
1084 nxm_put_16(&ctx, MFF_VLAN_VID, oxm, vid);
1085 } else if (mask) {
1086 nxm_put_16m(&ctx, MFF_VLAN_VID, oxm, vid, mask);
1087 }
1088
1089 if (vid && vlan_tci_to_pcp(match->wc.masks.vlans[0].tci)) {
1090 nxm_put_8(&ctx, MFF_VLAN_PCP, oxm,
1091 vlan_tci_to_pcp(flow->vlans[0].tci));
1092 }
1093
1094 } else {
1095 nxm_put_16m(&ctx, MFF_VLAN_TCI, oxm, flow->vlans[0].tci,
1096 match->wc.masks.vlans[0].tci);
1097 }
1098
1099 /* MPLS. */
1100 if (eth_type_mpls(dl_type)) {
1101 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) {
1102 nxm_put_8(&ctx, MFF_MPLS_TC, oxm,
1103 mpls_lse_to_tc(flow->mpls_lse[0]));
1104 }
1105
1106 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) {
1107 nxm_put_8(&ctx, MFF_MPLS_BOS, oxm,
1108 mpls_lse_to_bos(flow->mpls_lse[0]));
1109 }
1110
1111 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) {
1112 nxm_put_32(&ctx, MFF_MPLS_LABEL, oxm,
1113 htonl(mpls_lse_to_label(flow->mpls_lse[0])));
1114 }
1115 }
1116
1117 /* L3. */
1118 if (is_ip_any(flow)) {
1119 nxm_put_ip(&ctx, match, oxm);
1120 } else if (dl_type == htons(ETH_TYPE_ARP) ||
1121 dl_type == htons(ETH_TYPE_RARP)) {
1122 /* ARP. */
1123 if (match->wc.masks.nw_proto) {
1124 nxm_put_16(&ctx, MFF_ARP_OP, oxm,
1125 htons(flow->nw_proto));
1126 }
1127 nxm_put_32m(&ctx, MFF_ARP_SPA, oxm,
1128 flow->nw_src, match->wc.masks.nw_src);
1129 nxm_put_32m(&ctx, MFF_ARP_TPA, oxm,
1130 flow->nw_dst, match->wc.masks.nw_dst);
1131 nxm_put_eth_masked(&ctx, MFF_ARP_SHA, oxm,
1132 flow->arp_sha, match->wc.masks.arp_sha);
1133 nxm_put_eth_masked(&ctx, MFF_ARP_THA, oxm,
1134 flow->arp_tha, match->wc.masks.arp_tha);
1135 }
1136
1137 /* Tunnel ID. */
1138 nxm_put_64m(&ctx, MFF_TUN_ID, oxm,
1139 flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
1140
1141 /* Other tunnel metadata. */
1142 nxm_put_16m(&ctx, MFF_TUN_FLAGS, oxm,
1143 htons(flow->tunnel.flags), htons(match->wc.masks.tunnel.flags));
1144 nxm_put_32m(&ctx, MFF_TUN_SRC, oxm,
1145 flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
1146 nxm_put_32m(&ctx, MFF_TUN_DST, oxm,
1147 flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
1148 nxm_put_ipv6(&ctx, MFF_TUN_IPV6_SRC, oxm,
1149 &flow->tunnel.ipv6_src, &match->wc.masks.tunnel.ipv6_src);
1150 nxm_put_ipv6(&ctx, MFF_TUN_IPV6_DST, oxm,
1151 &flow->tunnel.ipv6_dst, &match->wc.masks.tunnel.ipv6_dst);
1152 nxm_put_16m(&ctx, MFF_TUN_GBP_ID, oxm,
1153 flow->tunnel.gbp_id, match->wc.masks.tunnel.gbp_id);
1154 nxm_put_8m(&ctx, MFF_TUN_GBP_FLAGS, oxm,
1155 flow->tunnel.gbp_flags, match->wc.masks.tunnel.gbp_flags);
1156 tun_metadata_to_nx_match(b, oxm, match);
1157
1158 /* ERSPAN */
1159 nxm_put_32m(&ctx, MFF_TUN_ERSPAN_IDX, oxm,
1160 htonl(flow->tunnel.erspan_idx),
1161 htonl(match->wc.masks.tunnel.erspan_idx));
1162 nxm_put_8m(&ctx, MFF_TUN_ERSPAN_VER, oxm,
1163 flow->tunnel.erspan_ver, match->wc.masks.tunnel.erspan_ver);
1164 nxm_put_8m(&ctx, MFF_TUN_ERSPAN_DIR, oxm,
1165 flow->tunnel.erspan_dir, match->wc.masks.tunnel.erspan_dir);
1166 nxm_put_8m(&ctx, MFF_TUN_ERSPAN_HWID, oxm,
1167 flow->tunnel.erspan_hwid, match->wc.masks.tunnel.erspan_hwid);
1168
1169 /* Network Service Header */
1170 nxm_put_8m(&ctx, MFF_NSH_FLAGS, oxm, flow->nsh.flags,
1171 match->wc.masks.nsh.flags);
1172 nxm_put_8m(&ctx, MFF_NSH_TTL, oxm, flow->nsh.ttl,
1173 match->wc.masks.nsh.ttl);
1174 nxm_put_8m(&ctx, MFF_NSH_MDTYPE, oxm, flow->nsh.mdtype,
1175 match->wc.masks.nsh.mdtype);
1176 nxm_put_8m(&ctx, MFF_NSH_NP, oxm, flow->nsh.np,
1177 match->wc.masks.nsh.np);
1178 spi_mask = nsh_path_hdr_to_spi(match->wc.masks.nsh.path_hdr);
1179 if (spi_mask == htonl(NSH_SPI_MASK >> NSH_SPI_SHIFT)) {
1180 spi_mask = OVS_BE32_MAX;
1181 }
1182 nxm_put_32m(&ctx, MFF_NSH_SPI, oxm,
1183 nsh_path_hdr_to_spi(flow->nsh.path_hdr),
1184 spi_mask);
1185 nxm_put_8m(&ctx, MFF_NSH_SI, oxm,
1186 nsh_path_hdr_to_si(flow->nsh.path_hdr),
1187 nsh_path_hdr_to_si(match->wc.masks.nsh.path_hdr));
1188 for (int i = 0; i < 4; i++) {
1189 nxm_put_32m(&ctx, MFF_NSH_C1 + i, oxm, flow->nsh.context[i],
1190 match->wc.masks.nsh.context[i]);
1191 }
1192
1193 /* Registers. */
1194 if (oxm < OFP15_VERSION) {
1195 for (int i = 0; i < FLOW_N_REGS; i++) {
1196 nxm_put_32m(&ctx, MFF_REG0 + i, oxm,
1197 htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
1198 }
1199 } else {
1200 for (int i = 0; i < FLOW_N_XREGS; i++) {
1201 nxm_put_64m(&ctx, MFF_XREG0 + i, oxm,
1202 htonll(flow_get_xreg(flow, i)),
1203 htonll(flow_get_xreg(&match->wc.masks, i)));
1204 }
1205 }
1206
1207 /* Packet mark. */
1208 nxm_put_32m(&ctx, MFF_PKT_MARK, oxm, htonl(flow->pkt_mark),
1209 htonl(match->wc.masks.pkt_mark));
1210
1211 /* Connection tracking. */
1212 nxm_put_32m(&ctx, MFF_CT_STATE, oxm, htonl(flow->ct_state),
1213 htonl(match->wc.masks.ct_state));
1214 nxm_put_16m(&ctx, MFF_CT_ZONE, oxm, htons(flow->ct_zone),
1215 htons(match->wc.masks.ct_zone));
1216 nxm_put_32m(&ctx, MFF_CT_MARK, oxm, htonl(flow->ct_mark),
1217 htonl(match->wc.masks.ct_mark));
1218 nxm_put_128m(&ctx, MFF_CT_LABEL, oxm, hton128(flow->ct_label),
1219 hton128(match->wc.masks.ct_label));
1220 nxm_put_32m(&ctx, MFF_CT_NW_SRC, oxm,
1221 flow->ct_nw_src, match->wc.masks.ct_nw_src);
1222 nxm_put_ipv6(&ctx, MFF_CT_IPV6_SRC, oxm,
1223 &flow->ct_ipv6_src, &match->wc.masks.ct_ipv6_src);
1224 nxm_put_32m(&ctx, MFF_CT_NW_DST, oxm,
1225 flow->ct_nw_dst, match->wc.masks.ct_nw_dst);
1226 nxm_put_ipv6(&ctx, MFF_CT_IPV6_DST, oxm,
1227 &flow->ct_ipv6_dst, &match->wc.masks.ct_ipv6_dst);
1228 if (flow->ct_nw_proto) {
1229 nxm_put_8m(&ctx, MFF_CT_NW_PROTO, oxm, flow->ct_nw_proto,
1230 match->wc.masks.ct_nw_proto);
1231 nxm_put_16m(&ctx, MFF_CT_TP_SRC, oxm,
1232 flow->ct_tp_src, match->wc.masks.ct_tp_src);
1233 nxm_put_16m(&ctx, MFF_CT_TP_DST, oxm,
1234 flow->ct_tp_dst, match->wc.masks.ct_tp_dst);
1235 }
1236 /* OpenFlow 1.1+ Metadata. */
1237 nxm_put_64m(&ctx, MFF_METADATA, oxm,
1238 flow->metadata, match->wc.masks.metadata);
1239
1240 /* Cookie. */
1241 if (cookie_mask) {
1242 bool masked = cookie_mask != OVS_BE64_MAX;
1243
1244 cookie &= cookie_mask;
1245 nx_put_header__(b, NXM_NX_COOKIE, masked);
1246 ofpbuf_put(b, &cookie, sizeof cookie);
1247 if (masked) {
1248 ofpbuf_put(b, &cookie_mask, sizeof cookie_mask);
1249 }
1250 }
1251
1252 if (match_has_default_packet_type(match) && !ctx.implied_ethernet) {
1253 uint64_t pt_stub[16 / 8];
1254 struct ofpbuf pt;
1255 ofpbuf_use_stack(&pt, pt_stub, sizeof pt_stub);
1256 nxm_put_entry_raw(&pt, MFF_PACKET_TYPE, oxm, &flow->packet_type,
1257 NULL, sizeof flow->packet_type);
1258
1259 ofpbuf_insert(b, start_len, pt.data, pt.size);
1260 }
1261
1262 match_len = b->size - start_len;
1263 return match_len;
1264 }
1265
1266 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
1267 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
1268 * Stats Request messages, a 'cookie' and 'cookie_mask' may be supplied.
1269 * Otherwise, 'cookie_mask' should be zero.
1270 *
1271 * This function can cause 'b''s data to be reallocated.
1272 *
1273 * Returns the number of bytes appended to 'b', excluding padding. The return
1274 * value can be zero if it appended nothing at all to 'b' (which happens if
1275 * 'match' is a catch-all rule that matches every packet).
1276 int
1277 nx_put_match(struct ofpbuf *b, const struct match *match,
1278 ovs_be64 cookie, ovs_be64 cookie_mask)
1279 {
1280 int match_len = nx_put_raw(b, 0, match, cookie, cookie_mask);
1281
1282 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1283 return match_len;
1284 }
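
/* Usage sketch (illustrative): serializing 'match' into a caller-owned buffer.
 *
 *     struct ofpbuf b;
 *     ofpbuf_init(&b, 64);
 *     int match_len = nx_put_match(&b, &match, htonll(0), htonll(0));
 *     ... 'b' now holds 'match_len' bytes of nx_match data followed by
 *         zero padding up to a multiple of 8 bytes ...
 *     ofpbuf_uninit(&b);
 */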
1285
1286 /* Appends to 'b' a struct ofp11_match_header followed by the OXM format that
1287 * expresses 'match', plus enough zero bytes to pad the data appended out to a
1288 * multiple of 8.
1289 *
1290 * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow
1291 * version in use as 'version'.
1292 *
1293 * This function can cause 'b''s data to be reallocated.
1294 *
1295 * Returns the number of bytes appended to 'b', excluding the padding. Never
1296 * returns zero. */
1297 int
1298 oxm_put_match(struct ofpbuf *b, const struct match *match,
1299 enum ofp_version version)
1300 {
1301 int match_len;
1302 struct ofp11_match_header *omh;
1303 size_t start_len = b->size;
1304 ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
1305
1306 ofpbuf_put_uninit(b, sizeof *omh);
1307 match_len = (nx_put_raw(b, version, match, cookie, cookie_mask)
1308 + sizeof *omh);
1309 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1310
1311 omh = ofpbuf_at(b, start_len, sizeof *omh);
1312 omh->type = htons(OFPMT_OXM);
1313 omh->length = htons(match_len);
1314
1315 return match_len;
1316 }
1317
1318 /* Appends to 'b' the OXM format that expresses 'match', without header or
1319 * padding.
1320 *
1321 * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow
1322 * version in use as 'version'.
1323 *
1324 * This function can cause 'b''s data to be reallocated. */
1325 void
1326 oxm_put_raw(struct ofpbuf *b, const struct match *match,
1327 enum ofp_version version)
1328 {
1329 nx_put_raw(b, version, match, 0, 0);
1330 }
1331
1332 /* Appends to 'ds' the name of the TLV corresponding to 'id'. If 'mask' is not
1333 * all-ones, it is also formatted, after an '=', as the value of the TLV. A
1334 * trailing comma is always appended. */
1335 static void
1336 nx_format_mask_tlv(struct ds *ds, enum mf_field_id id,
1337 const union mf_value *mask)
1338 {
1339 const struct mf_field *mf = mf_from_id(id);
1340
1341 ds_put_format(ds, "%s", mf->name);
1342
1343 if (!is_all_ones(mask, mf->n_bytes)) {
1344 ds_put_char(ds, '=');
1345 mf_format(mf, mask, NULL, NULL, ds);
1346 }
1347
1348 ds_put_char(ds, ',');
1349 }
1350
1351 /* Appends a string representation of 'fa' to 'ds'. Each TLV value in 'fa'
1352 * is treated as a mask; when a mask is all-ones, only the field's name is
1353 * formatted. */
1354 void
1355 oxm_format_field_array(struct ds *ds, const struct field_array *fa)
1356 {
1357 size_t start_len = ds->length;
1358 size_t i, offset = 0;
1359
1360 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fa->used.bm) {
1361 const struct mf_field *mf = mf_from_id(i);
1362 union mf_value value;
1363
1364 memcpy(&value, fa->values + offset, mf->n_bytes);
1365 nx_format_mask_tlv(ds, i, &value);
1366 offset += mf->n_bytes;
1367 }
1368
1369 if (ds->length > start_len) {
1370 ds_chomp(ds, ',');
1371 }
1372 }
1373
1374 /* Appends to 'b' a series of OXM TLVs corresponding to the series
1375 * of enum mf_field_id and value tuples in 'fa'.
1376 *
1377 * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow
1378 * version in use as 'version'.
1379 *
1380 * This function can cause 'b''s data to be reallocated.
1381 *
1382 * Returns the number of bytes appended to 'b'. May return zero. */
1383 int
1384 oxm_put_field_array(struct ofpbuf *b, const struct field_array *fa,
1385 enum ofp_version version)
1386 {
1387 size_t start_len = b->size;
1388
1389 /* XXX Some care might need to be taken of different TLVs that handle the
1390 * same flow fields. In particular:
1391 *
1392 * - VLAN_TCI, VLAN_VID and MFF_VLAN_PCP
1393 * - IP_DSCP_MASK and DSCP_SHIFTED
1394 * - REGS and XREGS
1395 */
1396
1397 size_t i, offset = 0;
1398
1399 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fa->used.bm) {
1400 const struct mf_field *mf = mf_from_id(i);
1401 union mf_value value;
1402
1403 memcpy(&value, fa->values + offset, mf->n_bytes);
1404
1405 int len = mf_field_len(mf, &value, NULL, NULL);
1406 nxm_put_entry_raw(b, i, version,
1407 &value.u8 + mf->n_bytes - len, NULL, len);
1408 offset += mf->n_bytes;
1409 }
1410
1411 return b->size - start_len;
1412 }
1413
1414 static void
1415 nx_put_header__(struct ofpbuf *b, uint64_t header, bool masked)
1416 {
1417 uint64_t masked_header = masked ? nxm_make_wild_header(header) : header;
1418 ovs_be64 network_header = htonll(masked_header);
1419
1420 ofpbuf_put(b, &network_header, nxm_header_len(header));
1421 }
1422
1423 void
1424 nx_put_header(struct ofpbuf *b, enum mf_field_id field,
1425 enum ofp_version version, bool masked)
1426 {
1427 nx_put_header__(b, mf_oxm_header(field, version), masked);
1428 }
1429
1430 void nx_put_mff_header(struct ofpbuf *b, const struct mf_field *mff,
1431 enum ofp_version version, bool masked)
1432 {
1433 if (mff->mapped) {
1434 nx_put_header_len(b, mff->id, version, masked, mff->n_bytes);
1435 } else {
1436 nx_put_header(b, mff->id, version, masked);
1437 }
1438 }
1439
1440 static void
1441 nx_put_header_len(struct ofpbuf *b, enum mf_field_id field,
1442 enum ofp_version version, bool masked, size_t n_bytes)
1443 {
1444 uint64_t header = mf_oxm_header(field, version);
1445
1446 header = NXM_HEADER(nxm_vendor(header), nxm_class(header),
1447 nxm_field(header), false,
1448 nxm_experimenter_len(header) + n_bytes);
1449
1450 nx_put_header__(b, header, masked);
1451 }
1452
1453 void
1454 nx_put_entry(struct ofpbuf *b, const struct mf_field *mff,
1455 enum ofp_version version, const union mf_value *value,
1456 const union mf_value *mask)
1457 {
1458 bool masked;
1459 int len, offset;
1460
1461 len = mf_field_len(mff, value, mask, &masked);
1462 offset = mff->n_bytes - len;
1463
1464 nxm_put_entry_raw(b, mff->id, version,
1465 &value->u8 + offset, masked ? &mask->u8 + offset : NULL,
1466 len);
1467 }
1468 \f
1469 /* nx_match_to_string() and helpers. */
1470
1471 static void format_nxm_field_name(struct ds *, uint64_t header);
1472
1473 char *
1474 nx_match_to_string(const uint8_t *p, unsigned int match_len)
1475 {
1476 if (!match_len) {
1477 return xstrdup("<any>");
1478 }
1479
1480 struct ofpbuf b = ofpbuf_const_initializer(p, match_len);
1481 struct ds s = DS_EMPTY_INITIALIZER;
1482 while (b.size) {
1483 union mf_value value;
1484 union mf_value mask;
1485 enum ofperr error;
1486 uint64_t header;
1487 int value_len;
1488
1489 error = nx_pull_entry__(&b, true, NULL, &header, NULL, &value, &mask);
1490 if (error) {
1491 break;
1492 }
1493 value_len = MIN(sizeof value, nxm_field_bytes(header));
1494
1495 if (s.length) {
1496 ds_put_cstr(&s, ", ");
1497 }
1498
1499 format_nxm_field_name(&s, header);
1500 ds_put_char(&s, '(');
1501
1502 for (int i = 0; i < value_len; i++) {
1503 ds_put_format(&s, "%02x", ((const uint8_t *) &value)[i]);
1504 }
1505 if (nxm_hasmask(header)) {
1506 ds_put_char(&s, '/');
1507 for (int i = 0; i < value_len; i++) {
1508 ds_put_format(&s, "%02x", ((const uint8_t *) &mask)[i]);
1509 }
1510 }
1511 ds_put_char(&s, ')');
1512 }
1513
1514 if (b.size) {
1515 if (s.length) {
1516 ds_put_cstr(&s, ", ");
1517 }
1518
1519 ds_put_format(&s, "<%u invalid bytes>", b.size);
1520 }
1521
1522 return ds_steal_cstr(&s);
1523 }
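
/* For illustration (hypothetical values), a small match might render as
 * "NXM_OF_ETH_TYPE(0800), NXM_OF_IP_PROTO(06)"; a masked entry prints its
 * mask after a slash, e.g. "NXM_NX_REG0_W(00000001/0000000f)". */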
1524
1525 char *
1526 oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
1527 {
1528 const struct ofp11_match_header *omh = p->data;
1529 uint16_t match_len_;
1530 struct ds s;
1531
1532 ds_init(&s);
1533
1534 if (match_len < sizeof *omh) {
1535 ds_put_format(&s, "<match too short: %u>", match_len);
1536 goto err;
1537 }
1538
1539 if (omh->type != htons(OFPMT_OXM)) {
1540 ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
1541 goto err;
1542 }
1543
1544 match_len_ = ntohs(omh->length);
1545 if (match_len_ < sizeof *omh) {
1546 ds_put_format(&s, "<match length field too short: %u>", match_len_);
1547 goto err;
1548 }
1549
1550 if (match_len_ != match_len) {
1551 ds_put_format(&s, "<match length field incorrect: %u != %u>",
1552 match_len_, match_len);
1553 goto err;
1554 }
1555
1556 return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
1557 match_len - sizeof *omh);
1558
1559 err:
1560 return ds_steal_cstr(&s);
1561 }
1562
1563 void
1564 nx_format_field_name(enum mf_field_id id, enum ofp_version version,
1565 struct ds *s)
1566 {
1567 format_nxm_field_name(s, mf_oxm_header(id, version));
1568 }
1569
1570 static void
1571 format_nxm_field_name(struct ds *s, uint64_t header)
1572 {
1573 const struct nxm_field *f = nxm_field_by_header(header);
1574 if (f) {
1575 ds_put_cstr(s, f->name);
1576 if (nxm_hasmask(header)) {
1577 ds_put_cstr(s, "_W");
1578 }
1579 } else if (header == NXM_NX_COOKIE) {
1580 ds_put_cstr(s, "NXM_NX_COOKIE");
1581 } else if (header == NXM_NX_COOKIE_W) {
1582 ds_put_cstr(s, "NXM_NX_COOKIE_W");
1583 } else {
1584 ds_put_format(s, "%d:%d", nxm_class(header), nxm_field(header));
1585 }
1586 }
1587
1588 static bool
1589 streq_len(const char *a, size_t a_len, const char *b)
1590 {
1591 return strlen(b) == a_len && !memcmp(a, b, a_len);
1592 }
1593
1594 static uint64_t
1595 parse_nxm_field_name(const char *name, int name_len)
1596 {
1597 const struct nxm_field *f;
1598 bool wild;
1599
1600 f = mf_parse_subfield_name(name, name_len, &wild);
1601 if (f) {
1602 if (!wild) {
1603 return f->header;
1604 } else if (mf_from_id(f->id)->maskable != MFM_NONE) {
1605 return nxm_make_wild_header(f->header);
1606 }
1607 }
1608
1609 if (streq_len(name, name_len, "NXM_NX_COOKIE")) {
1610 return NXM_NX_COOKIE;
1611 } else if (streq_len(name, name_len, "NXM_NX_COOKIE_W")) {
1612 return NXM_NX_COOKIE_W;
1613 }
1614
1615 /* Check whether it's a field header value as hex.
1616 * (This isn't ordinarily useful except for testing error behavior.) */
1617 if (name_len == 8) {
1618 uint64_t header;
1619 bool ok;
1620
1621 header = hexits_value(name, name_len, &ok) << 32;
1622 if (ok) {
1623 return header;
1624 }
1625 } else if (name_len == 16) {
1626 uint64_t header;
1627 bool ok;
1628
1629 header = hexits_value(name, name_len, &ok);
1630 if (ok && is_experimenter_oxm(header)) {
1631 return header;
1632 }
1633 }
1634
1635 return 0;
1636 }
1637 \f
1638 /* nx_match_from_string(). */
1639
1640 static int
1641 nx_match_from_string_raw(const char *s, struct ofpbuf *b)
1642 {
1643 const char *full_s = s;
1644 const size_t start_len = b->size;
1645
1646 if (!strcmp(s, "<any>")) {
1647 /* Ensure that 'b->data' isn't actually null. */
1648 ofpbuf_prealloc_tailroom(b, 1);
1649 return 0;
1650 }
1651
1652 for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
1653 const char *name;
1654 uint64_t header;
1655 ovs_be64 nw_header;
1656 int name_len;
1657 size_t n;
1658
1659 name = s;
1660 name_len = strcspn(s, "(");
1661 if (s[name_len] != '(') {
1662 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
1663 }
1664
1665 header = parse_nxm_field_name(name, name_len);
1666 if (!header) {
1667 ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
1668 }
1669
1670 s += name_len + 1;
1671
1672 b->header = ofpbuf_put_uninit(b, nxm_header_len(header));
1673 s = ofpbuf_put_hex(b, s, &n);
1674 if (n != nxm_field_bytes(header)) {
1675 const struct mf_field *field = mf_from_oxm_header(header, NULL);
1676
1677 if (field && field->variable_len) {
1678 if (n <= field->n_bytes) {
1679 int len = (nxm_hasmask(header) ? n * 2 : n) +
1680 nxm_experimenter_len(header);
1681
1682 header = NXM_HEADER(nxm_vendor(header), nxm_class(header),
1683 nxm_field(header),
1684 nxm_hasmask(header) ? 1 : 0, len);
1685 } else {
1686 ovs_fatal(0, "expected to read at most %d bytes but got "
1687 "%"PRIuSIZE, field->n_bytes, n);
1688 }
1689 } else {
1690 ovs_fatal(0, "expected to read %d bytes but got %"PRIuSIZE,
1691 nxm_field_bytes(header), n);
1692 }
1693 }
1694 nw_header = htonll(header);
1695 memcpy(b->header, &nw_header, nxm_header_len(header));
1696
1697 if (nxm_hasmask(header)) {
1698 s += strspn(s, " ");
1699 if (*s != '/') {
1700 ovs_fatal(0, "%s: missing / in masked field %.*s",
1701 full_s, name_len, name);
1702 }
1703 s = ofpbuf_put_hex(b, s + 1, &n);
1704 if (n != nxm_field_bytes(header)) {
1705 ovs_fatal(0, "%.2s: hex digits expected", s);
1706 }
1707 }
1708
1709 s += strspn(s, " ");
1710 if (*s != ')') {
1711 ovs_fatal(0, "%s: missing ) following field %.*s",
1712 full_s, name_len, name);
1713 }
1714 s++;
1715 }
1716
1717 return b->size - start_len;
1718 }
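
/* The syntax accepted above is a comma- or space-separated list of
 * NAME(HEXVALUE) entries, with masked entries written NAME(HEXVALUE/HEXMASK),
 * or the single token "<any>" for an empty match.  For example (hypothetical
 * values):
 *
 *     "NXM_OF_ETH_TYPE(0800), NXM_NX_REG0_W(00000001/0000000f)"
 */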
1719
1720 int
1721 nx_match_from_string(const char *s, struct ofpbuf *b)
1722 {
1723 int match_len = nx_match_from_string_raw(s, b);
1724 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1725 return match_len;
1726 }
1727
1728 int
1729 oxm_match_from_string(const char *s, struct ofpbuf *b)
1730 {
1731 int match_len;
1732 struct ofp11_match_header *omh;
1733 size_t start_len = b->size;
1734
1735 ofpbuf_put_uninit(b, sizeof *omh);
1736 match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
1737 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1738
1739 omh = ofpbuf_at(b, start_len, sizeof *omh);
1740 omh->type = htons(OFPMT_OXM);
1741 omh->length = htons(match_len);
1742
1743 return match_len;
1744 }
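
/* Illustrative sketch (not part of the original file): a minimal example of
 * how nx_match_from_string() above might be used.  The field name and value
 * are arbitrary; NXM_OF_IN_PORT carries a 2-byte payload, so "0001" supplies
 * exactly the expected number of hex bytes. */
static void
nx_match_from_string_example(void)
{
    struct ofpbuf b;
    int match_len;

    ofpbuf_init(&b, 64);
    match_len = nx_match_from_string("NXM_OF_IN_PORT(0001)", &b);

    /* 'b' now holds 'match_len' bytes of binary nx_match, zero-padded to a
     * multiple of 8 bytes; a parse error would have aborted via ovs_fatal(). */
    ovs_assert(match_len == 6);     /* 4-byte NXM header + 2-byte value. */
    ofpbuf_uninit(&b);
}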
1745 \f
1746 /* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
1747 * '*move'.
1748 *
1749 * Returns NULL if successful, otherwise a malloc()'d string describing the
1750 * error. The caller is responsible for freeing the returned string. */
1751 char * OVS_WARN_UNUSED_RESULT
1752 nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
1753 {
1754 const char *full_s = s;
1755 char *error;
1756
1757 error = mf_parse_subfield__(&move->src, &s);
1758 if (error) {
1759 return error;
1760 }
1761 if (strncmp(s, "->", 2)) {
1762 return xasprintf("%s: missing `->' following source", full_s);
1763 }
1764 s += 2;
1765 error = mf_parse_subfield(&move->dst, s);
1766 if (error) {
1767 return error;
1768 }
1769
1770 if (move->src.n_bits != move->dst.n_bits) {
1771 return xasprintf("%s: source field is %d bits wide but destination is "
1772 "%d bits wide", full_s,
1773 move->src.n_bits, move->dst.n_bits);
1774 }
1775 return NULL;
1776 }
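
/* Illustrative sketch (not part of the original file): feeding
 * nxm_parse_reg_move() a well-formed "move" specification.  The register
 * names and bit ranges are arbitrary; both subfields are 6 bits wide, so the
 * width check above passes. */
static void
nxm_parse_reg_move_example(void)
{
    struct ofpact_reg_move move;
    char *error = nxm_parse_reg_move(&move,
                                     "NXM_NX_REG0[0..5]->NXM_NX_REG1[26..31]");

    if (error) {
        free(error);            /* Syntax error or mismatched widths. */
    } else {
        ovs_assert(move.src.n_bits == 6 && move.dst.n_bits == 6);
    }
}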
1777 \f
1778 /* nxm_format_reg_move(). */
1779
1780 void
1781 nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
1782 {
1783 ds_put_format(s, "%smove:%s", colors.special, colors.end);
1784 mf_format_subfield(&move->src, s);
1785 ds_put_format(s, "%s->%s", colors.special, colors.end);
1786 mf_format_subfield(&move->dst, s);
1787 }
1788
1789 \f
1790 enum ofperr
1791 nxm_reg_move_check(const struct ofpact_reg_move *move,
1792 const struct match *match)
1793 {
1794 enum ofperr error;
1795
1796 error = mf_check_src(&move->src, match);
1797 if (error) {
1798 return error;
1799 }
1800
1801 return mf_check_dst(&move->dst, match);
1802 }
1803 \f
1804 /* nxm_reg_load(). */
1805
1806 void
1807 nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
1808 struct flow *flow, struct flow_wildcards *wc)
1809 {
1810 union mf_subvalue src_subvalue;
1811 union mf_subvalue mask_value;
1812 ovs_be64 src_data_be = htonll(src_data);
1813
1814 memset(&mask_value, 0xff, sizeof mask_value);
1815 mf_write_subfield_flow(dst, &mask_value, &wc->masks);
1816
1817 bitwise_copy(&src_data_be, sizeof src_data_be, 0,
1818 &src_subvalue, sizeof src_subvalue, 0,
1819 sizeof src_data_be * 8);
1820 mf_write_subfield_flow(dst, &src_subvalue, flow);
1821 }
1822 \f
1823 /* nxm_parse_stack_action(): works for both push() and pop(). */
1824
1825 /* Parses 's' as a "push" or "pop" action, in the form described in
1826 * ovs-ofctl(8), into '*stack_action'.
1827 *
1828 * Returns NULL if successful, otherwise a malloc()'d string describing the
1829 * error. The caller is responsible for freeing the returned string. */
1830 char * OVS_WARN_UNUSED_RESULT
1831 nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
1832 {
1833 char *error;
1834
1835 error = mf_parse_subfield__(&stack_action->subfield, &s);
1836 if (error) {
1837 return error;
1838 }
1839
1840 if (*s != '\0') {
1841 return xasprintf("%s: trailing garbage following push or pop", s);
1842 }
1843
1844 return NULL;
1845 }
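
/* Illustrative sketch (not part of the original file): parsing the subfield
 * argument of a push or pop action.  "NXM_NX_REG2[]" selects the entire
 * 32-bit register, so the parsed subfield covers all 32 bits. */
static void
nxm_parse_stack_action_example(void)
{
    struct ofpact_stack push;
    char *error = nxm_parse_stack_action(&push, "NXM_NX_REG2[]");

    if (error) {
        free(error);
    } else {
        ovs_assert(push.subfield.ofs == 0 && push.subfield.n_bits == 32);
    }
}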
1846
1847 void
1848 nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
1849 {
1850 ds_put_format(s, "%spush:%s", colors.param, colors.end);
1851 mf_format_subfield(&push->subfield, s);
1852 }
1853
1854 void
1855 nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
1856 {
1857 ds_put_format(s, "%spop:%s", colors.param, colors.end);
1858 mf_format_subfield(&pop->subfield, s);
1859 }
1860
1861 enum ofperr
1862 nxm_stack_push_check(const struct ofpact_stack *push,
1863 const struct match *match)
1864 {
1865 return mf_check_src(&push->subfield, match);
1866 }
1867
1868 enum ofperr
1869 nxm_stack_pop_check(const struct ofpact_stack *pop,
1870 const struct match *match)
1871 {
1872 return mf_check_dst(&pop->subfield, match);
1873 }
1874
1875 /* nxm_execute_stack_push(), nxm_execute_stack_pop().
1876 *
1877 * A stack is an ofpbuf with 'data' pointing to the bottom of the stack and
1878 * 'size' indexing the top of the stack. Each value of some byte length is
1879 * stored to the stack immediately followed by the length of the value as an
1880 * unsigned byte. This way a POP operation can first read the length byte, and
1881 * then the appropriate number of bytes from the stack. This also means that
1882 * it is only possible to traverse the stack from top to bottom. It is
1883 * also possible, however, to push values onto the bottom of the stack, which is
1884 * useful when a stack has been serialized to a wire format in reverse order
1885 * (topmost value first).
1886 */
1887
1888 /* Push value 'v' of length 'bytes' to the top of 'stack'. */
1889 void
1890 nx_stack_push(struct ofpbuf *stack, const void *v, uint8_t bytes)
1891 {
1892 ofpbuf_put(stack, v, bytes);
1893 ofpbuf_put(stack, &bytes, sizeof bytes);
1894 }
1895
1896 /* Push value 'v' of length 'bytes' to the bottom of 'stack'. */
1897 void
1898 nx_stack_push_bottom(struct ofpbuf *stack, const void *v, uint8_t bytes)
1899 {
1900 ofpbuf_push(stack, &bytes, sizeof bytes);
1901 ofpbuf_push(stack, v, bytes);
1902 }
1903
1904 /* Pop the topmost value from 'stack', returning a pointer to the value in the
1905 * stack and the length of the value in '*bytes'. On underflow, returns NULL
1906 * and sets '*bytes' to zero. */
1907 void *
1908 nx_stack_pop(struct ofpbuf *stack, uint8_t *bytes)
1909 {
1910 if (!stack->size) {
1911 *bytes = 0;
1912 return NULL;
1913 }
1914
1915 stack->size -= sizeof *bytes;
1916 memcpy(bytes, ofpbuf_tail(stack), sizeof *bytes);
1917
1918 ovs_assert(stack->size >= *bytes);
1919 stack->size -= *bytes;
1920 return ofpbuf_tail(stack);
1921 }
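
/* Illustrative sketch (not part of the original file): a push/pop round trip
 * that demonstrates the layout described above.  After the push, the stack
 * holds the two value bytes immediately followed by one length byte. */
static void
nx_stack_example(void)
{
    struct ofpbuf stack;
    ovs_be16 value = htons(0x1234);
    uint8_t len;
    void *top;

    ofpbuf_init(&stack, 0);
    nx_stack_push(&stack, &value, sizeof value);
    ovs_assert(stack.size == sizeof value + 1);  /* Value bytes + length byte. */

    top = nx_stack_pop(&stack, &len);
    ovs_assert(top && len == sizeof value);
    ovs_assert(!memcmp(top, &value, len));
    ovs_assert(!stack.size);                     /* Stack is empty again. */
    ofpbuf_uninit(&stack);
}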
1922
1923 void
1924 nxm_execute_stack_push(const struct ofpact_stack *push,
1925 const struct flow *flow, struct flow_wildcards *wc,
1926 struct ofpbuf *stack)
1927 {
1928 union mf_subvalue dst_value;
1929
1930 mf_write_subfield_flow(&push->subfield,
1931 (union mf_subvalue *)&exact_match_mask,
1932 &wc->masks);
1933
1934 mf_read_subfield(&push->subfield, flow, &dst_value);
1935 uint8_t bytes = DIV_ROUND_UP(push->subfield.n_bits, 8);
1936 nx_stack_push(stack, &dst_value.u8[sizeof dst_value - bytes], bytes);
1937 }
1938
1939 bool
1940 nxm_execute_stack_pop(const struct ofpact_stack *pop,
1941 struct flow *flow, struct flow_wildcards *wc,
1942 struct ofpbuf *stack)
1943 {
1944 uint8_t src_bytes;
1945 const void *src = nx_stack_pop(stack, &src_bytes);
1946 if (src) {
1947 union mf_subvalue src_value;
1948 uint8_t dst_bytes = DIV_ROUND_UP(pop->subfield.n_bits, 8);
1949
1950 if (src_bytes < dst_bytes) {
1951 memset(&src_value.u8[sizeof src_value - dst_bytes], 0,
1952 dst_bytes - src_bytes);
1953 }
1954 memcpy(&src_value.u8[sizeof src_value - src_bytes], src, src_bytes);
1955 mf_write_subfield_flow(&pop->subfield,
1956 (union mf_subvalue *)&exact_match_mask,
1957 &wc->masks);
1958 mf_write_subfield_flow(&pop->subfield, &src_value, flow);
1959 return true;
1960 } else {
1961 /* Attempted to pop from an empty stack. */
1962 return false;
1963 }
1964 }
1965 \f
1966 /* Formats 'sf' into 's' in a format normally acceptable to
1967 * mf_parse_subfield(). (It won't be acceptable if sf->field is NULL or if
1968 * sf->field has no NXM name.) */
1969 void
1970 mf_format_subfield(const struct mf_subfield *sf, struct ds *s)
1971 {
1972 if (!sf->field) {
1973 ds_put_cstr(s, "<unknown>");
1974 } else {
1975 const struct nxm_field *f = nxm_field_by_mf_id(sf->field->id, 0);
1976 ds_put_cstr(s, f ? f->name : sf->field->name);
1977 }
1978
1979 if (sf->field && sf->ofs == 0 && sf->n_bits == sf->field->n_bits) {
1980 ds_put_cstr(s, "[]");
1981 } else if (sf->n_bits == 1) {
1982 ds_put_format(s, "[%d]", sf->ofs);
1983 } else {
1984 ds_put_format(s, "[%d..%d]", sf->ofs, sf->ofs + sf->n_bits - 1);
1985 }
1986 }
1987
1988 static const struct nxm_field *
1989 mf_parse_subfield_name(const char *name, int name_len, bool *wild)
1990 {
1991 *wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
1992 if (*wild) {
1993 name_len -= 2;
1994 }
1995
1996 return nxm_field_by_name(name, name_len);
1997 }
1998
1999 /* Parses a subfield from the beginning of '*sp' into 'sf'. If successful,
2000 * returns NULL and advances '*sp' to the first byte following the parsed
2001 * string. On failure, returns a malloc()'d error message, does not modify
2002 * '*sp', and may leave 'sf' only partially initialized.
2003 *
2004 * The syntax parsed from '*sp' takes the form "header[start..end]" where
2005 * 'header' is the name of an NXM field and 'start' and 'end' are (inclusive)
2006 * bit indexes. "..end" may be omitted to indicate a single bit. "start..end"
2007 * may both be omitted (the [] are still required) to indicate an entire
2008 * field. */
2009 char * OVS_WARN_UNUSED_RESULT
2010 mf_parse_subfield__(struct mf_subfield *sf, const char **sp)
2011 {
2012 const struct mf_field *field = NULL;
2013 const struct nxm_field *f;
2014 const char *name;
2015 int start, end;
2016 const char *s;
2017 int name_len;
2018 bool wild;
2019
2020 s = *sp;
2021 name = s;
2022 name_len = strcspn(s, "[-");
2023
2024 f = mf_parse_subfield_name(name, name_len, &wild);
2025 field = f ? mf_from_id(f->id) : mf_from_name_len(name, name_len);
2026 if (!field) {
2027 return xasprintf("%s: unknown field `%.*s'", *sp, name_len, s);
2028 }
2029
2030 s += name_len;
2031 /* Assume full field. */
2032 start = 0;
2033 end = field->n_bits - 1;
2034 if (*s == '[') {
2035 if (!strncmp(s, "[]", 2)) {
2036 /* Nothing to do. */
2037 } else if (ovs_scan(s, "[%d..%d]", &start, &end)) {
2038 /* Nothing to do. */
2039 } else if (ovs_scan(s, "[%d]", &start)) {
2040 end = start;
2041 } else {
2042 return xasprintf("%s: syntax error expecting [] or [<bit>] or "
2043 "[<start>..<end>]", *sp);
2044 }
2045 s = strchr(s, ']') + 1;
2046 }
2047
2048 if (start > end) {
2049 return xasprintf("%s: starting bit %d is after ending bit %d",
2050 *sp, start, end);
2051 } else if (start >= field->n_bits) {
2052 return xasprintf("%s: starting bit %d is not valid because field is "
2053 "only %d bits wide", *sp, start, field->n_bits);
2054 } else if (end >= field->n_bits) {
2055 return xasprintf("%s: ending bit %d is not valid because field is "
2056 "only %d bits wide", *sp, end, field->n_bits);
2057 }
2058
2059 sf->field = field;
2060 sf->ofs = start;
2061 sf->n_bits = end - start + 1;
2062
2063 *sp = s;
2064 return NULL;
2065 }
2066
2067 /* Parses a subfield from the entirety of 's' into 'sf'. Returns NULL if
2068 * successful, otherwise a malloc()'d string describing the error. The caller
2069 * is responsible for freeing the returned string.
2070 *
2071 * The syntax parsed from 's' takes the form "header[start..end]" where
2072 * 'header' is the name of an NXM field and 'start' and 'end' are (inclusive)
2073 * bit indexes. "..end" may be omitted to indicate a single bit. "start..end"
2074 * may both be omitted (the [] are still required) to indicate an entire
2075 * field. */
2076 char * OVS_WARN_UNUSED_RESULT
2077 mf_parse_subfield(struct mf_subfield *sf, const char *s)
2078 {
2079 char *error = mf_parse_subfield__(sf, &s);
2080 if (!error && s[0]) {
2081 error = xstrdup("unexpected input following field syntax");
2082 }
2083 return error;
2084 }
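
/* Illustrative sketch (not part of the original file): round-tripping a
 * subfield through mf_parse_subfield() and mf_format_subfield().  The field
 * name and bit range are arbitrary. */
static void
mf_subfield_round_trip_example(void)
{
    struct mf_subfield sf;
    char *error = mf_parse_subfield(&sf, "NXM_NX_REG0[0..15]");

    if (error) {
        free(error);
    } else {
        struct ds s = DS_EMPTY_INITIALIZER;

        ovs_assert(sf.ofs == 0 && sf.n_bits == 16);
        mf_format_subfield(&sf, &s);   /* Should yield "NXM_NX_REG0[0..15]". */
        ds_destroy(&s);
    }
}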
2085 \f
2086 /* Returns a bitmap in which each bit corresponds to the like-numbered field
2087 * in the OFPXMC12_OPENFLOW_BASIC OXM class, in which the bit values are taken
2088 * from the 'fields' bitmap. Only fields defined in OpenFlow 'version' are
2089 * considered.
2090 *
2091 * This is useful for encoding OpenFlow 1.2 table stats messages. */
2092 ovs_be64
2093 oxm_bitmap_from_mf_bitmap(const struct mf_bitmap *fields,
2094 enum ofp_version version)
2095 {
2096 uint64_t oxm_bitmap = 0;
2097 int i;
2098
2099 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->bm) {
2100 uint64_t oxm = mf_oxm_header(i, version);
2101 uint32_t class = nxm_class(oxm);
2102 int field = nxm_field(oxm);
2103
2104 if (class == OFPXMC12_OPENFLOW_BASIC && field < 64) {
2105 oxm_bitmap |= UINT64_C(1) << field;
2106 }
2107 }
2108 return htonll(oxm_bitmap);
2109 }
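
/* Illustrative sketch (not part of the original file): encoding a one-field
 * bitmap.  MFF_IN_PORT_OXM maps to OXM_OF_IN_PORT, OpenFlow basic field 0,
 * so exactly bit 0 should be set in the result. */
static void
oxm_bitmap_example(void)
{
    struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
    ovs_be64 bitmap;

    bitmap_set1(fields.bm, MFF_IN_PORT_OXM);
    bitmap = oxm_bitmap_from_mf_bitmap(&fields, OFP13_VERSION);
    ovs_assert(bitmap == htonll(UINT64_C(1) << 0));
}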
2110
2111 /* Opposite conversion from oxm_bitmap_from_mf_bitmap().
2112 *
2113 * This is useful for decoding OpenFlow 1.2 table stats messages. */
2114 struct mf_bitmap
2115 oxm_bitmap_to_mf_bitmap(ovs_be64 oxm_bitmap, enum ofp_version version)
2116 {
2117 struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
2118
2119 for (enum mf_field_id id = 0; id < MFF_N_IDS; id++) {
2120 uint64_t oxm = mf_oxm_header(id, version);
2121 if (oxm && version >= nxm_field_by_header(oxm)->version) {
2122 uint32_t class = nxm_class(oxm);
2123 int field = nxm_field(oxm);
2124
2125 if (class == OFPXMC12_OPENFLOW_BASIC
2126 && field < 64
2127 && oxm_bitmap & htonll(UINT64_C(1) << field)) {
2128 bitmap_set1(fields.bm, id);
2129 }
2130 }
2131 }
2132 return fields;
2133 }
2134
2135 /* Returns a bitmap of fields that can be encoded in OXM and that can be
2136 * modified with a "set_field" action. */
2137 struct mf_bitmap
2138 oxm_writable_fields(void)
2139 {
2140 struct mf_bitmap b = MF_BITMAP_INITIALIZER;
2141 int i;
2142
2143 for (i = 0; i < MFF_N_IDS; i++) {
2144 if (mf_oxm_header(i, 0) && mf_from_id(i)->writable) {
2145 bitmap_set1(b.bm, i);
2146 }
2147 }
2148 return b;
2149 }
2150
2151 /* Returns a bitmap of fields that can be encoded in OXM and that can be
2152 * matched in a flow table. */
2153 struct mf_bitmap
2154 oxm_matchable_fields(void)
2155 {
2156 struct mf_bitmap b = MF_BITMAP_INITIALIZER;
2157 int i;
2158
2159 for (i = 0; i < MFF_N_IDS; i++) {
2160 if (mf_oxm_header(i, 0)) {
2161 bitmap_set1(b.bm, i);
2162 }
2163 }
2164 return b;
2165 }
2166
2167 /* Returns a bitmap of fields that can be encoded in OXM and that can be
2168 * matched in a flow table with an arbitrary bitmask. */
2169 struct mf_bitmap
2170 oxm_maskable_fields(void)
2171 {
2172 struct mf_bitmap b = MF_BITMAP_INITIALIZER;
2173 int i;
2174
2175 for (i = 0; i < MFF_N_IDS; i++) {
2176 if (mf_oxm_header(i, 0) && mf_from_id(i)->maskable == MFM_FULLY) {
2177 bitmap_set1(b.bm, i);
2178 }
2179 }
2180 return b;
2181 }
2182 \f
2183 struct nxm_field_index {
2184 struct hmap_node header_node; /* In nxm_header_map. */
2185 struct hmap_node name_node; /* In nxm_name_map. */
2186 struct ovs_list mf_node; /* In nxm_mf_map[nf.id]. */
2187 const struct nxm_field nf;
2188 };
2189
2190 #include "nx-match.inc"
2191
2192 static struct hmap nxm_header_map;
2193 static struct hmap nxm_name_map;
2194 static struct ovs_list nxm_mf_map[MFF_N_IDS];
2195
2196 static void
2197 nxm_init(void)
2198 {
2199 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2200 if (ovsthread_once_start(&once)) {
2201 hmap_init(&nxm_header_map);
2202 hmap_init(&nxm_name_map);
2203 for (int i = 0; i < MFF_N_IDS; i++) {
2204 ovs_list_init(&nxm_mf_map[i]);
2205 }
2206 for (struct nxm_field_index *nfi = all_nxm_fields;
2207 nfi < &all_nxm_fields[ARRAY_SIZE(all_nxm_fields)]; nfi++) {
2208 hmap_insert(&nxm_header_map, &nfi->header_node,
2209 hash_uint64(nxm_no_len(nfi->nf.header)));
2210 hmap_insert(&nxm_name_map, &nfi->name_node,
2211 hash_string(nfi->nf.name, 0));
2212 ovs_list_push_back(&nxm_mf_map[nfi->nf.id], &nfi->mf_node);
2213 }
2214 ovsthread_once_done(&once);
2215 }
2216 }
2217
2218 static const struct nxm_field *
2219 nxm_field_by_header(uint64_t header)
2220 {
2221 const struct nxm_field_index *nfi;
2222 uint64_t header_no_len;
2223
2224 nxm_init();
2225 if (nxm_hasmask(header)) {
2226 header = nxm_make_exact_header(header);
2227 }
2228
2229 header_no_len = nxm_no_len(header);
2230
2231 HMAP_FOR_EACH_IN_BUCKET (nfi, header_node, hash_uint64(header_no_len),
2232 &nxm_header_map) {
2233 if (header_no_len == nxm_no_len(nfi->nf.header)) {
2234 if (nxm_length(header) == nxm_length(nfi->nf.header) ||
2235 mf_from_id(nfi->nf.id)->variable_len) {
2236 return &nfi->nf;
2237 } else {
2238 return NULL;
2239 }
2240 }
2241 }
2242 return NULL;
2243 }
2244
2245 static const struct nxm_field *
2246 nxm_field_by_name(const char *name, size_t len)
2247 {
2248 const struct nxm_field_index *nfi;
2249
2250 nxm_init();
2251 HMAP_FOR_EACH_WITH_HASH (nfi, name_node, hash_bytes(name, len, 0),
2252 &nxm_name_map) {
2253 if (strlen(nfi->nf.name) == len && !memcmp(nfi->nf.name, name, len)) {
2254 return &nfi->nf;
2255 }
2256 }
2257 return NULL;
2258 }
2259
2260 static const struct nxm_field *
2261 nxm_field_by_mf_id(enum mf_field_id id, enum ofp_version version)
2262 {
2263 const struct nxm_field_index *nfi;
2264 const struct nxm_field *f;
2265
2266 nxm_init();
2267
2268 f = NULL;
2269 LIST_FOR_EACH (nfi, mf_node, &nxm_mf_map[id]) {
2270 if (!f || version >= nfi->nf.version) {
2271 f = &nfi->nf;
2272 }
2273 }
2274 return f;
2275 }